# Demonstrate the effect of one round of message passing: print the graph
# readout value before and after running the computation.
g_better = an_interesting_graph()
graph_sum = readout(g_better)
print("graph sum before send() and recv() is: ", graph_sum)
# NOTE(review): presumably super_useful_comp() runs send()/recv() and updates
# g_better's node data in place — verify against its definition.
super_useful_comp(g_better)
graph_sum = readout(g_better)
print("graph sum after send() and recv() is: ", graph_sum)
# After the change: train the network with a standard optimization loop.
# Train the network: full-batch gradient descent with Adam for 30 epochs.
# Loss is computed only on the labeled nodes (semi-supervised setting).
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
all_logits = []
for epoch in range(30):
    logits = net(G, inputs)
    # We save the logits for visualization later; detach() so the stored
    # tensors do not keep the autograd graph alive across epochs.
    all_logits.append(logits.detach())
    logp = F.log_softmax(logits, 1)
    # We only compute loss for labeled nodes.
    loss = F.nll_loss(logp[labeled_nodes], labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print("Epoch %d | Loss: %.4f" % (epoch, loss.item()))
# ------------------------------------------------------------------------------