print("graph sum before send() and recv() is: ", graph_sum)
super_useful_comp(g_better)
graph_sum = readout(g_better)
print("graph sum after send() and recv() is: ", graph_sum)
###############################################################################
# Data preparation and initialization
# -----------------------------------
# We initialize the node features with one-hot encoding, and only the
# instructor (node 0) and the club president (node 33) are labeled.
import torch
import torch.nn.functional as F

inputs = torch.eye(34)  # one-hot feature vector, one row per node
labeled_nodes = torch.tensor([0, 33])  # only the instructor and the president nodes are labeled
labels = torch.tensor([0, 1])  # their labels are different
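###############################################################################
# The training loop below uses a network ``net`` that is not defined in this
# section. As a minimal sketch (assuming a two-layer GCN with hidden size 5
# and two output classes, built from DGL's ``GraphConv``), it could look like
# this:

import torch.nn as nn
from dgl.nn.pytorch import GraphConv

class GCN(nn.Module):
    def __init__(self, in_feats, hidden_size, num_classes):
        super(GCN, self).__init__()
        self.conv1 = GraphConv(in_feats, hidden_size)
        self.conv2 = GraphConv(hidden_size, num_classes)

    def forward(self, g, inputs):
        # two graph-convolution layers with a ReLU in between
        h = torch.relu(self.conv1(g, inputs))
        return self.conv2(g, h)

net = GCN(34, 5, 2)  # 34 input features (one-hot), 2 communities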
###############################################################################
# The training loop is no fancier than that of any other NN model: we
# (1) create an optimizer, (2) feed the inputs to the model, (3) calculate
# the loss, and (4) use autograd to optimize the model.
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
all_logits = []
for epoch in range(30):
    logits = net(G, inputs)
    # we save the logits for visualization later
    all_logits.append(logits.detach())
    logp = F.log_softmax(logits, 1)
    # we only compute loss for labeled nodes
    loss = F.nll_loss(logp[labeled_nodes], labels)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
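###############################################################################
# A quick sanity check after training (a sketch, not part of the loop above):
# each node's predicted community is the argmax of its final logits, and the
# two labeled nodes should recover their own labels.

final_logits = all_logits[-1]
pred = final_logits.argmax(dim=1)
print('predicted classes:', pred.tolist())
print('labeled nodes correct:', bool((pred[labeled_nodes] == labels).all()))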