DGL Tutorial (1): Running a Minimal GCN with DGL

We run DGL on a graph that has no intrinsic node features; the input features are learned embeddings indexed by node ID.

Install DGL:

pip install dgl
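
This installs the CPU build, which is all this post needs. GPU builds are distributed per CUDA version under separate package names; the command below is an example only (check https://www.dgl.ai for the one matching your setup):

pip install dgl-cu102  # hypothetical example; pick the package for your CUDA version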

Required packages:

import dgl
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F

Build the undirected graph:

def build_karate_club_graph():
    # All 78 edges are stored in two numpy arrays. One for source endpoints
    # while the other for destination endpoints.
    src = np.array([1, 2, 2, 3, 3, 3, 4, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 9, 10, 10,
                    10, 11, 12, 12, 13, 13, 13, 13, 16, 16, 17, 17, 19, 19, 21, 21,
                    25, 25, 27, 27, 27, 28, 29, 29, 30, 30, 31, 31, 31, 31, 32, 32,
                    32, 32, 32, 32, 32, 32, 32, 32, 32, 33, 33, 33, 33, 33, 33, 33,
                    33, 33, 33, 33, 33, 33, 33, 33, 33, 33])
    dst = np.array([0, 0, 1, 0, 1, 2, 0, 0, 0, 4, 5, 0, 1, 2, 3, 0, 2, 2, 0, 4,
                    5, 0, 0, 3, 0, 1, 2, 3, 5, 6, 0, 1, 0, 1, 0, 1, 23, 24, 2, 23,
                    24, 2, 23, 26, 1, 8, 0, 24, 25, 28, 2, 8, 14, 15, 18, 20, 22, 23,
                    29, 30, 31, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30,
                    31, 32])
    # Edges are directional in DGL; make them bi-directional.
    u = np.concatenate([src, dst])
    v = np.concatenate([dst, src])
    # Construct a DGLGraph
    return dgl.DGLGraph((u, v))

G = build_karate_club_graph()
print("G中节点数 %d."% G.number_of_nodes()) # 34
print("G中边数 %d."% G.number_of_edges()) # 156

Convert to NetworkX for visualization:

def visual(G):
    # visualize the graph with NetworkX
    nx_G = G.to_networkx().to_undirected()
    pos = nx.kamada_kawai_layout(nx_G)  # compute node positions with the Kamada-Kawai layout
    nx.draw(nx_G, pos, with_labels=True, node_color=[[.7, .7, .7]])
    plt.pause(10)
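
To show the plot, call the helper (main() in the full code below does the same); note that nx.kamada_kawai_layout requires SciPy to be installed:

visual(G)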

(Figure 1: the karate club graph drawn with NetworkX)

Give every node an embedding and use it as the GCN's input features:

# learn an embedding for each of the 34 nodes, embedding dim = 5
embed = nn.Embedding(34, 5)
print(embed.weight)
G.ndata['feat'] = embed.weight
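
Individual node features can be read back out of G.ndata, as the full code below also demonstrates:

# print out node 2's input feature
print(G.ndata['feat'][2])
# print out nodes 10 and 11's input features
print(G.ndata['feat'][[10, 11]])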

Define and train the GCN:
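
The model is two GraphConv layers (this class also appears in the full code at the end of the post):

from dgl.nn.pytorch import GraphConv

class GCN(nn.Module):
    def __init__(self, in_feats, hidden_size, num_classes):
        super(GCN, self).__init__()
        self.conv1 = GraphConv(in_feats, hidden_size)
        self.conv2 = GraphConv(hidden_size, num_classes)

    def forward(self, g, inputs):
        h = self.conv1(g, inputs)
        h = torch.relu(h)
        h = self.conv2(g, h)
        return h

Each GraphConv layer aggregates the degree-normalized features of a node's neighbors and applies a learned linear map, so two stacked layers let label information propagate two hops through the graph. The training loop below is semi-supervised: only node 0 (the instructor) and node 33 (the club president) are labeled, one per community.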

def train(G, inputs, embed, labeled_nodes, labels):
    net = GCN(5, 5, 2)
    import itertools

    # jointly optimize the GCN weights and the node embeddings
    optimizer = torch.optim.Adam(itertools.chain(net.parameters(), embed.parameters()), lr=0.01)
    all_logits = []
    for epoch in range(50):
        logits = net(G, inputs)
        # save the logits so we can visualize the training progress later
        all_logits.append(logits.detach())  # detach() returns a tensor cut off from the autograd graph
        logp = F.log_softmax(logits, 1)
        # semi-supervised: only the labeled nodes contribute to the loss
        loss = F.nll_loss(logp[labeled_nodes], labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        print('Epoch %d | Loss: %.4f' % (epoch, loss.item()))


    print(all_logits)

train(G, embed.weight, embed, torch.tensor([0, 33]), torch.tensor([0, 1]))

Training log:
Epoch 0 | Loss: 0.9247
Epoch 1 | Loss: 0.8673
Epoch 2 | Loss: 0.8160
Epoch 3 | Loss: 0.7713
Epoch 4 | Loss: 0.7328
Epoch 5 | Loss: 0.6999
Epoch 6 | Loss: 0.6748
Epoch 7 | Loss: 0.6551
Epoch 8 | Loss: 0.6392
Epoch 9 | Loss: 0.6252
Epoch 10 | Loss: 0.6120
Epoch 11 | Loss: 0.5989
Epoch 12 | Loss: 0.5854
Epoch 13 | Loss: 0.5713
Epoch 14 | Loss: 0.5559
Epoch 15 | Loss: 0.5391
Epoch 16 | Loss: 0.5210
Epoch 17 | Loss: 0.5031
Epoch 18 | Loss: 0.4867
Epoch 19 | Loss: 0.4696
Epoch 20 | Loss: 0.4522
Epoch 21 | Loss: 0.4347
Epoch 22 | Loss: 0.4168
Epoch 23 | Loss: 0.3987
Epoch 24 | Loss: 0.3808
Epoch 25 | Loss: 0.3627
Epoch 26 | Loss: 0.3448
Epoch 27 | Loss: 0.3269
Epoch 28 | Loss: 0.3090
Epoch 29 | Loss: 0.2913
Epoch 30 | Loss: 0.2738
Epoch 31 | Loss: 0.2566
Epoch 32 | Loss: 0.2396
Epoch 33 | Loss: 0.2230
Epoch 34 | Loss: 0.2069
Epoch 35 | Loss: 0.1913
Epoch 36 | Loss: 0.1762
Epoch 37 | Loss: 0.1618
Epoch 38 | Loss: 0.1479
Epoch 39 | Loss: 0.1347
Epoch 40 | Loss: 0.1224
Epoch 41 | Loss: 0.1111
Epoch 42 | Loss: 0.1007
Epoch 43 | Loss: 0.0910
Epoch 44 | Loss: 0.0822
Epoch 45 | Loss: 0.0742
Epoch 46 | Loss: 0.0670
Epoch 47 | Loss: 0.0605
Epoch 48 | Loss: 0.0546
Epoch 49 | Loss: 0.0494
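
After the final epoch, the logits give each node's predicted community. A minimal sketch, assuming train is changed to end with `return all_logits`:

all_logits = train(G, embed.weight, embed, torch.tensor([0, 33]), torch.tensor([0, 1]))
final = all_logits[-1]       # logits from the last epoch, shape (34, 2)
pred = final.argmax(dim=1)   # predicted community for each of the 34 nodes
print(pred)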

Visualize the classification result at each epoch:

def draw(i):
    # assumes all_logits (returned from train) and nx_G, ax (defined below) are in scope
    cls1color = '#00FFFF'
    cls2color = '#FF00FF'
    pos = {}
    colors = []
    for v in range(34):
        # use node v's two logits at epoch i as its 2-D plotting position
        pos[v] = all_logits[i][v].numpy()
        # color the node by its currently predicted class
        cls = pos[v].argmax()
        colors.append(cls1color if cls else cls2color)
    ax.cla()
    ax.axis('off')
    ax.set_title('Epoch: %d' % i)
    nx.draw_networkx(nx_G.to_undirected(), pos, node_color=colors,
                     with_labels=True, node_size=300, ax=ax)
nx_G = G.to_networkx().to_undirected()
fig = plt.figure(dpi=150)
fig.clf()
ax = fig.subplots()
for i in range(50):
    draw(i)
    plt.pause(0.2)

plt.show()
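
If stepping through the frames with plt.pause is inconvenient, the same draw function can feed matplotlib's FuncAnimation (an optional alternative, not part of the original post; saving a GIF requires the pillow package):

from matplotlib.animation import FuncAnimation

ani = FuncAnimation(fig, draw, frames=50, interval=200)  # one frame per epoch
ani.save('karate_gcn.gif', writer='pillow')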

(Figure: nodes colored by their predicted class as training progresses)

Full code:

import dgl
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F

def build_karate_club_graph():
    # All 78 edges are stored in two numpy arrays. One for source endpoints
    # while the other for destination endpoints.
    src = np.array([1, 2, 2, 3, 3, 3, 4, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 9, 10, 10,
                    10, 11, 12, 12, 13, 13, 13, 13, 16, 16, 17, 17, 19, 19, 21, 21,
                    25, 25, 27, 27, 27, 28, 29, 29, 30, 30, 31, 31, 31, 31, 32, 32,
                    32, 32, 32, 32, 32, 32, 32, 32, 32, 33, 33, 33, 33, 33, 33, 33,
                    33, 33, 33, 33, 33, 33, 33, 33, 33, 33])
    dst = np.array([0, 0, 1, 0, 1, 2, 0, 0, 0, 4, 5, 0, 1, 2, 3, 0, 2, 2, 0, 4,
                    5, 0, 0, 3, 0, 1, 2, 3, 5, 6, 0, 1, 0, 1, 0, 1, 23, 24, 2, 23,
                    24, 2, 23, 26, 1, 8, 0, 24, 25, 28, 2, 8, 14, 15, 18, 20, 22, 23,
                    29, 30, 31, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30,
                    31, 32])
    # Edges are directional in DGL; make them bi-directional.
    u = np.concatenate([src, dst])
    v = np.concatenate([dst, src])
    # Construct a DGLGraph
    return dgl.DGLGraph((u, v))

def visual(G):
    # visualize the graph with NetworkX
    nx_G = G.to_networkx().to_undirected()
    pos = nx.kamada_kawai_layout(nx_G)  # compute node positions with the Kamada-Kawai layout
    nx.draw(nx_G, pos, with_labels=True, node_color=[[.7, .7, .7]])
    plt.pause(10)

from dgl.nn.pytorch import GraphConv
class GCN(nn.Module):
    def __init__(self, in_feats, hidden_size, num_classes):
        super(GCN, self).__init__()
        self.conv1 = GraphConv(in_feats, hidden_size)
        self.conv2 = GraphConv(hidden_size, num_classes)

    def forward(self, g, inputs):
        h = self.conv1(g, inputs)
        h = torch.relu(h)
        h = self.conv2(g, h)
        return h

def train(G, inputs, embed, labeled_nodes, labels):
    net = GCN(5, 5, 2)
    import itertools

    # jointly optimize the GCN weights and the node embeddings
    optimizer = torch.optim.Adam(itertools.chain(net.parameters(), embed.parameters()), lr=0.01)
    all_logits = []
    for epoch in range(50):
        logits = net(G, inputs)
        # save the logits so we can visualize the training progress later
        all_logits.append(logits.detach())  # detach() returns a tensor cut off from the autograd graph
        logp = F.log_softmax(logits, 1)
        # semi-supervised: only the labeled nodes contribute to the loss
        loss = F.nll_loss(logp[labeled_nodes], labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        print('Epoch %d | Loss: %.4f' % (epoch, loss.item()))


    print(all_logits)


    def draw(i):
        cls1color = '#00FFFF'
        cls2color = '#FF00FF'
        pos = {}
        colors = []
        for v in range(34):
            # use node v's two logits at epoch i as its 2-D plotting position
            pos[v] = all_logits[i][v].numpy()
            # color the node by its currently predicted class
            cls = pos[v].argmax()
            colors.append(cls1color if cls else cls2color)
        ax.cla()
        ax.axis('off')
        ax.set_title('Epoch: %d' % i)
        nx.draw_networkx(nx_G.to_undirected(), pos, node_color=colors,
                         with_labels=True, node_size=300, ax=ax)
    nx_G = G.to_networkx().to_undirected()
    fig = plt.figure(dpi=150)
    fig.clf()
    ax = fig.subplots()
    for i in range(50):
        draw(i)
        plt.pause(0.2)

    plt.show()
def main():
    G = build_karate_club_graph()
    print("G中节点数 %d."% G.number_of_nodes())
    print("G中边数 %d."% G.number_of_edges())

    visual(G)

    # learn an embedding for each of the 34 nodes, embedding dim = 5
    embed = nn.Embedding(34, 5)
    print(embed.weight)
    G.ndata['feat'] = embed.weight

    # print out node 2's input feature
    print(G.ndata['feat'][2])
    # print out nodes 10 and 11's input features
    print(G.ndata['feat'][[10, 11]])

    train(G, embed.weight, embed, torch.tensor([0, 33]), torch.tensor([0, 1]))


if __name__ == '__main__':
    main()
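
One caveat: results vary from run to run because nn.Embedding and the GCN weights are randomly initialized. For a reproducible demo, you could seed PyTorch once before building the model (an optional addition, not in the original post):

torch.manual_seed(0)  # e.g. as the first line of main()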
