-
Notifications
You must be signed in to change notification settings - Fork 32
/
Copy pathnetworks.py
52 lines (40 loc) · 1.46 KB
/
networks.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import torch, numpy as np
import torch.nn as nn, torch.nn.functional as F
from torch.autograd import Variable
from model import utils
class HyperGCN(nn.Module):
    """An l-layer hypergraph convolutional network (HyperGCN).

    Stacks ``utils.HyperGraphConvolution`` layers with ReLU activations and
    dropout between layers, ending in a log-softmax over classes.
    """

    def __init__(self, V, E, X, args):
        """
        V: hypergraph vertices (passed through to utils.Laplacian)
        E: hyperedges (used directly, or to pre-compute the Laplacian)
        X: initial node features (used only for the fast Laplacian)
        args: namespace providing
            d: initial node-feature dimension
            depth: number of layers l
            c: number of classes
            cuda, dataset, fast, mediators, dropout
        """
        super(HyperGCN, self).__init__()
        d, l, c = args.d, args.depth, args.c
        cuda = args.cuda and torch.cuda.is_available()

        # Hidden widths taper as powers of two: d -> 2**(l-i+2) -> ... -> c.
        # citeseer gets wider hidden layers (exponent bumped by 2).
        h = [d]
        for i in range(l - 1):
            power = l - i + 2
            if args.dataset == 'citeseer':
                power = l - i + 4
            h.append(2 ** power)
        h.append(c)

        if args.fast:
            # Fast variant: approximate the hypergraph Laplacian once, here,
            # and reuse the same structure at every layer.
            reapproximate = False
            structure = utils.Laplacian(V, E, X, args.mediators)
        else:
            # Slow variant: pass the raw hyperedges and let each layer
            # re-approximate the Laplacian from its current features.
            reapproximate = True
            structure = E

        self.layers = nn.ModuleList(
            [utils.HyperGraphConvolution(h[i], h[i + 1], reapproximate, cuda)
             for i in range(l)]
        )
        self.do, self.l = args.dropout, args.depth
        self.structure, self.m = structure, args.mediators

    def forward(self, H):
        """Propagate node features H through all l layers.

        Returns log-softmax class scores of shape (num_nodes, c).
        """
        do, l, m = self.do, self.l, self.m
        for i, hidden in enumerate(self.layers):
            H = F.relu(hidden(self.structure, H, m))
            # Dropout after every layer except the last.
            # (Removed a dead `V = H` store that shadowed the __init__ param.)
            if i < l - 1:
                H = F.dropout(H, do, training=self.training)
        return F.log_softmax(H, dim=1)