1. main.py
net = GNet(G_data.feat_dim, G_data.num_class, args)
trainer = Trainer(args, net, G_data)
trainer.train()
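These three lines are the whole entry point: build the network, wrap it in a trainer, train. For reference, here is a minimal sketch of the args namespace they assume; the field names are inferred from the attributes accessed in network.py and trainer.py below, while the values are hypothetical (the real ones come from the repo's argparse setup):

import torch
from types import SimpleNamespace

# Hypothetical values; only the field names are taken from the code below.
args = SimpleNamespace(
    act_n='ReLU', act_c='ReLU',        # activation class names looked up on torch.nn
    l_dim=48, h_dim=128, l_num=3,      # layer dim, hidden dim, number of layers
    drop_n=0.3, drop_c=0.2,            # dropout rates for the net and the classifier
    ks=[0.9, 0.8, 0.7],                # Graph U-Net pooling ratios
    batch=32, lr=0.001, num_epochs=200,
    acc_file='acc_results.txt',
)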
2. network.py
import torch.nn as nn

# GCN, GraphUnet and Initializer are defined elsewhere in this repo.
class GNet(nn.Module):
    def __init__(self, in_dim, n_classes, args):
        super(GNet, self).__init__()
        # args.act_n / args.act_c name activation classes on torch.nn (e.g. 'ReLU').
        self.n_act = getattr(nn, args.act_n)()
        self.c_act = getattr(nn, args.act_c)()
        # GCN framework: the inputs are the feature dim, layer dim, network
        # activation, and drop_n (the dropout rate of the GCN network itself).
        self.s_gcn = GCN(in_dim, args.l_dim, self.n_act, args.drop_n)
        self.g_unet = GraphUnet(args.ks, args.l_dim, args.l_dim, args.l_dim,
                                self.n_act, args.drop_n)
        # nn.Linear(in_features, out_features, bias=True) defines a linear layer:
        # in_features is the number of input neurons, out_features the number of
        # output neurons, bias whether to include a bias term.
        self.out_l_1 = nn.Linear(3 * args.l_dim * (args.l_num + 1), args.h_dim)
        self.out_l_2 = nn.Linear(args.h_dim, n_classes)
        # nn.Dropout(p=0.3): each neuron is dropped with probability 0.3.
        self.out_drop = nn.Dropout(p=args.drop_c)
        Initializer.weights_init(self)

    def forward(self, gs, hs, labels):
        print('GNet2: gs=', type(gs), len(gs), 'hs=', type(hs), len(hs),
              'labels:', type(labels), labels.shape)
        hs = self.embed(gs, hs)          # graph-level embedding (GCN + Graph U-Net)
        print('GNet2: hs=', type(hs), hs.shape)
        logits = self.classify(hs)       # two linear layers with dropout
        return self.metric(logits, labels)
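To make the nn.Linear / nn.Dropout / getattr remarks above concrete, here is a small self-contained check using only standard PyTorch, nothing from this repo:

import torch
import torch.nn as nn

act = getattr(nn, 'ReLU')()     # same lookup trick as args.act_n / args.act_c
lin = nn.Linear(4, 2)           # weight shape (2, 4), bias shape (2,)
drop = nn.Dropout(p=0.3)        # in training mode, zeroes each element with prob 0.3

x = torch.randn(5, 4)           # batch of 5 samples with 4 features
print(act(drop(lin(x))).shape)  # torch.Size([5, 2])

drop.eval()                     # in eval mode Dropout is the identity
print(torch.equal(drop(lin(x)), lin(x)))  # True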
3. trainer.py
class Trainer:
    # __init__ takes the args, the GCN net and the graph data, and stores them on self.
    def __init__(self, args, net, G_data):
        self.args = args
        self.net = net
        self.feat_dim = G_data.feat_dim
        self.fold_idx = G_data.fold_idx
        self.init(args, G_data.train_gs, G_data.test_gs)
        if torch.cuda.is_available():
            self.net.cuda()

    # init: build the train/test loaders and the optimizer from this fold's graphs.
    def init(self, args, train_gs, test_gs):
        print('#train: %d, #test: %d' % (len(train_gs), len(test_gs)))
        train_data = GraphData(train_gs, self.feat_dim)
        test_data = GraphData(test_gs, self.feat_dim)
        self.train_d = train_data.loader(self.args.batch, True)   # shuffle=True
        self.test_d = test_data.loader(self.args.batch, False)
        self.optimizer = optim.Adam(self.net.parameters(), lr=self.args.lr,
                                    amsgrad=True, weight_decay=0.0008)
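The optimizer line is plain torch.optim.Adam: amsgrad=True switches on the AMSGrad variant, and weight_decay adds L2 regularization. A minimal sketch of one update step on a toy model:

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)
optimizer = optim.Adam(model.parameters(), lr=0.001,
                       amsgrad=True, weight_decay=0.0008)

loss = model(torch.randn(8, 4)).pow(2).mean()  # dummy loss for illustration
optimizer.zero_grad()
loss.backward()
optimizer.step()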
    def train(self):
        max_acc = 0.0
        train_str = 'Train epoch %d: loss %.5f acc %.5f'
        test_str = 'Test epoch %d: loss %.5f acc %.5f max %.5f'
        line_str = '%d:\t%.5f\n'
        for e_id in range(self.args.num_epochs):
            self.net.train()
            loss, acc = self.run_epoch(e_id, self.train_d, self.net, self.optimizer)
            print(train_str % (e_id, loss, acc))
            with torch.no_grad():
                self.net.eval()
                loss, acc = self.run_epoch(e_id, self.test_d, self.net, None)
            max_acc = max(max_acc, acc)
            print(test_str % (e_id, loss, acc, max_acc))
        with open(self.args.acc_file, 'a+') as f:
            f.write(line_str % (self.fold_idx, max_acc))
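The test pass runs under torch.no_grad() with the net in eval() mode: no computation graph is built, and layers like Dropout switch to their inference behavior. A quick check of the no_grad part:

import torch

x = torch.ones(3, requires_grad=True)
with torch.no_grad():
    y = x * 2
print(y.requires_grad)  # False: no graph was recorded, so nothing to backprop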
    def run_epoch(self, epoch, data, model, optimizer):
        losses, accs, n_samples = [], [], 0
        for batch in tqdm(data, desc=str(epoch), unit='b'):
            cur_len, gs, hs, ys = batch
            gs, hs, ys = map(self.to_cuda, [gs, hs, ys])
            loss, acc = model(gs, hs, ys)
            losses.append(loss * cur_len)   # reweight per-batch means by batch size
            accs.append(acc * cur_len)
            n_samples += cur_len
            if optimizer is not None:       # None during evaluation: no update
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        avg_loss, avg_acc = sum(losses) / n_samples, sum(accs) / n_samples
        return avg_loss.item(), avg_acc.item()
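Note why run_epoch multiplies by cur_len before averaging: the model returns a per-batch mean, so batches of different sizes must be reweighted to recover the per-sample mean. A tiny numeric check:

# Two batches of sizes 3 and 1 with per-batch mean losses 2.0 and 6.0.
losses = [2.0 * 3, 6.0 * 1]
n_samples = 3 + 1
print(sum(losses) / n_samples)  # 3.0, the true per-sample mean; (2+6)/2 = 4 would be wrong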
4. GraphConvolution (not fully understood yet)
import math
import torch
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter


class GraphConvolution(Module):
    """Simple GCN layer, similar to https://arxiv.org/abs/1609.02907"""

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Why wrap these in Parameter? It registers the tensors on the Module,
        # so they appear in model.parameters(), receive gradients, and get
        # updated by the optimizer.
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init scaled by 1/sqrt(fan_out), as in the reference code.
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)   # X @ W
        output = torch.spmm(adj, support)        # sparse A @ (X @ W)
        if self.bias is not None:
            return output + self.bias
        else:
            return output
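So forward computes the Kipf & Welling propagation rule H' = A_hat H W (+ b), where A_hat is the normalized adjacency passed in as a sparse tensor. A minimal smoke test of the class above, with a made-up 3-node graph (the adjacency values are arbitrary; the repo normalizes the real adjacency elsewhere):

import torch

# Toy 3-node graph as a sparse adjacency; values assumed pre-normalized.
idx = torch.tensor([[0, 0, 1, 2], [0, 1, 1, 2]])
val = torch.tensor([0.5, 0.5, 1.0, 1.0])
adj = torch.sparse_coo_tensor(idx, val, (3, 3))

x = torch.randn(3, 4)            # node features: 3 nodes, 4 features each
layer = GraphConvolution(4, 2)
out = layer(x, adj)              # spmm(adj, x @ W) + b
print(out.shape)                 # torch.Size([3, 2])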