
Commit

review
charlottecvn committed Feb 14, 2024
1 parent 14966b3 commit 3ab89ce
Showing 5 changed files with 232 additions and 21 deletions.
5 changes: 0 additions & 5 deletions eval/hyperopt_gat.py
@@ -261,11 +261,6 @@ def objective(trial, n_trial, merged_dataset, data_order, txt_name):
lambda trial: objective(trial, trials, merged_dataset, data_order, txt_name),
n_trials=trials,
)

optuna.plot_intermediate_values(study)
optuna.plot_parallel_coordinate(study)
optuna.plot_contour(study)

print("Number of finished trials:", len(study.trials))
print("Best trial:", study.best_trial.params)
print(study.best_trial.number)
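Note on the three plotting calls deleted here (and the identical ones deleted from hyperopt_gcn.py and hyperopt_gin.py below): Optuna documents these helpers under optuna.visualization rather than at the package top level, so the removed lines would most likely have raised AttributeError as written. If the plots are still wanted after a study finishes, a minimal sketch is shown below; it assumes a Plotly-capable environment, and the objective here is a made-up stand-in for the scripts' own objective().

import optuna
from optuna.visualization import (
    plot_contour,
    plot_intermediate_values,
    plot_parallel_coordinate,
)

def dummy_objective(trial: optuna.Trial) -> float:
    # Stand-in for the real objective() defined in these scripts.
    x = trial.suggest_float("x", -10.0, 10.0)
    y = trial.suggest_float("y", -10.0, 10.0)
    value = (x - 2.0) ** 2 + (y + 1.0) ** 2
    for step in range(5):
        # plot_intermediate_values only has data if trials report intermediate values.
        trial.report(value, step)
    return value

study = optuna.create_study(direction="minimize")
study.optimize(dummy_objective, n_trials=20)

# Each helper returns a Plotly figure; show() needs a Plotly-capable environment.
plot_intermediate_values(study).show()
plot_parallel_coordinate(study).show()
plot_contour(study).show()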
5 changes: 0 additions & 5 deletions eval/hyperopt_gcn.py
@@ -259,11 +259,6 @@ def objective(trial, n_trial, merged_dataset, data_order, txt_name):
lambda trial: objective(trial, trials, merged_dataset, data_order, txt_name),
n_trials=trials,
)

optuna.plot_intermediate_values(study)
optuna.plot_parallel_coordinate(study)
optuna.plot_contour(study)

print("Number of finished trials:", len(study.trials))
print("Best trial:", study.best_trial.params)
print(study.best_trial.number)
5 changes: 0 additions & 5 deletions eval/hyperopt_gin.py
@@ -272,11 +272,6 @@ def objective(trial, n_trial, merged_dataset, data_order, txt_name):
lambda trial: objective(trial, trials, merged_dataset, data_order, txt_name),
n_trials=trials,
)

optuna.plot_intermediate_values(study)
optuna.plot_parallel_coordinate(study)
optuna.plot_contour(study)

print("Number of finished trials:", len(study.trials))
print("Best trial:", study.best_trial.params)
print(study.best_trial.number)
229 changes: 227 additions & 2 deletions graphnetwork/GAT_layers.py
@@ -14,10 +14,11 @@
from torch_geometric.typing import (
Adj,
NoneType,
OptPairTensor,
OptTensor,
Size,
OptPairTensor,
PairTensor,
SparseTensor,
Size,
torch_sparse,
)
from torch_geometric.utils import (
@@ -267,6 +268,7 @@ def edge_update(
if edge_attr is not None and self.lin_edge is not None:
if edge_attr.dim() == 1:
edge_attr = edge_attr.view(-1, 1)
#print(self.lin_edge, edge_attr.size())
edge_attr = self.lin_edge(edge_attr)
edge_attr = edge_attr.view(-1, self.heads, self.out_channels)
alpha_edge = (edge_attr * self.att_edge).sum(dim=-1)
@@ -303,3 +305,226 @@ def forward(self, x, edge_index):

def message(self, x_i):
return self.mlp(x_i)

class GATv2Conv(MessagePassing):
def __init__(
self,
in_channels: Union[int, Tuple[int, int]],
out_channels: int,
heads: int = 1,
concat: bool = True,
negative_slope: float = 0.2,
dropout: float = 0.0,
add_self_loops: bool = True,
edge_dim: Optional[int] = None,
fill_value: Union[float, Tensor, str] = 'mean',
bias: bool = True,
share_weights: bool = False,
**kwargs,
):
super().__init__(node_dim=0, **kwargs)

self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.concat = concat
self.negative_slope = negative_slope
self.dropout = dropout
self.add_self_loops = add_self_loops
self.edge_dim = edge_dim
self.fill_value = fill_value
self.share_weights = share_weights

if isinstance(in_channels, int):
self.lin_l = Linear(in_channels, heads * out_channels, bias=bias,
weight_initializer='glorot')
if share_weights:
self.lin_r = self.lin_l
else:
self.lin_r = Linear(in_channels, heads * out_channels,
bias=bias, weight_initializer='glorot')
else:
self.lin_l = Linear(in_channels[0], heads * out_channels,
bias=bias, weight_initializer='glorot')
if share_weights:
self.lin_r = self.lin_l
else:
self.lin_r = Linear(in_channels[1], heads * out_channels,
bias=bias, weight_initializer='glorot')

self.att = Parameter(torch.empty(1, heads, out_channels))

if edge_dim is not None:
self.lin_edge = Linear(edge_dim, heads * out_channels, bias=False,
weight_initializer='glorot')
else:
self.lin_edge = None

if bias and concat:
self.bias = Parameter(torch.empty(heads * out_channels))
elif bias and not concat:
self.bias = Parameter(torch.empty(out_channels))
else:
self.register_parameter('bias', None)

self.reset_parameters()

def reset_parameters(self):
super().reset_parameters()
self.lin_l.reset_parameters()
self.lin_r.reset_parameters()
if self.lin_edge is not None:
self.lin_edge.reset_parameters()
glorot(self.att)
zeros(self.bias)


@overload
def forward(
self,
x: Union[Tensor, PairTensor],
edge_index: Adj,
edge_attr: OptTensor = None,
return_attention_weights: NoneType = None,
) -> Tensor:
pass

@overload
def forward( # noqa: F811
self,
x: Union[Tensor, PairTensor],
edge_index: Tensor,
edge_attr: OptTensor = None,
return_attention_weights: bool = None,
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
pass

@overload
def forward( # noqa: F811
self,
x: Union[Tensor, PairTensor],
edge_index: SparseTensor,
edge_attr: OptTensor = None,
return_attention_weights: bool = None,
) -> Tuple[Tensor, SparseTensor]:
pass

def forward( # noqa: F811
self,
x: Union[Tensor, PairTensor],
edge_index: Adj,
edge_attr: OptTensor = None,
return_attention_weights: Optional[bool] = None,
) -> Union[
Tensor,
Tuple[Tensor, Tuple[Tensor, Tensor]],
Tuple[Tensor, SparseTensor],
]:
r"""Runs the forward pass of the module.
Args:
x (torch.Tensor or (torch.Tensor, torch.Tensor)): The input node
features.
edge_index (torch.Tensor or SparseTensor): The edge indices.
edge_attr (torch.Tensor, optional): The edge features.
(default: :obj:`None`)
return_attention_weights (bool, optional): If set to :obj:`True`,
will additionally return the tuple
:obj:`(edge_index, attention_weights)`, holding the computed
attention weights for each edge. (default: :obj:`None`)
"""
H, C = self.heads, self.out_channels

x_l: OptTensor = None
x_r: OptTensor = None
if isinstance(x, Tensor):
assert x.dim() == 2
x_l = self.lin_l(x).view(-1, H, C)
if self.share_weights:
x_r = x_l
else:
x_r = self.lin_r(x).view(-1, H, C)
else:
x_l, x_r = x[0], x[1]
assert x[0].dim() == 2
x_l = self.lin_l(x_l).view(-1, H, C)
if x_r is not None:
x_r = self.lin_r(x_r).view(-1, H, C)

assert x_l is not None
assert x_r is not None

if self.add_self_loops:
if isinstance(edge_index, Tensor):
num_nodes = x_l.size(0)
if x_r is not None:
num_nodes = min(num_nodes, x_r.size(0))
edge_index, edge_attr = remove_self_loops(
edge_index, edge_attr)
edge_index, edge_attr = add_self_loops(
edge_index, edge_attr, fill_value=self.fill_value,
num_nodes=num_nodes)
elif isinstance(edge_index, SparseTensor):
if self.edge_dim is None:
edge_index = torch_sparse.set_diag(edge_index)
else:
raise NotImplementedError(
"The usage of 'edge_attr' and 'add_self_loops' "
"simultaneously is currently not yet supported for "
"'edge_index' in a 'SparseTensor' form")

# edge_updater_type: (x: PairTensor, edge_attr: OptTensor)
alpha = self.edge_updater(edge_index, x=(x_l, x_r),
edge_attr=edge_attr)

# propagate_type: (x: PairTensor, alpha: Tensor)
out = self.propagate(edge_index, x=(x_l, x_r), alpha=alpha)

if self.concat:
out = out.view(-1, self.heads * self.out_channels)
else:
out = out.mean(dim=1)

if self.bias is not None:
out = out + self.bias

if isinstance(return_attention_weights, bool):
if isinstance(edge_index, Tensor):
if is_torch_sparse_tensor(edge_index):
# TODO TorchScript requires to return a tuple
adj = set_sparse_value(edge_index, alpha)
return out, (adj, alpha)
else:
return out, (edge_index, alpha)
elif isinstance(edge_index, SparseTensor):
return out, edge_index.set_value(alpha, layout='coo')
else:
return out


def edge_update(self, x_j: Tensor, x_i: Tensor, edge_attr: OptTensor,
index: Tensor, ptr: OptTensor,
dim_size: Optional[int]) -> Tensor:
x = x_i + x_j

if edge_attr is not None:
if edge_attr.dim() == 1:
edge_attr = edge_attr.view(-1, 1)
assert self.lin_edge is not None
#print(self.lin_edge, edge_attr.size())
edge_attr = self.lin_edge(edge_attr)
edge_attr = edge_attr.view(-1, self.heads, self.out_channels)
x = x + edge_attr

x = F.leaky_relu(x, self.negative_slope)
alpha = (x * self.att).sum(dim=-1)
alpha = softmax(alpha, index, ptr, dim_size)
alpha = F.dropout(alpha, p=self.dropout, training=self.training)
return alpha

def message(self, x_j: Tensor, alpha: Tensor) -> Tensor:
return x_j * alpha.unsqueeze(-1)

def __repr__(self) -> str:
return (f'{self.__class__.__name__}({self.in_channels}, '
f'{self.out_channels}, heads={self.heads})')
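
A minimal usage sketch of the GATv2Conv layer added above, on a made-up toy graph. The keyword names and return shapes follow the constructor and forward signature in this diff; the sizes and data are illustrative only.

import torch
from graphnetwork.GAT_layers import GATv2Conv  # the class added in this commit

# Toy graph: 4 nodes with 8-dim features, 5 directed edges with 16-dim edge features.
x = torch.randn(4, 8)
edge_index = torch.tensor([[0, 1, 2, 3, 0],
                           [1, 2, 3, 0, 2]])
edge_attr = torch.randn(5, 16)

conv = GATv2Conv(in_channels=8, out_channels=32, heads=2, edge_dim=16, dropout=0.1)

out = conv(x, edge_index, edge_attr)
# out: (4, 64), since concat=True stacks the two heads.

out, (ei, alpha) = conv(x, edge_index, edge_attr, return_attention_weights=True)
# alpha: (num_edges + added self-loops, heads) = (9, 2) with add_self_loops=True.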
9 changes: 5 additions & 4 deletions graphnetwork/GAT_model.py
@@ -12,7 +12,7 @@
)
from torch_geometric.nn import global_mean_pool, global_add_pool, global_max_pool

from graphnetwork.GAT_layers import GATConv, MLPembd
from graphnetwork.GAT_layers import GATv2Conv, MLPembd #GATConv


class GAT(torch.nn.Module):
@@ -24,7 +24,7 @@ def __init__(
hidden_channels_global=2,
out_channels_global=1,
num_layers=1,
edge_dim=8,
edge_dim=16,
dropout=0.15,
activation_function_gat="LeakyReLU",
activation_function_mlp="LeakyReLU",
@@ -69,6 +69,7 @@ def __init__(
)

self.in_channels_gat_x = in_channels_gat_x
self.in_channels_gat_edge = in_channels_gat_edge
self.hidden_channels_gat = hidden_channels_gat

# INPUT GAT BLOCK (edges)
Expand All @@ -94,7 +95,7 @@ def __init__(
# CORE GAT BLOCK
for i in range(num_layers):
self.gat_MLP_layers.append(
GATConv(
GATv2Conv(
in_channels=hidden_channels_gat,
out_channels=hidden_channels_gat,
edge_dim=edge_dim
@@ -142,7 +143,7 @@ def forward(self, x, edge_index, batch, edge_attr):

# GAT layers
for blocks_i in range(self.num_layers):
x = self.gat_MLP_layers[(blocks_i)](x, edge_index, edge_attr)
x = self.gat_MLP_layers[(blocks_i)+1](x, edge_index, edge_attr)
out_blocks.append(x)

# merge layers
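For context on the indexing change in forward(): the shift from gat_MLP_layers[blocks_i] to gat_MLP_layers[blocks_i + 1] only lines up if index 0 of the list holds the input block built before the core loop, which this diff does not show in full (it is consistent with the "# INPUT GAT BLOCK (edges)" section above, but that remains an assumption). A schematic sketch of that layout, with plain Linear layers standing in for the GAT blocks:

import torch
from torch import nn

# Assumed layout of self.gat_MLP_layers after __init__:
#   index 0               -> input GAT block (raw features -> hidden_channels_gat)
#   indices 1..num_layers -> core GATv2Conv blocks on hidden_channels_gat
gat_MLP_layers = nn.ModuleList()
gat_MLP_layers.append(nn.Linear(8, 32))        # stand-in for the input block
num_layers = 3
for _ in range(num_layers):
    gat_MLP_layers.append(nn.Linear(32, 32))   # stand-ins for the core blocks

x = torch.randn(4, 8)
x = gat_MLP_layers[0](x)                       # input block consumes index 0
for blocks_i in range(num_layers):
    x = gat_MLP_layers[blocks_i + 1](x)        # hence the +1 offset in forward()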
