Skip to content

Graph

rydberggpt.models.graph_embedding

layers

GraphLayer

Bases: Module

Source code in src/rydberggpt/models/graph_embedding/layers.py
class GraphLayer(nn.Module):
    """Single graph-network layer: graph convolution, normalization, ReLU, dropout.

    Args:
        graph_layer (nn.Module): The graph convolution to apply, e.g. GCNConv
            or GATConv.
        norm_layer (nn.Module): Normalization applied to the convolution
            output, e.g. LayerNorm or BatchNorm.
        dropout (float): Dropout probability applied after the activation.
    """

    def __init__(self, graph_layer: nn.Module, norm_layer: nn.Module, dropout: float):
        super(GraphLayer, self).__init__()
        self.graph_layer = graph_layer
        self.norm = norm_layer
        self.dropout = nn.Dropout(dropout)

    def forward(
        self, x: torch.Tensor, edge_index: Adj, edge_attr: OptTensor
    ) -> torch.Tensor:
        """Run the convolution, then normalization + ReLU, then dropout.

        Args:
            x (torch.Tensor): Node feature matrix.
            edge_index (Adj): Edge indices.
            edge_attr (OptTensor): Edge feature matrix.

        Returns:
            (torch.Tensor): The transformed node features.
        """
        out = self.graph_layer(x, edge_index, edge_attr)
        out = self.norm(out)
        out = F.relu(out)
        return self.dropout(out)
__init__(graph_layer: nn.Module, norm_layer: nn.Module, dropout: float)

A GraphLayer is a single layer in a graph neural network, consisting of a graph layer, normalization layer, and dropout.

Parameters:

Name Type Description Default
graph_layer Module

A graph layer, e.g., GCNConv, GATConv, etc.

required
norm_layer Module

A normalization layer, e.g., LayerNorm or BatchNorm.

required
dropout float

Dropout probability.

required
Source code in src/rydberggpt/models/graph_embedding/layers.py
def __init__(self, graph_layer: nn.Module, norm_layer: nn.Module, dropout: float):
    """Initialize one graph-network layer.

    Bundles a graph convolution with its normalization layer and a dropout
    module applied after the activation.

    Args:
        graph_layer (nn.Module): The graph convolution, e.g. GCNConv or GATConv.
        norm_layer (nn.Module): Normalization layer, e.g. LayerNorm or BatchNorm.
        dropout (float): Dropout probability.
    """
    super(GraphLayer, self).__init__()
    # NOTE: assignment order is kept as-is; it fixes the submodule
    # registration order (and hence state_dict key order).
    self.graph_layer = graph_layer
    self.norm = norm_layer
    self.dropout = nn.Dropout(dropout)
forward(x: torch.Tensor, edge_index: Adj, edge_attr: OptTensor) -> torch.Tensor

Forward pass through the GraphLayer.

Parameters:

Name Type Description Default
x Tensor

Node feature matrix.

required
edge_index Adj

Edge indices.

required
edge_attr OptTensor

Edge feature matrix.

required

Returns:

Type Description
Tensor

The output tensor after passing through the GraphLayer.

Source code in src/rydberggpt/models/graph_embedding/layers.py
def forward(
    self, x: torch.Tensor, edge_index: Adj, edge_attr: OptTensor
) -> torch.Tensor:
    """Apply convolution, normalization + ReLU, then dropout to node features.

    Args:
        x (torch.Tensor): Node feature matrix.
        edge_index (Adj): Edge indices.
        edge_attr (OptTensor): Edge feature matrix.

    Returns:
        (torch.Tensor): The transformed node features.
    """
    out = self.graph_layer(x, edge_index, edge_attr)
    out = self.norm(out)
    out = F.relu(out)
    return self.dropout(out)

models

GraphEmbedding

Bases: Module

Source code in src/rydberggpt/models/graph_embedding/models.py
class GraphEmbedding(torch.nn.Module):
    def __init__(
        self,
        graph_layer: Type[Callable],
        in_node_dim: int,
        d_hidden: int,
        d_model: int,
        num_layers: int,
        dropout: float = 0.1,
    ) -> None:
        """
        GraphEmbedding class for creating a graph embedding with multiple layers.

        The stack is: one input layer (in_node_dim -> d_hidden),
        ``num_layers - 2`` hidden layers (d_hidden -> d_hidden), and a final
        bare graph layer (d_hidden -> d_model) normalized in ``forward``.

        Args:
            graph_layer (Type[Callable]): The graph layer class instantiated
                for every layer of the embedding.
            in_node_dim (int): The input node dimension. (omega, delta, beta)
            d_hidden (int): The hidden dimension size.
            d_model (int): The output node dimension.
            num_layers (int): The number of layers in the graph embedding.
                NOTE: the input and output layers are always created, so the
                effective depth is ``max(num_layers, 2)``.
            dropout (float, optional): The dropout rate. Defaults to 0.1.
        """
        super(GraphEmbedding, self).__init__()

        self.graph_layer = graph_layer
        self.layers = ModuleList()
        # Input layer: project raw node features into the hidden dimension.
        self.layers.append(
            GraphLayer(
                self.graph_layer(in_node_dim, d_hidden), LayerNorm(d_hidden), dropout
            )
        )

        # Hidden layers; the input and output layers account for the other two.
        for _ in range(num_layers - 2):
            self.layers.append(
                GraphLayer(
                    self.graph_layer(d_hidden, d_hidden), LayerNorm(d_hidden), dropout
                )
            )

        # Output layer: a bare graph layer; its norm is applied in forward().
        self.layers.append(self.graph_layer(d_hidden, d_model))
        self.final_norm = LayerNorm(d_model)

    def forward(self, data: Data) -> "tuple[Tensor, Tensor]":
        """
        Forward pass through the graph embedding layers.

        Args:
            data (Data): The input graph data (node features ``x``,
                ``edge_index``, ``edge_attr``, and the ``batch`` assignment).

        Returns:
            (tuple[Tensor, Tensor]): The dense node embeddings of shape
            [B, N, d_model], where N is the max number of nodes (atoms) per
            graph in the batch, and the boolean mask marking valid
            (non-padded) node positions.
        """
        # [..., num_features], [2, ...] [...]
        x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr

        for layer in self.layers[:-1]:
            # [..., num_features]
            x = layer(x, edge_index, edge_attr)

        # [..., d_model]
        x = self.final_norm(self.layers[-1](x, edge_index, edge_attr))

        # Pad the per-graph node sets into one dense batch tensor plus mask.
        x, batch_mask = to_dense_batch(x, data.batch)

        # [B, N, d_model], where N is the number of nodes or the number of atoms
        return x, batch_mask
__init__(graph_layer: Type[Callable], in_node_dim: int, d_hidden: int, d_model: int, num_layers: int, dropout: float = 0.1) -> None

GraphEmbedding class for creating a graph embedding with multiple layers.

Parameters:

Name Type Description Default
graph_layer Type[Callable]

The graph layer to be used in the embedding.

required
in_node_dim int

The input node dimension. (omega, delta, beta)

required
d_hidden int

The hidden dimension size.

required
d_model int

The output node dimension.

required
num_layers int

The number of layers in the graph embedding.

required
dropout float

The dropout rate. Defaults to 0.1.

0.1
Source code in src/rydberggpt/models/graph_embedding/models.py
def __init__(
    self,
    graph_layer: Type[Callable],
    in_node_dim: int,
    d_hidden: int,
    d_model: int,
    num_layers: int,
    dropout: float = 0.1,
) -> None:
    """
    Build a multi-layer graph embedding.

    Args:
        graph_layer (Type[Callable]): The graph layer class instantiated for
            every layer of the embedding.
        in_node_dim (int): The input node dimension. (omega, delta, beta)
        d_hidden (int): The hidden dimension size.
        d_model (int): The output node dimension.
        num_layers (int): The number of layers in the graph embedding.
        dropout (float, optional): The dropout rate. Defaults to 0.1.
    """
    super(GraphEmbedding, self).__init__()

    self.graph_layer = graph_layer
    self.layers = ModuleList()

    # One input layer (in_node_dim -> d_hidden) plus num_layers - 2 hidden
    # layers (d_hidden -> d_hidden), each wrapped with LayerNorm and dropout.
    input_dims = [in_node_dim] + [d_hidden] * (num_layers - 2)
    for dim_in in input_dims:
        self.layers.append(
            GraphLayer(
                self.graph_layer(dim_in, d_hidden), LayerNorm(d_hidden), dropout
            )
        )

    # Final bare graph layer (d_hidden -> d_model); normalized in forward().
    self.layers.append(self.graph_layer(d_hidden, d_model))
    self.final_norm = LayerNorm(d_model)
forward(data: Data) -> Tensor

Forward pass through the graph embedding layers.

Parameters:

Name Type Description Default
data Data

The input graph data.

required

Returns:

Type Description
tuple[Tensor, Tensor]

The dense node-embedding tensor of shape [B, N, d_model] together with the boolean batch mask marking valid (non-padded) node positions.

Source code in src/rydberggpt/models/graph_embedding/models.py
def forward(self, data: Data) -> "tuple[Tensor, Tensor]":
    """
    Forward pass through the graph embedding layers.

    Args:
        data (Data): The input graph data (node features ``x``,
            ``edge_index``, ``edge_attr``, and the ``batch`` assignment).

    Returns:
        (tuple[Tensor, Tensor]): The dense node embeddings of shape
        [B, N, d_model], where N is the max number of nodes (atoms) per graph
        in the batch, and the boolean mask marking valid (non-padded) node
        positions.
    """
    # [..., num_features], [2, ...] [...]
    x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr

    for layer in self.layers[:-1]:
        # [..., num_features]
        x = layer(x, edge_index, edge_attr)

    # [..., d_model]
    x = self.final_norm(self.layers[-1](x, edge_index, edge_attr))

    # Pad the per-graph node sets into one dense batch tensor plus mask.
    x, batch_mask = to_dense_batch(x, data.batch)

    # [B, N, d_model], where N is the number of nodes or the number of atoms
    return x, batch_mask