diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml
index 0487fb53c07..9c428ef9d07 100644
--- a/conda/environments/all_cuda-118_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -6,6 +6,8 @@ channels:
 - dask/label/dev
 - conda-forge
 - nvidia
+- pytorch
+- dglteam/label/cu118
 dependencies:
 - aiohttp
 - c-compiler
diff --git a/dependencies.yaml b/dependencies.yaml
index 87edcc62683..9b858999743 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -170,12 +170,21 @@ files:
       # list is really minimal or if it is a superset.
       - test_python_common
       - test_python_cugraph
+  cugraph_dgl_dev:
+    matrix:
+      cuda: ["11.8"]
+    output: conda
+    conda_dir: python/cugraph-dgl/conda
+    includes:
+      - cugraph_dgl_dev
 channels:
   - rapidsai
   - rapidsai-nightly
   - dask/label/dev
   - conda-forge
   - nvidia
+  - pytorch
+  - dglteam/label/cu118
 dependencies:
   checks:
     common:
@@ -418,3 +427,15 @@ dependencies:
       - output_types: [conda, pyproject]
         packages:
           - *cudf
+  cugraph_dgl_dev:
+    common:
+      - output_types: [conda]
+        packages:
+          - cugraph==23.8.*
+          - pylibcugraphops==23.8.*
+          - pytorch>=2.0
+          - pytorch-cuda==11.8
+          - dgl>=1.1.0.cu*
+          - setuptools
+          - pre-commit
+          - pytest
diff --git a/python/cugraph-dgl/conda/cugraph_dgl_dev_11.6.yml b/python/cugraph-dgl/conda/cugraph_dgl_dev_11.6.yml
deleted file mode 100644
index 74b16921291..00000000000
--- a/python/cugraph-dgl/conda/cugraph_dgl_dev_11.6.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-name: cugraph_dgl_dev
-channels:
-- rapidsai-nightly
-- rapidsai
-- pytorch
-- conda-forge
-- nvidia
-- dglteam
-
-dependencies:
-- cudatoolkit=11.6
-- cugraph=23.02*
-- pylibcugraphops=23.02.*
-- pytorch=1.12.0
-- dgl-cuda11.6
-- setuptools
-- pre-commit
-- pytest
diff --git a/python/cugraph-dgl/conda/cugraph_dgl_dev_cuda-118.yaml b/python/cugraph-dgl/conda/cugraph_dgl_dev_cuda-118.yaml
new file mode 100644
index 00000000000..a252d5e0c78
--- /dev/null
+++ b/python/cugraph-dgl/conda/cugraph_dgl_dev_cuda-118.yaml
@@ -0,0 +1,20 @@
+# This file is generated by `rapids-dependency-file-generator`.
+# To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`.
+channels:
+- rapidsai
+- rapidsai-nightly
+- dask/label/dev
+- conda-forge
+- nvidia
+- pytorch
+- dglteam/label/cu118
+dependencies:
+- cugraph==23.8.*
+- dgl>=1.1.0.cu*
+- pre-commit
+- pylibcugraphops==23.8.*
+- pytest
+- pytorch-cuda==11.8
+- pytorch>=2.0
+- setuptools
+name: cugraph_dgl_dev_cuda-118
diff --git a/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py b/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py
index 7825febc24b..239def5b677 100644
--- a/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py
+++ b/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py
@@ -19,12 +19,10 @@
 from cugraph_dgl.nn.conv.base import BaseConv
 from cugraph.utilities.utils import import_optional
 
-from pylibcugraphops.pytorch import CSC
-from pylibcugraphops.pytorch.operators import mha_gat_n2n
-
 dgl = import_optional("dgl")
 torch = import_optional("torch")
 nn = import_optional("torch.nn")
+ops_torch = import_optional("pylibcugraphops.pytorch")
 
 
 class GATConv(BaseConv):
@@ -179,7 +177,7 @@ def forward(
         bipartite = not isinstance(nfeat, torch.Tensor)
         offsets, indices, _ = g.adj_tensors("csc")
 
-        graph = CSC(
+        graph = ops_torch.CSC(
             offsets=offsets,
             indices=indices,
             num_src_nodes=g.num_src_nodes(),
@@ -212,7 +210,7 @@ def forward(
                 )
             nfeat = self.fc(nfeat)
 
-        out = mha_gat_n2n(
+        out = ops_torch.operators.mha_gat_n2n(
             (nfeat_src, nfeat_dst) if bipartite else nfeat,
             self.attn_weights,
             graph,
diff --git a/python/cugraph-dgl/cugraph_dgl/nn/conv/transformerconv.py b/python/cugraph-dgl/cugraph_dgl/nn/conv/transformerconv.py
index 141adc86069..5cd5fbbaebe 100644
--- a/python/cugraph-dgl/cugraph_dgl/nn/conv/transformerconv.py
+++ b/python/cugraph-dgl/cugraph_dgl/nn/conv/transformerconv.py
@@ -15,12 +15,10 @@
 from cugraph_dgl.nn.conv.base import BaseConv
 from cugraph.utilities.utils import import_optional
 
-from pylibcugraphops.pytorch import CSC
-from pylibcugraphops.pytorch.operators import mha_simple_n2n
-
 dgl = import_optional("dgl")
 torch = import_optional("torch")
 nn = import_optional("torch.nn")
+ops_torch = import_optional("pylibcugraphops.pytorch")
 
 
 class TransformerConv(BaseConv):
@@ -133,7 +131,7 @@ def forward(
             Edge feature tensor. Default: ``None``.
""" offsets, indices, _ = g.adj_tensors("csc") - graph = CSC( + graph = ops_torch.CSC( offsets=offsets, indices=indices, num_src_nodes=g.num_src_nodes(), @@ -155,7 +153,7 @@ def forward( ) efeat = self.lin_edge(efeat) - out = mha_simple_n2n( + out = ops_torch.operators.mha_simple_n2n( key_emb=key, query_emb=query, value_emb=value, diff --git a/python/cugraph-pyg/cugraph_pyg/nn/conv/base.py b/python/cugraph-pyg/cugraph_pyg/nn/conv/base.py index 207efcdace4..2639f66f440 100644 --- a/python/cugraph-pyg/cugraph_pyg/nn/conv/base.py +++ b/python/cugraph-pyg/cugraph_pyg/nn/conv/base.py @@ -18,26 +18,12 @@ torch = import_optional("torch") torch_geometric = import_optional("torch_geometric") - -try: # pragma: no cover - from pylibcugraphops.pytorch import CSC, HeteroCSC - - HAS_PYLIBCUGRAPHOPS = True -except ImportError: - HAS_PYLIBCUGRAPHOPS = False +ops_torch = import_optional("pylibcugraphops.pytorch") class BaseConv(torch.nn.Module): # pragma: no cover r"""An abstract base class for implementing cugraph-ops message passing layers.""" - def __init__(self): - super().__init__() - - if HAS_PYLIBCUGRAPHOPS is False: - raise ModuleNotFoundError( - f"'{self.__class__.__name__}' requires " f"'pylibcugraphops>=23.04'" - ) - def reset_parameters(self): r"""Resets all learnable parameters of the module.""" pass @@ -88,7 +74,7 @@ def get_cugraph( csc: Tuple[torch.Tensor, torch.Tensor, int], bipartite: bool = False, max_num_neighbors: Optional[int] = None, - ) -> CSC: + ) -> ops_torch.CSC: r"""Constructs a :obj:`cugraph-ops` graph object from CSC representation. Supports both bipartite and non-bipartite graphs. @@ -116,7 +102,7 @@ def get_cugraph( if max_num_neighbors is None: max_num_neighbors = -1 - return CSC( + return ops_torch.CSC( offsets=colptr, indices=row, num_src_nodes=num_src_nodes, @@ -131,7 +117,7 @@ def get_typed_cugraph( num_edge_types: Optional[int] = None, bipartite: bool = False, max_num_neighbors: Optional[int] = None, - ) -> HeteroCSC: + ) -> ops_torch.HeteroCSC: r"""Constructs a typed :obj:`cugraph` graph object from a CSC representation where each edge corresponds to a given edge type. Supports both bipartite and non-bipartite graphs. @@ -162,7 +148,7 @@ def get_typed_cugraph( row, colptr, num_src_nodes = csc edge_type = edge_type.int() - return HeteroCSC( + return ops_torch.HeteroCSC( offsets=colptr, indices=row, edge_types=edge_type, diff --git a/python/cugraph-pyg/cugraph_pyg/nn/conv/gat_conv.py b/python/cugraph-pyg/cugraph_pyg/nn/conv/gat_conv.py index 23b7d50ba96..f0040015b4a 100644 --- a/python/cugraph-pyg/cugraph_pyg/nn/conv/gat_conv.py +++ b/python/cugraph-pyg/cugraph_pyg/nn/conv/gat_conv.py @@ -12,8 +12,6 @@ # limitations under the License. 
 from typing import Optional, Tuple, Union
 
-from pylibcugraphops.pytorch.operators import mha_gat_n2n
-
 from cugraph.utilities.utils import import_optional
 
 from .base import BaseConv
@@ -21,6 +19,7 @@
 torch = import_optional("torch")
 nn = import_optional("torch.nn")
 torch_geometric = import_optional("torch_geometric")
+ops_torch = import_optional("pylibcugraphops.pytorch")
 
 
 class GATConv(BaseConv):
@@ -211,7 +210,7 @@ def forward(
                 )
             x = self.lin(x)
 
-        out = mha_gat_n2n(
+        out = ops_torch.operators.mha_gat_n2n(
             (x_src, x_dst) if bipartite else x,
             self.att,
             graph,
diff --git a/python/cugraph-pyg/cugraph_pyg/nn/conv/gatv2_conv.py b/python/cugraph-pyg/cugraph_pyg/nn/conv/gatv2_conv.py
index d4c947b952a..d74ca6b00d0 100644
--- a/python/cugraph-pyg/cugraph_pyg/nn/conv/gatv2_conv.py
+++ b/python/cugraph-pyg/cugraph_pyg/nn/conv/gatv2_conv.py
@@ -12,8 +12,6 @@
 # limitations under the License.
 from typing import Optional, Tuple, Union
 
-from pylibcugraphops.pytorch.operators import mha_gat_v2_n2n
-
 from cugraph.utilities.utils import import_optional
 
 from .base import BaseConv
@@ -21,6 +19,7 @@
 torch = import_optional("torch")
 nn = import_optional("torch.nn")
 torch_geometric = import_optional("torch_geometric")
+ops_torch = import_optional("pylibcugraphops.pytorch")
 
 
 class GATv2Conv(BaseConv):
@@ -208,7 +207,7 @@ def forward(
         else:
             x = self.lin_src(x)
 
-        out = mha_gat_v2_n2n(
+        out = ops_torch.operators.mha_gat_v2_n2n(
             (x_src, x_dst) if bipartite else x,
             self.att,
             graph,
diff --git a/python/cugraph-pyg/cugraph_pyg/nn/conv/transformer_conv.py b/python/cugraph-pyg/cugraph_pyg/nn/conv/transformer_conv.py
index f67756eb3fe..1b8b1aa0ffa 100644
--- a/python/cugraph-pyg/cugraph_pyg/nn/conv/transformer_conv.py
+++ b/python/cugraph-pyg/cugraph_pyg/nn/conv/transformer_conv.py
@@ -12,8 +12,6 @@
 # limitations under the License.
 from typing import Optional, Tuple, Union
 
-from pylibcugraphops.pytorch.operators import mha_simple_n2n
-
 from cugraph.utilities.utils import import_optional
 
 from .base import BaseConv
@@ -21,6 +19,7 @@
 torch = import_optional("torch")
 nn = import_optional("torch.nn")
 torch_geometric = import_optional("torch_geometric")
+ops_torch = import_optional("pylibcugraphops.pytorch")
 
 
 class TransformerConv(BaseConv):
@@ -186,7 +185,7 @@ def forward(
                 )
             edge_attr = self.lin_edge(edge_attr)
 
-        out = mha_simple_n2n(
+        out = ops_torch.operators.mha_simple_n2n(
             key,
             query,
             value,
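Note (illustrative, not part of the patch): throughout this diff the eager `from pylibcugraphops.pytorch import ...` statements and the `HAS_PYLIBCUGRAPHOPS` guard are replaced with cugraph's `import_optional`, so a missing `pylibcugraphops` only errors when a layer actually uses `ops_torch`. The sketch below shows how such a lazy optional import behaves; `_MissingModule` here is a hypothetical stand-in, not cugraph's actual implementation of `cugraph.utilities.utils.import_optional`.

# Minimal sketch of the lazy-import pattern, under the assumptions stated above.
import importlib


class _MissingModule:
    """Placeholder returned when an optional module cannot be imported."""

    def __init__(self, name, err):
        self._name = name
        self._err = err

    def __getattr__(self, attr):
        # Raise only when the missing module is actually used.
        raise ModuleNotFoundError(
            f"module '{self._name}' is required for this feature"
        ) from self._err


def import_optional(name):
    """Return the module if importable, otherwise a placeholder that errors on use."""
    try:
        return importlib.import_module(name)
    except ImportError as err:
        return _MissingModule(name, err)


# Importing a conv module no longer fails when pylibcugraphops is absent;
# only touching ops_torch.CSC or ops_torch.operators at call time does.
ops_torch = import_optional("pylibcugraphops.pytorch")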