From d5ed3093fbbb50d901a88029cc0530509da4fb52 Mon Sep 17 00:00:00 2001 From: Raymond Douglass Date: Thu, 23 Mar 2023 14:55:19 -0400 Subject: [PATCH 01/90] DOC --- .github/workflows/build.yaml | 20 ++++++------- .github/workflows/pr.yaml | 28 +++++++++---------- .github/workflows/test.yaml | 8 +++--- .../all_cuda-118_arch-x86_64.yaml | 22 +++++++-------- cpp/CMakeLists.txt | 2 +- cpp/doxygen/Doxyfile | 2 +- cpp/libcugraph_etl/CMakeLists.txt | 2 +- dependencies.yaml | 22 +++++++-------- docs/cugraph/source/conf.py | 4 +-- fetch_rapids.cmake | 2 +- python/cugraph-dgl/cugraph_dgl/__init__.py | 2 +- python/cugraph-dgl/pyproject.toml | 2 +- python/cugraph-pyg/cugraph_pyg/__init__.py | 2 +- python/cugraph-pyg/pyproject.toml | 2 +- .../client/cugraph_service_client/__init__.py | 2 +- python/cugraph-service/client/pyproject.toml | 2 +- .../server/cugraph_service_server/__init__.py | 2 +- python/cugraph-service/server/pyproject.toml | 2 +- python/cugraph/CMakeLists.txt | 2 +- python/cugraph/cugraph/__init__.py | 2 +- python/cugraph/pyproject.toml | 20 ++++++------- python/pylibcugraph/CMakeLists.txt | 2 +- python/pylibcugraph/pylibcugraph/__init__.py | 2 +- python/pylibcugraph/pyproject.toml | 12 ++++---- 24 files changed, 84 insertions(+), 84 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 4ee5c3b271c..f1cb664fe0d 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -28,7 +28,7 @@ concurrency: jobs: cpp-build: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.06 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -37,7 +37,7 @@ jobs: python-build: needs: [cpp-build] secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.06 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -46,7 +46,7 @@ jobs: upload-conda: needs: [cpp-build, python-build] secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-upload-packages.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-upload-packages.yaml@branch-23.06 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -56,7 +56,7 @@ jobs: if: github.ref_type == 'branch' && github.event_name == 'push' needs: python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.06 with: build_type: branch node_type: "gpu-latest-1" @@ -65,7 +65,7 @@ jobs: run_script: "ci/build_docs.sh" wheel-build-pylibcugraph: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-build.yml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-build.yml@branch-23.06 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -78,7 +78,7 @@ jobs: # the CMake variables in get_cumlprims_mg.cmake since CMake will just use # the clone as is. 
extra-repo: rapidsai/cugraph-ops - extra-repo-sha: branch-23.04 + extra-repo-sha: branch-23.06 extra-repo-deploy-key: CUGRAPH_OPS_SSH_PRIVATE_DEPLOY_KEY skbuild-configure-options: "-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=/project/cugraph-ops/" @@ -86,7 +86,7 @@ jobs: wheel-publish-pylibcugraph: needs: wheel-build-pylibcugraph secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-publish.yml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-publish.yml@branch-23.06 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -96,7 +96,7 @@ jobs: wheel-build-cugraph: needs: wheel-publish-pylibcugraph secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-build.yml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-build.yml@branch-23.06 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -109,7 +109,7 @@ jobs: # the CMake variables in get_cumlprims_mg.cmake since CMake will just use # the clone as is. extra-repo: rapidsai/cugraph-ops - extra-repo-sha: branch-23.04 + extra-repo-sha: branch-23.06 extra-repo-deploy-key: CUGRAPH_OPS_SSH_PRIVATE_DEPLOY_KEY skbuild-configure-options: "-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=/project/cugraph-ops/" @@ -117,7 +117,7 @@ jobs: wheel-publish-cugraph: needs: wheel-build-cugraph secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-publish.yml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-publish.yml@branch-23.06 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 857c7bc59bc..61b87690586 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -24,41 +24,41 @@ jobs: - wheel-build-cugraph - wheel-tests-cugraph secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/pr-builder.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/pr-builder.yaml@branch-23.06 checks: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/checks.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/checks.yaml@branch-23.06 with: enable_check_generated_files: false conda-cpp-build: needs: checks secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.06 with: build_type: pull-request node_type: cpu16 conda-cpp-tests: needs: conda-cpp-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.06 with: build_type: pull-request conda-python-build: needs: conda-cpp-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.06 with: build_type: pull-request conda-python-tests: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.04 + uses: 
rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.06 with: build_type: pull-request conda-notebook-tests: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.06 with: build_type: pull-request node_type: "gpu-latest-1" @@ -68,7 +68,7 @@ jobs: docs-build: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.06 with: build_type: pull-request node_type: "gpu-latest-1" @@ -78,20 +78,20 @@ jobs: wheel-build-pylibcugraph: needs: checks secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-build.yml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-build.yml@branch-23.06 with: build_type: pull-request package-name: pylibcugraph package-dir: python/pylibcugraph extra-repo: rapidsai/cugraph-ops - extra-repo-sha: branch-23.04 + extra-repo-sha: branch-23.06 extra-repo-deploy-key: CUGRAPH_OPS_SSH_PRIVATE_DEPLOY_KEY skbuild-configure-options: "-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=/project/cugraph-ops/" uses-setup-env-vars: false wheel-tests-pylibcugraph: needs: wheel-build-pylibcugraph secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-test.yml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-test.yml@branch-23.06 with: build_type: pull-request package-name: pylibcugraph @@ -102,13 +102,13 @@ jobs: wheel-build-cugraph: needs: wheel-tests-pylibcugraph secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-build.yml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-build.yml@branch-23.06 with: build_type: pull-request package-name: cugraph package-dir: python/cugraph extra-repo: rapidsai/cugraph-ops - extra-repo-sha: branch-23.04 + extra-repo-sha: branch-23.06 extra-repo-deploy-key: CUGRAPH_OPS_SSH_PRIVATE_DEPLOY_KEY before-wheel: "RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-wheelhouse" skbuild-configure-options: "-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=/project/cugraph-ops/" @@ -116,7 +116,7 @@ jobs: wheel-tests-cugraph: needs: wheel-build-cugraph secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-test.yml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-test.yml@branch-23.06 with: build_type: pull-request package-name: cugraph diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index c79bccdbc44..2389d760e76 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -16,7 +16,7 @@ on: jobs: conda-cpp-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.06 with: build_type: nightly branch: ${{ inputs.branch }} @@ -24,7 +24,7 @@ jobs: sha: ${{ inputs.sha }} conda-python-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.04 + uses: 
rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.06 with: build_type: nightly branch: ${{ inputs.branch }} @@ -32,7 +32,7 @@ jobs: sha: ${{ inputs.sha }} wheel-tests-pylibcugraph: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-test.yml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-test.yml@branch-23.06 with: build_type: nightly branch: ${{ inputs.branch }} @@ -44,7 +44,7 @@ jobs: test-unittest: "RAPIDS_DATASET_ROOT_DIR=./datasets pytest -v ./python/pylibcugraph/pylibcugraph/tests" wheel-tests-cugraph: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-test.yml@branch-23.04 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-test.yml@branch-23.06 with: build_type: nightly branch: ${{ inputs.branch }} diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index 82e2a8a278a..25810e0018f 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -12,11 +12,11 @@ dependencies: - cmake>=3.23.1,!=3.25.0 - cuda-python>=11.7.1,<12.0 - cudatoolkit=11.8 -- cudf=23.04.* +- cudf=23.06.* - cxx-compiler - cython>=0.29,<0.30 -- dask-cuda=23.04.* -- dask-cudf=23.04.* +- dask-cuda=23.06.* +- dask-cudf=23.06.* - dask==2023.1.1 - distributed==2023.1.1 - doxygen @@ -25,11 +25,11 @@ dependencies: - graphviz - gtest=1.10.0 - ipython -- libcudf=23.04.* -- libcugraphops=23.04.* -- libraft-headers=23.04.* +- libcudf=23.06.* +- libcugraphops=23.06.* +- libraft-headers=23.06.* - libraft=23.04.* -- librmm=23.04.* +- librmm=23.06.* - nbsphinx - nccl>=2.9.9 - networkx>=2.5.1 @@ -42,15 +42,15 @@ dependencies: - pre-commit - py - pydata-sphinx-theme -- pylibraft=23.04.* +- pylibraft=23.06.* - pytest - pytest-cov - python-louvain -- raft-dask=23.04.* +- raft-dask=23.06.* - rapids-pytest-benchmark - recommonmark - requests -- rmm=23.04.* +- rmm=23.06.* - scikit-build>=0.13.1 - scikit-learn>=0.23.1 - scipy @@ -59,5 +59,5 @@ dependencies: - sphinx<6 - sphinxcontrib-websupport - ucx-proc=*=gpu -- ucx-py=0.31.* +- ucx-py=0.32.* name: all_cuda-118_arch-x86_64 diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index f4c11ff8c5e..89fe317a925 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -25,7 +25,7 @@ include(rapids-find) rapids_cuda_init_architectures(CUGRAPH) -project(CUGRAPH VERSION 23.04.00 LANGUAGES C CXX CUDA) +project(CUGRAPH VERSION 23.06.00 LANGUAGES C CXX CUDA) if(CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA" AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS 11.0) diff --git a/cpp/doxygen/Doxyfile b/cpp/doxygen/Doxyfile index 3428562510f..5d04cd9b539 100644 --- a/cpp/doxygen/Doxyfile +++ b/cpp/doxygen/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "libcugraph" # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER=23.04 +PROJECT_NUMBER=23.06 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/cpp/libcugraph_etl/CMakeLists.txt b/cpp/libcugraph_etl/CMakeLists.txt index bdbb0b33c19..13cf7b199ec 100644 --- a/cpp/libcugraph_etl/CMakeLists.txt +++ b/cpp/libcugraph_etl/CMakeLists.txt @@ -25,7 +25,7 @@ include(rapids-find) rapids_cuda_init_architectures(CUGRAPH_ETL) -project(CUGRAPH_ETL VERSION 23.04.00 LANGUAGES C CXX CUDA) +project(CUGRAPH_ETL VERSION 23.06.00 LANGUAGES C CXX CUDA) if(CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA" AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS 11.0) diff --git a/dependencies.yaml b/dependencies.yaml index 15c98bbe884..232bbaa10b3 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -91,10 +91,10 @@ dependencies: - cxx-compiler - gmock=1.10.0 - gtest=1.10.0 - - libcugraphops=23.04.* - - libraft-headers=23.04.* + - libcugraphops=23.06.* + - libraft-headers=23.06.* - libraft=23.04.* - - librmm=23.04.* + - librmm=23.06.* - openmpi # Required for building cpp-mgtests (multi-GPU tests) specific: - output_types: [conda] @@ -158,20 +158,20 @@ dependencies: - output_types: [conda] packages: - cuda-python>=11.7.1,<12.0 - - cudf=23.04.* + - cudf=23.06.* - cython>=0.29,<0.30 - dask==2023.1.1 - distributed==2023.1.1 - - dask-cuda=23.04.* - - dask-cudf=23.04.* - - libcudf=23.04.* + - dask-cuda=23.06.* + - dask-cudf=23.06.* + - libcudf=23.06.* - nccl>=2.9.9 - - pylibraft=23.04.* - - raft-dask=23.04.* - - rmm=23.04.* + - pylibraft=23.06.* + - raft-dask=23.06.* + - rmm=23.06.* - scikit-build>=0.13.1 - ucx-proc=*=gpu - - ucx-py=0.31.* + - ucx-py=0.32.* doc: common: - output_types: [conda] diff --git a/docs/cugraph/source/conf.py b/docs/cugraph/source/conf.py index 4d11d4ef962..9835848394c 100644 --- a/docs/cugraph/source/conf.py +++ b/docs/cugraph/source/conf.py @@ -76,9 +76,9 @@ # built documents. # # The short X.Y version. -version = '23.04' +version = '23.06' # The full version, including alpha/beta/rc tags. -release = '23.04.00' +release = '23.06.00' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/fetch_rapids.cmake b/fetch_rapids.cmake index 73b83cb20d8..a897c145a66 100644 --- a/fetch_rapids.cmake +++ b/fetch_rapids.cmake @@ -12,7 +12,7 @@ # the License. 
# ============================================================================= if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/CUGRAPH_RAPIDS.cmake) - file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.04/RAPIDS.cmake + file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.06/RAPIDS.cmake ${CMAKE_CURRENT_BINARY_DIR}/CUGRAPH_RAPIDS.cmake ) endif() diff --git a/python/cugraph-dgl/cugraph_dgl/__init__.py b/python/cugraph-dgl/cugraph_dgl/__init__.py index 8c7d61228b5..111961d8fdc 100644 --- a/python/cugraph-dgl/cugraph_dgl/__init__.py +++ b/python/cugraph-dgl/cugraph_dgl/__init__.py @@ -20,4 +20,4 @@ import cugraph_dgl.dataloading import cugraph_dgl.nn -__version__ = "23.04.00" +__version__ = "23.06.00" diff --git a/python/cugraph-dgl/pyproject.toml b/python/cugraph-dgl/pyproject.toml index 9a2acc568bd..56ba1912a44 100644 --- a/python/cugraph-dgl/pyproject.toml +++ b/python/cugraph-dgl/pyproject.toml @@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta" [project] name = "cugraph-dgl" -version = "23.04.00" +version = "23.06.00" description = "cugraph extensions for DGL" readme = { file = "README.md", content-type = "text/markdown" } authors = [ diff --git a/python/cugraph-pyg/cugraph_pyg/__init__.py b/python/cugraph-pyg/cugraph_pyg/__init__.py index 81bd0868299..f5e317bfafd 100644 --- a/python/cugraph-pyg/cugraph_pyg/__init__.py +++ b/python/cugraph-pyg/cugraph_pyg/__init__.py @@ -11,4 +11,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "23.04.00" +__version__ = "23.06.00" diff --git a/python/cugraph-pyg/pyproject.toml b/python/cugraph-pyg/pyproject.toml index aded4c579f9..5f99f63fdc2 100644 --- a/python/cugraph-pyg/pyproject.toml +++ b/python/cugraph-pyg/pyproject.toml @@ -12,7 +12,7 @@ testpaths = ["cugraph_pyg/tests"] [project] name = "cugraph_pyg" -version = "23.04.00" +version = "23.06.00" description = "cugraph_pyg - PyG support for cuGraph massive-scale, ultra-fast GPU graph analytics." 
authors = [ { name = "NVIDIA Corporation" }, diff --git a/python/cugraph-service/client/cugraph_service_client/__init__.py b/python/cugraph-service/client/cugraph_service_client/__init__.py index e866ad2e880..0680aacfe52 100644 --- a/python/cugraph-service/client/cugraph_service_client/__init__.py +++ b/python/cugraph-service/client/cugraph_service_client/__init__.py @@ -35,4 +35,4 @@ from cugraph_service_client.client import CugraphServiceClient from cugraph_service_client.remote_graph import RemoteGraph -__version__ = "23.04.00" +__version__ = "23.06.00" diff --git a/python/cugraph-service/client/pyproject.toml b/python/cugraph-service/client/pyproject.toml index 9e3b6b8f920..7526a265660 100644 --- a/python/cugraph-service/client/pyproject.toml +++ b/python/cugraph-service/client/pyproject.toml @@ -10,7 +10,7 @@ build-backend = "setuptools.build_meta" [project] name = "cugraph-service-client" -version = "23.04.00" +version = "23.06.00" description = "cuGraph Service client" readme = { file = "README.md", content-type = "text/markdown" } authors = [ diff --git a/python/cugraph-service/server/cugraph_service_server/__init__.py b/python/cugraph-service/server/cugraph_service_server/__init__.py index b7102920e21..5ab860e822f 100644 --- a/python/cugraph-service/server/cugraph_service_server/__init__.py +++ b/python/cugraph-service/server/cugraph_service_server/__init__.py @@ -61,4 +61,4 @@ def start_server_blocking( server.serve() # blocks until Ctrl-C (kill -2) -__version__ = "23.04.00" +__version__ = "23.06.00" diff --git a/python/cugraph-service/server/pyproject.toml b/python/cugraph-service/server/pyproject.toml index 4260bb74889..d4459fd48f7 100644 --- a/python/cugraph-service/server/pyproject.toml +++ b/python/cugraph-service/server/pyproject.toml @@ -10,7 +10,7 @@ build-backend = "setuptools.build_meta" [project] name = "cugraph-service-server" -version = "23.04.00" +version = "23.06.00" description = "cuGraph Service server" readme = { file = "README.md", content-type = "text/markdown" } authors = [ diff --git a/python/cugraph/CMakeLists.txt b/python/cugraph/CMakeLists.txt index 9edfd2b741f..c62daef93b2 100644 --- a/python/cugraph/CMakeLists.txt +++ b/python/cugraph/CMakeLists.txt @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) -set(cugraph_version 23.04.00) +set(cugraph_version 23.06.00) include(../../fetch_rapids.cmake) diff --git a/python/cugraph/cugraph/__init__.py b/python/cugraph/cugraph/__init__.py index b576911dcd4..ab7c3d31839 100644 --- a/python/cugraph/cugraph/__init__.py +++ b/python/cugraph/cugraph/__init__.py @@ -118,4 +118,4 @@ from cugraph import gnn -__version__ = "23.04.00" +__version__ = "23.06.00" diff --git a/python/cugraph/pyproject.toml b/python/cugraph/pyproject.toml index b9939f62232..dd76b792923 100644 --- a/python/cugraph/pyproject.toml +++ b/python/cugraph/pyproject.toml @@ -9,9 +9,9 @@ requires = [ "scikit-build>=0.13.1", "cmake>=3.23.1,!=3.25.0", "ninja", - "rmm==23.4.*", - "pylibraft==23.4.*", - "pylibcugraph==23.4.*", + "rmm==23.6.*", + "pylibraft==23.6.*", + "pylibcugraph==23.6.*", ] build-backend = "setuptools.build_meta" @@ -20,7 +20,7 @@ testpaths = ["cugraph/tests"] [project] name = "cugraph" -version = "23.04.00" +version = "23.06.00" description = "cuGraph - RAPIDS GPU Graph Analytics" readme = { file = "README.md", content-type = "text/markdown" } authors = [ @@ -30,12 +30,12 @@ license = { text = "Apache 2.0" } requires-python = ">=3.8" dependencies = [ "numba", - "dask-cuda==23.4.*", - "rmm==23.4.*", - "cudf==23.4.*", - 
"raft-dask==23.4.*", - "dask-cudf==23.4.*", - "pylibcugraph==23.4.*", + "dask-cuda==23.6.*", + "rmm==23.6.*", + "cudf==23.6.*", + "raft-dask==23.6.*", + "dask-cudf==23.6.*", + "pylibcugraph==23.6.*", "cupy-cuda11x", ] classifiers = [ diff --git a/python/pylibcugraph/CMakeLists.txt b/python/pylibcugraph/CMakeLists.txt index 96b3d74cfda..21097983a1b 100644 --- a/python/pylibcugraph/CMakeLists.txt +++ b/python/pylibcugraph/CMakeLists.txt @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) -set(pylibcugraph_version 23.04.00) +set(pylibcugraph_version 23.06.00) include(../../fetch_rapids.cmake) diff --git a/python/pylibcugraph/pylibcugraph/__init__.py b/python/pylibcugraph/pylibcugraph/__init__.py index d653e08b127..0337e6a4fcf 100644 --- a/python/pylibcugraph/pylibcugraph/__init__.py +++ b/python/pylibcugraph/pylibcugraph/__init__.py @@ -60,4 +60,4 @@ from pylibcugraph.random import CuGraphRandomState -__version__ = "23.04.00" +__version__ = "23.06.00" diff --git a/python/pylibcugraph/pyproject.toml b/python/pylibcugraph/pyproject.toml index f13f89e75be..89ef0509092 100644 --- a/python/pylibcugraph/pyproject.toml +++ b/python/pylibcugraph/pyproject.toml @@ -9,8 +9,8 @@ requires = [ "scikit-build>=0.13.1", "cmake>=3.23.1,!=3.25.0", "ninja", - "rmm==23.4.*", - "pylibraft==23.4.*", + "rmm==23.6.*", + "pylibraft==23.6.*", ] build-backend = "setuptools.build_meta" @@ -19,7 +19,7 @@ testpaths = ["pylibcugraph/tests"] [project] name = "pylibcugraph" -version = "23.04.00" +version = "23.06.00" description = "pylibcugraph - Python bindings for the libcugraph cuGraph C/C++/CUDA library" readme = { file = "README.md", content-type = "text/markdown" } authors = [ @@ -28,8 +28,8 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.8" dependencies = [ - "pylibraft==23.4.*", - "rmm==23.4.*", + "pylibraft==23.6.*", + "rmm==23.6.*", ] classifiers = [ "Intended Audience :: Developers", @@ -47,7 +47,7 @@ test = [ "pandas", "numpy", "networkx>=2.5.1", - "cudf==23.4.*", + "cudf==23.6.*", ] [project.urls] From be94c329a392100045d9da3f3fc706628ded15f9 Mon Sep 17 00:00:00 2001 From: Chuck Hastings <45364586+ChuckHastings@users.noreply.github.com> Date: Tue, 4 Apr 2023 18:18:51 -0400 Subject: [PATCH 02/90] Update raft dependency to 23.06 (#3410) Looks like we failed to update the `update-version.sh` script to handle `libraft` instead of `libraft-distance` when we made the raft changes in 23.04. This PR fixes that and updates the raft version to 23.06. Authors: - Chuck Hastings (https://github.com/ChuckHastings) Approvers: - Mark Sadang (https://github.com/msadang) - Brad Rees (https://github.com/BradReesWork) URL: https://github.com/rapidsai/cugraph/pull/3410 --- .github/workflows/pr.yaml | 4 ++-- .github/workflows/test.yaml | 4 ++-- ci/release/update-version.sh | 4 ++-- conda/environments/all_cuda-118_arch-x86_64.yaml | 2 +- dependencies.yaml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index bb66ab4cf4d..5c1506ddec3 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -121,9 +121,9 @@ jobs: build_type: pull-request package-name: cugraph # Always want to test against latest dask/distributed. 
- test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.04" + test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" # Skip dataset downloads on arm to save CI time -- arm only runs smoke tests. # On arm also need to install cupy from the specific site. - test-before-arm64: "RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.04" + test-before-arm64: "RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" test-unittest: "RAPIDS_DATASET_ROOT_DIR=/__w/cugraph/cugraph/datasets pytest -v -m sg ./python/cugraph/cugraph/tests" test-smoketest: "python ci/wheel_smoke_test_cugraph.py" diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index acb0025a2f9..57ab2c27618 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -52,7 +52,7 @@ jobs: sha: ${{ inputs.sha }} package-name: cugraph # Always want to test against latest dask/distributed. - test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.04" + test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" # On arm also need to install cupy from the specific webpage. 
- test-before-arm64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.04" + test-before-arm64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" test-unittest: "RAPIDS_DATASET_ROOT_DIR=/__w/cugraph/cugraph/datasets pytest -v -m sg ./python/cugraph/cugraph/tests" diff --git a/ci/release/update-version.sh b/ci/release/update-version.sh index a221cdea51e..d5ec7800a70 100755 --- a/ci/release/update-version.sh +++ b/ci/release/update-version.sh @@ -75,7 +75,7 @@ for FILE in conda/environments/*.yaml dependencies.yaml; do sed_runner "s/cudf=${CURRENT_SHORT_TAG}/cudf=${NEXT_SHORT_TAG}/g" ${FILE}; sed_runner "s/rmm=${CURRENT_SHORT_TAG}/rmm=${NEXT_SHORT_TAG}/g" ${FILE}; sed_runner "s/libraft-headers=${CURRENT_SHORT_TAG}/libraft-headers=${NEXT_SHORT_TAG}/g" ${FILE}; - sed_runner "s/libraft-distance=${CURRENT_SHORT_TAG}/libraft-distance=${NEXT_SHORT_TAG}/g" ${FILE}; + sed_runner "s/libraft=${CURRENT_SHORT_TAG}/libraft=${NEXT_SHORT_TAG}/g" ${FILE}; sed_runner "s/pyraft=${CURRENT_SHORT_TAG}/pyraft=${NEXT_SHORT_TAG}/g" ${FILE}; sed_runner "s/raft-dask=${CURRENT_SHORT_TAG}/raft-dask=${NEXT_SHORT_TAG}/g" ${FILE}; sed_runner "s/pylibraft=${CURRENT_SHORT_TAG}/pylibraft=${NEXT_SHORT_TAG}/g" ${FILE}; @@ -98,7 +98,7 @@ for FILE in .github/workflows/*.yaml; do # Wheel builds clone cugraph-ops, update its branch sed_runner "s/extra-repo-sha: branch-.*/extra-repo-sha: branch-${NEXT_SHORT_TAG}/g" "${FILE}" # Wheel builds install dask-cuda from source, update its branch - sed_runner "s/dask-cuda.git@branch-[^\"\s]\+/dask-cuda.git@branch-${NEXT_SHORT_TAG}/g" "${FILE}" + sed_runner "s/dask-cuda.git@branch-[0-9][0-9].[0-9][0-9]/dask-cuda.git@branch-${NEXT_SHORT_TAG}/g" "${FILE}" done diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index 25810e0018f..788cb2a3c97 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -28,7 +28,7 @@ dependencies: - libcudf=23.06.* - libcugraphops=23.06.* - libraft-headers=23.06.* -- libraft=23.04.* +- libraft=23.06.* - librmm=23.06.* - nbsphinx - nccl>=2.9.9 diff --git a/dependencies.yaml b/dependencies.yaml index 232bbaa10b3..f411b458ee7 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -93,7 +93,7 @@ dependencies: - gtest=1.10.0 - libcugraphops=23.06.* - libraft-headers=23.06.* - - libraft=23.04.* + - libraft=23.06.* - librmm=23.06.* - openmpi # Required for building cpp-mgtests (multi-GPU tests) specific: From 006142472c1839c9a80ac0389380b8da118bdf49 Mon Sep 17 00:00:00 2001 From: Alexandria Barghi Date: Thu, 6 Apr 2023 18:56:53 +0000 Subject: [PATCH 03/90] temporarily pin dask versions for 23.06 --- .github/workflows/pr.yaml | 4 ++-- .github/workflows/test.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 5c1506ddec3..4bcf22586e6 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -121,9 +121,9 @@ jobs: build_type: pull-request package-name: cugraph # Always want to test against 
latest dask/distributed. - test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" + test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install git+https://github.com/dask/dask.git@2023.3.2 git+https://github.com/dask/distributed.git@2023.3.2.1 git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" # Skip dataset downloads on arm to save CI time -- arm only runs smoke tests. # On arm also need to install cupy from the specific site. - test-before-arm64: "RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" + test-before-arm64: "RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@2023.3.2 git+https://github.com/dask/distributed.git@2023.3.2.1 git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" test-unittest: "RAPIDS_DATASET_ROOT_DIR=/__w/cugraph/cugraph/datasets pytest -v -m sg ./python/cugraph/cugraph/tests" test-smoketest: "python ci/wheel_smoke_test_cugraph.py" diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 57ab2c27618..3b44667c1c3 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -52,7 +52,7 @@ jobs: sha: ${{ inputs.sha }} package-name: cugraph # Always want to test against latest dask/distributed. - test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" + test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install git+https://github.com/dask/dask.git@2023.3.2 git+https://github.com/dask/distributed.git@2023.3.2.1 git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" # On arm also need to install cupy from the specific webpage. 
- test-before-arm64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" + test-before-arm64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@2023.3.2 git+https://github.com/dask/distributed.git@2023.3.2.1 git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" test-unittest: "RAPIDS_DATASET_ROOT_DIR=/__w/cugraph/cugraph/datasets pytest -v -m sg ./python/cugraph/cugraph/tests" From c72a8759179950fdac5516c75ead76bffc04551c Mon Sep 17 00:00:00 2001 From: Alexandria Barghi Date: Thu, 6 Apr 2023 20:55:55 +0000 Subject: [PATCH 04/90] change 23.06 specs to 23.6 in dependencies.yaml --- .../all_cuda-118_arch-x86_64.yaml | 20 +++++++------- dependencies.yaml | 26 +++++++++---------- python/cugraph-service/server/pyproject.toml | 8 +++--- python/cugraph/pyproject.toml | 14 +++++----- python/pylibcugraph/pyproject.toml | 6 ++--- 5 files changed, 37 insertions(+), 37 deletions(-) diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index 393f7c49fdc..a55b62e5fe1 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -11,13 +11,13 @@ dependencies: - c-compiler - cmake>=3.23.1,!=3.25.0 - cudatoolkit=11.8 -- cudf==23.06.* +- cudf==23.6.* - cupy>=9.5.0,<12.0.0a0 - cxx-compiler - cython>=0.29,<0.30 - dask-core==2023.3.2 -- dask-cuda==23.06.* -- dask-cudf==23.06.* +- dask-cuda==23.6.* +- dask-cudf==23.6.* - dask==2023.3.2 - distributed==2023.3.2.1 - doxygen @@ -27,11 +27,11 @@ dependencies: - graphviz - gtest=1.10.0 - ipython -- libcudf=23.06.* -- libcugraphops=23.06.* -- libraft-headers=23.06.* -- libraft=23.06.* -- librmm=23.06.* +- libcudf=23.6.* +- libcugraphops=23.6.* +- libraft-headers=23.6.* +- libraft=23.6.* +- librmm=23.6.* - nbsphinx - nccl>=2.9.9 - networkx>=2.5.1 @@ -45,13 +45,13 @@ dependencies: - pandas - pre-commit - pydata-sphinx-theme -- pylibraft==23.06.* +- pylibraft==23.6.* - pytest - pytest-benchmark - pytest-cov - pytest-xdist - python-louvain -- raft-dask==23.06.* +- raft-dask==23.6.* - recommonmark - requests - rmm==23.4.* diff --git a/dependencies.yaml b/dependencies.yaml index 49d71308531..ebb5d4f0801 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -208,10 +208,10 @@ dependencies: - cxx-compiler - gmock=1.10.0 - gtest=1.10.0 - - libcugraphops=23.06.* - - libraft-headers=23.06.* - - libraft=23.06.* - - librmm=23.06.* + - libcugraphops=23.6.* + - libraft-headers=23.6.* + - libraft=23.6.* + - librmm=23.6.* - openmpi # Required for building cpp-mgtests (multi-GPU tests) specific: - output_types: [conda] @@ -281,38 +281,38 @@ dependencies: - output_types: [conda, pyproject] packages: - cython>=0.29,<0.30 - - &pylibraft pylibraft==23.06.* + - &pylibraft pylibraft==23.6.* - &rmm rmm==23.4.* - scikit-build>=0.13.1 python_build_cugraph: common: - output_types: [conda, pyproject] packages: - - pylibcugraph==23.06.* + - pylibcugraph==23.6.* python_run_cugraph: common: - output_types: [conda, pyproject] packages: - - &cudf cudf==23.06.* + - &cudf cudf==23.6.* - &dask dask==2023.3.2 - &distributed distributed==2023.3.2.1 - - &dask_cuda dask-cuda==23.06.* - - &dask_cudf dask-cudf==23.06.* + - &dask_cuda 
dask-cuda==23.6.* + - &dask_cudf dask-cudf==23.6.* - &numba numba>=0.56.2 - - raft-dask==23.06.* + - raft-dask==23.6.* - *rmm - &ucx_py ucx-py==0.31.* - output_types: conda packages: - &cupy cupy>=9.5.0,<12.0.0a0 - &dask-core dask-core==2023.3.2 - - libcudf=23.06.* + - libcudf=23.6.* - nccl>=2.9.9 - ucx-proc=*=gpu - output_types: pyproject packages: - &cupy_pip cupy-cuda11x>=9.5.0,<12.0.0a0 - - pylibcugraph==23.06.* + - pylibcugraph==23.6.* python_run_pylibcugraph: common: - output_types: [conda, pyproject] @@ -354,7 +354,7 @@ dependencies: packages: - *cupy_pip - *cugraph - - cugraph-service-client==23.06.* + - cugraph-service-client==23.6.* doc: common: - output_types: [conda] diff --git a/python/cugraph-service/server/pyproject.toml b/python/cugraph-service/server/pyproject.toml index 4dc413a0309..76d2e8e70af 100644 --- a/python/cugraph-service/server/pyproject.toml +++ b/python/cugraph-service/server/pyproject.toml @@ -19,12 +19,12 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.8" dependencies = [ - "cudf==23.06.*", - "cugraph-service-client==23.06.*", + "cudf==23.6.*", + "cugraph-service-client==23.6.*", "cugraph==23.4.*", "cupy-cuda11x>=9.5.0,<12.0.0a0", - "dask-cuda==23.06.*", - "dask-cudf==23.06.*", + "dask-cuda==23.6.*", + "dask-cudf==23.6.*", "dask==2023.3.2", "distributed==2023.3.2.1", "numpy>=1.21", diff --git a/python/cugraph/pyproject.toml b/python/cugraph/pyproject.toml index 0a436c18541..78943fd1139 100644 --- a/python/cugraph/pyproject.toml +++ b/python/cugraph/pyproject.toml @@ -6,8 +6,8 @@ requires = [ "cmake>=3.23.1,!=3.25.0", "cython>=0.29,<0.30", "ninja", - "pylibcugraph==23.06.*", - "pylibraft==23.06.*", + "pylibcugraph==23.6.*", + "pylibraft==23.6.*", "rmm==23.4.*", "scikit-build>=0.13.1", "setuptools", @@ -29,15 +29,15 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.8" dependencies = [ - "cudf==23.06.*", + "cudf==23.6.*", "cupy-cuda11x>=9.5.0,<12.0.0a0", - "dask-cuda==23.06.*", - "dask-cudf==23.06.*", + "dask-cuda==23.6.*", + "dask-cudf==23.6.*", "dask==2023.3.2", "distributed==2023.3.2.1", "numba>=0.56.2", - "pylibcugraph==23.06.*", - "raft-dask==23.06.*", + "pylibcugraph==23.6.*", + "raft-dask==23.6.*", "rmm==23.4.*", "ucx-py==0.31.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. diff --git a/python/pylibcugraph/pyproject.toml b/python/pylibcugraph/pyproject.toml index de5a1907f77..997145f5592 100644 --- a/python/pylibcugraph/pyproject.toml +++ b/python/pylibcugraph/pyproject.toml @@ -6,7 +6,7 @@ requires = [ "cmake>=3.23.1,!=3.25.0", "cython>=0.29,<0.30", "ninja", - "pylibraft==23.06.*", + "pylibraft==23.6.*", "rmm==23.4.*", "scikit-build>=0.13.1", "setuptools", @@ -28,7 +28,7 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.8" dependencies = [ - "pylibraft==23.06.*", + "pylibraft==23.6.*", "rmm==23.4.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. 
classifiers = [ @@ -40,7 +40,7 @@ classifiers = [ [project.optional-dependencies] test = [ - "cudf==23.06.*", + "cudf==23.6.*", "networkx>=2.5.1", "numpy>=1.21", "pandas", From 353e89498f46900228f2834480b7d0a65d081a75 Mon Sep 17 00:00:00 2001 From: Alexandria Barghi Date: Fri, 7 Apr 2023 14:03:02 +0000 Subject: [PATCH 05/90] update rmm version --- conda/environments/all_cuda-118_arch-x86_64.yaml | 2 +- dependencies.yaml | 2 +- python/cugraph-service/server/pyproject.toml | 2 +- python/cugraph/pyproject.toml | 4 ++-- python/pylibcugraph/pyproject.toml | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index a55b62e5fe1..94b1db0d0d9 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -54,7 +54,7 @@ dependencies: - raft-dask==23.6.* - recommonmark - requests -- rmm==23.4.* +- rmm==23.6.* - scikit-build>=0.13.1 - scikit-learn>=0.23.1 - scipy diff --git a/dependencies.yaml b/dependencies.yaml index ebb5d4f0801..272ad7d8b0e 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -282,7 +282,7 @@ dependencies: packages: - cython>=0.29,<0.30 - &pylibraft pylibraft==23.6.* - - &rmm rmm==23.4.* + - &rmm rmm==23.6.* - scikit-build>=0.13.1 python_build_cugraph: common: diff --git a/python/cugraph-service/server/pyproject.toml b/python/cugraph-service/server/pyproject.toml index 76d2e8e70af..49d5510caf8 100644 --- a/python/cugraph-service/server/pyproject.toml +++ b/python/cugraph-service/server/pyproject.toml @@ -28,7 +28,7 @@ dependencies = [ "dask==2023.3.2", "distributed==2023.3.2.1", "numpy>=1.21", - "rmm==23.4.*", + "rmm==23.6.*", "thriftpy2", "ucx-py==0.31.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`. diff --git a/python/cugraph/pyproject.toml b/python/cugraph/pyproject.toml index 78943fd1139..bb758f16bc2 100644 --- a/python/cugraph/pyproject.toml +++ b/python/cugraph/pyproject.toml @@ -8,7 +8,7 @@ requires = [ "ninja", "pylibcugraph==23.6.*", "pylibraft==23.6.*", - "rmm==23.4.*", + "rmm==23.6.*", "scikit-build>=0.13.1", "setuptools", "wheel", @@ -38,7 +38,7 @@ dependencies = [ "numba>=0.56.2", "pylibcugraph==23.6.*", "raft-dask==23.6.*", - "rmm==23.4.*", + "rmm==23.6.*", "ucx-py==0.31.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ diff --git a/python/pylibcugraph/pyproject.toml b/python/pylibcugraph/pyproject.toml index 997145f5592..9101d78bb28 100644 --- a/python/pylibcugraph/pyproject.toml +++ b/python/pylibcugraph/pyproject.toml @@ -7,7 +7,7 @@ requires = [ "cython>=0.29,<0.30", "ninja", "pylibraft==23.6.*", - "rmm==23.4.*", + "rmm==23.6.*", "scikit-build>=0.13.1", "setuptools", "wheel", @@ -29,7 +29,7 @@ license = { text = "Apache 2.0" } requires-python = ">=3.8" dependencies = [ "pylibraft==23.6.*", - "rmm==23.4.*", + "rmm==23.6.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. 
classifiers = [ "Intended Audience :: Developers", From 16b3f0c2d82086ea682874a97ab8e510567967c6 Mon Sep 17 00:00:00 2001 From: Alexandria Barghi Date: Fri, 7 Apr 2023 16:03:36 +0000 Subject: [PATCH 06/90] bump ucx-py to 0.32.* --- conda/environments/all_cuda-118_arch-x86_64.yaml | 2 +- dependencies.yaml | 2 +- python/cugraph-service/server/pyproject.toml | 2 +- python/cugraph/pyproject.toml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index 94b1db0d0d9..e38d46c1906 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -63,5 +63,5 @@ dependencies: - sphinx<6 - sphinxcontrib-websupport - ucx-proc=*=gpu -- ucx-py==0.31.* +- ucx-py==0.32.* name: all_cuda-118_arch-x86_64 diff --git a/dependencies.yaml b/dependencies.yaml index 272ad7d8b0e..41919215e90 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -301,7 +301,7 @@ dependencies: - &numba numba>=0.56.2 - raft-dask==23.6.* - *rmm - - &ucx_py ucx-py==0.31.* + - &ucx_py ucx-py==0.32.* - output_types: conda packages: - &cupy cupy>=9.5.0,<12.0.0a0 diff --git a/python/cugraph-service/server/pyproject.toml b/python/cugraph-service/server/pyproject.toml index 49d5510caf8..e929fe938dd 100644 --- a/python/cugraph-service/server/pyproject.toml +++ b/python/cugraph-service/server/pyproject.toml @@ -30,7 +30,7 @@ dependencies = [ "numpy>=1.21", "rmm==23.6.*", "thriftpy2", - "ucx-py==0.31.*", + "ucx-py==0.32.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ "Intended Audience :: Developers", diff --git a/python/cugraph/pyproject.toml b/python/cugraph/pyproject.toml index bb758f16bc2..f41e9774f08 100644 --- a/python/cugraph/pyproject.toml +++ b/python/cugraph/pyproject.toml @@ -39,7 +39,7 @@ dependencies = [ "pylibcugraph==23.6.*", "raft-dask==23.6.*", "rmm==23.6.*", - "ucx-py==0.31.*", + "ucx-py==0.32.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. 
classifiers = [ "Intended Audience :: Developers", From fe08cbe5815a2b637d453e5d81383c7437462815 Mon Sep 17 00:00:00 2001 From: Alex Barghi <105237337+alexbarghi-nv@users.noreply.github.com> Date: Mon, 10 Apr 2023 07:23:45 -0700 Subject: [PATCH 07/90] Branch 23.06 resolve merge conflict for forward merge (#3409) Resolves merge conflict Authors: - Alex Barghi (https://github.com/alexbarghi-nv) - Joseph Nke (https://github.com/jnke2016) - Vibhu Jawa (https://github.com/VibhuJawa) - Don Acosta (https://github.com/acostadon) - Vyas Ramasubramani (https://github.com/vyasr) - Artur (https://github.com/ArturKasymov) - Naim (https://github.com/naimnv) - Lawrence Mitchell (https://github.com/wence-) - Erik Welch (https://github.com/eriknw) - Brad Rees (https://github.com/BradReesWork) - Tingyu Wang (https://github.com/tingyu66) - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Brad Rees (https://github.com/BradReesWork) - Rick Ratzel (https://github.com/rlratzel) - GALI PREM SAGAR (https://github.com/galipremsagar) - Ray Douglass (https://github.com/raydouglass) - Chuck Hastings (https://github.com/ChuckHastings) URL: https://github.com/rapidsai/cugraph/pull/3409 --- .github/workflows/pr.yaml | 4 +- .github/workflows/test.yaml | 4 +- .pre-commit-config.yaml | 2 +- .../notebooks/get_node_storage.ipynb | 6 +- .../heterogeneous_dataloader_benchmark.ipynb | 6 +- .../homogenous_dataloader_benchmark.ipynb | 6 +- .../cugraph/pytest-based/bench_algos.py | 4 +- .../bench_cugraph_uniform_neighbor_sample.py | 6 +- benchmarks/cugraph/standalone/benchmark.py | 7 +- .../test_cugraph_sampling.py | 1 - ci/build_docs.sh | 3 + ci/release/apply_wheel_modifications.sh | 1 + ci/test_python.sh | 4 +- .../all_cuda-118_arch-x86_64.yaml | 40 +- conda/recipes/cugraph-pyg/meta.yaml | 4 +- conda/recipes/cugraph-service/meta.yaml | 2 +- conda/recipes/cugraph/meta.yaml | 5 +- cpp/CMakeLists.txt | 5 + cpp/include/cugraph/algorithms.hpp | 94 ++- .../cugraph/detail/utility_wrappers.hpp | 16 + cpp/src/c_api/legacy_spectral.cpp | 51 +- cpp/src/c_api/leiden.cpp | 4 - cpp/src/community/detail/common_methods.cuh | 8 +- cpp/src/community/detail/common_methods.hpp | 4 +- cpp/src/community/detail/common_methods_mg.cu | 2 +- cpp/src/community/detail/common_methods_sg.cu | 2 +- cpp/src/community/detail/mis.hpp | 33 + cpp/src/community/detail/mis_impl.cuh | 292 +++++++ cpp/src/community/detail/mis_mg.cu | 51 ++ cpp/src/community/detail/mis_sg.cu | 51 ++ cpp/src/community/detail/refine.hpp | 49 ++ cpp/src/community/detail/refine_impl.cuh | 767 ++++++++++++++++++ cpp/src/community/detail/refine_mg.cu | 142 ++++ cpp/src/community/detail/refine_sg.cu | 142 ++++ cpp/src/community/leiden_impl.cuh | 524 +++++++++++- cpp/src/community/leiden_mg.cu | 105 +++ cpp/src/community/leiden_sg.cu | 42 +- cpp/src/community/louvain_impl.cuh | 6 +- cpp/src/detail/utility_wrappers.cu | 21 + cpp/tests/CMakeLists.txt | 1 + cpp/tests/c_api/leiden_test.c | 13 +- cpp/tests/c_api/louvain_test.c | 6 +- cpp/tests/community/new_leiden_test.cpp | 238 ++++++ dependencies.yaml | 269 +++++- .../api_docs/cugraph-dgl/cugraph_dgl.rst | 15 + .../{ => cugraph-pyg}/cugraph_pyg.rst | 2 +- .../api_docs/{ => cugraph}/centrality.rst | 12 +- .../api_docs/{ => cugraph}/community.rst | 18 +- .../api_docs/{ => cugraph}/components.rst | 4 +- .../source/api_docs/{ => cugraph}/cores.rst | 4 +- .../api_docs/{ => cugraph}/cugraph_top.rst | 0 .../api_docs/{ => cugraph}/dask-cugraph.rst | 0 .../api_docs/{ => cugraph}/generator.rst | 2 +- .../{ => cugraph}/graph_implementation.rst 
| 2 +- .../{ => cugraph}/helper_functions.rst | 2 +- .../source/api_docs/{ => cugraph}/layout.rst | 2 +- .../{ => cugraph}/linear_assignment.rst | 2 +- .../api_docs/{ => cugraph}/link_analysis.rst | 8 +- .../{ => cugraph}/link_prediction.rst | 6 +- .../api_docs/{ => cugraph}/property_graph.rst | 2 +- .../api_docs/{ => cugraph}/sampling.rst | 4 +- .../source/api_docs/cugraph/structure.rst | 104 +++ .../api_docs/{ => cugraph}/traversal.rst | 8 +- .../source/api_docs/{ => cugraph}/tree.rst | 4 +- .../api_docs/{ => cugraph_c}/c_and_cpp.rst | 0 docs/cugraph/source/api_docs/cugraph_dgl.rst | 15 - docs/cugraph/source/api_docs/index.rst | 12 +- .../api_docs/{ => plc}/pylibcugraph.rst | 2 +- .../{ => service}/cugraph_service_client.rst | 5 +- .../{ => service}/cugraph_service_server.rst | 2 +- docs/cugraph/source/api_docs/structure.rst | 1 - docs/cugraph/source/basics/index.rst | 3 + docs/cugraph/source/dev_resources/API.rst | 5 + docs/cugraph/source/dev_resources/index.rst | 10 + .../source/graph_support/DGL_support.md | 55 ++ .../source/graph_support/PyG_support.md | 3 + .../source/graph_support/algorithms.md | 85 ++ .../source/graph_support/compatibility.rst | 8 + .../source/graph_support/cugraph_service.rst | 9 + .../graph_support/cugraphops_support.rst | 10 + .../source/graph_support/datastores.rst | 11 + .../source/graph_support/feature_stores.md | 0 .../source/graph_support/gnn_support.rst | 12 + .../source/graph_support/graph_algorithms.rst | 9 + docs/cugraph/source/graph_support/index.rst | 13 + .../source/graph_support/knowledge_stores.md | 0 .../source/graph_support/pg_example.png | Bin 0 -> 242398 bytes .../source/graph_support/property_graph.md | 54 ++ .../graph_support/wholegraph_support.rst | 0 docs/cugraph/source/index.rst | 48 +- .../source/installation/getting_cugraph.md | 55 ++ docs/cugraph/source/installation/index.rst | 10 + .../source/installation/source_build.md | 271 +++++++ .../{basics => references}/cugraph_ref.rst | 0 docs/cugraph/source/references/datasets.rst | 0 docs/cugraph/source/references/index.rst | 9 + docs/cugraph/source/references/licenses.rst | 0 docs/cugraph/source/releases/index.rst | 5 + .../source/tutorials/community_resources.md | 2 + .../{basics => tutorials}/cugraph_blogs.rst | 0 .../source/tutorials/cugraph_notebooks.md | 77 ++ .../cugraph/source/tutorials/how_to_guides.md | 9 + docs/cugraph/source/tutorials/index.rst | 11 + .../scripts => mg_utils}/README.md | 4 +- .../scripts => mg_utils}/default-config.sh | 2 +- .../scripts => mg_utils}/functions.sh | 2 +- .../scripts => mg_utils}/run-dask-process.sh | 2 +- .../cgs_mag_extension.py | 88 -- notebooks/gnn/pyg_hetero_mag.ipynb | 391 --------- notebooks/gnn/pyg_hetero_mag_cgs.ipynb | 273 ------- .../cugraph_dgl/cugraph_storage.py | 5 + .../cugraph_dgl/dataloading/dataloader.py | 29 +- .../utils/extract_graph_helpers.py | 2 - .../cugraph-dgl/cugraph_dgl/nn/conv/base.py | 50 ++ .../cugraph_dgl/nn/conv/gatconv.py | 26 +- .../cugraph_dgl/nn/conv/relgraphconv.py | 51 +- .../cugraph_dgl/nn/conv/sageconv.py | 23 +- .../examples/dataset_from_disk_cudf.ipynb | 9 +- .../muti_trainer_MG_example/workflow.py | 233 ++++++ python/cugraph-dgl/pyproject.toml | 9 +- python/cugraph-dgl/tests/nn/test_gatconv.py | 2 +- python/cugraph-dgl/tests/nn/test_sageconv.py | 14 +- .../cugraph_pyg/data/cugraph_store.py | 333 ++++---- .../cugraph_pyg/examples/README.md | 11 + .../cugraph_pyg/examples/graph_sage_mg.py | 432 ++++++++++ .../cugraph_pyg/examples/graph_sage_sg.py | 215 +++++ .../cugraph_pyg/examples/start_dask.sh} | 26 +- 
.../cugraph_pyg/loader/cugraph_node_loader.py | 87 +- .../cugraph_pyg/sampler/cugraph_sampler.py | 26 +- .../cugraph-pyg/cugraph_pyg/tests/conftest.py | 79 +- .../cugraph_pyg/tests/int/test_int_cugraph.py | 216 ----- .../tests/mg/test_mg_cugraph_loader.py | 25 +- .../tests/mg/test_mg_cugraph_sampler.py | 52 +- .../tests/mg/test_mg_cugraph_store.py | 189 ++--- .../cugraph_pyg/tests/test_cugraph_loader.py | 24 +- .../cugraph_pyg/tests/test_cugraph_sampler.py | 60 +- .../cugraph_pyg/tests/test_cugraph_store.py | 191 +++-- python/cugraph-pyg/pyproject.toml | 7 +- python/cugraph-service/client/pyproject.toml | 6 +- .../testing/benchmark_server_extension.py | 2 - python/cugraph-service/server/pyproject.toml | 48 +- python/cugraph/cugraph/__init__.py | 1 + .../cugraph/cugraph/centrality/CMakeLists.txt | 3 +- .../centrality/betweenness_centrality.py | 158 ++-- .../betweenness_centrality_wrapper.pyx | 232 ------ ...ty.pxd => edge_betweenness_centrality.pxd} | 12 +- .../edge_betweenness_centrality_wrapper.pyx | 4 +- .../cugraph/cugraph/community/CMakeLists.txt | 5 +- python/cugraph/cugraph/community/__init__.py | 3 +- python/cugraph/cugraph/community/ecg.py | 29 +- .../cugraph/cugraph/community/ecg_wrapper.pyx | 84 -- .../cugraph/community/induced_subgraph.py | 165 ++++ .../cugraph/community/spectral_clustering.pxd | 60 -- .../cugraph/community/spectral_clustering.py | 112 ++- .../community/spectral_clustering_wrapper.pyx | 341 -------- .../cugraph/community/subgraph_extraction.pxd | 28 - .../cugraph/community/subgraph_extraction.py | 62 +- .../community/subgraph_extraction_wrapper.pyx | 80 -- .../cugraph/community/triangle_count.py | 26 +- .../cugraph/components/connectivity.py | 14 + python/cugraph/cugraph/dask/__init__.py | 4 +- .../dask/centrality/betweenness_centrality.py | 232 ++++++ .../cugraph/dask/community/__init__.py | 3 +- .../cugraph/cugraph/dask/community/egonet.py | 6 +- .../dask/community/induced_subgraph.py | 215 +++++ .../cugraph/dask/link_analysis/pagerank.py | 38 +- .../dask/sampling/uniform_neighbor_sample.py | 54 +- .../dask/structure/mg_property_graph.py | 33 +- python/cugraph/cugraph/dask/traversal/bfs.py | 3 - .../experimental/link_prediction/jaccard.py | 23 +- .../experimental/link_prediction/overlap.py | 23 +- .../experimental/link_prediction/sorensen.py | 23 +- python/cugraph/cugraph/generators/rmat.py | 2 +- .../cugraph/gnn/data_loading/bulk_sampler.py | 30 +- .../gnn/dgl_extensions/utils/sampling.py | 2 - .../cugraph/layout/force_atlas2_wrapper.pyx | 6 +- .../cugraph/cugraph/link_analysis/pagerank.py | 38 +- python/cugraph/cugraph/sampling/node2vec.py | 3 - .../cugraph/cugraph/sampling/random_walks.py | 10 +- .../sampling/uniform_neighbor_sample.py | 27 +- .../cugraph/cugraph/structure/CMakeLists.txt | 4 +- python/cugraph/cugraph/structure/__init__.py | 1 - .../cugraph/structure/graph_classes.py | 12 +- .../simpleDistributedGraph.py | 145 +++- .../graph_implementation/simpleGraph.py | 134 ++- .../cugraph/cugraph/structure/number_map.py | 227 +----- .../cugraph/structure/property_graph.py | 16 +- .../cugraph/structure/renumber_wrapper.pyx | 605 -------------- python/cugraph/cugraph/structure/shuffle.py | 120 --- .../centrality/test_betweenness_centrality.py | 5 +- .../test_betweenness_centrality_mg.py | 191 +++++ .../test_eigenvector_centrality_mg.py | 8 +- .../centrality/test_katz_centrality_mg.py | 12 +- .../tests/community/test_balanced_cut.py | 5 +- .../cugraph/tests/community/test_ecg.py | 1 + .../community/test_induced_subgraph_mg.py | 174 ++++ 
.../cugraph/tests/community/test_leiden.py | 10 +- .../tests/community/test_modularity.py | 2 + .../community/test_subgraph_extraction.py | 7 +- .../cugraph/tests/core/test_core_number_mg.py | 8 +- .../cugraph/tests/core/test_k_core_mg.py | 3 - .../tests/data_store/test_property_graph.py | 67 +- .../data_store/test_property_graph_mg.py | 40 +- .../cugraph/tests/internals/test_renumber.py | 127 +-- .../tests/internals/test_renumber_mg.py | 46 -- .../tests/link_analysis/test_hits_mg.py | 7 +- .../tests/link_analysis/test_pagerank.py | 23 +- .../tests/link_analysis/test_pagerank_mg.py | 4 +- .../tests/link_prediction/test_jaccard_mg.py | 3 - .../tests/link_prediction/test_overlap_mg.py | 3 - .../tests/link_prediction/test_sorensen_mg.py | 3 - .../tests/sampling/test_bulk_sampler.py | 10 +- .../tests/sampling/test_bulk_sampler_mg.py | 10 +- .../cugraph/tests/sampling/test_egonet_mg.py | 1 - .../tests/sampling/test_random_walks_mg.py | 1 - .../sampling/test_uniform_neighbor_sample.py | 9 +- .../test_uniform_neighbor_sample_mg.py | 86 +- .../cugraph/tests/structure/test_graph.py | 163 ++-- .../cugraph/tests/structure/test_graph_mg.py | 87 +- .../cugraph/tests/traversal/test_sssp_mg.py | 3 +- python/cugraph/cugraph/traversal/bfs.py | 3 - python/cugraph/cugraph/traversal/sssp.py | 6 +- .../cugraph/cugraph/utilities/nx_factory.py | 41 +- python/cugraph/cugraph/utilities/utils.py | 9 +- python/cugraph/pyproject.toml | 50 +- .../pylibcugraph/pylibcugraph/CMakeLists.txt | 9 + python/pylibcugraph/pylibcugraph/__init__.py | 19 + .../_cugraph_c/centrality_algorithms.pxd | 25 +- .../_cugraph_c/community_algorithms.pxd | 102 +++ .../_cugraph_c/sampling_algorithms.pxd | 14 + .../analyze_clustering_edge_cut.pyx | 138 ++++ .../analyze_clustering_modularity.pyx | 142 ++++ .../analyze_clustering_ratio_cut.pyx | 138 ++++ .../pylibcugraph/balanced_cut_clustering.pyx | 157 ++++ .../pylibcugraph/betweenness_centrality.pyx | 160 ++++ python/pylibcugraph/pylibcugraph/bfs.pyx | 9 +- python/pylibcugraph/pylibcugraph/ecg.pyx | 148 ++++ .../pylibcugraph/induced_subgraph.pyx | 169 ++++ .../pylibcugraph/select_random_vertices.pyx | 122 +++ .../spectral_modularity_maximization.pyx | 157 ++++ python/pylibcugraph/pyproject.toml | 27 +- readme_pages/SOURCEBUILD.md | 1 + 242 files changed, 9313 insertions(+), 4219 deletions(-) create mode 100644 cpp/src/community/detail/mis.hpp create mode 100644 cpp/src/community/detail/mis_impl.cuh create mode 100644 cpp/src/community/detail/mis_mg.cu create mode 100644 cpp/src/community/detail/mis_sg.cu create mode 100644 cpp/src/community/detail/refine.hpp create mode 100644 cpp/src/community/detail/refine_impl.cuh create mode 100644 cpp/src/community/detail/refine_mg.cu create mode 100644 cpp/src/community/detail/refine_sg.cu create mode 100644 cpp/src/community/leiden_mg.cu create mode 100644 cpp/tests/community/new_leiden_test.cpp create mode 100644 docs/cugraph/source/api_docs/cugraph-dgl/cugraph_dgl.rst rename docs/cugraph/source/api_docs/{ => cugraph-pyg}/cugraph_pyg.rst (89%) rename docs/cugraph/source/api_docs/{ => cugraph}/centrality.rst (80%) rename docs/cugraph/source/api_docs/{ => cugraph}/community.rst (78%) rename docs/cugraph/source/api_docs/{ => cugraph}/components.rst (86%) rename docs/cugraph/source/api_docs/{ => cugraph}/cores.rst (73%) rename docs/cugraph/source/api_docs/{ => cugraph}/cugraph_top.rst (100%) rename docs/cugraph/source/api_docs/{ => cugraph}/dask-cugraph.rst (100%) rename docs/cugraph/source/api_docs/{ => cugraph}/generator.rst (80%) rename 
docs/cugraph/source/api_docs/{ => cugraph}/graph_implementation.rst (94%) rename docs/cugraph/source/api_docs/{ => cugraph}/helper_functions.rst (95%) rename docs/cugraph/source/api_docs/{ => cugraph}/layout.rst (80%) rename docs/cugraph/source/api_docs/{ => cugraph}/linear_assignment.rst (85%) rename docs/cugraph/source/api_docs/{ => cugraph}/link_analysis.rst (75%) rename docs/cugraph/source/api_docs/{ => cugraph}/link_prediction.rst (84%) rename docs/cugraph/source/api_docs/{ => cugraph}/property_graph.rst (96%) rename docs/cugraph/source/api_docs/{ => cugraph}/sampling.rst (80%) create mode 100644 docs/cugraph/source/api_docs/cugraph/structure.rst rename docs/cugraph/source/api_docs/{ => cugraph}/traversal.rst (82%) rename docs/cugraph/source/api_docs/{ => cugraph}/tree.rst (83%) rename docs/cugraph/source/api_docs/{ => cugraph_c}/c_and_cpp.rst (100%) delete mode 100644 docs/cugraph/source/api_docs/cugraph_dgl.rst rename docs/cugraph/source/api_docs/{ => plc}/pylibcugraph.rst (94%) rename docs/cugraph/source/api_docs/{ => service}/cugraph_service_client.rst (95%) rename docs/cugraph/source/api_docs/{ => service}/cugraph_service_server.rst (92%) create mode 100644 docs/cugraph/source/dev_resources/API.rst create mode 100644 docs/cugraph/source/dev_resources/index.rst create mode 100644 docs/cugraph/source/graph_support/DGL_support.md create mode 100644 docs/cugraph/source/graph_support/PyG_support.md create mode 100644 docs/cugraph/source/graph_support/algorithms.md create mode 100644 docs/cugraph/source/graph_support/compatibility.rst create mode 100644 docs/cugraph/source/graph_support/cugraph_service.rst create mode 100644 docs/cugraph/source/graph_support/cugraphops_support.rst create mode 100644 docs/cugraph/source/graph_support/datastores.rst create mode 100644 docs/cugraph/source/graph_support/feature_stores.md create mode 100644 docs/cugraph/source/graph_support/gnn_support.rst create mode 100644 docs/cugraph/source/graph_support/graph_algorithms.rst create mode 100644 docs/cugraph/source/graph_support/index.rst create mode 100644 docs/cugraph/source/graph_support/knowledge_stores.md create mode 100644 docs/cugraph/source/graph_support/pg_example.png create mode 100644 docs/cugraph/source/graph_support/property_graph.md create mode 100644 docs/cugraph/source/graph_support/wholegraph_support.rst create mode 100644 docs/cugraph/source/installation/getting_cugraph.md create mode 100644 docs/cugraph/source/installation/index.rst create mode 100644 docs/cugraph/source/installation/source_build.md rename docs/cugraph/source/{basics => references}/cugraph_ref.rst (100%) create mode 100644 docs/cugraph/source/references/datasets.rst create mode 100644 docs/cugraph/source/references/index.rst create mode 100644 docs/cugraph/source/references/licenses.rst create mode 100644 docs/cugraph/source/releases/index.rst create mode 100644 docs/cugraph/source/tutorials/community_resources.md rename docs/cugraph/source/{basics => tutorials}/cugraph_blogs.rst (100%) create mode 100644 docs/cugraph/source/tutorials/cugraph_notebooks.md create mode 100644 docs/cugraph/source/tutorials/how_to_guides.md create mode 100644 docs/cugraph/source/tutorials/index.rst rename {python/cugraph-service/scripts => mg_utils}/README.md (56%) rename {python/cugraph-service/scripts => mg_utils}/default-config.sh (97%) rename {python/cugraph-service/scripts => mg_utils}/functions.sh (98%) rename {python/cugraph-service/scripts => mg_utils}/run-dask-process.sh (99%) delete mode 100644 
notebooks/gnn/cgs_creation_extensions/cgs_mag_extension.py delete mode 100644 notebooks/gnn/pyg_hetero_mag.ipynb delete mode 100644 notebooks/gnn/pyg_hetero_mag_cgs.ipynb create mode 100644 python/cugraph-dgl/cugraph_dgl/nn/conv/base.py create mode 100644 python/cugraph-dgl/examples/muti_trainer_MG_example/workflow.py create mode 100644 python/cugraph-pyg/cugraph_pyg/examples/README.md create mode 100644 python/cugraph-pyg/cugraph_pyg/examples/graph_sage_mg.py create mode 100644 python/cugraph-pyg/cugraph_pyg/examples/graph_sage_sg.py rename python/{cugraph/cugraph/community/ecg.pxd => cugraph-pyg/cugraph_pyg/examples/start_dask.sh} (54%) mode change 100644 => 100755 delete mode 100644 python/cugraph-pyg/cugraph_pyg/tests/int/test_int_cugraph.py delete mode 100644 python/cugraph/cugraph/centrality/betweenness_centrality_wrapper.pyx rename python/cugraph/cugraph/centrality/{betweenness_centrality.pxd => edge_betweenness_centrality.pxd} (75%) delete mode 100644 python/cugraph/cugraph/community/ecg_wrapper.pyx create mode 100644 python/cugraph/cugraph/community/induced_subgraph.py delete mode 100644 python/cugraph/cugraph/community/spectral_clustering.pxd delete mode 100644 python/cugraph/cugraph/community/spectral_clustering_wrapper.pyx delete mode 100644 python/cugraph/cugraph/community/subgraph_extraction.pxd delete mode 100644 python/cugraph/cugraph/community/subgraph_extraction_wrapper.pyx create mode 100644 python/cugraph/cugraph/dask/centrality/betweenness_centrality.py create mode 100644 python/cugraph/cugraph/dask/community/induced_subgraph.py delete mode 100644 python/cugraph/cugraph/structure/renumber_wrapper.pyx delete mode 100644 python/cugraph/cugraph/structure/shuffle.py create mode 100644 python/cugraph/cugraph/tests/centrality/test_betweenness_centrality_mg.py create mode 100644 python/cugraph/cugraph/tests/community/test_induced_subgraph_mg.py create mode 100644 python/pylibcugraph/pylibcugraph/analyze_clustering_edge_cut.pyx create mode 100644 python/pylibcugraph/pylibcugraph/analyze_clustering_modularity.pyx create mode 100644 python/pylibcugraph/pylibcugraph/analyze_clustering_ratio_cut.pyx create mode 100644 python/pylibcugraph/pylibcugraph/balanced_cut_clustering.pyx create mode 100644 python/pylibcugraph/pylibcugraph/betweenness_centrality.pyx create mode 100644 python/pylibcugraph/pylibcugraph/ecg.pyx create mode 100644 python/pylibcugraph/pylibcugraph/induced_subgraph.pyx create mode 100644 python/pylibcugraph/pylibcugraph/select_random_vertices.pyx create mode 100644 python/pylibcugraph/pylibcugraph/spectral_modularity_maximization.pyx diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 5c1506ddec3..4bcf22586e6 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -121,9 +121,9 @@ jobs: build_type: pull-request package-name: cugraph # Always want to test against latest dask/distributed. 
- test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" + test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install git+https://github.com/dask/dask.git@2023.3.2 git+https://github.com/dask/distributed.git@2023.3.2.1 git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" # Skip dataset downloads on arm to save CI time -- arm only runs smoke tests. # On arm also need to install cupy from the specific site. - test-before-arm64: "RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" + test-before-arm64: "RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@2023.3.2 git+https://github.com/dask/distributed.git@2023.3.2.1 git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" test-unittest: "RAPIDS_DATASET_ROOT_DIR=/__w/cugraph/cugraph/datasets pytest -v -m sg ./python/cugraph/cugraph/tests" test-smoketest: "python ci/wheel_smoke_test_cugraph.py" diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 57ab2c27618..3b44667c1c3 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -52,7 +52,7 @@ jobs: sha: ${{ inputs.sha }} package-name: cugraph # Always want to test against latest dask/distributed. - test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" + test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install git+https://github.com/dask/dask.git@2023.3.2 git+https://github.com/dask/distributed.git@2023.3.2.1 git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" # On arm also need to install cupy from the specific webpage. 
- test-before-arm64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" + test-before-arm64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install 'cupy-cuda11x<12.0.0' -f https://pip.cupy.dev/aarch64 && pip install git+https://github.com/dask/dask.git@2023.3.2 git+https://github.com/dask/distributed.git@2023.3.2.1 git+https://github.com/rapidsai/dask-cuda.git@branch-23.06" test-unittest: "RAPIDS_DATASET_ROOT_DIR=/__w/cugraph/cugraph/datasets pytest -v -m sg ./python/cugraph/cugraph/tests" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a5d5a00ba08..3c2f5fe2cfb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: pass_filenames: false additional_dependencies: [gitpython] - repo: https://github.com/rapidsai/dependency-file-generator - rev: v1.4.0 + rev: v1.5.1 hooks: - id: rapids-dependency-file-generator args: ["--clean"] diff --git a/benchmarks/cugraph-dgl/notebooks/get_node_storage.ipynb b/benchmarks/cugraph-dgl/notebooks/get_node_storage.ipynb index 7b3f43eb2ca..95b456c7812 100644 --- a/benchmarks/cugraph-dgl/notebooks/get_node_storage.ipynb +++ b/benchmarks/cugraph-dgl/notebooks/get_node_storage.ipynb @@ -32,7 +32,8 @@ "import cupy as cp\n", "\n", "#TODO: Enable in torch nightly\n", - "# torch.cuda.memory.change_current_allocator(rmm.rmm_torch_allocator)\n", + "# from rmm.allocators.torch import rmm_torch_allocator\n", + "# torch.cuda.memory.change_current_allocator(rmm_torch_allocator)\n", " \n", "import cugraph_dgl\n", "from dgl.data import AsNodePredDataset\n", @@ -92,8 +93,9 @@ " client = Client(cluster)\n", " Comms.initialize(p2p=True)\n", "else:\n", + " from rmm.allocators.torch import rmm_torch_allocator\n", " rmm.reinitialize(pool_allocator=True, initial_pool_size=5e9, maximum_pool_size=20e9)\n", - " torch.cuda.memory.change_current_allocator(rmm.rmm_torch_allocator)" + " torch.cuda.memory.change_current_allocator(rmm_torch_allocator)" ] }, { diff --git a/benchmarks/cugraph-dgl/notebooks/heterogeneous_dataloader_benchmark.ipynb b/benchmarks/cugraph-dgl/notebooks/heterogeneous_dataloader_benchmark.ipynb index f9e861d745c..d3b054bb0ee 100644 --- a/benchmarks/cugraph-dgl/notebooks/heterogeneous_dataloader_benchmark.ipynb +++ b/benchmarks/cugraph-dgl/notebooks/heterogeneous_dataloader_benchmark.ipynb @@ -32,7 +32,8 @@ "import cupy as cp\n", "\n", "#TODO: Enable in torch nightly\n", - "# torch.cuda.memory.change_current_allocator(rmm.rmm_torch_allocator)\n", + "# from rmm.allocators.torch import rmm_torch_allocator\n", + "# torch.cuda.memory.change_current_allocator(rmm_torch_allocator)\n", " \n", "import cugraph_dgl\n", "from dgl.data import AsNodePredDataset\n", @@ -90,7 +91,8 @@ "else:\n", " enable_cudf_spilling()\n", " rmm.reinitialize(pool_allocator=True, initial_pool_size=5e9, maximum_pool_size=20e9)\n", - " #torch.cuda.memory.change_current_allocator(rmm.rmm_torch_allocator)" + " # from rmm.allocators.torch import rmm_torch_allocator\n", + " # torch.cuda.memory.change_current_allocator(rmm_torch_allocator)" ] }, { diff --git a/benchmarks/cugraph-dgl/notebooks/homogenous_dataloader_benchmark.ipynb b/benchmarks/cugraph-dgl/notebooks/homogenous_dataloader_benchmark.ipynb index 04ca9a5431f..ea1e9b34965 100644 --- 
a/benchmarks/cugraph-dgl/notebooks/homogenous_dataloader_benchmark.ipynb +++ b/benchmarks/cugraph-dgl/notebooks/homogenous_dataloader_benchmark.ipynb @@ -39,7 +39,8 @@ "import numpy as np\n", "\n", "#TODO: Enable in torch nightly\n", - "# torch.cuda.memory.change_current_allocator(rmm.rmm_torch_allocator)\n", + "# from rmm.allocators.torch import rmm_torch_allocator\n", + "# torch.cuda.memory.change_current_allocator(rmm_torch_allocator)\n", " \n", "import cugraph_dgl\n", "from dgl.data import AsNodePredDataset\n", @@ -97,7 +98,8 @@ "else:\n", " enable_cudf_spilling()\n", " rmm.reinitialize(pool_allocator=True, initial_pool_size=5e9, maximum_pool_size=20e9)\n", - " #torch.cuda.memory.change_current_allocator(rmm.rmm_torch_allocator)" + " # from rmm.allocators.torch import rmm_torch_allocator\n", + " # torch.cuda.memory.change_current_allocator(rmm_torch_allocator)" ] }, { diff --git a/benchmarks/cugraph/pytest-based/bench_algos.py b/benchmarks/cugraph/pytest-based/bench_algos.py index bdfbbfef0dc..c57731dee8d 100644 --- a/benchmarks/cugraph/pytest-based/bench_algos.py +++ b/benchmarks/cugraph/pytest-based/bench_algos.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -268,7 +268,7 @@ def bench_graph_degrees(gpubenchmark, anyGraphWithAdjListComputed): def bench_betweenness_centrality(gpubenchmark, anyGraphWithAdjListComputed): gpubenchmark(cugraph.betweenness_centrality, - anyGraphWithAdjListComputed, k=10, seed=123) + anyGraphWithAdjListComputed, k=10, random_state=123) def bench_edge_betweenness_centrality(gpubenchmark, diff --git a/benchmarks/cugraph/pytest-based/bench_cugraph_uniform_neighbor_sample.py b/benchmarks/cugraph/pytest-based/bench_cugraph_uniform_neighbor_sample.py index ca37ef74b35..8fe6e81ccf1 100644 --- a/benchmarks/cugraph/pytest-based/bench_cugraph_uniform_neighbor_sample.py +++ b/benchmarks/cugraph/pytest-based/bench_cugraph_uniform_neighbor_sample.py @@ -65,7 +65,7 @@ def create_graph(graph_data): # FIXME: edgelist_df should have column names that match the defaults # for G.from_cudf_edgelist() G.from_cudf_edgelist( - edgelist_df, source="src", destination="dst", edge_attr="wgt", legacy_renum_only=True + edgelist_df, source="src", destination="dst", edge_attr="wgt" ) num_verts = G.number_of_vertices() @@ -94,7 +94,6 @@ def create_graph(graph_data): source="src", destination="dst", edge_attr="weight", - legacy_renum_only=True, ) else: @@ -135,7 +134,7 @@ def create_mg_graph(graph_data): # for G.from_cudf_edgelist() edgelist_df = dask_cudf.from_cudf(edgelist_df) G.from_dask_cudf_edgelist( - edgelist_df, source="src", destination="dst", edge_attr="wgt", legacy_renum_only=True + edgelist_df, source="src", destination="dst", edge_attr="wgt" ) num_verts = G.number_of_vertices() @@ -164,7 +163,6 @@ def create_mg_graph(graph_data): source="src", destination="dst", edge_attr="weight", - legacy_renum_only=True, ) else: diff --git a/benchmarks/cugraph/standalone/benchmark.py b/benchmarks/cugraph/standalone/benchmark.py index 4cb9576611a..e02194648a6 100644 --- a/benchmarks/cugraph/standalone/benchmark.py +++ b/benchmarks/cugraph/standalone/benchmark.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2022, NVIDIA CORPORATION. +# Copyright (c) 2021-2023, NVIDIA CORPORATION. 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -133,7 +133,6 @@ def run(self): # Algos with transposed=True : PageRank, Katz. # Algos with transposed=False: BFS, SSSP, Louvain, HITS, # Neighborhood_sampling. - # Algos supporting the legacy_renum_only: HITS, Neighborhood_sampling # for i in range(len(self.algos)): # set transpose=True when renumbering @@ -151,7 +150,7 @@ def run(self): self.algos[i][1]["alpha"] = katz_alpha if hasattr(G, "compute_renumber_edge_list"): G.compute_renumber_edge_list( - transposed=True, legacy_renum_only=True) + transposed=True) else: # FIXME: Pagerank still follows the old path. Update this once it # follows the pylibcugraph/C path @@ -166,7 +165,7 @@ def run(self): G.compute_renumber_edge_list(transposed=False) else: G.compute_renumber_edge_list( - transposed=False, legacy_renum_only=True) + transposed=False) self.__log("done.") # FIXME: need to handle individual algo args for ((algo, params), validator) in zip(self.algos, self.validators): diff --git a/benchmarks/shared/build_cugraph_ucx/test_cugraph_sampling.py b/benchmarks/shared/build_cugraph_ucx/test_cugraph_sampling.py index 6f6b1f320a1..110ad80838a 100644 --- a/benchmarks/shared/build_cugraph_ucx/test_cugraph_sampling.py +++ b/benchmarks/shared/build_cugraph_ucx/test_cugraph_sampling.py @@ -64,7 +64,6 @@ def create_mg_graph(graph_data): source="src", destination="dst", edge_attr="weight", - legacy_renum_only=True, ) return G diff --git a/ci/build_docs.sh b/ci/build_docs.sh index ec90a0ead4b..dc449437704 100755 --- a/ci/build_docs.sh +++ b/ci/build_docs.sh @@ -45,6 +45,9 @@ popd rapids-logger "Build Sphinx docs" pushd docs/cugraph +# Ensure cugraph is importable, since sphinx does not report details about this +# type of failure well. 
+python -c "import cugraph; print(f'Using cugraph: {cugraph}')" sphinx-build -b dirhtml source _html sphinx-build -b text source _text popd diff --git a/ci/release/apply_wheel_modifications.sh b/ci/release/apply_wheel_modifications.sh index 72c9d8a4162..ed291077494 100755 --- a/ci/release/apply_wheel_modifications.sh +++ b/ci/release/apply_wheel_modifications.sh @@ -28,3 +28,4 @@ sed -i "s/cudf/cudf${CUDA_SUFFIX}/g" python/cugraph/pyproject.toml sed -i "s/raft-dask/raft-dask${CUDA_SUFFIX}/g" python/cugraph/pyproject.toml sed -i "s/pylibcugraph/pylibcugraph${CUDA_SUFFIX}/g" python/cugraph/pyproject.toml sed -i "s/pylibraft/pylibraft${CUDA_SUFFIX}/g" python/cugraph/pyproject.toml +sed -i "s/ucx-py/ucx-py${CUDA_SUFFIX}/g" python/cugraph/pyproject.toml diff --git a/ci/test_python.sh b/ci/test_python.sh index f74a572a930..2a6be338819 100755 --- a/ci/test_python.sh +++ b/ci/test_python.sh @@ -117,10 +117,11 @@ if [[ "${RAPIDS_CUDA_VERSION}" == "11.8.0" ]]; then --channel "${PYTHON_CHANNEL}" \ --channel pytorch \ --channel pytorch-nightly \ - --channel dglteam/label/cu117 \ + --channel dglteam/label/cu118 \ --channel nvidia \ libcugraph \ pylibcugraph \ + pylibcugraphops \ cugraph \ cugraph-dgl \ 'dgl>=1.0' \ @@ -134,7 +135,6 @@ if [[ "${RAPIDS_CUDA_VERSION}" == "11.8.0" ]]; then pytest \ --cache-clear \ --ignore=mg \ - --ignore=nn \ --junitxml="${RAPIDS_TESTS_DIR}/junit-cugraph-dgl.xml" \ --cov-config=../../.coveragerc \ --cov=cugraph_dgl \ diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index 788cb2a3c97..e38d46c1906 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -10,47 +10,51 @@ dependencies: - aiohttp - c-compiler - cmake>=3.23.1,!=3.25.0 -- cuda-python>=11.7.1,<12.0 - cudatoolkit=11.8 -- cudf=23.06.* +- cudf==23.6.* +- cupy>=9.5.0,<12.0.0a0 - cxx-compiler - cython>=0.29,<0.30 -- dask-cuda=23.06.* -- dask-cudf=23.06.* -- dask==2023.1.1 -- distributed==2023.1.1 +- dask-core==2023.3.2 +- dask-cuda==23.6.* +- dask-cudf==23.6.* +- dask==2023.3.2 +- distributed==2023.3.2.1 - doxygen +- fsspec[http]>=0.6.0 - gcc_linux-64=11.* - gmock=1.10.0 - graphviz - gtest=1.10.0 - ipython -- libcudf=23.06.* -- libcugraphops=23.06.* -- libraft-headers=23.06.* -- libraft=23.06.* -- librmm=23.06.* +- libcudf=23.6.* +- libcugraphops=23.6.* +- libraft-headers=23.6.* +- libraft=23.6.* +- librmm=23.6.* - nbsphinx - nccl>=2.9.9 - networkx>=2.5.1 - ninja - notebook>=0.5.0 +- numba>=0.56.2 +- numpy>=1.21 - numpydoc - nvcc_linux-64=11.8 - openmpi -- pip +- pandas - pre-commit -- py - pydata-sphinx-theme -- pylibraft=23.06.* +- pylibraft==23.6.* - pytest +- pytest-benchmark - pytest-cov +- pytest-xdist - python-louvain -- raft-dask=23.06.* -- rapids-pytest-benchmark +- raft-dask==23.6.* - recommonmark - requests -- rmm=23.06.* +- rmm==23.6.* - scikit-build>=0.13.1 - scikit-learn>=0.23.1 - scipy @@ -59,5 +63,5 @@ dependencies: - sphinx<6 - sphinxcontrib-websupport - ucx-proc=*=gpu -- ucx-py=0.32.* +- ucx-py==0.32.* name: all_cuda-118_arch-x86_64 diff --git a/conda/recipes/cugraph-pyg/meta.yaml b/conda/recipes/cugraph-pyg/meta.yaml index 36630415853..097f49bf527 100644 --- a/conda/recipes/cugraph-pyg/meta.yaml +++ b/conda/recipes/cugraph-pyg/meta.yaml @@ -26,13 +26,13 @@ requirements: - python x.x - scikit-build >=0.13.1 run: - - distributed >=2023.1.1 + - distributed ==2023.3.2.1 - numba >=0.56.2 - numpy - pytorch >=2.0 - cupy >=9.5.0,<12.0.0a0 - cugraph ={{ version }} - - pyg =2.3 + - 
pyg >=2.3,<2.4 tests: imports: diff --git a/conda/recipes/cugraph-service/meta.yaml b/conda/recipes/cugraph-service/meta.yaml index 4215c57e16a..499e28e88fc 100644 --- a/conda/recipes/cugraph-service/meta.yaml +++ b/conda/recipes/cugraph-service/meta.yaml @@ -57,7 +57,7 @@ outputs: - cupy >=9.5.0,<12.0.0a0 - dask-cuda ={{ minor_version }} - dask-cudf ={{ minor_version }} - - distributed >=2023.1.1 + - distributed ==2023.3.2.1 - numpy - python x.x - thriftpy2 >=0.4.15 diff --git a/conda/recipes/cugraph/meta.yaml b/conda/recipes/cugraph/meta.yaml index 74163f8cf52..0e6946c54bd 100644 --- a/conda/recipes/cugraph/meta.yaml +++ b/conda/recipes/cugraph/meta.yaml @@ -72,8 +72,9 @@ requirements: - cupy >=9.5.0,<12.0.0a0 - dask-cuda ={{ minor_version }} - dask-cudf ={{ minor_version }} - - dask >=2023.1.1 - - distributed >=2023.1.1 + - dask ==2023.3.2 + - dask-core ==2023.3.2 + - distributed ==2023.3.2.1 - libcugraph ={{ version }} - libraft ={{ minor_version }} - libraft-headers ={{ minor_version }} diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index b4dc46527ac..8a47defed35 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -186,6 +186,10 @@ set(CUGRAPH_SOURCES src/sampling/random_walks_mg.cu src/community/detail/common_methods_mg.cu src/community/detail/common_methods_sg.cu + src/community/detail/refine_sg.cu + src/community/detail/refine_mg.cu + src/community/detail/mis_sg.cu + src/community/detail/mis_mg.cu src/detail/utility_wrappers.cu src/structure/graph_view_mg.cu src/utilities/cython.cu @@ -207,6 +211,7 @@ set(CUGRAPH_SOURCES src/community/louvain_sg.cu src/community/louvain_mg.cu src/community/leiden_sg.cu + src/community/leiden_mg.cu src/community/legacy/louvain.cu src/community/legacy/leiden.cu src/community/legacy/ktruss.cu diff --git a/cpp/include/cugraph/algorithms.hpp b/cpp/include/cugraph/algorithms.hpp index 36716d1c17a..5eb347eb716 100644 --- a/cpp/include/cugraph/algorithms.hpp +++ b/cpp/include/cugraph/algorithms.hpp @@ -678,7 +678,7 @@ void flatten_dendrogram(raft::handle_t const& handle, typename graph_view_t::vertex_type* clustering); /** - * @brief Leiden implementation + * @brief Legacy Leiden implementation * * Compute a clustering of the graph by maximizing modularity using the Leiden improvements * to the Louvain method. @@ -700,7 +700,7 @@ void flatten_dendrogram(raft::handle_t const& handle, * @param[in] handle Library handle (RAFT). If a communicator is set in the handle, * @param[in] graph input graph object (CSR) * @param[out] clustering Pointer to device array where the clustering should be stored - * @param[in] max_iter (optional) maximum number of iterations to run (default 100) + * @param[in] max_level (optional) maximum number of levels to run (default 100) * @param[in] resolution (optional) The value of the resolution parameter to use. * Called gamma in the modularity formula, this changes the size * of the communities. Higher resolutions lead to more smaller @@ -715,9 +715,97 @@ template std::pair leiden(raft::handle_t const& handle, legacy::GraphCSRView const& graph, vertex_t* clustering, - size_t max_iter = 100, + size_t max_level = 100, weight_t resolution = weight_t{1}); +/** + * @brief Leiden implementation + * + * Compute a clustering of the graph by maximizing modularity using the Leiden improvements + * to the Louvain method. + * + * Computed using the Leiden method described in: + * + * Traag, V. A., Waltman, L., & van Eck, N. J. (2019). From Louvain to Leiden: + * guaranteeing well-connected communities. 
Scientific reports, 9(1), 5233. + * doi: 10.1038/s41598-019-41695-z + * + * @throws cugraph::logic_error when an error occurs. + * + * @tparam vertex_t Type of vertex identifiers. + * Supported value : int (signed, 32-bit) + * @tparam edge_t Type of edge identifiers. + * Supported value : int (signed, 32-bit) + * @tparam weight_t Type of edge weights. Supported values : float or double. + * + * @param[in] handle Library handle (RAFT). If a communicator is set in the handle, + * @param graph_view Graph view object. + * @param edge_weight_view Optional view object holding edge weights for @p graph_view. If @p + * edge_weight_view.has_value() == false, edge weights are assumed to be 1.0. + * @param[in] max_level (optional) maximum number of levels to run (default 100) + * @param[in] resolution (optional) The value of the resolution parameter to use. + * Called gamma in the modularity formula, this changes the size + * of the communities. Higher resolutions lead to more smaller + * communities, lower resolutions lead to fewer larger + * communities. (default 1) + * + * @return a pair containing: + * 1) unique pointer to dendrogram + * 2) modularity of the returned clustering + * + */ +template +std::pair>, weight_t> leiden( + raft::handle_t const& handle, + graph_view_t const& graph_view, + std::optional> edge_weight_view, + size_t max_level = 100, + weight_t resolution = weight_t{1}); + +/** + * @brief Leiden implementation + * + * Compute a clustering of the graph by maximizing modularity using the Leiden improvements + * to the Louvain method. + * + * Computed using the Leiden method described in: + * + * Traag, V. A., Waltman, L., & van Eck, N. J. (2019). From Louvain to Leiden: + * guaranteeing well-connected communities. Scientific reports, 9(1), 5233. + * doi: 10.1038/s41598-019-41695-z + * + * @throws cugraph::logic_error when an error occurs. + * + * @tparam vertex_t Type of vertex identifiers. + * Supported value : int (signed, 32-bit) + * @tparam edge_t Type of edge identifiers. + * Supported value : int (signed, 32-bit) + * @tparam weight_t Type of edge weights. Supported values : float or double. + * + * @param[in] handle Library handle (RAFT). If a communicator is set in the handle, + * @param graph_view Graph view object. + * @param edge_weight_view Optional view object holding edge weights for @p graph_view. If @p + * edge_weight_view.has_value() == false, edge weights are assumed to be 1.0. + * @param[in] max_level (optional) maximum number of levels to run (default 100) + * @param[in] resolution (optional) The value of the resolution parameter to use. + * Called gamma in the modularity formula, this changes the size + * of the communities. Higher resolutions lead to more smaller + * communities, lower resolutions lead to fewer larger + * communities. (default 1) + * + * @return a pair containing: + * 1) number of levels of the returned clustering + * 2) modularity of the returned clustering + */ +template +std::pair leiden( + raft::handle_t const& handle, + graph_view_t const& graph_view, + std::optional> edge_weight_view, + vertex_t* clustering, // FIXME: Use (device_)span instead + size_t max_level = 100, + weight_t resolution = weight_t{1}); + /** * @brief Computes the ecg clustering of the given graph. 
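
A minimal usage sketch for the new leiden() overloads declared above (dendrogram-returning and flat-clustering). This is not part of the patch: the angle-bracket template arguments are stripped in the hunk text, so the <vertex_t, edge_t, store_transposed, multi_gpu> arguments, the edge_property_view_t spelling, and the header paths below are reconstructed assumptions about the 23.06-era API, and the surrounding handle/graph setup is hypothetical.

    // Sketch only; template arguments and includes are assumptions, not verbatim
    // from this patch (the declarations above omit their angle-bracket contents).
    #include <cugraph/algorithms.hpp>
    #include <cugraph/graph_view.hpp>
    #include <raft/core/handle.hpp>
    #include <rmm/device_uvector.hpp>
    #include <optional>

    void leiden_usage_sketch(
      raft::handle_t const& handle,
      cugraph::graph_view_t<int32_t, int32_t, false, false> const& graph_view,
      std::optional<cugraph::edge_property_view_t<int32_t, float const*>> edge_weight_view)
    {
      // Overload returning the dendrogram: pair of (unique_ptr<Dendrogram>, modularity).
      auto [dendrogram, modularity] = cugraph::leiden<int32_t, int32_t, float, false>(
        handle, graph_view, edge_weight_view, /*max_level=*/100, /*resolution=*/1.0f);

      // Flat-clustering overload: writes the cluster id of each local vertex into a
      // preallocated device array and returns (number of levels, modularity).
      rmm::device_uvector<int32_t> clustering(
        graph_view.local_vertex_partition_range_size(), handle.get_stream());
      auto [num_levels, flat_modularity] = cugraph::leiden<int32_t, int32_t, float, false>(
        handle, graph_view, edge_weight_view, clustering.data(),
        /*max_level=*/100, /*resolution=*/1.0f);
    }

Either overload treats an absent edge_weight_view as unit edge weights, per the parameter documentation above.
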
* diff --git a/cpp/include/cugraph/detail/utility_wrappers.hpp b/cpp/include/cugraph/detail/utility_wrappers.hpp index 8aa35bbceaa..a15dbf34cf9 100644 --- a/cpp/include/cugraph/detail/utility_wrappers.hpp +++ b/cpp/include/cugraph/detail/utility_wrappers.hpp @@ -158,5 +158,21 @@ std::tuple, rmm::device_uvector> filter_de template bool is_sorted(raft::handle_t const& handle, raft::device_span span); +/** + * @brief Check if two device spans are equal. Returns true if every element in the spans are + * equal. + * + * @tparam data_t type of data in span + * @param handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator, and + * handles to various CUDA libraries) to run graph algorithms. + * @param span1 The span of data to compare + * @param span2 The span of data to compare + * @return true if equal, false if not equal + */ +template +bool is_equal(raft::handle_t const& handle, + raft::device_span span1, + raft::device_span span2); + } // namespace detail } // namespace cugraph diff --git a/cpp/src/c_api/legacy_spectral.cpp b/cpp/src/c_api/legacy_spectral.cpp index cf75cd96813..9d1a0273057 100644 --- a/cpp/src/c_api/legacy_spectral.cpp +++ b/cpp/src/c_api/legacy_spectral.cpp @@ -307,9 +307,11 @@ struct analyze_clustering_ratio_cut_functor : public cugraph::c_api::abstract_fu weight_t score; - if (cugraph::detail::is_sorted(handle_, - raft::device_span{ - vertices_->as_type(), vertices_->size_})) { + if (cugraph::detail::is_equal( + handle_, + raft::device_span{vertices_->as_type(), + vertices_->size_}, + raft::device_span{number_map->data(), number_map->size()})) { cugraph::ext_raft::analyzeClustering_ratio_cut( legacy_graph_view, n_clusters_, clusters_->as_type(), &score); } else { @@ -321,6 +323,15 @@ struct analyze_clustering_ratio_cut_functor : public cugraph::c_api::abstract_fu raft::copy( tmp_c.data(), clusters_->as_type(), clusters_->size_, handle_.get_stream()); + cugraph::renumber_ext_vertices( + handle_, + tmp_v.data(), + tmp_v.size(), + number_map->data(), + graph_view.local_vertex_partition_range_first(), + graph_view.local_vertex_partition_range_last(), + false); + cugraph::c_api::detail::sort_by_key( handle_, raft::device_span{tmp_v.data(), tmp_v.size()}, @@ -403,9 +414,11 @@ struct analyze_clustering_edge_cut_functor : public cugraph::c_api::abstract_fun weight_t score; - if (cugraph::detail::is_sorted(handle_, - raft::device_span{ - vertices_->as_type(), vertices_->size_})) { + if (cugraph::detail::is_equal( + handle_, + raft::device_span{vertices_->as_type(), + vertices_->size_}, + raft::device_span{number_map->data(), number_map->size()})) { cugraph::ext_raft::analyzeClustering_edge_cut( legacy_graph_view, n_clusters_, clusters_->as_type(), &score); } else { @@ -417,6 +430,15 @@ struct analyze_clustering_edge_cut_functor : public cugraph::c_api::abstract_fun raft::copy( tmp_c.data(), clusters_->as_type(), clusters_->size_, handle_.get_stream()); + cugraph::renumber_ext_vertices( + handle_, + tmp_v.data(), + tmp_v.size(), + number_map->data(), + graph_view.local_vertex_partition_range_first(), + graph_view.local_vertex_partition_range_last(), + false); + cugraph::c_api::detail::sort_by_key( handle_, raft::device_span{tmp_v.data(), tmp_v.size()}, @@ -499,9 +521,11 @@ struct analyze_clustering_modularity_functor : public cugraph::c_api::abstract_f weight_t score; - if (cugraph::detail::is_sorted(handle_, - raft::device_span{ - vertices_->as_type(), vertices_->size_})) { + if (cugraph::detail::is_equal( + handle_, + 
raft::device_span{vertices_->as_type(), + vertices_->size_}, + raft::device_span{number_map->data(), number_map->size()})) { cugraph::ext_raft::analyzeClustering_modularity( legacy_graph_view, n_clusters_, clusters_->as_type(), &score); } else { @@ -513,6 +537,15 @@ struct analyze_clustering_modularity_functor : public cugraph::c_api::abstract_f raft::copy( tmp_c.data(), clusters_->as_type(), clusters_->size_, handle_.get_stream()); + cugraph::renumber_ext_vertices( + handle_, + tmp_v.data(), + tmp_v.size(), + number_map->data(), + graph_view.local_vertex_partition_range_first(), + graph_view.local_vertex_partition_range_last(), + false); + cugraph::c_api::detail::sort_by_key( handle_, raft::device_span{tmp_v.data(), tmp_v.size()}, diff --git a/cpp/src/c_api/leiden.cpp b/cpp/src/c_api/leiden.cpp index db82d084b92..074ffc2d195 100644 --- a/cpp/src/c_api/leiden.cpp +++ b/cpp/src/c_api/leiden.cpp @@ -86,7 +86,6 @@ struct leiden_functor : public cugraph::c_api::abstract_functor { rmm::device_uvector clusters(graph_view.local_vertex_partition_range_size(), handle_.get_stream()); -#if 0 auto [level, modularity] = cugraph::leiden( handle_, graph_view, @@ -103,9 +102,6 @@ struct leiden_functor : public cugraph::c_api::abstract_functor { modularity, new cugraph::c_api::cugraph_type_erased_device_array_t(vertices, graph_->vertex_type_), new cugraph::c_api::cugraph_type_erased_device_array_t(clusters, graph_->vertex_type_)}; -#else - CUGRAPH_FAIL("NOT IMPLEMENTED YET"); -#endif } } }; diff --git a/cpp/src/community/detail/common_methods.cuh b/cpp/src/community/detail/common_methods.cuh index ba408977333..62ede6eaafb 100644 --- a/cpp/src/community/detail/common_methods.cuh +++ b/cpp/src/community/detail/common_methods.cuh @@ -145,6 +145,9 @@ weight_t compute_modularity( { CUGRAPH_EXPECTS(edge_weight_view.has_value(), "Graph must be weighted."); + // + // Sum(Sigma_tot_c^2), over all clusters c + // weight_t sum_degree_squared = thrust::transform_reduce( handle.get_thrust_policy(), cluster_weights.begin(), @@ -158,6 +161,7 @@ weight_t compute_modularity( handle.get_comms(), sum_degree_squared, raft::comms::op_t::SUM, handle.get_stream()); } + // Sum(Sigma_in_c), over all clusters c weight_t sum_internal = transform_reduce_e( handle, graph_view, @@ -189,11 +193,11 @@ std::tuple< std::optional, weight_t>>> graph_contraction(raft::handle_t const& handle, cugraph::graph_view_t const& graph_view, - std::optional> edge_weights, + std::optional> edge_weights_view, raft::device_span labels) { auto [new_graph, new_edge_weights, numbering_map] = - coarsen_graph(handle, graph_view, edge_weights, labels.data(), true); + coarsen_graph(handle, graph_view, edge_weights_view, labels.data(), true); auto new_graph_view = new_graph.view(); diff --git a/cpp/src/community/detail/common_methods.hpp b/cpp/src/community/detail/common_methods.hpp index 9f6641ab858..62aded12cd3 100644 --- a/cpp/src/community/detail/common_methods.hpp +++ b/cpp/src/community/detail/common_methods.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,8 +15,6 @@ */ #pragma once -//#define TIMING - #include #include #include diff --git a/cpp/src/community/detail/common_methods_mg.cu b/cpp/src/community/detail/common_methods_mg.cu index 3d52658338e..f053e32eb32 100644 --- a/cpp/src/community/detail/common_methods_mg.cu +++ b/cpp/src/community/detail/common_methods_mg.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/community/detail/common_methods_sg.cu b/cpp/src/community/detail/common_methods_sg.cu index fdbd0468b75..59da571f0c7 100644 --- a/cpp/src/community/detail/common_methods_sg.cu +++ b/cpp/src/community/detail/common_methods_sg.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/community/detail/mis.hpp b/cpp/src/community/detail/mis.hpp new file mode 100644 index 00000000000..8a86757a5bc --- /dev/null +++ b/cpp/src/community/detail/mis.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once +#include +#include +#include + +#include +#include + +namespace cugraph { +namespace detail { + +template +rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& graph_view, + std::optional> edge_weight_view); +} // namespace detail +} // namespace cugraph diff --git a/cpp/src/community/detail/mis_impl.cuh b/cpp/src/community/detail/mis_impl.cuh new file mode 100644 index 00000000000..c09da35f711 --- /dev/null +++ b/cpp/src/community/detail/mis_impl.cuh @@ -0,0 +1,292 @@ + +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace cugraph { + +namespace detail { + +template +rmm::device_uvector compute_mis( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view) +{ + using GraphViewType = cugraph::graph_view_t; + + vertex_t local_vtx_partitoin_size = graph_view.local_vertex_partition_range_size(); + + rmm::device_uvector remaining_vertices(local_vtx_partitoin_size, handle.get_stream()); + + auto vertex_begin = + thrust::make_counting_iterator(graph_view.local_vertex_partition_range_first()); + auto vertex_end = thrust::make_counting_iterator(graph_view.local_vertex_partition_range_last()); + + // Compute out-degree + auto out_degrees = graph_view.compute_out_degrees(handle); + + // Vertices with non-zero out-degree are possible candidates for MIS. + remaining_vertices.resize( + thrust::distance(remaining_vertices.begin(), + thrust::copy_if(handle.get_thrust_policy(), + vertex_begin, + vertex_end, + out_degrees.begin(), + remaining_vertices.begin(), + [] __device__(auto deg) { return deg > 0; })), + handle.get_stream()); + + // Set ID of each vertex as its rank + rmm::device_uvector ranks(local_vtx_partitoin_size, handle.get_stream()); + thrust::copy(handle.get_thrust_policy(), vertex_begin, vertex_end, ranks.begin()); + + // Set ranks of zero out-degree vetices to std::numeric_limits::lowest() + thrust::for_each( + handle.get_thrust_policy(), + vertex_begin, + vertex_end, + [out_degrees = raft::device_span(out_degrees.data(), out_degrees.size()), + ranks = raft::device_span(ranks.data(), ranks.size()), + v_first = graph_view.local_vertex_partition_range_first()] __device__(auto v) { + auto v_offset = v - v_first; + if (out_degrees[v_offset] == 0) { ranks[v_offset] = std::numeric_limits::lowest(); } + }); + + out_degrees.resize(0, handle.get_stream()); + out_degrees.shrink_to_fit(handle.get_stream()); + + thrust::default_random_engine g; + size_t seed = 0; + if constexpr (multi_gpu) { seed = handle.get_comms().get_rank(); } + g.seed(seed); + + size_t loop_counter = 0; + while (true) { + loop_counter++; + + // Copy ranks into temporary vector to begin with + + rmm::device_uvector temporary_ranks(local_vtx_partitoin_size, handle.get_stream()); + thrust::copy(handle.get_thrust_policy(), ranks.begin(), ranks.end(), temporary_ranks.begin()); + + // Select a random set of candidate vertices + // FIXME: use common utility function to select a subset of remaining vertices + // and for MG extension, select from disributed array remaining vertices + thrust::shuffle( + handle.get_thrust_policy(), remaining_vertices.begin(), remaining_vertices.end(), g); + + vertex_t nr_candidates = + (remaining_vertices.size() < 1024) + ? 
remaining_vertices.size() + : std::min(static_cast((0.50 + 0.25 * loop_counter) * remaining_vertices.size()), + static_cast(remaining_vertices.size())); + + // Set temporary ranks of non-candidate vertices to std::numeric_limits::lowest() + thrust::for_each( + handle.get_thrust_policy(), + remaining_vertices.begin(), + remaining_vertices.end() - nr_candidates, + [temporary_ranks = + raft::device_span(temporary_ranks.data(), temporary_ranks.size()), + v_first = graph_view.local_vertex_partition_range_first()] __device__(auto v) { + // + // if rank of a non-candidate vertex is not std::numeric_limits::max() (i.e. the + // vertex is not already in MIS), set it to std::numeric_limits::lowest() + // + auto v_offset = v - v_first; + if (temporary_ranks[v_offset] < std::numeric_limits::max()) { + temporary_ranks[v_offset] = std::numeric_limits::lowest(); + } + }); + + // Caches for ranks + edge_src_property_t src_rank_cache(handle); + edge_dst_property_t dst_rank_cache(handle); + + // Update rank caches with temporary ranks + if constexpr (multi_gpu) { + src_rank_cache = edge_src_property_t(handle, graph_view); + dst_rank_cache = edge_dst_property_t(handle, graph_view); + update_edge_src_property(handle, graph_view, temporary_ranks.begin(), src_rank_cache); + update_edge_dst_property(handle, graph_view, temporary_ranks.begin(), dst_rank_cache); + } + + // + // Find maximum rank outgoing neighbor for each vertex + // (In case of Leiden decision graph, each vertex has at most one outgoing edge) + // + + rmm::device_uvector max_outgoing_ranks(local_vtx_partitoin_size, handle.get_stream()); + + per_v_transform_reduce_outgoing_e( + handle, + graph_view, + multi_gpu + ? src_rank_cache.view() + : detail::edge_major_property_view_t(temporary_ranks.data()), + multi_gpu ? dst_rank_cache.view() + : detail::edge_minor_property_view_t( + temporary_ranks.data(), vertex_t{0}), + edge_dummy_property_t{}.view(), + [] __device__(auto src, auto dst, auto src_rank, auto dst_rank, auto wt) { return dst_rank; }, + std::numeric_limits::lowest(), + cugraph::reduce_op::maximum{}, + max_outgoing_ranks.begin()); + + // + // Find maximum rank incoming neighbor for each vertex + // + + rmm::device_uvector max_incoming_ranks(local_vtx_partitoin_size, handle.get_stream()); + + per_v_transform_reduce_incoming_e( + handle, + graph_view, + multi_gpu + ? src_rank_cache.view() + : detail::edge_major_property_view_t(temporary_ranks.data()), + multi_gpu ? dst_rank_cache.view() + : detail::edge_minor_property_view_t( + temporary_ranks.data(), vertex_t{0}), + edge_dummy_property_t{}.view(), + [] __device__(auto src, auto dst, auto src_rank, auto dst_rank, auto wt) { return src_rank; }, + std::numeric_limits::lowest(), + cugraph::reduce_op::maximum{}, + max_incoming_ranks.begin()); + + temporary_ranks.resize(0, handle.get_stream()); + temporary_ranks.shrink_to_fit(handle.get_stream()); + + // + // Compute max of outgoing and incoming neighbors + // + thrust::transform(handle.get_thrust_policy(), + max_incoming_ranks.begin(), + max_incoming_ranks.end(), + max_outgoing_ranks.begin(), + max_outgoing_ranks.begin(), + thrust::maximum()); + + max_incoming_ranks.resize(0, handle.get_stream()); + max_incoming_ranks.shrink_to_fit(handle.get_stream()); + + // + // If the max neighbor of a vertex is already in MIS (i.e. 
has rank + // std::numeric_limits::max()), discard it, otherwise, + // include the vertex if it has larger rank than its maximum rank neighbor + // + auto last = thrust::remove_if( + handle.get_thrust_policy(), + remaining_vertices.end() - nr_candidates, + remaining_vertices.end(), + [max_rank_neighbor_first = max_outgoing_ranks.begin(), + ranks = raft::device_span(ranks.data(), ranks.size()), + v_first = graph_view.local_vertex_partition_range_first()] __device__(auto v) { + auto v_offset = v - v_first; + auto max_neighbor_rank = *(max_rank_neighbor_first + v_offset); + auto rank_of_v = ranks[v_offset]; + + if (max_neighbor_rank >= std::numeric_limits::max()) { + // Maximum rank neighbor is alreay in MIS + // Discard current vertex by setting its rank to + // std::numeric_limits::lowest() + ranks[v_offset] = std::numeric_limits::lowest(); + return true; + } + + if (rank_of_v >= max_neighbor_rank) { + // Include v and set its rank to std::numeric_limits::max() + ranks[v_offset] = std::numeric_limits::max(); + return true; + } + return false; + }); + + max_outgoing_ranks.resize(0, handle.get_stream()); + max_outgoing_ranks.shrink_to_fit(handle.get_stream()); + + remaining_vertices.resize(thrust::distance(remaining_vertices.begin(), last), + handle.get_stream()); + remaining_vertices.shrink_to_fit(handle.get_stream()); + + vertex_t nr_remaining_vertices_to_check = remaining_vertices.size(); + if (multi_gpu) { + nr_remaining_vertices_to_check = host_scalar_allreduce(handle.get_comms(), + nr_remaining_vertices_to_check, + raft::comms::op_t::SUM, + handle.get_stream()); + } + + if (nr_remaining_vertices_to_check == 0) { break; } + } + + // Count number of vertices included in MIS + + vertex_t nr_vertices_included_in_mis = thrust::count_if( + handle.get_thrust_policy(), ranks.begin(), ranks.end(), [] __device__(auto v_rank) { + return v_rank >= std::numeric_limits::max(); + }); + + // Build MIS and return + rmm::device_uvector mis(nr_vertices_included_in_mis, handle.get_stream()); + thrust::copy_if( + handle.get_thrust_policy(), + vertex_begin, + vertex_end, + ranks.begin(), + mis.begin(), + [] __device__(auto v_rank) { return v_rank >= std::numeric_limits::max(); }); + + ranks.resize(0, handle.get_stream()); + ranks.shrink_to_fit(handle.get_stream()); + return mis; +} +} // namespace detail +} // namespace cugraph diff --git a/cpp/src/community/detail/mis_mg.cu b/cpp/src/community/detail/mis_mg.cu new file mode 100644 index 00000000000..def60f698ee --- /dev/null +++ b/cpp/src/community/detail/mis_mg.cu @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include + +namespace cugraph { +namespace detail { +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +} // namespace detail +} // namespace cugraph diff --git a/cpp/src/community/detail/mis_sg.cu b/cpp/src/community/detail/mis_sg.cu new file mode 100644 index 00000000000..4da2b4ea741 --- /dev/null +++ b/cpp/src/community/detail/mis_sg.cu @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include + +namespace cugraph { +namespace detail { +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +template rmm::device_uvector compute_mis( + raft::handle_t const& handle, + graph_view_t const& decision_graph_view, + std::optional> edge_weight_view); + +} // namespace detail +} // namespace cugraph diff --git a/cpp/src/community/detail/refine.hpp b/cpp/src/community/detail/refine.hpp new file mode 100644 index 00000000000..0dd069645f3 --- /dev/null +++ b/cpp/src/community/detail/refine.hpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include +#include +#include + +#include +#include + +namespace cugraph { +namespace detail { + +template +std::tuple, + std::pair, + rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + graph_view_t const& graph_view, + std::optional> + edge_weight_view, + weight_t total_edge_weight, + weight_t resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t const& src_vertex_weights_cache, + edge_src_property_t const& src_clusters_cache, + edge_dst_property_t const& dst_clusters_cache, + bool up_down); + +} +} // namespace cugraph diff --git a/cpp/src/community/detail/refine_impl.cuh b/cpp/src/community/detail/refine_impl.cuh new file mode 100644 index 00000000000..2976a83773e --- /dev/null +++ b/cpp/src/community/detail/refine_impl.cuh @@ -0,0 +1,767 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +CUCO_DECLARE_BITWISE_COMPARABLE(float) +CUCO_DECLARE_BITWISE_COMPARABLE(double) + +namespace cugraph { +namespace detail { + +// FIXME: check if this is still the case +// a workaround for cudaErrorInvalidDeviceFunction error when device lambda is used +template +struct leiden_key_aggregated_edge_op_t { + weight_t total_edge_weight{}; + weight_t gamma{}; + __device__ auto operator()( + vertex_t src, + vertex_t neighboring_leiden_cluster, + thrust::tuple src_info, + cluster_value_t keyed_data, + weight_t aggregated_weight_to_neighboring_leiden_cluster) const + { + // Data associated with src vertex + auto src_weighted_deg = thrust::get<0>(src_info); + auto src_vertex_cut_to_louvain = thrust::get<1>(src_info); + auto louvain_cluster_volume = thrust::get<2>(src_info); + auto is_src_active = thrust::get<3>(src_info); + auto src_leiden_cluster = thrust::get<4>(src_info); + auto src_louvain_cluster = thrust::get<5>(src_info); + + // Data associated with target leiden (aka refined) cluster + + auto dst_leiden_volume = thrust::get<0>(keyed_data); + auto dst_leiden_cut_to_louvain = thrust::get<1>(keyed_data); + auto dst_leiden_cluster_id = thrust::get<2>(keyed_data); + auto louvain_of_dst_leiden_cluster = thrust::get<3>(keyed_data); + + // E(Cr, S-Cr) > ||Cr||*(||S|| -||Cr||) + bool is_dst_leiden_cluster_well_connected = + dst_leiden_cut_to_louvain > + gamma * dst_leiden_volume * (louvain_cluster_volume - dst_leiden_volume); + + // E(v, Cr-v) - ||v||* ||Cr-v||/||V(G)|| + // aggregated_weight_to_neighboring_leiden_cluster == E(v, Cr-v)? 
+ + weight_t theta = -1.0; + // if ((is_src_active > 0) && is_src_well_connected) { + if (is_src_active > 0) { + if ((louvain_of_dst_leiden_cluster == src_louvain_cluster) && + is_dst_leiden_cluster_well_connected) { + theta = aggregated_weight_to_neighboring_leiden_cluster - + gamma * src_weighted_deg * dst_leiden_volume / total_edge_weight; + } + } + + return thrust::make_tuple(theta, neighboring_leiden_cluster); + } +}; + +template +std::tuple, + std::pair, + rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + GraphViewType const& graph_view, + std::optional> + edge_weight_view, + weight_t total_edge_weight, + weight_t resolution, + rmm::device_uvector const& weighted_degree_of_vertices, + rmm::device_uvector&& louvain_cluster_keys, + rmm::device_uvector&& louvain_cluster_weights, + rmm::device_uvector&& louvain_assignment_of_vertices, + edge_src_property_t const& src_vertex_weights_cache, + edge_src_property_t const& + src_louvain_assignment_cache, + edge_dst_property_t const& + dst_louvain_assignment_cache, + bool up_down) +{ + const weight_t POSITIVE_GAIN = 1e-6; + using vertex_t = typename GraphViewType::vertex_type; + using edge_t = typename GraphViewType::edge_type; + + kv_store_t cluster_key_weight_map(louvain_cluster_keys.begin(), + louvain_cluster_keys.end(), + louvain_cluster_weights.data(), + invalid_vertex_id::value, + std::numeric_limits::max(), + handle.get_stream()); + louvain_cluster_keys.resize(0, handle.get_stream()); + louvain_cluster_keys.shrink_to_fit(handle.get_stream()); + + louvain_cluster_weights.resize(0, handle.get_stream()); + louvain_cluster_weights.shrink_to_fit(handle.get_stream()); + + rmm::device_uvector vertex_louvain_cluster_weights(0, handle.get_stream()); + if (GraphViewType::is_multi_gpu) { + auto& comm = handle.get_comms(); + auto const comm_size = comm.get_size(); + auto& major_comm = handle.get_subcomm(cugraph::partition_manager::major_comm_name()); + auto const major_comm_size = major_comm.get_size(); + auto& minor_comm = handle.get_subcomm(cugraph::partition_manager::minor_comm_name()); + auto const minor_comm_size = minor_comm.get_size(); + + cugraph::detail::compute_gpu_id_from_ext_vertex_t vertex_to_gpu_id_op{ + comm_size, major_comm_size, minor_comm_size}; + + vertex_louvain_cluster_weights = + cugraph::collect_values_for_keys(handle, + cluster_key_weight_map.view(), + louvain_assignment_of_vertices.begin(), + louvain_assignment_of_vertices.end(), + vertex_to_gpu_id_op); + + } else { + vertex_louvain_cluster_weights.resize(louvain_assignment_of_vertices.size(), + handle.get_stream()); + + cluster_key_weight_map.view().find(louvain_assignment_of_vertices.begin(), + louvain_assignment_of_vertices.end(), + vertex_louvain_cluster_weights.begin(), + handle.get_stream()); + } + // + // For each vertex, compute its weighted degree (||v||) + // and cut between itself and its Louvain community (E(v, S-v)) + // + + rmm::device_uvector weighted_cut_of_vertices_to_louvain( + graph_view.local_vertex_partition_range_size(), handle.get_stream()); + + per_v_transform_reduce_outgoing_e( + handle, + graph_view, + GraphViewType::is_multi_gpu ? src_louvain_assignment_cache.view() + : detail::edge_major_property_view_t( + louvain_assignment_of_vertices.data()), + GraphViewType::is_multi_gpu ? 
dst_louvain_assignment_cache.view() + : detail::edge_minor_property_view_t( + louvain_assignment_of_vertices.data(), vertex_t{0}), + *edge_weight_view, + [] __device__(auto src, auto dst, auto src_cluster, auto dst_cluster, auto wt) { + weight_t weighted_cut_contribution{0}; + + if (src == dst) // self loop + weighted_cut_contribution = 0; + else if (src_cluster == dst_cluster) + weighted_cut_contribution = wt; + + return weighted_cut_contribution; + }, + weight_t{0}, + cugraph::reduce_op::plus{}, + weighted_cut_of_vertices_to_louvain.begin()); + + rmm::device_uvector singleton_and_connected_flags( + graph_view.local_vertex_partition_range_size(), handle.get_stream()); + + auto wcut_deg_and_cluster_vol_triple_begin = + thrust::make_zip_iterator(thrust::make_tuple(weighted_cut_of_vertices_to_louvain.begin(), + weighted_degree_of_vertices.begin(), + vertex_louvain_cluster_weights.begin())); + auto wcut_deg_and_cluster_vol_triple_end = + thrust::make_zip_iterator(thrust::make_tuple(weighted_cut_of_vertices_to_louvain.end(), + weighted_degree_of_vertices.end(), + vertex_louvain_cluster_weights.end())); + + thrust::transform(handle.get_thrust_policy(), + wcut_deg_and_cluster_vol_triple_begin, + wcut_deg_and_cluster_vol_triple_end, + singleton_and_connected_flags.begin(), + [gamma = resolution] __device__(auto wcut_wdeg_and_louvain_volume) { + auto wcut = thrust::get<0>(wcut_wdeg_and_louvain_volume); + auto wdeg = thrust::get<1>(wcut_wdeg_and_louvain_volume); + auto louvain_volume = thrust::get<2>(wcut_wdeg_and_louvain_volume); + return wcut > (gamma * wdeg * (louvain_volume - wdeg)); + }); + + edge_src_property_t src_louvain_cluster_weight_cache(handle); + edge_src_property_t src_cut_to_louvain_cache(handle); + + if (GraphViewType::is_multi_gpu) { + // Update cluster weight, weighted degree and cut for edge sources + src_louvain_cluster_weight_cache = + edge_src_property_t(handle, graph_view); + update_edge_src_property( + handle, graph_view, vertex_louvain_cluster_weights.begin(), src_louvain_cluster_weight_cache); + + src_cut_to_louvain_cache = edge_src_property_t(handle, graph_view); + update_edge_src_property( + handle, graph_view, weighted_cut_of_vertices_to_louvain.begin(), src_cut_to_louvain_cache); + + vertex_louvain_cluster_weights.resize(0, handle.get_stream()); + vertex_louvain_cluster_weights.shrink_to_fit(handle.get_stream()); + + weighted_cut_of_vertices_to_louvain.resize(0, handle.get_stream()); + weighted_cut_of_vertices_to_louvain.shrink_to_fit(handle.get_stream()); + } + + // + // Assign Lieden community Id for vertices. 
+ // Each vertex starts as a singleton community in the leiden partition + // + + rmm::device_uvector leiden_assignment = rmm::device_uvector( + graph_view.local_vertex_partition_range_size(), handle.get_stream()); + + detail::sequence_fill(handle.get_stream(), + leiden_assignment.begin(), + leiden_assignment.size(), + graph_view.local_vertex_partition_range_first()); + + edge_src_property_t src_leiden_assignment_cache(handle); + edge_dst_property_t dst_leiden_assignment_cache(handle); + edge_src_property_t src_singleton_and_connected_flag_cache(handle); + + kv_store_t leiden_to_louvain_map( + leiden_assignment.begin(), + leiden_assignment.end(), + louvain_assignment_of_vertices.begin(), + invalid_vertex_id::value, + invalid_vertex_id::value, + handle.get_stream()); + + while (true) { + vertex_t nr_remaining_active_vertices = + thrust::count_if(handle.get_thrust_policy(), + singleton_and_connected_flags.begin(), + singleton_and_connected_flags.end(), + [] __device__(auto flag) { return flag > 0; }); + + if (GraphViewType::is_multi_gpu) { + nr_remaining_active_vertices = host_scalar_allreduce(handle.get_comms(), + nr_remaining_active_vertices, + raft::comms::op_t::SUM, + handle.get_stream()); + } + + if (nr_remaining_active_vertices == 0) { break; } + + // Update Leiden assignment to edge sources and destinitions + // and singleton mask to edge sources + + if constexpr (GraphViewType::is_multi_gpu) { + src_leiden_assignment_cache = + edge_src_property_t(handle, graph_view); + dst_leiden_assignment_cache = + edge_dst_property_t(handle, graph_view); + src_singleton_and_connected_flag_cache = + edge_src_property_t(handle, graph_view); + + update_edge_src_property( + handle, graph_view, leiden_assignment.begin(), src_leiden_assignment_cache); + + update_edge_dst_property( + handle, graph_view, leiden_assignment.begin(), dst_leiden_assignment_cache); + + update_edge_src_property(handle, + graph_view, + singleton_and_connected_flags.begin(), + src_singleton_and_connected_flag_cache); + } + + auto src_input_property_values = + GraphViewType::is_multi_gpu + ? view_concat(src_louvain_assignment_cache.view(), src_leiden_assignment_cache.view()) + : view_concat(detail::edge_major_property_view_t( + louvain_assignment_of_vertices.data()), + detail::edge_major_property_view_t( + leiden_assignment.data())); + + auto dst_input_property_values = + GraphViewType::is_multi_gpu + ? view_concat(dst_louvain_assignment_cache.view(), dst_leiden_assignment_cache.view()) + : view_concat(detail::edge_minor_property_view_t( + louvain_assignment_of_vertices.data(), vertex_t{0}), + detail::edge_minor_property_view_t( + leiden_assignment.data(), vertex_t{0})); + + rmm::device_uvector leiden_keys_used_in_edge_reduction(0, handle.get_stream()); + rmm::device_uvector refined_community_volumes(0, handle.get_stream()); + rmm::device_uvector refined_community_cuts(0, handle.get_stream()); + + // + // For each refined community, compute its volume + // (i.e.sum of weighted degree of all vertices inside it, ||Cr||) and + // and cut between itself and its Louvain community (E(Cr, S-Cr)) + // + // FIXME: Can we update ||Cr|| and E(Cr, S-Cr) instead of recomputing? + + std::forward_as_tuple(leiden_keys_used_in_edge_reduction, + std::tie(refined_community_volumes, refined_community_cuts)) = + cugraph::transform_reduce_e_by_dst_key( + handle, + graph_view, + src_input_property_values, + dst_input_property_values, + *edge_weight_view, + GraphViewType::is_multi_gpu ? 
dst_leiden_assignment_cache.view() + : detail::edge_minor_property_view_t( + leiden_assignment.data(), vertex_t{0}), + + [] __device__(auto src, + auto dst, + thrust::tuple src_louvain_leidn, + thrust::tuple dst_louvain_leiden, + auto wt) { + weight_t refined_partition_volume_contribution{0}; + weight_t refined_partition_cut_contribution{0}; + + auto src_louvain = thrust::get<0>(src_louvain_leidn); + auto src_leiden = thrust::get<1>(src_louvain_leidn); + + auto dst_louvain = thrust::get<0>(dst_louvain_leiden); + auto dst_leiden = thrust::get<1>(dst_louvain_leiden); + + if (src_louvain == dst_louvain) { + if (src_leiden == dst_leiden) { + refined_partition_volume_contribution = wt; + } else { + refined_partition_cut_contribution = wt; + } + } + return thrust::make_tuple(refined_partition_volume_contribution, + refined_partition_cut_contribution); + }, + thrust::make_tuple(weight_t{0}, weight_t{0}), + reduce_op::plus>{}); + + // + // Primitives to decide best (at least good) next clusters for vertices + // + + // ||v|| + // E(v, louvain(v)) + // ||louvain(v)|| + // is_singleton_and_connected(v) + // leiden(v) + // louvain(v) + + auto zipped_src_device_view = + GraphViewType::is_multi_gpu + ? view_concat(src_vertex_weights_cache.view(), + src_cut_to_louvain_cache.view(), + src_louvain_cluster_weight_cache.view(), + src_singleton_and_connected_flag_cache.view(), + src_leiden_assignment_cache.view(), + src_louvain_assignment_cache.view()) + : view_concat( + detail::edge_major_property_view_t( + weighted_degree_of_vertices.data()), + detail::edge_major_property_view_t( + weighted_cut_of_vertices_to_louvain.data()), + detail::edge_major_property_view_t( + vertex_louvain_cluster_weights.data()), + detail::edge_major_property_view_t( + singleton_and_connected_flags.data()), + detail::edge_major_property_view_t(leiden_assignment.data()), + detail::edge_major_property_view_t( + louvain_assignment_of_vertices.data())); + + rmm::device_uvector louvain_of_leiden_keys_used_in_edge_reduction( + leiden_keys_used_in_edge_reduction.size(), handle.get_stream()); + leiden_to_louvain_map.view().find(leiden_keys_used_in_edge_reduction.begin(), + leiden_keys_used_in_edge_reduction.end(), + louvain_of_leiden_keys_used_in_edge_reduction.begin(), + handle.get_stream()); + + // ||Cr|| //f(Cr) + // E(Cr, louvain(v) - Cr) //f(Cr) + // leiden(Cr) // f(Cr) + // louvain(Cr) // f(Cr) + auto values_for_leiden_cluster_keys = thrust::make_zip_iterator( + thrust::make_tuple(refined_community_volumes.begin(), + refined_community_cuts.begin(), + leiden_keys_used_in_edge_reduction.begin(), // redundant + louvain_of_leiden_keys_used_in_edge_reduction.begin())); + + using value_t = thrust::tuple; + kv_store_t leiden_cluster_key_values_map( + leiden_keys_used_in_edge_reduction.begin(), + leiden_keys_used_in_edge_reduction.begin() + leiden_keys_used_in_edge_reduction.size(), + values_for_leiden_cluster_keys, + thrust::make_tuple(std::numeric_limits::max(), + std::numeric_limits::max(), + invalid_vertex_id::value, + invalid_vertex_id::value), + false, + handle.get_stream()); + + // + // Decide best/positive move for each vertex + // + + auto gain_and_dst_output_pairs = allocate_dataframe_buffer>( + graph_view.local_vertex_partition_range_size(), handle.get_stream()); + + per_v_transform_reduce_dst_key_aggregated_outgoing_e( + handle, + graph_view, + zipped_src_device_view, + *edge_weight_view, + GraphViewType::is_multi_gpu ? 
dst_leiden_assignment_cache.view() + : detail::edge_minor_property_view_t( + leiden_assignment.data(), vertex_t{0}), + leiden_cluster_key_values_map.view(), + detail::leiden_key_aggregated_edge_op_t{total_edge_weight, + resolution}, + thrust::make_tuple(weight_t{0}, vertex_t{-1}), + reduce_op::maximum>(), + cugraph::get_dataframe_buffer_begin(gain_and_dst_output_pairs)); + + src_leiden_assignment_cache.clear(handle); + dst_leiden_assignment_cache.clear(handle); + src_singleton_and_connected_flag_cache.clear(handle); + + louvain_of_leiden_keys_used_in_edge_reduction.resize(0, handle.get_stream()); + louvain_of_leiden_keys_used_in_edge_reduction.shrink_to_fit(handle.get_stream()); + leiden_keys_used_in_edge_reduction.resize(0, handle.get_stream()); + leiden_keys_used_in_edge_reduction.shrink_to_fit(handle.get_stream()); + refined_community_volumes.resize(0, handle.get_stream()); + refined_community_volumes.shrink_to_fit(handle.get_stream()); + refined_community_cuts.resize(0, handle.get_stream()); + refined_community_cuts.shrink_to_fit(handle.get_stream()); + + // + // Create edgelist from (source, target community, modulraity gain) tuple + // + + vertex_t num_vertices = graph_view.local_vertex_partition_range_size(); + auto gain_and_dst_first = cugraph::get_dataframe_buffer_cbegin(gain_and_dst_output_pairs); + auto gain_and_dst_last = cugraph::get_dataframe_buffer_cend(gain_and_dst_output_pairs); + + auto vertex_begin = + thrust::make_counting_iterator(graph_view.local_vertex_partition_range_first()); + auto vertex_end = + thrust::make_counting_iterator(graph_view.local_vertex_partition_range_last()); + + // edge (src, dst, gain) + auto edge_begin = thrust::make_zip_iterator( + thrust::make_tuple(vertex_begin, + thrust::get<1>(gain_and_dst_first.get_iterator_tuple()), + thrust::get<0>(gain_and_dst_first.get_iterator_tuple()))); + auto edge_end = thrust::make_zip_iterator( + thrust::make_tuple(vertex_end, + thrust::get<1>(gain_and_dst_last.get_iterator_tuple()), + thrust::get<0>(gain_and_dst_last.get_iterator_tuple()))); + + // + // Filter out moves with -ve gains + // + + vertex_t nr_valid_tuples = thrust::count_if(handle.get_thrust_policy(), + gain_and_dst_first, + gain_and_dst_last, + [] __device__(auto gain_dst_pair) { + weight_t gain = thrust::get<0>(gain_dst_pair); + vertex_t dst = thrust::get<1>(gain_dst_pair); + return (gain > POSITIVE_GAIN) && (dst >= 0); + }); + + if (GraphViewType::is_multi_gpu) { + nr_valid_tuples = host_scalar_allreduce( + handle.get_comms(), nr_valid_tuples, raft::comms::op_t::SUM, handle.get_stream()); + } + + if (nr_valid_tuples == 0) { + cugraph::resize_dataframe_buffer(gain_and_dst_output_pairs, 0, handle.get_stream()); + cugraph::shrink_to_fit_dataframe_buffer(gain_and_dst_output_pairs, handle.get_stream()); + break; + } + + rmm::device_uvector d_srcs(nr_valid_tuples, handle.get_stream()); + rmm::device_uvector d_dsts(nr_valid_tuples, handle.get_stream()); + std::optional> d_weights = + std::make_optional(rmm::device_uvector(nr_valid_tuples, handle.get_stream())); + + auto d_src_dst_gain_iterator = thrust::make_zip_iterator( + thrust::make_tuple(d_srcs.begin(), d_dsts.begin(), (*d_weights).begin())); + + thrust::copy_if(handle.get_thrust_policy(), + edge_begin, + edge_end, + d_src_dst_gain_iterator, + [] __device__(thrust::tuple src_dst_gain) { + vertex_t src = thrust::get<0>(src_dst_gain); + vertex_t dst = thrust::get<1>(src_dst_gain); + weight_t gain = thrust::get<2>(src_dst_gain); + + return (gain > POSITIVE_GAIN) && (dst >= 0); + }); + + // + // Create 
decision graph from edgelist + // + constexpr bool storage_transposed = false; + constexpr bool multi_gpu = GraphViewType::is_multi_gpu; + using DecisionGraphViewType = cugraph::graph_view_t; + + cugraph::graph_t decision_graph(handle); + + std::optional> renumber_map{std::nullopt}; + std::optional> coarse_edge_weights{ + std::nullopt}; + + std::tie(decision_graph, coarse_edge_weights, std::ignore, std::ignore, renumber_map) = + create_graph_from_edgelist(handle, + std::nullopt, + std::move(d_srcs), + std::move(d_dsts), + std::move(d_weights), + std::nullopt, + std::nullopt, + cugraph::graph_properties_t{false, false}, + true); + + auto decision_graph_view = decision_graph.view(); + + // + // Determine a set of moves using MIS of the decision_graph + // + + auto vertices_in_mis = compute_mis( + handle, + decision_graph_view, + coarse_edge_weights ? std::make_optional(coarse_edge_weights->view()) : std::nullopt); + + rmm::device_uvector numbering_indices((*renumber_map).size(), handle.get_stream()); + detail::sequence_fill(handle.get_stream(), + numbering_indices.data(), + numbering_indices.size(), + decision_graph_view.local_vertex_partition_range_first()); + + // + // Apply Renumber map to get original vertex ids + // + relabel( + handle, + std::make_tuple(static_cast(numbering_indices.begin()), + static_cast((*renumber_map).begin())), + decision_graph_view.local_vertex_partition_range_size(), + vertices_in_mis.data(), + vertices_in_mis.size(), + false); + + numbering_indices.resize(0, handle.get_stream()); + numbering_indices.shrink_to_fit(handle.get_stream()); + + (*renumber_map).resize(0, handle.get_stream()); + (*renumber_map).shrink_to_fit(handle.get_stream()); + + // + // Mark the chosen vertices as non-singleton and update their leiden cluster to dst + // + + thrust::for_each( + handle.get_thrust_policy(), + vertices_in_mis.begin(), + vertices_in_mis.end(), + [dst_first = thrust::get<1>(gain_and_dst_first.get_iterator_tuple()), + leiden_assignment = leiden_assignment.data(), + singleton_and_connected_flags = singleton_and_connected_flags.data(), + v_first = graph_view.local_vertex_partition_range_first()] __device__(vertex_t v) { + auto v_offset = v - v_first; + auto dst = *(dst_first + v_offset); + singleton_and_connected_flags[v_offset] = false; + leiden_assignment[v_offset] = dst; + }); + + // + // Find the set of dest vertices + // + rmm::device_uvector dst_vertices(vertices_in_mis.size(), handle.get_stream()); + + thrust::transform( + handle.get_thrust_policy(), + vertices_in_mis.begin(), + vertices_in_mis.end(), + dst_vertices.begin(), + [dst_first = thrust::get<1>(gain_and_dst_first.get_iterator_tuple()), + v_first = graph_view.local_vertex_partition_range_first()] __device__(vertex_t v) { + auto dst = *(dst_first + v - v_first); + return dst; + }); + + cugraph::resize_dataframe_buffer(gain_and_dst_output_pairs, 0, handle.get_stream()); + cugraph::shrink_to_fit_dataframe_buffer(gain_and_dst_output_pairs, handle.get_stream()); + + vertices_in_mis.resize(0, handle.get_stream()); + vertices_in_mis.shrink_to_fit(handle.get_stream()); + + thrust::sort(handle.get_thrust_policy(), dst_vertices.begin(), dst_vertices.end()); + + dst_vertices.resize( + static_cast(thrust::distance( + dst_vertices.begin(), + thrust::unique(handle.get_thrust_policy(), dst_vertices.begin(), dst_vertices.end()))), + handle.get_stream()); + + if constexpr (GraphViewType::is_multi_gpu) { + dst_vertices = + shuffle_ext_vertices_to_local_gpu_by_vertex_partitioning(handle, std::move(dst_vertices)); + + 
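// ---------------------------------------------------------------------------
// [Editor's aside - illustrative sketch, not part of this patch]
// The decision graph built above has one edge per proposed move (source
// vertex -> representative of the destination Leiden cluster). Applying every
// proposal at once could let two refined clusters absorb each other in the
// same round, so only the vertices returned by compute_mis (a maximal
// independent set of the decision graph) actually move; their destinations
// are then pinned as non-singletons below. A toy sequential greedy MIS over an
// undirected adjacency list, with hypothetical names, could look like this:
#include <vector>  // assumed available for this host-side sketch

std::vector<int> greedy_mis_sketch(std::vector<std::vector<int>> const& adj)
{
  std::vector<bool> excluded(adj.size(), false);
  std::vector<int> mis;
  for (int v = 0; v < static_cast<int>(adj.size()); ++v) {
    if (excluded[v]) { continue; }                 // a selected neighbor rules v out
    mis.push_back(v);                              // select v
    for (int u : adj[v]) { excluded[u] = true; }   // rule out all neighbors of v
  }
  return mis;
}
// The GPU implementation works on the renumbered decision graph instead and
// maps the selected vertices back to the original ids with relabel(), as done
// above.
// ---------------------------------------------------------------------------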
thrust::sort(handle.get_thrust_policy(), dst_vertices.begin(), dst_vertices.end()); + + dst_vertices.resize( + static_cast(thrust::distance( + dst_vertices.begin(), + thrust::unique(handle.get_thrust_policy(), dst_vertices.begin(), dst_vertices.end()))), + handle.get_stream()); + } + + // + // Makr all the dest vertices as non-sigleton + // + thrust::for_each( + handle.get_thrust_policy(), + dst_vertices.begin(), + dst_vertices.end(), + [singleton_and_connected_flags = singleton_and_connected_flags.data(), + v_first = graph_view.local_vertex_partition_range_first()] __device__(vertex_t v) { + singleton_and_connected_flags[v - v_first] = false; + }); + + dst_vertices.resize(0, handle.get_stream()); + dst_vertices.shrink_to_fit(handle.get_stream()); + } + + src_louvain_cluster_weight_cache.clear(handle); + src_cut_to_louvain_cache.clear(handle); + + louvain_assignment_of_vertices.resize(0, handle.get_stream()); + louvain_assignment_of_vertices.shrink_to_fit(handle.get_stream()); + + singleton_and_connected_flags.resize(0, handle.get_stream()); + singleton_and_connected_flags.shrink_to_fit(handle.get_stream()); + vertex_louvain_cluster_weights.resize(0, handle.get_stream()); + vertex_louvain_cluster_weights.shrink_to_fit(handle.get_stream()); + weighted_cut_of_vertices_to_louvain.resize(0, handle.get_stream()); + weighted_cut_of_vertices_to_louvain.shrink_to_fit(handle.get_stream()); + + // + // Re-read Leiden to Louvain map, but for remaining (after moving) Leiden communities + // + rmm::device_uvector leiden_keys_to_read_louvain(leiden_assignment.size(), + handle.get_stream()); + + thrust::copy(handle.get_thrust_policy(), + leiden_assignment.begin(), + leiden_assignment.end(), + leiden_keys_to_read_louvain.begin()); + + thrust::sort(handle.get_thrust_policy(), + leiden_keys_to_read_louvain.begin(), + leiden_keys_to_read_louvain.end()); + + auto nr_unique_leiden_clusters = + static_cast(thrust::distance(leiden_keys_to_read_louvain.begin(), + thrust::unique(handle.get_thrust_policy(), + leiden_keys_to_read_louvain.begin(), + leiden_keys_to_read_louvain.end()))); + + leiden_keys_to_read_louvain.resize(nr_unique_leiden_clusters, handle.get_stream()); + + if constexpr (GraphViewType::is_multi_gpu) { + leiden_keys_to_read_louvain = + cugraph::detail::shuffle_ext_vertices_to_local_gpu_by_vertex_partitioning( + handle, std::move(leiden_keys_to_read_louvain)); + + thrust::sort(handle.get_thrust_policy(), + leiden_keys_to_read_louvain.begin(), + leiden_keys_to_read_louvain.end()); + + nr_unique_leiden_clusters = + static_cast(thrust::distance(leiden_keys_to_read_louvain.begin(), + thrust::unique(handle.get_thrust_policy(), + leiden_keys_to_read_louvain.begin(), + leiden_keys_to_read_louvain.end()))); + leiden_keys_to_read_louvain.resize(nr_unique_leiden_clusters, handle.get_stream()); + } + + rmm::device_uvector lovain_of_leiden_cluster_keys(0, handle.get_stream()); + + if (GraphViewType::is_multi_gpu) { + auto& comm = handle.get_comms(); + auto const comm_size = comm.get_size(); + auto& major_comm = handle.get_subcomm(cugraph::partition_manager::major_comm_name()); + auto const major_comm_size = major_comm.get_size(); + auto& minor_comm = handle.get_subcomm(cugraph::partition_manager::minor_comm_name()); + auto const minor_comm_size = minor_comm.get_size(); + + cugraph::detail::compute_gpu_id_from_ext_vertex_t vertex_to_gpu_id_op{ + comm_size, major_comm_size, minor_comm_size}; + + lovain_of_leiden_cluster_keys = + cugraph::collect_values_for_keys(handle, + leiden_to_louvain_map.view(), + 
leiden_keys_to_read_louvain.begin(), + leiden_keys_to_read_louvain.end(), + vertex_to_gpu_id_op); + } else { + lovain_of_leiden_cluster_keys.resize(leiden_keys_to_read_louvain.size(), handle.get_stream()); + + leiden_to_louvain_map.view().find(leiden_keys_to_read_louvain.begin(), + leiden_keys_to_read_louvain.end(), + lovain_of_leiden_cluster_keys.begin(), + handle.get_stream()); + } + return std::make_tuple(std::move(leiden_assignment), + std::make_pair(std::move(leiden_keys_to_read_louvain), + std::move(lovain_of_leiden_cluster_keys))); +} +} // namespace detail +} // namespace cugraph diff --git a/cpp/src/community/detail/refine_mg.cu b/cpp/src/community/detail/refine_mg.cu new file mode 100644 index 00000000000..570298126bf --- /dev/null +++ b/cpp/src/community/detail/refine_mg.cu @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include + +namespace cugraph { +namespace detail { + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + float total_edge_weight, + float resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, float> const& + src_vertex_weights_cache, + edge_src_property_t, int32_t> const& + src_clusters_cache, + edge_dst_property_t, int32_t> const& + dst_clusters_cache, + bool up_down); + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + float total_edge_weight, + float resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, float> const& + src_vertex_weights_cache, + edge_src_property_t, int32_t> const& + src_clusters_cache, + edge_dst_property_t, int32_t> const& + dst_clusters_cache, + bool up_down); + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + float total_edge_weight, + float resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, float> const& + src_vertex_weights_cache, + edge_src_property_t, int64_t> const& + src_clusters_cache, + edge_dst_property_t, int64_t> const& + dst_clusters_cache, + bool up_down); + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + double total_edge_weight, + double 
resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, double> const& + src_vertex_weights_cache, + edge_src_property_t, int32_t> const& + src_clusters_cache, + edge_dst_property_t, int32_t> const& + dst_clusters_cache, + bool up_down); + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + double total_edge_weight, + double resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, double> const& + src_vertex_weights_cache, + edge_src_property_t, int32_t> const& + src_clusters_cache, + edge_dst_property_t, int32_t> const& + dst_clusters_cache, + bool up_down); + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + double total_edge_weight, + double resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, double> const& + src_vertex_weights_cache, + edge_src_property_t, int64_t> const& + src_clusters_cache, + edge_dst_property_t, int64_t> const& + dst_clusters_cache, + bool up_down); + +} // namespace detail +} // namespace cugraph diff --git a/cpp/src/community/detail/refine_sg.cu b/cpp/src/community/detail/refine_sg.cu new file mode 100644 index 00000000000..2e8f80ebb78 --- /dev/null +++ b/cpp/src/community/detail/refine_sg.cu @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include + +namespace cugraph { +namespace detail { + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + float total_edge_weight, + float resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, float> const& + src_vertex_weights_cache, + edge_src_property_t, int32_t> const& + src_clusters_cache, + edge_dst_property_t, int32_t> const& + dst_clusters_cache, + bool up_down); + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + float total_edge_weight, + float resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, float> const& + src_vertex_weights_cache, + edge_src_property_t, int32_t> const& + src_clusters_cache, + edge_dst_property_t, int32_t> const& + dst_clusters_cache, + bool up_down); + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + float total_edge_weight, + float resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, float> const& + src_vertex_weights_cache, + edge_src_property_t, int64_t> const& + src_clusters_cache, + edge_dst_property_t, int64_t> const& + dst_clusters_cache, + bool up_down); + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + double total_edge_weight, + double resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, double> const& + src_vertex_weights_cache, + edge_src_property_t, int32_t> const& + src_clusters_cache, + edge_dst_property_t, int32_t> const& + dst_clusters_cache, + bool up_down); + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + double total_edge_weight, + double resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, double> const& + src_vertex_weights_cache, + edge_src_property_t, int32_t> const& + src_clusters_cache, + edge_dst_property_t, int32_t> const& + dst_clusters_cache, + bool up_down); + +template std::tuple, + std::pair, rmm::device_uvector>> +refine_clustering( + raft::handle_t const& handle, + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + double total_edge_weight, + double resolution, + rmm::device_uvector const& vertex_weights_v, + rmm::device_uvector&& cluster_keys_v, + rmm::device_uvector&& cluster_weights_v, + rmm::device_uvector&& next_clusters_v, + edge_src_property_t, 
double> const& + src_vertex_weights_cache, + edge_src_property_t, int64_t> const& + src_clusters_cache, + edge_dst_property_t, int64_t> const& + dst_clusters_cache, + bool up_down); + +} // namespace detail +} // namespace cugraph diff --git a/cpp/src/community/leiden_impl.cuh b/cpp/src/community/leiden_impl.cuh index 9808259120e..8549c1ae8a9 100644 --- a/cpp/src/community/leiden_impl.cuh +++ b/cpp/src/community/leiden_impl.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,19 +14,21 @@ * limitations under the License. */ #pragma once + +#include +#include #include -#include -#include -#include -#include +#include -#include -#include +#include +#include +#include +#include -#include +#include -#include -#include +#include +#include namespace cugraph { @@ -42,7 +44,11 @@ void check_clustering(graph_view_t const& gr CUGRAPH_EXPECTS(clustering != nullptr, "Invalid input argument: clustering is null"); } -template +template std::pair>, weight_t> leiden( raft::handle_t const& handle, graph_view_t const& graph_view, @@ -50,9 +56,476 @@ std::pair>, weight_t> leiden( size_t max_level, weight_t resolution) { - // TODO: everything - CUGRAPH_FAIL("unimplemented"); - return std::make_pair(std::make_unique>(), weight_t{0.0}); + using graph_t = cugraph::graph_t; + using graph_view_t = cugraph::graph_view_t; + + std::unique_ptr> dendrogram = std::make_unique>(); + + graph_t current_graph(handle); + graph_view_t current_graph_view(graph_view); + + std::optional> current_edge_weight_view( + edge_weight_view); + std::optional> coarsen_graph_edge_weight(handle); + +#ifdef TIMING + HighResTimer hr_timer{}; +#endif + + weight_t best_modularity = weight_t{-1.0}; + weight_t total_edge_weight = + compute_total_edge_weight(handle, current_graph_view, *current_edge_weight_view); + + rmm::device_uvector louvain_of_refined_graph(0, handle.get_stream()); // #V + + while (dendrogram->num_levels() < max_level) { + // + // Initialize every cluster to reference each vertex to itself + // + dendrogram->add_level(current_graph_view.local_vertex_partition_range_first(), + current_graph_view.local_vertex_partition_range_size(), + handle.get_stream()); + +// +// Compute the vertex and cluster weights, these are different for each +// graph in the hierarchical decomposition +#ifdef TIMING + detail::timer_start( + handle, hr_timer, "compute_vertex_and_cluster_weights"); +#endif + + rmm::device_uvector vertex_weights = + compute_out_weight_sums(handle, current_graph_view, *current_edge_weight_view); + rmm::device_uvector cluster_keys(0, handle.get_stream()); + rmm::device_uvector cluster_weights(0, handle.get_stream()); + + if (dendrogram->num_levels() == 1) { + cluster_keys.resize(vertex_weights.size(), handle.get_stream()); + cluster_weights.resize(vertex_weights.size(), handle.get_stream()); + + detail::sequence_fill(handle.get_stream(), + dendrogram->current_level_begin(), + dendrogram->current_level_size(), + current_graph_view.local_vertex_partition_range_first()); + + detail::sequence_fill(handle.get_stream(), + cluster_keys.begin(), + cluster_keys.size(), + current_graph_view.local_vertex_partition_range_first()); + + raft::copy(cluster_weights.begin(), + vertex_weights.begin(), + vertex_weights.size(), + handle.get_stream()); + + if constexpr (graph_view_t::is_multi_gpu) { + std::tie(cluster_keys, cluster_weights) = + 
shuffle_ext_vertex_value_pairs_to_local_gpu_by_vertex_partitioning( + handle, std::move(cluster_keys), std::move(cluster_weights)); + } + + } else { + rmm::device_uvector tmp_weights_buffer(vertex_weights.size(), + handle.get_stream()); // #C + + raft::copy(dendrogram->current_level_begin(), + louvain_of_refined_graph.begin(), + louvain_of_refined_graph.size(), + handle.get_stream()); + + raft::copy(tmp_weights_buffer.begin(), + vertex_weights.begin(), + vertex_weights.size(), + handle.get_stream()); + + thrust::sort_by_key(handle.get_thrust_policy(), + louvain_of_refined_graph.begin(), + louvain_of_refined_graph.end(), + tmp_weights_buffer.begin()); + + auto num_unique_louvain_clusters_in_refined_partition = + thrust::count_if(handle.get_thrust_policy(), + thrust::make_counting_iterator(size_t{0}), + thrust::make_counting_iterator(louvain_of_refined_graph.size()), + is_first_in_run_t{louvain_of_refined_graph.data()}); + + cluster_keys.resize(num_unique_louvain_clusters_in_refined_partition, handle.get_stream()); + cluster_weights.resize(num_unique_louvain_clusters_in_refined_partition, handle.get_stream()); + + thrust::reduce_by_key(handle.get_thrust_policy(), + louvain_of_refined_graph.begin(), + louvain_of_refined_graph.end(), + tmp_weights_buffer.begin(), + cluster_keys.begin(), + cluster_weights.begin()); + + louvain_of_refined_graph.resize(0, handle.get_stream()); + louvain_of_refined_graph.shrink_to_fit(handle.get_stream()); + + tmp_weights_buffer.resize(0, handle.get_stream()); + tmp_weights_buffer.shrink_to_fit(handle.get_stream()); + + if constexpr (graph_view_t::is_multi_gpu) { + rmm::device_uvector tmp_keys_buffer(0, handle.get_stream()); // #C + + std::tie(tmp_keys_buffer, tmp_weights_buffer) = + shuffle_ext_vertex_value_pairs_to_local_gpu_by_vertex_partitioning( + handle, std::move(cluster_keys), std::move(cluster_weights)); + + thrust::sort_by_key(handle.get_thrust_policy(), + tmp_keys_buffer.begin(), + tmp_keys_buffer.end(), + tmp_weights_buffer.begin()); + + num_unique_louvain_clusters_in_refined_partition = + thrust::count_if(handle.get_thrust_policy(), + thrust::make_counting_iterator(size_t{0}), + thrust::make_counting_iterator(tmp_keys_buffer.size()), + is_first_in_run_t{tmp_keys_buffer.data()}); + + cluster_keys.resize(num_unique_louvain_clusters_in_refined_partition, handle.get_stream()); + cluster_weights.resize(num_unique_louvain_clusters_in_refined_partition, + handle.get_stream()); + + thrust::reduce_by_key(handle.get_thrust_policy(), + tmp_keys_buffer.begin(), + tmp_keys_buffer.end(), + tmp_weights_buffer.begin(), + cluster_keys.begin(), + cluster_weights.begin()); + + tmp_keys_buffer.resize(0, handle.get_stream()); + tmp_keys_buffer.shrink_to_fit(handle.get_stream()); + tmp_weights_buffer.resize(0, handle.get_stream()); + tmp_weights_buffer.shrink_to_fit(handle.get_stream()); + } + } + + edge_src_property_t src_vertex_weights_cache(handle); + if constexpr (graph_view_t::is_multi_gpu) { + src_vertex_weights_cache = + edge_src_property_t(handle, current_graph_view); + update_edge_src_property( + handle, current_graph_view, vertex_weights.begin(), src_vertex_weights_cache); + vertex_weights.resize(0, handle.get_stream()); + vertex_weights.shrink_to_fit(handle.get_stream()); + } + +#ifdef TIMING + detail::timer_stop(handle, hr_timer); +#endif + +// Update the clustering assignment, this is the main loop of Louvain +#ifdef TIMING + detail::timer_start(handle, hr_timer, "update_clustering"); +#endif + + rmm::device_uvector louvain_assignment_for_vertices = + 
rmm::device_uvector(dendrogram->current_level_size(), handle.get_stream());
+
+    raft::copy(louvain_assignment_for_vertices.begin(),
+               dendrogram->current_level_begin(),
+               dendrogram->current_level_size(),
+               handle.get_stream());
+
+    edge_src_property_t src_louvain_assignment_cache(handle);
+    edge_dst_property_t dst_louvain_assignment_cache(handle);
+    if constexpr (multi_gpu) {
+      src_louvain_assignment_cache =
+        edge_src_property_t(handle, current_graph_view);
+      update_edge_src_property(handle,
+                               current_graph_view,
+                               louvain_assignment_for_vertices.begin(),
+                               src_louvain_assignment_cache);
+      dst_louvain_assignment_cache =
+        edge_dst_property_t(handle, current_graph_view);
+      update_edge_dst_property(handle,
+                               current_graph_view,
+                               louvain_assignment_for_vertices.begin(),
+                               dst_louvain_assignment_cache);
+
+      louvain_assignment_for_vertices.resize(0, handle.get_stream());
+      louvain_assignment_for_vertices.shrink_to_fit(handle.get_stream());
+    }
+
+    weight_t new_Q = detail::compute_modularity(handle,
+                                                current_graph_view,
+                                                current_edge_weight_view,
+                                                src_louvain_assignment_cache,
+                                                dst_louvain_assignment_cache,
+                                                louvain_assignment_for_vertices,
+                                                cluster_weights,
+                                                total_edge_weight,
+                                                resolution);
+    weight_t cur_Q = new_Q - 1;
+
+    // To avoid the potential of having two vertices swap cluster_keys,
+    // we will only allow vertices to move up (true) or down (false)
+    // during each iteration of the loop
+    bool up_down = true;
+    bool no_movement = true;
+    while (new_Q > (cur_Q + 1e-4)) {
+      cur_Q = new_Q;
+
+      //
+      // Keep a copy of detail::update_clustering_by_delta_modularity if we want to
+      // reuse detail::update_clustering_by_delta_modularity without changing it
+      //
+
+      //
+      // FIX: The existing detail::update_clustering_by_delta_modularity is slow.
+      // To make it faster, as proposed by the Leiden algorithm, 1) keep track of the
+      // vertices that have moved, and then 2) for all the vertices that have moved,
+      // check whether their neighbors belong to the same community.
+      // If the neighbors belong to different communities, collect them in a queue/list.
+      // In the next iteration, only consider vertices in the queue/list, until the
+      // queue/list is empty.
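// ---------------------------------------------------------------------------
// [Editor's aside - illustrative sketch, not part of this patch]
// compute_modularity() above scores the current Louvain assignment, and the
// surrounding loop keeps iterating while a pass improves that score by more
// than 1e-4. One common formulation of resolution-scaled modularity, restated
// here with hypothetical host-side inputs (per-cluster internal edge weight
// and per-cluster total weighted degree, with total_edge_weight playing the
// role of 2m), is
//   Q = sum_c [ w_in(c) / W - gamma * (deg(c) / W)^2 ],   W = total_edge_weight.
#include <cstddef>  // assumed available for this host-side sketch
#include <vector>

double modularity_sketch(std::vector<double> const& cluster_internal_weight,
                         std::vector<double> const& cluster_weighted_degree,
                         double total_edge_weight,
                         double gamma)
{
  double q = 0.0;
  for (std::size_t c = 0; c < cluster_internal_weight.size(); ++c) {
    double frac_in  = cluster_internal_weight[c] / total_edge_weight;
    double frac_deg = cluster_weighted_degree[c] / total_edge_weight;
    q += frac_in - gamma * frac_deg * frac_deg;
  }
  return q;
}
// ---------------------------------------------------------------------------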
+ // + // IMPORTANT NOTE: Need to think which vertices are considered first + // + + louvain_assignment_for_vertices = + detail::update_clustering_by_delta_modularity(handle, + current_graph_view, + current_edge_weight_view, + total_edge_weight, + resolution, + vertex_weights, + std::move(cluster_keys), + std::move(cluster_weights), + std::move(louvain_assignment_for_vertices), + src_vertex_weights_cache, + src_louvain_assignment_cache, + dst_louvain_assignment_cache, + up_down); + + if constexpr (graph_view_t::is_multi_gpu) { + update_edge_src_property(handle, + current_graph_view, + louvain_assignment_for_vertices.begin(), + src_louvain_assignment_cache); + update_edge_dst_property(handle, + current_graph_view, + louvain_assignment_for_vertices.begin(), + dst_louvain_assignment_cache); + } + + std::tie(cluster_keys, cluster_weights) = + detail::compute_cluster_keys_and_values(handle, + current_graph_view, + current_edge_weight_view, + louvain_assignment_for_vertices, + src_louvain_assignment_cache); + + up_down = !up_down; + + new_Q = detail::compute_modularity(handle, + current_graph_view, + current_edge_weight_view, + src_louvain_assignment_cache, + dst_louvain_assignment_cache, + louvain_assignment_for_vertices, + cluster_weights, + total_edge_weight, + resolution); + + if (new_Q > (cur_Q + 1e-4)) { + raft::copy(dendrogram->current_level_begin(), + louvain_assignment_for_vertices.begin(), + louvain_assignment_for_vertices.size(), + handle.get_stream()); + no_movement = false; + } + } + +#ifdef TIMING + detail::timer_stop(handle, hr_timer); +#endif + + bool terminate = no_movement || (cur_Q <= best_modularity); + +#ifdef TIMING + detail::timer_start(handle, hr_timer, "contract graph"); +#endif + + if (!terminate) { best_modularity = cur_Q; } + + // Count number of unique louvain clusters + + rmm::device_uvector copied_louvain_partition(dendrogram->current_level_size(), + handle.get_stream()); + thrust::copy(handle.get_thrust_policy(), + dendrogram->current_level_begin(), + dendrogram->current_level_begin() + dendrogram->current_level_size(), + copied_louvain_partition.begin()); + + thrust::sort( + handle.get_thrust_policy(), copied_louvain_partition.begin(), copied_louvain_partition.end()); + + auto nr_unique_louvain_clusters = + static_cast(thrust::distance(copied_louvain_partition.begin(), + thrust::unique(handle.get_thrust_policy(), + copied_louvain_partition.begin(), + copied_louvain_partition.end()))); + + copied_louvain_partition.resize(nr_unique_louvain_clusters, handle.get_stream()); + + if constexpr (graph_view_t::is_multi_gpu) { + copied_louvain_partition = + cugraph::detail::shuffle_ext_vertices_to_local_gpu_by_vertex_partitioning( + handle, std::move(copied_louvain_partition)); + + thrust::sort(handle.get_thrust_policy(), + copied_louvain_partition.begin(), + copied_louvain_partition.end()); + + nr_unique_louvain_clusters = + static_cast(thrust::distance(copied_louvain_partition.begin(), + thrust::unique(handle.get_thrust_policy(), + copied_louvain_partition.begin(), + copied_louvain_partition.end()))); + + copied_louvain_partition.resize(nr_unique_louvain_clusters, handle.get_stream()); + + nr_unique_louvain_clusters = host_scalar_allreduce(handle.get_comms(), + nr_unique_louvain_clusters, + raft::comms::op_t::SUM, + handle.get_stream()); + } + + terminate = + terminate || (nr_unique_louvain_clusters == current_graph_view.number_of_vertices()); + + rmm::device_uvector refined_leiden_partition(0, handle.get_stream()); + std::pair, rmm::device_uvector> leiden_to_louvain_map{ 
+ rmm::device_uvector(0, handle.get_stream()), + rmm::device_uvector(0, handle.get_stream())}; + + if (!terminate) { + // Refine the current partition + thrust::copy(handle.get_thrust_policy(), + dendrogram->current_level_begin(), + dendrogram->current_level_begin() + dendrogram->current_level_size(), + louvain_assignment_for_vertices.begin()); + + if constexpr (graph_view_t::is_multi_gpu) { + update_edge_src_property(handle, + current_graph_view, + louvain_assignment_for_vertices.begin(), + src_louvain_assignment_cache); + update_edge_dst_property(handle, + current_graph_view, + louvain_assignment_for_vertices.begin(), + dst_louvain_assignment_cache); + } + + std::tie(refined_leiden_partition, leiden_to_louvain_map) = + detail::refine_clustering(handle, + current_graph_view, + current_edge_weight_view, + total_edge_weight, + resolution, + vertex_weights, + std::move(cluster_keys), + std::move(cluster_weights), + std::move(louvain_assignment_for_vertices), + src_vertex_weights_cache, + src_louvain_assignment_cache, + dst_louvain_assignment_cache, + up_down); + } + // Clear buffer and contract the graph + + cluster_keys.resize(0, handle.get_stream()); + cluster_weights.resize(0, handle.get_stream()); + vertex_weights.resize(0, handle.get_stream()); + louvain_assignment_for_vertices.resize(0, handle.get_stream()); + cluster_keys.shrink_to_fit(handle.get_stream()); + cluster_weights.shrink_to_fit(handle.get_stream()); + vertex_weights.shrink_to_fit(handle.get_stream()); + louvain_assignment_for_vertices.shrink_to_fit(handle.get_stream()); + src_vertex_weights_cache.clear(handle); + src_louvain_assignment_cache.clear(handle); + dst_louvain_assignment_cache.clear(handle); + + if (!terminate) { + auto nr_unique_leiden = static_cast(leiden_to_louvain_map.first.size()); + if (graph_view_t::is_multi_gpu) { + nr_unique_leiden = host_scalar_allreduce( + handle.get_comms(), nr_unique_leiden, raft::comms::op_t::SUM, handle.get_stream()); + } + terminate = terminate || (nr_unique_leiden == current_graph_view.number_of_vertices()); + + if (nr_unique_leiden < current_graph_view.number_of_vertices()) { + // Create aggregate graph based on refined (leiden) partition + std::optional> cluster_assignment{std::nullopt}; + std::tie(current_graph, coarsen_graph_edge_weight, cluster_assignment) = + coarsen_graph(handle, + current_graph_view, + current_edge_weight_view, + refined_leiden_partition.data(), + true); + + current_graph_view = current_graph.view(); + + current_edge_weight_view = + std::make_optional>( + (*coarsen_graph_edge_weight).view()); + + // cluster_assignment contains leiden cluster ids of aggregated nodes + // After call to relabel, cluster_assignment will louvain cluster ids of the aggregated + // nodes + relabel( + handle, + std::make_tuple(static_cast(leiden_to_louvain_map.first.begin()), + static_cast(leiden_to_louvain_map.second.begin())), + leiden_to_louvain_map.first.size(), + (*cluster_assignment).data(), + (*cluster_assignment).size(), + false); + + louvain_of_refined_graph.resize(current_graph_view.local_vertex_partition_range_size(), + handle.get_stream()); + + raft::copy(louvain_of_refined_graph.begin(), + (*cluster_assignment).begin(), + (*cluster_assignment).size(), + handle.get_stream()); + } + } + + // Relabel dendrogram + rmm::device_uvector numbering_indices(copied_louvain_partition.size(), + handle.get_stream()); + detail::sequence_fill(handle.get_stream(), + numbering_indices.data(), + numbering_indices.size(), + current_graph_view.local_vertex_partition_range_first()); + + 
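// ---------------------------------------------------------------------------
// [Editor's aside - illustrative sketch, not part of this patch]
// The relabel() call below rewrites the current dendrogram level in place:
// each Louvain cluster id (an arbitrary vertex id at this level) is replaced
// by a consecutive id starting at this GPU's vertex partition offset, so the
// labels line up with the vertex range of the coarsened graph built above.
// A host-side sketch of the same mapping, with hypothetical names:
#include <cstddef>        // assumed available for this host-side sketch
#include <unordered_map>
#include <vector>

void relabel_sketch(std::vector<int> const& old_labels,  // sorted unique Louvain ids
                    std::vector<int> const& new_labels,  // consecutive replacement ids
                    std::vector<int>& values)             // dendrogram level, updated in place
{
  std::unordered_map<int, int> remap;
  for (std::size_t i = 0; i < old_labels.size(); ++i) {
    remap[old_labels[i]] = new_labels[i];
  }
  for (auto& v : values) { v = remap.at(v); }  // every value is assumed to have a mapping
}
// ---------------------------------------------------------------------------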
relabel( + handle, + std::make_tuple(static_cast(copied_louvain_partition.begin()), + static_cast(numbering_indices.begin())), + copied_louvain_partition.size(), + dendrogram->current_level_begin(), + dendrogram->current_level_size(), + false); + + copied_louvain_partition.resize(0, handle.get_stream()); + copied_louvain_partition.shrink_to_fit(handle.get_stream()); + + if (terminate) { break; } + +#ifdef TIMING + detail::timer_stop(handle, hr_timer); +#endif + } + +#ifdef TIMING + detail::timer_display(handle, hr_timer, std::cout); +#endif + + return std::make_pair(std::move(dendrogram), best_modularity); } // FIXME: Can we have a common flatten_dendrogram to be used by both @@ -96,4 +569,27 @@ void flatten_dendrogram(raft::handle_t const& handle, detail::flatten_dendrogram(handle, graph_view, dendrogram, clustering); } +template +std::pair leiden( + raft::handle_t const& handle, + graph_view_t const& graph_view, + std::optional> edge_weight_view, + vertex_t* clustering, + size_t max_level, + weight_t resolution) +{ + CUGRAPH_EXPECTS(edge_weight_view.has_value(), "Graph must be weighted"); + detail::check_clustering(graph_view, clustering); + + std::unique_ptr> dendrogram; + weight_t modularity; + + std::tie(dendrogram, modularity) = + detail::leiden(handle, graph_view, edge_weight_view, max_level, resolution); + + detail::flatten_dendrogram(handle, graph_view, *dendrogram, clustering); + + return std::make_pair(dendrogram->num_levels(), modularity); +} + } // namespace cugraph diff --git a/cpp/src/community/leiden_mg.cu b/cpp/src/community/leiden_mg.cu new file mode 100644 index 00000000000..77e4c9a96b6 --- /dev/null +++ b/cpp/src/community/leiden_mg.cu @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +namespace cugraph { + +// SG instantiation + +template std::pair>, float> leiden( + raft::handle_t const& handle, + graph_view_t const& graph_view, + std::optional> edge_weight_view, + size_t max_level, + float resolution); + +template std::pair>, float> leiden( + raft::handle_t const& handle, + graph_view_t const& graph_view, + std::optional> edge_weight_view, + size_t max_level, + float resolution); + +template std::pair>, float> leiden( + raft::handle_t const& handle, + graph_view_t const& graph_view, + std::optional> edge_weight_view, + size_t max_level, + float resolution); + +template std::pair>, double> leiden( + raft::handle_t const& handle, + graph_view_t const& graph_view, + std::optional> edge_weight_view, + size_t max_level, + double resolution); + +template std::pair>, double> leiden( + raft::handle_t const& handle, + graph_view_t const& graph_view, + std::optional> edge_weight_view, + size_t max_level, + double resolution); + +template std::pair>, double> leiden( + raft::handle_t const& handle, + graph_view_t const& graph_view, + std::optional> edge_weight_view, + size_t max_level, + double resolution); + +template std::pair leiden(raft::handle_t const&, + graph_view_t const&, + std::optional>, + int32_t*, + size_t, + float); +template std::pair leiden( + raft::handle_t const&, + graph_view_t const&, + std::optional>, + int32_t*, + size_t, + double); +template std::pair leiden(raft::handle_t const&, + graph_view_t const&, + std::optional>, + int32_t*, + size_t, + float); +template std::pair leiden( + raft::handle_t const&, + graph_view_t const&, + std::optional>, + int32_t*, + size_t, + double); +template std::pair leiden(raft::handle_t const&, + graph_view_t const&, + std::optional>, + int64_t*, + size_t, + float); +template std::pair leiden( + raft::handle_t const&, + graph_view_t const&, + std::optional>, + int64_t*, + size_t, + double); + +} // namespace cugraph diff --git a/cpp/src/community/leiden_sg.cu b/cpp/src/community/leiden_sg.cu index 61e01b787bf..1c821649fa1 100644 --- a/cpp/src/community/leiden_sg.cu +++ b/cpp/src/community/leiden_sg.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -62,4 +62,44 @@ template std::pair>, double> leiden( size_t max_level, double resolution); +template std::pair leiden(raft::handle_t const&, + graph_view_t const&, + std::optional>, + int32_t*, + size_t, + float); +template std::pair leiden( + raft::handle_t const&, + graph_view_t const&, + std::optional>, + int32_t*, + size_t, + double); +template std::pair leiden(raft::handle_t const&, + graph_view_t const&, + std::optional>, + int32_t*, + size_t, + float); +template std::pair leiden( + raft::handle_t const&, + graph_view_t const&, + std::optional>, + int32_t*, + size_t, + double); +template std::pair leiden(raft::handle_t const&, + graph_view_t const&, + std::optional>, + int64_t*, + size_t, + float); +template std::pair leiden( + raft::handle_t const&, + graph_view_t const&, + std::optional>, + int64_t*, + size_t, + double); + } // namespace cugraph diff --git a/cpp/src/community/louvain_impl.cuh b/cpp/src/community/louvain_impl.cuh index b62cfc84261..7d205ffa48e 100644 --- a/cpp/src/community/louvain_impl.cuh +++ b/cpp/src/community/louvain_impl.cuh @@ -16,14 +16,12 @@ #pragma once -//#define TIMING +// #define TIMING #include #include - -// FIXME: Only outstanding items preventing this becoming a .hpp file #include - +// FIXME: Only outstanding items preventing this becoming a .hpp file #include #include #include diff --git a/cpp/src/detail/utility_wrappers.cu b/cpp/src/detail/utility_wrappers.cu index dbf3186ee57..2d5bf6215b1 100644 --- a/cpp/src/detail/utility_wrappers.cu +++ b/cpp/src/detail/utility_wrappers.cu @@ -206,5 +206,26 @@ template bool is_sorted(raft::handle_t const& handle, raft::device_span span); template bool is_sorted(raft::handle_t const& handle, raft::device_span span); +template +bool is_equal(raft::handle_t const& handle, + raft::device_span span1, + raft::device_span span2) +{ + return thrust::equal(handle.get_thrust_policy(), span1.begin(), span1.end(), span2.begin()); +} + +template bool is_equal(raft::handle_t const& handle, + raft::device_span span1, + raft::device_span span2); +template bool is_equal(raft::handle_t const& handle, + raft::device_span span1, + raft::device_span span2); +template bool is_equal(raft::handle_t const& handle, + raft::device_span span1, + raft::device_span span2); +template bool is_equal(raft::handle_t const& handle, + raft::device_span span1, + raft::device_span span2); + } // namespace detail } // namespace cugraph diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index 7bf044c828d..e20a31295e4 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -218,6 +218,7 @@ ConfigureTest(LOUVAIN_TEST community/louvain_test.cpp) ################################################################################################### # - LEIDEN tests --------------------------------------------------------------------------------- ConfigureTest(LEIDEN_TEST community/leiden_test.cpp) +ConfigureTest(NEW_LEIDEN_TEST community/new_leiden_test.cpp) ################################################################################################### # - ECG tests --------------------------------------------------------------------------------- diff --git a/cpp/tests/c_api/leiden_test.c b/cpp/tests/c_api/leiden_test.c index 496366ba6c6..64d1b68b032 100644 --- a/cpp/tests/c_api/leiden_test.c +++ b/cpp/tests/c_api/leiden_test.c @@ -56,7 +56,7 @@ int generic_leiden_test(vertex_t* h_src, ret_code = cugraph_leiden(p_handle, p_graph, max_level, resolution, FALSE, &p_result, &ret_error); -#if 1 +#if 0 
TEST_ASSERT(test_ret_value, ret_code != CUGRAPH_SUCCESS, "cugraph_leiden should have failed"); #else TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); @@ -81,11 +81,6 @@ int generic_leiden_test(vertex_t* h_src, p_handle, (byte_t*)h_clusters, clusters, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); - for (int i = 0; (i < num_vertices) && (test_ret_value == 0); ++i) { - TEST_ASSERT( - test_ret_value, h_result[h_vertices[i]] == h_clusters[i], "cluster results don't match"); - } - TEST_ASSERT(test_ret_value, nearlyEqual(modularity, expected_modularity, 0.001), "modularity doesn't match"); @@ -103,7 +98,7 @@ int generic_leiden_test(vertex_t* h_src, int test_leiden() { - size_t num_edges = 8; + size_t num_edges = 16; size_t num_vertices = 6; size_t max_level = 10; weight_t resolution = 1.0; @@ -112,8 +107,8 @@ int test_leiden() vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4}; weight_t h_wgt[] = { 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; - vertex_t h_result[] = {0, 1, 0, 1, 1, 1}; - weight_t expected_modularity = 0.218166; + vertex_t h_result[] = {0, 0, 0, 1, 1, 1}; + weight_t expected_modularity = 0.215969; // Louvain wants store_transposed = FALSE return generic_leiden_test(h_src, diff --git a/cpp/tests/c_api/louvain_test.c b/cpp/tests/c_api/louvain_test.c index cec75c5ebc8..eed8af4bdc7 100644 --- a/cpp/tests/c_api/louvain_test.c +++ b/cpp/tests/c_api/louvain_test.c @@ -100,7 +100,7 @@ int generic_louvain_test(vertex_t* h_src, int test_louvain() { - size_t num_edges = 8; + size_t num_edges = 16; size_t num_vertices = 6; size_t max_level = 10; weight_t resolution = 1.0; @@ -109,8 +109,8 @@ int test_louvain() vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4}; weight_t h_wgt[] = { 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; - vertex_t h_result[] = {0, 1, 0, 1, 1, 1}; - weight_t expected_modularity = 0.218166; + vertex_t h_result[] = {0, 0, 0, 1, 1, 1}; + weight_t expected_modularity = 0.215969; // Louvain wants store_transposed = FALSE return generic_louvain_test(h_src, diff --git a/cpp/tests/community/new_leiden_test.cpp b/cpp/tests/community/new_leiden_test.cpp new file mode 100644 index 00000000000..7eab855fbc8 --- /dev/null +++ b/cpp/tests/community/new_leiden_test.cpp @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ * + */ +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +struct Leiden_Usecase { + size_t max_level_{100}; + double resolution_{1.0}; + bool check_correctness_{false}; + int expected_level_{0}; + float expected_modularity_{0}; +}; + +template +class Tests_Leiden : public ::testing::TestWithParam> { + public: + Tests_Leiden() {} + + static void SetUpTestCase() {} + static void TearDownTestCase() {} + + virtual void SetUp() {} + virtual void TearDown() {} + + template + void run_current_test(std::tuple const& param) + { + auto [leiden_usecase, input_usecase] = param; + + raft::handle_t handle{}; + HighResTimer hr_timer{}; + + // Can't currently check correctness if we renumber + bool renumber = true; + if (leiden_usecase.check_correctness_) renumber = false; + + if (cugraph::test::g_perf) { + RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement + hr_timer.start("Construct graph"); + } + + auto [graph, edge_weights, d_renumber_map_labels] = + cugraph::test::construct_graph( + handle, input_usecase, true, renumber); + + if (cugraph::test::g_perf) { + RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement + hr_timer.stop(); + hr_timer.display_and_clear(std::cout); + } + + auto graph_view = graph.view(); + auto edge_weight_view = + edge_weights ? std::make_optional((*edge_weights).view()) : std::nullopt; + + // "FIXME": remove this check once we drop support for Pascal + // + // Calling leiden on Pascal will throw an exception, we'll check that + // this is the behavior while we still support Pascal (device_prop.major < 7) + // + cudaDeviceProp device_prop; + RAFT_CUDA_TRY(cudaGetDeviceProperties(&device_prop, 0)); + + if (cugraph::test::g_perf) { + RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement + hr_timer.start("Leiden"); + } + + if (device_prop.major < 7) { + EXPECT_THROW(leiden(graph_view, + edge_weight_view, + graph_view.local_vertex_partition_range_size(), + leiden_usecase.max_level_, + leiden_usecase.resolution_, + leiden_usecase.check_correctness_, + leiden_usecase.expected_level_, + leiden_usecase.expected_modularity_), + cugraph::logic_error); + } else { + leiden(graph_view, + edge_weight_view, + graph_view.local_vertex_partition_range_size(), + leiden_usecase.max_level_, + leiden_usecase.resolution_, + leiden_usecase.check_correctness_, + leiden_usecase.expected_level_, + leiden_usecase.expected_modularity_); + } + + if (cugraph::test::g_perf) { + RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement + hr_timer.stop(); + hr_timer.display_and_clear(std::cout); + } + } + + template + void leiden( + cugraph::graph_view_t const& graph_view, + std::optional> edge_weight_view, + vertex_t num_vertices, + size_t max_level, + float resolution, + bool check_correctness, + int expected_level, + float expected_modularity) + { + raft::handle_t handle{}; + + rmm::device_uvector clustering_v(num_vertices, handle.get_stream()); + size_t level; + weight_t modularity; + + std::tie(level, modularity) = cugraph::leiden( + handle, graph_view, edge_weight_view, clustering_v.data(), max_level, resolution); + + float compare_modularity = static_cast(modularity); + + if (check_correctness) { + ASSERT_FLOAT_EQ(compare_modularity, expected_modularity); + ASSERT_EQ(level, expected_level); + } + } +}; + +using Tests_Leiden_File = Tests_Leiden; +using Tests_Leiden_File32 = 
Tests_Leiden; +using Tests_Leiden_File64 = Tests_Leiden; +using Tests_Leiden_Rmat = Tests_Leiden; +using Tests_Leiden_Rmat32 = Tests_Leiden; +using Tests_Leiden_Rmat64 = Tests_Leiden; + +TEST_P(Tests_Leiden_File, CheckInt32Int32FloatFloat) +{ + run_current_test( + override_File_Usecase_with_cmd_line_arguments(GetParam())); +} + +TEST_P(Tests_Leiden_File, CheckInt64Int64FloatFloat) +{ + run_current_test( + override_File_Usecase_with_cmd_line_arguments(GetParam())); +} + +TEST_P(Tests_Leiden_File32, CheckInt32Int32FloatFloat) +{ + run_current_test( + override_File_Usecase_with_cmd_line_arguments(GetParam())); +} + +TEST_P(Tests_Leiden_File64, CheckInt64Int64FloatFloat) +{ + run_current_test( + override_File_Usecase_with_cmd_line_arguments(GetParam())); +} + +TEST_P(Tests_Leiden_Rmat, CheckInt32Int32FloatFloat) +{ + run_current_test( + override_Rmat_Usecase_with_cmd_line_arguments(GetParam())); +} + +TEST_P(Tests_Leiden_Rmat, CheckInt64Int64FloatFloat) +{ + run_current_test( + override_Rmat_Usecase_with_cmd_line_arguments(GetParam())); +} + +TEST_P(Tests_Leiden_Rmat32, CheckInt32Int32FloatFloat) +{ + run_current_test( + override_Rmat_Usecase_with_cmd_line_arguments(GetParam())); +} + +TEST_P(Tests_Leiden_Rmat64, CheckInt64Int64FloatFloat) +{ + run_current_test( + override_Rmat_Usecase_with_cmd_line_arguments(GetParam())); +} + +// FIXME: Expand testing once we evaluate RMM memory use +INSTANTIATE_TEST_SUITE_P( + simple_test, + Tests_Leiden_File, + ::testing::Combine(::testing::Values(Leiden_Usecase{100, 1, false, 3, 0.408695}), + ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx")))); + +INSTANTIATE_TEST_SUITE_P( + file_benchmark_test, /* note that the test filename can be overridden in benchmarking (with + --gtest_filter to select only the file_benchmark_test with a specific + vertex & edge type combination) by command line arguments and do not + include more than one File_Usecase that differ only in filename + (to avoid running same benchmarks more than once) */ + Tests_Leiden_File32, + ::testing::Combine( + // disable correctness checks for large graphs + ::testing::Values(Leiden_Usecase{}), + ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx")))); + +INSTANTIATE_TEST_SUITE_P( + file64_benchmark_test, /* note that the test filename can be overridden in benchmarking (with + --gtest_filter to select only the file_benchmark_test with a specific + vertex & edge type combination) by command line arguments and do not + include more than one File_Usecase that differ only in filename + (to avoid running same benchmarks more than once) */ + Tests_Leiden_File64, + ::testing::Combine( + // disable correctness checks for large graphs + ::testing::Values(Leiden_Usecase{}), + ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx")))); + +CUGRAPH_TEST_PROGRAM_MAIN() diff --git a/dependencies.yaml b/dependencies.yaml index f411b458ee7..41919215e90 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -8,13 +8,17 @@ files: includes: - checks - common_build - - common_python_test - cpp_build - cudatoolkit - docs - - python_build + - python_build_cythonize + - python_run_cugraph + - python_run_pylibcugraph + - python_run_cugraph_dgl - test_notebook - - test_python + - test_python_common + - test_python_cugraph + - test_python_pylibcugraph checks: output: none includes: @@ -33,17 +37,131 @@ files: test_notebooks: output: none includes: - - common_python_test - cudatoolkit - py_version - test_notebook + - test_python_common + - 
test_python_cugraph test_python: output: none includes: - - common_python_test - cudatoolkit - py_version - - test_python + - test_python_common + - test_python_cugraph + - test_python_pylibcugraph + py_build_cugraph: + output: pyproject + pyproject_dir: python/cugraph + extras: + table: build-system + includes: + - common_build + - python_build_wheel + - python_build_cythonize + - python_build_cugraph + py_run_cugraph: + output: pyproject + pyproject_dir: python/cugraph + extras: + table: project + includes: + - python_run_cugraph + py_test_cugraph: + output: pyproject + pyproject_dir: python/cugraph + extras: + table: project.optional-dependencies + key: test + includes: + - test_python_common + - test_python_cugraph + py_build_pylibcugraph: + output: pyproject + pyproject_dir: python/pylibcugraph + extras: + table: build-system + includes: + - common_build + - python_build_wheel + - python_build_cythonize + py_run_pylibcugraph: + output: pyproject + pyproject_dir: python/pylibcugraph + extras: + table: project + includes: + - python_run_pylibcugraph + py_test_pylibcugraph: + output: pyproject + pyproject_dir: python/pylibcugraph + extras: + table: project.optional-dependencies + key: test + includes: + - test_python_common + - test_python_pylibcugraph + py_build_cugraph_dgl: + output: pyproject + pyproject_dir: python/cugraph-dgl + extras: + table: build-system + includes: + - python_build_wheel + py_run_cugraph_dgl: + output: pyproject + pyproject_dir: python/cugraph-dgl + extras: + table: project + includes: + - python_run_cugraph_dgl + py_build_cugraph_pyg: + output: pyproject + pyproject_dir: python/cugraph-pyg + extras: + table: build-system + includes: + - python_build_wheel + py_build_cugraph_service_client: + output: pyproject + pyproject_dir: python/cugraph-service/client + extras: + table: build-system + includes: + - python_build_wheel + py_run_cugraph_service_client: + output: pyproject + pyproject_dir: python/cugraph-service/client + extras: + table: project + includes: + - python_run_cugraph_service_client + py_build_cugraph_service_server: + output: pyproject + pyproject_dir: python/cugraph-service/server + extras: + table: build-system + includes: + - python_build_wheel + py_run_cugraph_service_server: + output: pyproject + pyproject_dir: python/cugraph-service/server + extras: + table: project + includes: + - python_run_cugraph_service_server + py_test_cugraph_service_server: + output: pyproject + pyproject_dir: python/cugraph-service/server + extras: + table: project.optional-dependencies + key: test + includes: + # TODO: I think that the contents of the server's pyproject.toml + # dependencies were just copied from cugraph, so I'm not sure if this + # list is really minimal or if it is a superset. 
+ - test_python_common + - test_python_cugraph channels: - rapidsai - rapidsai-nightly @@ -78,11 +196,10 @@ dependencies: - cudatoolkit=11.2 common_build: common: - - output_types: [conda] + - output_types: [conda, pyproject] packages: - cmake>=3.23.1,!=3.25.0 - ninja - - pip cpp_build: common: - output_types: [conda] @@ -91,10 +208,10 @@ dependencies: - cxx-compiler - gmock=1.10.0 - gtest=1.10.0 - - libcugraphops=23.06.* - - libraft-headers=23.06.* - - libraft=23.06.* - - librmm=23.06.* + - libcugraphops=23.6.* + - libraft-headers=23.6.* + - libraft=23.6.* + - librmm=23.6.* - openmpi # Required for building cpp-mgtests (multi-GPU tests) specific: - output_types: [conda] @@ -153,25 +270,91 @@ dependencies: - matrix: packages: - python>=3.8,<3.11 - python_build: + python_build_wheel: common: - - output_types: [conda] + - output_types: [conda, pyproject] + packages: + - wheel + - setuptools + python_build_cythonize: + common: + - output_types: [conda, pyproject] packages: - - cuda-python>=11.7.1,<12.0 - - cudf=23.06.* - cython>=0.29,<0.30 - - dask==2023.1.1 - - distributed==2023.1.1 - - dask-cuda=23.06.* - - dask-cudf=23.06.* - - libcudf=23.06.* - - nccl>=2.9.9 - - pylibraft=23.06.* - - raft-dask=23.06.* - - rmm=23.06.* + - &pylibraft pylibraft==23.6.* + - &rmm rmm==23.6.* - scikit-build>=0.13.1 + python_build_cugraph: + common: + - output_types: [conda, pyproject] + packages: + - pylibcugraph==23.6.* + python_run_cugraph: + common: + - output_types: [conda, pyproject] + packages: + - &cudf cudf==23.6.* + - &dask dask==2023.3.2 + - &distributed distributed==2023.3.2.1 + - &dask_cuda dask-cuda==23.6.* + - &dask_cudf dask-cudf==23.6.* + - &numba numba>=0.56.2 + - raft-dask==23.6.* + - *rmm + - &ucx_py ucx-py==0.32.* + - output_types: conda + packages: + - &cupy cupy>=9.5.0,<12.0.0a0 + - &dask-core dask-core==2023.3.2 + - libcudf=23.6.* + - nccl>=2.9.9 - ucx-proc=*=gpu - - ucx-py=0.32.* + - output_types: pyproject + packages: + - &cupy_pip cupy-cuda11x>=9.5.0,<12.0.0a0 + - pylibcugraph==23.6.* + python_run_pylibcugraph: + common: + - output_types: [conda, pyproject] + packages: + - *pylibraft + - *rmm + python_run_cugraph_dgl: + common: + - output_types: [conda, pyproject] + packages: + - *numba + - &numpy numpy>=1.21 + - output_types: [pyproject] + packages: + - &cugraph cugraph==23.4.* + python_run_cugraph_service_client: + common: + - output_types: [conda, pyproject] + packages: + - &thrift thriftpy2 + python_run_cugraph_service_server: + common: + - output_types: [conda, pyproject] + packages: + - *cudf + - *dask + - *dask_cuda + - *dask_cudf + - *distributed + - *numpy + - *rmm + - *thrift + - *ucx_py + - output_types: conda + packages: + - *cupy + - *dask-core + - output_types: pyproject + packages: + - *cupy_pip + - *cugraph + - cugraph-service-client==23.6.* doc: common: - output_types: [conda] @@ -185,27 +368,37 @@ dependencies: - sphinxcontrib-websupport - sphinx-markdown-tables - sphinx-copybutton - common_python_test: - common: - - output_types: [conda, requirements] - packages: - - aiohttp - - networkx>=2.5.1 - - requests - - scipy test_notebook: common: - output_types: [conda, requirements] packages: - ipython - notebook>=0.5.0 - test_python: + test_python_common: common: - - output_types: [conda, requirements] + - output_types: [conda, pyproject] packages: - - py + - networkx>=2.5.1 + - *numpy + - pandas - pytest + - pytest-benchmark - pytest-cov + - pytest-xdist + - scipy + test_python_cugraph: + common: + - output_types: [conda, pyproject] + packages: + - aiohttp + # cudf will 
use fsspec but is protocol independent. cugraph tests + # specifically require http for the test files it asks cudf to read. + - fsspec[http]>=0.6.0 - python-louvain - - rapids-pytest-benchmark + - requests - scikit-learn>=0.23.1 + test_python_pylibcugraph: + common: + - output_types: [conda, pyproject] + packages: + - *cudf diff --git a/docs/cugraph/source/api_docs/cugraph-dgl/cugraph_dgl.rst b/docs/cugraph/source/api_docs/cugraph-dgl/cugraph_dgl.rst new file mode 100644 index 00000000000..4ffecd8d042 --- /dev/null +++ b/docs/cugraph/source/api_docs/cugraph-dgl/cugraph_dgl.rst @@ -0,0 +1,15 @@ +~~~~~~~~~~~~~~~~~~~~~~~~~~ +cugraph-dgl API Reference +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +cugraph-dgl + +.. currentmodule:: cugraph_dgl + +Methods +------- +.. autosummary:: + :toctree: ../api/cugraph-dgl/ + + convert.cugraph_storage_from_heterograph + cugraph_storage.CuGraphStorage diff --git a/docs/cugraph/source/api_docs/cugraph_pyg.rst b/docs/cugraph/source/api_docs/cugraph-pyg/cugraph_pyg.rst similarity index 89% rename from docs/cugraph/source/api_docs/cugraph_pyg.rst rename to docs/cugraph/source/api_docs/cugraph-pyg/cugraph_pyg.rst index 7e25b3547bf..2cd8969aa66 100644 --- a/docs/cugraph/source/api_docs/cugraph_pyg.rst +++ b/docs/cugraph/source/api_docs/cugraph-pyg/cugraph_pyg.rst @@ -7,7 +7,7 @@ cugraph-pyg .. currentmodule:: cugraph_pyg .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph-pyg/ cugraph_pyg.data.cugraph_store.EXPERIMENTAL__CuGraphStore cugraph_pyg.sampler.cugraph_sampler.EXPERIMENTAL__CuGraphSampler diff --git a/docs/cugraph/source/api_docs/centrality.rst b/docs/cugraph/source/api_docs/cugraph/centrality.rst similarity index 80% rename from docs/cugraph/source/api_docs/centrality.rst rename to docs/cugraph/source/api_docs/cugraph/centrality.rst index 9b634cb6deb..c3b026597d9 100644 --- a/docs/cugraph/source/api_docs/centrality.rst +++ b/docs/cugraph/source/api_docs/cugraph/centrality.rst @@ -8,7 +8,7 @@ Centrality Betweenness Centrality ---------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.betweenness_centrality cugraph.edge_betweenness_centrality @@ -16,34 +16,34 @@ Betweenness Centrality Katz Centrality --------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.katz_centrality Katz Centrality (MG) -------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.dask.centrality.katz_centrality.katz_centrality Degree Centrality ----------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.degree_centrality Eigenvector Centrality ---------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.centrality.eigenvector_centrality Eigenvector Centrality (MG) --------------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.dask.centrality.eigenvector_centrality.eigenvector_centrality diff --git a/docs/cugraph/source/api_docs/community.rst b/docs/cugraph/source/api_docs/cugraph/community.rst similarity index 78% rename from docs/cugraph/source/api_docs/community.rst rename to docs/cugraph/source/api_docs/cugraph/community.rst index 94eb1a49cf0..acbaa086f9a 100644 --- a/docs/cugraph/source/api_docs/community.rst +++ b/docs/cugraph/source/api_docs/cugraph/community.rst @@ -8,7 +8,7 @@ Community EgoNet ------ .. 
autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.batched_ego_graphs cugraph.ego_graph @@ -16,7 +16,7 @@ EgoNet Ensemble clustering for graphs (ECG) ------------------------------------ .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.ecg @@ -24,7 +24,7 @@ Ensemble clustering for graphs (ECG) K-Truss ------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.k_truss cugraph.ktruss_subgraph @@ -32,7 +32,7 @@ K-Truss Leiden ------ .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.leiden @@ -40,7 +40,7 @@ Leiden Louvain ------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.louvain @@ -48,14 +48,14 @@ Louvain Louvain (MG) ------------ .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.dask.community.louvain.louvain Spectral Clustering ------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.analyzeClustering_edge_cut cugraph.analyzeClustering_modularity @@ -67,7 +67,7 @@ Spectral Clustering Subgraph Extraction ------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.subgraph @@ -75,6 +75,6 @@ Subgraph Extraction Triangle Counting ----------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.triangle_count diff --git a/docs/cugraph/source/api_docs/components.rst b/docs/cugraph/source/api_docs/cugraph/components.rst similarity index 86% rename from docs/cugraph/source/api_docs/components.rst rename to docs/cugraph/source/api_docs/cugraph/components.rst index 8616c874290..5835972cd4d 100644 --- a/docs/cugraph/source/api_docs/components.rst +++ b/docs/cugraph/source/api_docs/cugraph/components.rst @@ -8,7 +8,7 @@ Components Connected Components -------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.connected_components cugraph.strongly_connected_components @@ -18,7 +18,7 @@ Connected Components Connected Components (MG) ------------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.dask.components.connectivity.weakly_connected_components diff --git a/docs/cugraph/source/api_docs/cores.rst b/docs/cugraph/source/api_docs/cugraph/cores.rst similarity index 73% rename from docs/cugraph/source/api_docs/cores.rst rename to docs/cugraph/source/api_docs/cugraph/cores.rst index 0e48c584b4d..9d274d1c484 100644 --- a/docs/cugraph/source/api_docs/cores.rst +++ b/docs/cugraph/source/api_docs/cugraph/cores.rst @@ -8,7 +8,7 @@ Cores Core Number ----------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.core_number @@ -16,6 +16,6 @@ Core Number K-Core ------ .. 
autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.k_core diff --git a/docs/cugraph/source/api_docs/cugraph_top.rst b/docs/cugraph/source/api_docs/cugraph/cugraph_top.rst similarity index 100% rename from docs/cugraph/source/api_docs/cugraph_top.rst rename to docs/cugraph/source/api_docs/cugraph/cugraph_top.rst diff --git a/docs/cugraph/source/api_docs/dask-cugraph.rst b/docs/cugraph/source/api_docs/cugraph/dask-cugraph.rst similarity index 100% rename from docs/cugraph/source/api_docs/dask-cugraph.rst rename to docs/cugraph/source/api_docs/cugraph/dask-cugraph.rst diff --git a/docs/cugraph/source/api_docs/generator.rst b/docs/cugraph/source/api_docs/cugraph/generator.rst similarity index 80% rename from docs/cugraph/source/api_docs/generator.rst rename to docs/cugraph/source/api_docs/cugraph/generator.rst index e89fe25e867..9b4ebbcf7a4 100644 --- a/docs/cugraph/source/api_docs/generator.rst +++ b/docs/cugraph/source/api_docs/cugraph/generator.rst @@ -8,7 +8,7 @@ Generator RMAT ---- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.generators.rmat diff --git a/docs/cugraph/source/api_docs/graph_implementation.rst b/docs/cugraph/source/api_docs/cugraph/graph_implementation.rst similarity index 94% rename from docs/cugraph/source/api_docs/graph_implementation.rst rename to docs/cugraph/source/api_docs/cugraph/graph_implementation.rst index f6dc38a273e..91c16c24248 100644 --- a/docs/cugraph/source/api_docs/graph_implementation.rst +++ b/docs/cugraph/source/api_docs/cugraph/graph_implementation.rst @@ -7,7 +7,7 @@ Graph Implementation Graph Implementation -------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ view_edge_list delete_edge_list diff --git a/docs/cugraph/source/api_docs/helper_functions.rst b/docs/cugraph/source/api_docs/cugraph/helper_functions.rst similarity index 95% rename from docs/cugraph/source/api_docs/helper_functions.rst rename to docs/cugraph/source/api_docs/cugraph/helper_functions.rst index 08585d264e1..ec3248bfa27 100644 --- a/docs/cugraph/source/api_docs/helper_functions.rst +++ b/docs/cugraph/source/api_docs/cugraph/helper_functions.rst @@ -8,7 +8,7 @@ DASK MG Helper functions Methods ------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.dask.comms.comms.initialize cugraph.dask.comms.comms.destroy diff --git a/docs/cugraph/source/api_docs/layout.rst b/docs/cugraph/source/api_docs/cugraph/layout.rst similarity index 80% rename from docs/cugraph/source/api_docs/layout.rst rename to docs/cugraph/source/api_docs/cugraph/layout.rst index b3943e4d399..1c097346b6c 100644 --- a/docs/cugraph/source/api_docs/layout.rst +++ b/docs/cugraph/source/api_docs/cugraph/layout.rst @@ -8,7 +8,7 @@ Layout Force Atlas 2 ------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.force_atlas2 diff --git a/docs/cugraph/source/api_docs/linear_assignment.rst b/docs/cugraph/source/api_docs/cugraph/linear_assignment.rst similarity index 85% rename from docs/cugraph/source/api_docs/linear_assignment.rst rename to docs/cugraph/source/api_docs/cugraph/linear_assignment.rst index 383bb4d4322..dfdf6da96db 100644 --- a/docs/cugraph/source/api_docs/linear_assignment.rst +++ b/docs/cugraph/source/api_docs/cugraph/linear_assignment.rst @@ -8,7 +8,7 @@ Linear Assignment Hungarian --------- .. 
autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.hungarian cugraph.dense_hungarian diff --git a/docs/cugraph/source/api_docs/link_analysis.rst b/docs/cugraph/source/api_docs/cugraph/link_analysis.rst similarity index 75% rename from docs/cugraph/source/api_docs/link_analysis.rst rename to docs/cugraph/source/api_docs/cugraph/link_analysis.rst index b84dd2ccb75..5f977b47724 100644 --- a/docs/cugraph/source/api_docs/link_analysis.rst +++ b/docs/cugraph/source/api_docs/cugraph/link_analysis.rst @@ -8,14 +8,14 @@ Link Analysis HITS ---- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.hits HITS (MG) --------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.dask.link_analysis.hits.hits @@ -23,14 +23,14 @@ HITS (MG) Pagerank -------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.pagerank Pagerank (MG) ------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.dask.link_analysis.pagerank.pagerank diff --git a/docs/cugraph/source/api_docs/link_prediction.rst b/docs/cugraph/source/api_docs/cugraph/link_prediction.rst similarity index 84% rename from docs/cugraph/source/api_docs/link_prediction.rst rename to docs/cugraph/source/api_docs/cugraph/link_prediction.rst index 7c7b34f2f7c..b2134fba9f9 100644 --- a/docs/cugraph/source/api_docs/link_prediction.rst +++ b/docs/cugraph/source/api_docs/cugraph/link_prediction.rst @@ -8,7 +8,7 @@ Link Prediction Jaccard Coefficient ------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.jaccard cugraph.jaccard_coefficient @@ -18,7 +18,7 @@ Jaccard Coefficient Overlap Coefficient ------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.overlap cugraph.overlap_coefficient @@ -28,7 +28,7 @@ Overlap Coefficient Sorensen Coefficient -------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.sorensen cugraph.sorensen_coefficient diff --git a/docs/cugraph/source/api_docs/property_graph.rst b/docs/cugraph/source/api_docs/cugraph/property_graph.rst similarity index 96% rename from docs/cugraph/source/api_docs/property_graph.rst rename to docs/cugraph/source/api_docs/cugraph/property_graph.rst index f3653c08555..672aa7dae2d 100644 --- a/docs/cugraph/source/api_docs/property_graph.rst +++ b/docs/cugraph/source/api_docs/cugraph/property_graph.rst @@ -7,7 +7,7 @@ Property Graph Property Graph ------------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ PropertySelection PropertyGraph diff --git a/docs/cugraph/source/api_docs/sampling.rst b/docs/cugraph/source/api_docs/cugraph/sampling.rst similarity index 80% rename from docs/cugraph/source/api_docs/sampling.rst rename to docs/cugraph/source/api_docs/cugraph/sampling.rst index bab8aee6bb5..52004a5b1cc 100644 --- a/docs/cugraph/source/api_docs/sampling.rst +++ b/docs/cugraph/source/api_docs/cugraph/sampling.rst @@ -8,7 +8,7 @@ Sampling Random Walks ------------ .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.random_walks cugraph.ego_graph @@ -17,6 +17,6 @@ Random Walks Node2Vec --------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.node2vec diff --git a/docs/cugraph/source/api_docs/cugraph/structure.rst b/docs/cugraph/source/api_docs/cugraph/structure.rst new file mode 100644 index 00000000000..5114cb57b47 --- /dev/null +++ b/docs/cugraph/source/api_docs/cugraph/structure.rst @@ -0,0 +1,104 @@ +============= +Graph Classes +============= +.. 
currentmodule:: cugraph + +Constructors +------------ +.. autosummary:: + :toctree: ../api/cugraph/ + + Graph + MultiGraph + BiPartiteGraph + + + +Adding Data +----------- +.. autosummary:: + :toctree: ../api/cugraph/ + + + Graph.from_cudf_adjlist + Graph.from_cudf_edgelist + Graph.from_dask_cudf_edgelist + Graph.from_pandas_adjacency + Graph.from_pandas_edgelist + Graph.from_numpy_array + Graph.from_numpy_matrix + Graph.add_internal_vertex_id + Graph.add_nodes_from + Graph.clear + Graph.unrenumber + +Checks +------ +.. autosummary:: + :toctree: ../api/cugraph/ + + Graph.has_isolated_vertices + Graph.is_bipartite + Graph.is_directed + Graph.is_multigraph + Graph.is_multipartite + Graph.is_renumbered + Graph.is_weighted + Graph.lookup_internal_vertex_id + Graph.to_directed + Graph.to_undirected + + +Symmetrize +---------- +.. autosummary:: + :toctree: ../api/cugraph/ + + cugraph.symmetrize + cugraph.symmetrize_ddf + cugraph.symmetrize_df + + +Conversion from Other Formats +----------------------------- +.. autosummary:: + :toctree: ../api/cugraph/ + + cugraph.from_adjlist + cugraph.from_cudf_edgelist + cugraph.from_edgelist + cugraph.from_numpy_array + cugraph.from_numpy_matrix + cugraph.from_pandas_adjacency + cugraph.from_pandas_edgelist + cugraph.to_numpy_array + cugraph.to_numpy_matrix + cugraph.to_pandas_adjacency + cugraph.to_pandas_edgelist + +NumberMap +----------------------------- +.. autosummary:: + :toctree: ../api/cugraph/ + + cugraph.structure.NumberMap + cugraph.structure.NumberMap.MultiGPU + cugraph.structure.NumberMap.SingleGPU + cugraph.structure.NumberMap.from_internal_vertex_id + cugraph.structure.NumberMap.to_internal_vertex_id + cugraph.structure.NumberMap.add_internal_vertex_id + cugraph.structure.NumberMap.compute_vals + cugraph.structure.NumberMap.compute_vals_types + cugraph.structure.NumberMap.generate_unused_column_name + cugraph.structure.NumberMap.renumber + cugraph.structure.NumberMap.renumber_and_segment + cugraph.structure.NumberMap.set_renumbered_col_names + cugraph.structure.NumberMap.unrenumber + cugraph.structure.NumberMap.vertex_column_size + +Other +----------------------------- +.. autosummary:: + :toctree: ../api/cugraph/ + + cugraph.hypergraph diff --git a/docs/cugraph/source/api_docs/traversal.rst b/docs/cugraph/source/api_docs/cugraph/traversal.rst similarity index 82% rename from docs/cugraph/source/api_docs/traversal.rst rename to docs/cugraph/source/api_docs/cugraph/traversal.rst index 675bf89f3e0..c8fcc6b721c 100644 --- a/docs/cugraph/source/api_docs/traversal.rst +++ b/docs/cugraph/source/api_docs/cugraph/traversal.rst @@ -8,7 +8,7 @@ Traversal Breadth-first-search -------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.bfs cugraph.bfs_edges @@ -16,14 +16,14 @@ Breadth-first-search Breadth-first-search (MG) ------------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.dask.traversal.bfs.bfs Single-source-shortest-path --------------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.filter_unreachable cugraph.shortest_path @@ -33,6 +33,6 @@ Single-source-shortest-path Single-source-shortest-path (MG) -------------------------------- .. 
autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.dask.traversal.sssp.sssp diff --git a/docs/cugraph/source/api_docs/tree.rst b/docs/cugraph/source/api_docs/cugraph/tree.rst similarity index 83% rename from docs/cugraph/source/api_docs/tree.rst rename to docs/cugraph/source/api_docs/cugraph/tree.rst index 7952c4fa34d..38bfbad7d62 100644 --- a/docs/cugraph/source/api_docs/tree.rst +++ b/docs/cugraph/source/api_docs/cugraph/tree.rst @@ -8,7 +8,7 @@ Tree Minimum Spanning Tree --------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.tree.minimum_spanning_tree.minimum_spanning_tree @@ -16,7 +16,7 @@ Minimum Spanning Tree Maximum Spanning Tree --------------------- .. autosummary:: - :toctree: api/ + :toctree: ../api/cugraph/ cugraph.tree.minimum_spanning_tree.maximum_spanning_tree diff --git a/docs/cugraph/source/api_docs/c_and_cpp.rst b/docs/cugraph/source/api_docs/cugraph_c/c_and_cpp.rst similarity index 100% rename from docs/cugraph/source/api_docs/c_and_cpp.rst rename to docs/cugraph/source/api_docs/cugraph_c/c_and_cpp.rst diff --git a/docs/cugraph/source/api_docs/cugraph_dgl.rst b/docs/cugraph/source/api_docs/cugraph_dgl.rst deleted file mode 100644 index 9a8b3ba6c82..00000000000 --- a/docs/cugraph/source/api_docs/cugraph_dgl.rst +++ /dev/null @@ -1,15 +0,0 @@ -~~~~~~~~~~~~~~~~~~~~~~~~~~ -cugraph-dgl API Refrerence -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -cugraph-dgl - -.. currentmodule:: cugraph_dgl - -Methods -------- -.. autosummary:: - :toctree: api/ - - cugraph_dgl.cugraph_storage_from_heterograph - cugraph_dgl.cugraph_storage.CuGraphStorage diff --git a/docs/cugraph/source/api_docs/index.rst b/docs/cugraph/source/api_docs/index.rst index 0e7598af08e..f6307d5ac36 100644 --- a/docs/cugraph/source/api_docs/index.rst +++ b/docs/cugraph/source/api_docs/index.rst @@ -8,9 +8,9 @@ This page provides a list of all publicly accessible modules, methods and classe :maxdepth: 2 :caption: Python API Documentation - cugraph_top.rst - pylibcugraph.rst - cugraph_dgl.rst - cugraph_pyg.rst - cugraph_service_client.rst - cugraph_service_server.rst + cugraph/cugraph_top.rst + plc/pylibcugraph.rst + cugraph-dgl/cugraph_dgl.rst + cugraph-pyg/cugraph_pyg.rst + service/cugraph_service_client.rst + service/cugraph_service_server.rst diff --git a/docs/cugraph/source/api_docs/pylibcugraph.rst b/docs/cugraph/source/api_docs/plc/pylibcugraph.rst similarity index 94% rename from docs/cugraph/source/api_docs/pylibcugraph.rst rename to docs/cugraph/source/api_docs/plc/pylibcugraph.rst index 7b63cd7dc78..7ebdd67e923 100644 --- a/docs/cugraph/source/api_docs/pylibcugraph.rst +++ b/docs/cugraph/source/api_docs/plc/pylibcugraph.rst @@ -9,7 +9,7 @@ pylibcugraph Methods ------- .. autosummary:: - :toctree: api/ + :toctree: ../api/plc/ pylibcugraph.eigenvector_centrality pylibcugraph.katz_centrality diff --git a/docs/cugraph/source/api_docs/cugraph_service_client.rst b/docs/cugraph/source/api_docs/service/cugraph_service_client.rst similarity index 95% rename from docs/cugraph/source/api_docs/cugraph_service_client.rst rename to docs/cugraph/source/api_docs/service/cugraph_service_client.rst index 1ea727bd354..383b31d269a 100644 --- a/docs/cugraph/source/api_docs/cugraph_service_client.rst +++ b/docs/cugraph/source/api_docs/service/cugraph_service_client.rst @@ -7,7 +7,7 @@ cugraph-service .. currentmodule:: cugraph-service .. 
autosummary:: - :toctree: api/ + :toctree: ../api/service/ cugraph_service_client.client.RunAsyncioThread cugraph_service_client.client.run_async @@ -18,6 +18,3 @@ cugraph-service cugraph_service_client.types.UnionWrapper cugraph_service_client.types.ValueWrapper cugraph_service_client.types.GraphVertexEdgeIDWrapper - - - diff --git a/docs/cugraph/source/api_docs/cugraph_service_server.rst b/docs/cugraph/source/api_docs/service/cugraph_service_server.rst similarity index 92% rename from docs/cugraph/source/api_docs/cugraph_service_server.rst rename to docs/cugraph/source/api_docs/service/cugraph_service_server.rst index 5f4c6aa5eb5..a7e8b547573 100644 --- a/docs/cugraph/source/api_docs/cugraph_service_server.rst +++ b/docs/cugraph/source/api_docs/service/cugraph_service_server.rst @@ -7,7 +7,7 @@ cugraph-service .. currentmodule:: cugraph-service .. autosummary:: - :toctree: api/ + :toctree: ../api/service/ cugraph_service_server.cugraph_handler.call_algo cugraph_service_server.cugraph_handler.ExtensionServerFacade diff --git a/docs/cugraph/source/api_docs/structure.rst b/docs/cugraph/source/api_docs/structure.rst index ab097156cb9..0d6e287927a 100644 --- a/docs/cugraph/source/api_docs/structure.rst +++ b/docs/cugraph/source/api_docs/structure.rst @@ -102,4 +102,3 @@ Other :toctree: api/ cugraph.hypergraph - cugraph.structure.shuffle diff --git a/docs/cugraph/source/basics/index.rst b/docs/cugraph/source/basics/index.rst index 46331fc6ce5..1875ac22bd8 100644 --- a/docs/cugraph/source/basics/index.rst +++ b/docs/cugraph/source/basics/index.rst @@ -6,4 +6,7 @@ Basics .. toctree:: :maxdepth: 2 + cugraph_intro cugraph_toc.md + nx_transition + cugraph_cascading diff --git a/docs/cugraph/source/dev_resources/API.rst b/docs/cugraph/source/dev_resources/API.rst new file mode 100644 index 00000000000..409e307fd9f --- /dev/null +++ b/docs/cugraph/source/dev_resources/API.rst @@ -0,0 +1,5 @@ +=== +API +=== + +https://docs.rapids.ai/api/cugraph/nightly/api_docs/index.html \ No newline at end of file diff --git a/docs/cugraph/source/dev_resources/index.rst b/docs/cugraph/source/dev_resources/index.rst new file mode 100644 index 00000000000..8568772b35c --- /dev/null +++ b/docs/cugraph/source/dev_resources/index.rst @@ -0,0 +1,10 @@ +=================== +Developer Resources +=================== + + +.. toctree:: + :maxdepth: 3 + + https://docs.rapids.ai/maintainers + API.rst \ No newline at end of file diff --git a/docs/cugraph/source/graph_support/DGL_support.md b/docs/cugraph/source/graph_support/DGL_support.md new file mode 100644 index 00000000000..4db2a4d777c --- /dev/null +++ b/docs/cugraph/source/graph_support/DGL_support.md @@ -0,0 +1,55 @@ +# cugraph_dgl + +## Description + +[RAPIDS](https://rapids.ai) cugraph_dgl provides a duck-typed version of the [DGLGraph](https://docs.dgl.ai/api/python/dgl.DGLGraph.html#dgl.DGLGraph) class, which uses cugraph for storing graph structure and node/edge feature data. Using cugraph as the backend allows DGL users to access a collection of GPU accelerated algorithms for graph analytics, such as centrality computation and community detection. 
+ +## Conda + +Install and update cugraph-dgl and the required dependencies using the command: + +``` +conda install mamba -n base -c conda-forge +mamba install cugraph-dgl -c rapidsai-nightly -c rapidsai -c pytorch -c conda-forge -c nvidia -c dglteam +``` + +## Build from Source + +### Create the conda development environment +``` +mamba env create -n cugraph_dgl_dev --file conda/cugraph_dgl_dev_11.6.yml +``` + +### Install in editable mode +``` +pip install -e . +``` + +### Run tests + +``` +pytest tests/* +``` + + +## Usage +```diff + ++from cugraph_dgl.convert import cugraph_storage_from_heterograph ++cugraph_g = cugraph_storage_from_heterograph(dgl_g) + +sampler = dgl.dataloading.NeighborSampler( + [15, 10, 5], prefetch_node_feats=['feat'], prefetch_labels=['label']) + +train_dataloader = dgl.dataloading.DataLoader( +- dgl_g, ++ cugraph_g, +train_idx, +sampler, +device=device, +batch_size=1024, +shuffle=True, +drop_last=False, +num_workers=0) +``` + diff --git a/docs/cugraph/source/graph_support/PyG_support.md b/docs/cugraph/source/graph_support/PyG_support.md new file mode 100644 index 00000000000..42d4d1c5506 --- /dev/null +++ b/docs/cugraph/source/graph_support/PyG_support.md @@ -0,0 +1,3 @@ +# cugraph_pyg + +[RAPIDS](https://rapids.ai) cugraph_pyg enables the ability to use cugraph graph storage and sampling with PyTorch Geometric (PyG). PyG users will have access to cuGraph through the PyG GraphStore, FeatureStore, and Sampler interfaces. \ No newline at end of file diff --git a/docs/cugraph/source/graph_support/algorithms.md b/docs/cugraph/source/graph_support/algorithms.md new file mode 100644 index 00000000000..fa2e7cc9553 --- /dev/null +++ b/docs/cugraph/source/graph_support/algorithms.md @@ -0,0 +1,85 @@ +# List of Supported and Planned Algorithms + +## Supported Graph + +| Type | Description | +| ---------- | ----------------------------------------------------------- | +| Graph | A directed or undirected Graph (use directed={True, False}) | +| Multigraph | A Graph with multiple edges between a vertex pair | +| | | + +ALL Algorithms support Graphs and MultiGraph (directed and undirected) + +--- + +
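Before the full algorithm table below, a minimal single-GPU sketch of how a C++ caller invokes one of these algorithms — Leiden, whose test is added earlier in this patch. The header paths, the `graph_view_t`/`edge_property_view_t` template arguments, and the helper name `run_leiden` are illustrative assumptions; the call signature itself mirrors `new_leiden_test.cpp` and the explicit instantiations above.

```cpp
// Illustrative sketch, not part of the patch: assumes cugraph 23.06-style headers and
// a pre-built single-GPU graph_view with optional edge weights.
#include <cugraph/algorithms.hpp>
#include <cugraph/edge_property.hpp>
#include <cugraph/graph_view.hpp>

#include <raft/core/handle.hpp>
#include <rmm/device_uvector.hpp>

#include <optional>

void run_leiden(
  raft::handle_t const& handle,
  cugraph::graph_view_t<int32_t, int32_t, false, false> const& graph_view,
  std::optional<cugraph::edge_property_view_t<int32_t, float const*>> edge_weight_view)
{
  // One cluster assignment per local vertex.
  rmm::device_uvector<int32_t> clustering(graph_view.local_vertex_partition_range_size(),
                                          handle.get_stream());

  size_t max_level = 100;   // maximum dendrogram depth, as in the tests above
  float resolution = 1.0f;  // standard modularity resolution

  // Returns the number of levels produced and the final modularity score.
  auto [level, modularity] = cugraph::leiden(
    handle, graph_view, edge_weight_view, clustering.data(), max_level, resolution);

  (void)level;
  (void)modularity;
}
```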
+ +# Supported Algorithms + +_Italic_ algorithms are planned for future releases. + +Note: Multi-GPU, or MG, includes support for Multi-Node Multi-GPU (also called MNMG). + +| Category | Algorithm | Scale | Notes | +| ----------------- | ---------------------------------- | ------------------- | --------------------------------------------------------------- | +| Centrality | | | | +| | Katz | __Multi-GPU__ | | +| | Betweenness Centrality | Single-GPU | MG planned for 23.02 | +| | Edge Betweenness Centrality | Single-GPU | MG planned for 23.02 | +| | Eigenvector Centrality | __Multi-GPU__ | | +| | Degree Centrality | __Multi-GPU__ | Python only | +| Community | | | | +| | Leiden | Single-GPU | MG planned for 23.02 | +| | Louvain | __Multi-GPU__ | | +| | Ensemble Clustering for Graphs | Single-GPU | | +| | Spectral-Clustering - Balanced Cut | Single-GPU | | +| | Spectral-Clustering - Modularity | Single-GPU | | +| | Subgraph Extraction | Single-GPU | | +| | Triangle Counting | __Multi-GPU__ | | +| | K-Truss | Single-GPU | | +| Components | | | | +| | Weakly Connected Components | __Multi-GPU__ | | +| | Strongly Connected Components | Single-GPU | | +| Core | | | | +| | K-Core | **Multi-GPU** | | +| | Core Number | **Multi-GPU** | | +| _Flow_ | | | | +| | _MaxFlow_ | --- | | +| _Influence_ | | | | +| | _Influence Maximization_ | --- | | +| Layout | | | | +| | Force Atlas 2 | Single-GPU | | +| Linear Assignment | | | | +| | Hungarian | Single-GPU | [README](cpp/src/linear_assignment/README-hungarian.md) | +| Link Analysis | | | | +| | Pagerank | __Multi-GPU__ | [C++ README](cpp/src/centrality/README.md#Pagerank) | +| | Personal Pagerank | __Multi-GPU__ | [C++ README](cpp/src/centrality/README.md#Personalized-Pagerank) | +| | HITS | __Multi-GPU__ | | +| Link Prediction | | | | +| | Jaccard Similarity | **Multi-GPU** | MG as of 22.12
Directed graph only | +| | Weighted Jaccard Similarity | Single-GPU | | +| | Overlap Similarity | **Multi-GPU** | MG as of 22.12 | +| | Sorensen Coefficient | **Multi-GPU** | MG as of 22.12 | +| | _Local Clustering Coefficient_ | --- | | +| Sampling | | | | +| | Uniform Random Walks (RW) | **Multi-GPU** | | +| | *Biased Random Walks (RW)* | --- | | +| | Egonet | **Multi-GPU** | | +| | Node2Vec | Single-GPU | MG planned for 23.02 | +| | Uniform Neighborhood sampling | __Multi-GPU__ | | +| Traversal | | | | +| | Breadth First Search (BFS) | __Multi-GPU__ | with cutoff support``[C++ README](cpp/src/traversal/README.md#BFS) | +| | Single Source Shortest Path (SSSP) | __Multi-GPU__ | [C++ README](cpp/src/traversal/README.md#SSSP) | +| | _ASSP / APSP_ | --- | | +| Tree | | | | +| | Minimum Spanning Tree | Single-GPU | | +| | Maximum Spanning Tree | Single-GPU | | +| Other | | | | +| | Renumbering | __Multi-GPU__ | multiple columns, any data type | +| | Symmetrize | __Multi-GPU__ | | +| | Path Extraction | | Extract paths from BFS/SSP results in parallel | +| | Two Hop Neighbors | __Multi-GPU__ | | +| Data Generator | | | | +| | RMAT | __Multi-GPU__ | | +| | _Barabasi-Albert_ | --- | | +| | | | | diff --git a/docs/cugraph/source/graph_support/compatibility.rst b/docs/cugraph/source/graph_support/compatibility.rst new file mode 100644 index 00000000000..be0cc21c2dd --- /dev/null +++ b/docs/cugraph/source/graph_support/compatibility.rst @@ -0,0 +1,8 @@ +Compatibility +============= + + +.. toctree:: + :maxdepth: 3 + +Compatibility document coming soon \ No newline at end of file diff --git a/docs/cugraph/source/graph_support/cugraph_service.rst b/docs/cugraph/source/graph_support/cugraph_service.rst new file mode 100644 index 00000000000..620b22d1e0b --- /dev/null +++ b/docs/cugraph/source/graph_support/cugraph_service.rst @@ -0,0 +1,9 @@ +=============== +CuGraph Service +=============== + + +.. toctree:: + :maxdepth: 3 + +Cugraph Service for remote access to a server-based cuGraph(https://github.com/rapidsai/cugraph/blob/branch-23.04/python/cugraph-service/README.md) \ No newline at end of file diff --git a/docs/cugraph/source/graph_support/cugraphops_support.rst b/docs/cugraph/source/graph_support/cugraphops_support.rst new file mode 100644 index 00000000000..08ae3b218c7 --- /dev/null +++ b/docs/cugraph/source/graph_support/cugraphops_support.rst @@ -0,0 +1,10 @@ +================== +cugraphops Support +================== + +cugraph-ops aims to be a low-level, framework agnostic library providing commonly used computational primitives for GNNs and other graph operations. + +.. toctree:: + :maxdepth: 3 + + https://github.com/rapidsai/cugraph-ops/blob/branch-23.04/README.md \ No newline at end of file diff --git a/docs/cugraph/source/graph_support/datastores.rst b/docs/cugraph/source/graph_support/datastores.rst new file mode 100644 index 00000000000..82db2c0e1b2 --- /dev/null +++ b/docs/cugraph/source/graph_support/datastores.rst @@ -0,0 +1,11 @@ +=========== +Data Stores +=========== + + +.. 
toctree:: + :maxdepth: 3 + + property_graph.md + feature_stores.md + knowledge_stores.md \ No newline at end of file diff --git a/docs/cugraph/source/graph_support/feature_stores.md b/docs/cugraph/source/graph_support/feature_stores.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs/cugraph/source/graph_support/gnn_support.rst b/docs/cugraph/source/graph_support/gnn_support.rst new file mode 100644 index 00000000000..2acb2254a5d --- /dev/null +++ b/docs/cugraph/source/graph_support/gnn_support.rst @@ -0,0 +1,12 @@ +============================ +Graph Neural Network Support +============================ + + +.. toctree:: + :maxdepth: 3 + + PyG_support.md + DGL_support.md + cugraphops_support.rst + wholegraph_support.rst \ No newline at end of file diff --git a/docs/cugraph/source/graph_support/graph_algorithms.rst b/docs/cugraph/source/graph_support/graph_algorithms.rst new file mode 100644 index 00000000000..885be03bf9a --- /dev/null +++ b/docs/cugraph/source/graph_support/graph_algorithms.rst @@ -0,0 +1,9 @@ +========== +Algorithms +========== + + +.. toctree:: + :maxdepth: 3 + + algorithms.md \ No newline at end of file diff --git a/docs/cugraph/source/graph_support/index.rst b/docs/cugraph/source/graph_support/index.rst new file mode 100644 index 00000000000..9526fae7eb2 --- /dev/null +++ b/docs/cugraph/source/graph_support/index.rst @@ -0,0 +1,13 @@ +============= +Graph Support +============= + + +.. toctree:: + :maxdepth: 3 + + graph_algorithms.rst + compatibility.rst + gnn_support.rst + datastores.rst + cugraph_service.rst \ No newline at end of file diff --git a/docs/cugraph/source/graph_support/knowledge_stores.md b/docs/cugraph/source/graph_support/knowledge_stores.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs/cugraph/source/graph_support/pg_example.png b/docs/cugraph/source/graph_support/pg_example.png new file mode 100644 index 0000000000000000000000000000000000000000..5ce8a0f20549deaa34b968787f0b711ed8bc71fb GIT binary patch literal 242398 [binary PNG image data omitted]
zN|{)>FuDROcoUe?N5X63b$o<^?|zYC)zKMP<3Se?KyV!5DP3#q<3$#wiN)Sz5gT0hNQuZG!;C? zlUgx|sj6`V&^idu`{@P)X)(pcf1&^Pl1gYg9-RPuSC|8ms#QBxq(v(rKx*G|`*VJ# zy<4E9PBgZHoW$L658KCi+j&dfKsA&E{|==50DO?6?x56;C=X-;a2Rv{Kue~!_<;HY zy-ap<6&IsG0^MF}_E8UbyNU&cD6|I`ecg3P>u@$vKvc}3Jm~OZy z*&~JCFJ3kq7NTz8zn-ParT;jQh8jyoUyp~Qk^Nttt9BE3t?1iZL0 z9rR~@y>WcrUH$|*Ib54@xMDgIQPnbfvc%<}sn@5mohUbo`zP`EdHiX4$;F=>o*F+k zn+%>n-Qd*AxyBJ3gg3qhZn@K%$l~LuxV6k$^6&c0Kv2Tny zfSMGJrP{^~iAl%NuA?|j0D)#<_?;Vz0QEWC^GIQiw*xOHob)+Z=P2;+W^gptIh_S^!VoNH0(rp}O+@hAI5B-gNbWCsoyfjF7n=Q-x7rtxt+G=$$TY zbvrHz`eFS1C&^R}>hl1mj#@qfsJ2}b#m^lV@QXQP?I_F%!Up5^_xMpG&Rvp=DRY66 z?sdOC?FdL0*;;kg1_-ppqy9CeFRDTiTthRX40;-;Oa?nDA)55;CFSsUQ!po#XeFYA-x`Rrs(k2994OkarVwUo4D$3*3}i7({wB&VZHtA z`M#OP+el9pC$TcP@hf%Ey~!Y3-2y4>O1sxZ2<`66)f-v4Ye^q5=q*jTK8=cd`=H}4Ek^WcaY zLcnRv2`78lUnh6Pp%j5scCMe?7DP*7C;i?W_Ipbn;HKAisHyBNak@~6RZYwtQb&;`=hXA4TI?RrvVI^O=Ty$pQG`J) zgg)O9Pv}wcU(=^D5hz6`_QPM3C4lDYw3n)gEjc10V$ugdaC2oKA@`QC%SO_r>?rJ5ymrAt%C$%e8XFyUSREeyWC6RH4+2O; zzmVY~yH8!V1}-3);UB*YtpRh++%yek9~UZ60zWCFR?|hxbemQV#QBZC0>JvGj z`+S#NF)t%e0u z;>7zR47aH@f2|sXWK{oZC^klv)3Of4`P{LKZj((rBd@7=Rraqs8&PMq43KqtDfJA< zE_ZZS`N(95hDH)?ejD9viBo{mTeEfg@&A*ld~3+{$BN6@H!m+^Y8qG?TNsGal(Ze& zmJgDv!=$Z(Iuw&h)colwHor=u=%}skjiR47jdfSz%{c-wEXwhbd>zMoX>H%dE-~;& zLJvo&T;r)$31XrdE{MpC>U|hbpR~?R`;r+q;lLR5?ucV{1ush_b!Hj0^3M_YnxG~$taHQ%HxD+GzdCHy!Rhm2_A3b4`H|s zQB~r#^%is*D=`)PtaavCk3VPEE2Q*LY`AITdlAXYq1v~m^60Ov1IFmfzWrIe|ATqpf_DrxCip?=+=uhcepR<3Y@+7&2n>?DL+lYCLRAU^= zgaPaJTC-lzG_zWN?NW&mPdSYACthQ$yLXbpeMbu`mU0+*9d>9G!{<506=2N$3rQT7 z8P+mbKs6Dr;DAZH3X7~`Yvvn!)b_hB!5{)R*)vHT8^v&3qnSu$r+KvOac77+BSJTl zuLFEA*i>BV?Mr=h;aLrKTDJ(q!TKNK(AW7_6cLc8RNqcNhy`7L4X@J?`+4~qoZBTM z@XgMt3L8s#88(TW8P_Mnza!->b;w;+Q7bvGYRUc5FbW6H&M8l5wYBf13djXI)(hLm zgA4mGCRhuCy;3+Kqzef0nJkZ&f7JF%$3`Zgrkf%bf}WSvoJY5g=$ zZ5Mj@r)&%Cat!#N4Bq!w2uBx+9}HP zMTVT=tz9!4ass~3JM5ov*nrR$ggHCoQRZhivYV}4PhDv4X_rBKDsSp{$R|!DFMEm2 zwxcl0a+3JR*}7|kkhqq}wUsh}h_7?`cXz^V_Ys*JvCmC2ozGd^7`y#P%U^BUcxhm( zpvByvXfOH+HD4Zf8(|4At0!vVw>_8#Lm`Iif4XRe#yRMPpce-l zjZDEk8VJddm&NL56vF(W=rqR*7_EqNOHrEu6&Qnrq4%=6KP`E^CJ8ic$@$Wr_eX;k zb<;KB&BfdHRKZ~O!)1BCnrp6MUwUB1gW7`yZODL(Y{W(Bq9ex#F^F| zQEEZG%0y7|3o*!f%=|!;^^lz4CkF5wxFPEllLW}EM%2+KZVA&$2kLBuCDE1uXEuY% zt@Na()252s9bp*!XocS6;)opE@LSXTf^gS(fj$WK>E*{@;Z9qUl&wm;3Ly(r4jO)Z zLIX_5w`^vAa2zcsZsg|TUI48_s^g{%Ov9~5UED5gI4wH%>4KGv!H{G;((2bF_j*mk zyO2jhCF9KnF^i3!XsgdbouT-(QRzU3ijcHyGAJtm+VZq*9TYW4h;6gr@&ZH|-Ja;) zYdZY>^$E!bOe&sDAQG2wy}v#1$Fb3uEoV}P@)L@tmQy2-k~8%t?JN9OO4eet#ynTe zn(YO>PDp^jpCeX-t?roHi_HK-0|jE4XA!2g1!w-I#*~iQd&teTHfpO~5n$4mMZzly z1fB+>O*6f~*sUUb8k9Xdw^+PYd zn#Hs>lYV=x^92!c-IY-q8P8h*Y+PUIwfeiWHWlfb^YGDN<~Jl5!)*QTlGRLjX50X` z^{I%X>VQVEo)ceTC##L=AfQv)U-l40#?flYVX>Hw{Dz&zH%)+_UGCuP4lQPR&z7qyQscOPy&W*0DDZk5C-BUqUD9^R)oT2s8) z&(VC3d8#S&D0z;_wo?uYl5CY-z`ntz1X-Llx=O?B55vXDcprpGVJLk&SYx6M;;UUT zFK7m^XF+l{8maG8*5SHJ$&VNkVaz4~Lnc29I`Ti3^=?)Mj5--=EI4nsxHwgMaC2J~ zt%z0>#Z0`yQ#{}?=5oz06-~(kkQO;n5d%Vh4JzQq{wY+oKiQ#Avg0FPyF)9e{g4o* zxfrq5egur?1^ss* zll}0_AwsqiLKPHxS7z&yImDi_%n#@UG{4~aRAJxR!+WLRW`=X+`_El>__t=t>h7}jjh~#_6fO9HvD?)4 z^9&iDcPue1Hp(l zYV=p90DCIf@iyvC_76g}p(dhj2y&cq;;;3SYQeMOAkCoFXBab;|J8tvJE14Zg7NUGAz%7|#VfMm`0wsP@KR zTwpPu7q#ErSOv!hDlfJDDh6wm(YwZvT#*x2|j5~Xg;pSOaJ9#cjqHghWu4G$`xv~LkEoYwFV zZu=j8eFL+pj)3&@7&EhDRdW4X!fB~VQ|EqQ)1O;=zLq=5q`@+YU*lylm3-cVn3D9IXMSCIR_>+ERsqb&%|rM@G{t?dBn_eKU#z`u31h$h6%ZwD z_9-1OVNFZptx5A^g)m5wL}rB|u)r&}N1<0)PfVtf^-grCs~HP=0q*nz{_ERQ{fue@ zqMAP5CEW2|q6Tl;GnbgDF~4(W2=T0y82{;Aelsf!7aGO7Etp3L85hAN@#A#ON(g=t z4*INA`8Q|Ws4+|QqivjD*d^blvOND{0i-T->@aX}dm3)DA2tpOj%%u>l0m7K^jMZ^ 
zy1&-KfH)aI50TTc-s`bRvu2sJN;i^1T?Dz!Xv1b&r+Cny2lx%zpv24@rx-beQ4jKybj^HR$0a`gf1>m;# zt}XDAsj9HZdeV?xZrSHF?3yR_Qg!|Qh5SqR0&Be>+1UJnm3-lh`pIEe;lc0O=;UEI z_S9}^hp+rbxJ#q~t!3#9FzXBKKr0q&3&__VZAH;hFLLx560PWXe!6ST$)f=CFdFq5hJK%Low8!U8(F(5b zc$|0i)OHV8HjGdW21?2B5fO5uJB|p;ztXF1rYIMPNU4|fCkI91N_V>@d<~mye19%t z<+A=@zKmu0e*h>!*S^Bq*hAQweF{q>cH{Jx_wnl9KjEcapWuVWFR(rRFsiekLrumj z$R4wa=Whu@L=t31;l(RJ=CU-?u9N{Xzg#mw=6X!O zk%d;|{#K3S7u07I8_sXesry}q$%sX#&&8lIPMDe87NN^Kz^l|5err51f0+jsENcy) zsCbe#V2zvFpIgHHm9AQz*Q(G4;U!1u~&*XhZsJL~!p)1oc=A`5GsxR|VqxZ6lChzFNp-hf;z4ywoVJ3ht9jsJsO@0}+5BZwKk7A3w7*q!|bRt7gCZ~9IgFZmOSr|mBV|5Y|lxnT-a>{Km`xnd>#Vq4fd1-^k~B(qK|-FcsLybIHC=JrZif-9@A&YW=yt$k^E)xD1IC1Q#+Z=Kcsh{j8Gi)Rj|l3B zCw*n#lCNIB>yjlHy!a(9U66r%2FUytO@|TD)%M(T&nb}Ewrv~!`OkkUzpDX8|7z*yGXWXt&DCBm ziB)agi)jx8VEiqS2$N5U2{EB;c-6#ly~+@NZ#km7Z&VY@g$%C1zLbx#Ip}4?KD-XI zdgdT$%2pg+`$rrr`veISE0H&^2G4K%3NP*Y7Y>%cgH;L5SRC1e%IxQ`Gyi$4ia3C% zC(2L}@H|$}J&DvuW&B7v!AW4I40+=l(UkjF^UGLIEVd15zqC%lIvMW5p6l237X$=|R$?RQu- zyB-Bo_F{kL`$&FbBZB&tBB19I%)TX?@K%Zn-&1JL`5$Zxe;He%UPX2Kdsyv%0-IuA zLPg{$q)ps}QolOv&3*;@i{3`=^lgY5x&j+RPopaSO{6`!4M9Cu68cslth;<#F5_e5 z)sg`+zcn*J=DJM3lchnFkucIY3dGQ5gQ@O$`AulQ3%v63YbdW=iA5`tuy}1Oiprvp zy&?<+Wl2;9a#y4ycWFBEmS+4kS|-Z8hI=hP>o96*Ci0hOVsTjka+ahabz!`UQ@Aom zh3BgH8OSb4!IFxF$SFDA772)1<2{3<;XAOw?=`HQ{T!0+-;U63GPlb{1yL~%>_G9htU}Dt^+;fy9<5;mLT$xHP{;S5)Kvp5o-gEBYo61WQ^E>)F-whr0*I8 zbX$qYzLkjRu^!>wH>lZ>jUJibo*5u>T_$(2Jb>gAJ$Y3jlUWJGa9=)AGhMlPx|ZyS+$o90k=ix_&@zPB>YP`H9wn4 z4dkRtNBuWAM`%&JA`*xZU0_`=@tiA|k@$xF)=UQzUDCs%TUl8d^7Hd?>eMMU@txS! z@dae8)BNw7+=u!vORG~0$lRiXjMgI)#l|>pz$PU0*^0PcI}p)(3j+F9z`y@$Ld*&T z_FhS-*oMQ!f5GXMpCWVo9{BcOii)6Du{-HK_zzi(nZ48C|KKvr?Y|gfZcoC5+Y>P3 z-fRT2!S}vv6+*hMV?)0Ykv%r4nS(>w=+C)h8NBXYfuL?H;NP_r3wkYq*TAI+7_=N= z{YnwsuLSdY7GgT{@xHeJfjvqP(q}pRdz51S-HQ>_y+VPE*S)JTzdOtBUV@qT7GiGC zMerZE4F1ex?mdM#UGN$Hx&!$8PX1i?1Ai$0H;TtK!lzpi!UwFPEBTeci>~zY`16>( zdzPxttb+!w#I)OU;MHRZ7Ib5M?kPb)&sB)*FZ1XSwgzlPbnlHw7_bd71Vf`o=C@}C z$Xu7nT`UhEX|3h8lKPXM-t?2#O;*pQ3t#fDJZV^EK(M^#V9R=mlKCSp$-|TJFVdCm zpGI00ew6q>dDzRtT}&nqlfjZ=;+{Tz8j+EaSg~RS-g@h;mU%n$MnKY^J9ob2FWhU_ z^9P9ewSb95@`tm8UF3Pa$P+D}zsrzG>1|QEF<>GR1ce^alP-*#U$pD$(ni(K-UB(9=A3$ub zEwaoxAUC#p_o)p4vP0CFDZ&wYgp)TN88|0N!$h-IBQ^EqC7^+lwoC1Kv&87Nu25FdQ_2OjVR zoc;Psl}^48xrsxV-fXPA5K^N1RwA0`Ll%RS8H}R_Y-U+JN4+X|ZZ;xN7KiLZP#GYgSBVg{K;=f& zBNKGbYSk+fZGg;g(hQKf4wJ`^Jb<*G0R9!n8=Od>-Qz9yyzEG;`!)TfVVAFFq-B)` zS^z?q{ui0_cQ52F6*wH{_|*Yf+{G?u3RUHR40K1_Wu5aS6lA^q19PS&eWu)_wF zBYN;UwLVcu_to(0wG2_iHlSd3EtV~4MA``Y4_*PU9t4HE^ReKbJopn10(z{Vy999$ z5-j>HWV%K0>n*Qj2p`>ZFz=om1ov2tfZJ9e;?8nJ-c4w@cO7H}?QoX0;9dgEfC^+j zz6-JUl_Ox_Qh4J%}E@7CwU(VNTCHq&>ZzfK-N%eya&vETf04Aw>x4zYe~H zrw~~`sSg2;b&8Uu5oE2Y-W%vA%P;WU^r}R-z#SnajIa_;|8T-zcu&hp+&qWjy#xTc z&G`B}G6u-}cFX{o>o9rv2)f9_hkxsj!z7)6 zivk#)FE!4EaaA2e^tb$9lXitPX;(-8t5;n0Kr-w%XF7-o$ml&Qvj5k={#Aj@h7B98 z`1Jb!+T>o9J5;|r#dhiQ$fybN)@T#r^;=IUSOcH#i`l?0Lr{<9h=05io8n)_>56~i z#r6Nfb8G&F;@Qwz~B;OjoX6t zA%~DVek(y~DM}|E!1D2jku-o1&|{7IVk)$M8H%SI!0|=zV{hb}$Q-r>e*KCuvqv5> z#%#yF*w;}saW6sY5E}FU2On(x7e3tbHFie6g7EtZP6L)=@$?2%20w$`i94|? z*|SHjdi_f=xnJc@)Snw-JGa$f@QPPUFzBx3N3@c@$2o!rr8}P~mkPNe|2TlV$MjUZU1gTH|{P|ELEJrhb5| zC%0gJj{0~{&*3$ljqCd8~{*((stJNJ>mcMtUyl>ki=K zkN$|WUrS_0qAr}}xw*iz!jpCJ9MMCznV(4V6aoOmR`R&@}F=v|e!RF1)%}7Z}QJ-Fa`KA8a^Z(xDew8~>?nsf? 
z&TVy=M35PMTMPmQlqrz$r-JWai_rU4B9cHOkP$O<9bw=NtcW;-sXgP6KW7W}FMSK; zNe58qy9I~WzK(xCe+d<_$59$kgO|4b32S4jkv6jo+jF16OS}GxQ=8tyo7I2D{^EDA zXl^y4A76op2bUsb;8H@&V)*qbWjPyga`C@V9sMRUhHb*^d$W-^ax-?uzl!Z4&mrlt zb(nTfIwszog5v4>v3kJ~BtNwYfrCnsKY0%h=KleUXICL+*b2<-m4t}lC0G^SfMtZO z(4k8Z{p4z#Ed4ublHW!0<69BXYo+ReQL9)>PZZtYzOr&Rfkb*&1Wr^>i%4L_S~|dp zB>XCnF?wWvvu1$Ib(nrzYrvvOe;8@|TAzTtdb-4hR~k^Q5tRm3pho_36v>0v`r&K& zo%{-Qk^9*aBugg3jx&&%?)+N54?i($hwrs?gxcJQ(sEP3`ap-B-Z`3aT zmiQ}^o;`xh_U+ph$Q(O%tmW&wUxLYP_@5vnJu-o6t)+neYZ1tX++SWV^b{D;XPmXOh^r`DnmH!N4 zCl(=b$_gA@`xgFu`Vu}k@&*2M@*k*K^d{!^&&G@%DFmisgb!JP$otnIbnq(7>9G(y zV_rvX{M*R1^vD#?X+(X-`&c@)9)W#|;nlAgMRS`_oB9VVncfJmUPa7jB{qaSOTV{} zGQ1qKyQjiyU^Z5V9l)CK1BiZ_VADGtvwLS@Yxwin68bU{ACV;m)*yVq288rouLc^+ zD%b)tVRU7{ufRzJAw`pH3)l#_=AOw8 z_w_$~#CC7n*Rs}drFZpdM|1UKgMRl%o3kkoLKcI z)GvM;`_Mri#pEjzo9I;9{CHlp*;Q=cI3W*ZY;BjqKe zP1uCM2TKw0$SO2ue2AKa_mDnxC&J{*EP{)CZ6#kL3B&~V5s(pS0240azEVps&1F}5 zZ!SyXYEt87dQ}o+jIWk{Yi5AVb(*@Lb;7SgZPVk_WU`VIr}xvStcO+;G++*Y{hcS zm^Blr>1jB2;y6D4@(UI3qBK5?BktGd&niDXPYq-wT+Wk>a}kh{aW?`o=U5>=U@XrW zOsu0rKP1PDk{Qjuf6t*0~zfXtNjy`&#z^zrSZ2$Aw)jE z?z@2?vjG9URwL^E^@!@X9$vSXB6RQu?9O}-e?0>H>nL!fRqBiQu=Rn!O(f^8SMA#P^Upx*8FK z%Mdc448FZqAc))LzAs?i+|!62P>!6@HCPw&GM3Lij;Q`?vEa@{h<$tus!~2gnb&iO z7*qzY?n|&}%0cXne;?_?cES7qv3C{#S{3{I-%c9A7BP|TP*IF~?Jg9hYuQeg4Z6F# zyOw1;=?=SFK_#VQ19<+=Gv__a3nJI+z1%Ts_K8%PKdlN>nTRzWG1^ zUa1nvO59oyFR;!$1k{*{um-abS#KVKs?S8%+A|Q;d@edQnTU|)QxWsXY;N=iHk^vk`|02P;}H4KR0PzTf?h3B5Yu2Gg6^A! zfO{rcQ=obOuJxuNyy;xT-@l0Wor2It1dAq`5X!Skp`mm;Jwfua5!L^PR? zkVbP&OG-Gy+xec!rjKugxCSzRKXnac{){pSSqWMR zo3*2vMdLJs#h*gpj)P(S=a)POWk1SbH2g*BOuCMpFqm6U?lpAnM&}Oho59Cn2W( z0z$}mo{vRv?J4M1YdYbDu)^yCYmG+){pxlPp`#k%p~if(;9gMendnlBz|)9!Jl4v> z!Mwi~ozvC`8pER2kWktMG?;_V1c+ejfwiU?yWocNcpYJb;1F4FkpY@6HK(8h!x2z_ z3cA#rgw70mSmT){OrbTW5in*TrXFFV2K}b!INZ@f6DlC7OCT}AbA}M*=NB{l< zFkrwyq^GCj$dMz)jdjgMAY=Sx809U+96Ew+TDNo$vm?1gs}i3k$MCCHi~Pu)uKOD* z6CM0_u~^Tm2Upw+98_T2gU_{~qqV4`(v4v~`F~1RI578jGPhEp?o`PC8XKl= zLTgIAY?NB{cY!$Gt2zD`%1#${dQ)2a?z`{Mr%xX<_4>ep1IGMnQF>m*xHTcbszg^T zUV)YmcEKyRMG#)5nHCw-l2CUZq6srGH5VhI+CqfYCd@RRkM;&&wC>7ygg2gnfI3qV zSa+&fSgvagniDjl2qPWt8p+CgDl7Nt1_GiQ%^_S69(W$pfS_~F3`EslfS5W95nXSA zS=Bd)AP~%ZhOyG`boUgqRYG{frSyL>f(btn4d#x99|D^}t4dt!>Ut6nZvmvVDS z;lB&yQf~f)*s*)w?0lR$$Nj&S|E=1!YZtP!voU7O81(Gf6Qf3rGP6uho^(u?<-?pY zEjoM_MwoQ_Sn+43+N$+OP7T?pMM&<8QA}Xcx-^0u#Ids@o2?)k56J*{X$h>rW7y)9cserWuwx$*f zFuD_DBJW+q3V9(yn#@D{TH3gA96@Fbx~gTSHsL_KKG&O!F105kwDBy0#&mS7KAKQ6 z8Nqeb%`*+*G}FR*;RJ|Cf=zgB!i}~=xNjP|+&zx&rXZ^6d~~{R3cA#sg%E95Krjic zyAXl(mJ&ebA+W(T-e(GrXV6?NI1><1cLJehCShilxqm=|spwF9JUZ5yfS|^N9FQA|Ez0Cd0HP&rQPAL}1=%*GL!Hgwpjj;UHJ?sk{ z_tg6`$22n@{=Tx`yZxR|pFWK>Yt|qxF3v!)Hb1?Zl>NpmkWtKAEiwX`mv0Xz$V_1+ zKgVhbVI>@L-*iORT!@J33lY_DF^0dHhq>V!FuU6t40>r95}#av*vICg@3To5_+l!0 zJ-rOWU(Y7+OhMv57GmIY$%ubsJ|de;Ls$dB!(_t2R74PLv`v8q+Qu}Ti-Auq$EY`W zJ^c%AJQ>4Y%|>d%hnU=cEg~M6kM55w$JlpPV`k85%nn|Q*hl9esL4c(c{>C1Lf2u$ z8!Hg^_#6y-Jq>e0H(^f18q5gH#jutc2yRC3X*eFepIU|?FJ&OKp}L|b^Enxox&*Ac zf*A{FBAh@*Q>T9)Wd4|_i|QK4{5{nb2N|n?J1->(!iXa^*a_oexxhIMN(#8&)Bmj9 zA8~UzyFcQ_SuW@1a(1>{$_>SQj$a&a4x!bB!hgEf;o!IL>3^?m;E>XK9~##X5D;L# zsMX!{%{Skem04{!(shk0FDbRk=qubAaBQ*d1v`IFrS~QjQR{|UTMRn#Fb>YY zZX+_XMXfNFOGkVekt2yJS&4u{{#z|M=evgN`{_RRH9Vze?jy0iq=5}X=fyeQDHPVd ztd=9I^~f@lr@yc4cP?FW^5jV*Cnp<_*|~G)RkenUlodsa4XeXVYQ>^t)3x*xK_;}m zD)`w57R0bZ52`s0p*3eCu-Xjt{$~a@Oge z5oU#K#D+0@k@(acOlp^hjGkX%a@%|ic_A4iUrt9)LQSwXWn_ik>FzP;a?eCed}9N? 
zm|cjEX79(qH|L`NYYVV_%0V1la|C>#t2Wfek43_0D2fW|E7|{+>$jO;Dh5FE#*396+OY?F_mXr42?8 zuhUQdI42rgYXT_i)7C&x*a?EVR3QEGD#Ej~x?_AbaR1h<|A^;$E17#FwTc_^}~K>c7=& zfzZCmK(ue#3*);j#>yd^(68kznx`Rc$PR3q{XN!C`U;bR>35Ufc;(*iX4T+{0jm-9 z#1!;;aRD}s{{cr<0h>qdNB9HN(aq7-q@BA%>dl~7Q?1Rw-a6V4a?Ul7xu!0wt~kiB zioHNC{*^eCIMPaq*oCbrSLxbR*ep=5$t8r5q)J+GrMkeDlK7+8@oa?uOrd`MT{!># zmr5-qcFT%$WhA9z*=ggyw6rwS-PEN^7tEhO-*h)gslpNf=^DG$#r98T&gYW9r?TFg z^;6ubP*&w{gN)J#pIufGJ6$-^hN9&>25z=rr+TS+`p)D3*YL9o3K6w}Nr|rVrp6l>M@(~oj2do|UA>yAO zk8z!rVZ*p@5Zrt)L1qckdw+-tZ8H(vWDGji9Exsr#~`rY1O(TegfMLdP+MbLW@1W* z^++A?8HTq@LEA=y@Mi6v7#EO&3BhUT`RW|BYcdGk9vOx?v3XcK4)Q8$;flYTin>6f#UemOhom$Oscn3_TH^@l*lAx?)E8UN3e zDi?u<#2H`&CmPovGs&@I$IL7g?VY2UC~exbF*_Q|+*Zz&nSJbRX8ZrQR(w`VlqPK9 z`DH-Hdg(eS;-N%oN@7__jB_58cu(h&5ks}SvXzH~fSN2!4TDc${Z#~crFW(kHkeN!5QP<0CL_M$o zZSEO~Hg^*~2pw$+AkWt7gOFEdVBMU(*f47sw$Azy@6Fqdyb5ji~!)qsx8cDH9M%XzE&PD#o{6kEGt8A@2FP z=+I;odcM2}b7ME7&l_{mzUdIWQ-2`ds1=WPO?zW*j}1ub`!P08JB-P}t1!At8uCVe zN3)L*^UM@X4O@*B{oY5^Q^KAqIXRu@@cab*LBDjC6xJ-b9^f*ZA{ zs1lNro1;gM;`7fxH-myT{vjeF0!x=J#ooPpjT>9p*XtU)f|L9`m7Ytv%mEE0ts~AN zXZ&ZtqR=5arXBy}#>5l-(e!AoNNo+s)m9tA8v0ImW;~59kyj2WNoVrk?EcAIe~uuc zY4Flj3YdGz-r2pAC=8BwrBGO+J4-i~l*=poT}%D)%P(fvYo!gf=-FxPYEt%7*`Z2a zlww+Bw2n+@gYzI0pef2tSs^!^hrl|s5cl*-EE)7AKFRzQyH*2v9L+z4ZR3B!gx1-Z6R-(ciQi(>D_Q7LeJr9I&qHLR`RH`_ zWCYfpiP?c4U}f)b(DQ{Q=>J98kC_EnKXEsPx6Q(!HaPsfSHIVuHsw)mMzbU6K5?h({;Usps ztD@P0AV=ZGg3v0gob^k|zpyiY{Rh`ypkGR^inT&*bc>}J9k2m26E zUYd!JI)sJV^I74~N8;nj=<`Af!duKi)V?7RvKqqkt>J81}ecse2 z^-ZnH=qxK7$vG4lU1nAzJ)P5~CFY+DoTbF_f1{i$At@c(ttqy$-AJ5)L}MW|%~pHr z=nGs+(rZl`4HR~srKD?YII{ha*vG$jKV4(Hv)xLrF2(TJ0AX|ig@a5KR_a!`nKCp$ zmpMxd)1578aVesCF^YLj5r4v#A|v|A%o;f(5t2xhj&!aLC{@Mg$Lp=ZzZ#FCu}KBg zv5gj{3c1Glf(JU)n;;^FkmsR%rkrE6Ga)W|`uzL0Ct8VJGloexqCnmS5 zpq5XZs(eXZQeK6a&>riVn zI@TGBuJtES#xo3K5u|RTdJ_=Vcq+mgP37@K1l66yXPUw=O(3}NIkiw<-5C_E2jf^1 z#-4I*%O&9C-;rw|^Y>L(9ApX$RhX$XQh~&5t3}JGpD8|HfmoXY+J&SAE(J8v3KLo` z>lCF}2}q7C(zzxV3*$x`E*6~S_cJS#)2uuSsGneka+0;cX;xsTD2iW4wPK>v!z88m!_u5U)JWe0q|1J8i(n_{uN{mh6_o^Q~$Op@OJr zv&A&&Fs|v|z!yWHaGDC9RstDQblBJZi}a}bNTdWVTefULw{G3AapT64_kT4h3uCNt zfA)fSNoF0H&Sv_xwIJSHglh4-nsd>;!BT`*TZqtm7a_9#5(G4yjSd8ah-QL=ab|(J zAdNF=Fx@PIr~Qz$@nm4#DTruDC}=PP;nag_PexF!DTryb0Kv5g4Yg+@wALI%)?L8L zem-G>;8brOx)CI_ok2w7Ip|Vj1R@(vLInK_sWpROF#{32S46#;2)s`%CQA_BU@q@D z6B=4-b{iDk9kcfMKh!#2tHxjVxZ1+`lSW$Y=F_4G1l5KmS`Hb6VzM+ zQ2wY~1DU_Cy5b`4@zIZ9qQ#dlu7JY==76!KTVRB;MNn7{Vo=$S9EuV5Q~IGKenk1oas`{rQd z4>Pdg`{`Ko^#tU9Jr1kC8H4rTjmPHQld*lzEPQZa3BEg#hrI<`2tJ=PkM5$M2kHAs z-mB2Sk~qaAE9!-eb~TQPSr{wUXSE5%6?B$8b_6zn`_2T zsbxkmDrMVW4ax=?<;W6{S(0Qx#$FIl-94eT79hIbVno+jgowKrBAgOheE|YkIfpdW z)NHNRI~f7BtuZyB4dbIMm8p_2oM@*tALq^X}Z7wMVD$52@`XvYwLm8=yCrt zbh~dVLTb!LbmJw2jQQwVjnGnCQ=hdT(pW;z7{oN3XTlO%gOE{sCb|;{!fP($`4Wb6 zF5MAK2vCBQ5CbOauClfVP`8+Npe_;m9bSu2!~35H7YW1d5VJ6z18A;+%r*6U>VE<< zr%x83;1mlIg3Or$7E&ysoW+z_jzah>rI=r55*52Deg!r9-V^B1)iwEDVMXhRzaqsS zg;>L%sxd;$*#ew6qq7Tn#T;BLvM`o^9p&7bB6aZKK?5)vJJY#yXETFE3!`cEV8M!% zDmME%XK0bA@{DG5IF+T5$vVrQU==nYUv^ z&V3k_(*WagTVVNz9kG4i2z*_TguQ3B;Mm#k2+Y3{N0joc7Qh1L?*h_@T2}@8qY&e?P=8KBf1H}NB~4w5PWEFB&}5w*>C|O z8_YL5Q%5(PjNlrh2`$qR%W%ZjTY|_Ma}ZjM?rJ6>rd}$7@0*Rl8j}%w|6Fvxe-66U zm_%5aPjFaBfLMgc+KVX*2rmShngp4e+E+=tT}x&YTx^iBSNF9;E6 zP!$!hKrPUhq6IGJig--3a|9geV4=&BhITZQumV;;zO}g=D-vdM{jABxR;$f5xlq(V zt)~66gq7mrQ;gD6CJycRGlivr9J#s(WSsJ8iTnjKs|^?uRcL|`8%{p_@Iwq7I1p{y zwl(XD*aL#?>D^M&ZTA=ZWy6f^?rI!!p~j6uGJ$p_R|}`og+2pL;wZ7=SIS-<@5bJu zo!GE@I%cmA!tk_b&@ZV0dM>*I@yoA6&y-T=o$iaC>7GbTcSC%-3*s{dC@cN`Igrr|wn>vFVfgUjrX|ic*ZaI$6jK+U@nVp%x|Reo zYGpCgj|nn?_e@80o%x8Zv4|kEl$G%Ubg4ZB!HuV)bM>JJYd8U&YECe_PIs<718?6o 
z7TpLekSuBB`0a_WkMj7b7~`5GT%lt8AC zMQkx)MnF@@Vv4#2GfYU7bCNR>g3cL!(>dTqMYE}_ugP!3R9lLtV#Zm~Y3htO#i1Q{ z0>g8x*rhxDRFsxvPDfrfqI1V9`>QBbQi2WVuWtX))z5+#|sKgH)8=#6Ds!M$AT?b^Zj_t+SC@q zvK~Of@>>wMq8xf>mL;6{5?I_3m*$GtG&l4h$i!t(2rcnBmc&tdP+|!)F@&4$gq*}I zSM*_-g(0Du&o?499$L63@y-^5za2kS}O)?Vtz*Gb^ z8;`(dW6=GvN$B0Tmm5Lm43 z2?Q_Ns=#?y>)*o3za!T`=I^VnD9Gqt7qHM$p<2X(L<8>57+|q7tx;q@H zrv7bZK6{;&Dq%s71u*ADl?ko?;?b>&N)?ts#BMRM;low}l-;{`n^{i!T91v5HDB)9 zC0Ta1vVlcuR_3-pwp?wMqTa1$9i36@2La&JxzjjJXwdZOeZ^m5`%lZTXj=@1X8Z$* zE2<(stt@(GyCX4AVB(4>LWv|Uk7ld95tHeG?paO?V&tcQ9(fIaw4ny$;L-h;6>JW;~u^eFhDGJT_IY-yB zHC9INO;f_vnXDoIHpcvW@hw2Lx3ti@L|5ZGQ)u*DdOnHNibW}oSxfd_OEb+FmWT$b zjHe-r6@N^Cm^i$41{XXFhL@+E+ImT+X+?(CuY#E0T@f{sn(%p3igF^DLDCehn{Vs%POZQ9)AtaiR5}o6UNJ>ne z!~@aUZivc~5O64wbQ@{h(mvPS;f7`eomje!&+|fjt~X*cy%3#32wK7Oq?<4*^D!j7 z7lBU?F2JF)+leN-nDUOHSkPtH4Gx`=%oBd(P%cc9R=WIm)CV7YU}h`{Sna{vS08*@ zdCfV`X|JU>AJ957p#&LirZ^ozgqd#jCL^rj6ohJ4(0$|3x&9c$zrFwq`ftaUxj$e+ zP!gs^W@E$jU06T$Tcizt7i*{dfYjmJkT>B&teW%<^2UFSm4kO+ai8@_8n^>%Cj5Y{ zGxuRm>_!Z3y&Ow=y^Gw@-(elE%Nzd@Ru0{OtdXB!3_<4O1-~G7=zAFO(n2IWu?Pd6 zPesDx3lQ0SE;`hlg02mxV|vI2?40`(j_x>%pH?5k+{k={HX_K>or>UElL#u)2re@T z5d<4PS6E#N&o$-Rl356;r^WCjbF9^U%?iK&owa02Kt{WX{|00>Ujv!Hu)5+Pqqdhq z7Er}%@^&_y~4uL<+&Ho$8|3vsBkE3(*hwyf@ z`!OIS7$2?=nS>uxErvE%^={c^hNxVTQFcnL(JRU8t?9z zfP-f@6R>{b$IVd`sVmIl7Ar~W5giX>>WhoyUnyr$(I9VaVIeTJ-t~D! zNWq}iOR>1ucBGH^6ifT>z|c<1(YMtCteWsW2EMZp8zz5?G3}Gkso@B`e&-;(amNs} zuQ?V$&E}$A%^3)Ja1oOGevS7R9l}p*3$bhUX`~MN0Fn1kMtFlM2(GO?op?;(iJ%C` zLh4MVu1z9mS_|P3VuB@tn7W$EZHbNrH|JYx{+wvPCbh;y)Si#oc~z{)J?vV=-4>ArmiA&#X&~zT{9?D0oX79=)nVcwn<~O@bShg?rwP7(;cl{ z-O$m+1%Z@qlyDbMibu(LSBC_xnAz&hr-g$xN)cd zUd9hy8%lGD&J_mBo%~Q3biMB3PBT6O%_WM9ZfiJ&VvcDhj1k^AUejMr>3N)?6ryOj>qQGP5g4I}Fn0&Njc4l9gRP zTA4+yWCaw0mTg3Fd=5~o)-=1q*Y)a^(Wyp0X@1&C?8TllTd-~a2u$1f1bQajh`1Gg zh|d5FLG!-wMnbkbdJs-bH&LDoVgxh}V2RFkHLWcMUI;mMD~&PFb*B(ia(oQH7|HTQ zG$n>m6HT~@On0NGg~t_fd9J3*NidU;S`KkbE2HPKdogw0b6B^lKlT*m;1o2w=@d?B zV6sGyr8TA;Ei#Nm>#=P)9~r-^?6_8Upr&4T>eR_Vs{Ot$Zk7B0t?bHD`A*{qrB=+( z#p@4*Y0y>;X9B&=4)gQ{}f66 zwqfOf&DcEsC*+L!63hDR#MFq@^dlFWrtQOou36YHaTivN_zXkdOh&@fNf_`_2BKTc zM@NEHM=fag;C%FZjX=~Y87l{Vf*o@YU~beVgg2XN)~^Y!O>klOHDfBYzFJ&pR!8un zeU%&lV_IMcE)IAJ;&TVpo5y2oy8?U5f{TwYrC-vl&OC(I(7@xxh-$bL-5O5Ab9aZ} zpEX|ON8bA=K54b!Do5$9NQ|=6o-@c?Q&*L`;)8unT2MKnG-3fSP98sscUnA%@QS4{ zjk35x87wO8i)24fxObs}ELtmPNjIS!8>bLXJ-vtoAF1To+%g z^{;>oPk&_iltr3PX=IfukL)tnA=RriGCWEn$Di!$VMMVq&k3F!qalHY7Uq>v*GwQ~D|ILx!$IsiyB(|c4#t>` z46fHC-B{OX z9E~etv)m}stpUN(yJxr|F{c#6QWlYmD`IeVJ!F3$hCS!9HF^gp7;i;PjEpK2F*42( zZg7s^c!p0^=y*6W-1*4(m+HWQ16aO%IePZ&iBCTH#JmqF!K;0IH7KS8=0nXS)SzZP zAVFsCTD;ag9{;?f2in%3N|;$pcv;NKeF=ep$JM7IqR}jje|rs^TW5{hor+;Kk7$h_T7!# z0Y76xn>7e*JPDoajYpUI6VRc~NW6S|BKo|r1oI+5Kw2j14Jw!5ujLd@%hCf zyZKIHnF~rTKE9NGDYMWz3lMVO5=7Kkj_!??Be2nAJY6*q|ETsVK3VrUiccsbDv?RI zGbNRb9O86%O=HZJ?yl)=Oaw+mh#~;w2EpGDP#fY9~D@FH}8-+<`3X^2BnMI59xufDr>A8r+-ZZgmy$ z8Urx7wmZwNdln%^y19;G`yr6Y^r9ag#vkJ!uMJC~AG~*bW*H=;R>G(?4X|ogJj44Q z!+pZEL21FfGiTKzrNO>}Hj<4d1=!_=i66=zP@1u%@lL~r4Kq+`L#E!5iSMgM>HQa> z@a!S#RyXF#d28_M12K5w&M35PI0;?rFF-)8g{*`ZBeLFNgw&pe;F^;V(O@cKADoVm z2IJAG<}d^`9EaEz(-GQi96Hn*h~Orp(c{q>=u~?!LYj_2aN|)3q69Y{jfBT%B97AL zzP{*Kdk~LjAoTw62yHqRp-sjiyxBzR+Jay-LK=-n=i0-K-u>ZO2;%*M8caa+gR>CT zY$m$a8HY|aM)UrYt!}S-Mj*WDOvFFD5COF(!F21?(V}{DjP5M9*~PlE7&9|Q=D#3y z<{`57as=PA2wkg9L;G68@Z6nU@qGO@_-O4HC_d%LaV3?NGMZ&%i=|n=YwGGz|C6pI zBPxij^27p}2b-d|zZa6c-I4F(O_1?G4lCSj$`*pfW*0wJ)TOY-r!-P%mrV0~R?-_N zs=RHm5Fl(J#0WUn&~BqDd=Gb1xywK4>j(y`yBYmHd( zVA;Ex>(|rGYQY^rW&qCxGWi6VQ~$?7MwOXevDqMDS8&ef0usALMT3I1m|nYf?aTse 
z8#Zh(i|VO6%62b5rAu_3#@@)x_S5-(_BADT_Iog0OWK%2>nCZxj{PoVsg{_N6stvs zm!m)n-|;0D5@L?y8*JM*A5*usN5aY)2wCcm*bEo+Tw~2T3Quz*nD`+k*VjOdb4!a_ zXu1eJnSQZYT?61Tnr8uk#RN>;y7|n9NcXe~ORsD=yE|>T)9B z#AlU3RFW^E7MH`g)sJJ_-l+`t_vVSMO(+>3rUKz}o^!yAB0*x}hw=xMx(GG(diLzu zX6m(bT$}j5YLvpIfyf7Ou4s=jSw1ZpuQ%z0m+uQj@Pp$K-o%=%6Iy*H!YMI4j@EdR z8siX9Z6reLO*C5t1XLe`z?$O`P-8p-YY|H7sw-y-k0%%jt2Y%vwI`x$^>IcIrJc-T zn$1RJLjq5=(dfo&BN}LBUX5RwjBx6qgpuw|=OFQcMaIvd8k1=@&8*-XO6k#T5yEOQ zT#XkJvW6XZ}YhI%UI)Q=(IHaS}^Y391BnO4PcAFLpB}4nJ=LRmluXerazkrQ2LzZ>;n4LK=^k(N3VGa7gmx zj$z1mn5jia@FYmeCfF$4lAUh;_`su#ftpQp^O3Kwc|O63-CAUUidvx-UJH*NG2gB)ddmh9)uV|MxFysa?gteGS+nK*i2q$K&CW7 zM&n`}ZfsyNEiyh7y0^_}Z`pb9dS3%G-6=5|LzC-?_%(zlwFV`7nkm}xDW%aXxhiID zcoUx;U5=yY_Ogs1%Wy&qQfO-y+^~@h^jk{ zP%#1Fb!VV^lLdsGIYz?k%|%qh1x637slAP6oBfm`8!Ry95gLEO^T3+geqa`XWR`KG z6?`M=&qI)|p}p2|39U5^;c~!3K>XVFjJc3OdQfAB$YdU_2}1aUQOi?cqW*s|6&70FqCT zG2r6L%33dbp6x+D z>DIJ}Py{i88PhW2&T9!JxxOCA^mWHd9;ec7YsHHAwq|vFa?jn!@%F+N-%=)gx{v%) zSCj6wk@s55FzS3GpM4{rccX_Nfk*M+fvxVo1STJ>CfwOjvdN8Zc#YNCqShu4hTVhb zR;!E+T7nt57xY{TWbz0yCt0v78(sq$>)1YbKDNQe?n1IbM%_pnf1x#J=FgvBvX@g< zR@MbTqe@fmHS5XtPk!0{UVP7zNY|n4__IJpEi?{$Gpbj|0-$0Pohf9VqK-rO{zMLv z-VHwRJ14F5lR7IK zZ{Ht+m+uWhwC-vJG{X{imso7?#+^T z-E0I@C&)C?J?5fYO?A~Qp!r;i^!bFMh4g1WA{!>r-zDf)onhp?!VO%_WY}jr^jXv` zv5tRhC;j}fWfxv=wwbCevoOMl#5%U@G>EvwPP*e*W|kcxOdDP{oPwYxW6|}2p=jSc z5id9HjK}M~fXz8OaO#-KKq)0OM|yJ>qp6HkzO-TInz|a)6$hEqeC6u(S7W)=_jnek zjvhi+g3KUqUt|(s*1P($a`z{A_z)yKkmFYl-`0N|yWa@F7m?lZMSKi)g$3e^M;}3& zj}Ml6dtecPVlFG_g_IOeZ=|z=7wD*^Wxc1L(bYPlZXW@Oz(L?5U9BPOUA+kiKE_^f zq*fMPuQ4gh2|$aer}%hTtuGV-iQFx9=qZFDxk+;O#60S&%2dR`r=G!~7hXWRk1yed zkVBAI;u0qRQFeHFV*>$8<9H+*!?TLl zjV8(8Vy0AsY=Nia3DH|eev0=g%Rd{x#r|u!!T-``^MJQckFEqthV`$>z zQQ>Pd^V;n zoYpe}DKm1=vRNRWuNH{lN5&%j;i(90IFUdz72!>0BDC>zbgMrFfvl86o6SRT(*@|- za2|r15+oYWL2#2fW;RZ@M$-}Ad_F=Sn2RtcmjV96G@d#^9Ab611G@XjzX49!pqy2P5G$%YfFpKV{BJ}=Qw3}h>8Ps$Z zh0xT5W)g-sxbZX-ZcCqL^b5^?>&A9xn*}wQj=&}~yGUeq-i~ga>?}EN7u0kzIyM-J zpofMc6tlmY58AOY#_v}aZziG7b|dAiDAKaX!U)$+}^kay?Zm_v|wzxf0Bv{O5L{>0 z$iDX;tiG=r)>5`LYKTt=Dqp_#8Zs(hkLB0jgb$v10iSi^S7qz=I2{^};t?b8ZD0^S zXxbb}m2SrNruXCXHf`{A%eSzlW*wy8bSpl4_Bnjou`}Ln&=@PPzX5BTG{k4!I^f;6 zUPbE7*CWZt6FG!60oZm=FUF4rEP@lY$OvN8awVt{w3M_gIgT#?89%=ZK<3mnkSVFy z?8?nqIoqiP#)b}ObBPTp+S5k>qxEMrOJ@A|@uo#UyDv-WdRxg48)S5BiH=o(ic)m0 zBmD$1mjW4T%tzYS{M5N$@a55TEZx}#y;t0d9xJ^GFs_I(gLbt_uckqJAtu8cQR&`h z0IwFaGXUcRGFsTq%w|%nj6H}~KteYbv^Yr#$Ve)%B4O0S_wxfjv6+{?KC`c}CA#&^*0x>jgXsTG=C{|3!oMdR|%qfv$D@W2hP zqUm)n;lUeTL*t4snDhH9zl^4pUZ%h2^#^F*u;-}IU}ZdBoUG^y|c-O`MHK6u0H^p9>UQvVO7 z$#ws^G-EtAF0c6DGv53>YF2y@Rm;}I{dFJ3#}?Uk6**bh7G~53+Lj%{JGd26o{-R|AC`}d*g8C zauiJ*iErO}4PU<961(4i8}C)QkCl7{R{9mN#KjLgYd691euHpw(E=QqFdoMed*Y|y zAeXehLV)JoYKJoee@# z*1ZHWDirk&bguZ<&%7mbq(AFD@lay6A@~)C&wYfvF9%|1dVM6UEQJKZf!2u;mN6!*P*_r|y1erv9^uXjGa8kF^(^@(@fh(7mrZhsep`wd6CmR<4oOP$dA`7Y@E zS_nG566BC>X#YwVbfC0lc0-$&19(pJ*E*u@OKpv`d$}FjzSx##?a}_# z4(Rw=2XuP96ZMX0|1!;9X^+k=JJbJ8JZ?vK9cbPTonP;OfVVo)Z9BAnp%vP_@(!Nk>Zi zSO0VA!1(L>N;tYcAA$DI1fkV4?eWgb9nm*oFuwTw8x)=?wianr{!}vgjVMpr9~jp_ z=4w(`9As2ImJIeawWn70Jr6ROZk|}<>d8vi({!1vV&%Nr+Yc*Td@-AFb2uUtU$lN5 zvnpSYt5pErf2$o%j2MNSmtV!^4jr&FECh?5`6rhA^D!J9HG-ctOYy_7KG@W) zHBL<(gC8Sr$STePyh0tBfs;THyOmU9sxL=dtX;2k=8^7%*oZ zPLCdm84onUMgq*L*I&j5o!jGRuLOM8>K&}O?q;OlQ41I}3SS0>V^HN=G48JF_$Vj{ zA4h~^@#Bx-VBZ1Ad+l}1f8sG@zWECF3?7JMBZlMM&KbjUeOZMsYEs!Ni&wWrK`b zWb!E4*JdsKuCgmO8(3^evCV8{yOY>T$NKuW!Nj)H;(5u*$);;V15O1uY8m+T*I$kQ zwmaKDtF=Mq0vgL7xv+-BINevTh@wKah+puh&;5jNPNZPw`Zp1~q!QxO+|0g7aRdax zg<53KflNE@H(`K@UCm+FTfe!$eJQ_VhFKmYu*F*DaYbo=;fPa@ 
z^5nVWDAahMt6c5dvkUX*Ovn7WGjQz40f*_|L#a$uon_{v*iuqP>RkV>fy~vYt|-VT z9Vl@)#we=1Ht+uAp~L9Xv$VP3&c+yS|Rzi zDwtQk3bsG;0*)mN0A|ib##&GGv!%%+_%@&$(s})ohnizgWGv21n2G`xgfkmA z!1gxp;KL5>@pVua><;RNtt}qMGXEQ}_O5z3+j|f;wQ7UGRqn#9hWF#Wz+kNH+zAu! zZ-y^oqOiGB2kaR&48;p)0jsj`{kV~s-n=Ph{j)jtjv9zB@b{LG8M>}gM#Tp z@M-H8kbDQB&7W}RscF{M_!q4kW33Hy36OEL$~c3JT4V$=YLT%)=GrMn6WI9^X6LcE{Z@QkP%>LC0}bbUqVN| z1u_DR?wPLWnOz!j=@k*Zq9Wo_Z{&Gp#A%HgUf+Y@q;93y9CyTNg;PrlrTU($KwuC*EISP?$89CoYX}w5TdFzU+%VXFejr945>ZS`~|urlR)`CQ9vY zK6L0%(<*Z@JUYLht3fFwdNliEPoiUqX``b93urBO>TneJsM}E@1UmmK=I@Ndjedfp z-_tK==k#NmJabO>)0iz)@xa>k>(FmNf6Sdf4@Zt4Gdr1EVPm+NK+h=sI?g5Ph`Cxs zuii9%Iz_V5mdtI4xd3EJ9)u?nI=NxpN}U}JP0qU zkXhMEG#FR2Zq!1v+P54|ba)4Q-+B>wcin{B`fJ$VFAkV64BvNYgRKugi0_|&7JHw60qbw5iforMyytaT=2-z- z8$O7mef#kG1;FA3IMTZxHb43gV8lqAn=p31F(o7v%O_Y6b~N?%$S68v!(#^XW>_d^ZmAO;p>YxKv-WY)wg>h#($5yN z0%C=5?yUS`Wxxuzut;U61xtm61$;#szA{WemOhozcq8P$oatWSm!dg+$rJt$-{@y>Ha3@OOQ$SbVa_08&95YK1lTRL7$76|wcPhw%05 zFW{pGn_}C&cjKGJ4e>)$R_5g^AjQoKsote9*VP>x>(<8c*l?U3)(5|IB=j_Dgp`|a z#`YEu;k(yg#(o0HhmD$GYt0(iT)PIcudjraF7C*7_s8a&@5WCpp23;7I%EG6uV6>r zCRkOo4p!b?75NOu#=7;e{n1D8b<3CW-OJBoXS1f*(x3r;dhHeLf2Sq3)USh#+i%6Y z58aRNUwR&2{qs?5x%Wk6!KIktZSn=^b*C(rK2CkGc{Tz(5AtSpPDR0o_8EbJB!Yas50EivceMt0f&(w*W; zh;c(qz87M0{1Bd676J2aK-Bz3SoC2O@^??f)iekdS>ot=q$rIz(K7wugCgBjiWTjqU zRpd&VN~3=;QL0~12gVp}NSXasyYs>f}%iGhMLhb8~6h9w) z+^8Oob#9H_tzJUjJ-1_trzb(e4@tUj<%(ESwmkB+x00tf;fH`h$k^oVYsT|r6DT(N zR>b=iZpX&b*CWHbH2p1uB)ZS_D~n_gFC>>Kjg=M4A)R`Tzd!QIltFH}vdAl27OP8@ zHZ4-=zJ6F;zA`pcyaDU{N}F*)>s<+U45#yKq*a=JO*;cJ1RH^h%rCbf-ZhZ9GRm$T z?N%9Qc(H*-BBcsdEj5~&EqKYx%QJ&fwU^WM88h(n?!CsHrS!F@0f)>s{CDaUt8`v} zrkF)K-_u!~#IFS!!5{gFui@SP&>k49I8?B(-=rK*0eqrq#y4jrLpevRYyc zWDsa_d=QaY5fQ0(pkMAkk^IF#{8+dS`|%!*;8PsN2iS9F6}J954AVb+0==>;nQkF< zF_{*beEDsi3tYOdvchD66OR=xo||2?v-~Ng7*4g!5PB4jl}w{4cO(7F?$~p7J2B?O zp8}aBOO_xuHWr&UZ8GQfJF?%6zL~2U3)NcX(uuvp~fYSvm=TfQQ`Z_o%IG-!a;6)IxAn?F9j z{${*awJP#?o=$*S&B|V_6xjrpbY7Q2Fp|Hj+_l(UrpA<*u{mzYB1CMxu`)K@eLJ#m zt&9v`A6}~!gSD5EtJ$+j-CY6%`KeWZwaDH!4^M2Q{Z=nc85dNzA&1vwxp*SYozEnQ z@bocj$0T{UVuhER35V`)R`hlFrDxT)1TxF#^=o;(z2$%)#=ft@F43{zL}O&+-d;yW z?yiB%|6bV@Xen#VER<2x$`1nM`MD@V(B`fg5ci-W3!EpkM^^Wu& zii^*nxKP*g?VQtigoF6`*eWdC)*d}qT#sJbky@=FdDcRA8eprI7HeZi%|aqb2xO=W zIARUlco8_<%)r~2Jlf@ZAvE0|;b~RSE3XBXeG!XY#hLiFC<*yr^~1{dW0CdA0DO6J z34TEeHXZDZ(OViJK9hb4WC$WXG)S0G6_r{RF&X~6C+!^YWB^Jaqp;CDDqFi~)2#z! zWEYc0`z7Tue@82PcRCBFamebXP|~z(m_MKvE?j7~k@)DNkIcCp|Mq*4e$^&RI zob= zbHPI{^$niZ8ZBz!$Ro^X(K-z*)~?&{(cf)6))*6k!Unq8Zs5k1_Et-grbBPz`DVw$ zdpQjIS{|pldK$l0@Hw(+mQFp1VOUOhGW#vjPK(;9g{)k3Cm}%aGv4}-}n6&Pn`1sIVoWf5QxSe5~I#s5Z zQ)>0n7$ohp^wn2i88aIy^&YNnkkLz41b%jbqX3C#*SCU^ZBgU`)lGuJZ;w(Ee z1A_-V^z5o&*ywq`QQ?k>cl#1)gp+FN?Jm%VVvN53eKSxVaIm+_2N#+k{2;-0I>@ zfKf{i;fVlbwo?$m5Q2<2V8+^CN!t^s1xH;=0xlbJ&dW8Bxw6W+VziGhKDVJoz+%IU z&ZShj+S?jP<>#)!fc}Hgzuy2>##51g zr4gH23EeZw@m^YUM$@+~5kzTsY_-5BeCL5nEi$eIArC|^^~0!~rr5lD1dicLrcurC z(g0{B)_}TdZysE9_3r zO}^!cT~X7(awGk}Bi7<@^pk}|K|vv=O`D3*V@6@e&TS|vR(T;e>Q-gCmdJU4&prI5$tbeB>{_q+vr zpJ)w4_5ubC#fhkJY=8Ix%)72E7T$6Lva8>RwRcv>qEZBgIs}L3A4A&1jgfTAO_*24 zA4_k(9_iKY#KzliN19(Lq?9X*+#7F17LU~}v(u*>QVAldgp5yKeg>bmdI@XpxeGZi zzQ}d)!Ak$q`2LwEu)SU_EUQ=s*^O)9leeG4NB?Msyvh}^kPx)guQa}X{2%zR*@MWc zay#C8;C`S-1P~X4&mVaR$v0F)#vM0d_v<$Gw|9CZl^MqEu7 zmOX1r8%?V9N1J47Mv}}eII+5r)b*tuulb9`^4!K)>Xu z2q*CYw(sqQ;aT?~A zm{scX@j`I&_2`q|61l%D#*fIwhQlEkmT@Bzm%E@p{qLK6D+Z@Lggk=I&sc{ohsR?? 
zeiQV{EN`Z2YfTun$_Pkec~32*CxGff-K-a*h4grDh39sAaOJ`A&G&oZ;cq@3T6j#W9^mpF@2B?itm z4w0D-|J&+a4+n8C1r4S0YG@nYxR;z_VB>j^9{~KbW#>CBkM{J)JGD~RgB*xwb zO4G6==+QOjmF4c5y5j1>7js#4WxJM`F^icfikK*hMeI!GN)*QkGVLF*LB<Pa$ z-om+zFj!$2B5lipB3x}gZaBlV#91jY_$4#1I&l@dqtYb&)xaD?icJVV^Fl#*g zaO|0xY^&c8XXAR{eZomz)hhTgE(}PWhrOL!A*pO>q?WFL z`4z6i-gjDKf6F)V^%GCxmzFJ25FLRty%KRQI1nj>sUKf_8t0~r#hHHb_^4hDEGBHE zd;6HZp#)TGDBAaFwOXTGUCoZ%xfFqn^fg`t6JDe6o(CDV%BV}p0V@I-in^@MgN)U6 zb{=FJ5@Z(PbdlP}tb*zq$oywY6{Ov5A!P%M%$$`o%t*JpnXH)Bi?DRn*E$sd?fhUJ z`tU_qEMQ=+rI5SCm8gV%M%hJAVX39GQ`r#2XxRu(-| zT#zVGS$Q40FRg{7&wAq*Y{y3@7GlEchtMyxf&m!=E*fv+kXVI7tuWM0*OppnGOb;+ z%`7GZF@zb0ORYn(e1-(xZ%}$oq<nRT|wb#96P=b2akP^ zeMi5+o?~BQ-?4A7pXUdUeT#!MJ4mzNip=HaN4nWX+0A1Kk7Z`;zN0_iQsm#aIB?W5 zJ9yMdc>b#~mw)HwfVoB@KdfWRj$u0XgOlj|!fUN?(@%&0=iNAm{Xdn9WYKRIjzho(bQ5VZDr9T#}5-^ni-H;>EVMl zZhpvBO9(6UYe?L#J-|Cb%0mw|Wh~#&yR5t+@M-C$YBby~y(^ZN{K% zr)(iatoAE|{axB(S9}Q48r8=V*D_dDsVYu&>Wckuy^YN`-Hl9w&4X3BMX|@i*yCX&&evJvXRNNX^C07I zflTB2ShZv>K}H)!u#jNFFcp9i78%z-=HFI!1!;rK#pkxVlp9M)=L;}jfXpj8~F#+kTG3cgs`}DU-V764~y3%;EPYc#-2}3gVY3^gjrYT1mouvPiYD@ zP!%h_93qvHv=jq+Cm=IrIf_nc^G;Sqgb}l6mH?9RuF6?3WQ!>)Dd+P7#dxYN1?36H z>Mo8}p^^+D`7bNWTm5X9vBhpba0IW!oqiVLSfZb8f8OoIy0cu$&W1!y_b*oKA$>ef12C(>t2Gq1r<(sNM8dj2JNi2lpMf5{4SZ zoL}B0)D;C8Qy}Q;w1|bl8Fgt0WPl?l4x_{U7RabYW|f=9oOn?@k>jdetUa)z?CmJ* z6pK%vB*XycS1^&`k3qDj}5m~ z2O^^J^^;Fyb*bx+LeO}(Zap06)(+pk_5xP=m&Xdf^4R^x+t}T@JvPNRn&pd;*cifJ}ex8`@>5d;>dl|oWcoS=CSHXO5?WQeo@y13sZC>f+Xk9rEESC}+ zTrQRi-3ekWD6&Atg&HSwEh8E?qm_BH90P%A-h&_$vbZY7Zj8gbN8ZJ*qCBkr{8fxx zSq9N7z0hT)H#%pPMYqhmG4r!n{CI9BemtuB!kSRI|mRHM{F$5D0BVPrkDA5j`I6_G9kk@f~P8;%>Abbsxh~3!O*SLVI4@e#D(Tzl(lX zIfWr-=CIl5)%iEFX_BTz_>aN zy(aaV=sLa@I*q%JVZGDDU*Pzg5jgxFGN6$)y8 zXX3CLbw#;H49N8M^~G{8FJ!uDy0cm!+_2Uhdm#JzswfJN$H9b~Lq)OwLb{hQaH!OziMkl(x!GAiGM{ZBlH!*9QZHP!A$x`&UM zne<+rS~wZl0l&6-4SD4&VcRWt;&|(J_~PkjkyYh3EG<(8+a7ul1$+UAql58&i^fR5 ztrBLJ^25)szJj7It?^N#`;hLVtpl`LuP3(fIcRBLDd=bbPp^2j; zw~el;|AkUaGtSSQW}SJ8q5_mAXU?LC`K9=*S_3R~=4=su+4DKJd@>#L*S&$hOR6AY zC7~e8gY7tCL;wK)^hrcPRPf@CP>q#Q3y6W0%Y#h!EFbCw4W1jx@I}P(su;g12Jar; zgx!THSpC@(7?bXS?rE+FB4mWDsz~6djRjxD;`_5}@%^dASh(qa3?byir_o=*2!SP- z5@A|k)ZKIu$e6J(YI!02WV%_PLvsO`+(f5(Ab!c6h#d4X-h43vEg$HIfJgeF`!gdj zy4xb;&-euU-)Ci`nZGA3IakCANoC8~lbAhg5{3@%j~(xCGjUeHcsy2k2B%0Xg^cPV zCU~{VSZxLR0xzPRrVtwW4lJ0|3t(Qbnk4AACDtmp0w7&!clP`}mECG+w;T#;v~SqV znKQBH7gjERWu-@=nCCO;zy=swh`8spkRsCo-Tl1hSHum7$Mc=5BYa{*3`*w*c-p

SA$se&RAg!z)vItsoyZ(+_ zu&2)=MlVMBMf=<+ z?+9WFi`AiW3dhgwME0i<=((s0`ev0yV!j)KQw0yMj)A*^6^)1Sx)5Y+(Li58PR@Cd z(azWr%WuM{^#Ry+FdKW$CSmIjPh&!+2YM2aqVn7jnq84FR3FQ}<(Cq&@Xd)Sn6tJa zLB@v#k-Do0GK8fNLW~9+>pVW^{J>rVGBh)LD^WB6*xZ|TVLXmo>w&~vKO`)@6G45R z#!F9k!;6i3;m!Jqc&kPny3`$n_{XMUa))H(kKTb@Yxbe|N5-ETu2|=sK5-l~W=zA# zF{7~a!w=0EkJC)jC$*>`B_U>lb=--b(KgL(8!Li99cq4Lodv@5xtrs z-b`BpXhQ&1gnxIX@m*TTF*rCFixw@yp+kpC%>R;#@6jXyiTPP?wFF}9b3rZD{)4|F zVQ>sV=1zi4b@WNOj}`NE=q`Zg2SHq>Cwegd^hhIwXL_M$jvspE`ZA4sA~xNfqK^T0 z#53Q;=XshBj~=v3$n{1-wz^uqjakn;p3}eBR4>G3_#-j9JQ6d@BR;(h;xjZG(2xH6 z8aF+7O>DY#ea}4RzZ`y;WH3Kx@LGPL#Ank_b>pVVE#2^%M3x0{w2x;v6HPc}rto|5 zqk>_}q2#(Ej=I7g$9p8`enxyQ(w*gEyYsqvR(?2{=gnZ=&M0N{xJ*CZ)1Oj`$E6vM zrO-Wv;YjhssMU9)-is_pdbY#>1VQ`0PkVRr4QSB5zFf|K)LlwlS&*Spy0Mm4KcDSS z9zKe$O`Dm$mb67duBQjG+}*IsofR-EdbOA&xq0IK>hndQn zw;Q%Ku8;3ucosYB*F)YNcVOxD6_8);4t)OjL)h7@A?B6$!`24(;k%a4;p>N+VEbLS z;p3Y3;@jHyVN1obSjBs&FNJx7~#Gcix7TrF;l3-uS#q6>Kb5200AJYHtsu zxe}~O`(WMOw_sD%TY2t=4gTfuUe!C1LwD(3?pRf(G&6i(FE=b7pLomNOyJs{*>i3D*i)4Ix zd?+So--AIbyy?ah-L=3T{R^Xi3WI<}g`=i{M`e3iV`OM=W;anZI9MzH3S>-+jJ7pk zh0vE@qGOf>V9dyQ=+$)*f}RwW#v?VbVXR;?epKQIDgTQ5e+ppE!6Z5IxH zUW9$$oW{(FOE7)v0(}0}cPM1yKF!E1G_iMv$J%U@@oUDus4LF_GUuFxE*0OVYlW{y zdBhfLY>gmBe#%F=)w}!qDf9d!IElKEwr$&n`1p9_2|x*RcTJ+R}q*KaIz9mzdA|B76FwF=w&NPnu;Ilc^;W+z=@&aNHCu*fhC48h%Po}6VDvcF)ii-zU^139#$ufsso~PA{qGN%I{IEA*w3qXL*F)>J zsKR$OU0PZoPmrTq^`uUi8z$jrSxlc=#HBHCt$nw-@r=-Oc^9 zc149{qc?#XV5U_qt9wGk|Y%phA9h8D;W zVvMN3unH(61RM=%+=qSJhG+`^spm)~e$o_FOi;iUMJ|2hZ8FwOX3H?}E5g|*e zVep#Qu<5{P?7=*2`8fomSf~$J?ukTxMe2*t%s^roOhL_pUSEdV5j#4w4DYF~F1w&( zIOxW-$VeDwt$7of;(-Af_aXb^q4;^v0erG)AJQjp!-T-)=<|;W=+R&}g6|uE4tMlI z>$`fPL%qT1+j21$4cLKYV|Su=$P~<-n1=noSc6du&oJ&7eA0*^kU>?JC9+&5B zBr4sF`O2Lj<4q{>VukIFcnxBAROa@UAOf0LL4(r1z=m*<$YX&_tnw}^c|nfco9<#= zznbnP{8%wF1Lc->f|)qwV~W*%%C<;O-3f|$vV zx+m=vM&(VNtNZuTy0iR_o%~>!EWh+jiZjcNT8<+b#z^IR&7#crMNh`t!0fxws3kwF z2DHI9dp_qIQrE8_#-@i$t1AvNQfBnK#BnZ=32D>}L)|sq+65a~vFCca8Tb&OsFKff z*Wz(r*yiGk9WI`P4r^~A0mo)fUsG9c@bJSbH!ng3!GIv4Yu6A&wrQ^;9t$+qy1QYM zn;W*XlGgy=E!4H>oED%{<-g6v$E?dD|Fi<|4o`2hs_y%gtrVH(5|XkASStxL%Xxh| z@0mrB|65D@o7I7JZ>>nY)}=H-s1!E4`C*$^X>9hrfzPA0Yut>#+Cyork2f;umkn1J z1IeG2YasJiQ;KQE`I!?XAY+Y@VG+s~RD9+v3)Is%Rd5(ZXSKs|0S**?j*Z`r$GF@_ z(I>aGDJa5OD5y(EV^hw948L>?$mpxbVa7ruCexE&j+(W^0z?I%3RGSbl~xMzX$`RO z^GNJEw+u%M79r=|P8hhN5yIwHN6gYEvHa`7*o9PlcWe?CZ+#MjmtTjz1eu;{8A|tH z!RyXKSBvnmu;Q0qe73%{%J5$0cwc`L7PFm!36E9SnORE=f5K{i7UtzKB)bmsz8Jx> z;-slta0EE~K@m1heIIiJSEBEelMvQy1cI85LfhIy(5mhrv~4yVFV&96(=~$8zuO#q zw00LxA1byQSybpKLJ2j{m?HLbXM`EwEGuS3brjJ2YQdx?Nfd<*ex;#4(&ycAsz5! 
z7k`A$s)WQ$KhvsWS~!_+f>$vArSO>PJ2Isd(|tL_=Uj)loC*XRA4I0PAz_t|0gMRd z3GF)HBabkVOSm9JB;cO(9S85eBdea+`1H0ne zJ>RNyH2mOQVvDCI)_A&`Mcq`vr&G`I^fQBg zw-O4r6FRoJd1D>jrw~e3`<69>bOkxwi&gIY`sr0{$v zfn_KC+GN&|@k16X{SEE}4_9x(hX+AJgKw?v0Y0Scps00bEg>t{i?HJ9fy`26kW;QQ zR(g1uR-z65gc=VwvjssefhtoA`q4kFQmic%a$L$Ig)pXjyiZXpkbo@Dj0gJfwZ=%C zK}P55I?K5RGFP!uOe@}5S!&@oRvfA$O~VY0m4USzwklDr$5cQ!r?3}a9Zkpd4KE`) zi3I}-gPxn+5kfswaKJ*s0vUB7*&w4<8GQ{AV$wbN)yTpuPrF^yE#XIcY>q#oR#wHx zwJ&4)FEjWZ^dWX1TZ5DjhGBGe7^ZI?j_pTsup9ZHpvD1uZ!Hm@cqqHx%pbLq&vsz;G6Tlp!BMmgvfJ1_nU`Q)13y$Pi z;rkmZ8&U);KmGI*I&|oO^z?L`IB~+532^?pAj5FmQj-51N{13c>REn3=q2wzyblQ@ zJK@>jvgp3x2K3JHVO}AO6D$%|vm#C;|3%omqXMt=AC50M;U@oDOT{-ApXcz zZe|>rT3jOq0W^yxsPxJ$kG?sTk+8BN!WMcXDwR;9R-jye#AeWslrrd%UY_nMpf~TU z{HUGs1vOf>Auhc%B2)bkxzdOE)!P7;T8gy)n&88RjXy2&#}KQagpxn;^@o~s8I`}kmGX&)^2a5dAJwXK2Hph@>} z=XEYv;!TjKcpdFMOqWv*A!Dtl57Kx)?cuc2yDXM?_+u&WvE0=MYs+1SEtPK|jCh!q z48e@LmNpWcw9>BNYE^~n@KN1H`23+qu)S7otglo7Nra_Uz63Ep4`W+y$XR0rLc(khLz}99OwI@8(J2!a{c;n9;U5riJocY%vW9*3pahWMXQ@ga8d#? zYLyYl2x2Ud(ZJrAOfMv^;a8#x8y1j~xI7<(EH8(+74@-rXC%Hlkb;x=2#3yX$G&r0 zvFF@}_z|11@z)WUvhgwWR%=YMFZ!gipmr3l;h9<&rYzya*X-qF`sxKN{L+ju;bB3^ z^Yb91FGz3nT*K#B>5qsdH(}O}R`~2_5{?xA%t&DTXt5a0OfrorJSW%`9|3;Yc@&FB ztU~kNIZR82p%lm9xvP$jSlq&pkw2Hh#@Mr&G%ZTPN=+-gDl7sk@{EBQy}G}lvRhoVFyq#(TM-@}j-5Mqn(;BV z-Cq}El))TgUZB%Uv`_hL#b-|QS&MPt@Bt)_=z^z%N+V=udBmo9u)?=M#`oIu z=2Swv37+UM$pZoN{1C8+#|vH1X{if>na2e*@f%7JH2lzcfg9eQ;DYv(-O+i9FJA8L zg4%6d@X81m1n3^iOQF*YU%WZmg;2$Oo>UnzEBw%Yx(nW(ME@3gAeP`1G^Y$&j&i}9 zV>Cvt4B`n-k?I~Ja9t8i{)AYqF%k#J=vZbBkTEbLw-43%Vt?#kYID zw_eL1NNWEBkkODutsrA9xUDd0_iGLI9pG*4nY5NLpaHNOS-A^na(uk7v|J^O_A7(0 zy0pic;REo|t1n>T4L2a`-n+5mvF6zI_8a*4sVDK%D=*{6r=CFm9aXTnTv=>s)C7Cq zc^jWT{{&KM+<}z4??7tRs>r?LF08)mZe-tE4Qrb}jQ3xA9lHVpv8Y}hEU!}o-$exC z?685@{rr>2zx4*}eXtomzPl>cl`f5B0>L6e!j}7LpeQ5^XGe`j;iQRpzi}O8m-awD zfka(P+Znc^?%i=}@F0BLqB$0oEr;=*rLnI512`5JkNqKCvG%6xkxdZOLU2#bbgE-K$`+3*_kZvWQ(-6@6DWz=WJ9k@9I5 zY&qHsJI)M5{;vs``$cOE-%yudk^YG1mtC(6S0u2|(<;6ykRr1QFj*BTrD66^qItDy|Yk&sagM4B!qmQws0Kf{X5wBD37*HFxox4cd;BvC+|lqz>s^pd}_vctcB z|9&$*X5`3``1adx&A1tR?95+N>A6Ip7YScwNmit2T_Ur|G1b092M|BJ8=eg*kI|Ry&bk?|4Ryiu{apz!rSU{h!HWwT zbacV>Pr2a6r(IC@9S?LIaRXlK>4$pnxZ<8ST+lqi1yA;K!J~a$@a#xJOu82NQFI_OA8C#wJ)%)Obu=(`I12jp0|pHS`uE1}4z2LTo3Emp$-yK-L)B!5;e)wOgDWUgkV zn6~nWGOT$aj#r^WVhn#8pK*fcXRP{^ZjYY%9^3a!!SL*QW?E)!j+gaS<0v?+77u$? 
zfV0@+W&|}>L2UQ+n{yV@vFVi%mvTLNCtr_2S$AOQ+8P+Nt_J$#-@`Aan-H7n!!I{$ zri2P{vlT(U1yYeY{)o)1OpqyS!lHu83`|sWHQkv)P38`3Pl9zq&DqZ-k7a#F*pI#+fQl9F-}f?$pnPm_Kwe`m`N_VV$O8Nb6Y$d~h({ zsnHv6R~vwU1``p|WER36oQ(mkmSWM+4R}9mH%|Y=>qSP2dPeR!wPBn437&XtMfh(; zKw~lxkDZj=QsFFrMP-Nk#~*)0AC{+P{g^#_N&x4t3u_9q@|tMWmP>(*B~lj11mn5j z$^@C~k(gP=j73rRl9`pGac1d+_Ec|-+I%nCPxiwTi7se2u{?UG-h*y4Zp1_3E_f>5 z4KMWZz?(x#t~)bm!f8qxqS zCEkv=2H%V(U0m=8!x1p;CPXj19k2E)i|TK!{}&M_VppiXsLe96mrK_7cc6rSn1}DS!F6C|LNzjIXnO# z4~fJ2&<@z#_Dvk>8IMmocEIj{ZYb=TfSs?tgl_^m;rM{wINrY}ehBM^?0>YtC#~Pc z3EE@!bbQjeBeEW8fe%}?qFFDT=-m&Wcj<;NLqhOlOe8kG^*TNa3BXU`fmmI)Hdfwv z12)`w2Qtf*MV^}m1^Xb^y%d(ZmBNziZo_9Ux5Vk_7`%IbW8_pUgLMqA0f({`@X3=; zp)ituHmHM*jT+zpzh0x?zonCsu0~lLI{dQ@J+S_phex z*jCA@cfe!)*~HySy^uShxyQOu^xR0}Es>7IiNP zXr#07brdEBWTdO5CZiN$)BMqWCH-6BjqqhGM3YJ*LVfr&kIhm)yjAc>R1j$~I}2p| z2r?BBneA^i8PnX}dr31XbWhEY(f~#?TZ7lf@%qGEAB4>>g{f;_$0xt05Zm^#tgz!l zaidLVix?AymJn)ApJizx;GmP!#|toH+)ON*xEPFDmT@Ar236|QNdZ9DLQBR z>D9GYY(!9L<&{e*fxut4vbU7N{=o+yAT~DEjE^~X?3ihdv7b%He?6u5@vlL~9-}6; z@6bWS4-3b$!8amg<}Cz?3WRsf^s&;eD&HRJMrGa@vgR(lHQLv-$g~_*8ga|-M9iWp zc(Ioos=eWYe?+_B?V;uH)_^kTHtu#r&#Z?Rx>v%TPq?7wOD?#-qZ=L%^TIQU{^&Tl zBKqXqhJovELri*U^vth_@D*N2%({Ue8aLzZk$wasSG4MPJvt1ohGy-VkCJak+X-HH zB$6OCsw$%9G{CF9uEUE7r3}ctI?x3JGOI95mGE?|8|t@l!2?~r@Xx5Syk}W-n^ghv z8I{etE@pFCNu~uL8i*@h=OuO&XS)(NPIlch`Qeo5YgY2^k)ictG~nO$f-Yu>*(IOx zI5_8aG$d4o_i2Y8e))!PK$%kVd%4Y0R~}^4JbSi)g>V6jAP2~tut27-w=dFMy|BsC z7aKL0moTGBIGOs&oA1Kj*k0H@b_{-8FbhAnde5!+;FZ@=5Z@aZKOXPB^A_^{`44>AxeE$=_eD|no><%L zQEYnjDIAaMiO*ZN!Pgzyk?*3gmc=sv%E+tJ z1n0WPVb=@KVfFPI_u>w%)SKhsj}6so<4iyxzIxyRY_DGrp9BWs>k&h6JZ(OX%^iZx z&oo0)>C(vZ@F67Wd9Q)Y|9NF9Y>AHTbMuxc8d=5ym*;29EGD%@9LKMPA0XwEXe2DH z#)7Rf3sgTA2!aK>a8UP>4KO;k#XhGS!Hct)FDHVI#fPkhF7t(`rqRaW`TtXaFObuU)9#1bs=Z2HeuJ$OjsWk|#YxPCvCIgZ9(qzo)o{5cf zKERJ#_Mz}+#=Sk;$p)EXja5?P%^6x1QVRIfS|e&P`fUr$#pi!Tb@b>_WMyTcM~@zO z@4fd7xH#|J{?~*wh565bOe8_(W&}^ajg@gFf($EarfsdeVnD{U%J`yx{%v@BoDcpP z=Yltf`yqDOZ3Zq{kM_U~Pr2aP1P_ExzXeZ4l}F82%b?EdmC&%2Kc0!KfG5L!Q2R|+ z)Ogba&-L;}yGf<-*4VOWHIaEP&4(aU8nNj<=#f?$U1n9pgQ32t_PPrm3vt66eQrRt z*Idx3n=e|A_rx>3T+pDCA8Nl-77qk?pv|bVXc6Ipmj*B|E-j6Ol^08(ndHG`< z!NCmprDV8!V#_^saVCBcP7fJ_{bMKK_>5`5>}fdNzc)5K@G!PhiUPV}$3xAr`H9Ey zO+W`6>eC%Z2K2z^p`CHMUw?v0Cmih(h#vxj@k2rajt&`uGrfAFFem~aHGdpmJoN$! 
zqI=-0*IQyw*8reTKm7FK>)3H$O?=+sVSI3JHLUjXW0*X!-m5Hj+)@qiwRjR+Tebpv zCE|x?pT=kRH^rCtRKw0P*CE@p6t-8d3v>;_4^5gN$;St?ef+Wb=3B74?VC6f)CPHk ztRzow_D5jjnNgO@tXxe zaX5xAS-l)3I1~`tSZ16#$#lw!Qv-AzEhVaC)Iy?IDj?{5v-?|w#YJH5;(6G!f4?bm zO6n<1-~J5PFz*{oj!Y-G%tBa;ap>G=DB9HShqr6?LhHJ{5c=3iObANBnrR>6ht2y< zchwnQYy9ACl~>f=C4dpgsC(-y&+STG@>ho!XSI9xZqubSV88(E+O^9(pFKXte%8O9 zQi@UHP~NhI=Xw;0td*{uKxQvNCT>^+o(Z`bLDL8_>DL*MG2I~&rf)4N&^^N)eO6UK zz&sDM8t+2b@J7${o6tMs7Q8je4R^ocf=&}l6KtxWd5AyCKjeZcPr2f;u(F6>au30x z0^!3G_qOuFbNxK=4*httuM3_UNdH;EOM0*JLG(%&ygu3mb-K8qrpCw&@IlzD+wjj= zAKcU09bIO7qQi7g)a&eu+g@_P<8k~*SXv2hk8#C26I>Cw)E)iuDxzoFb$DZ_FYX~Y z-P6hwuMKCuO!kJ>eliQ!WvYcmtN#8uiFJt8B114rFHH!f$lQR8T{c+~CowHDRUIud z0-39&MMn2liKD`o89-xXlyo#krZYiiFe~(IS9h#+wZ?O-k?!t-JZ~?2S+gPbKllvZ zt9%!dH7)wiYS`1RHGXK*5;@iH#pc?zuH*igGB)-^w_qRL7E(UA zy$aI&OW|FD%z=MCjSp|T4Y^+4Sn1}6H1D$brcML=%4gnLt^%@L+_B2d7x}JkgpF$; z^M77BB3@LnJ?^YF)K{zrZU}&J!iHd<->R+F0Y0jDdkv5Xo|DC zjhx`*aw2yZiCRyz#eiB^)McasTQCyELPj%@)HP&US@Hxjgd2fNwkOTqDblSL8(n8a z;FICc>&sEf(vAhIX{m8Fv!`N{S&%Qk1hhSja1<0QGK0eLejpN^)Lg8V0pdLPE5j*eL z`NxKri{-DbzWnk_3>`WYlO|2VfddChx{?Gkf6ZQ4_R_kR{ z8t=or*JqX1kRcGH`0<)s&^NCV`mOcnH69c{#Ao@VN2)JkSNb6#vkcLAfq(TLPrThhcY^G)cr3s^TTY zZ1iw53KU=L!Jv^i07Kpz0Is5Ea31m3;S*M^ujtm%ca2N-<4}1^Vd{*4<*7{*BTe2 z^-DDMm$k79RrPJC^46>3zhc@SIrjrTKC&DmvL8lVQblx6x7KPgg@>bnxUAR(jp=TZ zSOtqfMq3SNfjzaf#1TAtvXJh%+7t2l>Q2&BYuc$$WTC0))-eX6)GzKR1nI^SbyX2| z_*Eu%TI9>L-k1Wv9AcK;im`buu=C(396k3TdF2?2&K0n{IEkWS4RS6fl<>I)CWX{B z72BLMKA2vMice$Js(eIskH(6WG#omr6;!%-Dvjf1_YmOjg^frtM zPC@MRv(d51D7;f^INH@4MX2eIH);|PU!R7>L)PK_v>$Qk^V29i#1At0N5<4_Y_p*x zUz!ar(*L@x6&EV)a^1UkZ!BB3%s{$e3!+?wybBxg;tyWt#rniHlO(O*|eF-Ehya+BHTIarVV8cEBI`JjF$G#Cv2p-yI}>PCy&=? z4HmUxsIuPZ=8i1{oNPiv63?}up5P;mqAdcn`fi3R{qZe}RNqntGBOBGsjebZHlCclW^t-_mBI%f*DS6i=&bOySMr zy>x8~!<|>kpFn0!CC}x(^WCiVYXoT9eF-jF4~Nj9Rf@H!pPpIIaxs|vyK)U={+def zp+tx%X8jWNiI~OMSc3~>iuit1A*(_^qqR;7aq`?@?8SDZf6)ztvZ`8NU@Xw=!s1_w zDR8XyWvoHK*0j=?JbqcOErqB&Z$zkYV?o!QV#Wf{-Yl9$G2>&heOLgNVL@7k_8!)l z0s3o;3P{~c-BB7#6CpsNd6b&2Q>&uq$|gwrvO9h#%;r-+|b=VGD{+7eTv+7Z+-5jiT-x3R&?UIdy{9%THb= zDAWMnW9M+-lQY;b{~Jt=%0=umbJ4!;Sahg00f7x?qD_r)Xj5whLLZ%kQJq&Hd%_3! zY2z`R-OJabhO*NZ#9RzAlE1Rj`Z0NVdFa)v7dC9zV9o_FdOktS-yUQL&l=MyWk6;` z5S|IGh@e?F5D3be{GsLbqjUTyzGjJit+*ROF=M}!)(sH25&ktM#2fwBS3*LzHc3=# ziG=1EgnB}WAV^@;bG<)$tuM!X=!1w9`k$&*f0?J#z0B&py_g>p^9d{&1Qvl)KEWX0 z-CQ4@>5AAqAB1TJ72zt5c4mzz+6hp!5N231-DD7I)RLO+hA=`+7{N#5-IS+e>6bP3 zP36LWE(TT@9|l->Znt3B03!f2vm0|%?r2<|EBfc&jd}!`(7x^P?bS2=nu-a-lsOV5 z6e@Ksb-2F?dir@23OtcTpvWLtXyLgH1e%Q$L4ZIcpJ0$fSkYcat3BNb46fKlbFGlO zg|eBVV=Z2%Rd_eK(q4<-@z_-8uGTeL^>;mCMSC4>@bNOBqgInVSFPpZYQcn?2iAM~ znyJ(RomGB>5(1Q9X*=ENepn@%m5a4P zu=ZOLUSQxVq)eH?5G#Uq>0#RRU`s&oaFJ?~` zP3P7sq7;FQ@t5YtZ@SlJc3MD8d&Ov6j6f!gc2NYM#Eb?Ql>Q>#Ke`zE&V9n?(RdgZ zT?J=Rcv4Lsd|pCK;c0>i!R8df?lj{>ozxmXQ&dPPD8`Zni!pZi80^@%6{k-qeG@3o zXvgqVC_G1<{+y;1Fn&+6f+{$7n#Cd`Qe_gItEJ`icZK*W^B7VGeTjjurJ-}ZNoZGd zJlfqyu&F%;of-~DaP#39*lG?I^vcBt>EBpu(WvoEUj-^_^=<@RYN;`y_^n)Mz<-8J zb)mVKhAObne^BVmHZ8e~a)AS9F_u!7vbt@iHSi#c=V7?Anwv(HQ#v;obQzXix# z{0I)Sb4+VzwVegk-opZ!06ZI70l~8>mw-$}wi_Yem!bjg-h>FPVC{p1Rb|k7Z5i~W z-Xp6tL8OdXS+{3SIYcIVB8DFuz49s;pou0(Boa>ISNj?;3E_D}8uP7oh0dx-H`gI9 zU5oI^pHhe<+{EWHPnuR7FXm@&LV!2(D#2xSSu+MLEX|Xkqb(3L4#geO+PXm76r_|! 
zLPjO@NGpdP8o1B<$Eq7@6+c?^fx$f4Jx^nQ7@mATB(CyByvE0An-SAmd%iX0kBYjW z4anFq!*axi7@3)tSsv?yC?-eSr?@#l#uXub+nX&4^g^sAkc(f|h3blfjLEAEhsmSL z@KlG69mLx$8Y979tsAA0?csw=H&*ysJy)$VlpNCqZs7tiIFs+L=~-is-J9mjS$aXP%F{#Qy7 zuDq(1V~f?IaGI5R0W07_oGD_Zp~ctu+Dyf%mH;|arF`P-X;#pO>GB(F{&oyTXEsM{ z5}|?+(t`z6cYZB)XThhjD{56q$aO~_7I1x7X!Rpx^kLzdsDANO7BDPC!gD+j#==aq zj$*WAxyG0+fJ9;FHM0DS>`OCtC$u z+fIBv%OnCB~C(WQaCs^bN1tT+q!tOAfC#z_(3 zq(C6ULZz_y4Epu!hY1rV;`{HdSq^3pE=Aiq6fvHPn7~yn70^xyqa_B-ZcDs{fZoJE zxk-b2PyAerpLQO_h8bUDe79tDeQ+S&sveIvwR)jry#WYpG#p_Mj6uS)Q!y!kx2v_!ic($LX+K@udYL4b|tzmLpY)Pv`Wm^ zRnRB@W@|yaRJ!MfMPkDRs?$v!ZF!v7Bd!6M(-*))^7lDjkiS?E)r>;E6NR(NW$&@>pU=Ezv zi#H!_h*&H(_@ zZE&&7EO4;^!C7Qx0gK$LB}H2XT5 zONG&v%h}oD?1pgUXw}N1oaI|s#A1!2LQ=2BluMNJ znfsdhUn$cPbC#8ZDz-waf-zHlRT-Ctft|wTjpFCq`FIg3|wWG5Z%sWx=88&^{ z2b0${M*oZ(5W9k3jw~3phf?e+wXV1#D$Ug_-qo7`(=XKpeF-tW2{G|zlR1Hm7edtk z&o4F2CQ8WE;6*|Q3mT1=(ONMP5+197O33%-mm|N}2t6^{h>~A_>ZcDgfFqdUz4%4f zTVIeYq_jC?1iwCma;`_f%6rjg)m!)wYjKniql#J_W9nRr(M{bVkSXSKGDc`9I5}Nh zfRjaasbUZl$iGLE0aJ5z<*P#{r5ioixwxvfR_RPmdfU@j=w ze-^vm-HXlhKgR6XEcASN8ag%_h*s5lqxHQ5(Yekjgx@~}@lVdd_^xTloAf?*Z9j%H zS~XZVq&Mm!6VPZyX01D8rfU;sOgYH+i~@#AV^7pddX_I(`}?TtjA_AqWSB5ouigr; z^&Bci<-c*DAWN5pj%8^wF%2~?$mot^<48h~AuydiLugVQJBifx?e7_gnKy4P4jt0I z4z#165_K{CbqD+E0WGnfhtFlq_1`HH^nM&4-4@6M5oF3CbQa+vqa1qVc$)OB^xlhk zAU?~J&`=hy4s=EBcUyE5|<^O~l8XZRCtDx=l7 za;VisM*F3 zHQM>$l_6#E_SiCL5$BGUlNmmi2MKG+;Ki{Fe@}nZ?(Bnl-L$2F8-b?+@2?gb?bXH) z5_MhW30iE3QP;G@(F)9bZU*{Gx5aMxH9&J7W^6Iz=~&hb%(Yhb4eN6rWE?N)vg(S0 zj3aqi8N_5b>)`P5U+~T&_0ZF=3|6|A#~N3E2N{h1PQ9V zb*#$UHv2t0XE*2F{r|iEQf@B1{`c>3UM_XL0VKwqU`8!6xdg#XM~lp0y6w}r0rHp3 zL_wj(URYmL`eHOK<9bK-%Q7#_Nv^3YrtB!UzSspYd=tzg=Nz+>v`$J9zu*f^iv-;i zoyBQZe5Y9nX<0mNW_b2AYtA!jsyU8hCvvg=y>1w@yf$KIRY2T|(umI`V5}yjFT!p*o^A_mnJ}T5!GVSGhv!L=uV45cab8kdIeto>Qv^idw{sP?( z!MravVcmhB@I7Nf4I8G?X2d973=FVhH{(uNkeualu|Q8_; z2Y4l&(VZ=dU+22ss>W>*Xc*KmVpaCMS24{Bc$F?>b~*6raco=q73LUc~fCvc(mt}y3wqKD;#5om}pF1)j!1vuhH@rW-6B~^}w=Ip=*TpM$SLA zj!pN8J!Xi`wCJC`%W!t~D)j5$-;9${xTNgvq`!2xl8GEk_#_lQyNu0$JH8YdJhvVj z`w21$1es?;Es#lcfQ%~M_&hfx=6fO{*#&*_eesI6A87A_mlDb$W_C5S?0YM2e#`|e z0?MGv`0MaYcNaVu=z|(>d!bp73tk=Kf@gZVn8orMbnrskF=f$iJi#W$#Z1?({G2>Vj7WxuD%7clxbXAs5tX z>w@ZUx}a98Qn-siRlAJ`s7RNn(nllw*@jb#AuA&Z$ZWY3e94(v=AQMIl8=m7i98oH6UXx zh<6o%jM9V2d^#{7Q-tFueny9f>Y|@dDP+2o#U>XYGtNa~x=08yW&=r9$~w0O!dg(H z?i*Fy7wgustzYcsa&~{jjb(mmyZ`m|mvW=lDzzB7U<2b!K&URJY` zx-~YytRW-NS>c2&!Oqa zShP-_g;AewM)t2i;*)|S*u!T&&iF80Dy%rIE*Yj;R{W+5ihrt9wTyn}j-9Y<=~5gy zq_j z7%*{DA+1e!7wzFh7gfZYBRug~tczJZ@3AQ6 z$vI^)B>zr=h&SqWbU~voE@()p-p&;-4`sef_qPTJr}>#pDBl?6j#q|wAY@)?yxgCD zM7y8~;pokgUg$dAA8!n^V5trv=Y?Kg2%L619*J~AzzjdUJ=OzH3Q{KcqTlKp(R+

@Z35Lh6TK0+;yQlhT*qg$#>%N%$qXQ7IdNf&jNQFt0nFbHWJ>Bq zl_1&zpE>&Ev7gZS!MYga<&PYfGT7$gO>x0iipIXIQ_BJ?Vu=khHq5Mcb-N~)62^_j z2x;7q#`GxevfbU0<6i;&dETo@ZR9PR#;Q ztqvkVi>d4M4)`^%smA&W*O(AtC(a%Nj{gk&^f`*Zc^@Y~*@h$8DcCh&Al9{Qhfz;G zj9J~^#;2P{;=NCLU|M<;#4Prsl9tP!G#@N2L1QwMZ4=q({03;s`xv z|0H!Iv2c;7B}UD{aT#7nNcTYk3)1*BUX$jJh?U-i7(c|W@<$ZEOtlMl7_Zk14o%Ni z%T7;1O>`#jweouO%zY3OK8eAgtwZqmh>o}^@MYZEy%WlWzk>>~ZE?@wU_7&EFcRNQ z#)^ZV;p0DPVp7|u)PqgW`BW^fywCl=rDA+ ze=s^X8Hsi^hoW`0{^(M75E7r6iY1Bbux;Mg*!RI_pQ5ax4uHkAjT${Lx&Dy@ZiB_BRp-2CnFH!Dv5D*|KDYt30FQD zGFjsM@5~s{F94agV=N#Oqp2k)L)Y%Uofng7C+QH?YtbX+KD-oC1Al5ta3P6EBX>0V zvIbgxT?cQ)5R`Nez8( zT|5vzs}3R({1L-}M3)5&KP3qD2xR_3Fw*ENcLQRcRanh+Sfw_)3&e8-EswXBq{a;m z`&Y+jL$AZ@v2JKK+!LRUbjO=Ikg}&YT2HBp-~=~xOmsun6}1pDzXo3G>5k_j9cVBt z0FfMg#HLk4hz2AasKhO_C^|zRMqoz3MADf50U#qX8w^Rgj4e4o>j+0K_o0t>0MZC# zRuRZ(MUY59#zRN*;R}L`w(Nj|GdvNIp+h9u$q>YdwBkrht2->TBsZEWicWPy{CqETO7byNQE?<~ zHLhi%5es|~p6QJct>j?m7Rr%nxRe5o-(F^EYU`Q4?4X`R`k~&~ht>G;)Mj*^HXbj= z#^C0*pQBDxD|m)|3`fWta74XkauAa9CqoNZK`Q}i3@kn2Me%}DJzHbP=xbG_hny=9QsSy~^Y#tVk zK8RzR3Xyl3X~`)^HWQ;Zn8$E(7+!995xr(?6feck5FdnY&BisFtCGfyC4mv>67-Vd zGT&a(Yz@hu%gD?^IWxx8JLj@$>r3GK?^mFA?_L-)W~@11N#=5Cj6jX(zt5NqHS}wM zjHZ@sCM{`ti=uPf+r49|DIi4%|`S0ski) zxTmq3*=_foUT%0f+6^tgb;E#VHzRs(b-Wnifg9d);Lb)4{I!QCI!>)Y08^6#0e{4& z1tM&&C*J?kfmaEH8Vz;B+XO=&_Hn~=VQ%;%{jdIK2X6Y%&78UPK~GOK9a0mYkGKwx zec^$|!=yjsj(P+5o*?Hwf}O^rC1LSKL|Rq6*ux!FUvt3yX$Nj-z&Q71Uoe}Xgo7FB zxdkU$Bp~;9fsB)WH$X-WOl`i@706sTf;LY)fF3>pNOG@+|BRcd8cuZ-%6&a zMOf~!R($Z(Cxi(X#MCQw773DX&AxH zD17>-uhHmF{Sf@lNK72?1NN@XGYgf(G%X2|lX!2`T`G_XBPkMclQhP(SZk9o#agl> zOEzOA$u-Tw9+#84VABojnf_`Ok_A#kWtcu=GJ5yvi6u*xR4Db@akI_;Dv)XCoLVvx z83LGa0{qy8YCj(6_v1BaH^mIVse%4+1ba)`zPGFmx0s-gMFK%?~AW zqyV&^>WPpU-sqBg6*^9-g5W7Z2%TONEl2y~lc65;pU+e4qV3q4XfviJ;^y3rh#A)* zI-w@oPxD0B?5YT!Q4@_u5~$4OU?qbClGGr4G0hvFjq^l<;RHKhInZnj+jfFG(IyK8((O)iKk%7Bbzc5vb^JMk)15 ziPZuwTGG6NcB>p-_?|#V3|b_><9c^pB}`j$=`B7C*@FfghtU%<4_CCSY^Z_a<*xgR$gg-E91;#F<`U% zEE|kg;A%Cl&1_0@&tv_JNf`b5%NSa#CZ>7$uw1n@fx~RvnC$C=Ob=g7bNFFh<5np6 zaXkm@9I+c+sADKB{|Q@9&%~_F(deG=XGG4r7oAf6fJlOjh(rRCG&goq7J$*dFj5BY zKmgM=(+lm{xoN{YNnhfVd=NL^i}%ish+rptG2Qao!PzNArMM$v9y`QYRS`A&8uU!M zACrFg1p9KQAg6pi>)27&HAy?bX95w@!NNj(pLZOC)+OWJ(OvP!xMrvo-T>938Y3{K z2?ArAqI!H&T;Jt$yf&sUdafs^%RPqOtfYtY3ee~45g7c{H`u#d{A`Zt@( z(@)w}RsQN7-4nRj_m=~k{~n_pvFKj|G8b1Z>A=)Twvot8ce6^bBbj|fLUnXc|0BAk zU5D62f)BQXaDs&{DRmLW_dOO>LBwqB=W;DNr(cC=P1mMN%2E^IbEU85fi9WeY@1#P zo#W2|Lk)D89*8GG9SC~W0ndjVaQw-E+Z#F1a3}#(QUKyp0tsx~&`BrG&v8fWe1H01 z6>&+`*j}|fJCNWd5HSQWF^RM%nCQeoM_7UfLguLbYu9ZKbeA$M89oS}=godW1Cg4H zPYt$deyeR2j6RG7T(p&gb{95a#n}DMfsCq*1zoIBj|F5(ke_n`pFaIKf~(cRw^eVz z%%JNqw@NJ}22?{rl^{}}xhDBnLyB)rr2Et&RYR(808;(^k?L3F|D4kNElTyX;&bWq z#W-HHcYSv`+3$Z1`6I(G5DWZ*m}knnnwS+>7qe^s5uMxu(e1G(@Wa#zL@ewvOVtsp zm%IpMG*7S2TuVevW3Dv6y>a%W_U^T11r`a=Xth#75lf^v2YLIpV|(Xlq}_8TlDs?& za9!=;VUEtw(HI%tzDT>~CLHYjC2;f`Vn>LbMM}iXUR;`yqb54@ZsGSu#d{W+7=2 zJ8<2rqvHFhne6nY)j^;64`Ry32KezrZ=A|YVcY)+1;s~DsAC5hBMZosGiEyevXnf$ zkbveKemr{+6L+jZhxt?SW?P(^hfi% zzC_D=hoZwHqY(Y_clfH+bYzTJjUzwOZ>=ELsKN!v6!Bhz46FCF(js1KutWtZp_0w0 zw-qT>>UAyEVe^giI|F1)zR+HE&j6Z26c^>8cdu^f)4K-_AGY?7v5j{3wZY8)I*^G; zx3)6~CF71>s6AyY#vDDr8lvXbA%LhsfZeu0r@tGW~@Ltj-olYN< zQ3a970i+bPR4HK~>JzOD0K@z--Z zhubCYMD}xAz7yHc|1MG(ZQU3L=36t~f9~yxPs!jKx%uE3KG*-#{YaQG4uvIJ?V_?! 
zk1HS}HBu>?TB3Dz>leHHf1(CenqO@+vNmS}3CxfLV$2>g1f1nX=g7%O>GQDjy*H2^ z;Af^2S7~bkmUjZndmNwFzWNIC(if0}vRu{7L>g(ctW~{{qJFA(hsS~{}B3C&=q$_}S^yd0i4HSQE(uI~c0SrX zuk~C{1kVgYw}e09>x`E$Yjq2Je_#L(<|QE)n;7nK#v_-EvcN1OS4^aNk;PbLOFBbW zg}A`z=9?&=B_r8*;VfqC+>W@6`FQKw{d0~AQ>o@G5Zp>0> zfdeVpn$7BnFr0u!;KnSaHs25kFm3j7dErRAO8&kaHA z`}?EiZT;}sAN!!)qeBt#@<{Y=It#P={D`e-Cy~3KADgKs6J4txb*90Rs;JKJMlU74 zF-pN2j!1t-$B<}Elr{aN6xm>gUl@hyLiNXbrazL(C}Vz*ehh)kteMl1m6cU-zLL!4 z3cvrIMya>x*8v$zA``)@eb$n8ukNy-2D)a{MXyCyqtD`6=(&(UAjuC+M!DmQ$qsYA zQA82}1Hna9Isp(Fx4?mPPT~lN1P~&U-3Ul(@Y`F_f9W0QyYyD{$h-mF(yyW2jTpG% zcJ#=&37wL!M$bic(SP~%=$>&s`Yrtf0n^p!l3EL0)9ax3q8s@B8iJt!w&5W3S$-{g zEvbdh9Hew!IXy>|T#U>tpU)drUYL5!>V0!<|eaKtdJ z6<|fc<@XL`Oavr;)#y--qEro^KqmJLLPEa47jL|dkmp}S)N{`w`h}+u{^Aqp`0^8I z_va_k_LZke&m#Dx7ZCcwpGhwxUvdls$!{1iIA^%B~?`XX9C|126l z^*ADezrcbYGuhEgN$_HWRyS9`GRfneMCuhoLG%8fs6mzHR~y3&$sh_!OAKk(-h8uS zrGSk1Jg{#&P7dsiWlubeL;@KdvtjK_?Ld;R7rqVh#=fvNz>aNJU##M@xt17@7@d~# zn)!=UEa5$DgUhk`_(Wvx?1pLUTjHDV-(m;+I7f?rU}sy4BS$~PCa`mz>OjvqKIlB% z2a%KcoS;qiY9ToFDs=t+UiAI`DU9FH0GYdDu<6KX9L>%|ZrKjjpi^Xg+O3u#KvjzI zv6hlsvA95s98(+5@$M|6Du6ACf)#avo!xo)NZhjn{ggY{s96zH^IHbqfswu6nd=9K*Hgj*jb#*`fSx=Z767_PM8;^<`jO~N(n__kt9jc za%&@Hpqmmhc?%%$W0QBfXN;7o<(m7sNRoP3@ztG`OujJ!70JSs8UZ9@h%+wi&Bsrv z2e4q^Dty~wHafrbHQL2vr2Df@TV^$MU05CQnL*~bf!IYJh+j$|lxa=xMy7b7?Ubr$H1sOG)8ksa z(e)}c999G2Gi&3E@l{ZNaCLk-DhQ!71JG`Ab-dT77G8_5g%A7GMZ57e@!2=kQ2)zn z_<;5ehg3(4vA$?J(jV{luZcH%1>)mjo@hUtaZYF4lYJ1Hq=R7njE+o5B7sJ76(fb! zRto|iQK=S)sURmfxfIA~+Q}xDcckABkSSn86OdsCqVrNTM#wHL!kR7XvFZEov3lQBJbul(AX8cL(7ca(I+Kbka^iKC{7G|2Oo_{Wk zf0ouSrr%2zV9he-+wwH5Uy+H`3sSIQ;Xv1gmw1F|Eb5&<;Iwo~@+OtDt zqpsH?tHJ*hHK@}3>qgm2QW=zxJ7pIaBCAw~6KL%WS#dEL(@#I)Y|lPOf9PRM@b|}b zPfsLydSkx3ClcJeFvZ&!a~`=Hr@k8k96ez1T={93vB+ZDhWQ#6E18HE{hnbshX^RP z;7IX8Y`riMiw^h3>>V92aZ?jaTwf0pR{a%YQ~!)9X|G~x@?S7H{Y}hV+X&OQx5C)X zUm)>FPkev=I~*)uNMEQE ztdYNAu5=?_BG!D2@n3QLAii0%7%iraz?1zVaASOP1VuN1N92d_jH-u#PM_fBey#E9 z#6F1oJ_)mr?8N$jJcS3T+aSSo6?m_LQF7Vwxl;i_Qp8tAj*qnW0T;& zpn!n`d}Tow3(UyYvQbhih#%^}N*hSYKV^UU5#aE$6Ie206~+e7Mf9s9(dMb)`1BzH zntOYp{=MDN=#gFsd*@q>iCc>0Q}*M)y3@!$!uYEA{YDtRF?iEF&dV>0{b^cI}SA0|#O6?mhJUQX>FU31lt>GNEMl z!AUxW+8<#_-e@$^3r~eOa040fU5y?1Yj+2l4R@gaAO|{3^+fYg4m2886YqUl6HP`{ zMTc2IXf@3fU(DkB1Tyjr4`}1L#@~40`A7$D`PhM~uQ>2G+th*Q!#xRnyzokA2X5nYZMwOul>_w{C++VQ zq^4zyfBz|?3PmqW{kzhPCaz@|$5RK7V&{gfII{N;iV9p(=WG!FCr0aeUGhz_EwMXQ z4UbKSU4{ZOyKI(-G#J>`)195+&h%B9-^M6=MFjb!rNw0bf-jn4VGAifhtgd?AuFaQ zGOoK9^L*-Hs#_Jz@T-o@TkpWqJ0HNTs#jyMR~0O4R1Z0;GRXDMks*n(@@K>{VyBV~ zh?!XoP4?RA-)#49Ld+!z)Es1}+i(`^a2jh#8*rv{EeZoUoOJh7VJPNHc zCu01;jaZX&9!HqJlJXd(-V&YW%(Rz~!AnX^hF(_0dM_};_F;gB{+wj%r>Irfxyc;l zf7#dyEn%-A^wiNgs~HG=9JcyDeZjsGxXnUOhy^{*MN+@ zA)Ypl^F~CvAKJ`uBe1E0I2{a`ay335?TwcRLSBh-N3)^6_^^i$?r7-1XWw|?^-d04 z{iXwf&%5Ei&)o4wcQ?G)$qmnSaiIAmH^gREL8pb)(P3s4w4UgTj|Y0;(RL2J8t0Ds z1A_2eup90naCwwqf#%;PDTr0<`=bxqzDA2ar+aFe9B( z@=P`yl7yWXXs-o?rV**qn8VZ9yhQ4V$zaVY4)q!uPDyI20SE0W#GzLJnZ0pna&mH! zI6oP!Ten4v7A-Mr)*PI_kcHg*d^5sS<2PH8GhHHmYwUP6&&&f?fBzLSqGsjgr9Ey1 zl@ykuAioI3Me6WqXAc`W$|_*v6_61NEiu$oX!TnLPXA2oy z7VCfoRxSrI*18$b5tXs@$dQxd`Uhr zAER1yeca!-JzC8B7UTD=$GW_8IKoJtXPDX>SzU|zAx*vre2BG`6f<8q08o@owCTn* zH|_8xZL!X0QUv9~B%Wj-3`zY1+g1U~Rc+Jwr>p!{XbM%|(y8*|IItu01Sa=dh)y4U zi&oDLK=X(DqS=G}(fHoJ`1GM(X!TSdbg4fYbG}@GeJf9)=mbkHm;PxPxFTp3D9Q?1 z&bj$0%qt*9R((((bW!g|RH`fEsUj(5zq@$h67=fU8)?bu$jR0OpcP+R3S-lMw{ccJ zl2zJP_E!{w>GMeHM}G#$xNe9i9hvZSofxmxI$nrL3qa@8>(FMLrga=>_Eiv~rr(N| zU)ROc1Q0qv^5rN8-s%*HkNecc^PvPC!S48cVi1B8s~{}Z7vaf%h+9w--4ROS`<_&fB! 
z#jRT5yC7>PsC9?Hgd2))Z-%SGMq|T^oe8ojT13 zF?6G=qeqY8^Upt5Ys9byre|onJEn_mt)Iek-CR$z-y(E-BcWZD`fcj^G^N<4Aq$-G zk|BS+Xc2xseMYs$=+l;ymM_MzR!?HAgBMm9J*4opsG6Kw^h=~rGCdZ`+02v;QcyU% zBSSK}G?QeKT=>f@!VFa0u&`$)s?VBWUwyHJq50SBsnbr^2@>=};jwR!0S2d43A?E2uQp~9mr@#oo2S8 zgVK5t7A6db*Uw^MVer5#J7>IK(-V6Ok~NUAZdbrZ#m-$FX2e8f(W1lQx?$qo;C1&%T_3P>Pf`6 zxEpZ~+=HzhUdG8T?_tXmk0HBpBkXy+C3duU0Qn6X;d^=ai|)A(-@ou8u7rl->fk}x z`AU1lG;D(JJ9NU8!Gm#ST#qtswH96!G*5ffyq;GqWcHP zXx0#W9(^3;?K)sby*rTR=6X60I+}%61`Ca(_*|x~_s*YTv zBhm%rxga3lKmA+aH^}#yW;)0aVyZyq@1=ed$k=|^RQkuNf8hU%LPg#7UMN(|sCIDS zvDvegl$4bGs+pLX8^K3U6I=hba{V=LCVCh*65H^q)NfNvD;+i|y5cnXF=NLdJu?HB z#oksWF8davTR(%z4&I2g^N`xD3pPqd$IOtK91D9&x@3UylG$avE$Ayw{vM?`{<031Juz;s-$Alk3(k77&B893&$6m~w|@OvV>Fptsm z8Jb!1o@Lo%{!7JssuXqS zHs}2LtstYpGT#S2uPjFk@@sk_!qJ4~4hAd|vxI8y$gJs&baz)|J6TZRDnM>=M5coo z>jVZ;&5lTRbwl25wNP?Boz0MXdsD~*r5@y{xC~VRQ`St6epiW)n)v1X@cPAvv^|@K2_RgA-@8XFJ4^KomnGjXY8M#fG zAoK3Kk>9j2;#?h&>+Oxr^_w8a*#q$gfet4ZI#&cULu7h>DiEmrrmwsHR}zt*sB5Z=EndN@k1!OYE{L6y>#{;o2;^zlu4 zx^dkf)}BB4I+>AfHmWr<2sfFM5v$suh_+IxK!6nLX;Rqp zTrKovof4{pgVxF+*JDO2!gGc}p*e-ebW7oRwiIUB4i==D%-UjQ274s3yUME?ethtL ztZ7sa2^NzA8!q*pBz{sv66KsNC0cVkr}1k0^)P%ce3oTzlIv^N%aLp3vKxadrOJ6G z;p8U5rR-UVuvLq~^JGagt`CxB@WynLE8c2Q9h(XgP*L`aJcDw176R<*Im!dt$-P@e= z=eL23dS3Ee6@<#O&lPh5Hs8@`c_*X9jxBTg!g4I|`P_M&C(k!ao@tuAs|ga;Dw9F2 z5ktS(1}EehEo$wVB_b*u37;U9LpTU9DpUB9aBhvy3LWj<$~!zZY#`K6aBjAlkt}eP<;m z2joe-dGEY0LJ6-$n4y{e9Z__MxjD#K1bnO@Bj52gkQt#shEJplWd3^UrXa(WL{&i` z$LfO?DHFL=R)KSuD{xl6s=Yv6k}&s+h1l`KPdG2{?Afxb_~lZWvZt=x z>fx$%kbAg9k?=)+p)S`sU#_Q-9*19xcJFe1kzd6B>5FAJb-om*FO=fE_%D-;oX_D3 zb1#*t=|W|DuABX{l@2Zxy-R?{R9umD=FFM+a^aUae)1??<05C72&B?Q!s?|M^7s=N zWAZ?x*;6tzW|#sTlF`PPC1aJ$HOvi+rqnXyEKZ0ge2AW5lG?Cjpz(6e z1bZ`59bAxZaYdHYO0!%%q%aV-_6|rAJ;m&dBq?0t#a*KKOSQNl(^YD#?p^{bF6vrQ z95dii4ci}j2v>Xc!QOUlky5)BmP=v5LBdH=lTMMi#fZBoi9?*(2}x43O>^~>nzk$A z3})myd7{Ll267zS)M~37OqwBaNEba-6%4EeYvVI)h4$15LV8P5yrf^W6s{@uybljd zba2O;ch*CZfJ}M$IpG!o26+-U1R3l28O`F4SA1vmcSi>qihfQ=A;{Q*3OAS}Bok)# z?c1mP|0cy-sucRM-lsaJE+8YKgA9{om4Hldf{al>Q{Fj(j1IvpnDYdv3&f6~5+_g+ zZxP@S_@EoMx)sQ?BlM(;8+OGM6H3G`UBXFml=s0=AYI;j`q^M~vbID@*lZmUGZy-K zi$}JEPaugGkYNiY$ET>)IWbd<^I3&0fZU&nt#oVT`fKFf;6{#&Bd=Xzut<0W2z$f} z_(VELCBbY!i9iIQge^L}KVFyblL94)Bgd7oE0tR(@EW{-+Uwn=3|p@hT-!8~Vg(to z*Fh#ry|;uJ+Dp6zq?XCI>~A&+$PB~bW5?BF(IAG;i6>R+ub^(;A|p?!TpkveX6MJX z@8BUsr{!S&szl6Qo{CRbWMby>49r|6vLs#8MP@Cr%CaSUAJYQ-Rb)1Ho6mc_S zMY@ui%S4uoER}N+EsShUmSl=qhDfUBZ@Gy0nWfw_ESk&pmrI-&5Ai=!#ZTOb-K?c4 z_;gv4=n43Axj@S5Ow3)CDdDCn{gcIsnEp*X=85^7l^GZ??Hi2$JQ}lCWn*4=Ef9d}{N!;fHV(>qb&XMD^_pT! 
zo0qWbl{fHx$Ii%Y(Goe$A4Pt%hmcYK4&*ex3wd|ki_E5XW8ITaVdrbFq3EH<5bs?V zDUI*IrsrS8_71P%*n1pAISr?SLb02JJRfU`n1;8ZsO6K`((!ey>F_EN@46eg_uP*S zZJx!pm)av&!ij9q1Y4eZ7JFXlh#gNngUs6X5Gw!_D}`PP3k?AsZotP(pUcVT*m31o zHp^SYoZ=|QG?#)!3cn=r7cYKN1%zS+W+qsi@#bBPQCyIQvhs_P!w_X|1Tu19-B0Sz zdcAEyhQLDI7G`uey6Ep)T3VV~nt^M~a4@i~8;Y)UKb5-m-n0!rt=}`vlsUiqa%+C9 zjT>4%cP@I>sDXu2_-Bh`NZP7aG%3W{?Wea&uzQd#gxoTK1wYO7mWn)fKRTKfNNBAj z8tAZvgMg4A@SY(6!d6MTU&j@K9RzL#gm})eC+x~v@5ELaZG~WV-Q+uPAh;EFgd;)={p89!l`o*fd!@N< zuY-(!pNt3H5QHcir0CGi!TH+#&<|Tixg`P@7i4-SnJrlA>Q$ zDp60fO8phoO+kipIWAOOk~~D)zCJoR4GkY_2fJDi!miN^uxs2Ac1_-dT|Lq3z6!gB zoy7bt*wudnb`5weX7$>Oye1;&4PMvWG!*v@>Ar*b7r9-In~K?O43Bmcx|eVoy`xEk z&Z3KdhS~5HxiAK1eUZj;eEZuHmc*&PT(k06+&6kjDow-PUg(@1oN zMZJNzsVDbE^V{Xx^vf_CiSS+OWPnU|?aRcPp`w;5nzuavTF|%)55()ZB_NC5ebky||ST81eYi_}IYpD;!nk z#;Kdk;8;^rtq^8sEHXOan|clK3kNj{j6@4~9O?K0_J7z1xv##Ab=}^;k-?$Z7cdf6 z1*R{oO2E|x3$da=7Wg&-mzG7MbX5!v4xfsPkBd;fMDHUcOz3^Pexkh2fJ>@!4Wr`eG5TN&$Ot+yvx3{|dHr?}H0pe+?AnV@=l& zaoD#n@byAmSnxU4_3Vx0cizWEDKN2gDe%?j*!{+{H=OAAlW>QmA)#LzzGiNgC?sRa`&-E6a%a`O`RnWpP2{MEw`KH;OCIBbS zmCy;_Q@*d?E|`n%HR|9SI}a(aUFF@>S~i&0d=}dCu)e!eu(S1r=M;V1h;C0^FZ6k? zx~Bvt^l?tKBWO_9-P1qMsnbmNrytXeF1nd+ZX2E+4<75nbHa6D=SC7`b84MG!9FDMbU1LDvi zAQnvmqR^=C3N-bPLes!lH1vx?eV+)__m4oM;5anyFYZEPP$OtHYKKIjPDl)D1xBG^ z{{-AQA`|t46VX7zYZM^5e*zkW$ngNVhPbN}94*(6MtwPM5E?If0%}VfYKogW1JhA6 zG#NF66HqfKR=IB!9F9hTOVKoF8SV&OjXJ(7&^RO(w@Ek+{8pktzh!75@wmNz3>t^V zqK?F|c3?ETd?QddGzm=xXP{nKB5DOhprPFNZQ`z8z$(;{@Oj-j64&~2z4~&Wb^Rkz z&o2T^CH%XGB#9m^*N;TQkVv_QNQtZ1%YD}gi$?7*>ah~nIMfPGl5|KxeYxK40n1Uh z<6u1XK^St@Y?4fm1C2G1Q5zV_nU*Qqf=tzC#ezXEGzc>ks_(!5UaiFZ*=L{O$l)W( zjHzV{GVv=SG3wE$FNUfzcYET@_>tH-s5?^L>4;4|2VzU#5tw=ZBZzpb6OPZHjf=|{PXd-18WY44sUK=i4Lcwb!vh z04DG450LnN7bJ#;;A(mjb`A_h@w;zf|Ih)*@6!w4bm)LhBS+)hmtW%Irvgci zV3a)443RDdwGR*D`|Co2HD1CE$3L*O#w-A1R5fG@3ycB*8BXs`G1?2rnDAz!#@Lcq zhzhQ*tlnp~$`EK+s%sBPFUWcNS5o}`bWr9o6@i(6!WoWZ#*9HqN=oG#Gt_CP0}s2W z^p%0HvQ!k~3U&WF$jG-YFvF1PgS~5IA{WHPq6MF#M~%8zZ0Ct$5snYZkwTbI$@jKO zZY*q>;aD*4g3aBw*^R{RtYUd530K_4AeJpr4r1=$j7&#Qta9;2u*rn4hlS$E$)6O+ zlu3r6COiJsUdn&4x+%!0=cFy}qk2#>Jbh!}B_QGzlmZVqcl1ku#Wx;K{z-5TOoeAq8k~IM z;TDjJ>ix6f6qp1Du`~H5z%4i(&c2Comut9)z1cq%mXHiM$@MG&X>jmOfkn)m0yE(h zk_&TC4h;U;FozYwUUZ|JyA3RaOGqY6K5;PiiBfTP56ywuFGXF$B`6)$LNnnhadGjJ z_ywfHB`gn?uzZ*#OoMMG3_fWv`lZ7hm=34lG`Nd+hNLRpHCX(~z46#9EK?){F1~Sa z=p6;ezAHOa^&kPKz}2YUeh}{K*bgyj1-NjY z86JU!nH+0vd^XJZ<%?3HO!%1o(Mm5|bfD1-n8?UTwU}X2Qj(-4Teq~-;yX^5;c?=M z2#kL8DST|SAXYN6Oeq*xKr;i(lL9zT)#Qv)h+B|#dvhQ#5J%qXfbe?W__lgIY zRLuJr2d54|TF2*6@m7aiVS%d= zk&=1lI0`&ThDk_R#_=o9zk(kJ4#hW*Jc8`E-omMAGjM6aSJ>d^i^ykNVcmPLVt=0= z*zwNWI6o`|*)KkW87^+f7m%r#G!2Pwyn-QhyfCN5qu4t@3e{KMMC!{Ou_vTIj!YPj zb%O^XZ`24}$;-rd141ynNqwwr(;9{SdSlzr!MMEO3zW~9iyhs%38cM+><8~bq^Bd| zoFq-0jj9XdT5F63GHkZjYP?)GL>upLBalfl*<-@r0AzIG0+~A0P;pu|yO-E9Q&3Q# z+~|O<3(csPNvU`ZWY{7l?rcDY?ZRDgVLh<{(op4@`YM+Q+#nAqUM~?u{B9N1}$$YM6ZzHE;;dhasc@_Q8d)3&?|= zZ#E18IWPrh!62|;^iP39P%2CTDX{lT75h}!2WAN<6vA1=Auu1tepvztMFJgbU8&@aVrt?0u@iQ_@O^zDE*g+orDpT2a||nPyrn1E=a-&DuP|8_zSd(gloreBo2nK z0&y?*DEDFza5D&O*~>lJ`y|V~%6$sZ83QE#{^{Z_OS!$7m?;670z)DMIsqwIh{6VUqO?m_9DAod z7I|9)M%=Ns&GRUqIvPI>?Tr-=HA8x*cX2r?5kF@pqjcEwQ1M#l`bcmarP9 zCyzkpQxD>o0sV0D<+doPQ4LuZGg2L$u*B63D<5l#ik0CwyL>q+;^MH)HxS#q_r~ez z6Hyu?P?WF~SBCY+Pj9@23jf|%)9N8C@$f`+{U-RXOBa+a{T!zvm*MQ31-LwVDhi%{ z3G1J4k0U<)aCYn%TwVMP%Hm>xh}AeZxIa$%^uoDW({MF%1r7`7l)Tj$m^B;a-+qg$ zV@G5A6OSXs!wJ!n(Q{T50Z(s{(IAF1LbUM*zk(QXuY(K=-MWKn(9l4sq#{)bT4pMgtWEG9qkoWqN3DE3e0+%UHQ!27GGN zz%n~mDS%znH2*xNvpkCwNqa6pS0(>fIGCQZqLL)7lC9r1ziW;IQXnIu4JbCC(2g^q z+z{d5g#jirRt^lq`9piPhr}YW3S|CL>ZTyWk4u4!YC{9UQwnfbhX8oLJ`ptn5?~Q{ 
zF$jFv1&9O|OCi4wcKtVK1wMfzEDy#ofs9Zo)CcCkey{+*00Kmj0D#yB3uFWlHcDV0 zvJOT82K#>EE}#(ZLpDjc0s;L6kOt(!ATVPek}qyW{3J}mQcx}&Lkr*-k_Ss*jsQ`v zgdq?TSR`;FVFgQ=j1$2mumI-%C2$Y`QsBcl$n_lrz8oY>ql9G$7Lj9vxHSmKm_paW z&PPD4|2jC1*a}ma+^0{vTqg%^g9-#x1j+)^V3hb72TD9c1>ywk>?A&RbUQ%eFi5T+ zMgU^^FkL8etVBRc($6UH#5h<4dK~;S;TD*QTK-XRX+HwZUk^cS?q;03M3B*Ng-?Uu zk=~L(p_!_+SvSap2Cv)wGnw|W8^&XC<&IJpF-Bd4`EfkMks0ZG`7Cd0h?R4L|S!kB%3+w zh=6C0V!4hEF?wr^HWTF9wAQ~f$nagG@ExPlTL5{^_m6bFl`G=@>N%{KI~9GZd*NGqS4mG7H9m!faJD>` z9D9?9LzUb>vJ3**Mgvmi{Yw+cGHPoltySN)c20A(e1N>KX(o3pcV^d;1C|U7#>qq9 z%QKKSl&>crf($<_RXUbyA8JHeA#U#w1h-em!^1B@3i@K$6I_C&zz-5( z!5=DM5-6}C1+txgk^o1nNSv4@i(RJp5ojRf__O6kL;%PbwqA)Tcn!?{g)sW%!%+%& zlfZ>BBwGsnbn%-eKp>DK0AdW(T2P$A^5GVm3)jF5I15y`_=_%(;vfKI@0$hZ{%hbg zaGiiru1JmmRK98jvGYq8fDmvHfN+s;Tm(!A5#|t);A}b0Rl=4cv%rzPxHX2cwI&-* zat&937uS$f@t-09BqH}^5SU^7?8J>-u-w-`NsEE&6;#=U$?*U=4khGK@ z`Y{U?E+fx9aX6#o!UeX}uuve#XfUItng%jsAA1~A&GuOBXpb0^Hu#S-C^&tSedz0@ zux1xiyc8Ck2F*1vVjPV~YgiwLKKKA-eS2WbWA|Z2t!h~F-19i!vmZ(t-i1gh+#}5v zL|dE@FJWjkK?CxfOlme%ti?`he)g)hBaUm;czUDgp_aJZ(+5A?cDH20tYteQLLe%f z(_CG}yj6!>Z%CK1rrFo2G*qo7-E^g_c zwNZu_Z!#jn;(}Fbgn0(L5wz~l>>Wi*XH=UgN)vNNyYb! zqQ853s|?S%u;%pX)3I#XGPTjH4nEYCe|oWg$iD&^5rVcY$XpeWxpMV9md~An9yPr2 zxycQwW>+Li0nF*`sq$PhhZb#gc|>CCV32jD^E1 zW6=DaVTdi-h4ZD>3{!+hS~jPBNEF?iQuT338c1fN7cLxhsdp|V((K= zBi+LT(MD&KG`$Zy?tKi!&UK`acSM@FO_4$-$LN3!W+$ly5{d{P0!e1V4X1oc#%gAZ zj|Z~78emVehq2AQmMZ*n3{FUtYjTZ^R3`%x7(WLiQUxdoE{vaYC!%J2NW9tAkSNzl z<=Qh6eu>eHjpCk7=^W&kEM{zOCkUyPh*cPJzmmBpO6Hwtc2uC1D(>>x0wd-HB874v zggXxI<$62%xR`$u$nYK0TUB%@q3G|O4mnh7)~u;qFz@i;!&XxbhHURsLziEJ4AX-` zkkQ5~vTJE3KCD+4(_O1$wR;V$@(|#3mx9;X39(Mv#-vqpV~KWlt`w$KyrZ*xN6zxD zJ0rry1uI+y;Muk7>VYNIYT;AwI_T=)h%bi^z=`Ag)RQ1^Quyeq)L%p06lB;s&yn)- z6)CkcqX>6&3WNLWli(qM!4?EN{~R^O#2C!+9%*pvpMmDnH{-qda~NDwfnn9FmE~L$h($;4C~aEED$+%0Ruok*Mwyj=RR>;*N>gs6R3rE`d>K zFd|bV7cQY3JCmncFzo%aVCb6&&!7z4IjR6HCYIo?;kjrSnuIS?PaUAzdT7#PXCH#Oy)El0Sd&U*uj-i>TCBWnrn1H%NvT)nD zd^8$eEb%FkYZRf*fHZgnMx%CUESir1dD=YxWgVR^gXleo=19$~ZM!gBVrIwWe6= zig4AnG)-z_k&aSpvj`|iAxKoDDB*y2Kn z$svpxBm?R7#Oct3W&@;xx9ih%1ctO5_ zIYYy6^7sLH76ci2B7e0P@*l2l4l*MAg!C+AaZ)}Sz7hb}P7~lM1-?UYu0RY6VRqAG z!q_(kjshah#^j+}^l^N$wE~}PD?{JpGibl$I9kv99uG`khlf61gJ))M!fRjd#DLUG zn7ERo#_uf0 z;{sd{O<9Y;{EL|SJglFgNz~J@bM$G!AmtoMFs~9Rk)_&plxMyT4-uPxW zKHed5-UK{3As>&7&c}PJBpxL|*R;#HXWDK!1#pHF!7E1_*CTME3CH;ej4|EB-V~Ur zbasE)`LRo`K;2_?fiaiY#-rKWLlBd{1s6*-kdfR>eMsu5s0@%bI%aNsQkwZcS+QVX z4Vj8MYr>(Sp~%k8R#Rh@QiUYPGiU{b_KtI~FF~d?rWwqLwg?cgJu3ecpx)I@}2m~m!T zBnvpiN`W3LFp%oN6-(K|!)e|Iff=^aaCVdd=>iJrPSug_>Wys4R8u4lu?7ew3}iF{a@j&khfg?B}su2%;$ z#}32slZVuM$VWl_ul7#Q=NoZba3mg{v<`1CJ%RVWJ%INX{(!)UBk1+*c8tqA zgTBi)BP`)ZjL$!fp(%$E60sM3zTJsY*`?^U;xJyAwHfz~E=Bqvz^_=pL~jfyt*aUVvnDK{>iF-;e1#uAox zh(Ch&zg&v}NyiWpeG=W49Kx`?O9;&@#dGt1M5BSZaOxco=fEsAFjyN%oDcipJlKZ_ z{K-8Sg0o->mUIgi;0el7tut!YS5UEPk?{;jgiD9fX#Vy<#O7@lkYR?&Oik|&kxxkF z?#%G4nV!n&{@I_F26tK-u%O`DF>~k6#g9M!i1G^7a9JRTJDM_9`l_MSwL!jIBrC6eBb?~n4_L7ZR}Q4_Ok*1`;q zFLA4hyu0qlq3%79->waod)L4+2WPDCaKjQWS1fjS#7dI^Nv;AP&Rm2~u3@h&>%Yq0 z9?^F+#g#$9`1!5&C}><8v2N~IB5_!EZ*!dQ+yU#G-HB9zq-b*u>}b~>KXrTyo0>m@ zRDr2vftQVsw!ooxIw9tsyK&Or7gtv=1-|(V-@p4B7S*YS@MgE+=RqOZ{dOl5G^&Se zfhB?(A%){!uFXi&#@^^cpwY&~uoX&&7>c$49$S!f@S(1^&ivg#=C>-obA)K>?23ws ziNTB+Gt^o$Y$YU2m&-SN`RXOfZ!b%2zf|5$c`M5$O-e;)A4a*j;wLVL7X^}zo<516 zwrs_rj2xUy%fzv)bR5i1!B2Ua*pr`KDL;t(n4j}2-PZnkJLLN^TiG{_K!r<&@{B4w-Ne&m=s2?&{o{{Ico}$lL>ziA{{ioRwHbp_1x^Ysqx0M%y!uHN`lp=0V(QZK-N^=bG=p2M?Kx1dg+XjJPH2QLA;YN2^>ydlW&T+DTt3Cgqq84fDW zg?B(aT-poBywxA^d7E(YGFLujPR3_pEqYaJj2u^r)-v-?R;&qAsE!^ziXlUW;G1v0 
zQOm_sU8xXz&cb8zi4X~`h=_!hkpePL<70bA#G2SuqD{+OWA7}5uZx_U)fz8Zx8I4w zef#3afB>v){RA#d9E0Nm99z4+kK+?Y1DRCdAY#yrO15hDXe?y zNtAv(5mysd;_Q^s$balXl!pc5%)4))-gb5R{ecM*Lz@@7fMb62WEg*A6ib{eE zOG<98bWz??f{bJVe6_hC+j)WUiL-+9R$~ ze*txi78!ZaxFp$%wt(1@v^+F;)fY~0jE8%0k`%gJ!zCZ4K^vtYE{4fJ56+=QaPrGQ z^`LYFWR>Hi)xY2lDXeSuh(+7!-(mDxVA_uJXgMMg?LW@KpoIPC@l`QKryaob;xibT zb_8Sde?~yeKD;(}Em{mr!(+n=ao><))azdW=iqD@`z69FG#xK4JOE)SyuWlGo}H9~ zkG|f4!O17#yW$57j6aHx*OsBSLTDln+n)L&Rwh@nG=s+zCft;E(C zeX6StGMv~hsfW1LF&On&8%!5)Nic9JeeMUt0+@xfgBpJlAyV?lV>mrv05-h!4pud2 zh7Ijr!TDhWk@DtCnBC@ay zG~zfN9c0*LMv$TVe>%vpYe@%eo^w;lrAwEpZl>+Kw#hx0%DX5KqFQ7GWTa4)(o}mf zIoX11dhv6YAQanBtX2eZ9$Pmtk+jp&>sI>j;pSF$)Nf@+{Z@9=Z)HdQR(8~HW~QIB zRfXxr@1NB~rXlAvUXVA99eip+z*TV}kaJb!isZL^hMfE&k4Qa_e|UELmr}PFBSVzA zBo9;rnWYM3eBkuvI5k^GjrZXCF@v^C!Mp+Xf$Lxl+XzEIAuRqm=&x z@2^a?vk%`?F#^H8ue~BMxBrx-xW)K7*$| z*@=z|j^VYhe!)ExcdMC7My|-)D++Z4a^82Zf3*bzvd&?KRE!2BoWN_dity5` zLcBbCjlfeZ-d+3yo}HSH7e6UPhq-IeNdTwUsvj^i^Jk3C`~|H?72<`NKjE!!&fwt* zJK*h?4vzqdW1lqG^-O^|q!^qs&RJb*(63rghJ$~3u0svS&IrVC5O5{P#43=v?|P6? zz+#0KRtYL0M(mjR|IcRkzmL*EhJz`;6qsN^@xu>4sG4vE*MCs0G2%uvTack1zcN~Z zjK0?r3vJHgVe3Po(G59f7p#`^wC2sR-={C~UTBZ6o$BDHS32W@z{^$vhYX*t*gbDH zuxb^~eE1>OKlBJNa0sGBk8b%Wj*K0Oi;F+S-Wh{%aLPdJ>DdK~s(E0E*?<)ic7lTm zi2^i>rLbS_RUM~4?1JO(y^hpojqr_^2i85^24_C_5Ss;lme#0_L;d`L#fyRLWK^Wb zVoQ&YP}sZ~j=%6M&VTR*(wfv&V`SzVOxXHDTWop$No3Tmg*XX6)x#U*zCPId`YTAU z>4t1412T;o#ONSHVT+0_%xEBE2QGR?cu7<8YmgzpRDlfReN`2HI>c}cmp*oe+h)w3 zI|p;-eTqH%b}NWs>o>>8T&)mcOUp$o%&^3jL%zP3M9xWKu+XJ&2>S_8P?#IVQ~5f(%Q#i}I#jlDwi^@>Rl%N^}L4qLU)&SkE-Xs7n1c)J;K#A612V zVtmBFveY~@?9dO+Z;ZVLGF%g-{}w5nH*3;gB%nmC5z}DO5AYsX0vDfjcnR3NuihD+-qd`z4?jMtbr|0azog?ya$M9_Ny8&(IY{!G+^3gCX0gVTxq0!(})C`gE zN9Lin+-rS-rba_Eaof;z+&3;4PtRJ97v^un?L!LCSnjdO&<*eo$%nIFCfxfM!^{D_ z0XZ<}5Th@MXDh#298ZKZnt}@D-tyoTkf6rM-1Al#;`27%0A%zplE*9r^I*?(iv7BIjq;)MlgOBZ<5N8ySF=_=d3*bC~n_QC%B6)sS zGn^eb5J$&Oz>bf40h6cUuzxQc8_*BuXH63#iU(q2aoM*w*0+8PSEh}}p^*cz{=K(w zFw7rUKK~e(7f!+9v7yLq_awGF`6M<;m<0kfSr(I84`itn_K6J|qr7ilod4)uBs8vr zlsoUhk+(YIm$y43ze!^(wKyXG@h4Ey{xuvPItZn+Kf#s`uVMQmtx@q#7o2F*4)ILvvy2 zy&6t^BT&sh5pF*5u52nQ|S1(^}XMSPzFm8?+fgei?ENfvS%p zVBaearhan09;A+1*(5SU{t`n{;Bk5)eF035vdyjRD%q80~GJ2b)e3;tT$U&^~ zk&5X*{kvkp!CLRSb?Y!tV1wOCEIf3jK*!p)%wwh(QzG#7_5KuRjhsbN$l_187Bw%;QJn(5p#PJoEJDb^vEOF%Yx5LhJY0}}=Uw>WnP;)}fd`T1$(ct6EVDP`b8%nt(1Y03wk>iS-ia-D zKZakP?TC%_Zb!6(C!*@!i4%iI;lzT^aCX*I{M@4l);GNuvFW8ur?1=Q5-D*3gj78W585(`^b`0+Cf6(e1k{7h%A;9DW?l_>x6EX9t{qYIx= zmHKO_TeQe9&*#(S(*{-u$TSgv1Y`w%Q7Tdq0^%3&ov;BhXxY!C}65)PM#vrab%oTvS@^BH%!?(j;t0*S3Kjsz4g;-@y`Qy`;Fzt%xUm%r=u>))bukkPx8 zs0cB(aLxBJJ39*>Pn(8>_yin1dQ7o)x%exW0}krFVhu+vx1hR|wblX-JiSDzkZVYi zG2K-DpcmX#asv?rymDEfMUEvu&_vQ!OLQey1#~LPIT-k|$R%7BFyu2}&LMBA>Rn<( z8PBWKUqjtujErRU%srT?h+3JFho-OipjxLYc7iqr=fa-Q!u^GUrO*x%xCrEm zya})b#=v6$SMiM#I7k*r6?jO8vlQ@z4i1Fn-b!}8vS0})fn(4*82tqt1X8&F5MjhX z=orMYBbl)4n*fu48l2>udmTB4Wx?#51e2IM3oKdu(%>kNM9J){UOgp2;`1c*3jw*tU*L;?r67eZQv{eWIUXc_xDS(vX)wWsZY1uZa-SjGVu6EuxsD9?cOo=#zbPx! 
zs0H&hknspef>VdFX#Vyf#OCYMuc>r!(TZZ~d_+9vhee&||767_;koeG{Q2`$w-Vun zg~g>ymqcexm8oQ{irJ(XSN0wASSw658W7=RK%%1*)B+4~QW$eg30oBk**arzLb#oi z0F4J?)FODCmQAoQA=<$Ot4-dBb?{JTc`imIJG-dyFUjIB)80`4L*PTs69qulxYg8J zK^!c|5|AMra4Q5(!%lE;L$ZmR0vfSa%(&(ZSN=^k31CUQxN&2uNWQa0Tl~(zHExU| z7Nk2mBHKY*2PV$JRjyAkGpp7fwy1FI4_E0;GdUw(;DxLA(tn}FfI_p@3X^STR>GBg z^)WBH5N7B`t>P;}m|^S8_3Ow`{(O+Jdi*~DSmoac>AzKk559jo(CDJSlRVZ{R75lu zeD(#hGV*Zb*l9HuhF?9G!QkA{a#aY+S6(4fCN>pJGZD5jaMZ6pD40nnhku;t_i-b; zzqV-W^<0#6JuCV-dDkvU87C<$X{dEOiI506CWdAN7;&lINqG*;F$ggnOJl8CR;j;; zx+%!8VB|-{KdP0fxoFzHw}8wD)DDP+BU@YqEGz?9C}*l2pe?~^a1Kd@SN~)*pICq= z=4?f?i5pOTF!wDI_fkM}?G$za8G6OTEg)NorC$cz1zu_mUJp;sgz76GB5+_3@UWA@ zpDPDfA6|s|BZ}Z2n1Je`31~TY2ikq{BkmbnhpElC~$iB^yqh?xAX)&=qSH8NAwJ|H#uhDKr_pPc&0t`*CFKc#1& z*B?{animxnsok!Vl9F)l+_}nTd^*uKeA80vnrPWk%F5Lk_4wnMVzx(wBbVrxf>>aN zpph#Dey)H=K9}a_7#celwX$!9fJ}}Q&czn9YQafza7U)4IuZl`2o3qxZ36NQE&?em z=-EoaMdjGyp=}M2FK)FlC)&bzYvnvw!b}oa;+6!2hBZ?76DA4_MzzNg0fi7lKMW&P zu9GbBO|qB-u(VZuv&`C5?pVTwz)Y&dy-=>XM$U^x))_1+97>jhH4HgUSKwi=wy-d6 zjH}jFWGi~B48cVQ8VZ4rLWoiJe=5k(A^plTwUx?2@jPddq~&UVib&*jXkn{Nxy^vz zN|6uw{Mk!bv1~PF&6 z%Z2}Qu^upUp{To{rklwHlrZj^k?uwJ!!X1}rQQEcl>a}%_e-iY0y5`BF39m^dEZ#d zS@QwC&!|jtQ&s7q9p4x2oe>vRAoCYhHw78i21-@7qCKt^sX4e!K*pofNYs*o*5aQH zGneBJ&Vaq208Bs@+yrj8#Qz&hk78s|1?K(86>~4*;i>!3c=Tq}9$Ac5jXaM>hp&ZGKr$Q#2nYn`pvIuJc=g-U_+aH>JUS)~ zeN#_h&H-S_QDDwC;N6Am(Q0A>T1_oN^U<47J#4+ehd@xDEVa}2y|WLZcXl~Gk@$?> za2~Bc-j3&H|A1iySFrFfFeK*;S_%+(^jZxkfh%a(55k~B|ktJJV zLiFw;!WNHp5s(qLnn?VWKda~vBjFKbDBP~VPEF6|bp&q2Pqlz}IK9dR@%r1e$nZrjf%qC*)X2U;Q zkcnTt0>htp9FrUjh;cL^->kJha28I1owGoOlN_7XQvWOMov~aBGYMGO+arb0 zVzwaN;HX+-6d*_e&V8EF1q`wTBytQU1tNKJP7vUh2Q2J4{wC9oGo>7*z;_beNkD?Y zWJZjDNu-3G>R>{qK^sG(fJ219kRowdWinuixLIX1AWFg{$fTPrNO7T576enRMo6JZu$bCupT-})0;x)BdN!sR-8pvos zrOR&tnG~ZvCjB8GLx1umu;oGtTP)-~xpGnDoTgV6PICBXi1nS*bzN-V$8xp=h&q1! zBo;1Qgiq&uhE3ac;8M91v$PkfVA@oaR}NTb3&It7=LzRmt!7Lp`TsA4X~=Yw;+l)s zsN6;r)05{FVonh-q;93`$4GMThZT{AD~k_j^1vZ5@J=FCsMV6ebmwVQ|U`ytbei z-J^cM7vBS)2>`u3aWf186?PJ~UB5Kc8MF>>EdB*OmLEpjiD?K0htZ33s?g~|BbNoFM^{$*27Z@5Rmv2hUWf^mJ{;P#4if% zrx#&x(h>AtvI|d)OhKcdICzC*3Ixe@)vO|JidiHOBH$qaV+hHTV=Ks1LX52l$XqA% z`zs+739#78wQ1P-FbnWh3y8A;nN9x(WcUGT86h(rt=W?w7N6AL_2VLpPzWq~D~n#U zTwG9N|7W1nlyvJg3*jffrki#6IRJJXcsF0lV0H@_1xx7EffSEI6HS68ls z8Ec%JC5{3l0!~{Wc?_p|^~5DVU+jDS1*Ca-tLtoN*a-VtwM5Zv4H0LNYcOny8{-jS zG~v4kAHenp9~Ad>5H0t%jEmdVZiKCO--}Fd55$@5kU$V|U`v$N+Mo%^_{3fn}A5TsSqHn zI4gE6A+!oF2Oq1vNTsuUo09f4my}mw{*@F{==wh`mLfKX>ZMZ$*VM19tx~1_8tN8Z zOIqKo_Mizexwx%;A9!{egX#gPFa_kp7@8}vz*ZS4-20}f9j71r^gH;ZRp9ZF*{Iv? z8+d&5EnZ%<4?Qx<@zlJncw$N(MrT|=z@lB4lYa>lQ%|7X_*lF=Hy?x2&Y;WeEx6M! 
zL5;1k>z}U1qEsKW7OyNig%4I7#na={(QEmSc>9ZusMBXYT2D?vVC-IW|8_GT8J>#z zVFfVy2xR${z+O!UUjw7Wt;x_dbo_b~ddD5cvvaq@qw8WkI5-~uQ76!M)d94gkc0Zc z3230f_y7DJmQVMR@`;+3*aEy=jZgwXcT< z%u2Lpq=U@gRp}tYR+gLXal?)IxGh0cRFvvgQs5$xqwDHe++9mGF;jn5kZJuGCYtRL zEro2p$sTK@ATE}|ca6~#Y4#>W+Zoi}Mlm%S;&iWmI5%TD;va5-@Vjou`gUz_uvZTp z>C+oW-+v#c0s?R_C=gM%H^s8Lb+GH5w{d3pFzou^17zNFFG?PL3?&aff{1$ckn&&) zq(1&Aw!i%rPW11OLw)_Qt@YE$Z}})ref|l4nK>G3pKpoX&peJZue^YL&6}xi9Jb74 z3xKS>`+j`iu@lNCjuHO>$a(l)tamlEL)dC=*6D}a}GSU^UH`O;EtFuNY!e=gc{AdE2GxbOp~db2E`83)Sh zu)_4_m#9?=Cc2=R;eoWN2{BH3|~Z&w*GakOvy#lSNaObjDt7Fk^~mQz^;^# zEL8w=Cv5J6bl;ra@Gk~l{A@RB_F4+p9*fX+;VyJZK7)3jZA9CtxtN@F0Ubvqpv#0z z3|qAyug%EETZ=a%IQVDj%u70CQ{#Y~%-H#2S9u?IX~wrm-O4I74y8#gL*-HtllR?t1eu!h47 zTh$~*=^!(~Vvk5iBXW#dfz9Rm3yki_G&v*5(Fu{xZb-ZR9-I#w1Vn}-v-6usdgVo& z8aoCRbLOCI`o{tklW=IrP+b0W4%UD0K6do%iQ}V2;?&fs_-W8U{M5G}j)x7v#y8(W z!qZP-t8aH~3+{zu6NaK}!AzW=IsyB;eTbBHt$^r-I5}%1vYvey8=ig```SK%4Groc z-DpOMOEsi%BTBdG*!KJjxHx$fb`R)_W%u8Pv=+@#K57Uqj~|Tq$L_{zcPGRN$gtZe z#nBl%+CGD;U(dx+|DIT0yB4AwH9>(u(y`f7fUo9a|5J~v9k$~LA`WJ)J4zr#wZzDA zfr&Gk{y&2ZMGo~slV1rtLs+JJ!eFzbW>wG{%^Sdz9=M7>B%%ZBf>Rmn2t2#v(cW@ zRh6Dhb1erFUsjOefOvOTUDZA=bHe}fw_2P$x*z!{16@(nTDX8 z3iM4qjQ689VVL;&DEbhd`)nPaoVgWuj^2XWL$|5fKX!gOa133C2IIHknT4CsD{T+_ z3ohf?ZzY_WJMi4X_3+6!fIeyG@btXh@D5Fd(I;K*#|klAUM0ooh(zu_-JlV+Kl=iH4h+WS zS+lS?z#kiZ`r+W%ao8mKPG5f<9We???K@z5KVO`m_OSrYToiTfidAjeqUeJzxG;SN zu6*_dO5S-N5pCLFXMca}9TSEhhX!I(&-byd>$^A+?1#9gAI0UFBT?Ap6)e2X8;iXJ zqy$J}j1oVyBi6ckt65Vk1ZL9iYk|E%y|6W`Czd_d97}3c!`==r3aE5N!sE^3J7Z%2 z0J6eKL_t&$<>?`A&B(ClI2aRRy}WQP*azD`dK*!V8evz*PPja0CXf+>itt4^+U*16 zc(|yprF09|im_K?bh7LX0x|-3CeCWQ1~Rr|WC$>S1jwjIb)C?RLmF6UvUq3VszZ#v z`t7ek#(EuX42}5uE69~=dPTX!QZn7L%NI~mT!4vVMLDUJ~dDk+5bTJPZHSU(k4P0-8_F!P{{c z@XWjus1>pnE}Y`bDboG2@$iEE=)dg(CjNK^onsE7v4s7|7yA&r@hnE|IfD)>e?%?0 zj$=SFoCmL!`&cKkNt599un$@zAX6kDLzpQPm?=>pV-T|{koj|z4lsJ_i{8DYtAvCE zj2$}`(b3VBb~@-#REv!GRZ1nD{19ZQi%M9%3PW495|A+=+}Vh1i#CmzGk22h9aOi{ zayiex<4#=g3qbkgDY!UuCXP;>iqli4VTYK7KlKc9-gp}a!UkgXlWmamR%eucBG58> z4$|Lz3oD;#gH649!m23TI*4GCTzNOg5bl7X!&c1Se3BGIcZzV@hw;?5m#cuWY^zV$NV z8q~wqCtKmTZ&zfrYJq51HxxN}V~a~|w_Zh7v*!4z^Lscrbs~O_ zUkRke;KJyk*xsm~8o12aPaI@iV0J(uyQbvWw$(+4m_HI^v^QNl*6eI7MbwxPvCv`0 zR`@p1oc1d0%5gK){{m7jsAb*LhoYaCUOb2Vj8x2+I1ZVKaX5ASh~~cR60Th0SQVRl zu==Sy|8GQqMs+Ponh|F7RxO^ZXJFL@d#<(G6N0X5k*NZiD)ncnTY^mG+bC+40vUg} zcM_1{bYp^yUpfpU*TH`1dO0W12*`w{!p?U!%)OVv?t_Ie`mBJ*fCRV(MZ?^EF&uj= zftSy6nBM&y=C{6pSFZ?E4^9^M;jsH~87#gj@EnkhhrZm0*VC@z?bI@Kiavvuv$mmO zXfmq#ab?~ma0rZpsaqWE-(L;afH+u!B4F$rEpZdz=$#FdPnJM|#K)iGcT!>BI}Q$V zEmN`sNXxQ`EHx`b8i7?8w?fRyxY1?)o8#M{uARx0=K&Dttmo`cGra*1| z7cR_K1u}n*B9!PLLnygk%@n8@GiHohM}|swPl)-|xg0B_>p&(^KxUYL%y=uvWH@N! 
zLbzgWy2%x3W+%j(OxW`1qrmtHxG;YK&Wn_N^DVI8bDSPH7&+~p$GQ$3aj3TsVjg}J z8{T*edwci6xnaX`apWlAV_?y-^KZXLx5@1#e53NMh?Ll|9<%4^-lOf zpy#rLx2auQl#LyRijf1ctNpXs*6I=b@aV(X(&TnzSzHA$OvrJoft?Sw#19=_!=)kp zadv1hik@kONRob?gz22;oSlo7L?R$gwlS!vG1)nklXYw zEOT(hXu02nryfITNI!h{(EVx(dXhj$wxmf0TV%wZu%geH(t$=njJW%wKt^wIq3|8Q z_Wb0nw<&!2eGq8MZG;7_-W{T9Wg>s6MdLVqx?Gz^eWmoW+!DWaPSd`K%NIDPwj2qu zQJ67p3bNBv@$+eJNWl9k)4+%RDK`rHKN6-Vf#jkbm#T3^rPj362NuhZ()HtO&w>85 zE-pSN=1F{1RjNw;(dysTE=%0Rq0#7d z_#o{H`sQE3z|9pHyajkW>LePCTmw^|c$oYWVdeniz7o*N{}Wgk?&+<@zBK*05~?6&&a* z61+~NL|{f+QCOQcO`WUyT4x|#2PXfnRDsNISA-FLY>W;mhYlUWgb5SWB6ufHGNYH& zvD(|Zt30AZ zGzI(uaJBzH91#dP@a*&0@$lowt=j}84e!Ff$DcuhXH9H>@=2712I1uEui<>h4yfq# zD)!%dAF>=ARf|lr*#$dVJc^4!A;7R<*x%t5#CdrkiW@~Xy%U@7X^xZ{-q_jX4qWc= zCVqbE1*~zbhAdYP?0up&wzhZ(sczK~=je{5j;=_({SItv`55x+R7ZlrPPNEn8m_Gm zLpad^=Fb8dW(`WO#;h{hMT%e3M`n zJ98-4co6`SFx0BP)@=jSqIewGE6~CjON!$!fNe zJ}%~ZcVz-VrQBIqj+L1W$Rr8K47UXtDa2V26J)p{Vurl~QUwlTO=d(mx**)e4XZc{ z$lVPQqQ^KoBF4c1u?`j_I=d=8issJFSS{w^PL7Cnb3u%oE23PS5hdpdVizZpDp0}w zoe~TdWSTvZWAR41$peWTVC>F07Jv3~&jKYTZU(7=O0HbHz{nMUwY6k!1Tut}-y3AuMf7Wsks}>aD&?YBGA!G| zRtFh@n7<&%5MWB>unY_<;i zujCrY2*gxKrGV@2RDz5|s#3&tCCIQPMh6*dtC%%B39U+1sXtuZe6X*|P_zPpiYxna z`nB8ZV+3TlZ;=$d14OuDZza62R1 zsNQ!OY6{?Z_F4tUe$i?(NM~{DBImBXBH`L69^wjK@fp~EfClX8D zBxX{OC&@9l8KB=dlL0XT2nqHk0S;R1JD08jK32|Ll=9`O{TJrd-4i4yljxfixHC5hi8@sn(CmUwea z5dCP2{1IdbD1;JTlQX#}m7pSWGmxSDULZq=;dl*IIC6$hx%jCN)5~l*5J0&~TPa9w zz6FZP$5dcE(`|Tu6Lkx#KULiz9{cz2!@@7W#OL!r$9KDTE3;B=DEqs{nSn9CArxvU z&NSq2sR*k9OhxVxUCx2Ygcs^+d`>wk%F3j|piRxDyT3|uR;jhNZ+DX zS{KA~dwmqV1YQi>jXEqx3gAq&K_XY+wf9Lzol%8&H}PkT+;|#uzB`I8;m6Tz^mg1f zVjY@I*o2w`GvOAJjA}!3;5jrG=FoIF3J5ue7Qr!4V8%ZSZhpyV5R!r}%a3AY;R(#z zbqHfhj^d3kccaae9E7DF! zFf#XNJT*20twv?SKj9#zY&(I_oTGSf;yQQ-B*R@G#x*z_X8%kW{IVoI0u%yP+)9A0 zE|rVhsa1Qq5MHi`zU=^ErV3>K45h=14l??n-xDWJs43T9eDMViA3kh#ePhKGV>+>R z&y4y1f=p{nGCLpu8bRg0(!$w2+RDCpQUK>k;m%cf2{xRLohAh`3;i@gg$N6Dy`cY9 zAud9Dw(g`@g1d{j1FzJ`^)#GUkGSqd5u>~0( zU+?aZ0~r>cVp`6YmP;tRbU{5^x;ZAn?H8*9;^c^AVTVO%c2v$!iu_!56~Bm_5%XU} zuIX3tbEycAW z6eli};ew>kDX~9ut84rQJ6ktQk5iIX$3;IT=LhB5`y_!WM@3GF+ly>A;`0!QDZhL{ zo(tEqVQ$10r7O6^wQ-b-DpjTaSanm7DHn(-7X~MbQ)wX6O9Ppp6akq$*biXAE&#xl zeZvaj7?cIC{uyZZ#V&LW-;G&2F5sPI`|(OCB5qvVf!^H0{VdR?gc;S<^n6dK;W^OxTFa>963;hYKXaFO^jUmG{kkPut zszBz?QUn)$))8SwKhDU=!0_S2)vP6UE8R*l#pp}&XVq-4N=b^Sq?M~N^6}P~Y;;gv zLORHBvqtWRR44^-k(_JG^K%~}f`e9g>jgLALf?ys$24QX&aemsy4ZpN?NqCdzyfy6xhN5jBU$W!zX^m$6qW%m#_(V zCwL0p^#2%d1V088wC-I(&t|IRROcb})c4mmla1$al_|c9l3v&_MLf;k& zc+6HA!Ucgshn_ThLX0*AoNj1FxF8hh*P;KLiSCcOeyrkQy#_@$(}m|0+UX#}`093g zob~W3@5vh8jokfSAX6r%94n%_1a*+9Ajp)VOdPD+wI8kD@q=CCHn3~{ChYF{5Oytk z!S2DnB0jKtppTsQ7U>DQ`@4&D)1<{sR}#kQ{;sflq&Msz?ge`uw-ED(!(bdNlXJ4Tei#XkdP4&dc< z>VN_PnQS16w(|sqi_1iO(3<+6bU*ktn-u^ z{nITAdCvT~)^b8ShC`j3OR})%u?|3)Dr|T5_COy>28%fs}QnmTW)OK{azrW4{WWF!cq&rhT;yNFVHkB2K5J|qDDvxJcEn2-Y%H<*PbK-`UYU~UL1pgy)r7HFRTis%e3^NtU_W#cqnK1$~X{yk->(2r>7j^-h zaZ~`80mZ00vIMQ>?nb}li)j1F8Z-!r!95f6@MiQOyc4w-?LW@Ml$AfA=jcQX4gU## zkw@{;%q_U9eHBA z!GN%!0}n-S3DM7OMGsR47j}y=99?(~9h#`?_r+EfJzO1B{uRg&c&-N-9h80#kfCsc zXbPXdHe*TrsKwXlM?hxtKD6#V5iTzdK#5|jsSD`J*!%yB>=9HkOs z2}%`7Q?nQyq*CS(AfogXn7He{65Z?u`mOA!-^z~qt?a1Zz|8EQqI5@zw_{K`O#aER z_eoH?ONiuEfhjOc{BP#}SNcuu=wBo#Q&s330<+*KY2*|n6;^J~BIiy4X{aBXfNC!d zL4!wLM|8nfT$TsKRx!2K3?EwsL6cp7Y)7L>m8w#Ithy=4sC-^!E<7U0SZ6I+r(fH1 z`t?95*twR>fVJ9`u%F18x%;Noc=9}$!d(jpo?gf&5 z03uI7z+^k#Uws^XQ-LSv9fhNB7VP?@iX`Ixua3cYJ@D)|d*B|D0r&pdc>e33u;51^ z{PP z@?9xfeX>EVYvUyFWenxsO!+W}mB3|)+)LjiE68N2Dc0J|B~CS`a5Y~6m`bSvnLk6Z z08@pSRb9An0bhRkB_>Xsh;7@pRkqUHO3{|-#CoC{YoVWrN?macWL68vB$=$;IriM5 
zKGt-Oa;=r+*FrPeu_cBBXem6Vd9DcIh!8 zSC0teL10kbKO)+-wbmUyo_e_Yu|CE}6RoAk*1djwJ;>NXjBP85#hK} zJ~O(}T4cme1s9UjR-jGUydO{cOohuU!%(fqw{Y`Mf-xXl@})d&sR*un@lTN2S0ow^%0TUrg|G~e3@dCs>?IzSp}Dwg_GZ)^ zEFWFJ7&Hn=$Gsy8(0o`r8ihr{J1|0O9VxI6kjx-RGJ*k{U>Ld;<^gg~K2p0;Ad@2* zA3tg_Qw!v6kPn)hNJ@sHLrfLO{3*&7WC$$!B6weY^_A*Y(pzk9rD&_Oi+ZhjUB8qsO2MY8KK8Zp55+x?Ep~Q?4)QZG~pKI~Mr**b*L7*m9F* zXBKlaij{C+u|e%ZP5&xv(Y5OnjM&0M*wVVX7~b_=S=yMMYb`=LU@369UKmGfJQB-!Lx8{|pb!rIq>v7j4`oOm z>QC8-55v!)<%D8*4UB~$WR-j zK8f%M&45QQ0hxd-IQL7E@Z|$%%TVwp1toR^2pL@DE=M(!TKZ+c(I-{K%e7xP+=FA_ z&_5gYz7qBzaX&=DmYSQvKMM|$Nw77BAY)+W$NLPGutONW)Ks|hx0q8Z7sR7(6)x1r znvJ&3%=Kb*ZzBX5+A)K5kZXDe2*`98h30Pw$mDIhG05l-M=dK(n7L;4cT^grXm@z= z;ziX8GiuZ*BqYRF&Q|)ZAj8yDAfr=_sElQi7}4@6jBs?p*RC#D)r@v(+OxQ$D7RwIu##1H+PU&CVJ8`kNZ`VI zMvLf<0taR#2}U!b9E^xH8PxhUlo-arWV`;0wjNBP_)jo$eHraI(O_1>Fer&itZqbj zKk*VSMGu>j$n|Hev!~R0G&Vvr+FkEH$?7&$B+Uw6TziL$3Z`=NO^IuSqZwmdEO@(N zV{FgKN5w@B+?98ju&O{t9uEIS2r&ehOY*EZ7N$bnT`7|%sA~Ldk>NN#d43yz!qdGc z!TputsM$+EMk+2ARzx@{ft$XDN+pMTuk}q3iIK`mwB#c(^8F`Dfi0CWH5QH)ielJ@ zaaa3n7}S7%7VsigUF%ZjOifM%(7sttcF>k;pc~1zD5}d@U_ipgPUA|rXZnAaJY1<#J-1Z3VrMAioNJ@HezEDz$6 zd{1f|lUfD13S_F(pQHX|i;R}z11nN<(WFCPxW3L78L8<6mPoCDg*z9ETPGjXdf54L z8Gml9mULw^+s=lr+n;|&?J~b6JQ946E{gP3cQ%v z^1+>_xdU}bF--h$L--L(CKD#+tQE+|Y3ZK>SNVwDxEip3s$^0EGU~%+>xzI7$M2}F zA({y&gb9Fz6u}sn3D>Yp)hDd%f*F7Dqgq{xVBcTvfh{}!lA%bA&OyR4FtgyEO@Wd@ zNM`5AoxbJR$Z<0QZ}xJ}YL#Js$*d$joCZl){nAy9#u8SbG6a>05&$KGGWF+uu{NcF z41tJljB*`E$%tzB$H1-qDBSzz0L15Q#>LBQq7`M$;IAE9GYbVXTp2~T{Cg@6D5MZv zQd83~V#Fw{S+fq8FLTt3mL#`Q%u<<>7sTOt5!H%wQA^wOZ&qSRt9A&jRtGa{HNe6e zb+N>&7M6R~!fLmgh;sEttcxe&oION$N3^RuBHUfD+QS*)o-T6ij7T}xL_4M#?ZQ1p z_t5kR5#3FcRoC3ODv59x-Q5kVJv~JCP}hlea~C@|tgPlL*Ash+xLw8bYHkvSJEFu5 zC6eMHB4*c1gjKFL54VvU*+)ru44d(wsO#M18Z=jKCEjt8PKnNLNOE*Xti=;Ct~C+v zT?dP5*ToR;>gZgz9`GE z;^FMaaVBZ1a%7ae$<99=c6|hzx~_zC??^cHjD)*ivQ*e4jD8U?NgQ4Ku7+cGxklI3 zs4+MPHHK!uEtHkI1em1aW!GEc(Tm;e@p3;L4-+ltB0b_^>XQOv9|^0QRLJ_Kz+Lhd zbMJUKh+9L?SlCOPTqG|s^%MWyVkPbpo>bbHhp|OjYiXv~RJhm*BYC1h@*;v1VMwd2 zNq)%gcUIygud6vA2Ob^Aqu~?pASPQtMxF}?ViPzkyZ<=0QjM<^gDO>}{$TZ+$H)+5 z8h7Yp1sQFOj6It>)yKe=ip?VH<;53RkW9lxJ{k}CkesBJ;TR@l-=A(JQwdH|PgeYc}Ic5lSNPGlL)V@mr0w4l2P7k@n zGK+A4uxja%>l*}ojFM^4k4Z8kCkeZTf0Sg%qjBGx0}+?IO+bd(x+s-NXwOC!!nPR( zkN@VGMmY;RQ72EH!tB{|@#&{?uy5Z1HTajx`l{YkF@;zt5@aYU4LSHi{H1>WE&ANw z5-->};w?Kfde~XuV`oHwojt@Y}Vh{#Z@@VLK7sEyFw^0c zOIDqj&ej@j+Y~WiE2IJ$x&ZRm?Zk7vC&J^^v2gc^kiu8KZ}~nQLf65bt-F#R)t|f> zZ$+QL_zf2@Z~GZc&OV0TOShusv|P9c3j~A}!F%K`G#6`KL;vezsytQaLED`lb7vq7s2heCpF=|QP;H1XoBuL(n43EJ@cyR7My!_2B zy!hGocyvq&8V4nzLH{H)6u)(WEzBte(|vQCP+;oPHG9U@Q?!CO+d!LmK=c`^|8pY zQ-Aqz{8H4ONN)YP5S`Q*{4?MzA5^W0yWlx=Eu7_pa+U(#Q6R<~z-AD1Nz30@rv67I0wgNoq*O0UR7BapSBc{{ji01K0#y{Bi`YB$H!nl7I+nDC|R5 z-8`(hNR7-bw(T&gZ5xbf`!vS2dkW(z&pxSSqSBwjxVCLE_IHpm zHx#*cI~%#78(prs8!K@i+eX6u|0)s(Nx$*!p25d$pTrbNs|hbWgVE1Dg#qoJMBsBz z!{>!I2>b9I9Ne}E6{Q#CJto9x?=rES@3-Eqq`m7}x6`jcMsP<1nX4#Rw+k=ynGClM zW8vHXjgr$PwAO)~VEfpaZt0DP#WY%Vcrk}*8+sn~s+4p#5MgjUJp2ocsigEk+ z9cVfKG~QZy7GdcEI_t|ZGVc=J|7I`ROe?^LD}ThuoS!i)^(Z2V83gHgaX#yxB{@fiq0d~Ej1XkiuZ(K264nL1p^Y@|Azyf&oNry+j z0=NZl6#Ml8BJx2A)Hn>|YkZ3eJGz2!WD*iIRCq*ir*A^~;1qzbrU& zotVCvu=q#~O8~@GV8X<;a3qs6N)5;&nO}{O8_;OtI=F_UD;T+BLIGNQl80uJ3Dq2s zjcQUmyZ7TQQp;J7`=+cz!y$PRw^&%ZEtgtU9PS)hfJTGT&}3*P?wg#AmY){k{^^@g zo$1s&M)lD<$aPF?C6Y8`YY;OKw#pE0ERwl+`^Ukl!#Ldi)?mcs?f4bQ@Q5{33Y*^f zc=U4~i<7^l(g*r-wo+864NNcZJZ&%eZR7Ud{Edlob^+-y2BqQgx!+?z4$wQ{XWaeC_h`HL zG$w8V-u$!#ug)k&->CfpQ@>zv&PDhN(6kzoflkvmU_{!_==RMqR1xP88Kj!-Y<1$1dPTZcI6>VIB*4DUMNS; z)C+iI%6Di!A|DS=%)z6RGX-oSP~ATq^#^C+?#Ua_c+?uy5D2Ojnu0q=q~WPg3lWf2 
zicX6U;Pyee@bXE4r@)A7pA5MBW#=k-9ah1cVY?dpHPBkBlB_J=tA5-t^~CtL#rN~iH5`0pyuFWIQwV7;+u-56Sw1? z)GG*Be-*cnTZg6+Zt$9O`0_k3Noq?kesdJf$E|^HK?S}ze+3hF0qtgQMa|yJQLE1~ zbohKH0@Bap@rn6(XZZolI|?i*mDf|QX&LY>^H>&Fz@F@;MdeM_x>Wy+*V{T9V` z6z%Q2EVyz>ez{)wB{3m_{32)P70B=%)?i$9Q7DL!^w&j=0lPWK-j&RXIdHV9=!$CddK7Wc|V|c%vp?I3v^qw2X9O( zLf_8{(P8jXygMcabJC7s>AH(Z*28Ik@wco~YJwBx=ZqVD=Y?VSy|ku{kIg zF7hFGgcit0yAAKfUPhZ)+fgqt4sQLT(P%^=K8QMwMMr?hQ)TG()iyjjAO`*^7cpB3 z`=J{J2<9C?yXjKvSbGI&m#<>Qv8#AXKGsG9Qzf&AmT=L1+=ID0u3*tVVBub1@!_k8y&%VjOVM%R5p;>I zK(ADw#guI@bq$AyZ!%u^_7Db2&B-^v6!%K5_W1NoX!Y?%w4Jq0z~s2pYWAXze+;~O zN1*M@9q5;E4ljSY9{2Z;!ponm!N*%J;)}g!@zV4{^a($J;M8+?{p;PhXJjh8eI+9a zN)+(QhRL6U0t-=n;`cCe7jaJi?!Qqok#%ZK8`r=zSYDlI1sQfN@fk=?!OVfVI5T=B z@{Fw8>#Iy&WgKGuH&*M{ugBoQgDdw|vW1v`4Kh-wUMWL)#YGVoox~9lYsUL4xxx+b zC>3fJMunt%nMkQs$|R{d-c*S^p8rbpaQ=qESMu_OOKRDAy;ZHO;tDQsC0Kb}x>Pp2 zsAtFn@m%?SYx1u^<^`Y00y3lE+$S6kgXNpzxVV0Ku!IzW>#x|!H|{df)A6+ zaL<_SaO)9+#$ie5pL+rGrDD>3-bTDRDGQ?#e#Fr5?U<2t91C*JV_e)%7_e#wd=_m& z$Emq^NGeHp4bH(m69tAs(_rWmA#lKM<<)p-d?7*-E@FVd%QJJ<;K>{`;sIO-6ywhCmb)c>TqMc0zlkGMJ`*8>n0o|=QE z?R&!W^%1BZkR%|(E}e8Z4l022fD#ybXTYgX8d}c&5q&eR;K7l(s4X=B#~v%;9g=`Y z$7dpF)oy&dr2@lN?81wq5~PN42_MB=L}Lk~+0cCSSbhW{%XXvF=#>atya^x7UyD13 zCP~2_3pf8rc=cU|dxj+;Fy$;>oKb`~=dDA(Wj|tSZYf5`AHy3HQ}OD^7>tfRfDy4j z;gkF`=rS({A(6Y$Ysq>%Hz5Y^e7yyIV$b1@{^_X8b!Y+-;pGzp$F3_-Pij*g79T~| z_)BOxWi4v*~ z^y%hH@LTx<{8s&lPqzcBegVd9D#H^WuZLT&m8xmmqklH+y2rr2N0MZ)l0gt+I3p`` zgB4^_;P9H(A`_=(EwQf3%v^LOdggx7DlC+>Hk!YC=8=+;0)Kyh1vK2QfaRXP_tI|x z87-476F9jBGG%3#aOv_zlnS_)Gc1)Mv%s_w-K^41lKrnJszCW`2;;-o@v4+^9E)^S z^%=2_B&mK$o`sZktSjm9)sDBDxoU;dm^CdqxRDqX1FF{z!8O+}aOfM-z zXyjq|hyRFC1!pmL&sB6^y%%?dEJ2rL8!>tFWprP@3#~_GA|&Y-e0dn?9eo^+O-M(l zMZ3^yYB8G0wVs%|6;FS>9W93C&T_U9(Kafbw# zQlz@=?zY?2ZnxWR-CZb9Ebh3wd!blymp}+{_h7-T#i6Awv{>Ap?|IMM8v+Zo+yA#) zXy*+tXJ(GfnYlCP{ms!5rPX3MDN9o`(fpOpaDH&$1s_V*Cjj) zf0}m^0lBq!Y;+--_^pO>m*wa%I1?Qge}S1f-y`nx8jRm`3{Ur4jiAKKczpH|LdqtF z$wtrhUn6qa9y}d53!(ff=`@{RJiVkgW<6X2*25!c9qt{Gg&rxDe2KT=)tLv-n=mmp zyB59HevQ{gW#g4$X&8}s3I2-@qR*zY_$$Gt6TzwL#^dp59&s4<0qJmv zk^v0y1f|We4@_p6mW>wu3(%GyY(AkZW5Qz5zIO^*hsB|Ja4cFyZbr*Kbo*{bBYx;L zAOtmu+{BMM`iF03S+ogF_@ULNPcqsJ$c7g`=3Ikf;L|S~-hFaZ-$M7Wboj9Rb>>G= zqcDD$v0QE*z5#bJjn@5AnO-9OQ{dpw55cfBI7q*kP#JA0O-j-kTKYLkU!sC~%6$Tk zOif{cj3IZ5+!SO=S#XG!s}crSoR)>AuXTg{n}gvTk)Xc>(t%ZK7X)PZa@d8WqjCRC ze4GIcC@Mzpf^&Ft%{O>=-DQm2QjK*q^wEcbPjWBf&8b=F9CrziFFFM;zTh6AoAA=%eyK#mDKkqjC4hTy#yZ#nW@Yz{~TGA%b5-y%H+W zFR=s@x0K-1y;m@G7Z4PE4goRW<4>b9@!^KEcrWH_JTf^CPt8A|)`9B0?mN7?@D%Ru zos32-Al$pHMk5v$&&(%;Bvsv#H3;g1~LjSx| z7`>|!?=IYrzw!(2w~?#y&YZ0nk$Va6%q+mmlQIw%^99BgeS^UTCHz9$4c~w@aAl#` zATkYR8Imo-u+>bX@>L{-hRl1Br3y8-7YE?MH$#z_y%ptU7x@bazz}E@$Pg}TSb$r> zMrviGP*)IREkJ&03M3~dqEDY**t~g@f(R)rtE*|m`B^XfH;#9$szR1wYh+efZVLn{ zkfERS!jRESWi-H2FbmL#s%8&H7XvEgvr;jr+^CV-Fv7mL_0@XpMt)YU^W$44y+*E8 z>#-a8S&#F&E!*;@ib_wE+d>sw@t*RY2@J|T6ZoqxCjeS%XsS!;A)HeGu~pSF0xA5R zuF)UrBVz%X=Z5HgWO_*l)ik!>9LLgO#L7rBJ<+NIc+0(UdFc#VwuA7SNbVRnzmBw62dUj37Jr0 z8?1wgNpsz@$kg$ zcwqcCv>2R;d&U&vPcuHpKR-Kzzs>m)Ee96BEhGhYesTP2Nk#iHd-3S3V`$PR4ef^I z4jfK$wx6z@{7-$^Rhqt~YB`?9UBvD#Y8 zBFBN1D_5dlzkb-cb7x%~Ypu-nY1~l7NbN$1F7s>g@GZj@9?m<47N$YNZ4j1#@~Bxq{nC z@aeq~?h(l_ z@tD?!(4t27(5TBCSK-viUAikXk%ut8s?ErRBg;e&Swes?;vf@$5++=NGF2^&v-H=I zff9iPgzbCV^T-$= zW5{hNHw770tvQNaU{aO;x~*FFVs@4p=`Q3bGPTGDw}=816)O@%W-&NVO|-r>n;(l?uhA>$KH zoa76M1)7P4s!0l*C>DYd)G!u2a^HgaMHQB=7RL1STLrIIhTy?B!t|!4a_I+CsX9zn zvLKgc537+Cw}dK^Nr5oQWATX5h@3Gj)qZ zN|Ta=vBtfnu&}K&nDvy&s7*1l)_W=qs#3=#<8Ac1E~`PMssft<Vde8Gij#cYb8OJRENR8`PqZQZCts7OJKtJK5*^W#UJRQzYLpd7t_2I)%ur z;IVuM`OX?-0|o00KeCOxKZRtIV6_ 
z5t>ezlGUa&ST0KQy+91hSZRjt#WEv`Dzl$knxxgFq%y~rlqr#WEV`R2`#bf1cmiP8 zz>I*5e27Y!#IjFdPc&4{XMl_$caGe=vo8zFT7Iq8R_dlPIwcSHy%G%1mxsb9SZ1%) zzdke?eNDayRA*IrIi#5>1$MGjJ`K343RPc3GJQY4cp{ScawiiW($%cEGFz`BUr2j- zNqQ!+FxgC~h-J7Gw3~SfuV)n_{AdY=oV|?Rhc4lzb>E`((5-OrkAa=vMmVzoatupk z!4n4?s#F8ph9&c5q{jJ#mHK}a-{}AW~r6p zTnxvZSaj$hBg+p|s%>S{RG~;{<1vx=_;|Ip(%QA_6vT*#e_2^sUA&(PGIa)&Dzz@M z0Nz#Ap+QzzzS>zvR5z#+?|-3kX`qx!|j^>C{WDy=F_f42e|sge07LB{RHp#&MJk>Pt5UI53a9ek&@E8{F9EE^3f zz+dMb!*eUo;t!vkK$C&n;2fDsfZ&G?KS*Roa!-PYPu~={1;p^9EfwvD??U^Ld1y1b zkRN7*AC?U+;Veh|6ZqkhgbqXZ;=a)b)rU^2@jLMN!hLvo$sWA4{J5HD#)%(m&H2I9 zID#KZ()1(KRSe$G_r8ebP$4{q6ybq6htXm3A%2*pz>6PUf0(=r&o4cJzs=qUpI%vT zkPjf4OL{2ISh-#aJ=fIKFurPHl6xd`SzQY<0x?%7#It~m zI+v7@2FMt4`^haprW7@$7g1Y&S?z8}O2xe|y$|Pq1)x#zdNk-Sv*z*(C6e$V>l(>d zf>giu5fEUZ!LJDgI(#v#RhRMt3+TwgW-^C?IfP#_AvFF0e0h5E#RWe4MqX_B(vjhss(P5<6(N&CF`W>pism&2)0H-8=?^4pFUf>}>Z?p%bg9zi z*B=WPTfY1z>)K5G`jRSSTV9tPzqDj+CN;CLDi9=|6g<3VENso42$v_XTK)B1jdbrFv)c1q&7s zWQHLxuRu5G638Of@b!<~P{dnQ%2#C!Z*WX04@i}yEQvtCLKRJ^09AdJ&czI(pZmYB z)|n8+Nm_Q!Uf!(4%24tj0 z#_h#na1YoB2U!_f$`-zxGW=R*;x_Z+=E->{F=Sf}M(;02$HWpmJmmx)9#@3+GJ)l= zLOe982#-wIi>DTx#w)A8!(RzIk5Bs?(+}5T)V5mueezzovK+Gics<+$qv0F85na|_ zLFd)q^+cx&@feEK~w_nR_IIbH=8C1~F-3(wCwfyc(}g+~<25SCl2Tb!M{(5_pkLvpI0-y#kU0Ke<@Mh_w!Y?HiRscrTD#;7WmsdJ2%WR$eg0hkRKs>jN zz!&4$Me?|UBBn1dwu(o!eCbGEA+_uP(-LS=%OmjRx22zILZeD`v`LP$;IXk56jHOp zLdh;-8@zhw!ST&$X!Fhptl#z}&auqD3NSzk%ZF0wELkmmFz6|ULKzIPXzItiqtt4M zrQd{F6i|*?{VC_qU%;$cvoUYpXE<}_E9F;JB?acycmh4*UZ3`j^qTaoDrK%!r3{v; znI8Zd1w22K>IVI)s^iuRQ#a)%Zhiev{C;YD=?g?q(N+3*5liZn_fb8&qSnSc0dn46 zDxd5+{jnh9`utFM1jfL*Kg%NNoEyNhB!C|k-4k%v=)HKG(D3f;y=dpR79L&W@aO5L zFsul8X391^J}MvYFZl{#=`|R<7Z|?38qdx-jkgwmh1vV6F@kXM;`DuJ+b0VRLlWQ} zv0jscysel(T7fl zC99feds!YOI0G)hX>g^=wmm=M%r{V3d0Qz#rLGCd0x|l6k@DdtWtEhRMvaUicb41~ zWTajD3Lnf0DU2vfHYcIQ)9;|gdqdGCDgm}1Z-irL5+Q;>A+ys4@?j4mFof`dmvxr{ zw-PeMXm3@9-7ZwV8ggJCM2L|Z7U{euJ>_L8@jiS(WF}6&P;%T(h0Eqkn@h;CED{+- z<1cF{5r$-0dKpC^wI61IA*mga6>ftFN_^226sV>mc>(DPbSSWrbI6wqVTp0{RjYY^ zYk1|#G3GyPv)hxwr)uco}X;zYO>N@Dk3om()`yhpGWqCk_{(bk86~)vuCkzN{2_lCTLF$M5~vgurBX&T&$2u*a$H!-KBqw8X8PP zTPs6|1vINJ)03Zp(w9a=ezH4Ab;Cr1RbGYiG8#46wfa*w$0lO%py61vIvSTQ>T4@m z4Vtv_l$BMhj-$$;TjSkAm3_VLm{Q3{o$`RHB@h)ztZ#I6!>g)9{#PpCR1czRd91#w zsv1$XeVeG|-szp5tt#Mc)obr(eUK3qn6avkG5T+OE0A$|b}(Feu7^WkmPP!ikWouo z0N>r93_LRb47zMQkGH4h;oj~`(Wpx_9v-pR_yoGG{sx_5FQC`f zT0A@LF#bF+3y~YX!aJYjqkYhNxOG|wr*5%u4oHAkP#oS`dJb6{|#gWV)WV^HPf$r3>qL~$Q>j%1sNIeD=O8xBCC_q(2gME z`&%mjU#!N#(P%{V35=tr=o*xyw2@x|8uj4ufOs^dzi(hHn)+`*6aS596c7V% zzI<-sd=Uvnj#MYU%xxJ22B{Q5eoqC;5fMYZGe;CdU%AyqG3cLRSJ<< zzT7cttt0QCcr@UP+<>lE&}MjA)IFFQ8V7ft%Y*6}vKb8mH!?pl%qP>9a6E@Q^XJBV zdImEchW8ANW?Hu;iTJsS2{j>i70&@6N#3#=Hl;@6z#)8Y92+Y#85 zu^q*wQqZ$l5Idy`4DL#yQAu#A5|E*Buclkgf?e##9jY~ivuf$`FX0DF?4l$Vb?!Q+Q|YKHN7j9{-rR2ma9|c;l0U7?M$fH)riYn_!k* ze(`+&(^wv{ya`Li+iQyP_UdzZXjB$nT5$|7EI*63{d4h;8T&D!s0L3>Enr!;2_All z{6LeYY5CY87>E*Nd(|OyYhnm6QZp?d zeX6-!jvGx&hTJ)FQ;?C7Fr|DrtEGCKlA4l=$DVo~T9XH%wRs8J10O=WhweSkTHM#6 z-S-;xMe4IKwf`6OIcV*<-{EDdxZLmXjPBm|0<`-X{((23-AnhLS9#2mUI)g#hv&MN zVI+Qg#&6Fs9jNUYrX5w>dw5)>!MOL!wnZhLsH9<4?%Uq;BJ;%Uwme5Wx*eE?QpV}< zimvxadQ{1uQl9^Qxh6@Es;)!g$o{{mPcv=0_w$+_;PD5ThTMmHc|YX3?&I~zy^(b9 zk@R^kb&sCsH9p7u@P4#o8r;_Ilf3g{Q>`anxqOu|dtpSy@wkVRmrqE;%G z&x8UmF(l08ZK4XcWs^5c2N3Hqfe;;UbbztyRghscJ+4))$8P~LveuIOi^J7CGOBi$ zAY&J{O|JnCOovOaEIhsVG=}V`z~sZ15Rv~a{x*YflU9lehl;WEn{s5mRa6^bx2}u3 zJHfp;gdoM;DeexzT?(N<(E^3w?(W4(aVNO8#hm~}T8bB!!@thjd#p9~b#jxDIp#Oz zeV?h$#UZ1093T%agr)7;QKaEN`j|0<@AksJmD(MW1Om7p$oiY8tanQPd15Q~yZli* zJJuOg+(JPG3cN##>#ON63;uQZ?Z{MrQ!3hC|Iyu=w+GNEQbWTF`ofwgi@Wtm^jQZ?)| 
z4%rxMs1aP=5Hp0S2QZ>T%=8q)Rb;~ja2m^J6-Ld*l8nBvW~oJ1qYG=$q*GEqR#Y$O zK`1@=GSoP`XfyvKlJUb@4@$w6Yt+PQKm(MGghmq`&`2$_rX997JF@nDSMMb zo`Lxlamf%$R&JxunAiqZ>kgBA`_1JPn)zodMCujqGC}+2WH;&56toIGbDQd-)5Z{2X8Z}nm6Q1G1P$xXz^S2%%o zJ;65Gf9EP$VtrGEdobI!%WKbyA%Si)D%J9Mu$rk$ulF5rvk7K;L>1*$6E?VQ)*3w>{kxf0X_9XMVZgFEdIkZ@35pkpShm&HC8guYZHoreHNY!p~ zOie%56{cZdIKtw zz@X#IFsbSF&%&kn@VoTr@78-{j~_3{x;t&yHqE!RF5QfA>Iao@v)5Y|vNnI0q%>1C@@4Yzt|Xa(}FC6A~sqjPAiV$`vpy%wzD$ z+!b=eRL+iCj2BKGoq`BW6z%#2`B41^C{aGSI!1DKSztO!2C$hVuHw&zfKrQL;zBQ7EU>u!7ZG=}veqIU*t|SSjBR66l~< z{C?ncBwVUW=HHu1$qk3Te?61gn}FqHl|nR}^wObh86InsK~nxG;j9-A{mllGm6KM) zii;FMntLM!rMV?z#ev`wKlH__AX^NS4e)a2i)`D`E^V;SS4UV3fICwu?SIpEK0%-l za~)E3bcxz*SJt;-S5#Bq(0V3r8~C1Uv!?!zBU{4Cp4B5n$B91P_Z#wks!32Wa!(}g zlfju3*RU^Oy|FdvsSdUr2E7%AssDZ%lDraB zr@IiX0P^bYTs@W(tt_t|XSiW6RSO?=evM=8pZQyN{Ud*!EzQ$t zZ~d2$p@ul-u=*R4td4yv3H>72^!>yxD78lYYdFp+f;nO~02Qd|M^~-L-GS;F>c(5$ zku>vi*N8tG{Zey^dN!mlK|85mlKHg51!X?a5Fk63sez0xS3Y;KLIX zSRc{x$5~cc9L|ErBOx@v^ai&+l7n)*iqPbnWaOtR5+vNVCtd3`iBAWK5i5lO$0I?$ zmgoneBqFa{7fDBSSMYDNg@Xp##y8&y-W10c=9otH%T-nXxv`-vsEdHkuPf^k!N$9R zO9%vs)gnt_Q2s$xdy};pFCZ<+_0@+zS8r*irqasJaFzE+W+nCw&QnClE8;4wKGHd&)gJ7l+kg ziSa)Qtrs;mWkK(#8;edkJ(Xsi_>0U2&uj_}Mm&r_L|*Y4hCN(U=;JaWO!6!2?41zB z=0cgTGC47tPl%%f?ZI{9JAmR3n)BqS&k~dea>V*DktoWCVj9H|a2%Y5^4gXP=gtVW z%>#(~=sMEqeoe7BoN=^fCC?@X{XOJKw`}99WD5P`8u4QuenqvJJu1~Yyq#bVJs!MN zy(fBRsyOeZWRh%zQP9$jUnbftC(~$LH4kSc9`i?e?L9IDJQq(*@}-l_&Wa~KE=Tfj zWQB}SZH^CJuGkDki@%_YCzd_*7Th$co7QWd4DkK7qNSSZ#{2aT=hS(e4z{kYbSA1e z(X!ru+E?!Vt|QSFvQb*a1aW0F`9~ikFuT53i5h^r z63b>-IG|L0@SPiOic*$8T>Gb&ZF_HkhgoBK&t>)-_0uk#Mx`D8_86HN_a40hrpP+X z{1YP`3&#@*zxD-{;Z zTVy}VRJHz0Fwwdd9ZQ2>^kFulYUPrk#{Y@z*BYI)m$lgZ@dt$TwG%`<2_%w@#NSBn z9uM4&m)cgNr=d@M=+}>Enb>POVnz0JiFA)lb3quxsQ5@-?UqA)JHVjnjIE*%obN~F zs@Bi>IQ-{Mw+ppQZmH79#3NTrMzgL9DV`Ji-QCG0pxzbn!T6WA9$J%-H}_0)R(ai9 zJ}>zF78PDG0b3gxPOY}9Hv(HXI+Px>J?aN--3Cz`7fSQ)PuQ0OB5N!X`yR8wdR}xk zk6R@+Y_p?2dRi{+<~^x5yFx!Wyy76-YqzGZyDhE{$caxG_Xww$A(?;s5G6whF`L(H zAw#7x7HuS3<~52Po;+@!dPzdLXse~(_BmD~Q+B-D3>8gUi3{7-gOa0nv0*Z6e27WdxKCrdTftWAFDu9Eu@w1Y0hQ?3su@PrSz*b9H#678zqM1Ojz)%E3Zb3*VTX z_@GE+)yW-rU}y!4oVzW*^u8Yfd#b*Sr%#eLAIw{(xAWH@Q+t(kSvfK`in)UTD7I5r z1XW1l&&S0#(|$5utb(xYHTSBqOL^l2F`f$1ZSM-=cssc?@Y_=6HOgN&rYR?(6K44H zB#;u|BI`hs$v5mcV@*4h> z`mi$-xTDti$Z6vM8ZIijHfp=qB_aQ_i}`P+IdUz>MpDZAnA~059SivIJ8Vjn2CS!{ z&NGj*$zbexl8W4bk^Nxz)+D+?7FJOELqr5olSd@FGQ*;OqtqY99&W`T@F|*kVBU_n z&F(&iaar-W%!{|oD>I-T{5Hi1gc~=q0eAV%VMVwa-JqCTWaoH$E)K#p`_y&MOsFbU zEaQljbM-DNddnH&dl0}R3@?zxbfo+S=#|JU4j2;!VQr0lv+>M)QT%ou7v}NZUgQ(& z;5ujefhLp4xFcJ3jSNE667!SoJP~SSk|;~)m@k?0J6UwZ9>Z9=F1((AC+hD-C7dSC!`?B{f)E55YGT5kWm zVn$U!9k~v{ggpVa`#Y~6!#yHb&2R-N0X0<%*{xMGUu85BFvkw~?@6(72vnVYrmMJ_ zmB06Kq*^yuL3LA|Y#^p-EQd!{x!j1u^8Ljjw27pvMd4Pu)4t9uG|nwmd5KPX`;I82 zov45bW{r~wubK-tJ%HWHRcQ!2Lpev{cA8y7rTN&S+rsW5u)$7TDZY%!i7&y7980~5 zr;$b8>5WQpNEmYB+v3NH*7&>mee@pFYg=E-&X2rKqb#Bro?^`q(>Oq_71LN z_fK3nLnXChqodu2pCYi%a^a>wn4#M;BIkce@wX(|9Vd42WU@U^_2ns+u>$R`F6WPT z6M3b=oDRYi$ZH5qjpGx^+=kJ8{H!i!PWg7@OuCyNacC*qP{9c5_nl;33;J1*Ml|!K zkDtbdnvMKg$!cc|`=$0xUpfC2PaIA6YXH}7y|}UU)9*nP#SETI$AOhF9$@Y^gC|1^ zt2hrmlR)*74D?IsPb!$-x4p^83^`P}J}Iz_;6YGJBZAKF?z=+G$Z2>m z5L;Ff^w9Mx31uCxY9VI*MN$1)YTWPAZ0!eQktc{Ws>*&eY{<&-dZFs=9966#fhy9g z)g$086lm&l{R;iE`AV&*WWcL-Bn?571Yp#Hb*z`;H%`iW=Io+Dw_{J1^;;rG09>_3 zjHhzBC~d@iyE3o7%$&VQo}hNxiq?BdeUj{#N{T}mVHmG78AF^q3u3vbVdy6>40H~U zl!RXvp`Hf*mP2;E?uVNbn(L-_p949iyc)zvno-=+CTQ zdSY_n3B@wpgk63_91}d}Hw}#ISYu;AMj-M{110!+$4Lq?yV&!>v^3E~N02|jdC@cb zYPw02p&NZ~r)};A9-pNK@%k0;7_cLAHN!4*pzm;YyeE70?roU*7`%U2}RRZj*8g{uE#eZ_Be@qLV!>^B^eDi8|O1 
zPPmq9{^9P4)#>_7MmHK1Utg=q$hA*$&ypuQ`%COxYe)z5&l388<~ovF$nS2?vbl05 z0l;H*L~#qPrW;_MK%%sMi$bqnre8)eI}w z_afD4EEKD+W*DsRs?_}yNv%sor{g_kJPG*51lWtJlT8Sb&?%JYD^iMd@>N78eCA#B zP!b%oWU6Xryvw!y45sC`SXQiG7|}_lTVZ#R_HSk`hSOceYCzaWiao5UchkPFFP8s) zDeEvTDH1S}W6w(}`fk!LXN6XT&SRvy1u)}Y0w>p+U~B(gM;gO86#5-uU%%RQ3(Xl7 zP+hF;Jl-(xtbev?Krp6P7yn4xbt*e0PmXi6g2Z1tTAD&%iW3koP7N$=esdFYG_qSk z-;DpNh$_#?$oo(qR;M1r)*K#9en?N8Rt;^w@^a!2A}=uW#=ODXiofmw{tzxjqamjr z5}Wh+!S(_Fe$-WNxh4oY)*0zAasW?eI6C|uf-NowwB`+V18=mP^bN)Bz5*VX> zmE!VW5|qYB(4GVN|EXnKfchp5*zYW}c*3eE_MEPYLuhl$Eo_LV1-yQqp!Fvn0r3OtN9Z-#`t-vZv}fSslpY~^c?W)Gl(&lptLirKlC(JffZXR(h}Lht6$%u z*Nby>)zTv?AvDL}^VKRh=Xil;&?rg;u}#W%Uq^;#$%i)uSzZJhol2zx57-2a*W!|S z%~a<5WF-#{0gw;hvBU)(aYy#kcUC@s@(lFgU+;7c&9%fb!$eOguJ*)DfJNB0?mYe5 z+BrY(Xyn~Q_L;$@P8ay#P5WtD(u{a*zgB6c`3oe* zsetWCM@zGJjfOi*uAT}%ndz9-9X}8E)UVCmlDF z?T>nYu4hqEN{?w(#nVGKiY#BeKANv)W>Y$OI0P7R1BY_VYxK(y4gUT9{c)ikW*F!V;IU19HADilamwxYAH6>FH%Ys8&@#A#f!pD;eindZZB>K3m@wZM^jhpxf_$HwqsCDp z{?>}p+lLDbS~SY^r?PULxWr9mTV#M4U+$vB*_LE%8Ntau9OsfF_#+p+ZYsRY zxmhUw{Q%!f7h#(p9*64Q&4GY*01pR8?L4*I#ha&+Uu5*9{?{~E!r{=4L<$#Y@S4GC8`N}ntNg4KK<>mVMy?>A;fip z!B_oDzpX%DxMrbS<3G*NMJBYmDDv&UxWf?d6Kzm}x?bwpll6%8?LnH=S%GR$`}G@9 zdH12Y=Ifn^_{{J2%CvcpLIZ7vq6L=QTALO9aqj0r5y!Irwh)YTxf!&DR zKRGs7)^;jcxbGC(Mh8gU8f)4pv61r|LEVV^C>=H=bJQ!-k%GkCO&?V`)iM2tkDuMM z2=-$5C9>%D?a*d>==G#0BYzI!Ek9+W@AJ`N%YmqkJfg8t?>2Fb`bN0p2MH$=2%=*J4M;)m@G4^CIgk&L4i#qxny7XLO6V7w|Vlr3U+L(onNDP~|_?Z(|shl3kP;4@e+w zF-%3hY(rKMxnD4*FmR=?rpG$yNYC|1?!Vr+*#8+wuj0~Z4B+x63F(my^S`%-^qfm6+LuAD99@$Z^sA4(&M)=|3J| z#razguak1Vbuh)9{WeE%{S_A|bvcv`^%F@w4&wx(s~2$c*ol<-x%g09u2ElNGThDm z)iS?Dk+j^z+THplR~abSJufeJGm4?mqn@?46xp7w(dcf2>>KzKZT8#JnHf56@Mhyc zg%{p!q;#tI0MtV#7vbS#G(mHti6<|h5Iop{W0>adkpMOJ!w6P{5pM#Xx2pzDy>N5c z0=Hf51I;#9V*-2>7BbMPrjnntrRPKz$J|9kbK89YgiWU}%syn3ug#>Ti% zfiqPqKFmXWeUaHFl-Vr#E&dzPS)bi0sK298El$a*e5UY{1Oz#PWySxb#N7*=V`t=s zPgy)pdia_cKFfm10*urUv;-d)9&#+Q^dDt8O(&E>-aG5~41`9Lt zh9e4T!vbGik&t8)SMgHkW0e2K=pcmy!qV_^H!0N(?Vr+<6>-$8zK2&c+%+C`MLYcs zAMYa&`iROG?ao$3==y<}>%*^VLX#R^=$9|tXADc6E!s%HfkI?v;OQ3J~~v6t#FLKSRN*y8C|x5DE~nce$+=gCq_IMLjrRiI)t<`T+F7HGrI0KTadYEKE0 z;$eG1^Fq$&>&uNo{RsHU27zOQxoFejx+rERN%S$0T{Xe3=y5sGoR*tQ*x$K-z!2zi z9xM0)FM*n@oRdcsydMUx@0r11zn{p}8d}#I{_TvR zjRhUzXm6%;v@{e{OWH4~(?AMT8b=91f&gvU(Py3l63gz`(*iJ9J^!uN2n&A#yt-Jt z4`L93Z4t`ZU$BsRpwEdi6@_>}^u=}Dy$|#8;mIAg^N&0opbS($5mL8^M@m3KeXGL1 z3^M0{@MXCD6eGwrdx3&&QF!$J**;RDm?74)IjE5-f$a&8^3t3?zKDO=Qf?3|;K%cR zpffBoVOG68Z9AdHS6I)X3-Qk4)~`q4V#N8@Ii7M@MI+z@Z&_@OkdyKy6(55)ne~`n z`Pa-Gh5JUtLe8|M9+B*NbIX|I`=#SMinrJXJOIRr@Y8CpT6-YGnb9s-oS>N=r?? 
zAvcuME_0rko}vs(rqwjX{Fwlpg}z9% zm3bH8sV6*Ql1lzf6C;jg%O+G5rT%Xp@;$>z$Ub85vEgFHa;YxPNn zywKf3WnsC2aJhLhk$+nXTJ-BzD}y4{uk5uj7e_|dz#$@63}u~NqH&*VG38q;$#P3* zA5?lG+H~1Z1l2VxK7^LKjB-qh{GIiyRlux&SA9zg^_tMhlVY&@dJ|{uEZ*}ovlhQ4 ztFaP$mKJdmMs7ce$Uhs=u%pIh0A6%!b9b*+_mwG1tW_GhueqGVnSS7E=|J#%XPv`lg&|h6ssitZmbV{3fk8Ab zaUQlHy`Q&>F^DsKr9yMi3otRy=mjJS*BK1#RX0onL$-6{Eks%>UO&%2C7No_`xSnw zL*vD=Mod1p;Uzjks1@Y`A&g zw{|a+0)8~#^lh((_tfqQPKX14!rQ{NA|(8+nPlvfP{FFIcC?fwaq}bd24?|pG9@Gl z`z(?IMna&Q-{x%vr;uL1pB`nopWM{+2Ia#b25%%~H2wVzLDEFDinCQG zE3Cp++)Qi~R_B95-PQ0ZcudrBgKVJZj}64n7$-9n*ICfxZM0fHDh>Yf zklI4tXv|4D=}a-8o*Wif`XLT`fmYwtRohe<#-WDFCqao?#tKuj>jY93gvu7@1JapB zjHbC$InUrYdtF7X{mGuP5!2r$n%x$Fs9%SqS&Az;W)a_b+XMxUWD**-Ifci9W2V&819zq)I^#bec#eEq~hm9;AgX%<3*DhXk^KeTEw$tKw-Er2#3EmrDq z0V{)PE0x=gsZxl;uG1A%EZ(7#99};rFD@!bC!w2QCH<+HCHrfAk}nyH9ynV&byxX)Niy3T zlJ=@$nz+>R{Xt~%R5R_I{b4uDGzUl#6hV)n`5&2Rb$dD`s~!QuP`m1M!n`j=k@TYZ zj+fv7HO!Eh8ifbU&FlnVH{=KwS64TL3cw@JbjCr+r(?G>C?xyrlZ55$D7n7-!kUYp zY(Bhg#c`SK?kr}_4VZ16s$&f2|)pyw$^S8kD3J@2AJ!Pqf4oa!rTk`!o5aa7LlDjzqQSYMbcP z6nq9~OHwS%TOy7ewrX3XydDAdAC-J8{o~m#E_M6+?9Ujtv#fZFBUl=6EUSBdyE7NV zRi)T)eEP#{1M|^+h(}!VuZV@&)msbm02KU$amCU`ighVQs@W;2m^Q=w!9uoUrOAHC zWAf;uFb%9qg7h%MIE=OQy4v?Ita+QZ0L!7zQ5;x}KsQ`C)WxrKzC6#kE+&qj_--w6 zG2-hFXHTKk__5L8QmpgCJ;v_#TLj7FTT}~Hqn>Vv@N>&H!3kwS9J-?G@GeT`b*cX# zbpggwFD1my2XnA3)ApEw-s6mjy`i`Q=GiDbh z74Fn8fA!vpB>lp54v-x8h=)EV?>8_zf3=X)&#jhAUN8NlpYYPq;w~$WNQsm`#iUCI zt`Wfas6fL7Yd9UOSP;4PS5_$!Oz3 zhXjm*>am(N%?7p~iMrd4F-p8$&Tg_F&TcxQ&j-Jz9s)uz!6Z`>ItiqeyjJH2f<};b z3W4jTlC>ONh(BE0*Z6^(HouX^?_=$EUp*dC=vI87A}JxHcXVyCceE#x(V>sD-N93I z&{plc&x)})kRJCHt}wdIy=DEZLeF(A_GlY~$NCpZk+pCTT^EyoKgyCDgCg#L0X@^1Y(#rd%N&INn&mm>DAZ$pXI^3Tn`amMPJX%}oE zkf-#Vn|m{*{c4d`$l&D}=^$Wztp{NYEiAw-l~&G~FE`R7C`X=upr^V$VKniW)PA=? zO;NB(ja%nb-0P1O4AzZN3k(3f885{EMwndy-0Yi#q-6=*6-wN@vKNE1@Bcn43YrY) zHRsh(biZ|uK*q{?CRZzCuR)yf!O-nq1cs-XTDwTXMY43pnYf%3q4CmKOfy@fmE$gNAgHOlgiQ02Dcr%#op3v801UW zeynyRNXHv}#n-IAM?Im+3coST8#2AT+4-D|dDwCChEcb7^suM@bku%Z8|g>fWUsX6 zq8$!bd$|9>K74$@h2L||SX@Aa5OFfG_#l?}cCrWzI{Joz+r|A!^Hy=0*l5Z;vsB?! 
zPh9wb`B`w6F9+2_I-*Gbc4_^IBF8epu#(W*{&@7zoW0aOJzg9h?%!2TwP_!Yu%by_NU_lIIpo4NN)vFQ>KEPgoma|rs(<0Rd7 zI0t#-%5KstMH>=t6lvY0#P8w4{<9BVk&UL`lsNT=&$vZUx1-cY;%_8358SfbvAhId zkLfL_0P!<(0t8>5nOFa*h3mEnI+CF*Rax57VXe}dRflVePDM%H6GoQ-8sM5)Pu*Zml+n;8X%f_Fa0?^4;Gnd03t_hWy$G_}=krzyc;s(lD3w$fJnl6x4M zjl(H$@eW^z!M%ig5s%S~E9Lo3fponK9ZII5K0t=|Cu*bcM9%xw z(pTTxyDv7Pu(fmVCH-Ci%XREj{2E@ z3vMnu92}hTeS5bY;yz-bzG@tsUg^@b?L*ITL_|xShqrWs68k#MIPJu#V-())`FC(G zbM;nS;t#@O$^<4I8y|#?Z{L%2}o3*7};`gRZ6_&gakd}b&H>h zh}QQ&3o!L>9bu%8AE(}dus+MxT14hW5`B4tmT~?S=FR(R$f*48^(0Ku+69=?TqMZh zf&0S*wlAKk#YIBtTkn99W=wa9N;ewh{7*)tisyjB+?%&1pFBq7N?W({4~fKE4yizF zSz~9O2T352Xa^V%m*qnDSQQsHV?>Tz(#oXm1H_fh42_HkJW{k#>cRMWyNMJfL$&4= z4#7Z@RGs`a?jAdc6}RIlvb>T(s%xz4-IG+9dMNL8Lx<`6&GZgk=VnZP%mh$7Pk(*I zvgjl!XW--oisWtUHb3($zb!+t)emJVoQE;A7w7Wg*A_4)-zNHBWWuN9RuR%0l&||B zxx|pwoqy4wZtuRRo0lT%6#_A{36JQn>txc_0%j^-<&1NDncbsoU4k3!8>9BJS@Yhx zKz|+^DATzm{(Azs7M{jVzOB>JokwD`m1y#=;x@n$f<4~*8;m&6^d2s zUgR*L;r(&Ev7FQR1|mSQ;!n#J>4g)phW|k3c>S9#8_g_W^~{I?LGo(iS2N>>&#G-H z(nS#oG|pu&Dit^$tyigpF-0{qtw{VtmiNyX(gaa8Wr|AEjcQ|!k-AzsmEB9K+z-&o zh}cbGrPJ@6p*zay``po;zVjFyohe&|IKYeZhgu}c+T=2QwyF7xNy{m_!IMTNPOFbFN^pdpig{{r2XWky5$`%w>y{4&9}7XQ&2UeP97W!n=&u@nXOA5(SLk7asnL|qU$CawaeR=a3@Oi9^*Xs?%>eo1Yr2buSNxq zki$PKjGkXz>^cfEBfB#+d19@HqV=RKlw`yQvcbf}5km?BQ8alZKPlPM%FoGk7lcx@ zD^~3ASZJ0e2U?S7m!71vLl3Z7zM)``5g^nF0-dKTH@?ea2oNfGvfgG{(nNI81tVuW znL=J_jd{lP5V(?WnW{|$kSb~fA5bMzqt-wEpZuDMB#lEH_xFVaJaUx|ptPj^>U*7z zx$XS(uXeU9a@L=O-)6jE&2OVSTDaNGQ_D>w#B!o*R;=(H;=$=yYYl??JgV@5xkmeL z#fWNagHX)C3_)0iLLL8!ZD9H3hd`^1EVO014T`Obj@?{{VVXUpl-bxG-w!nw0M`Q{ z+7(6JRQY9vLR}?|ETxvkysDXADDRVjZ&LCz=j*&r#>iO{L(U-m(!9a@ zqjvQ_6{bdl6Fu(aJX0B{Sh}<=A|)c-!k)aK^!^B=X8U0aJ(D2NZoOi00!NfA>4=|f zW)@$XsgLxmqPR8PdKiUJpOaXgiCbVb$|Fo;h^+nJ8eV0L`Z5Y@f0sX5_lI$?7rQ6c zWT_&0*qVpOOuh6CU!Lk{cg<8M@`crc+{LIG@NfF{k{Dc!{AioObm2Q|JsCF<;W^%lb?9K{q2QfGcH$yzk zxPDU+x?8f#Gw=P$grjj$?blJq5jIom&iMOUZB^4N%8vnw zC}7t?@Srpj!;?As-r)0sw&Ug4A7z#?lFK{Ja@7@PyQ~t*>bC;UV4OQTOf^lpkABJi z`S&5nlyuZaTWBqx3MSrQPwACqIWjwS`-nHWMM+eI6cy81+nwbCSkWEFj1E7eMkZMB zcF-SFim4ygFu2!5b%+C{7^>5-(%hgXaaCCT3V?@g zEKdLh%NUDc36X7Sh2djf)cPG@=0Mc?k2uv3{*tFB=0GgH_uFa)?-A#$>*A=esnNk+ zz%`GL(yEaFkmA%VT%ZO!cjr)t3K}3McO&I=_*4=#e=c+@xk$;D*}?F>iu;1q<(Bm` zL1TKO%p1Ztjf>u6tq7_L2`bg|U*@Y4zI}KhK}2!#PQg3?*Js>6nS8M-)*Fb5SHPsi z0&#I@XzO5?r9JGJ1H{1C05tzc+> z-jcacik!Mlkx>)nPRx9_G$ z&vbUY9zQNxq*IRK@vLUJP z{zs7sAQl7Vlw1zr?Vf0;Cxe%)-Cp{hChK_WY6Un-#i}5cLMov%n_vjUhzbE(&ANES zDjSA^sj|kzna}g)x4~JMe13%8bigT-6Sb_(%&~a~a(z~`^$FWMoY8KASl7XyaDvYC z?{baUvk`$WI3j(bIR2PpWgfcpOeOJx7QX#ZIp&RcE^IF0>fO;B<^blLiaq5~@+yL(ZJF*MtuOxSMP z89N9BWZ;VqZ@>;Vd#gz2Vx(@apji>&W}hCc*v>wvN2O>Gv%I=s`RxW~+~YdmTTo!C0SSwfcJkXy+P5 z6UQh>!1JzmJEzaK@H<&v&>tak4LRFw#oZI?G>n@n*I*%YfY%zItSm-xXg_0|Y^P%R zAax_AJK<+fOwE!3Rtmm6MJ8SWWVOHX61Y8X@(fdi9VjTqiuBo9jP%J!b#tta677FA z5mG3FF`4{+R?169t^o%M;^!X;sKF%?P9sgH+}ILC&ROIFPvVrD15%iN62#j>korh6 z0r>EY6eUkG3f5p2R|LI=8J>7w_CX6*SOhCc7H`KZDpItV&#q0$<_4X?cdk4Qfg zf(P6s8N1z)_7$@Vlw07^k%gIbO{>&3>*Yz?X0M>U3Bp*u=hO0)>Qy_JQAyy1eM7i@ zLR(hgK5LrSTQ0J%MTZIPowHW0?`X8O0Fca*1IckcdbB4K%ty}?Phi`Z>U^s=S)T(- z7fX!KOh6b$(X7emjwEDpXaiK+v_xuJ%AqbI@%-`M9JD}yC`FjCUX(yXgyxX_?X_xH zAOuJqz5^#q|BQxwfl2jxCM!0AZTEqXn-}0uZj43Gm#me9rzx9E1#2f{L;70njVAH9QkvfiQ)7?Ki5dRxJIMmZAWqvJ%V|Yh@yFWa2D3cVchx@ODiC zCyI2`|F5;LY-_WLwyvOs;2LOy6e;e(-QC^Y-5m;n;)UW?973@cw-gUv+zLFn6?cEr zo^$@en@^D|S2A<&*?VT)d#&Xo9u1yyfqVExzVFUne9sZ-XXYQqZaX0W=rsG_Prf~z zy-1N^M&&!giqc7mZad_01EH;S1y~)@J&!6z`BDDz#dS19a~TV(IKfml&*0_4N)#*l z#cfOe9cz>Xrp7kj7vQY}i?_XS2h|5$&{%FAC8@`-z2?qWO#Pyv8N@<*YZdG_UQF4P z|9=rTGt4@sQuX<=VMAB$wSfd$xuR2aaxV(Kl3i#FCTZzm 
zpKCR}%9*HCI9)s|r9|oDI4zvO?`sZ9TSln5d=XQu_^jpqum1vyvg{~RzNRS8%ccs0 zxPHERVg9AX2&iH@cv6UawdH0H#bC%t|E?%b2)K+y5|6kL7^J)?mQ#upfa2w5U~g;L z>z$z!wODK7u~=&V$51hvbb)TgjZ`>tTxqVb7zV^{#ex#(orijyxZPE>LGIfrf}tdw7u6QPB6;BKLE?QsV0_Asy?rGg zJ6~BLGr!MvHV6RpG(^cyOe>Tti?n^3aID|&4w$gR)A!5?pKV{+err>tR+edd0LB13 zuOKrY2awxkM!vC>L^golQ0s}6_P;=|Ze{-OsG-9O;}ro|N~7MGk7u>1rO6cK$h?+U zrB|3xO_8C_I0%Ij5Nnq3&=%u(PZ|_44W{F4NKx-4>=UR=7Y|ONRF^AJGT|5xX)$8~ z>=3kG8DCkCLjSJ}qq!867?P8rbd#x0Ku4?jls!C0`%G;mX9vyqcu>6GemM}~CBl*g zEKNF1Vw_|&ioQ=HCRpcmY*azAXNPKfwo$`eE0c29Z(bJC=HF7p!7+Qdx$eF*g_8uo ztOveQbYeKqoef`%yG=(w&cF^=QDW#~j!dGWc+Vy_ldeI*grjah!>`#+x0e?|11g;h z-I;JmCxD}w>ZM`)qWk5cDluZU#E*vn*=OV3*BY8Na$B6r_(_NL)l2rq=q-E~(n^2~ zoPN(4t3!%1;%_#FxK*oi;{i!6b#QzZf9j-(>~tiHo83SB(Kf=E5Q$-tpN*#=Q|Bk% z5vTmfmgQKs)kndRs(eVxBL-X3THUrvci@}`o6ik#&qK7_>sj9u{Y z{fdzwt#d%>RLlU7)9@U2u-E99Y3BEbrBx|Etc7OE9;i`@5TO%NL_~A9=kfY! z8eYO;!oU(+VC7E6)d3s0Xeq{SPOMFgW{-2SZ-oo}T^fOt;73$l0HA&i(lyBy}f6kWIwB z=4ibGaa>{cOXc!yU;gncOE50r-5r^_hKDZEjMC~S`6sT)-(`7nxAB>|>Vt1geW@RxIPr7Wc^s32reFHVbUok7*Hg~Nz{(%Z%yI{(M)` z(%3AnTCw2tasREgA&&2dq{|F6t39l8xR2Nt(JP!D%}qSZ??G;E8WMpWrD6I*U#W)i z=bdZPr08?LQI^&~ShdfD2{;mO5qm&(PG*5{%&^$|RMG_cVX6#5o?;kXn1qSJ8$V15 ziY}sUnXfGAe#;9uI=-Xn#o8rFc3X=Dj@cM2gw5;Xd30mA7oGUBAMB&2>V$O(%FX5k z@nblF6${bBliBO?ROe(WzUz-)`Z$Ks6t&bHEJ8<=5Ghh^nDM7WLwMXurlzQLg4Wsm zsSAt6^X&tWhIBBh!?UH9c-W)f2TIPb*Hy!-sb^&B=iKh z`nBw}F7QXtEzDnT&)NBy@9FK;yB`sJw^cE)spN#_HszcF7PA>wvegrg3driaxm@`E5BdfT~%KLxyDY~N%E7FBcE1W_*A<~yA5(#MVaJqlEUbN zdi|H%_HYf8;KcReKBg3eE{tH9C}9ZGTT3Qb3MJ$LyZ1%rlGtWY`gc7{e2j?v#rk?5 zCj)Wu*H%ce=&TI}PN5xnH||x;v#55v=NZiWt4>j&QI704q-2}pwOzyqhIJO5Z##9j z!fL?a-b_IC4NI7`(2tg^rL6;919AhWkMTi{bsY0VQN(^Yuk`=>X%1MWQ?uK3XyjYV zCJ`XdvxpU%I{eJpjpeuD=;~e~G$xr{Yg2$;#=}qTR7h=o^(zTHOuFUgQ}n(C8SDIa z+qNw)ZWS&;jw|H=Y1Ev^RX5XKrms1-|k(7l?u$vnx~oy8vxF#kpR=B|LR@Y^ zD@S`BxIb>|`(`&AAd07X`X{oNLp4+ zQ79k$(#MQQg_(#{$e=^Za2ze%p1%5Qf00~n%t(!*nZ+w#o?cE|@m;#vwoXdR0<}j@ zSI_oGmH@|KUUlGcbItv-sGmMx!$^wy=(+Y>nwmld&9u8cazo4zID@pIUu|f04``K^ z8j~NfVWPF1A1X$8t}mst;^kJkr@!r>W)Gm0k=n}WM9Dr1In2K~2$?MkA9?uXkXttccA_{P7UPWZj@W5r)q#${L6 zy=ry&TN1= zT?<8U3?~t;zBj9L^zpAJ_DIxJb-n)16nj1-^f*sU2 z-?aETbIOptt_bq^*dG2Q*^{Z{W3fRY6Sv*T^ce>{iej=gzoB3}w^#rqI>|YJfCqM! 
[GIT binary patch data for a new image file omitted]

diff --git a/docs/cugraph/source/graph_support/property_graph.md b/docs/cugraph/source/graph_support/property_graph.md
new file mode 100644
index 00000000000..4610cb7c3a3
--- /dev/null
+++ b/docs/cugraph/source/graph_support/property_graph.md
@@ -0,0 +1,54 @@
+[cuGraph logo]
+
+# Property Graph
+
+Part of [RAPIDS](https://rapids.ai) cuGraph, Property Graph allows all the great benefits of cuGraph to be applied to property-rich datasets stored in a graph structure. A Property Graph is really a data model rather than a type of graph. Within the cuGraph ecosystem, a Property Graph is a meta-graph that can encapsulate and instantiate all the other graph types. That view stems from property graphs being originally created for database systems. Conceptually, a Property Graph can be viewed as a property-rich structure that can be projected onto any graph type. Dataversity has a good definition of [Property Graph](https://www.dataversity.net/what-is-a-property-graph), which gathers definitions from a collection of resources.
+
+Property Graph enables:
+
+* Multiple edge and node types, as seen in the Property Graph API
+* Subgraph extractions based on properties and/or edge and node types, as seen below
+* Storage of properties either within the graph structure on GPU or using GNN-centric storage extensions on host storage
+* Adding additional properties, nodes, and edges into the property graph to store derived data like analytic results
+* Client access managed by a remote server, allowing shared access and remote operations using [CuGraph Service](./cugraph_service.md)
+
+This is an example of using the cuGraph Property Graph in a two-stage analysis.
+
+```
+import cudf
+import cugraph
+from cugraph.experimental import PropertyGraph
+
+# Import a built-in dataset
+from cugraph.experimental.datasets import karate
+
+# Read edgelist data into a DataFrame and load it into a PropertyGraph as edge data.
+# Create a graph using the imported Dataset object.
+graph = cugraph.Graph(directed=False)
+G = karate.get_graph(create_using=graph, fetch=True)
+
+df = G.edgelist.edgelist_df
+pG = PropertyGraph()
+pG.add_edge_data(df, vertex_col_names=("src", "dst"))
+
+# Run Louvain to get the partition number for each vertex.
+# Set resolution accordingly to identify two primary partitions.
+(partition_info, _) = cugraph.louvain(pG.extract_subgraph(create_using=graph), resolution=0.6)
+
+# Add the partition numbers back to the Property Graph as vertex properties.
+pG.add_vertex_data(partition_info, vertex_col_name="vertex")
+
+# Use the partition properties to extract a Graph for each partition.
+G0 = pG.extract_subgraph(selection=pG.select_vertices("partition == 0"))
+G1 = pG.extract_subgraph(selection=pG.select_vertices("partition == 1"))
+
+# Run PageRank on each graph and print the results.
+pageranks0 = cugraph.pagerank(G0)
+pageranks1 = cugraph.pagerank(G1)
+print(pageranks0.sort_values(by="pagerank", ascending=False).head(3))
+print(pageranks1.sort_values(by="pagerank", ascending=False).head(3))
+```
\ No newline at end of file
diff --git a/docs/cugraph/source/graph_support/wholegraph_support.rst b/docs/cugraph/source/graph_support/wholegraph_support.rst
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/docs/cugraph/source/index.rst b/docs/cugraph/source/index.rst
index f1012c0fb74..12bc74aea2b 100644
--- a/docs/cugraph/source/index.rst
+++ b/docs/cugraph/source/index.rst
@@ -1,53 +1,25 @@
-Welcome to cugraph's documentation!
+Welcome to cuGraph's documentation!
===================================
RAPIDS cuGraph is a library of graph algorithms that seamlessly integrates into the RAPIDS data science ecosystem and allows the data scientist to easily call graph algorithms using data stored in GPU DataFrames, NetworkX Graphs, or even CuPy or SciPy sparse Matrices.
-..
image:: images/Stack2.png - :width: 600 - :alt: Alternative text - -Basics -###### -.. toctree:: - :maxdepth: 2 - :caption: Basic cuGraph Information: - -General CuGraph Information - - `CuGraph Intro <./basics/cugraph_intro.md>`_ - - `Blogs and Presentation <./basics/cugraph_blogs.rst>`_ - - `How-to Guides <./basics/coming_soon.md>`_ - - `Performance <./basics/coming_soon.md>`_ - - `References <./basics/cugraph_ref.rst>`_ - -CuGraph Development and Contributing - - `Getting cuGraph Packages <./basics/coming_soon.md>`_ - - `Contributing to cuGraph <./basics/coming_soon.md>`_ - - `CuGraph Development Guide <./basics/coming_soon.md>`_ - -Algorithms - - `Current list of algorithms <./basics/coming_soon.md>`_ - - - -API -### -.. toctree:: - :maxdepth: 2 - - api_docs/index.rst - - .. toctree:: :maxdepth: 2 + :caption: Contents: - api_docs/c_and_cpp.rst + basics/index + installation/index + tutorials/index + graph_support/index + references/index + dev_resources/index + releases/index + api_docs/index Indices and tables ================== * :ref:`genindex` -* :ref:`modindex` * :ref:`search` diff --git a/docs/cugraph/source/installation/getting_cugraph.md b/docs/cugraph/source/installation/getting_cugraph.md new file mode 100644 index 00000000000..005938f4777 --- /dev/null +++ b/docs/cugraph/source/installation/getting_cugraph.md @@ -0,0 +1,55 @@ + +# Getting cuGraph Packages + +There are 4 ways to get cuGraph packages: +1. [Quick start with Docker Repo](#docker) +2. [Conda Installation](#conda) +3. [Pip Installation](#pip) +4. [Build from Source](#SOURCE) + +Or checkout the [RAPIDS install selector](https://rapids.ai/start.html) for a pick list of install options. + +
+
+## Docker
The RAPIDS Docker containers contain all RAPIDS packages, including all of cuGraph, as well as all required supporting packages. To download a container, please see the [Docker Repository](https://hub.docker.com/r/rapidsai/rapidsai/), choosing a tag based on the NVIDIA CUDA version you're running. This provides a ready-to-run Docker container with example notebooks and data, showcasing how you can utilize all of the RAPIDS libraries: cuDF, cuML, and cuGraph.
+
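For reference, a pull-and-run sequence looks roughly like the sketch below. The tag is only an illustration; pick the tag that matches your CUDA and Python versions from the Docker Repository, and port 8888 is only needed if you use the notebook server bundled in the image.

```bash
# Illustrative tag only - substitute one that matches your CUDA/Python versions.
docker pull rapidsai/rapidsai:23.06-cuda11.8-runtime-ubuntu22.04-py3.10

# Run with GPU access; port 8888 exposes the bundled notebook server, if present.
docker run --gpus all --rm -it -p 8888:8888 \
    rapidsai/rapidsai:23.06-cuda11.8-runtime-ubuntu22.04-py3.10
```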
+
+
+## Conda
It is easy to install cuGraph using conda. You can get a minimal conda installation with [Miniconda](https://conda.io/miniconda.html) or get the full installation with [Anaconda](https://www.anaconda.com/download).
+
+cuGraph Conda packages:
+ * cugraph - this will also install:
+   * pylibcugraph
+   * libcugraph
+ * cugraph-service-client
+ * cugraph-service-server
+ * cugraph-dgl
+ * cugraph-pyg
+
+Replace the package name in the example below with the one you want to install.
+
+Install and update cuGraph using the conda command:
+
+```bash
+conda install -c rapidsai -c numba -c conda-forge -c nvidia cugraph cudatoolkit=11.8
+```
+
+Note: This conda installation only applies to Linux and Python versions 3.8/3.10.
+
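As a sketch of the sentence above, installing one of the other packages follows the same pattern; the environment name and the choice of `cugraph-service-server` here are only illustrations.

```bash
# Illustration: install a different cuGraph package into its own environment.
conda create -n cugraph_env -c rapidsai -c numba -c conda-forge -c nvidia \
    cugraph-service-server cudatoolkit=11.8
conda activate cugraph_env
```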
+
+## PIP
cuGraph, and all of RAPIDS, is available via pip.
+
+```
+pip install cugraph-cu11 --extra-index-url=https://pypi.ngc.nvidia.com
+```
+
+pip packages for the other cuGraph packages are being worked on and should be available in early 2023.
+
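Whichever install route you use, a quick sanity check (a minimal sketch, assuming the package installed cleanly) is to import the library and print its version:

```bash
python -c "import cugraph; print(cugraph.__version__)"
```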
+ diff --git a/docs/cugraph/source/installation/index.rst b/docs/cugraph/source/installation/index.rst new file mode 100644 index 00000000000..27858b77012 --- /dev/null +++ b/docs/cugraph/source/installation/index.rst @@ -0,0 +1,10 @@ +============ +Installation +============ + + +.. toctree:: + :maxdepth: 3 + + getting_cugraph + source_build diff --git a/docs/cugraph/source/installation/source_build.md b/docs/cugraph/source/installation/source_build.md new file mode 100644 index 00000000000..cd9297abcb2 --- /dev/null +++ b/docs/cugraph/source/installation/source_build.md @@ -0,0 +1,271 @@ +# Building from Source + +The following instructions are for users wishing to build cuGraph from source code. These instructions are tested on supported distributions of Linux, CUDA, and Python - See [RAPIDS Getting Started](https://rapids.ai/start.html) for list of supported environments. Other operating systems _might be_ compatible, but are not currently tested. + +The cuGraph package include both a C/C++ CUDA portion and a python portion. Both libraries need to be installed in order for cuGraph to operate correctly. + +## Prerequisites + +__Compiler__: +* `gcc` version 9.3+ +* `nvcc` version 11.0+ +* `cmake` version 3.20.1+ + +__CUDA:__ +* CUDA 11.0+ +* NVIDIA driver 450.80.02+ +* Pascal architecture or better + +You can obtain CUDA from [https://developer.nvidia.com/cuda-downloads](https://developer.nvidia.com/cuda-downloads). + + +## Building cuGraph +To install cuGraph from source, ensure the dependencies are met. + + +### Clone Repo and Configure Conda Environment +__GIT clone a version of the repository__ + + ```bash + # Set the localtion to cuGraph in an environment variable CUGRAPH_HOME + export CUGRAPH_HOME=$(pwd)/cugraph + + # Download the cuGraph repo - if you have a folked version, use that path here instead + git clone https://github.com/rapidsai/cugraph.git $CUGRAPH_HOME + + cd $CUGRAPH_HOME + ``` + +__Create the conda development environment__ + +```bash +# create the conda environment (assuming in base `cugraph` directory) + +# for CUDA 11.5 +conda env create --name cugraph_dev --file conda/environments/all_cuda-118_arch-x86_64.yaml + +# activate the environment +conda activate cugraph_dev + +# to deactivate an environment +conda deactivate +``` + + - The environment can be updated as development includes/changes the dependencies. To do so, run: + + +```bash + +# Where XXX is the CUDA 11 version +conda env update --name cugraph_dev --file conda/environments/cugraph_dev_cuda11.XXX.yml + +conda activate cugraph_dev +``` + + +### Build and Install Using the `build.sh` Script +Using the `build.sh` script make compiling and installing cuGraph a breeze. To build and install, simply do: + +```bash +$ cd $CUGRAPH_HOME +$ ./build.sh clean +$ ./build.sh libcugraph +$ ./build.sh cugraph +``` + +There are several other options available on the build script for advanced users. +`build.sh` options: +```bash +build.sh [ ...] [ ...] + where is: + clean - remove all existing build artifacts and configuration (start over) + uninstall - uninstall libcugraph and cugraph from a prior build/install (see also -n) + libcugraph - build libcugraph.so and SG test binaries + libcugraph_etl - build libcugraph_etl.so and SG test binaries + pylibcugraph - build the pylibcugraph Python package + cugraph - build the cugraph Python package + cugraph-service - build the cugraph-service_client and cugraph-service_server Python package + cpp-mgtests - build libcugraph and libcugraph_etl MG tests. 
Builds MPI communicator, adding MPI as a dependency. + cugraph-dgl - build the cugraph-dgl extensions for DGL + cugraph-pyg - build the cugraph-dgl extensions for PyG + docs - build the docs + and is: + -v - verbose build mode + -g - build for debug + -n - do not install after a successful build + --pydevelop - use setup.py develop instead of install + --allgpuarch - build for all supported GPU architectures + --skip_cpp_tests - do not build the SG test binaries as part of the libcugraph and libcugraph_etl targets + --without_cugraphops - do not build algos that require cugraph-ops + --cmake_default_generator - use the default cmake generator instead of ninja + --clean - clean an individual target (note: to do a complete rebuild, use the clean target described above) + -h - print this text + + default action (no args) is to build and install 'libcugraph' then 'libcugraph_etl' then 'pylibcugraph' then 'cugraph' then 'cugraph-service' targets + +examples: +$ ./build.sh clean # remove prior build artifacts (start over) +$ ./build.sh libcugraph -v # compile and install libcugraph with verbose output +$ ./build.sh libcugraph -g # compile and install libcugraph for debug +$ ./build.sh libcugraph -n # compile libcugraph but do not install + +# make parallelism options can also be defined: Example build jobs using 4 threads (make -j4) +$ PARALLEL_LEVEL=4 ./build.sh libcugraph + +Note that the libraries will be installed to the location set in `$PREFIX` if set (i.e. `export PREFIX=/install/path`), otherwise to `$CONDA_PREFIX`. +``` + + +## Building each section independently +#### Build and Install the C++/CUDA `libcugraph` Library +CMake depends on the `nvcc` executable being on your path or defined in `$CUDACXX`. + +This project uses cmake for building the C/C++ library. To configure cmake, run: + + ```bash + # Set the localtion to cuGraph in an environment variable CUGRAPH_HOME + export CUGRAPH_HOME=$(pwd)/cugraph + + cd $CUGRAPH_HOME + cd cpp # enter cpp directory + mkdir build # create build directory + cd build # enter the build directory + cmake .. -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX + + # now build the code + make -j # "-j" starts multiple threads + make install # install the libraries + ``` +The default installation locations are `$CMAKE_INSTALL_PREFIX/lib` and `$CMAKE_INSTALL_PREFIX/include/cugraph` respectively. + +#### Updating the RAFT branch + +`libcugraph` uses the [RAFT](https://github.com/rapidsai/raft) library and there are times when it might be desirable to build against a different RAFT branch, such as when working on new features that might span both RAFT and cuGraph. + +For local development, the `CPM_raft_SOURCE=` option can be passed to the `cmake` command to enable `libcugraph` to use the local RAFT branch. + +To have CI test a `cugraph` pull request against a different RAFT branch, modify the bottom of the `cpp/cmake/thirdparty/get_raft.cmake` file as follows: + +```cmake +# Change pinned tag and fork here to test a commit in CI +# To use a different RAFT locally, set the CMake variable +# RPM_raft_SOURCE=/path/to/local/raft +find_and_configure_raft(VERSION ${CUGRAPH_MIN_VERSION_raft} + FORK + PINNED_TAG + + # When PINNED_TAG above doesn't match cugraph, + # force local raft clone in build directory + # even if it's already installed. + CLONE_ON_PIN ON + ) +``` + +When the above change is pushed to a pull request, the continuous integration servers will use the specified RAFT branch to run the cuGraph tests. 
After the changes in the RAFT branch are merged to the release branch, remember to revert the `get_raft.cmake` file back to the original cuGraph branch. + +### Building and installing the Python package + +2) Install the Python packages to your Python path: + +```bash +cd $CUGRAPH_HOME +cd python +cd pylibcugraph +python setup.py build_ext --inplace +python setup.py install # install pylibcugraph +cd ../cugraph +python setup.py build_ext --inplace +python setup.py install # install cugraph python bindings + +``` + + + +## Run tests + +If you already have the datasets: + + ```bash + export RAPIDS_DATASET_ROOT_DIR= + ``` + If you do not have the datasets: + + ```bash + cd $CUGRAPH_HOME/datasets + source get_test_data.sh #This takes about 10 minutes and downloads 1GB data (>5 GB uncompressed) + ``` + +Run either the C++ or the Python tests with datasets + + - **Python tests with datasets** + + + ```bash + pip install python-louvain #some tests require this package to run + cd $CUGRAPH_HOME + cd python + pytest + ``` + - **C++ stand alone tests** + + From the build directory : + + ```bash + # Run the cugraph tests + cd $CUGRAPH_HOME + cd cpp/build + gtests/GDFGRAPH_TEST # this is an executable file + ``` + - **C++ tests with larger datasets** + + + + Run the C++ tests on large input: + + ```bash + cd $CUGRAPH_HOME/cpp/build + #test one particular analytics (eg. pagerank) + gtests/PAGERANK_TEST + #test everything + make test + ``` + +Note: This conda installation only applies to Linux and Python versions 3.8/3.10. + +### (OPTIONAL) Set environment variable on activation + +It is possible to configure the conda environment to set environmental variables on activation. Providing instructions to set PATH to include the CUDA toolkit bin directory and LD_LIBRARY_PATH to include the CUDA lib64 directory will be helpful. + +```bash +cd ~/anaconda3/envs/cugraph_dev + +mkdir -p ./etc/conda/activate.d +mkdir -p ./etc/conda/deactivate.d +touch ./etc/conda/activate.d/env_vars.sh +touch ./etc/conda/deactivate.d/env_vars.sh +``` + +Next the env_vars.sh file needs to be edited + +```bash +vi ./etc/conda/activate.d/env_vars.sh + +#!/bin/bash +export PATH=/usr/local/cuda-11.0/bin:$PATH # or cuda-11.1 if using CUDA 11.1 and cuda-11.2 if using CUDA 11.2, respectively +export LD_LIBRARY_PATH=/usr/local/cuda-11.0/lib64:$LD_LIBRARY_PATH # or cuda-11.1 if using CUDA 11.1 and cuda-11.2 if using CUDA 11.2, respectively +``` + +``` +vi ./etc/conda/deactivate.d/env_vars.sh + +#!/bin/bash +unset PATH +unset LD_LIBRARY_PATH +``` + +## Creating documentation + +Python API documentation can be generated from _./docs/cugraph directory_. Or through using "./build.sh docs" + +## Attribution +Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md diff --git a/docs/cugraph/source/basics/cugraph_ref.rst b/docs/cugraph/source/references/cugraph_ref.rst similarity index 100% rename from docs/cugraph/source/basics/cugraph_ref.rst rename to docs/cugraph/source/references/cugraph_ref.rst diff --git a/docs/cugraph/source/references/datasets.rst b/docs/cugraph/source/references/datasets.rst new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs/cugraph/source/references/index.rst b/docs/cugraph/source/references/index.rst new file mode 100644 index 00000000000..e70df627fe3 --- /dev/null +++ b/docs/cugraph/source/references/index.rst @@ -0,0 +1,9 @@ +========== +References +========== + + +.. 
toctree:: + :maxdepth: 3 + + cugraph_ref \ No newline at end of file diff --git a/docs/cugraph/source/references/licenses.rst b/docs/cugraph/source/references/licenses.rst new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs/cugraph/source/releases/index.rst b/docs/cugraph/source/releases/index.rst new file mode 100644 index 00000000000..7bd4f6dfa31 --- /dev/null +++ b/docs/cugraph/source/releases/index.rst @@ -0,0 +1,5 @@ +======== +Releases +======== + +https://github.com/rapidsai/cugraph/blob/main/CHANGELOG.md \ No newline at end of file diff --git a/docs/cugraph/source/tutorials/community_resources.md b/docs/cugraph/source/tutorials/community_resources.md new file mode 100644 index 00000000000..572f85a015b --- /dev/null +++ b/docs/cugraph/source/tutorials/community_resources.md @@ -0,0 +1,2 @@ +# Commmunity Resources +[Rapids Community Repository](https://github.com/rapidsai-community/notebooks-contrib) \ No newline at end of file diff --git a/docs/cugraph/source/basics/cugraph_blogs.rst b/docs/cugraph/source/tutorials/cugraph_blogs.rst similarity index 100% rename from docs/cugraph/source/basics/cugraph_blogs.rst rename to docs/cugraph/source/tutorials/cugraph_blogs.rst diff --git a/docs/cugraph/source/tutorials/cugraph_notebooks.md b/docs/cugraph/source/tutorials/cugraph_notebooks.md new file mode 100644 index 00000000000..1624ef10aa5 --- /dev/null +++ b/docs/cugraph/source/tutorials/cugraph_notebooks.md @@ -0,0 +1,77 @@ +# cuGraph Notebooks + +![GraphAnalyticsFigure](img/GraphAnalyticsFigure.jpg) + +This repository contains a collection of Jupyter Notebooks that outline how to run various cuGraph analytics. The notebooks do not address a complete data science problem. The notebooks are simply examples of how to run the graph analytics. Manipulation of the data before or after the graph analytic is not covered here. Extended, more problem focused, notebooks are being created and available https://github.com/rapidsai/notebooks-extended + +## Summary + +| Folder | Notebook | Description | +| --------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| Centrality | | | +| | [Centrality](algorithms/centrality/Centrality.ipynb) | Compute and compare multiple (currently 5) centrality scores | +| | [Katz](algorithms/centrality/Katz.ipynb) | Compute the Katz centrality for every vertex | +| | [Betweenness](algorithms/centrality/Betweenness.ipynb) | Compute both Edge and Vertex Betweenness centrality | +| | [Degree](algorithms/centrality/Degree.ipynb) | Compute Degree Centraility for each vertex | +| | [Eigenvector](algorithms/centrality/Eigenvector.ipynb) | Compute Eigenvector for every vertex | +| Community | | | +| | [Louvain](algorithms/community/Louvain.ipynb) and Leiden | Identify clusters in a graph using both the Louvain and Leiden algorithms | +| | [ECG](algorithms/community/ECG.ipynb) | Identify clusters in a graph using the Ensemble Clustering for Graph | +| | [K-Truss](algorithms/community/ktruss.ipynb) | Extracts the K-Truss cluster | +| | [Spectral-Clustering](algorithms/community/Spectral-Clustering.ipynb) | Identify clusters in a graph using Spectral Clustering with both
- Balanced Cut
- Modularity |
+| | [Subgraph Extraction](algorithms/community/Subgraph-Extraction.ipynb) | Compute a subgraph of the existing graph including only the specified vertices |
+| | [Triangle Counting](algorithms/community/Triangle-Counting.ipynb) | Count the number of triangles in a graph |
+| Components | | |
+| | [Connected Components](algorithms/components/ConnectedComponents.ipynb) | Find weakly and strongly connected components in a graph |
+| Core | | |
+| | [K-Core](algorithms/cores/kcore.ipynb) | Extracts the K-core cluster |
+| | [Core Number](algorithms/cores/core-number.ipynb) | Compute the core number for each vertex in a graph |
+| Layout | | |
+| | [Force-Atlas2](algorithms/layout/Force-Atlas2.ipynb) | A large graph visualization achieved with cuGraph. |
+| Link Analysis | | |
+| | [Pagerank](algorithms/link_analysis/Pagerank.ipynb) | Compute the PageRank of every vertex in a graph |
+| | [HITS](algorithms/link_analysis/HITS.ipynb) | Compute the HITS Hub and Authority scores for every vertex in a graph |
+| Link Prediction | | |
+| | [Jaccard Similarity](algorithms/link_prediction/Jaccard-Similarity.ipynb) | Compute vertex similarity scores using both:
- Jaccard Similarity
- Weighted Jaccard |
+| | [Overlap Similarity](algorithms/link_prediction/Overlap-Similarity.ipynb) | Compute vertex similarity scores using the Overlap Coefficient |
+| Sampling | | |
+| | [Random Walk](algorithms/sampling/RandomWalk.ipynb) | Compute random walks for a varying number of seeds and path lengths |
+| Traversal | | |
+| | [BFS](algorithms/traversal/BFS.ipynb) | Compute the Breadth First Search path from a starting vertex to every other vertex in a graph |
+| | [SSSP](algorithms/traversal/SSSP.ipynb) | Single Source Shortest Path - compute the shortest path from a starting vertex to every other vertex |
+| Structure | | |
+| | [Renumbering](algorithms/structure/Renumber.ipynb)
[Renumbering 2](algorithms/structure/Renumber-2.ipynb) | Renumber the vertex IDs in a graph (two sample notebooks) | +| | [Symmetrize](algorithms/structure/Symmetrize.ipynb) | Symmetrize the edges in a graph | + + +## RAPIDS notebooks +Visit the main RAPIDS [notebooks](https://github.com/rapidsai/notebooks) repo for a listing of all notebooks across all RAPIDS libraries. + +## Requirements + +Running the example in these notebooks requires: + +* The latest version of RAPIDS with cuGraph. + * Download via Docker, Conda (See [__Getting Started__](https://rapids.ai/start.html)) + +* cuGraph is dependent on the latest version of cuDF. Please install all components of RAPIDS +* Python 3.8+ +* A system with an NVIDIA GPU: Pascal architecture or better +* CUDA 11.4+ +* NVIDIA driver 450.51+ + +#### Copyright + +Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + + + + + +![RAPIDS](img/rapids_logo.png) \ No newline at end of file diff --git a/docs/cugraph/source/tutorials/how_to_guides.md b/docs/cugraph/source/tutorials/how_to_guides.md new file mode 100644 index 00000000000..42da6ed21ca --- /dev/null +++ b/docs/cugraph/source/tutorials/how_to_guides.md @@ -0,0 +1,9 @@ +# How To Guides +- Basic use of cuGraph, on the page +- Property graph with analytic flow +- GNN – model building +- cuGraph Service – client/server setup and use (ucx) +- MNMG Graph – dask, rmm basics and examples +- Pylibcugraph – why and how +- Cugraph for C, C++ users +- Use of nvidia-smi with cugraph \ No newline at end of file diff --git a/docs/cugraph/source/tutorials/index.rst b/docs/cugraph/source/tutorials/index.rst new file mode 100644 index 00000000000..cce3525097b --- /dev/null +++ b/docs/cugraph/source/tutorials/index.rst @@ -0,0 +1,11 @@ +========= +Tutorials +========= + + +.. toctree:: + :maxdepth: 3 + + how_to_guides.md + cugraph_blogs.rst + community_resources.md diff --git a/python/cugraph-service/scripts/README.md b/mg_utils/README.md similarity index 56% rename from python/cugraph-service/scripts/README.md rename to mg_utils/README.md index c9e6014f34b..26dbbd5e705 100644 --- a/python/cugraph-service/scripts/README.md +++ b/mg_utils/README.md @@ -1,6 +1,6 @@ -This directory contains various scripts helpful for cugraph_service users and developers. +This directory contains various scripts helpful for cugraph users and developers. -The following scripts were copied from https://github.com/rapidsai/multi-gpu-tools and are useful for starting a dask cluster, which is needed by cugraph_service for multi-GPU support. +The following scripts were copied from https://github.com/rapidsai/multi-gpu-tools and are useful for starting a dask cluster, which is needed by cugraph for multi-GPU support. 
* `run-dask-process.sh` * `functions.sh` * `default-config.sh` diff --git a/python/cugraph-service/scripts/default-config.sh b/mg_utils/default-config.sh similarity index 97% rename from python/cugraph-service/scripts/default-config.sh rename to mg_utils/default-config.sh index 694135e2e8d..26cef2aee78 100755 --- a/python/cugraph-service/scripts/default-config.sh +++ b/mg_utils/default-config.sh @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/python/cugraph-service/scripts/functions.sh b/mg_utils/functions.sh similarity index 98% rename from python/cugraph-service/scripts/functions.sh rename to mg_utils/functions.sh index 7b5fc7bf305..7eedb5f1b1f 100644 --- a/python/cugraph-service/scripts/functions.sh +++ b/mg_utils/functions.sh @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/python/cugraph-service/scripts/run-dask-process.sh b/mg_utils/run-dask-process.sh similarity index 99% rename from python/cugraph-service/scripts/run-dask-process.sh rename to mg_utils/run-dask-process.sh index f26fb368d7b..e5fa8fab332 100755 --- a/python/cugraph-service/scripts/run-dask-process.sh +++ b/mg_utils/run-dask-process.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/notebooks/gnn/cgs_creation_extensions/cgs_mag_extension.py b/notebooks/gnn/cgs_creation_extensions/cgs_mag_extension.py deleted file mode 100644 index f74de5c6559..00000000000 --- a/notebooks/gnn/cgs_creation_extensions/cgs_mag_extension.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# cuGraph or cuGraph-Service is required; each has its own version of -# import_optional and we need to select the correct one. 
- -from ogb.nodeproppred import NodePropPredDataset - -def create_mag(server): - dataset = NodePropPredDataset(name = 'ogbn-mag') - - data = dataset[0] - - # Can't import these before loading MAG; known OGB issue - import cudf - import dask_cudf - - from cugraph.experimental import MGPropertyGraph - from cugraph.experimental import PropertyGraph - - pG = PropertyGraph() - - vertex_offsets = {} - last_offset = 0 - - for node_type, num_nodes in data[0]['num_nodes_dict'].items(): - vertex_offsets[node_type] = last_offset - last_offset += num_nodes - - blank_df = cudf.DataFrame({'id':range(vertex_offsets[node_type], vertex_offsets[node_type] + num_nodes)}) - blank_df.id = blank_df.id.astype('int64') - if isinstance(pG, MGPropertyGraph): - blank_df = dask_cudf.from_cudf(blank_df, npartitions=2) - pG.add_vertex_data(blank_df, vertex_col_name='id', type_name=node_type) - - - for i, (node_type, node_features) in enumerate(data[0]['node_feat_dict'].items()): - vertex_offset = vertex_offsets[node_type] - - feature_df = cudf.DataFrame(node_features) - feature_df.columns = [str(c) for c in range(feature_df.shape[1])] - feature_df['id'] = range(vertex_offset, vertex_offset + node_features.shape[0]) - feature_df.id = feature_df.id.astype('int64') - if isinstance(pG, MGPropertyGraph): - feature_df = dask_cudf.from_cudf(feature_df, npartitions=2) - - pG.add_vertex_data(feature_df, vertex_col_name='id', type_name=node_type) - - # Fill in an empty value for vertices without properties. - pG.fillna_vertices(0.0) - - for i, (edge_key, eidx) in enumerate(data[0]['edge_index_dict'].items()): - node_type_src, edge_type, node_type_dst = edge_key - - vertex_offset_src = vertex_offsets[node_type_src] - vertex_offset_dst = vertex_offsets[node_type_dst] - eidx = [n + vertex_offset_src for n in eidx[0]], [n + vertex_offset_dst for n in eidx[1]] - - edge_df = cudf.DataFrame({'src':eidx[0], 'dst':eidx[1]}) - edge_df.src = edge_df.src.astype('int64') - edge_df.dst = edge_df.dst.astype('int64') - edge_df['type'] = edge_type - if isinstance(pG, MGPropertyGraph): - edge_df = dask_cudf.from_cudf(edge_df, npartitions=2) - - # Adding backwards edges is currently required in both the cuGraph PG and PyG APIs. 
- pG.add_edge_data(edge_df, vertex_col_names=['src','dst'], type_name=edge_type) - pG.add_edge_data(edge_df, vertex_col_names=['dst','src'], type_name=f'{edge_type}_bw') - - y_df = cudf.DataFrame(data[1]['paper'], columns=['y']) - y_df['id'] = range(vertex_offsets['paper'], vertex_offsets['paper'] + len(y_df)) - y_df.id = y_df.id.astype('int64') - if isinstance(pG, MGPropertyGraph): - y_df = dask_cudf.from_cudf(y_df, npartitions=2) - - pG.add_vertex_data(y_df, vertex_col_name='id', type_name='paper') - - return pG \ No newline at end of file diff --git a/notebooks/gnn/pyg_hetero_mag.ipynb b/notebooks/gnn/pyg_hetero_mag.ipynb deleted file mode 100644 index 2e8c0a6b311..00000000000 --- a/notebooks/gnn/pyg_hetero_mag.ipynb +++ /dev/null @@ -1,391 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# PyG+cuGraph Heterogeneous MAG Example\n", - "# Skip notebook test\n", - "\n", - "### Requires installation of PyG" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Load MAG into CPU Memory" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "import ogb\n", - "from ogb.nodeproppred import NodePropPredDataset\n", - "\n", - "import cugraph\n", - "import cudf\n", - "\n", - "\n", - "dataset = NodePropPredDataset(name = 'ogbn-mag') \n", - "data = dataset[0]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create PropertyGraph from MAG Data" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Partially Load the Vertex Data (just ids)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import cudf\n", - "import dask_cudf\n", - "import cugraph\n", - "from cugraph.experimental import MGPropertyGraph\n", - "from cugraph.experimental import PropertyGraph\n", - "pG = PropertyGraph()\n", - "\n", - "vertex_offsets = {}\n", - "last_offset = 0\n", - "\n", - "for node_type, num_nodes in data[0]['num_nodes_dict'].items():\n", - " vertex_offsets[node_type] = last_offset\n", - " last_offset += num_nodes\n", - " \n", - " blank_df = cudf.DataFrame({'id':range(vertex_offsets[node_type], vertex_offsets[node_type] + num_nodes)})\n", - " blank_df.id = blank_df.id.astype('int64')\n", - " if isinstance(pG, MGPropertyGraph):\n", - " blank_df = dask_cudf.from_cudf(blank_df, npartitions=2)\n", - " pG.add_vertex_data(blank_df, vertex_col_name='id', type_name=node_type)\n", - "\n", - "vertex_offsets" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Add the Remaining Node Features" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for i, (node_type, node_features) in enumerate(data[0]['node_feat_dict'].items()):\n", - " vertex_offset = vertex_offsets[node_type]\n", - "\n", - " feature_df = cudf.DataFrame(node_features)\n", - " feature_df.columns = [str(c) for c in range(feature_df.shape[1])]\n", - " feature_df['id'] = range(vertex_offset, vertex_offset + node_features.shape[0])\n", - " feature_df.id = feature_df.id.astype('int64')\n", - " if isinstance(pG, MGPropertyGraph):\n", - " feature_df = dask_cudf.from_cudf(feature_df, npartitions=2)\n", - "\n", - " pG.add_vertex_data(feature_df, vertex_col_name='id', type_name=node_type)\n", - "\n", - "# Fill in an empty value for vertices without properties.\n", - "pG.fillna_vertices(0.0)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - 
"source": [ - "### Add the Edges" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for i, (edge_key, eidx) in enumerate(data[0]['edge_index_dict'].items()):\n", - " node_type_src, edge_type, node_type_dst = edge_key\n", - " print(node_type_src, edge_type, node_type_dst)\n", - " vertex_offset_src = vertex_offsets[node_type_src]\n", - " vertex_offset_dst = vertex_offsets[node_type_dst]\n", - " eidx = [n + vertex_offset_src for n in eidx[0]], [n + vertex_offset_dst for n in eidx[1]]\n", - "\n", - " edge_df = cudf.DataFrame({'src':eidx[0], 'dst':eidx[1]})\n", - " edge_df.src = edge_df.src.astype('int64')\n", - " edge_df.dst = edge_df.dst.astype('int64')\n", - " edge_df['type'] = edge_type\n", - " if isinstance(pG, MGPropertyGraph):\n", - " edge_df = dask_cudf.from_cudf(edge_df, npartitions=2)\n", - "\n", - " # Adding backwards edges is currently required in both the cuGraph PG and PyG APIs.\n", - " pG.add_edge_data(edge_df, vertex_col_names=['src','dst'], type_name=edge_type)\n", - " pG.add_edge_data(edge_df, vertex_col_names=['dst','src'], type_name=f'{edge_type}_bw')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Add the Target Variable" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "y_df = cudf.DataFrame(data[1]['paper'], columns=['y'])\n", - "y_df['id'] = range(vertex_offsets['paper'], vertex_offsets['paper'] + len(y_df))\n", - "y_df.id = y_df.id.astype('int64')\n", - "if isinstance(pG, MGPropertyGraph):\n", - " y_df = dask_cudf.from_cudf(y_df, npartitions=2)\n", - "\n", - "pG.add_vertex_data(y_df, vertex_col_name='id', type_name='paper')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Construct a Graph Store, Feature Store, and Loaders" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from cugraph_pyg.data import to_pyg\n", - "\n", - "feature_store, graph_store = to_pyg(pG)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from cugraph_pyg.sampler import CuGraphSampler\n", - "sampler = CuGraphSampler(\n", - " data=(feature_store, graph_store),\n", - " shuffle=True,\n", - " num_neighbors=[10,25],\n", - " batch_size=50,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from torch_geometric.loader import NodeLoader\n", - "loader = NodeLoader(\n", - " data=(feature_store, graph_store),\n", - " shuffle=True,\n", - " batch_size=50,\n", - " node_sampler=sampler,\n", - " input_nodes=('author', graph_store.get_vertex_index('author'))\n", - ")\n", - "\n", - "test_loader = NodeLoader(\n", - " data=(feature_store, graph_store),\n", - " shuffle=True,\n", - " batch_size=50,\n", - " node_sampler=sampler,\n", - " input_nodes=('author', graph_store.get_vertex_index('author'))\n", - ")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Create the Network" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%timeit next(iter(loader))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "edge_types = [attr.edge_type for attr in graph_store.get_all_edge_attrs()]\n", - "edge_types" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": 
{}, - "outputs": [], - "source": [ - "num_classes = pG.get_vertex_data(columns=['y'])['y'].max() + 1\n", - "if isinstance(pG, MGPropertyGraph):\n", - " num_classes = num_classes.compute()\n", - "num_classes" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import torch.nn.functional as F\n", - "\n", - "from torch_geometric.nn import HeteroConv, Linear, SAGEConv\n", - "\n", - "class HeteroGNN(torch.nn.Module):\n", - " def __init__(self, edge_types, hidden_channels, out_channels, num_layers):\n", - " super().__init__()\n", - "\n", - " self.convs = torch.nn.ModuleList()\n", - " for _ in range(num_layers):\n", - " conv = HeteroConv({\n", - " edge_type: SAGEConv((-1, -1), hidden_channels)\n", - " for edge_type in edge_types\n", - " })\n", - " self.convs.append(conv)\n", - "\n", - " self.lin = Linear(hidden_channels, out_channels)\n", - "\n", - " def forward(self, x_dict, edge_index_dict):\n", - " for conv in self.convs:\n", - " x_dict = conv(x_dict, edge_index_dict)\n", - " x_dict = {key: F.leaky_relu(x) for key, x in x_dict.items()}\n", - " print(x_dict, edge_index_dict)\n", - " return self.lin(x_dict['paper'])\n", - "\n", - "\n", - "model = HeteroGNN(edge_types, hidden_channels=64, out_channels=num_classes,\n", - " num_layers=2).cuda()\n", - "\n", - "with torch.no_grad(): # Initialize lazy modules.\n", - " data = next(iter(loader))\n", - " out = model(data.x_dict, data.edge_index_dict)\n", - "\n", - "optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.001)\n", - "\n", - "num_batches = 5\n", - "def train():\n", - " model.train()\n", - " optimizer.zero_grad()\n", - " for b_i, data in enumerate(loader):\n", - " if b_i == num_batches:\n", - " break\n", - "\n", - " out = model(data.x_dict, data.edge_index_dict)\n", - " loss = F.cross_entropy(out, data.y_dict['paper'])\n", - " loss.backward()\n", - " optimizer.step()\n", - " \n", - " return float(loss) / num_batches\n", - "\n", - "\n", - "@torch.no_grad()\n", - "def test():\n", - " model.eval()\n", - " test_iter = iter(test_loader)\n", - "\n", - " acc = 0.0\n", - " for _ in range(2*num_batches):\n", - " data = next(test_iter)\n", - " pred = model(data.x_dict, data.edge_index_dict).argmax(dim=-1)\n", - "\n", - " \n", - " acc += (pred == data['paper'].y).sum() / len(data['paper'])\n", - " return acc / (2*num_batches)\n", - "\n", - "\n", - "for epoch in range(1, 101):\n", - " loss = train()\n", - " train_acc = test()\n", - " print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}')\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Train the Network" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for epoch in range(1, 101):\n", - " loss = train()\n", - " train_acc = test()\n", - " print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.9.7 ('base')", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.13" - }, - "orig_nbformat": 4, - "vscode": { - "interpreter": { - "hash": 
"f708a36acfaef0acf74ccd43dfb58100269bf08fb79032a1e0a6f35bd9856f51" - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/gnn/pyg_hetero_mag_cgs.ipynb b/notebooks/gnn/pyg_hetero_mag_cgs.ipynb deleted file mode 100644 index ae55198e67f..00000000000 --- a/notebooks/gnn/pyg_hetero_mag_cgs.ipynb +++ /dev/null @@ -1,273 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# PyG+cuGraph Heterogeneous MAG Example with cuGraph-Service\n", - "# Skip notebook test\n", - "\n", - "### Requires installation of PyG & cuGraph-Service\n", - "#### A cuGraph-Service Server must be running" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setup" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pathlib\n", - "import os\n", - "from cugraph_service_client.client import CugraphServiceClient\n", - "# Create a new client instance\n", - "client = CugraphServiceClient()\n", - "\n", - "# Set up the creation extensions\n", - "ext_path = os.path.join(\n", - " pathlib.Path('__file__').parent.resolve(),\n", - " 'cgs_creation_extensions'\n", - ")\n", - "print(f'loading extensions from {ext_path}')\n", - "client.load_graph_creation_extensions(str(ext_path))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from cugraph_service_client.client import RemoteGraph\n", - "\n", - "# This line may take a while if the data has not yet been downloaded.\n", - "graph_id = client.call_graph_creation_extension('create_mag')\n", - "\n", - "pG = RemoteGraph(client, graph_id)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Construct a Graph Store, Feature Store, and Loaders" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from cugraph.experimental.pyg_extensions import to_pyg\n", - "\n", - "feature_store, graph_store = to_pyg(pG)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from cugraph.experimental.pyg_extensions import CuGraphSampler\n", - "sampler = CuGraphSampler(\n", - " data=(feature_store, graph_store),\n", - " shuffle=True,\n", - " num_neighbors=[10,25],\n", - " batch_size=50,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from torch_geometric.loader import NodeLoader\n", - "loader = NodeLoader(\n", - " data=(feature_store, graph_store),\n", - " shuffle=True,\n", - " batch_size=50,\n", - " node_sampler=sampler,\n", - " input_nodes=('author', graph_store.get_vertex_index('author'))\n", - ")\n", - "\n", - "test_loader = NodeLoader(\n", - " data=(feature_store, graph_store),\n", - " shuffle=True,\n", - " batch_size=50,\n", - " node_sampler=sampler,\n", - " input_nodes=('author', graph_store.get_vertex_index('author'))\n", - ")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Create the Network" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "edge_types = [attr.edge_type for attr in graph_store.get_all_edge_attrs()]\n", - "edge_types" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "num_classes = pG.get_vertex_data(columns=['y'])['y'].max() + 1\n", - "num_classes" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import torch.nn.functional as F\n", - "\n", - "from torch_geometric.nn import HeteroConv, Linear, SAGEConv\n", - "\n", - "class HeteroGNN(torch.nn.Module):\n", - " def __init__(self, edge_types, hidden_channels, out_channels, num_layers):\n", - " super().__init__()\n", - "\n", - " self.convs = torch.nn.ModuleList()\n", - " for _ in range(num_layers):\n", - " conv = HeteroConv({\n", - " edge_type: SAGEConv((-1, -1), hidden_channels)\n", - " for edge_type in edge_types\n", - " })\n", - " self.convs.append(conv)\n", - "\n", - " self.lin = Linear(hidden_channels, out_channels)\n", - "\n", - " def forward(self, x_dict, edge_index_dict):\n", - " for conv in self.convs:\n", - " x_dict = conv(x_dict, edge_index_dict)\n", - " x_dict = {key: F.leaky_relu(x) for key, x in x_dict.items()}\n", - " print(x_dict, edge_index_dict)\n", - " return self.lin(x_dict['paper'])\n", - "\n", - "\n", - "model = HeteroGNN(edge_types, hidden_channels=64, out_channels=num_classes,\n", - " num_layers=2).cuda()\n", - "\n", - "with torch.no_grad(): # Initialize lazy modules.\n", - " data = next(iter(loader))\n", - " out = model(data.x_dict, data.edge_index_dict)\n", - "\n", - "optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.001)\n", - "\n", - "num_batches = 5\n", - "def train():\n", - " model.train()\n", - " optimizer.zero_grad()\n", - " for b_i, data in enumerate(loader):\n", - " if b_i == num_batches:\n", - " break\n", - "\n", - " out = model(data.x_dict, data.edge_index_dict)\n", - " loss = F.cross_entropy(out, data.y_dict['paper'])\n", - " loss.backward()\n", - " optimizer.step()\n", - " \n", - " return float(loss) / num_batches\n", - "\n", - "\n", - "@torch.no_grad()\n", - "def test():\n", - " model.eval()\n", - " test_iter = iter(test_loader)\n", - "\n", - " acc = 0.0\n", - " for _ in range(2*num_batches):\n", - " data = next(test_iter)\n", - " pred = model(data.x_dict, data.edge_index_dict).argmax(dim=-1)\n", - "\n", - " \n", - " acc += (pred == data['paper'].y).sum() / len(data['paper'])\n", - " return acc / (2*num_batches)\n", - "\n", - "\n", - "for epoch in range(1, 101):\n", - " loss = train()\n", - " train_acc = test()\n", - " print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}')\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Train the Network" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for epoch in range(1, 101):\n", - " loss = train()\n", - " train_acc = test()\n", - " print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.9.7 ('base')", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.13" - }, - "orig_nbformat": 4, - "vscode": { - "interpreter": { - "hash": "f708a36acfaef0acf74ccd43dfb58100269bf08fb79032a1e0a6f35bd9856f51" - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/python/cugraph-dgl/cugraph_dgl/cugraph_storage.py b/python/cugraph-dgl/cugraph_dgl/cugraph_storage.py index e845ea147dc..ded77245e57 
100644 --- a/python/cugraph-dgl/cugraph_dgl/cugraph_storage.py +++ b/python/cugraph-dgl/cugraph_dgl/cugraph_storage.py @@ -67,23 +67,28 @@ def __init__( specifying the source node, edge, and destination node types. The values are graph data is a dataframe with 2 columns form of (𝑈,𝑉), where (𝑈[𝑖],𝑉[𝑖]) forms the edge with ID 𝑖. + num_nodes_dict: dict[str, int] The number of nodes for some node types, which is a dictionary mapping a node type T to the number of T-typed nodes. + single_gpu: bool Whether to create the cugraph Property Graph on a single GPU or multiple GPUs single GPU = True single GPU = False + device_id: int If specified, must be the integer ID of the GPU device to have the results being created on + idtype: Framework-specific device object, The data type for storing the structure-related graph information this can be ``torch.int32`` or ``torch.int64`` for PyTorch. Defaults to ``torch.int64`` if pytorch is installed + Examples -------- The following example uses `CuGraphStorage` : diff --git a/python/cugraph-dgl/cugraph_dgl/dataloading/dataloader.py b/python/cugraph-dgl/cugraph_dgl/dataloading/dataloader.py index a47fa98dca4..31528d7061c 100644 --- a/python/cugraph-dgl/cugraph_dgl/dataloading/dataloader.py +++ b/python/cugraph-dgl/cugraph_dgl/dataloading/dataloader.py @@ -168,26 +168,26 @@ def __init__( ) if use_ddp: - worker_info = torch.utils.data.get_worker_info() + rank = torch.distributed.get_rank() client = default_client() - event = Event("cugraph_dgl_load_mg_graph_event") - if worker_info.id == 0: + self._graph_creation_event = Event("cugraph_dgl_load_mg_graph_event") + if rank == 0: G = create_cugraph_graph_from_edges_dict( edges_dict=graph._edges_dict, etype_id_dict=graph._etype_id_dict, edge_dir=graph_sampler.edge_dir, ) client.publish_dataset(cugraph_dgl_mg_graph_ds=G) - event.set() + self._graph_creation_event.set() else: - if event.wait(timeout=1000): - G = client.get_dataset(G, "cugraph_dgl_mg_graph_ds") + if self._graph_creation_event.wait(timeout=1000): + G = client.get_dataset("cugraph_dgl_mg_graph_ds") else: raise RuntimeError( - f"Fetch cugraph_dgl_mg_graph_ds to worker_id {worker_info.id}", + f"Fetch cugraph_dgl_mg_graph_ds to worker_id {rank}", "from worker_id 0 failed", ) - self._rank = worker_info.id + self._rank = rank else: G = create_cugraph_graph_from_edges_dict( edges_dict=graph._edges_dict, @@ -209,9 +209,6 @@ def __iter__(self): output_dir = os.path.join( self._sampling_output_dir, "epoch_" + str(self.epoch_number) ) - _clean_directory(output_dir) - - # Todo: Figure out how to get rank rank = self._rank bs = BulkSampler( output_path=output_dir, @@ -234,6 +231,16 @@ def __iter__(self): self.epoch_number = self.epoch_number + 1 return super().__iter__() + def __del__(self): + if self.use_ddp: + torch.distributed.barrier() + if self._rank == 0: + if self.use_ddp: + client = default_client() + client.unpublish_dataset("cugraph_dgl_mg_graph_ds") + self._graph_creation_event.clear() + _clean_directory(self._sampling_output_dir) + def get_batch_id_series(n_output_rows: int, batch_size: int): num_batches = (n_output_rows + batch_size - 1) // batch_size diff --git a/python/cugraph-dgl/cugraph_dgl/dataloading/utils/extract_graph_helpers.py b/python/cugraph-dgl/cugraph_dgl/dataloading/utils/extract_graph_helpers.py index b81a052ddd4..db35d8cf379 100644 --- a/python/cugraph-dgl/cugraph_dgl/dataloading/utils/extract_graph_helpers.py +++ b/python/cugraph-dgl/cugraph_dgl/dataloading/utils/extract_graph_helpers.py @@ -44,7 +44,6 @@ def 
create_cugraph_graph_from_edges_dict( source="_SRC_", destination="_DST_", edge_attr=["wgt", "_EDGE_ID_", "etp"], - legacy_renum_only=True, renumber=True, ) else: @@ -53,7 +52,6 @@ def create_cugraph_graph_from_edges_dict( source="_SRC_", destination="_DST_", edge_attr=["wgt", "_EDGE_ID_", "etp"], - legacy_renum_only=True, renumber=True, ) return G diff --git a/python/cugraph-dgl/cugraph_dgl/nn/conv/base.py b/python/cugraph-dgl/cugraph_dgl/nn/conv/base.py new file mode 100644 index 00000000000..9eb1e5222ca --- /dev/null +++ b/python/cugraph-dgl/cugraph_dgl/nn/conv/base.py @@ -0,0 +1,50 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from cugraph.utilities.utils import import_optional + +torch = import_optional("torch") +nn = import_optional("torch.nn") +ops_torch = import_optional("pylibcugraphops.pytorch") + + +class BaseConv(nn.Module): + r"""An abstract base class for cugraph-ops nn module.""" + + def __init__(self): + super().__init__() + self._cached_offsets_fg = None + + def reset_parameters(self): + r"""Resets all learnable parameters of the module.""" + raise NotImplementedError + + def forward(self, *args): + r"""Runs the forward pass of the module.""" + raise NotImplementedError + + def pad_offsets(self, offsets: torch.Tensor, size: int) -> torch.Tensor: + r"""Pad zero-in-degree nodes to the end of offsets to reach size. This + is used to augment offset tensors from DGL blocks (MFGs) to be + compatible with cugraph-ops full-graph primitives.""" + if self._cached_offsets_fg is None: + self._cached_offsets_fg = torch.empty( + size, dtype=offsets.dtype, device=offsets.device + ) + elif self._cached_offsets_fg.numel() < size: + self._cached_offsets_fg.resize_(size) + + self._cached_offsets_fg[: offsets.numel()] = offsets + self._cached_offsets_fg[offsets.numel() : size] = offsets[-1] + + return self._cached_offsets_fg[:size] diff --git a/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py b/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py index 112d2350a9c..2b57089189f 100644 --- a/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py +++ b/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py @@ -16,16 +16,16 @@ from __future__ import annotations from typing import Optional +from cugraph_dgl.nn.conv.base import BaseConv from cugraph.utilities.utils import import_optional dgl = import_optional("dgl") torch = import_optional("torch") nn = import_optional("torch.nn") -ops = import_optional("pylibcugraphops") -ops_autograd = import_optional("pylibcugraphops.torch.autograd") +ops_torch = import_optional("pylibcugraphops.pytorch") -class GATConv(nn.Module): +class GATConv(BaseConv): r"""Graph attention layer from `Graph Attention Network `__, with the sparse aggregation accelerated by cugraph-ops. 
@@ -80,6 +80,7 @@ class GATConv(nn.Module): [ 1.6477, -1.9986], [ 1.1138, -1.9302]]], device='cuda:0', grad_fn=) """ + MAX_IN_DEGREE_MFG = 200 def __init__( self, @@ -144,29 +145,32 @@ def forward( :math:`H` is the number of heads, and :math:`D_{out}` is size of output feature. """ - offsets, indices, _ = g.adj_sparse("csc") if g.is_block: if max_in_degree is None: max_in_degree = g.in_degrees().max().item() - _graph = ops.make_mfg_csr( - g.dstnodes(), offsets, indices, max_in_degree, g.num_src_nodes() - ) + + if max_in_degree < self.MAX_IN_DEGREE_MFG: + _graph = ops_torch.SampledCSC( + offsets, indices, max_in_degree, g.num_src_nodes() + ) + else: + offsets_fg = self.pad_offsets(offsets, g.num_src_nodes() + 1) + _graph = ops_torch.StaticCSC(offsets_fg, indices) else: - _graph = ops.make_fg_csr(offsets, indices) + _graph = ops_torch.StaticCSC(offsets, indices) feat_transformed = self.fc(feat) - out = ops_autograd.mha_gat_n2n( + out = ops_torch.operators.mha_gat_n2n( feat_transformed, self.attn_weights, _graph, self.num_heads, "LeakyReLU", self.negative_slope, - add_own_node=False, concat_heads=True, - ).view(-1, self.num_heads, self.out_feats) + ).view(-1, self.num_heads, self.out_feats)[: g.num_dst_nodes()] if self.bias is not None: out = out + self.bias diff --git a/python/cugraph-dgl/cugraph_dgl/nn/conv/relgraphconv.py b/python/cugraph-dgl/cugraph_dgl/nn/conv/relgraphconv.py index f93905ba7ec..c93c58c3473 100644 --- a/python/cugraph-dgl/cugraph_dgl/nn/conv/relgraphconv.py +++ b/python/cugraph-dgl/cugraph_dgl/nn/conv/relgraphconv.py @@ -17,16 +17,16 @@ import math from typing import Optional +from cugraph_dgl.nn.conv.base import BaseConv from cugraph.utilities.utils import import_optional dgl = import_optional("dgl") torch = import_optional("torch") nn = import_optional("torch.nn") -ops = import_optional("pylibcugraphops") -ops_autograd = import_optional("pylibcugraphops.torch.autograd") +ops_torch = import_optional("pylibcugraphops.pytorch") -class RelGraphConv(nn.Module): +class RelGraphConv(BaseConv): r"""An accelerated relational graph convolution layer from `Modeling Relational Data with Graph Convolutional Networks `__ that leverages the highly-optimized @@ -84,6 +84,7 @@ class RelGraphConv(nn.Module): [-1.4335, -2.3758], [-1.4331, -2.3295]], device='cuda:0', grad_fn=) """ + MAX_IN_DEGREE_MFG = 500 def __init__( self, @@ -178,43 +179,45 @@ def forward( torch.Tensor New node features. Shape: :math:`(|V|, D_{out})`. """ - # Create csc-representation and cast etypes to int32. offsets, indices, edge_ids = g.adj_sparse("csc") edge_types_perm = etypes[edge_ids.long()].int() - # Create cugraph-ops graph. 
if g.is_block: if max_in_degree is None: max_in_degree = g.in_degrees().max().item() - _graph = ops.make_mfg_csr_hg( - g.dstnodes(), - offsets, - indices, - max_in_degree, - g.num_src_nodes(), - n_node_types=0, - n_edge_types=self.num_rels, - out_node_types=None, - in_node_types=None, - edge_types=edge_types_perm, - ) + + if max_in_degree < self.MAX_IN_DEGREE_MFG: + _graph = ops_torch.SampledHeteroCSC( + offsets, + indices, + edge_types_perm, + max_in_degree, + g.num_src_nodes(), + self.num_rels, + ) + else: + offsets_fg = self.pad_offsets(offsets, g.num_src_nodes() + 1) + _graph = ops_torch.StaticHeteroCSC( + offsets_fg, + indices, + edge_types_perm, + self.num_rels, + ) else: - _graph = ops.make_fg_csr_hg( + _graph = ops_torch.StaticHeteroCSC( offsets, indices, - n_node_types=0, - n_edge_types=self.num_rels, - node_types=None, - edge_types=edge_types_perm, + edge_types_perm, + self.num_rels, ) - h = ops_autograd.agg_hg_basis_n2n_post( + h = ops_torch.operators.agg_hg_basis_n2n_post( feat, self.coeff, _graph, concat_own=self.self_loop, norm_by_out_degree=self.apply_norm, - ) + )[: g.num_dst_nodes()] h = h @ self.W.view(-1, self.out_feats) if self.bias is not None: h = h + self.bias diff --git a/python/cugraph-dgl/cugraph_dgl/nn/conv/sageconv.py b/python/cugraph-dgl/cugraph_dgl/nn/conv/sageconv.py index fa69dab4b57..1f9b651984b 100644 --- a/python/cugraph-dgl/cugraph_dgl/nn/conv/sageconv.py +++ b/python/cugraph-dgl/cugraph_dgl/nn/conv/sageconv.py @@ -16,16 +16,16 @@ from __future__ import annotations from typing import Optional +from cugraph_dgl.nn.conv.base import BaseConv from cugraph.utilities.utils import import_optional dgl = import_optional("dgl") torch = import_optional("torch") nn = import_optional("torch.nn") -ops = import_optional("pylibcugraphops") -ops_autograd = import_optional("pylibcugraphops.torch.autograd") +ops_torch = import_optional("pylibcugraphops.pytorch") -class SAGEConv(nn.Module): +class SAGEConv(BaseConv): r"""An accelerated GraphSAGE layer from `Inductive Representation Learning on Large Graphs `__ that leverages the highly-optimized aggregation primitives in cugraph-ops. 
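GATConv, RelGraphConv, and SAGEConv now share the same graph-construction dispatch: for a DGL block whose maximum in-degree stays below the layer's MAX_IN_DEGREE_MFG they build a SampledCSC, otherwise they pad the offsets and fall back to a full-graph StaticCSC, and the cugraph-ops output is finally sliced back to `g.num_dst_nodes()`. A sketch of that decision factored into one helper; the class names and constructor arguments are the ones used in the diffs above, while the threshold default and helper name are only illustrative:

```python
from cugraph.utilities.utils import import_optional

ops_torch = import_optional("pylibcugraphops.pytorch")


def make_cugraph_csc(g, offsets, indices, pad_offsets, max_in_degree=None, threshold=200):
    """Build the cugraph-ops graph object the conv layers above expect.

    `g` is a DGL graph or block and `pad_offsets` is a callable like
    BaseConv.pad_offsets; `threshold` plays the role of MAX_IN_DEGREE_MFG.
    """
    if g.is_block:
        if max_in_degree is None:
            max_in_degree = g.in_degrees().max().item()
        if max_in_degree < threshold:
            # Small fan-in: use the sampled (MFG) primitive directly.
            return ops_torch.SampledCSC(offsets, indices, max_in_degree, g.num_src_nodes())
        # Large fan-in: pad to num_src_nodes + 1 offsets and treat it as a full graph.
        return ops_torch.StaticCSC(pad_offsets(offsets, g.num_src_nodes() + 1), indices)
    return ops_torch.StaticCSC(offsets, indices)
```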
@@ -68,6 +68,7 @@ class SAGEConv(nn.Module): [-1.1690, 0.1952], [-1.1690, 0.1952]], device='cuda:0', grad_fn=) """ + MAX_IN_DEGREE_MFG = 500 def __init__( self, @@ -127,14 +128,20 @@ def forward( if max_in_degree is None: max_in_degree = g.in_degrees().max().item() - _graph = ops.make_mfg_csr( - g.dstnodes(), offsets, indices, max_in_degree, g.num_src_nodes() - ) + if max_in_degree < self.MAX_IN_DEGREE_MFG: + _graph = ops_torch.SampledCSC( + offsets, indices, max_in_degree, g.num_src_nodes() + ) + else: + offsets_fg = self.pad_offsets(offsets, g.num_src_nodes() + 1) + _graph = ops_torch.StaticCSC(offsets_fg, indices) else: - _graph = ops.make_fg_csr(offsets, indices) + _graph = ops_torch.StaticCSC(offsets, indices) feat = self.feat_drop(feat) - h = ops_autograd.agg_concat_n2n(feat, _graph, self.aggr) + h = ops_torch.operators.agg_concat_n2n(feat, _graph, self.aggr)[ + : g.num_dst_nodes() + ] h = self.linear(h) return h diff --git a/python/cugraph-dgl/examples/dataset_from_disk_cudf.ipynb b/python/cugraph-dgl/examples/dataset_from_disk_cudf.ipynb index f3d907fba63..d31dd2a74b0 100644 --- a/python/cugraph-dgl/examples/dataset_from_disk_cudf.ipynb +++ b/python/cugraph-dgl/examples/dataset_from_disk_cudf.ipynb @@ -19,11 +19,12 @@ "source": [ "import os\n", "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"4\"\n", - "import rmm \n", + "import cudf\n", + "import rmm\n", "import torch\n", - "#rmm.reinitialize(pool_allocator = True, initial_pool_size = 15e9, maximum_pool_size=25e9)\n", - "#torch.cuda.memory.change_current_allocator(rmm.rmm_torch_allocator)\n", - "import cudf" + "from rmm.allocators.torch import rmm_torch_allocator\n", + "# rmm.reinitialize(pool_allocator = True, initial_pool_size = 15e9, maximum_pool_size=24e9)\n", + "# torch.cuda.memory.change_current_allocator(rmm_torch_allocator)" ] }, { diff --git a/python/cugraph-dgl/examples/muti_trainer_MG_example/workflow.py b/python/cugraph-dgl/examples/muti_trainer_MG_example/workflow.py new file mode 100644 index 00000000000..00c67f92409 --- /dev/null +++ b/python/cugraph-dgl/examples/muti_trainer_MG_example/workflow.py @@ -0,0 +1,233 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
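The notebook cell updated above now imports the allocator from its new home, `rmm.allocators.torch`. If the commented-out lines are enabled, the intended setup looks roughly like the following (the pool sizes are just the example values from the notebook, not recommendations):

```python
import rmm
import torch
from rmm.allocators.torch import rmm_torch_allocator

# Back GPU allocations with an RMM memory pool.
rmm.reinitialize(pool_allocator=True, initial_pool_size=15e9, maximum_pool_size=24e9)

# Route PyTorch's CUDA allocations through the same RMM pool; this generally
# has to happen before the first CUDA tensor is allocated.
torch.cuda.memory.change_current_allocator(rmm_torch_allocator)
```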
+ +import dgl +import torch +import time +from distributed import Client, Event as Dask_Event +import tempfile + + +def enable_spilling(): + import cudf + + cudf.set_option("spill", True) + + +def setup_cluster(dask_worker_devices): + dask_worker_devices_str = ",".join([str(i) for i in dask_worker_devices]) + from dask_cuda import LocalCUDACluster + + cluster = LocalCUDACluster( + protocol="tcp", + CUDA_VISIBLE_DEVICES=dask_worker_devices_str, + rmm_pool_size="25GB", + ) + + client = Client(cluster) + client.wait_for_workers(n_workers=len(dask_worker_devices)) + client.run(enable_spilling) + print("Dask Cluster Setup Complete") + del client + return cluster.scheduler_address + + +def create_dask_client(scheduler_address): + from cugraph.dask.comms import comms as Comms + + client = Client(scheduler_address) + Comms.initialize(p2p=True) + return client + + +def initalize_pytorch_worker(dev_id): + import cupy as cp + import rmm + + dev = cp.cuda.Device( + dev_id + ) # Create cuda context on the right gpu, defaults to gpu-0 + dev.use() + rmm.reinitialize( + pool_allocator=True, + initial_pool_size=10e9, + maximum_pool_size=15e9, + devices=[dev_id], + ) + + if dev_id == 0: + torch.cuda.memory.change_current_allocator(rmm.rmm_torch_allocator) + + torch.cuda.set_device(dev_id) + cp.cuda.set_allocator(rmm.rmm_cupy_allocator) + enable_spilling() + print("device_id", dev_id, flush=True) + + +def load_dgl_dataset(dataset_name="ogbn-products"): + from ogb.nodeproppred import DglNodePropPredDataset + + dataset = DglNodePropPredDataset(name=dataset_name) + split_idx = dataset.get_idx_split() + train_idx, valid_idx, test_idx = ( + split_idx["train"], + split_idx["valid"], + split_idx["test"], + ) + g, label = dataset[0] + g.ndata["label"] = label + if len(g.etypes) <= 1: + g = dgl.add_self_loop(g) + else: + for etype in g.etypes: + if etype[0] == etype[2]: + # only add self loops for src->dst + g = dgl.add_self_loop(g, etype=etype) + + g = g.int() + train_idx = train_idx.int() + valid_idx = valid_idx.int() + test_idx = test_idx.int() + return g, train_idx, valid_idx, test_idx + + +def create_cugraph_graphstore_from_dgl_dataset( + dataset_name="ogbn-products", single_gpu=False +): + from cugraph_dgl import cugraph_storage_from_heterograph + + dgl_g, train_idx, valid_idx, test_idx = load_dgl_dataset(dataset_name) + cugraph_gs = cugraph_storage_from_heterograph(dgl_g, single_gpu=single_gpu) + return cugraph_gs, train_idx, valid_idx, test_idx + + +def create_dataloader(gs, train_idx, device): + import cugraph_dgl + + temp_dir = tempfile.TemporaryDirectory() + sampler = cugraph_dgl.dataloading.NeighborSampler([10, 20]) + dataloader = cugraph_dgl.dataloading.DataLoader( + gs, + train_idx, + sampler, + sampling_output_dir=temp_dir.name, + batches_per_partition=10, + device=device, # Put the sampled MFGs on CPU or GPU + use_ddp=True, # Make it work with distributed data parallel + batch_size=1024, + shuffle=False, # Whether to shuffle the nodes for every epoch + drop_last=False, + num_workers=0, + ) + return dataloader + + +def run_workflow(rank, devices, scheduler_address): + # Below sets gpu_num + dev_id = devices[rank] + initalize_pytorch_worker(dev_id) + device = torch.device(f"cuda:{dev_id}") + # cugraph dask client initialization + client = create_dask_client(scheduler_address) + + # Pytorch training worker initialization + dist_init_method = "tcp://{master_ip}:{master_port}".format( + master_ip="127.0.0.1", master_port="12346" + ) + + torch.distributed.init_process_group( + backend="nccl", + 
init_method=dist_init_method, + world_size=len(devices), + rank=rank, + ) + + print(f"rank {rank}.", flush=True) + print("Initalized across GPUs.") + + event = Dask_Event("cugraph_gs_creation_event") + if rank == 0: + ( + gs, + train_idx, + valid_idx, + test_idx, + ) = create_cugraph_graphstore_from_dgl_dataset( + "ogbn-products", single_gpu=False + ) + client.publish_dataset(cugraph_gs=gs) + client.publish_dataset(train_idx=train_idx) + client.publish_dataset(valid_idx=valid_idx) + client.publish_dataset(test_idx=test_idx) + event.set() + else: + if event.wait(timeout=1000): + gs = client.get_dataset("cugraph_gs") + train_idx = client.get_dataset("train_idx") + valid_idx = client.get_dataset("valid_idx") + test_idx = client.get_dataset("test_idx") + else: + raise RuntimeError(f"Fetch cugraph_gs to worker_id {rank} failed") + + torch.distributed.barrier() + print(f"Loading cugraph_store to worker {rank} is complete", flush=True) + dataloader = create_dataloader(gs, train_idx, device) + print("Data Loading Complete", flush=True) + del gs # Clean up gs reference + # Comment below + st = time.time() + for step, (input_nodes, seeds, blocks) in enumerate(dataloader): + pass + et = time.time() + print(f"Warmup loading took = {et-st} s on worker = {rank}") + torch.distributed.barrier() + + n_epochs = 30 + total_st = time.time() + for i in range(0, n_epochs): + st = time.time() + for step, (input_nodes, seeds, blocks) in enumerate(dataloader): + pass + # print(len(input_nodes)) + # print(len(seeds)) + # train_model() + et = time.time() + print(f"Data Loading took = {et-st} s for epoch = {i} on worker = {rank}") + torch.distributed.barrier() + total_et = time.time() + print( + f"Total time taken on n_epochs {n_epochs} = {total_et-total_st} s", + f"measured by worker = {rank}", + ) + + # cleanup dask cluster + if rank == 0: + client.unpublish_dataset("cugraph_gs") + client.unpublish_dataset("train_idx") + client.unpublish_dataset("valid_idx") + client.unpublish_dataset("test_idx") + event.clear() + + +if __name__ == "__main__": + dask_worker_devices = [5, 6] + scheduler_address = setup_cluster(dask_worker_devices) + + trainer_devices = [0, 1, 2] + import torch.multiprocessing as mp + + mp.spawn( + run_workflow, + args=(trainer_devices, scheduler_address), + nprocs=len(trainer_devices), + ) diff --git a/python/cugraph-dgl/pyproject.toml b/python/cugraph-dgl/pyproject.toml index 56ba1912a44..86103b29ac9 100644 --- a/python/cugraph-dgl/pyproject.toml +++ b/python/cugraph-dgl/pyproject.toml @@ -4,7 +4,8 @@ requires = [ "setuptools", -] + "wheel", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. build-backend = "setuptools.build_meta" [project] @@ -18,10 +19,10 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.8" dependencies = [ - "cugraph", + "cugraph==23.4.*", "numba>=0.56.2", - "numpy", -] + "numpy>=1.21", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. 
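Looking back at `workflow.py` above: rank 0 builds the cugraph graphstore once, publishes it on the Dask scheduler, and signals the remaining trainer ranks through a distributed Event; the other ranks block on the event and then fetch the published objects by name. A stripped-down sketch of that handshake, using the same dataset and event names as the script (the `build_fn` callable is a stand-in for the graphstore construction):

```python
from distributed import Client, Event as Dask_Event


def share_graphstore(client: Client, rank: int, build_fn):
    """Rank 0 builds and publishes the graphstore; other ranks wait and fetch it."""
    event = Dask_Event("cugraph_gs_creation_event")
    if rank == 0:
        gs = build_fn()
        client.publish_dataset(cugraph_gs=gs)
        event.set()
        return gs
    if not event.wait(timeout=1000):
        raise RuntimeError(f"Fetching cugraph_gs on rank {rank} timed out")
    return client.get_dataset("cugraph_gs")
```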
classifiers = [ "Intended Audience :: Developers", "Programming Language :: Python", diff --git a/python/cugraph-dgl/tests/nn/test_gatconv.py b/python/cugraph-dgl/tests/nn/test_gatconv.py index c743663bff3..332ba2f3657 100644 --- a/python/cugraph-dgl/tests/nn/test_gatconv.py +++ b/python/cugraph-dgl/tests/nn/test_gatconv.py @@ -28,7 +28,7 @@ options = { "idtype_int": [False, True], "max_in_degree": [None, 8], - "num_heads": [1, 3], + "num_heads": [1, 2, 3, 7], "to_block": [False, True], } diff --git a/python/cugraph-dgl/tests/nn/test_sageconv.py b/python/cugraph-dgl/tests/nn/test_sageconv.py index e75fc39ef92..6baa5fb5287 100644 --- a/python/cugraph-dgl/tests/nn/test_sageconv.py +++ b/python/cugraph-dgl/tests/nn/test_sageconv.py @@ -26,6 +26,7 @@ dgl = import_optional("dgl") options = { + "bias": [False, True], "idtype_int": [False, True], "max_in_degree": [None, 8], "to_block": [False, True], @@ -33,14 +34,13 @@ @pytest.mark.parametrize(",".join(options.keys()), product(*options.values())) -def test_SAGEConv_equality(idtype_int, max_in_degree, to_block): +def test_SAGEConv_equality(bias, idtype_int, max_in_degree, to_block): SAGEConv = dgl.nn.SAGEConv CuGraphSAGEConv = cugraph_dgl.nn.SAGEConv device = "cuda" in_feat, out_feat = 5, 2 - # TODO(tingyu66): re-enable bias after upgrading DGL to 1.0 in conda env - kwargs = {"aggregator_type": "mean", "bias": False} + kwargs = {"aggregator_type": "mean", "bias": bias} g = create_graph1().to(device) if idtype_int: g = g.int() @@ -57,7 +57,8 @@ def test_SAGEConv_equality(idtype_int, max_in_degree, to_block): with torch.no_grad(): conv2.linear.weight.data[:, :in_feat] = conv1.fc_neigh.weight.data conv2.linear.weight.data[:, in_feat:] = conv1.fc_self.weight.data - # conv2.linear.bias.data[:] = conv1.fc_self.bias.data + if bias: + conv2.linear.bias.data[:] = conv1.fc_self.bias.data out1 = conv1(g, feat) out2 = conv2(g, feat, max_in_degree=max_in_degree) @@ -76,4 +77,7 @@ def test_SAGEConv_equality(idtype_int, max_in_degree, to_block): conv2.linear.weight.grad[:, in_feat:], atol=1e-6, ) - # assert torch.allclose(conv1.fc_self.bias.grad, conv2.linear.bias.grad, atol=1e-6) + if bias: + assert torch.allclose( + conv1.fc_self.bias.grad, conv2.linear.bias.grad, atol=1e-6 + ) diff --git a/python/cugraph-pyg/cugraph_pyg/data/cugraph_store.py b/python/cugraph-pyg/cugraph_pyg/data/cugraph_store.py index f4f5f0c29e9..300c56fe6a7 100644 --- a/python/cugraph-pyg/cugraph_pyg/data/cugraph_store.py +++ b/python/cugraph-pyg/cugraph_pyg/data/cugraph_store.py @@ -13,7 +13,7 @@ from typing import Optional, Tuple, Any, Union, List, Dict -from enum import Enum +from enum import Enum, auto from dataclasses import dataclass from collections import defaultdict @@ -28,22 +28,17 @@ from cugraph.utilities.utils import import_optional, MissingModule -import dask.dataframe as dd -from dask.distributed import get_client - +dd = import_optional("dask.dataframe") +distributed = import_optional("dask.distributed") +dask_cudf = import_optional("dask_cudf") torch = import_optional("torch") Tensor = None if isinstance(torch, MissingModule) else torch.Tensor NdArray = None if isinstance(cupy, MissingModule) else cupy.ndarray +DaskCudfSeries = None if isinstance(dask_cudf, MissingModule) else dask_cudf.Series -TensorType = Union[Tensor, NdArray] - - -def _torch_as_array(a): - if len(a) == 0: - return torch.as_tensor(a.get()).to("cuda") - return torch.as_tensor(a, device="cuda") +TensorType = Union[Tensor, NdArray, cudf.Series, DaskCudfSeries] class EdgeLayout(Enum): @@ -54,7 +49,9 @@ 
class EdgeLayout(Enum): @dataclass class CuGraphEdgeAttr: - r"""Defines the attributes of an :obj:`GraphStore` edge.""" + """ + Defines the attributes of an :obj:`GraphStore` edge. + """ # The type of the edge edge_type: Optional[Any] @@ -98,12 +95,14 @@ def cast(cls, *args, **kwargs): return cls(*args, **kwargs) -_field_status = Enum("FieldStatus", "UNSET") +class _field_status(Enum): + UNSET = auto() @dataclass class CuGraphTensorAttr: - r"""Defines the attributes of a class:`FeatureStore` tensor; in particular, + """ + Defines the attributes of a class:`FeatureStore` tensor; in particular, all the parameters necessary to uniquely identify a tensor from the feature store. @@ -132,26 +131,34 @@ class CuGraphTensorAttr: # Convenience methods def is_set(self, key): - r"""Whether an attribute is set in :obj:`TensorAttr`.""" + """ + Whether an attribute is set in :obj:`TensorAttr`. + """ if key not in self.__dataclass_fields__: raise KeyError(key) attr = getattr(self, key) return type(attr) != _field_status or attr != _field_status.UNSET def is_fully_specified(self): - r"""Whether the :obj:`TensorAttr` has no unset fields.""" + """ + Whether the :obj:`TensorAttr` has no unset fields. + """ return all([self.is_set(key) for key in self.__dataclass_fields__]) def fully_specify(self): - r"""Sets all :obj:`UNSET` fields to :obj:`None`.""" + """ + Sets all :obj:`UNSET` fields to :obj:`None`. + """ for key in self.__dataclass_fields__: if not self.is_set(key): setattr(self, key, None) return self def update(self, attr): - r"""Updates an :class:`TensorAttr` with set attributes from another - :class:`TensorAttr`.""" + """ + Updates an :class:`TensorAttr` with set attributes from another + :class:`TensorAttr`. + """ for key in self.__dataclass_fields__: if attr.is_set(key): setattr(self, key, getattr(attr, key)) @@ -160,6 +167,7 @@ def update(self, attr): def cast(cls, *args, **kwargs): """ Casts to a CuGraphTensorAttr from a tuple, list, or dict + Returns ------- CuGraphTensorAttr @@ -188,32 +196,48 @@ class EXPERIMENTAL__CuGraphStore: # TODO add an "expensive check" argument that ensures the graph store # and feature store are valid and compatible with PyG. def __init__( - self, F, G, num_nodes_dict, backend: str = "torch", multi_gpu: bool = False + self, + F: cugraph.gnn.FeatureStore, + G: Union[Dict[str, Tuple[TensorType]], Dict[str, int]], + num_nodes_dict: Dict[str, int], + multi_gpu: bool = False, ): """ Constructs a new CuGraphStore from the provided arguments. + Parameters ---------- - F : cugraph.gnn.FeatureStore (Required) + F: cugraph.gnn.FeatureStore (Required) The feature store containing this graph's features. Typed lexicographic-ordered numbering convention should match that of the graph. - G : dict[tuple[tensor]] (Required) + + G: dict[str, tuple[TensorType]] or dict[str, int] (Required) Dictionary of edge indices. - i.e. { - ('author', 'writes', 'paper'): [[0,1,2],[2,0,1]], - ('author', 'affiliated', 'institution'): [[0,1],[0,1]] - } + Option 1 (graph in memory): + Pass the edge indices + i.e. { + ('author', 'writes', 'paper'): [[0,1,2],[2,0,1]], + ('author', 'affiliated', 'institution'): [[0,1],[0,1]] + } + Option 2 (graph not in memory): + Pass the number of edges + i.e. { + ('author', 'writes', 'paper'): 2, + ('author', 'affiliated', 'institution'): 2 + } + If the graph is not in memory, manipulating the edge indices + or calling sampling is not possible. This is for cases where + sampling has already been done and samples were written to disk. 
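The reworked constructor docstring distinguishes a graph held in memory (pass edge indices) from one that is not (pass edge counts only). A hedged usage sketch; the node counts, edges, and feature shapes are made up, and `CuGraphStore` is imported the way the examples added later in this patch do:

```python
import numpy as np
import torch
import cugraph
from cugraph_pyg.data import CuGraphStore

F = cugraph.gnn.FeatureStore(backend="torch")
F.add_data(torch.randn(3, 4), "paper", "x")  # 3 papers with 4-dim features

N = {"author": 2, "paper": 3, "institution": 2}

# Option 1: edge indices are available, so the store constructs the graph.
G_edges = {
    ("author", "writes", "paper"): (np.array([0, 1, 1]), np.array([2, 0, 1])),
    ("author", "affiliated", "institution"): (np.array([0, 1]), np.array([0, 1])),
}
store = CuGraphStore(F, G_edges, N)

# Option 2: only edge counts are known (samples already written to disk);
# no graph is built, and edge-index access or in-memory sampling will raise.
G_counts = {
    ("author", "writes", "paper"): 3,
    ("author", "affiliated", "institution"): 2,
}
store_no_graph = CuGraphStore(F, G_counts, N)
```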
Note: the internal cugraph representation will use offsetted vertex and edge ids. - num_nodes_dict : dict (Required) + + num_nodes_dict: dict (Required) A dictionary mapping each node type to the count of nodes of that type in the graph. - backend : ('torch', 'cupy') (Optional, default = 'torch') - The backend that manages tensors (default = 'torch') - Should usually be 'torch' ('torch', 'cupy' supported). - multi_gpu : bool (Optional, default = False) + + multi_gpu: bool (Optional, default = False) Whether the store should be backed by a multi-GPU graph. Requires dask to have been set up. """ @@ -221,39 +245,24 @@ def __init__( if None in G: raise ValueError("Unspecified edge types not allowed in PyG") - # FIXME drop the cupy backend and remove these checks (#2995) - if backend == "torch": - asarray = _torch_as_array - from torch import int64 as vertex_dtype - from torch import float32 as property_dtype - from torch import searchsorted as searchsorted - from torch import concatenate as concatenate - from torch import arange as arange - elif backend == "cupy": - from cupy import asarray - from cupy import int64 as vertex_dtype - from cupy import float32 as property_dtype - from cupy import searchsorted as searchsorted - from cupy import concatenate as concatenate - from cupy import arange as arange - else: - raise ValueError(f"Invalid backend {backend}.") - - self.__backend = backend - self.asarray = asarray - self.vertex_dtype = vertex_dtype - self.property_dtype = property_dtype - self.searchsorted = searchsorted - self.concatenate = concatenate - self.arange = arange + self.__vertex_dtype = torch.int64 self._tensor_attr_cls = CuGraphTensorAttr self._tensor_attr_dict = defaultdict(list) - # Infer number of edges from the edge index dict - num_edges_dict = { - pyg_can_edge_type: len(ei[0]) for pyg_can_edge_type, ei in G.items() - } + construct_graph = True + if isinstance(next(iter(G.values())), int): + # User has passed in the number of edges + # (not the actual edge index), so the number of edges + # does not need to be counted. + num_edges_dict = dict(G) # make sure the cugraph store owns this dict + construct_graph = False + else: + # User has passed in the actual edge index, so the + # number of edges needs to be counted. 
+ num_edges_dict = { + pyg_can_edge_type: len(ei[0]) for pyg_can_edge_type, ei in G.items() + } self.__infer_offsets(num_nodes_dict, num_edges_dict) self.__infer_existing_tensors(F) @@ -262,18 +271,33 @@ def __init__( self._edge_attr_cls = CuGraphEdgeAttr self.__features = F - self.__graph = self.__construct_graph(G, multi_gpu=multi_gpu) + self.__graph = None + self.__is_graph_owner = False + + if construct_graph: + if multi_gpu: + self.__graph = distributed.get_client().get_dataset( + "cugraph_graph", default=None + ) + + if self.__graph is None: + self.__graph = self.__construct_graph(G, multi_gpu=multi_gpu) + self.__is_graph_owner = True + self.__subgraphs = {} + def __del__(self): + if self.__is_graph_owner: + if isinstance(self.__graph._plc_graph, dict): + distributed.get_client().unpublish_dataset("cugraph_graph") + del self.__graph + def __make_offsets(self, input_dict): offsets = {} offsets["stop"] = [input_dict[v] for v in sorted(input_dict.keys())] - if self.__backend == "cupy": - offsets["stop"] = cupy.array(offsets["stop"]) - else: - offsets["stop"] = torch.tensor(offsets["stop"]) - if torch.has_cuda: - offsets["stop"] = offsets["stop"].cuda() + offsets["stop"] = torch.tensor(offsets["stop"]) + if torch.has_cuda: + offsets["stop"] = offsets["stop"].cuda() cumsum = offsets["stop"].cumsum(0) offsets["start"] = cumsum - offsets["stop"] @@ -304,7 +328,9 @@ def __infer_offsets( ) def __construct_graph( - self, edge_info: Dict[Tuple[str, str, str], List], multi_gpu: bool = False + self, + edge_info: Dict[Tuple[str, str, str], List[TensorType]], + multi_gpu: bool = False, ) -> cugraph.MultiGraph: """ This function takes edge information and uses it to construct @@ -314,7 +340,7 @@ def __construct_graph( Parameters ---------- - edge_info: Dict[Tuple[str, str, str]] (Required) + edge_info: Dict[Tuple[str, str, str], List[TensorType]] (Required) Input edge info dictionary, where keys are the canonical edge type and values are the edge index (src/dst). @@ -332,15 +358,30 @@ def __construct_graph( # numerical types correspond to the lexicographic order # of the keys, which is critical to converting the numeric # keys back to canonical edge types later. 
+ # FIXME don't always convert to host arrays (#3383) for pyg_can_edge_type in sorted(edge_info.keys()): src_type, _, dst_type = pyg_can_edge_type srcs, dsts = edge_info[pyg_can_edge_type] src_offset = np.searchsorted(self.__vertex_type_offsets["type"], src_type) srcs_t = srcs + int(self.__vertex_type_offsets["start"][src_offset]) + if isinstance(srcs_t, torch.Tensor): + srcs_t = srcs_t.cpu() + else: + if isinstance(srcs_t, dask_cudf.Series): + srcs_t = srcs_t.compute() + if isinstance(srcs_t, cudf.Series): + srcs_t = srcs_t.values_host dst_offset = np.searchsorted(self.__vertex_type_offsets["type"], dst_type) dsts_t = dsts + int(self.__vertex_type_offsets["start"][dst_offset]) + if isinstance(dsts_t, torch.Tensor): + dsts_t = dsts_t.cpu() + else: + if isinstance(dsts_t, dask_cudf.Series): + dsts_t = dsts_t.compute() + if isinstance(dsts_t, cudf.Series): + dsts_t = dsts_t.values_host edge_info_cg[pyg_can_edge_type] = (srcs_t, dsts_t) @@ -372,16 +413,16 @@ def __construct_graph( df = pandas.DataFrame( { - "src": na_src, - "dst": na_dst, - "w": np.zeros(len(na_src)), - "eid": np.arange(len(na_src)), - "etp": na_etp, + "src": pandas.Series(na_src), + "dst": pandas.Series(na_dst), + "w": pandas.Series(np.zeros(len(na_src))), + "eid": pandas.Series(np.arange(len(na_src))), + "etp": pandas.Series(na_etp), } ) if multi_gpu: - nworkers = len(get_client().scheduler_info()["workers"]) + nworkers = len(distributed.get_client().scheduler_info()["workers"]) df = dd.from_pandas(df, npartitions=nworkers).persist() df = df.map_partitions(cudf.DataFrame.from_pandas) else: @@ -396,15 +437,14 @@ def __construct_graph( source="src", destination="dst", edge_attr=["w", "eid", "etp"], - legacy_renum_only=True, ) + distributed.get_client().publish_dataset(cugraph_graph=graph) else: graph.from_cudf_edgelist( df, source="src", destination="dst", edge_attr=["w", "eid", "etp"], - legacy_renum_only=True, ) return graph @@ -413,31 +453,30 @@ def __construct_graph( def _edge_types_to_attrs(self) -> dict: return dict(self.__edge_types_to_attrs) - @property - def backend(self) -> str: - return self.__backend - @cached_property def _is_delayed(self): + if self.__graph is None: + return False return self.__graph.is_multi_gpu() def get_vertex_index(self, vtypes) -> TensorType: if isinstance(vtypes, str): vtypes = [vtypes] - # FIXME always use torch, drop cupy (#2995) - if self.__backend == "torch": - ix = torch.tensor([], dtype=torch.int64) - else: - ix = cupy.array([], dtype="int64") + ix = torch.tensor([], dtype=torch.int64) if isinstance(self.__vertex_type_offsets, dict): vtypes = np.searchsorted(self.__vertex_type_offsets["type"], vtypes) for vtype in vtypes: start = int(self.__vertex_type_offsets["start"][vtype]) stop = int(self.__vertex_type_offsets["stop"][vtype]) - ix = self.concatenate( - [ix, self.arange(start, stop + 1, 1, dtype=self.vertex_dtype)] + ix = torch.concatenate( + [ + ix, + torch.arange( + start, stop + 1, 1, dtype=self.__vertex_dtype, device="cuda" + ), + ] ) return ix @@ -482,12 +521,14 @@ def _get_edge_index(self, attr: CuGraphEdgeAttr) -> Tuple[TensorType, TensorType Currently, only COO form is supported. """ + if self.__graph is None: + raise ValueError("Graph is not in memory, cannot access edge index!") + if attr.layout != EdgeLayout.COO: raise TypeError("Only COO direct access is supported!") - # Currently, graph creation enforces that legacy_renum_only=True - # is always called, and the input vertex ids are always of integer - # type. 
Therefore, it is currently safe to assume that for MG + # Currently, graph creation enforces that input vertex ids are always of + # integer type. Therefore, it is currently safe to assume that for MG # graphs, the src/dst col names are renumbered_src/dst # and for SG graphs, the src/dst col names are src/dst. # This may change in the future if/when renumbering or the graph @@ -536,25 +577,11 @@ def _get_edge_index(self, attr: CuGraphEdgeAttr) -> Tuple[TensorType, TensorType if self._is_delayed: df = df.compute() - src = self.asarray(df[src_col_name]) - src_offset - dst = self.asarray(df[dst_col_name]) - dst_offset + src = torch.as_tensor(df[src_col_name], device="cuda") - src_offset + dst = torch.as_tensor(df[dst_col_name], device="cuda") - dst_offset - if self.__backend == "torch": - src = src.to(self.vertex_dtype) - dst = dst.to(self.vertex_dtype) - elif self.__backend == "cupy": - src = src.astype(self.vertex_dtype) - dst = dst.astype(self.vertex_dtype) - else: - raise TypeError(f"Invalid backend type {self.__backend}") - - if self.__backend == "torch": - src = src.to(self.vertex_dtype) - dst = dst.to(self.vertex_dtype) - else: - # self.__backend == 'cupy' - src = src.astype(self.vertex_dtype) - dst = dst.astype(self.vertex_dtype) + src = src.to(self.__vertex_dtype) + dst = dst.to(self.__vertex_dtype) if src.shape[0] != dst.shape[0]: raise IndexError("src and dst shape do not match!") @@ -562,7 +589,8 @@ def _get_edge_index(self, attr: CuGraphEdgeAttr) -> Tuple[TensorType, TensorType return (src, dst) def get_edge_index(self, *args, **kwargs) -> Tuple[TensorType, TensorType]: - r"""Synchronously gets an edge_index tensor from the materialized + """ + Synchronously gets an edge_index tensor from the materialized graph. Args: @@ -605,6 +633,9 @@ def _subgraph(self, edge_types: List[tuple] = None) -> cugraph.MultiGraph: if it has not already been extracted. """ + if self.__graph is None: + raise ValueError("Graph is not in memory, cannot get subgraph") + if edge_types is not None and set(edge_types) != set( self.__edge_types_to_attrs.keys() ): @@ -616,7 +647,9 @@ def _subgraph(self, edge_types: List[tuple] = None) -> cugraph.MultiGraph: return self.__graph - def _get_vertex_groups_from_sample(self, nodes_of_interest: cudf.Series) -> dict: + def _get_vertex_groups_from_sample( + self, nodes_of_interest: TensorType, is_sorted: bool = False + ) -> dict: """ Given a cudf (NOT dask_cudf) Series of nodes of interest, this method a single dictionary, noi_index. 
@@ -627,8 +660,8 @@ def _get_vertex_groups_from_sample(self, nodes_of_interest: cudf.Series) -> dict Output: {'red_vertex': [5, 8], 'blue_vertex': [2], 'green_vertex': [10, 11]} """ - - nodes_of_interest = self.asarray(nodes_of_interest.sort_values()) + if not is_sorted: + nodes_of_interest, _ = torch.sort(nodes_of_interest) noi_index = {} @@ -636,12 +669,14 @@ def _get_vertex_groups_from_sample(self, nodes_of_interest: cudf.Series) -> dict if len(vtypes) == 1: noi_index[vtypes[0]] = nodes_of_interest else: - noi_type_indices = self.searchsorted( - self.asarray(self.__vertex_type_offsets["stop"]), + noi_type_indices = torch.searchsorted( + torch.as_tensor(self.__vertex_type_offsets["stop"], device="cuda"), nodes_of_interest, ) - noi_types = vtypes.iloc[noi_type_indices].reset_index(drop=True) + noi_types = vtypes.iloc[cupy.asarray(noi_type_indices)].reset_index( + drop=True + ) noi_starts = self.__vertex_type_offsets["start"][noi_type_indices] noi_types = cudf.Series(noi_types, name="t").groupby("t").groups @@ -649,7 +684,7 @@ def _get_vertex_groups_from_sample(self, nodes_of_interest: cudf.Series) -> dict for type_name, ix in noi_types.items(): # store the renumbering for this vertex type # renumbered vertex id is the index of the old id - ix = self.asarray(ix) + ix = torch.as_tensor(ix, device="cuda") # subtract off the offsets noi_index[type_name] = nodes_of_interest[ix] - noi_starts[ix] @@ -701,14 +736,14 @@ def _get_renumbered_edge_groups_from_sample( t_pyg_type = list(self.__edge_types_to_attrs.values())[0].edge_type src_type, _, dst_type = t_pyg_type - sources = self.asarray(sampling_results.sources) + sources = torch.as_tensor(sampling_results.sources, device="cuda") src_id_table = noi_index[src_type] - src = self.searchsorted(src_id_table, sources) + src = torch.searchsorted(src_id_table, sources) row_dict[t_pyg_type] = src - destinations = self.asarray(sampling_results.destinations) + destinations = torch.as_tensor(sampling_results.destinations, device="cuda") dst_id_table = noi_index[dst_type] - dst = self.searchsorted(dst_id_table, destinations) + dst = torch.searchsorted(dst_id_table, destinations) col_dict[t_pyg_type] = dst else: # This will retrieve the single string representation. 
@@ -726,27 +761,31 @@ def _get_renumbered_edge_groups_from_sample( src_type, _, dst_type = pyg_can_edge_type # Get the de-offsetted sources - sources = self.asarray(sampling_results.sources.iloc[ix]) - sources_ix = self.searchsorted( + sources = torch.as_tensor( + sampling_results.sources.iloc[ix], device="cuda" + ) + sources_ix = torch.searchsorted( self.__vertex_type_offsets["stop"], sources ) sources -= self.__vertex_type_offsets["start"][sources_ix] # Create the row entry for this type src_id_table = noi_index[src_type] - src = self.searchsorted(src_id_table, sources) + src = torch.searchsorted(src_id_table, sources) row_dict[pyg_can_edge_type] = src # Get the de-offsetted destinations - destinations = self.asarray(sampling_results.destinations.iloc[ix]) - destinations_ix = self.searchsorted( + destinations = torch.as_tensor( + sampling_results.destinations.iloc[ix], device="cuda" + ) + destinations_ix = torch.searchsorted( self.__vertex_type_offsets["stop"], destinations ) destinations -= self.__vertex_type_offsets["start"][destinations_ix] # Create the col entry for this type dst_id_table = noi_index[dst_type] - dst = self.searchsorted(dst_id_table, destinations) + dst = torch.searchsorted(dst_id_table, destinations) col_dict[pyg_can_edge_type] = dst return row_dict, col_dict @@ -771,8 +810,7 @@ def create_named_tensor( vertex_type : str The vertex type associated with this new tensor property. dtype : numpy/cupy dtype (i.e. 'int32') or torch dtype (i.e. torch.float) - The datatype of the tensor. Should be a dtype appropriate - for this store's backend. Usually float32/float64. + The datatype of the tensor. Usually float32/float64. """ self._tensor_attr_dict[vertex_type].append( CuGraphTensorAttr( @@ -807,7 +845,9 @@ def __infer_existing_tensors(self, F) -> None: ) def get_all_tensor_attrs(self) -> List[CuGraphTensorAttr]: - r"""Obtains all tensor attributes stored in this feature store.""" + """ + Obtains all tensor attributes stored in this feature store. + """ # unpack and return the list of lists it = chain.from_iterable(self._tensor_attr_dict.values()) return [CuGraphTensorAttr.cast(c) for c in it] @@ -828,47 +868,41 @@ def _get_tensor(self, attr: CuGraphTensorAttr) -> TensorType: # allow indexing through cupy arrays if isinstance(idx, cupy.ndarray): idx = idx.get() + elif isinstance(idx, torch.Tensor): + idx = np.asarray(idx.cpu()) if cols is None: t = self.__features.get_data(idx, attr.group_name, attr.attr_name) - if self.backend == "torch": - t = t.cuda() + if isinstance(t, np.ndarray): + t = torch.as_tensor(t, device="cuda") else: - t = cupy.array(t) + t = t.cuda() + return t else: t = self.__features.get_data(idx, attr.group_name, cols[0]) if len(t.shape) == 1: - if self.backend == "torch": - t = torch.tensor([t]) - else: - t = cupy.array([t]) + t = torch.tensor([t]) for col in cols[1:]: u = self.__features.get_data(idx, attr.group_name, col) if len(u.shape) == 1: - if self.backend == "torch": - u = torch.tensor([u]) - else: - u = cupy.array([u]) + u = torch.tensor([u]) t = torch.concatenate([t, u]) - if self.backend == "torch": - t = t.cuda() - else: - t = cupy.array(t) + t = t.cuda() return t def _multi_get_tensor(self, attrs: List[CuGraphTensorAttr]) -> List[TensorType]: return [self._get_tensor(attr) for attr in attrs] def multi_get_tensor(self, attrs: List[CuGraphTensorAttr]) -> List[TensorType]: - r""" + """ Synchronously obtains a :class:`FeatureTensorType` object from the feature store for each tensor associated with the attributes in `attrs`. 
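The renumbering in `_get_renumbered_edge_groups_from_sample` above relies on the per-type vertex table (`noi_index`) being sorted, so `torch.searchsorted` maps each sampled global id to its local position in that table. A tiny CPU illustration with made-up ids:

```python
import torch

# Sorted vertex ids of one type that appeared in the sample (the noi_index table).
src_id_table = torch.tensor([2, 5, 8, 11])

# De-offsetted source ids coming back from the sampler for this edge type.
sources = torch.tensor([5, 2, 11, 5])

# Each source id is replaced by its index in the sorted table, producing the
# renumbered row of the output edge index.
src = torch.searchsorted(src_id_table, sources)
print(src)  # tensor([1, 0, 3, 1])
```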
@@ -911,7 +945,8 @@ def multi_get_tensor(self, attrs: List[CuGraphTensorAttr]) -> List[TensorType]: return [tensor for attr, tensor in zip(attrs, tensors)] def get_tensor(self, *args, **kwargs) -> TensorType: - r"""Synchronously obtains a :class:`FeatureTensorType` object from the + """ + Synchronously obtains a :class:`FeatureTensorType` object from the feature store. Feature store implementors guarantee that the call :obj:`get_tensor(put_tensor(tensor, attr), attr) = tensor` holds. @@ -950,10 +985,10 @@ def get_tensor(self, *args, **kwargs) -> TensorType: return tensor def _get_tensor_size(self, attr: CuGraphTensorAttr) -> Union[List, int]: - return self._get_tensor(attr).size + return self._get_tensor(attr).size() def get_tensor_size(self, *args, **kwargs) -> Union[List, int]: - r""" + """ Obtains the size of a tensor given its attributes, or :obj:`None` if the tensor does not exist. """ diff --git a/python/cugraph-pyg/cugraph_pyg/examples/README.md b/python/cugraph-pyg/cugraph_pyg/examples/README.md new file mode 100644 index 00000000000..572111ac26a --- /dev/null +++ b/python/cugraph-pyg/cugraph_pyg/examples/README.md @@ -0,0 +1,11 @@ +This directory contains examples for running cugraph-pyg training. + +For single-GPU (SG) scripts, no special configuration is required. + +For multi-GPU (MG) scripts, dask must be started first in a separate process. +To do this, the `start_dask.sh` script has been provided. This scripts starts +a dask scheduler and dask workers. To select the GPUs and amount of memory +allocated to dask per GPU, the `CUDA_VISIBLE_DEVICES` and `WORKER_RMM_POOL_SIZE` +arguments in that script can be modified. +To connect to dask, the scheduler JSON file must be provided. This can be done +using the `--dask_scheduler_file` argument in the mg python script being run. diff --git a/python/cugraph-pyg/cugraph_pyg/examples/graph_sage_mg.py b/python/cugraph-pyg/cugraph_pyg/examples/graph_sage_mg.py new file mode 100644 index 00000000000..9c0adaad879 --- /dev/null +++ b/python/cugraph-pyg/cugraph_pyg/examples/graph_sage_mg.py @@ -0,0 +1,432 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from ogb.nodeproppred import NodePropPredDataset + +import time +import argparse +import gc + +import torch +import numpy as np + +from torch_geometric.nn import CuGraphSAGEConv + +import torch.nn as nn +import torch.nn.functional as F + +import torch.distributed as td +import torch.multiprocessing as tmp +from torch.nn.parallel import DistributedDataParallel as ddp + +from typing import List + + +class CuGraphSAGE(nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, num_layers): + super().__init__() + + self.convs = torch.nn.ModuleList() + self.convs.append(CuGraphSAGEConv(in_channels, hidden_channels)) + for _ in range(num_layers - 1): + conv = CuGraphSAGEConv(hidden_channels, hidden_channels) + self.convs.append(conv) + + self.lin = nn.Linear(hidden_channels, out_channels) + + def forward(self, x, edge, size): + edge_csc = CuGraphSAGEConv.to_csc(edge, (size[0], size[0])) + for conv in self.convs: + x = conv(x, edge_csc)[: size[1]] + x = F.relu(x) + x = F.dropout(x, p=0.5) + + return self.lin(x) + + +def enable_cudf_spilling(): + import cudf + + cudf.set_option("spill", True) + + +def init_pytorch_worker(rank, devices, manager_ip, manager_port) -> None: + import cupy + import rmm + + device_id = devices[rank] + + rmm.reinitialize( + devices=[device_id], + pool_allocator=False, + ) + + # torch.cuda.change_current_allocator(rmm.rmm_torch_allocator) + # cupy.cuda.set_allocator(rmm.rmm_cupy_allocator) + + cupy.cuda.Device(device_id).use() + torch.cuda.set_device(device_id) + + # Pytorch training worker initialization + dist_init_method = f"tcp://{manager_ip}:{manager_port}" + + torch.distributed.init_process_group( + backend="nccl", + init_method=dist_init_method, + world_size=len(devices), + rank=rank, + ) + + # enable_cudf_spilling() + + +def start_cugraph_dask_client(rank, dask_scheduler_file): + print( + "Connecting to dask... " + "(warning: this may take a while depending on your configuration)" + ) + start_time_connect_dask = time.perf_counter_ns() + from distributed import Client + from cugraph.dask.comms import comms as Comms + + client = Client(scheduler_file=dask_scheduler_file) + Comms.initialize(p2p=True) + + end_time_connect_dask = time.perf_counter_ns() + print( + f"Successfully connected to dask on rank {rank}, took " + f"{(end_time_connect_dask - start_time_connect_dask) / 1e9:3.4f} s" + ) + return client + + +def stop_cugraph_dask_client(): + from cugraph.dask.comms import comms as Comms + + Comms.destroy() + + from dask.distributed import get_client + + get_client().close() + + +def train( + rank, + torch_devices: List[int], + manager_ip: str, + manager_port: int, + dask_scheduler_file: str, + num_epochs: int, + features_on_gpu=True, +) -> None: + """ + Parameters + ---------- + device: int + The CUDA device where the model, graph data, and node labels will be stored. + features_on_gpu: bool + Whether to store a replica of features on each worker's GPU. If False, + all features will be stored on the CPU. 
+ """ + + start_time_preprocess = time.perf_counter_ns() + + world_size = len(torch_devices) + device_id = torch_devices[rank] + features_device = device_id if features_on_gpu else "cpu" + init_pytorch_worker(rank, torch_devices, manager_ip, manager_port) + td.barrier() + + client = start_cugraph_dask_client(rank, dask_scheduler_file) + + from distributed import Event as Dask_Event + + event = Dask_Event("cugraph_store_creation_event") + download_event = Dask_Event("dataset_download_event") + + td.barrier() + + import cugraph + from cugraph_pyg.data import CuGraphStore + from cugraph_pyg.loader import CuGraphNeighborLoader + + if rank == 0: + print("Rank 0 downloading dataset") + dataset = NodePropPredDataset(name="ogbn-mag") + data = dataset[0] + download_event.set() + print("Dataset downloaded") + else: + if download_event.wait(timeout=1000): + print(f"Rank {rank} loading dataset") + dataset = NodePropPredDataset(name="ogbn-mag") + data = dataset[0] + print(f"Rank {rank} loaded dataset successfully") + + ei = data[0]["edge_index_dict"][("paper", "cites", "paper")] + G = { + ("paper", "cites", "paper"): np.stack( + [np.concatenate([ei[0], ei[1]]), np.concatenate([ei[1], ei[0]])] + ) + } + N = {"paper": data[0]["num_nodes_dict"]["paper"]} + + fs = cugraph.gnn.FeatureStore(backend="torch") + + fs.add_data( + torch.as_tensor(data[0]["node_feat_dict"]["paper"], device=features_device), + "paper", + "x", + ) + + fs.add_data(torch.as_tensor(data[1]["paper"].T[0], device=device_id), "paper", "y") + + num_papers = data[0]["num_nodes_dict"]["paper"] + + if rank == 0: + train_perc = 0.1 + all_train_nodes = torch.randperm(num_papers) + all_train_nodes = all_train_nodes[: int(train_perc * num_papers)] + train_nodes = all_train_nodes[: int(len(all_train_nodes) / world_size)] + + train_mask = torch.full((num_papers,), -1, device=device_id) + train_mask[train_nodes] = 1 + fs.add_data(train_mask, "paper", "train") + + print(f"Rank {rank} finished loading graph and feature data") + + if rank == 0: + print("Rank 0 creating its cugraph store and initializing distributed graph") + # Rank 0 will initialize the distributed cugraph graph. + cugraph_store_create_start = time.perf_counter_ns() + print("G:", G[("paper", "cites", "paper")].shape) + cugraph_store = CuGraphStore(fs, G, N, multi_gpu=True) + cugraph_store_create_end = time.perf_counter_ns() + print( + "cuGraph Store created on rank 0 in " + f"{(cugraph_store_create_end - cugraph_store_create_start) / 1e9:3.4f} s" + ) + client.publish_dataset(train_nodes=all_train_nodes) + event.set() + print("Rank 0 done with cugraph store creation") + else: + if event.wait(timeout=1000): + print(f"Rank {rank} creating cugraph store") + train_nodes = client.get_dataset("train_nodes") + train_nodes = train_nodes[ + int(rank * len(train_nodes) / world_size) : int( + (rank + 1) * len(train_nodes) / world_size + ) + ] + + train_mask = torch.full((num_papers,), -1, device=device_id) + train_mask[train_nodes] = 1 + fs.add_data(train_mask, "paper", "train") + + # Will automatically use the stored distributed cugraph graph on rank 0. 
+ cugraph_store_create_start = time.perf_counter_ns() + cugraph_store = CuGraphStore(fs, G, N, multi_gpu=True) + cugraph_store_create_end = time.perf_counter_ns() + print( + f"Rank {rank} created cugraph store in " + f"{(cugraph_store_create_end - cugraph_store_create_start) / 1e9:3.4f}" + " s" + ) + print(f"Rank {rank} done with cugraph store creation") + + end_time_preprocess = time.perf_counter_ns() + print(f"rank {rank}: train {train_nodes.shape}", flush=True) + print( + f"rank {rank}: all preprocessing took" + f" {(end_time_preprocess - start_time_preprocess) / 1e9:3.4f}", + flush=True, + ) + td.barrier() + model = ( + CuGraphSAGE(in_channels=128, hidden_channels=64, out_channels=349, num_layers=3) + .to(torch.float32) + .to(device_id) + ) + model = ddp(model, device_ids=[device_id], output_device=device_id) + td.barrier() + + optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + + for epoch in range(num_epochs): + start_time_train = time.perf_counter_ns() + model.train() + + start_time_loader = time.perf_counter_ns() + cugraph_bulk_loader = CuGraphNeighborLoader( + cugraph_store, + train_nodes, + batch_size=250, + num_neighbors=[10, 10, 10], + seeds_per_call=1000, + batches_per_partition=2, + replace=False, + ) + end_time_loader = time.perf_counter_ns() + total_time_loader = (end_time_loader - start_time_loader) / 1e9 + + total_loss = 0 + num_batches = 0 + + print(f"rank {rank} starting epoch {epoch}") + with td.algorithms.join.Join([model]): + total_time_sample = 0 + total_time_forward = 0 + total_time_backward = 0 + + start_time_sample = time.perf_counter_ns() + for iter_i, hetero_data in enumerate(cugraph_bulk_loader): + end_time_sample = time.perf_counter_ns() + total_time_sample += (end_time_sample - start_time_sample) / 1e9 + num_batches += 1 + + if iter_i % 20 == 0: + print(f"iteration {iter_i}") + + # train + train_mask = hetero_data.train_dict["paper"] + y_true = hetero_data.y_dict["paper"] + + start_time_forward = time.perf_counter_ns() + y_pred = model( + hetero_data.x_dict["paper"].to(device_id).to(torch.float32), + hetero_data.edge_index_dict[("paper", "cites", "paper")].to( + device_id + ), + (len(y_true), len(y_true)), + ) + end_time_forward = time.perf_counter_ns() + total_time_forward += (end_time_forward - start_time_forward) / 1e9 + + y_true = F.one_hot( + y_true[train_mask].to(torch.int64), num_classes=349 + ).to(torch.float32) + + y_pred = y_pred[train_mask] + + loss = F.cross_entropy(y_pred, y_true) + + start_time_backward = time.perf_counter_ns() + optimizer.zero_grad() + loss.backward() + optimizer.step() + end_time_backward = time.perf_counter_ns() + total_time_backward += (end_time_backward - start_time_backward) / 1e9 + + total_loss += loss.item() + + del y_true + del y_pred + del loss + del hetero_data + gc.collect() + + start_time_sample = time.perf_counter_ns() + + end_time_train = time.perf_counter_ns() + print( + f"epoch {epoch} " + f"total time: {(end_time_train - start_time_train) / 1e9:3.4f} s" + f"\nloader create time per batch: {total_time_loader / num_batches} s" + f"\nsampling/load time per batch: {total_time_sample / num_batches} s" + f"\nforward time per batch: {total_time_forward / num_batches} s" + f"\nbackward time per batch: {total_time_backward / num_batches} s" + f"\nnum batches: {num_batches}" + ) + print(f"loss after epoch {epoch}: {total_loss / num_batches}") + + td.barrier() + if rank == 0: + print("DONE", flush=True) + client.unpublish_dataset("train_nodes") + event.clear() + + td.destroy_process_group() + + +def parse_args(): 
+ parser = argparse.ArgumentParser() + parser.add_argument( + "--torch_devices", + type=str, + default="0,1", + help="GPU to allocate to pytorch for model, graph data, and node label storage", + required=False, + ) + + parser.add_argument( + "--num_epochs", + type=int, + default=1, + help="Number of training epochs", + required=False, + ) + + parser.add_argument( + "--features_on_gpu", + type=bool, + default=True, + help="Whether to store the features on each worker's GPU", + required=False, + ) + + parser.add_argument( + "--torch_manager_ip", + type=str, + default="127.0.0.1", + help="The torch distributed manager ip address", + required=False, + ) + + parser.add_argument( + "--torch_manager_port", + type=str, + default="12346", + help="The torch distributed manager port", + required=False, + ) + + parser.add_argument( + "--dask_scheduler_file", + type=str, + help="The path to the dask scheduler file", + required=True, + ) + + return parser.parse_args() + + +def main(): + args = parse_args() + + torch_devices = [int(d) for d in args.torch_devices.split(",")] + + train_args = ( + torch_devices, + args.torch_manager_ip, + args.torch_manager_port, + args.dask_scheduler_file, + args.num_epochs, + args.features_on_gpu, + ) + + tmp.spawn(train, args=train_args, nprocs=len(torch_devices)) + + +if __name__ == "__main__": + main() diff --git a/python/cugraph-pyg/cugraph_pyg/examples/graph_sage_sg.py b/python/cugraph-pyg/cugraph_pyg/examples/graph_sage_sg.py new file mode 100644 index 00000000000..82f5e7ea67d --- /dev/null +++ b/python/cugraph-pyg/cugraph_pyg/examples/graph_sage_sg.py @@ -0,0 +1,215 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import time +import argparse +import gc + +import torch + +from torch_geometric.nn import CuGraphSAGEConv + +import torch.nn as nn +import torch.nn.functional as F + +from typing import Union + + +class CuGraphSAGE(nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, num_layers): + super().__init__() + + self.convs = torch.nn.ModuleList() + self.convs.append(CuGraphSAGEConv(in_channels, hidden_channels)) + for _ in range(num_layers - 1): + conv = CuGraphSAGEConv(hidden_channels, hidden_channels) + self.convs.append(conv) + + self.lin = nn.Linear(hidden_channels, out_channels) + + def forward(self, x, edge, size): + edge_csc = CuGraphSAGEConv.to_csc(edge, (size[0], size[0])) + for conv in self.convs: + x = conv(x, edge_csc)[: size[1]] + x = F.relu(x) + x = F.dropout(x, p=0.5) + + return self.lin(x) + + +def init_pytorch_worker(device_id: int) -> None: + import cupy + import rmm + + rmm.reinitialize( + devices=[device_id], + pool_allocator=False, + ) + + cupy.cuda.Device(device_id).use() + torch.cuda.set_device(device_id) + + +def train(device: int, features_device: Union[str, int] = "cpu", num_epochs=2) -> None: + """ + Parameters + ---------- + device: int + The CUDA device where the model, graph data, and node labels will be stored. 
+ features_device: Union[str, int] + The device (CUDA device or CPU) where features will be stored. + """ + + init_pytorch_worker(device) + + import cugraph + from cugraph_pyg.data import CuGraphStore + from cugraph_pyg.loader import CuGraphNeighborLoader + + from ogb.nodeproppred import NodePropPredDataset + + dataset = NodePropPredDataset(name="ogbn-mag") + data = dataset[0] + + G = data[0]["edge_index_dict"] + N = data[0]["num_nodes_dict"] + + fs = cugraph.gnn.FeatureStore(backend="torch") + + fs.add_data( + torch.as_tensor(data[0]["node_feat_dict"]["paper"], device=features_device), + "paper", + "x", + ) + + fs.add_data(torch.as_tensor(data[1]["paper"].T[0], device=device), "paper", "y") + + num_papers = data[0]["num_nodes_dict"]["paper"] + train_perc = 0.1 + train_nodes = torch.randperm(num_papers) + train_nodes = train_nodes[: int(train_perc * num_papers)] + train_mask = torch.full((num_papers,), -1, device=device) + train_mask[train_nodes] = 1 + fs.add_data(train_mask, "paper", "train") + + cugraph_store = CuGraphStore(fs, G, N) + + model = ( + CuGraphSAGE(in_channels=128, hidden_channels=64, out_channels=349, num_layers=3) + .to(torch.float32) + .to(device) + ) + + optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + + for epoch in range(num_epochs): + start_time_train = time.perf_counter_ns() + model.train() + + cugraph_bulk_loader = CuGraphNeighborLoader( + cugraph_store, train_nodes, batch_size=500, num_neighbors=[10, 25] + ) + + total_loss = 0 + num_batches = 0 + + # This context manager will handle different # batches per rank + # barrier() cannot do this since the number of ops per rank is + # different. It essentially acts like barrier would if the + # number of ops per rank was the same. + for epoch in range(num_epochs): + for iter_i, hetero_data in enumerate(cugraph_bulk_loader): + num_batches += 1 + if iter_i % 20 == 0: + print(f"iteration {iter_i}") + + # train + train_mask = hetero_data.train_dict["paper"] + y_true = hetero_data.y_dict["paper"] + + y_pred = model( + hetero_data.x_dict["paper"].to(device).to(torch.float32), + hetero_data.edge_index_dict[("paper", "cites", "paper")].to(device), + (len(y_true), len(y_true)), + ) + + y_true = F.one_hot( + y_true[train_mask].to(torch.int64), num_classes=349 + ).to(torch.float32) + + y_pred = y_pred[train_mask] + + loss = F.cross_entropy(y_pred, y_true) + + optimizer.zero_grad() + loss.backward() + optimizer.step() + total_loss += loss.item() + + del y_true + del y_pred + del loss + del hetero_data + gc.collect() + + end_time_train = time.perf_counter_ns() + print( + f"epoch {epoch} time: " + f"{(end_time_train - start_time_train) / 1e9:3.4f} s" + ) + print(f"loss after epoch {epoch}: {total_loss / num_batches}") + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--device", + type=int, + default=0, + help="GPU to allocate to pytorch for model, graph data, and node label storage", + required=False, + ) + + parser.add_argument( + "--features_device", + type=str, + default="0", + help="Device to allocate to pytorch for feature storage", + required=False, + ) + + parser.add_argument( + "--num_epochs", + type=int, + default=1, + help="Number of training epochs", + required=False, + ) + + return parser.parse_args() + + +def main(): + args = parse_args() + + try: + features_device = int(args.features_device) + except ValueError: + features_device = args.features_device + + train(args.device, features_device, args.num_epochs) + + +if __name__ == "__main__": + main() diff --git 
a/python/cugraph/cugraph/community/ecg.pxd b/python/cugraph-pyg/cugraph_pyg/examples/start_dask.sh old mode 100644 new mode 100755 similarity index 54% rename from python/cugraph/cugraph/community/ecg.pxd rename to python/cugraph-pyg/cugraph_pyg/examples/start_dask.sh index 4f13237eac7..54c82f81298 --- a/python/cugraph/cugraph/community/ecg.pxd +++ b/python/cugraph-pyg/cugraph_pyg/examples/start_dask.sh @@ -1,4 +1,6 @@ -# Copyright (c) 2019-2021, NVIDIA CORPORATION. +#!/bin/bash + +# Copyright (c) 2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,19 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -# cython: profile=False -# distutils: language = c++ -# cython: embedsignature = True -# cython: language_level = 3 - -from cugraph.structure.graph_primtypes cimport * - - -cdef extern from "cugraph/algorithms.hpp" namespace "cugraph": - - cdef void ecg[VT,ET,WT]( - const handle_t &handle, - const GraphCSRView[VT,ET,WT] &graph, - WT min_weight, - VT ensemble_size, - VT* ecg_parts) except + +WORKER_RMM_POOL_SIZE=14G \ +CUDA_VISIBLE_DEVICES=0,1 \ +SCHEDULER_FILE=$(pwd)/scheduler.json \ +../../../../mg_utils/run-dask-process.sh \ + scheduler workers \ + --tcp diff --git a/python/cugraph-pyg/cugraph_pyg/loader/cugraph_node_loader.py b/python/cugraph-pyg/cugraph_pyg/loader/cugraph_node_loader.py index 2668f5598e1..fa02ac78f43 100644 --- a/python/cugraph-pyg/cugraph_pyg/loader/cugraph_node_loader.py +++ b/python/cugraph-pyg/cugraph_pyg/loader/cugraph_node_loader.py @@ -35,12 +35,14 @@ def __init__( self, feature_store: CuGraphStore, graph_store: CuGraphStore, - all_indices: Sequence, - batch_size: int, + all_indices: Union[Sequence, int], + batch_size: int = 0, shuffle=False, edge_types: Sequence[Tuple[str]] = None, directory=None, rank=0, + starting_batch_id=0, + batches_per_partition=100, # Sampler args num_neighbors: List[int] = [1, 1], replace: bool = True, @@ -59,11 +61,15 @@ def __init__( graph_store: CuGraphStore The graph store containing the graph structure. - all_indices: Tensor + all_indices: Union[Tensor, int] The input nodes associated with this sampler. + If this is an integer N , this loader will load N batches + from disk rather than performing sampling in memory. batch_size: int The number of input nodes per sampling batch. + Generally required unless loading already-sampled + data from disk. shuffle: bool (optional, default=False) Whether to shuffle the input indices. @@ -81,12 +87,35 @@ def __init__( rank: int (optional, default=0) The rank of the current worker. Should be provided when there are multiple workers. + + starting_batch_id: int (optional, default=0) + The starting id for each batch. Defaults to 0. + Generally used when loading previously-sampled + batches from disk. + + batches_per_partition: int (optional, default=100) + The number of batches in each output partition. + Defaults to 100. Gets passed to the bulk + sampler if there is one; otherwise, this argument + is used to determine which files to read. 
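Taken together, the new `all_indices`, `starting_batch_id`, and `batches_per_partition` parameters let the loader either sample in memory or replay batches that were already written to disk. A hedged sketch of the two modes; `feature_store` and `graph_store` are assumed to be CuGraphStore instances, and the path, seed tensor, and batch counts are illustrative:

```python
from cugraph_pyg.loader.cugraph_node_loader import (
    EXPERIMENTAL__BulkSampleLoader as BulkSampleLoader,
)

# Mode 1: sample in memory; `train_nodes` is a tensor/array of seed nodes.
loader = BulkSampleLoader(
    feature_store, graph_store, train_nodes,
    batch_size=500, num_neighbors=[10, 25],
)

# Mode 2: replay previously written samples; `all_indices` is the number of
# batches on disk, and `directory`/`starting_batch_id` point at the parquet
# partitions produced by an earlier sampling run.
loader_from_disk = BulkSampleLoader(
    feature_store, graph_store, 40,
    directory="/path/to/samples", starting_batch_id=0, batches_per_partition=100,
)
```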
""" self.__feature_store = feature_store self.__graph_store = graph_store self.__rank = rank - self.__next_batch = 0 + self.__next_batch = starting_batch_id + self.__end_exclusive = starting_batch_id + self.__batches_per_partition = batches_per_partition + self.__starting_batch_id = starting_batch_id + + if isinstance(all_indices, int): + # Will be loading from disk + self.__num_batches = all_indices + self.__directory = directory + return + + if batch_size is None or batch_size < 1: + raise ValueError("Batch size must be >= 1") self.__directory = tempfile.TemporaryDirectory(dir=directory) @@ -97,9 +126,9 @@ def __init__( rank=rank, fanout_vals=num_neighbors, with_replacement=replace, + batches_per_partition=self.__batches_per_partition, **kwargs, ) - self.__batches_per_partition = bulk_sampler.batches_per_partition # Make sure indices are in cupy all_indices = cupy.asarray(all_indices) @@ -114,7 +143,6 @@ def __init__( # Split into batches all_indices = cupy.split(all_indices, len(all_indices) // batch_size) - print("all_indices:", all_indices) self.__num_batches = 0 for batch_num, batch_i in enumerate(all_indices): @@ -123,7 +151,9 @@ def __init__( cudf.DataFrame( { "start": batch_i, - "batch": cupy.full(batch_size, batch_num, dtype="int32"), + "batch": cupy.full( + batch_size, batch_num + starting_batch_id, dtype="int32" + ), } ), start_col_name="start", @@ -132,29 +162,46 @@ def __init__( bulk_sampler.flush() - self.__end_exclusive = 0 - def __next__(self): # Quit iterating if there are no batches left - if self.__next_batch >= self.__num_batches: + if self.__next_batch >= self.__num_batches + self.__starting_batch_id: raise StopIteration # Load the next set of sampling results if necessary if self.__next_batch >= self.__end_exclusive: # Read the next parquet file into memory - rank_path = os.path.join(self.__directory.name, f"rank={self.__rank}") + dir_path = ( + self.__directory + if isinstance(self.__directory, str) + else self.__directory.name + ) + rank_path = os.path.join(dir_path, f"rank={self.__rank}") + file_end_batch_incl = min( + self.__end_exclusive + self.__batches_per_partition - 1, + self.__starting_batch_id + self.__num_batches - 1, + ) parquet_path = os.path.join( rank_path, - f"batch={self.__end_exclusive}" - f"-{self.__end_exclusive + self.__batches_per_partition - 1}.parquet", + f"batch={self.__end_exclusive}" f"-{file_end_batch_incl}.parquet", ) self.__end_exclusive += self.__batches_per_partition + + columns = { + "sources": "int64", + "destinations": "int64", + # 'edge_id':'int64', + "edge_type": "int32", + "batch_id": "int32", + # 'hop_id':'int32' + } self.__data = cudf.read_parquet(parquet_path) + self.__data = self.__data[list(columns.keys())].astype(columns) # Pull the next set of sampling results out of the dataframe in memory f = self.__data["batch_id"] == self.__next_batch + sampler_output = _sampler_output_from_sampling_results( self.__data[f], self.__graph_store ) @@ -162,7 +209,8 @@ def __next__(self): # Get ready for next iteration # If there is no next iteration, make sure results are deleted self.__next_batch += 1 - if self.__next_batch >= self.__num_batches: + if self.__next_batch >= self.__num_batches + self.__starting_batch_id: + # Won't delete a non-temp dir (since it would just be deleting a string) del self.__directory # Get and return the sampled subgraph @@ -177,7 +225,7 @@ def __next__(self): edge_dict, ) else: - return torch_geometric.loader.utils.filter_custom_store( + out = torch_geometric.loader.utils.filter_custom_store( 
self.__feature_store, self.__graph_store, sampler_output.node, @@ -186,6 +234,11 @@ def __next__(self): sampler_output.edge, ) + return out + + def __iter__(self): + return self + class EXPERIMENTAL__CuGraphNeighborLoader: def __init__( @@ -228,10 +281,12 @@ def __init__( self.inner_loader_args = kwargs def __iter__(self): - return EXPERIMENTAL__BulkSampleLoader( + self.current_loader = EXPERIMENTAL__BulkSampleLoader( self.__feature_store, self.__graph_store, self.__input_nodes, self.__batch_size, **self.inner_loader_args, ) + + return self.current_loader diff --git a/python/cugraph-pyg/cugraph_pyg/sampler/cugraph_sampler.py b/python/cugraph-pyg/cugraph_pyg/sampler/cugraph_sampler.py index 325a6f78c4d..b6ec932abbe 100644 --- a/python/cugraph-pyg/cugraph_pyg/sampler/cugraph_sampler.py +++ b/python/cugraph-pyg/cugraph_pyg/sampler/cugraph_sampler.py @@ -54,12 +54,20 @@ def _sampler_output_from_sampling_results( HeteroSamplerOutput, if PyG is installed. dict, if PyG is not installed. """ - nodes_of_interest = cudf.concat( - [sampling_results.destinations, sampling_results.sources] - ).unique() + nodes_of_interest = torch.unique( + torch.stack( + [ + torch.as_tensor(sampling_results.destinations, device="cuda"), + torch.as_tensor(sampling_results.sources, device="cuda"), + ] + ) + ) + # unique will always sort this array # Get the grouped node index (for creating the renumbered grouped edge index) - noi_index = graph_store._get_vertex_groups_from_sample(nodes_of_interest) + noi_index = graph_store._get_vertex_groups_from_sample( + nodes_of_interest, is_sorted=True + ) # Get the new edge index (by type as expected for HeteroData) # FIXME handle edge ids/types after the C++ updates @@ -160,14 +168,6 @@ def __neighbor_sample( metadata=None, **kwargs, ) -> Union[dict, HeteroSamplerOutput]: - backend = self.__graph_store.backend - if backend != self.__feature_store.backend: - raise ValueError( - f"Graph store backend {backend}" - f"does not match feature store " - f"backend {self.__feature_store.backend}" - ) - if not directed: raise ValueError("Undirected sampling not currently supported") @@ -180,7 +180,7 @@ def __neighbor_sample( # FIXME support variable num neighbors per edge type num_neighbors = list(num_neighbors.values())[0] - if backend == "torch" and not index.is_cuda: + if not index.is_cuda: index = index.cuda() G = self.__graph_store._subgraph(edge_types) diff --git a/python/cugraph-pyg/cugraph_pyg/tests/conftest.py b/python/cugraph-pyg/cugraph_pyg/tests/conftest.py index 624709799c1..2aa6221cc3f 100644 --- a/python/cugraph-pyg/cugraph_pyg/tests/conftest.py +++ b/python/cugraph-pyg/cugraph_pyg/tests/conftest.py @@ -21,6 +21,7 @@ from cugraph.dask.common.mg_utils import get_visible_devices from cugraph.testing.mg_utils import stop_dask_client +import torch import numpy as np import cudf from cugraph.gnn import FeatureStore @@ -80,14 +81,14 @@ def karate_gnn(): el.dst = el.dst.astype("int64") all_vertices = np.array_split(cudf.concat([el.src, el.dst]).unique().values_host, 2) - F = FeatureStore(backend="numpy") + F = FeatureStore(backend="torch") F.add_data( - np.arange(len(all_vertices[0]), dtype="float32") * 31, + torch.arange(len(all_vertices[0]), dtype=torch.float32) * 31, "type0", "prop0", ) F.add_data( - np.arange(len(all_vertices[1]), dtype="float32") * 41, + torch.arange(len(all_vertices[1]), dtype=torch.float32) * 41, "type1", "prop0", ) @@ -116,8 +117,8 @@ def karate_gnn(): G = { (src_type, edge_type, dst_type): ( - elx["src"].values_host - offsets[src_type], - 
elx["dst"].values_host - offsets[dst_type], + torch.tensor(elx["src"].values_host - offsets[src_type]), + torch.tensor(elx["dst"].values_host - offsets[dst_type]), ) for (src_type, edge_type, dst_type), elx in G.items() } @@ -129,17 +130,19 @@ def karate_gnn(): def basic_graph_1(): G = { ("vt1", "pig", "vt1"): [ - np.array([0, 0, 1, 2, 2, 3]), - np.array([1, 2, 4, 3, 4, 1]), + torch.tensor([0, 0, 1, 2, 2, 3]), + torch.tensor([1, 2, 4, 3, 4, 1]), ] } N = {"vt1": 5} F = FeatureStore() - F.add_data(np.array([100, 200, 300, 400, 500]), type_name="vt1", feat_name="prop1") + F.add_data( + torch.tensor([100, 200, 300, 400, 500]), type_name="vt1", feat_name="prop1" + ) - F.add_data(np.array([5, 4, 3, 2, 1]), type_name="vt1", feat_name="prop2") + F.add_data(torch.tensor([5, 4, 3, 2, 1]), type_name="vt1", feat_name="prop2") return F, G, N @@ -147,20 +150,22 @@ def basic_graph_1(): @pytest.fixture def multi_edge_graph_1(): G = { - ("vt1", "pig", "vt1"): [np.array([0, 2, 3, 1]), np.array([1, 3, 1, 4])], - ("vt1", "dog", "vt1"): [np.array([0, 3, 4]), np.array([2, 2, 3])], + ("vt1", "pig", "vt1"): [torch.tensor([0, 2, 3, 1]), torch.tensor([1, 3, 1, 4])], + ("vt1", "dog", "vt1"): [torch.tensor([0, 3, 4]), torch.tensor([2, 2, 3])], ("vt1", "cat", "vt1"): [ - np.array([1, 2, 2]), - np.array([4, 3, 4]), + torch.tensor([1, 2, 2]), + torch.tensor([4, 3, 4]), ], } N = {"vt1": 5} F = FeatureStore() - F.add_data(np.array([100, 200, 300, 400, 500]), type_name="vt1", feat_name="prop1") + F.add_data( + torch.tensor([100, 200, 300, 400, 500]), type_name="vt1", feat_name="prop1" + ) - F.add_data(np.array([5, 4, 3, 2, 1]), type_name="vt1", feat_name="prop2") + F.add_data(torch.tensor([5, 4, 3, 2, 1]), type_name="vt1", feat_name="prop2") return F, G, N @@ -170,29 +175,53 @@ def multi_edge_multi_vertex_graph_1(): G = { ("brown", "horse", "brown"): [ - np.array([0, 0]), - np.array([1, 2]), + torch.tensor([0, 0]), + torch.tensor([1, 2]), ], ("brown", "tortoise", "black"): [ - np.array([1, 1, 2]), - np.array([1, 0, 1]), + torch.tensor([1, 1, 2]), + torch.tensor([1, 0, 1]), ], ("brown", "mongoose", "black"): [ - np.array([2, 1]), - np.array([0, 1]), + torch.tensor([2, 1]), + torch.tensor([0, 1]), ], ("black", "cow", "brown"): [ - np.array([0, 0]), - np.array([1, 2]), + torch.tensor([0, 0]), + torch.tensor([1, 2]), ], ("black", "snake", "black"): [ - np.array([1]), - np.array([0]), + torch.tensor([1]), + torch.tensor([0]), ], } N = {"brown": 3, "black": 2} + F = FeatureStore() + F.add_data(torch.tensor([100, 200, 300]), type_name="brown", feat_name="prop1") + + F.add_data(torch.tensor([400, 500]), type_name="black", feat_name="prop1") + + F.add_data(torch.tensor([5, 4, 3]), type_name="brown", feat_name="prop2") + + F.add_data(torch.tensor([2, 1]), type_name="black", feat_name="prop2") + + return F, G, N + + +@pytest.fixture +def multi_edge_multi_vertex_no_graph_1(): + G = { + ("brown", "horse", "brown"): 2, + ("brown", "tortoise", "black"): 3, + ("brown", "mongoose", "black"): 3, + ("black", "cow", "brown"): 3, + ("black", "snake", "black"): 1, + } + + N = {"brown": 3, "black": 2} + F = FeatureStore() F.add_data(np.array([100, 200, 300]), type_name="brown", feat_name="prop1") diff --git a/python/cugraph-pyg/cugraph_pyg/tests/int/test_int_cugraph.py b/python/cugraph-pyg/cugraph_pyg/tests/int/test_int_cugraph.py deleted file mode 100644 index 044a40f210d..00000000000 --- a/python/cugraph-pyg/cugraph_pyg/tests/int/test_int_cugraph.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. 
-# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest - -# Integration test requirements -""" -rmm -cugraph -cugraph_pyg -cudf -dask_cudf -ogb -torch_geometric -""" -import cudf -import dask_cudf -from cugraph.experimental import PropertyGraph, MGPropertyGraph -from ogb.nodeproppred import NodePropPredDataset - -from cugraph_pyg.data import to_pyg -from cugraph_pyg.sampler import CuGraphSampler - -from torch_geometric.loader import NodeLoader - - -@pytest.fixture(scope="module") -def loader_hetero_mag(): - # Load MAG into CPU memory - dataset = NodePropPredDataset(name="ogbn-mag") - - data = dataset[0] - pG = PropertyGraph() - - # Load the vertex ids into a new property graph - vertex_offsets = {} - last_offset = 0 - - for node_type, num_nodes in data[0]["num_nodes_dict"].items(): - vertex_offsets[node_type] = last_offset - last_offset += num_nodes - - blank_df = cudf.DataFrame( - { - "id": range( - vertex_offsets[node_type], vertex_offsets[node_type] + num_nodes - ) - } - ) - blank_df.id = blank_df.id.astype("int64") - - pG.add_vertex_data(blank_df, vertex_col_name="id", type_name=node_type) - - # Add the remaining vertex features - for i, (node_type, node_features) in enumerate(data[0]["node_feat_dict"].items()): - vertex_offset = vertex_offsets[node_type] - - feature_df = cudf.DataFrame(node_features) - feature_df.columns = [str(c) for c in range(feature_df.shape[1])] - feature_df["id"] = range(vertex_offset, vertex_offset + node_features.shape[0]) - feature_df.id = feature_df.id.astype("int64") - - pG.add_vertex_data(feature_df, vertex_col_name="id", type_name=node_type) - - # Fill in an empty value for vertices without properties. - pG.fillna(0.0) - - # Add the edges - for i, (edge_key, eidx) in enumerate(data[0]["edge_index_dict"].items()): - node_type_src, edge_type, node_type_dst = edge_key - print(node_type_src, edge_type, node_type_dst) - vertex_offset_src = vertex_offsets[node_type_src] - vertex_offset_dst = vertex_offsets[node_type_dst] - eidx = [n + vertex_offset_src for n in eidx[0]], [ - n + vertex_offset_dst for n in eidx[1] - ] - - edge_df = cudf.DataFrame({"src": eidx[0], "dst": eidx[1]}) - edge_df.src = edge_df.src.astype("int64") - edge_df.dst = edge_df.dst.astype("int64") - edge_df["type"] = edge_type - - # Adding backwards edges is currently required in both - # the cuGraph PG and PyG APIs. 
- pG.add_edge_data(edge_df, vertex_col_names=["src", "dst"], type_name=edge_type) - pG.add_edge_data( - edge_df, vertex_col_names=["dst", "src"], type_name=f"{edge_type}_bw" - ) - - # Add the target variable - y_df = cudf.DataFrame(data[1]["paper"], columns=["y"]) - y_df["id"] = range(vertex_offsets["paper"], vertex_offsets["paper"] + len(y_df)) - y_df.id = y_df.id.astype("int64") - - pG.add_vertex_data(y_df, vertex_col_name="id", type_name="paper") - - # Construct a graph/feature store and loaders - feature_store, graph_store = to_pyg(pG) - sampler = CuGraphSampler( - data=(feature_store, graph_store), - shuffle=True, - num_neighbors=[10, 25], - batch_size=50, - ) - loader = NodeLoader( - data=(feature_store, graph_store), - shuffle=True, - batch_size=50, - node_sampler=sampler, - input_nodes=("author", graph_store.get_vertex_index("author")), - ) - - return loader - - -@pytest.fixture(scope="module") -def loader_hetero_mag_multi_gpu(rmmc): - # Load MAG into CPU memory - dataset = NodePropPredDataset(name="ogbn-mag") - - data = dataset[0] - pG = MGPropertyGraph() - - # Load the vertex ids into a new property graph - vertex_offsets = {} - last_offset = 0 - - for node_type, num_nodes in data[0]["num_nodes_dict"].items(): - vertex_offsets[node_type] = last_offset - last_offset += num_nodes - - blank_df = cudf.DataFrame( - { - "id": range( - vertex_offsets[node_type], vertex_offsets[node_type] + num_nodes - ) - } - ) - blank_df.id = blank_df.id.astype("int64") - blank_df = dask_cudf.from_cudf(blank_df, npartitions=2) - - pG.add_vertex_data(blank_df, vertex_col_name="id", type_name=node_type) - - # Add the remaining vertex features - for i, (node_type, node_features) in enumerate(data[0]["node_feat_dict"].items()): - vertex_offset = vertex_offsets[node_type] - - feature_df = cudf.DataFrame(node_features) - feature_df.columns = [str(c) for c in range(feature_df.shape[1])] - feature_df["id"] = range(vertex_offset, vertex_offset + node_features.shape[0]) - feature_df.id = feature_df.id.astype("int64") - feature_df = dask_cudf.from_cudf(feature_df, npartitions=2) - - pG.add_vertex_data(feature_df, vertex_col_name="id", type_name=node_type) - - # Fill in an empty value for vertices without properties. - pG.fillna(0.0) - - # Add the edges - for i, (edge_key, eidx) in enumerate(data[0]["edge_index_dict"].items()): - node_type_src, edge_type, node_type_dst = edge_key - print(node_type_src, edge_type, node_type_dst) - vertex_offset_src = vertex_offsets[node_type_src] - vertex_offset_dst = vertex_offsets[node_type_dst] - eidx = [n + vertex_offset_src for n in eidx[0]], [ - n + vertex_offset_dst for n in eidx[1] - ] - - edge_df = cudf.DataFrame({"src": eidx[0], "dst": eidx[1]}) - edge_df.src = edge_df.src.astype("int64") - edge_df.dst = edge_df.dst.astype("int64") - edge_df["type"] = edge_type - edge_df = dask_cudf.from_cudf(edge_df, npartitions=2) - - # Adding backwards edges is currently required in both - # the cuGraph PG and PyG APIs. 
- pG.add_edge_data(edge_df, vertex_col_names=["src", "dst"], type_name=edge_type) - pG.add_edge_data( - edge_df, vertex_col_names=["dst", "src"], type_name=f"{edge_type}_bw" - ) - - # Add the target variable - y_df = cudf.DataFrame(data[1]["paper"], columns=["y"]) - y_df["id"] = range(vertex_offsets["paper"], vertex_offsets["paper"] + len(y_df)) - y_df.id = y_df.id.astype("int64") - y_df = dask_cudf.from_cudf(y_df, npartitions=2) - - pG.add_vertex_data(y_df, vertex_col_name="id", type_name="paper") - - # Construct a graph/feature store and loaders - feature_store, graph_store = to_pyg(pG) - sampler = CuGraphSampler( - data=(feature_store, graph_store), - shuffle=True, - num_neighbors=[10, 25], - batch_size=50, - ) - loader = NodeLoader( - data=(feature_store, graph_store), - shuffle=True, - batch_size=50, - node_sampler=sampler, - input_nodes=("author", graph_store.get_vertex_index("author")), - ) - - return loader diff --git a/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_loader.py b/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_loader.py index 98fa9af594b..5a043acd300 100644 --- a/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_loader.py +++ b/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_loader.py @@ -10,22 +10,24 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import pytest -import cupy from cugraph_pyg.loader import CuGraphNeighborLoader from cugraph_pyg.data import CuGraphStore +from cugraph.utilities.utils import import_optional, MissingModule + +torch = import_optional("torch") -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) + +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_cugraph_loader_basic(dask_client, karate_gnn): F, G, N = karate_gnn - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) loader = CuGraphNeighborLoader( (cugraph_store, cugraph_store), - cupy.arange(N["type0"] + N["type1"], dtype="int64"), + torch.arange(N["type0"] + N["type1"], dtype=torch.int64), 10, num_neighbors=[4, 4], random_state=62, @@ -38,7 +40,10 @@ def test_cugraph_loader_basic(dask_client, karate_gnn): assert len(samples) == 3 for sample in samples: - for prop in sample["type0"]["prop0"].tolist(): - assert prop % 31 == 0 - for prop in sample["type1"]["prop0"].tolist(): - assert prop % 41 == 0 + if "type0" in sample: + for prop in sample["type0"]["prop0"].tolist(): + assert prop % 31 == 0 + + if "type1" in sample: + for prop in sample["type1"]["prop0"].tolist(): + assert prop % 41 == 0 diff --git a/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_sampler.py b/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_sampler.py index 500881d0f58..66dfa89aece 100644 --- a/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_sampler.py +++ b/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_sampler.py @@ -20,14 +20,16 @@ from cugraph_pyg.data import CuGraphStore +from cugraph.utilities.utils import import_optional, MissingModule + +torch = import_optional("torch") + -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) @pytest.mark.cugraph_ops +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_neighbor_sample(basic_graph_1, dask_client): F, G, N = 
basic_graph_1 - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) sampler = CuGraphSampler( (cugraph_store, cugraph_store), @@ -39,8 +41,8 @@ def test_neighbor_sample(basic_graph_1, dask_client): out_dict = sampler.sample_from_nodes( ( - cupy.arange(6, dtype="int64"), - cupy.array([0, 1, 2, 3, 4], dtype="int64"), + torch.arange(6, dtype=torch.int64), + torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64), None, ) ) @@ -54,25 +56,25 @@ def test_neighbor_sample(basic_graph_1, dask_client): col_dict = out_dict.col metadata = out_dict.metadata - assert metadata.get().tolist() == list(range(6)) + assert metadata.tolist() == list(range(6)) for node_type, node_ids in noi_groups.items(): - actual_vertex_ids = cupy.arange(N[node_type]) + actual_vertex_ids = torch.arange(N[node_type]) - assert list(node_ids) == list(actual_vertex_ids) + assert node_ids.tolist() == actual_vertex_ids.tolist() for edge_type, ei in G.items(): expected_df = cudf.DataFrame( { - "src": ei[0], - "dst": ei[1], + "src": cupy.asarray(ei[0]), + "dst": cupy.asarray(ei[1]), } ) results_df = cudf.DataFrame( { - "src": row_dict[edge_type], - "dst": col_dict[edge_type], + "src": cupy.asarray(row_dict[edge_type]), + "dst": cupy.asarray(col_dict[edge_type]), } ) @@ -86,13 +88,11 @@ def test_neighbor_sample(basic_graph_1, dask_client): ) -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) @pytest.mark.cugraph_ops +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_neighbor_sample_multi_vertex(multi_edge_multi_vertex_graph_1, dask_client): F, G, N = multi_edge_multi_vertex_graph_1 - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) sampler = CuGraphSampler( (cugraph_store, cugraph_store), @@ -104,8 +104,8 @@ def test_neighbor_sample_multi_vertex(multi_edge_multi_vertex_graph_1, dask_clie out_dict = sampler.sample_from_nodes( ( - cupy.arange(6, dtype="int64"), - cupy.array([0, 1, 2, 3, 4], dtype="int64"), + torch.arange(6, dtype=torch.int64), + torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64), None, ) ) @@ -119,25 +119,25 @@ def test_neighbor_sample_multi_vertex(multi_edge_multi_vertex_graph_1, dask_clie col_dict = out_dict.col metadata = out_dict.metadata - assert metadata.get().tolist() == list(range(6)) + assert metadata.tolist() == list(range(6)) for node_type, node_ids in noi_groups.items(): - actual_vertex_ids = cupy.arange(N[node_type]) + actual_vertex_ids = torch.arange(N[node_type]) - assert list(node_ids) == list(actual_vertex_ids) + assert node_ids.tolist() == actual_vertex_ids.tolist() for edge_type, ei in G.items(): expected_df = cudf.DataFrame( { - "src": ei[0], - "dst": ei[1], + "src": cupy.asarray(ei[0]), + "dst": cupy.asarray(ei[1]), } ) results_df = cudf.DataFrame( { - "src": row_dict[edge_type], - "dst": col_dict[edge_type], + "src": cupy.asarray(row_dict[edge_type]), + "dst": cupy.asarray(col_dict[edge_type]), } ) diff --git a/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_store.py b/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_store.py index c3bd74b64f2..c09a311bd66 100644 --- a/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_store.py +++ b/python/cugraph-pyg/cugraph_pyg/tests/mg/test_mg_cugraph_store.py @@ -20,17 +20,21 @@ from cugraph_pyg.data import CuGraphStore import cudf +import dask_cudf import cupy import numpy as np +from random import randint 
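# Illustrative sketch (not from the original patch): the tests below rely on
# cugraph's import_optional/MissingModule helpers so that the module can still
# be collected when torch is not installed. The pattern, with a hypothetical
# test name, looks roughly like:
#
#   import pytest
#   from cugraph.utilities.utils import import_optional, MissingModule
#
#   torch = import_optional("torch")  # MissingModule placeholder if torch is absent
#
#   @pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available")
#   def test_uses_torch():
#       assert torch.arange(3).tolist() == [0, 1, 2]  # runs only when torch imported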
+ +from cugraph.utilities.utils import import_optional, MissingModule + import pytest -from random import randint +torch = import_optional("torch") -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) + +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_tensor_attr(): ta = CuGraphTensorAttr("group0", "property1") assert not ta.is_fully_specified() @@ -60,9 +64,7 @@ def test_tensor_attr(): assert casted_ta3.index == [1, 2, 3] -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_edge_attr(): ea = CuGraphEdgeAttr("type0", EdgeLayout.COO, False, 10) assert ea.edge_type == "type0" @@ -96,28 +98,55 @@ def single_vertex_graph(request): return request.getfixturevalue(request.param) -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") +@pytest.mark.parametrize( + "edge_index_type", ["numpy", "torch-cpu", "torch-gpu", "cudf", "dask-cudf"] ) -def test_get_edge_index(graph, dask_client): +def test_get_edge_index(graph, edge_index_type, dask_client): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + if "torch" in edge_index_type: + if edge_index_type == "torch-cpu": + device = "cpu" + else: + device = "cuda" + for et in list(G.keys()): + G[et][0] = torch.as_tensor(G[et][0], device=device) + G[et][1] = torch.as_tensor(G[et][1], device=device) + elif edge_index_type == "cudf": + for et in list(G.keys()): + G[et][0] = cudf.Series(G[et][0]) + G[et][1] = cudf.Series(G[et][1]) + elif edge_index_type == "dask-cudf": + for et in list(G.keys()): + G[et][0] = dask_cudf.from_cudf(cudf.Series(G[et][0]), npartitions=2) + G[et][1] = dask_cudf.from_cudf(cudf.Series(G[et][1]), npartitions=2) + + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) for pyg_can_edge_type in G: src, dst = cugraph_store.get_edge_index( edge_type=pyg_can_edge_type, layout="coo", is_sorted=False ) - assert G[pyg_can_edge_type][0].tolist() == src.get().tolist() - assert G[pyg_can_edge_type][1].tolist() == dst.get().tolist() + if edge_index_type == "cudf": + assert G[pyg_can_edge_type][0].values_host.tolist() == src.tolist() + assert G[pyg_can_edge_type][1].values_host.tolist() == dst.tolist() + elif edge_index_type == "dask-cudf": + assert ( + G[pyg_can_edge_type][0].compute().values_host.tolist() == src.tolist() + ) + assert ( + G[pyg_can_edge_type][1].compute().values_host.tolist() == dst.tolist() + ) + else: + assert G[pyg_can_edge_type][0].tolist() == src.tolist() + assert G[pyg_can_edge_type][1].tolist() == dst.tolist() -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_edge_types(graph, dask_client): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) eta = cugraph_store._edge_types_to_attrs assert eta.keys() == G.keys() @@ -127,12 +156,10 @@ def test_edge_types(graph, dask_client): assert attr_name == attr_repr.edge_type -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not 
available") def test_get_subgraph(graph, dask_client): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) if len(G.keys()) > 1: for edge_type in G.keys(): @@ -147,29 +174,29 @@ def test_get_subgraph(graph, dask_client): assert sg.number_of_edges() == num_edges -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_renumber_vertices_basic(single_vertex_graph, dask_client): F, G, N = single_vertex_graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) - nodes_of_interest = cudf.Series(cupy.random.randint(0, sum(N.values()), 3)) + nodes_of_interest = torch.as_tensor( + cupy.random.randint(0, sum(N.values()), 3), device="cuda" + ) index = cugraph_store._get_vertex_groups_from_sample(nodes_of_interest) - assert index["vt1"].get().tolist() == sorted(nodes_of_interest.values_host.tolist()) + assert index["vt1"].tolist() == sorted(nodes_of_interest.tolist()) -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_renumber_vertices_multi_edge_multi_vertex( multi_edge_multi_vertex_graph_1, dask_client ): F, G, N = multi_edge_multi_vertex_graph_1 - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) - nodes_of_interest = cudf.Series(cupy.random.randint(0, sum(N.values()), 3)).unique() + nodes_of_interest = torch.as_tensor( + cupy.random.randint(0, sum(N.values()), 3), device="cuda" + ).unique() index = cugraph_store._get_vertex_groups_from_sample(nodes_of_interest) @@ -177,14 +204,12 @@ def test_renumber_vertices_multi_edge_multi_vertex( brown_nodes = nodes_of_interest[nodes_of_interest > 1] - 2 if len(black_nodes) > 0: - assert index["black"].get().tolist() == sorted(black_nodes.values_host.tolist()) + assert index["black"].tolist() == sorted(black_nodes.tolist()) if len(brown_nodes) > 0: - assert index["brown"].get().tolist() == sorted(brown_nodes.values_host.tolist()) + assert index["brown"].tolist() == sorted(brown_nodes.tolist()) -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_renumber_edges(graph, dask_client): """ FIXME this test is not very good and should be replaced, @@ -192,10 +217,10 @@ def test_renumber_edges(graph, dask_client): """ F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) v_offsets = [N[v] for v in sorted(N.keys())] - v_offsets = cupy.array(v_offsets) + v_offsets = np.array(v_offsets) cumsum = v_offsets.cumsum(0) v_offsets = cumsum - v_offsets @@ -205,25 +230,25 @@ def test_renumber_edges(graph, dask_client): pyg_can_edge_type: i for i, pyg_can_edge_type in enumerate(sorted(G.keys())) } - eoi_src = cupy.array([], dtype="int64") - eoi_dst = cupy.array([], dtype="int64") - eoi_type = cupy.array([], dtype="int32") + eoi_src = np.array([], dtype="int64") + eoi_dst = np.array([], dtype="int64") + eoi_type = np.array([], dtype="int32") for pyg_can_edge_type, ei in G.items(): src_type, _, dst_type = pyg_can_edge_type c = 
randint(0, len(ei[0])) # number to select sel = np.random.randint(0, len(ei[0]), c) - src_i = cupy.array(ei[0][sel]) + v_offsets[src_type] - dst_i = cupy.array(ei[1][sel]) + v_offsets[dst_type] - eoi_src = cupy.concatenate([eoi_src, src_i]) - eoi_dst = cupy.concatenate([eoi_dst, dst_i]) - eoi_type = cupy.concatenate( - [eoi_type, cupy.array([e_num[pyg_can_edge_type]] * c)] - ) + src_i = np.array(ei[0][sel]) + v_offsets[src_type] + dst_i = np.array(ei[1][sel]) + v_offsets[dst_type] + eoi_src = np.concatenate([eoi_src, src_i]) + eoi_dst = np.concatenate([eoi_dst, dst_i]) + eoi_type = np.concatenate([eoi_type, np.array([e_num[pyg_can_edge_type]] * c)]) - nodes_of_interest = ( - cudf.Series(cupy.concatenate([eoi_src, eoi_dst])).unique().sort_values() + nodes_of_interest, _ = torch.sort( + torch.as_tensor( + np.unique(np.concatenate([eoi_src, eoi_dst])), + ).cuda() ) noi_index = cugraph_store._get_vertex_groups_from_sample(nodes_of_interest) @@ -241,8 +266,8 @@ def test_renumber_edges(graph, dask_client): for pyg_can_edge_type in G: df = cudf.DataFrame( { - "src": G[pyg_can_edge_type][0], - "dst": G[pyg_can_edge_type][1], + "src": cupy.asarray(G[pyg_can_edge_type][0]), + "dst": cupy.asarray(G[pyg_can_edge_type][1]), } ) @@ -265,12 +290,10 @@ def test_renumber_edges(graph, dask_client): assert len(df) == 1 -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_get_tensor(graph, dask_client): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) for feature_name, feature_on_types in F.get_feature_list().items(): for type_name in feature_on_types: @@ -281,23 +304,17 @@ def test_get_tensor(graph, dask_client): feat_name=feature_name, ).tolist() - tsr = ( - cugraph_store.get_tensor( - type_name, feature_name, v_ids, None, cupy.int64 - ) - .get() - .tolist() - ) + tsr = cugraph_store.get_tensor( + type_name, feature_name, v_ids, None, cupy.int64 + ).tolist() assert tsr == base_series -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_multi_get_tensor(graph, dask_client): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) for vertex_type in sorted(N.keys()): v_ids = np.arange(N[vertex_type]) @@ -318,15 +335,13 @@ def test_multi_get_tensor(graph, dask_client): ] ) - assert np.stack(tsr).get().tolist() == base_series.tolist() + assert torch.stack(tsr).tolist() == base_series.tolist() -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_get_all_tensor_attrs(graph, dask_client): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) tensor_attrs = [] for vertex_type in sorted(N.keys()): @@ -340,14 +355,6 @@ def test_get_all_tensor_attrs(graph, dask_client): ) ) - for t in tensor_attrs: - print(t) - - print("\n\n") - - for t in cugraph_store.get_all_tensor_attrs(): - print(t) - assert sorted(tensor_attrs, key=lambda a: (a.group_name, a.attr_name)) == sorted( cugraph_store.get_all_tensor_attrs(), key=lambda a: 
(a.group_name, a.attr_name) ) @@ -363,12 +370,10 @@ def test_multi_get_tensor_spec_props(multi_edge_multi_vertex_graph_1, dask_clien raise NotImplementedError("not implemented") -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_get_tensor_from_tensor_attrs(graph, dask_client): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) tensor_attrs = cugraph_store.get_all_tensor_attrs() for tensor_attr in tensor_attrs: @@ -379,25 +384,21 @@ def test_get_tensor_from_tensor_attrs(graph, dask_client): assert cugraph_store.get_tensor(tensor_attr).tolist() == data.tolist() -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_get_tensor_size(graph, dask_client): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) tensor_attrs = cugraph_store.get_all_tensor_attrs() for tensor_attr in tensor_attrs: sz = N[tensor_attr.group_name] tensor_attr.index = np.arange(sz) - assert cugraph_store.get_tensor_size(tensor_attr) == sz + assert cugraph_store.get_tensor_size(tensor_attr) == torch.Size((sz,)) -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_mg_frame_handle(graph, dask_client): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy", multi_gpu=True) + cugraph_store = CuGraphStore(F, G, N, multi_gpu=True) assert isinstance(cugraph_store._EXPERIMENTAL__CuGraphStore__graph._plc_graph, dict) diff --git a/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_loader.py b/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_loader.py index b55b5bc178d..0eae6e08a0d 100644 --- a/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_loader.py +++ b/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_loader.py @@ -10,22 +10,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ import pytest -import cupy from cugraph_pyg.loader import CuGraphNeighborLoader from cugraph_pyg.data import CuGraphStore +from cugraph.utilities.utils import import_optional, MissingModule + +torch = import_optional("torch") -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_cugraph_loader_basic(karate_gnn): F, G, N = karate_gnn - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) loader = CuGraphNeighborLoader( (cugraph_store, cugraph_store), - cupy.arange(N["type0"] + N["type1"], dtype="int64"), + torch.arange(N["type0"] + N["type1"], dtype=torch.int64), 10, num_neighbors=[4, 4], random_state=62, @@ -36,7 +37,10 @@ def test_cugraph_loader_basic(karate_gnn): assert len(samples) == 3 for sample in samples: - for prop in sample["type0"]["prop0"].tolist(): - assert prop % 31 == 0 - for prop in sample["type1"]["prop0"].tolist(): - assert prop % 41 == 0 + if "type0" in sample: + for prop in sample["type0"]["prop0"].tolist(): + assert prop % 31 == 0 + + if "type1" in sample: + for prop in sample["type1"]["prop0"].tolist(): + assert prop % 41 == 0 diff --git a/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_sampler.py b/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_sampler.py index 0b22e559894..c9981f5f715 100644 --- a/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_sampler.py +++ b/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_sampler.py @@ -20,14 +20,16 @@ from cugraph_pyg.data import CuGraphStore +from cugraph.utilities.utils import import_optional, MissingModule + +torch = import_optional("torch") + -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) @pytest.mark.cugraph_ops +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_neighbor_sample(basic_graph_1): F, G, N = basic_graph_1 - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) sampler = CuGraphSampler( (cugraph_store, cugraph_store), @@ -39,8 +41,8 @@ def test_neighbor_sample(basic_graph_1): out_dict = sampler.sample_from_nodes( ( - cupy.arange(6, dtype="int64"), - cupy.array([0, 1, 2, 3, 4], dtype="int64"), + torch.arange(6, dtype=torch.int64), + torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64), None, ) ) @@ -54,29 +56,25 @@ def test_neighbor_sample(basic_graph_1): col_dict = out_dict.col metadata = out_dict.metadata - assert metadata.get().tolist() == list(range(6)) + assert metadata.tolist() == list(range(6)) for node_type, node_ids in noi_groups.items(): - actual_vertex_ids = cupy.arange(N[node_type]) + actual_vertex_ids = torch.arange(N[node_type]) - assert list(node_ids) == list(actual_vertex_ids) - - print("row:", row_dict) - print("col:", col_dict) - print("G:", G) + assert node_ids.tolist() == actual_vertex_ids.tolist() for edge_type, ei in G.items(): expected_df = cudf.DataFrame( { - "src": ei[0], - "dst": ei[1], + "src": cupy.asarray(ei[0]), + "dst": cupy.asarray(ei[1]), } ) results_df = cudf.DataFrame( { - "src": row_dict[edge_type], - "dst": col_dict[edge_type], + "src": cupy.asarray(row_dict[edge_type]), + "dst": cupy.asarray(col_dict[edge_type]), } ) @@ -90,13 +88,11 @@ def test_neighbor_sample(basic_graph_1): ) -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) @pytest.mark.cugraph_ops +@pytest.mark.skipif(isinstance(torch, 
MissingModule), reason="torch not available") def test_neighbor_sample_multi_vertex(multi_edge_multi_vertex_graph_1): F, G, N = multi_edge_multi_vertex_graph_1 - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) sampler = CuGraphSampler( (cugraph_store, cugraph_store), @@ -108,8 +104,8 @@ def test_neighbor_sample_multi_vertex(multi_edge_multi_vertex_graph_1): out_dict = sampler.sample_from_nodes( ( - cupy.arange(6, dtype="int64"), - cupy.array([0, 1, 2, 3, 4], dtype="int64"), + torch.arange(6, dtype=torch.int64), + torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64), None, ) ) @@ -123,29 +119,25 @@ def test_neighbor_sample_multi_vertex(multi_edge_multi_vertex_graph_1): col_dict = out_dict.col metadata = out_dict.metadata - assert metadata.get().tolist() == list(range(6)) + assert metadata.tolist() == list(range(6)) for node_type, node_ids in noi_groups.items(): - actual_vertex_ids = cupy.arange(N[node_type]) - - assert list(node_ids) == list(actual_vertex_ids) + actual_vertex_ids = torch.arange(N[node_type]) - print("row:", row_dict) - print("col:", col_dict) - print("G:", G) + assert node_ids.tolist() == actual_vertex_ids.tolist() for edge_type, ei in G.items(): expected_df = cudf.DataFrame( { - "src": ei[0], - "dst": ei[1], + "src": cupy.asarray(ei[0]), + "dst": cupy.asarray(ei[1]), } ) results_df = cudf.DataFrame( { - "src": row_dict[edge_type], - "dst": col_dict[edge_type], + "src": cupy.asarray(row_dict[edge_type]), + "dst": cupy.asarray(col_dict[edge_type]), } ) diff --git a/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_store.py b/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_store.py index 0ed1875be1c..347d35a3172 100644 --- a/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_store.py +++ b/python/cugraph-pyg/cugraph_pyg/tests/test_cugraph_store.py @@ -22,15 +22,17 @@ import cudf import cupy import numpy as np +from random import randint + +from cugraph.utilities.utils import import_optional, MissingModule import pytest -from random import randint +torch = import_optional("torch") -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) + +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_tensor_attr(): ta = CuGraphTensorAttr("group0", "property1") assert not ta.is_fully_specified() @@ -60,9 +62,7 @@ def test_tensor_attr(): assert casted_ta3.index == [1, 2, 3] -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_edge_attr(): ea = CuGraphEdgeAttr("type0", EdgeLayout.COO, False, 10) assert ea.edge_type == "type0" @@ -80,9 +80,6 @@ def test_edge_attr(): assert ea.size == 10 -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) @pytest.fixture( params=[ "basic_graph_1", @@ -94,36 +91,47 @@ def graph(request): return request.getfixturevalue(request.param) -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) @pytest.fixture(params=["basic_graph_1", "multi_edge_graph_1"]) def single_vertex_graph(request): return request.getfixturevalue(request.param) -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) -def test_get_edge_index(graph): +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") +@pytest.mark.parametrize("edge_index_type", 
["numpy", "torch-cpu", "torch-gpu", "cudf"]) +def test_get_edge_index(graph, edge_index_type): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + if "torch" in edge_index_type: + if edge_index_type == "torch-cpu": + device = "cpu" + else: + device = "cuda" + for et in list(G.keys()): + G[et][0] = torch.as_tensor(G[et][0], device=device) + G[et][1] = torch.as_tensor(G[et][1], device=device) + elif edge_index_type == "cudf": + for et in list(G.keys()): + G[et][0] = cudf.Series(G[et][0]) + G[et][1] = cudf.Series(G[et][1]) + + cugraph_store = CuGraphStore(F, G, N) for pyg_can_edge_type in G: src, dst = cugraph_store.get_edge_index( edge_type=pyg_can_edge_type, layout="coo", is_sorted=False ) - assert G[pyg_can_edge_type][0].tolist() == src.get().tolist() - assert G[pyg_can_edge_type][1].tolist() == dst.get().tolist() + if edge_index_type == "cudf": + assert G[pyg_can_edge_type][0].values_host.tolist() == src.tolist() + assert G[pyg_can_edge_type][1].values_host.tolist() == dst.tolist() + else: + assert G[pyg_can_edge_type][0].tolist() == src.tolist() + assert G[pyg_can_edge_type][1].tolist() == dst.tolist() -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_edge_types(graph): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) eta = cugraph_store._edge_types_to_attrs assert eta.keys() == G.keys() @@ -133,12 +141,10 @@ def test_edge_types(graph): assert attr_name == attr_repr.edge_type -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_get_subgraph(graph): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) if len(G.keys()) > 1: for edge_type in G.keys(): @@ -153,27 +159,27 @@ def test_get_subgraph(graph): assert sg.number_of_edges() == num_edges -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_renumber_vertices_basic(single_vertex_graph): F, G, N = single_vertex_graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) - nodes_of_interest = cudf.Series(cupy.random.randint(0, sum(N.values()), 3)) + nodes_of_interest = torch.as_tensor( + cupy.random.randint(0, sum(N.values()), 3), device="cuda" + ) index = cugraph_store._get_vertex_groups_from_sample(nodes_of_interest) - assert index["vt1"].get().tolist() == sorted(nodes_of_interest.values_host.tolist()) + assert index["vt1"].tolist() == sorted(nodes_of_interest.tolist()) -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_renumber_vertices_multi_edge_multi_vertex(multi_edge_multi_vertex_graph_1): F, G, N = multi_edge_multi_vertex_graph_1 - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) - nodes_of_interest = cudf.Series(cupy.random.randint(0, sum(N.values()), 3)).unique() + nodes_of_interest = torch.as_tensor( + cupy.random.randint(0, sum(N.values()), 3), device="cuda" + ).unique() index = 
cugraph_store._get_vertex_groups_from_sample(nodes_of_interest) @@ -181,14 +187,12 @@ def test_renumber_vertices_multi_edge_multi_vertex(multi_edge_multi_vertex_graph brown_nodes = nodes_of_interest[nodes_of_interest > 1] - 2 if len(black_nodes) > 0: - assert index["black"].get().tolist() == sorted(black_nodes.values_host.tolist()) + assert index["black"].tolist() == sorted(black_nodes.tolist()) if len(brown_nodes) > 0: - assert index["brown"].get().tolist() == sorted(brown_nodes.values_host.tolist()) + assert index["brown"].tolist() == sorted(brown_nodes.tolist()) -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_renumber_edges(graph): """ FIXME this test is not very good and should be replaced, @@ -196,10 +200,10 @@ def test_renumber_edges(graph): """ F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) v_offsets = [N[v] for v in sorted(N.keys())] - v_offsets = cupy.array(v_offsets) + v_offsets = np.array(v_offsets) cumsum = v_offsets.cumsum(0) v_offsets = cumsum - v_offsets @@ -209,25 +213,25 @@ def test_renumber_edges(graph): pyg_can_edge_type: i for i, pyg_can_edge_type in enumerate(sorted(G.keys())) } - eoi_src = cupy.array([], dtype="int64") - eoi_dst = cupy.array([], dtype="int64") - eoi_type = cupy.array([], dtype="int32") + eoi_src = np.array([], dtype="int64") + eoi_dst = np.array([], dtype="int64") + eoi_type = np.array([], dtype="int32") for pyg_can_edge_type, ei in G.items(): src_type, _, dst_type = pyg_can_edge_type c = randint(0, len(ei[0])) # number to select sel = np.random.randint(0, len(ei[0]), c) - src_i = cupy.array(ei[0][sel]) + v_offsets[src_type] - dst_i = cupy.array(ei[1][sel]) + v_offsets[dst_type] - eoi_src = cupy.concatenate([eoi_src, src_i]) - eoi_dst = cupy.concatenate([eoi_dst, dst_i]) - eoi_type = cupy.concatenate( - [eoi_type, cupy.array([e_num[pyg_can_edge_type]] * c)] - ) + src_i = np.array(ei[0][sel]) + v_offsets[src_type] + dst_i = np.array(ei[1][sel]) + v_offsets[dst_type] + eoi_src = np.concatenate([eoi_src, src_i]) + eoi_dst = np.concatenate([eoi_dst, dst_i]) + eoi_type = np.concatenate([eoi_type, np.array([e_num[pyg_can_edge_type]] * c)]) - nodes_of_interest = ( - cudf.Series(cupy.concatenate([eoi_src, eoi_dst])).unique().sort_values() + nodes_of_interest, _ = torch.sort( + torch.as_tensor( + np.unique(np.concatenate([eoi_src, eoi_dst])), + ).cuda() ) noi_index = cugraph_store._get_vertex_groups_from_sample(nodes_of_interest) @@ -245,8 +249,8 @@ def test_renumber_edges(graph): for pyg_can_edge_type in G: df = cudf.DataFrame( { - "src": G[pyg_can_edge_type][0], - "dst": G[pyg_can_edge_type][1], + "src": cupy.asarray(G[pyg_can_edge_type][0]), + "dst": cupy.asarray(G[pyg_can_edge_type][1]), } ) @@ -269,12 +273,10 @@ def test_renumber_edges(graph): assert len(df) == 1 -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_get_tensor(graph): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) for feature_name, feature_on_types in F.get_feature_list().items(): for type_name in feature_on_types: @@ -285,23 +287,17 @@ def test_get_tensor(graph): feat_name=feature_name, ).tolist() - tsr = ( - cugraph_store.get_tensor( - type_name, feature_name, v_ids, 
None, cupy.int64 - ) - .get() - .tolist() - ) + tsr = cugraph_store.get_tensor( + type_name, feature_name, v_ids, None, cupy.int64 + ).tolist() assert tsr == base_series -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_multi_get_tensor(graph): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) for vertex_type in sorted(N.keys()): v_ids = np.arange(N[vertex_type]) @@ -322,15 +318,13 @@ def test_multi_get_tensor(graph): ] ) - assert np.stack(tsr).get().tolist() == base_series.tolist() + assert torch.stack(tsr).tolist() == base_series.tolist() -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_get_all_tensor_attrs(graph): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) tensor_attrs = [] for vertex_type in sorted(N.keys()): @@ -367,12 +361,10 @@ def test_multi_get_tensor_spec_props(multi_edge_multi_vertex_graph_1): raise NotImplementedError("not implemented") -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_get_tensor_from_tensor_attrs(graph): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) tensor_attrs = cugraph_store.get_all_tensor_attrs() for tensor_attr in tensor_attrs: @@ -383,16 +375,37 @@ def test_get_tensor_from_tensor_attrs(graph): assert cugraph_store.get_tensor(tensor_attr).tolist() == data.tolist() -@pytest.mark.skip( - "Skipping for now, unskip after https://github.com/rapidsai/cugraph/pull/3289" -) +@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available") def test_get_tensor_size(graph): F, G, N = graph - cugraph_store = CuGraphStore(F, G, N, backend="cupy") + cugraph_store = CuGraphStore(F, G, N) tensor_attrs = cugraph_store.get_all_tensor_attrs() for tensor_attr in tensor_attrs: sz = N[tensor_attr.group_name] tensor_attr.index = np.arange(sz) - assert cugraph_store.get_tensor_size(tensor_attr) == sz + assert cugraph_store.get_tensor_size(tensor_attr) == torch.Size((sz,)) + + +def test_serialize(multi_edge_multi_vertex_no_graph_1): + import pickle + + F, G, N = multi_edge_multi_vertex_no_graph_1 + cugraph_store = CuGraphStore(F, G, N) + + cugraph_store_copy = pickle.loads(pickle.dumps(cugraph_store)) + + for tensor_attr in cugraph_store.get_all_tensor_attrs(): + sz = cugraph_store.get_tensor_size(tensor_attr)[0] + tensor_attr.index = np.arange(sz) + assert cugraph_store.get_tensor(tensor_attr) == cugraph_store_copy.get_tensor( + tensor_attr + ) + + # Currently does not store edgelist properly for SG + """ + for edge_attr in cugraph_store.get_all_edge_attrs(): + assert cugraph_store.get_edge_index(edge_attr) \ + == cugraph_store_copy.get_edge_index(edge_attr) + """ diff --git a/python/cugraph-pyg/pyproject.toml b/python/cugraph-pyg/pyproject.toml index 5f99f63fdc2..d26d2f0883d 100644 --- a/python/cugraph-pyg/pyproject.toml +++ b/python/cugraph-pyg/pyproject.toml @@ -3,9 +3,9 @@ [build-system] requires = [ - "wheel", "setuptools", -] + "wheel", +] # This list was generated by `rapids-dependency-file-generator`. 
To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. [tool.pytest.ini_options] testpaths = ["cugraph_pyg/tests"] @@ -25,6 +25,9 @@ classifiers = [ "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.10", ] +dependencies = [ + "thriftpy2", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. [project.urls] Homepage = "https://github.com/rapidsai/cugraph" diff --git a/python/cugraph-service/client/pyproject.toml b/python/cugraph-service/client/pyproject.toml index 7526a265660..f1cf71792d4 100644 --- a/python/cugraph-service/client/pyproject.toml +++ b/python/cugraph-service/client/pyproject.toml @@ -3,9 +3,9 @@ [build-system] requires = [ - "wheel", "setuptools", -] + "wheel", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`. build-backend = "setuptools.build_meta" [project] @@ -20,7 +20,7 @@ license = { text = "Apache 2.0" } requires-python = ">=3.8" dependencies = [ "thriftpy2", -] +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ "Intended Audience :: Developers", "Programming Language :: Python", diff --git a/python/cugraph-service/server/cugraph_service_server/testing/benchmark_server_extension.py b/python/cugraph-service/server/cugraph_service_server/testing/benchmark_server_extension.py index d46a37cd41f..5f9eac6b2a3 100644 --- a/python/cugraph-service/server/cugraph_service_server/testing/benchmark_server_extension.py +++ b/python/cugraph-service/server/cugraph_service_server/testing/benchmark_server_extension.py @@ -95,7 +95,6 @@ def create_graph_from_rmat_generator( source="src", destination="dst", edge_attr="weight", - legacy_renum_only=True, ) else: G.from_cudf_edgelist( @@ -103,7 +102,6 @@ def create_graph_from_rmat_generator( source="src", destination="dst", edge_attr="weight", - legacy_renum_only=True, ) return G diff --git a/python/cugraph-service/server/pyproject.toml b/python/cugraph-service/server/pyproject.toml index d4459fd48f7..e929fe938dd 100644 --- a/python/cugraph-service/server/pyproject.toml +++ b/python/cugraph-service/server/pyproject.toml @@ -3,9 +3,9 @@ [build-system] requires = [ - "wheel", "setuptools", -] + "wheel", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`. build-backend = "setuptools.build_meta" [project] @@ -19,18 +19,19 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.8" dependencies = [ - "cudf", - "cugraph", - "cugraph-service-client", - "cupy-cuda11x", - "dask-cuda", - "dask-cudf", - "distributed >=2023.1.1", - "numpy", - "rmm", + "cudf==23.6.*", + "cugraph-service-client==23.6.*", + "cugraph==23.4.*", + "cupy-cuda11x>=9.5.0,<12.0.0a0", + "dask-cuda==23.6.*", + "dask-cudf==23.6.*", + "dask==2023.3.2", + "distributed==2023.3.2.1", + "numpy>=1.21", + "rmm==23.6.*", "thriftpy2", - "ucx-py", -] + "ucx-py==0.32.*", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`. 
classifiers = [ "Intended Audience :: Developers", "Programming Language :: Python", @@ -41,19 +42,20 @@ dynamic = ["entry-points"] [project.optional-dependencies] test = [ + "aiohttp", + "fsspec[http]>=0.6.0", + "networkx>=2.5.1", + "numpy>=1.21", + "pandas", "pytest", - "pytest-xdist", "pytest-benchmark", - "scipy", - "numpy", - "pandas", - "networkx>=2.5.1", - "scikit-learn>=0.23.1", + "pytest-cov", + "pytest-xdist", "python-louvain", - # cudf will use fsspec but is protocol independent. cugraph tests - # specifically require http for the test files it asks cudf to read. - "fsspec[http]>=0.6.0", -] + "requests", + "scikit-learn>=0.23.1", + "scipy", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`. [project.urls] Homepage = "https://github.com/rapidsai/cugraph" diff --git a/python/cugraph/cugraph/__init__.py b/python/cugraph/cugraph/__init__.py index ab7c3d31839..d7324a379c0 100644 --- a/python/cugraph/cugraph/__init__.py +++ b/python/cugraph/cugraph/__init__.py @@ -13,6 +13,7 @@ from cugraph.community import ( ecg, + induced_subgraph, ktruss_subgraph, k_truss, louvain, diff --git a/python/cugraph/cugraph/centrality/CMakeLists.txt b/python/cugraph/cugraph/centrality/CMakeLists.txt index ca288743b08..f5036cca0a8 100644 --- a/python/cugraph/cugraph/centrality/CMakeLists.txt +++ b/python/cugraph/cugraph/centrality/CMakeLists.txt @@ -1,5 +1,5 @@ # ============================================================================= -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at @@ -13,7 +13,6 @@ # ============================================================================= set(cython_sources - betweenness_centrality_wrapper.pyx edge_betweenness_centrality_wrapper.pyx ) set(linked_libraries cugraph::cugraph) diff --git a/python/cugraph/cugraph/centrality/betweenness_centrality.py b/python/cugraph/cugraph/centrality/betweenness_centrality.py index 28798c7e861..56fa7fea9a7 100644 --- a/python/cugraph/cugraph/centrality/betweenness_centrality.py +++ b/python/cugraph/cugraph/centrality/betweenness_centrality.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,28 +11,34 @@ # See the License for the specific language governing permissions and # limitations under the License. 
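Note (illustrative, not part of the patch): the hunks that follow rewrite cugraph.betweenness_centrality() on top of the pylibcugraph C API. The legacy Cython wrapper path is dropped, a 'random_state' parameter is added alongside the now-deprecated 'seed', and the result is assembled into a cudf DataFrame with 'vertex' and 'betweenness_centrality' columns. A minimal usage sketch of the new signature, assuming the karate dataset helper used elsewhere in this changeset:

>>> import cugraph
>>> from cugraph.experimental.datasets import karate
>>> G = karate.get_graph(fetch=True)
>>> # approximate betweenness from 4 sampled sources; random_state makes the
>>> # sampling reproducible (seed= still works but now emits a UserWarning)
>>> df = cugraph.betweenness_centrality(G, k=4, normalized=True, random_state=42)
>>> df = df.sort_values("betweenness_centrality", ascending=False)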
-import random -import numpy as np -import cudf -from cugraph.centrality import betweenness_centrality_wrapper +from pylibcugraph import ( + betweenness_centrality as pylibcugraph_betweenness_centrality, + ResourceHandle, +) from cugraph.centrality import edge_betweenness_centrality_wrapper + from cugraph.utilities import ( df_edge_score_to_dictionary, - df_score_to_dictionary, ensure_cugraph_obj_for_nx, + df_score_to_dictionary, ) +import cudf +import warnings +import numpy as np +import random +from typing import Union -# NOTE: result_type=float could be an intuitive way to indicate the result type def betweenness_centrality( G, - k=None, - normalized=True, - weight=None, - endpoints=False, - seed=None, - result_dtype=np.float64, -): + k: Union[int, list, cudf.Series, cudf.DataFrame] = None, + normalized: bool = True, + weight: cudf.DataFrame = None, + endpoints: bool = False, + seed: int = None, + random_state: int = None, + result_dtype: Union[np.float32, np.float64] = np.float64, +) -> Union[cudf.DataFrame, dict]: """ Compute the betweenness centrality for all vertices of the graph G. Betweenness centrality is a measure of the number of shortest paths that @@ -50,18 +56,18 @@ def betweenness_centrality( ---------- G : cuGraph.Graph or networkx.Graph The graph can be either directed (Graph(directed=True)) or undirected. - Weights in the graph are ignored, the current implementation uses - BFS traversals. Use weight parameter if weights need to be considered - (currently not supported) + Weights in the graph are ignored; the current implementation uses a parallel + variation of the Brandes Algorithm (2001) to compute exact or approximate + betweenness. If weights are provided in the edgelist, they will not be + used. - k : int or list or None, optional (default=None) + k : int, list or cudf object or None, optional (default=None) If k is not None, use k node samples to estimate betweenness. Higher - values give better approximation. If k is a list, use the content - of the list for estimation: the list should contain vertex - identifiers. If k is None (the default), all the vertices are used - to estimate betweenness. Vertices obtained through sampling or - defined as a list will be used assources for traversals inside the - algorithm. + values give better approximation. If k is either a list or a cudf object, use its + content for estimation: it should contain vertex identifiers. If k is None + (the default), all the vertices are used to estimate betweenness. Vertices + obtained through sampling or defined as a list will be used as sources for + traversals inside the algorithm. normalized : bool, optional (default=True) If true, the betweenness values are normalized by @@ -76,21 +82,30 @@ def betweenness_centrality( Specifies the weights to be used for each edge. Should contain a mapping between edges and weights. - (Not Supported) + + (Not Supported): if weights are provided at the Graph creation, + they will not be used. endpoints : bool, optional (default=False) If true, include the endpoints in the shortest path counts. - (Not Supported) - seed : optional (default=None) - if k is specified and k is an integer, use seed to initialize the - random number generator. - Using None as seed relies on random.seed() behavior: using current - system time - If k is either None or list: seed parameter is ignored + seed : int, optional (default=None) + if k is specified and k is an integer, use seed to initialize + the random number generator.
+ Using None defaults to a hash of process id, time, and hostname + If k is either None or list: seed parameter is ignored. + + This parameter is here for backwards-compatibility and is identical + to 'random_state'. + + random_state : int, optional (default=None) + if k is specified and k is an integer, use random_state to initialize + the random number generator. + Using None defaults to a hash of process id, time, and hostname + If k is either None or list: random_state parameter is ignored. result_dtype : np.float32 or np.float64, optional, default=np.float64 - Indicate the data type of the betweenness centrality scores + Indicate the data type of the betweenness centrality scores. Returns ------- @@ -112,32 +127,72 @@ def betweenness_centrality( >>> bc = cugraph.betweenness_centrality(G) """ - # vertices is intended to be a cuDF series that contains a sampling of - # k vertices out of the graph. - # - # NOTE: cuDF doesn't currently support sampling, but there is a python - # workaround. + if seed is not None: + warning_msg = ( + "This parameter is deprecated and will be removed " + "in the next release. Use 'random_state' instead." + ) + warnings.warn(warning_msg, UserWarning) + + G, isNx = ensure_cugraph_obj_for_nx(G) + + # FIXME: Should we raise an error if the graph created is weighted? if weight is not None: raise NotImplementedError( "weighted implementation of betweenness " "centrality not currently supported" ) + if G.store_transposed is True: + warning_msg = ( + "Betweenness centrality expects the 'store_transposed' flag " + "to be set to 'False' for optimal performance during " + "the graph creation" + ) + warnings.warn(warning_msg, UserWarning) + + # FIXME: Should we now remove this parameter? if result_dtype not in [np.float32, np.float64]: raise TypeError("result type can only be np.float32 or np.float64") + else: + warning_msg = ( + "This parameter is deprecated and will be removed " "in the next release."
+ ) + warnings.warn(warning_msg, PendingDeprecationWarning) + + if not isinstance(k, (cudf.DataFrame, cudf.Series)): + if isinstance(k, list): + vertex_dtype = G.edgelist.edgelist_df.dtypes[0] + k = cudf.Series(k, dtype=vertex_dtype) + + if isinstance(k, (cudf.DataFrame, cudf.Series)): + if G.renumbered: + k = G.lookup_internal_vertex_id(k) + + vertices, values = pylibcugraph_betweenness_centrality( + resource_handle=ResourceHandle(), + graph=G._plc_graph, + k=k, + random_state=random_state, + normalized=normalized, + include_endpoints=endpoints, + do_expensive_check=False, + ) - G, isNx = ensure_cugraph_obj_for_nx(G) - - vertices = _initialize_vertices(G, k, seed) + vertices = cudf.Series(vertices) + values = cudf.Series(values) - df = betweenness_centrality_wrapper.betweenness_centrality( - G, normalized, endpoints, weight, vertices, result_dtype - ) + df = cudf.DataFrame() + df["vertex"] = vertices + df["betweenness_centrality"] = values if G.renumbered: df = G.unrenumber(df, "vertex") + if df["betweenness_centrality"].dtype != result_dtype: + df["betweenness_centrality"] = df["betweenness_centrality"].astype(result_dtype) + if isNx is True: dict = df_score_to_dictionary(df, "betweenness_centrality") return dict @@ -146,8 +201,13 @@ def betweenness_centrality( def edge_betweenness_centrality( - G, k=None, normalized=True, weight=None, seed=None, result_dtype=np.float64 -): + G, + k: Union[int, list, cudf.Series, cudf.DataFrame] = None, + normalized: bool = True, + weight: cudf.DataFrame = None, + seed: int = None, + result_dtype: Union[np.float32, np.float64] = np.float64, +) -> Union[cudf.DataFrame, dict]: """ Compute the edge betweenness centrality for all edges of the graph G. Betweenness centrality is a measure of the number of shortest paths @@ -279,7 +339,7 @@ def edge_betweenness_centrality( # int: Generate an random sample with k elements # list: k become the length of the list and vertices become the content # None: All the vertices are considered -def _initialize_vertices(G, k, seed): +def _initialize_vertices(G, k: Union[int, list], seed: int) -> np.ndarray: vertices = None numpy_vertices = None if k is not None: @@ -301,13 +361,13 @@ def _initialize_vertices(G, k, seed): # - vertices '0' '1' '3' '4' exist # - There is a vertex at index 2 (there is not guarantee that it is # vertice '3' ) -def _initialize_vertices_from_indices_sampling(G, k, seed): +def _initialize_vertices_from_indices_sampling(G, k: int, seed: int) -> list: random.seed(seed) vertices = random.sample(range(G.number_of_vertices()), k) return vertices -def _initialize_vertices_from_identifiers_list(G, identifiers): +def _initialize_vertices_from_identifiers_list(G, identifiers: list) -> np.ndarray: vertices = identifiers if G.renumbered: vertices = G.lookup_internal_vertex_id(cudf.Series(vertices)).to_numpy() diff --git a/python/cugraph/cugraph/centrality/betweenness_centrality_wrapper.pyx b/python/cugraph/cugraph/centrality/betweenness_centrality_wrapper.pyx deleted file mode 100644 index 3d34304ff13..00000000000 --- a/python/cugraph/cugraph/centrality/betweenness_centrality_wrapper.pyx +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# cython: profile=False -# distutils: language = c++ -# cython: embedsignature = True -# cython: language_level = 3 - -from cugraph.centrality.betweenness_centrality cimport betweenness_centrality as c_betweenness_centrality -from cugraph.structure.graph_primtypes cimport * -from libc.stdint cimport uintptr_t -from libcpp cimport bool -import cudf -import numpy as np -import cugraph.dask.comms.comms as Comms -from cugraph.dask.common.mg_utils import get_client -import dask.distributed - - -def get_output_df(number_of_vertices, result_dtype): - df = cudf.DataFrame() - df['vertex'] = cudf.Series(np.zeros(number_of_vertices, dtype=np.int32)) - df['betweenness_centrality'] = cudf.Series(np.zeros(number_of_vertices, - dtype=result_dtype)) - return df - - -def get_batch(sources, number_of_workers, current_worker): - batch_size = len(sources) // number_of_workers - begin = current_worker * batch_size - end = (current_worker + 1) * batch_size - if current_worker == (number_of_workers - 1): - end = len(sources) - batch = sources[begin:end] - return batch - - -cdef void run_c_betweenness_centrality(uintptr_t c_handle, - uintptr_t c_graph, - uintptr_t c_betweenness, - bool normalized, - bool endpoints, - uintptr_t c_weights, - int number_of_sources_in_batch, - uintptr_t c_batch, - result_dtype): - if result_dtype == np.float64: - c_betweenness_centrality[int, int, double, double](( c_handle)[0], - ( c_graph)[0], - c_betweenness, - normalized, - endpoints, - c_weights, - number_of_sources_in_batch, - c_batch) - elif result_dtype == np.float32: - c_betweenness_centrality[int, int, float, float](( c_handle)[0], - ( c_graph)[0], - c_betweenness, - normalized, - endpoints, - c_weights, - number_of_sources_in_batch, - c_batch) - else: - raise ValueError("result_dtype can only be np.float64 or np.float32") - - -def run_internal_work(handle, input_data, normalized, endpoints, - weights, - batch, - result_dtype): - cdef uintptr_t c_handle = NULL - cdef uintptr_t c_graph = NULL - cdef uintptr_t c_identifier = NULL - cdef uintptr_t c_weights = NULL - cdef uintptr_t c_betweenness = NULL - cdef uintptr_t c_batch = NULL - - cdef uintptr_t c_offsets = NULL - cdef uintptr_t c_indices = NULL - cdef uintptr_t c_graph_weights = NULL - - cdef GraphCSRViewDouble graph_double - cdef GraphCSRViewFloat graph_float - - (offsets, indices, graph_weights), is_directed = input_data - - if graph_weights is not None: - c_graph_weights = graph_weights.__cuda_array_interface__['data'][0] - c_offsets = offsets.__cuda_array_interface__['data'][0] - c_indices = indices.__cuda_array_interface__['data'][0] - - number_of_vertices = len(offsets) - 1 - number_of_edges = len(indices) - - result_size = number_of_vertices - result_df = get_output_df(result_size, result_dtype) - number_of_sources_in_batch = len(batch) - if result_dtype == np.float64: - graph_double = GraphCSRView[int, int, double]( c_offsets, - c_indices, - c_graph_weights, - number_of_vertices, - number_of_edges) - graph_double.prop.directed = is_directed - c_graph = &graph_double - elif result_dtype == np.float32: - graph_float = GraphCSRView[int, int, 
float](c_offsets, - c_indices, - c_graph_weights, - number_of_vertices, - number_of_edges) - graph_float.prop.directed = is_directed - c_graph = &graph_float - else: - raise ValueError("result_dtype can only be np.float64 or np.float32") - - c_identifier = result_df['vertex'].__cuda_array_interface__['data'][0] - c_betweenness = result_df['betweenness_centrality'].__cuda_array_interface__['data'][0] - if weights is not None: - c_weights = weights.__cuda_array_interface__['data'][0] - - c_batch = batch.__array_interface__['data'][0] - c_handle = handle.getHandle() - - run_c_betweenness_centrality(c_handle, - c_graph, - c_betweenness, - normalized, - endpoints, - c_weights, - number_of_sources_in_batch, - c_batch, - result_dtype) - if result_dtype == np.float64: - graph_double.get_vertex_identifiers( c_identifier) - elif result_dtype == np.float32: - graph_float.get_vertex_identifiers( c_identifier) - else: - raise ValueError("result_dtype can only be np.float64 or np.float32") - - return result_df - -def run_mg_work(input_data, normalized, endpoints, - weights, sources, - result_dtype, session_id): - result = None - - number_of_workers = Comms.get_n_workers(session_id) - worker_idx = Comms.get_worker_id(session_id) - handle = Comms.get_handle(session_id) - - batch = get_batch(sources, number_of_workers, worker_idx) - - result = run_internal_work(handle, input_data, normalized, - endpoints, weights, batch, - result_dtype) - return result - - -def batch_betweenness_centrality(input_graph, normalized, endpoints, - weights, vertices, result_dtype): - df = None - client = get_client() - comms = Comms.get_comms() - replicated_adjlists = input_graph.batch_adjlists - work_futures = [client.submit(run_mg_work, - (data, input_graph.is_directed()), - normalized, - endpoints, - weights, - vertices, - result_dtype, - comms.sessionId, - workers=[worker]) for - idx, (worker, data) in enumerate(replicated_adjlists.items())] - dask.distributed.wait(work_futures) - df = work_futures[0].result() - return df - - -def sg_betweenness_centrality(input_graph, normalized, endpoints, weights, - vertices, result_dtype): - handle = Comms.get_default_handle() - adjlist = input_graph.adjlist - input_data = ((adjlist.offsets, adjlist.indices, adjlist.weights), - input_graph.is_directed()) - df = run_internal_work(handle, input_data, normalized, endpoints, weights, - vertices, result_dtype) - return df - - -# NOTE: The current implementation only has and -# as explicit template declaration -# The current BFS requires the GraphCSR to be declared -# as or even if weights is null -def betweenness_centrality(input_graph, normalized, endpoints, weights, - vertices, result_dtype): - """ - Call betweenness centrality - """ - df = None - - if not input_graph.adjlist: - input_graph.view_adj_list() - - if Comms.is_initialized() and input_graph.batch_enabled == True: - df = batch_betweenness_centrality(input_graph, - normalized, - endpoints, - weights, - vertices, - result_dtype) - else: - df = sg_betweenness_centrality(input_graph, - normalized, - endpoints, - weights, - vertices, - result_dtype) - return df diff --git a/python/cugraph/cugraph/centrality/betweenness_centrality.pxd b/python/cugraph/cugraph/centrality/edge_betweenness_centrality.pxd similarity index 75% rename from python/cugraph/cugraph/centrality/betweenness_centrality.pxd rename to python/cugraph/cugraph/centrality/edge_betweenness_centrality.pxd index 7abc9009cc8..7ec20e35b83 100644 --- a/python/cugraph/cugraph/centrality/betweenness_centrality.pxd +++ 
b/python/cugraph/cugraph/centrality/edge_betweenness_centrality.pxd @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2021, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -21,16 +21,6 @@ from libcpp cimport bool cdef extern from "cugraph/algorithms.hpp" namespace "cugraph": - cdef void betweenness_centrality[VT, ET, WT, result_t]( - const handle_t &handle, - const GraphCSRView[VT, ET, WT] &graph, - result_t *result, - bool normalized, - bool endpoints, - const WT *weight, - VT k, - const VT *vertices) except + - cdef void edge_betweenness_centrality[VT, ET, WT, result_t]( const handle_t &handle, const GraphCSRView[VT, ET, WT] &graph, diff --git a/python/cugraph/cugraph/centrality/edge_betweenness_centrality_wrapper.pyx b/python/cugraph/cugraph/centrality/edge_betweenness_centrality_wrapper.pyx index bf4a80701ff..8c64dcbf952 100644 --- a/python/cugraph/cugraph/centrality/edge_betweenness_centrality_wrapper.pyx +++ b/python/cugraph/cugraph/centrality/edge_betweenness_centrality_wrapper.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -16,7 +16,7 @@ # cython: embedsignature = True # cython: language_level = 3 -from cugraph.centrality.betweenness_centrality cimport edge_betweenness_centrality as c_edge_betweenness_centrality +from cugraph.centrality.edge_betweenness_centrality cimport edge_betweenness_centrality as c_edge_betweenness_centrality from cugraph.structure import graph_primtypes_wrapper from cugraph.structure.graph_primtypes cimport * from libc.stdint cimport uintptr_t diff --git a/python/cugraph/cugraph/community/CMakeLists.txt b/python/cugraph/cugraph/community/CMakeLists.txt index c8a3f9be796..6461cc60a7c 100644 --- a/python/cugraph/cugraph/community/CMakeLists.txt +++ b/python/cugraph/cugraph/community/CMakeLists.txt @@ -1,5 +1,5 @@ # ============================================================================= -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at @@ -13,11 +13,8 @@ # ============================================================================= set(cython_sources - ecg_wrapper.pyx ktruss_subgraph_wrapper.pyx leiden_wrapper.pyx - spectral_clustering_wrapper.pyx - subgraph_extraction_wrapper.pyx ) set(linked_libraries cugraph::cugraph) diff --git a/python/cugraph/cugraph/community/__init__.py b/python/cugraph/cugraph/community/__init__.py index c14cb188d3c..2aa782e7080 100644 --- a/python/cugraph/cugraph/community/__init__.py +++ b/python/cugraph/cugraph/community/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -22,6 +22,7 @@ analyzeClustering_ratio_cut, ) from cugraph.community.subgraph_extraction import subgraph +from cugraph.community.induced_subgraph import induced_subgraph from cugraph.community.triangle_count import triangle_count from cugraph.community.ktruss_subgraph import ktruss_subgraph from cugraph.community.ktruss_subgraph import k_truss diff --git a/python/cugraph/cugraph/community/ecg.py b/python/cugraph/cugraph/community/ecg.py index caa2435245f..e59f3dcb1b7 100644 --- a/python/cugraph/cugraph/community/ecg.py +++ b/python/cugraph/cugraph/community/ecg.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,11 +11,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from cugraph.community import ecg_wrapper from cugraph.utilities import ( ensure_cugraph_obj_for_nx, df_score_to_dictionary, ) +import cudf + +from pylibcugraph import ecg as pylibcugraph_ecg +from pylibcugraph import ResourceHandle def ecg(input_graph, min_weight=0.05, ensemble_size=16, weight=None): @@ -68,14 +71,24 @@ def ecg(input_graph, min_weight=0.05, ensemble_size=16, weight=None): """ - input_graph, isNx = ensure_cugraph_obj_for_nx(input_graph, weight) + input_graph, isNx = ensure_cugraph_obj_for_nx(input_graph) + + vertex, partition = pylibcugraph_ecg( + resource_handle=ResourceHandle(), + graph=input_graph._plc_graph, + min_weight=min_weight, + ensemble_size=ensemble_size, + do_expensive_check=False, + ) - parts = ecg_wrapper.ecg(input_graph, min_weight, ensemble_size) + df = cudf.DataFrame() + df["vertex"] = vertex + df["partition"] = partition if input_graph.renumbered: - parts = input_graph.unrenumber(parts, "vertex") + df = input_graph.unrenumber(df, "vertex") if isNx is True: - return df_score_to_dictionary(parts, "partition") - else: - return parts + df = df_score_to_dictionary(df, "partition") + + return df diff --git a/python/cugraph/cugraph/community/ecg_wrapper.pyx b/python/cugraph/cugraph/community/ecg_wrapper.pyx deleted file mode 100644 index c6d3251b730..00000000000 --- a/python/cugraph/cugraph/community/ecg_wrapper.pyx +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
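Note (illustrative, not part of the patch): the ecg() rewrite above now calls the pylibcugraph ECG implementation directly and always returns a cudf DataFrame with 'vertex' and 'partition' columns (or a dict when a NetworkX graph was passed in). A small usage sketch under those assumptions, using the weighted karate sample:

>>> import cugraph
>>> from cugraph.experimental.datasets import karate
>>> G = karate.get_graph(fetch=True)  # ECG needs edge weights on the graph
>>> parts = cugraph.ecg(G, min_weight=0.05, ensemble_size=16)
>>> n_communities = parts["partition"].nunique()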
- -# cython: profile=False -# distutils: language = c++ -# cython: embedsignature = True -# cython: language_level = 3 - -from cugraph.community.ecg cimport ecg as c_ecg -from cugraph.structure.graph_primtypes cimport * -from cugraph.structure import graph_primtypes_wrapper -from libc.stdint cimport uintptr_t - -import cudf -import numpy as np - - -def ecg(input_graph, min_weight=.05, ensemble_size=16): - """ - Call ECG - """ - if not input_graph.adjlist: - input_graph.view_adj_list() - - if input_graph.adjlist.weights is None: - raise Exception('ECG must be called on a weighted graph') - - cdef unique_ptr[handle_t] handle_ptr - handle_ptr.reset(new handle_t()) - - [offsets, indices] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.offsets, - input_graph.adjlist.indices], [np.int32, np.int64]) - [weights] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.weights], [np.float32, np.float64]) - - num_verts = input_graph.number_of_vertices() - num_edges = input_graph.number_of_edges(directed_edges=True) - - df = cudf.DataFrame() - df['vertex'] = cudf.Series(np.zeros(num_verts, dtype=np.int32)) - df['partition'] = cudf.Series(np.zeros(num_verts, dtype=np.int32)) - - cdef uintptr_t c_offsets = offsets.__cuda_array_interface__['data'][0] - cdef uintptr_t c_indices = indices.__cuda_array_interface__['data'][0] - cdef uintptr_t c_identifier = df['vertex'].__cuda_array_interface__['data'][0] - cdef uintptr_t c_partition = df['partition'].__cuda_array_interface__['data'][0] - cdef uintptr_t c_weights = weights.__cuda_array_interface__['data'][0] - - cdef GraphCSRView[int,int,float] graph_float - cdef GraphCSRView[int,int,double] graph_double - - if weights.dtype == np.float32: - graph_float = GraphCSRView[int,int,float](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - graph_float.get_vertex_identifiers(c_identifier) - - c_ecg[int,int,float](handle_ptr.get()[0], - graph_float, - min_weight, - ensemble_size, - c_partition) - else: - graph_double = GraphCSRView[int,int,double](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - graph_double.get_vertex_identifiers(c_identifier) - - c_ecg[int,int,double](handle_ptr.get()[0], - graph_double, - min_weight, - ensemble_size, - c_partition) - - return df diff --git a/python/cugraph/cugraph/community/induced_subgraph.py b/python/cugraph/cugraph/community/induced_subgraph.py new file mode 100644 index 00000000000..d64200d0423 --- /dev/null +++ b/python/cugraph/cugraph/community/induced_subgraph.py @@ -0,0 +1,165 @@ +# Copyright (c) 2019-2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
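Note (illustrative, not part of the patch): the new induced_subgraph module continues below. One behavior worth calling out from its implementation: when the requested vertex set induces no edges, the function returns (None, None) rather than an empty Graph, so callers should guard for that. A hedged sketch:

>>> import cudf
>>> import cugraph
>>> from cugraph.experimental.datasets import karate
>>> G = karate.get_graph(fetch=True)
>>> verts = cudf.Series([0, 1, 2], dtype="int32")
>>> Sg, seeds_offsets = cugraph.induced_subgraph(G, verts)
>>> if Sg is None:
...     print("no edges among the requested vertices")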
+ +import warnings +from typing import Union, Tuple + +import cudf +from pylibcugraph import ResourceHandle +from pylibcugraph import induced_subgraph as pylibcugraph_induced_subgraph + +from cugraph.structure import Graph +from cugraph.utilities import ( + ensure_cugraph_obj_for_nx, + cugraph_to_nx, +) +from cugraph.utilities.utils import import_optional + +# FIXME: the networkx.Graph type used in the type annotation for +# induced_subgraph() is specified using a string literal to avoid depending on +# and importing networkx. Instead, networkx is imported optionally, which may +# cause a problem for a type checker if run in an environment where networkx is +# not installed. +networkx = import_optional("networkx") + + +# FIXME: Move this function to the utility module so that it can be +# shared by other algos +def ensure_valid_dtype(input_graph: Graph, input: cudf.Series, input_name: str): + vertex_dtype = input_graph.edgelist.edgelist_df.dtypes[0] + input_dtype = input.dtype + if input_dtype != vertex_dtype: + warning_msg = ( + f"Subgraph requires '{input_name}' " + "to match the graph's 'vertex' type. " + f"input graph's vertex type is: {vertex_dtype} and got " + f"'{input_name}' of type: " + f"{input_dtype}." + ) + warnings.warn(warning_msg, UserWarning) + input = input.astype(vertex_dtype) + + return input + + +def induced_subgraph( + G: Graph, + vertices: Union[cudf.Series, cudf.DataFrame], + offsets: Union[list, cudf.Series] = None, +) -> Tuple[Union[Graph, "networkx.Graph"], cudf.Series]: + """ + Compute a subgraph of the existing graph including only the specified + vertices. This algorithm works with both directed and undirected graphs + and does not actually traverse the edges, but instead simply pulls out any + edges that are incident on vertices that are both contained in the vertices + list. + + If no subgraph can be extracted from the vertices provided, a 'None' value + will be returned. + + Parameters + ---------- + G : cugraph.Graph or networkx.Graph + The current implementation only supports weighted graphs. + + vertices : cudf.Series or cudf.DataFrame + Specifies the vertices of the induced subgraph. For multi-column + vertices, vertices should be provided as a cudf.DataFrame + + offsets : list or cudf.Series, optional + Specifies the subgraph offsets into the subgraph vertices. + If no offsets array is provided, a default array [0, len(vertices)] + will be used. + + Returns + ------- + Sg : cugraph.Graph or networkx.Graph + A graph object containing the subgraph induced by the given vertex set. + seeds_offsets: cudf.Series + A cudf Series containing the starting offset in the returned edge list + for each seed. 
+ + Examples + -------- + >>> from cugraph.experimental.datasets import karate + >>> G = karate.get_graph(fetch=True) + >>> verts = np.zeros(3, dtype=np.int32) + >>> verts[0] = 0 + >>> verts[1] = 1 + >>> verts[2] = 2 + >>> sverts = cudf.Series(verts) + >>> Sg, seeds_offsets = cugraph.induced_subgraph(G, sverts) + + """ + + G, isNx = ensure_cugraph_obj_for_nx(G) + directed = G.is_directed() + + # FIXME: Hardcoded for now + offsets = None + + if G.renumbered: + if isinstance(vertices, cudf.DataFrame): + vertices = G.lookup_internal_vertex_id(vertices, vertices.columns) + else: + vertices = G.lookup_internal_vertex_id(vertices) + + vertices = ensure_valid_dtype(G, vertices, "subgraph_vertices") + + if not isinstance(offsets, cudf.Series): + if isinstance(offsets, list): + offsets = cudf.Series(offsets) + elif offsets is None: + # FIXME: Does the offsets always start from zero? + offsets = cudf.Series([0, len(vertices)]) + + result_graph = Graph(directed=directed) + + do_expensive_check = False + source, destination, weight, offsets = pylibcugraph_induced_subgraph( + resource_handle=ResourceHandle(), + graph=G._plc_graph, + subgraph_vertices=vertices, + subgraph_offsets=offsets, + do_expensive_check=do_expensive_check, + ) + + df = cudf.DataFrame() + df["src"] = source + df["dst"] = destination + df["weight"] = weight + + if len(df) == 0: + return None, None + + seeds_offsets = cudf.Series(offsets) + + if G.renumbered: + df, src_names = G.unrenumber(df, "src", get_column_names=True) + df, dst_names = G.unrenumber(df, "dst", get_column_names=True) + else: + # FIXME: THe original 'src' and 'dst' are not stored in 'simpleGraph' + src_names = "src" + dst_names = "dst" + + if G.edgelist.weights: + result_graph.from_cudf_edgelist( + df, source=src_names, destination=dst_names, edge_attr="weight" + ) + else: + result_graph.from_cudf_edgelist(df, source=src_names, destination=dst_names) + + if isNx is True: + result_graph = cugraph_to_nx(result_graph) + + return result_graph, seeds_offsets diff --git a/python/cugraph/cugraph/community/spectral_clustering.pxd b/python/cugraph/cugraph/community/spectral_clustering.pxd deleted file mode 100644 index 346eb50a157..00000000000 --- a/python/cugraph/cugraph/community/spectral_clustering.pxd +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2019-2021, NVIDIA CORPORATION. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# cython: profile=False -# distutils: language = c++ -# cython: embedsignature = True -# cython: language_level = 3 - -from cugraph.structure.graph_primtypes cimport * - - -cdef extern from "cugraph/algorithms.hpp" namespace "cugraph::ext_raft": - - cdef void balancedCutClustering[VT,ET,WT]( - const GraphCSRView[VT,ET,WT] &graph, - const int num_clusters, - const int num_eigen_vects, - const float evs_tolerance, - const int evs_max_iter, - const float kmean_tolerance, - const int kmean_max_iter, - VT* clustering) except + - - cdef void spectralModularityMaximization[VT,ET,WT]( - const GraphCSRView[VT,ET,WT] &graph, - const int n_clusters, - const int n_eig_vects, - const float evs_tolerance, - const int evs_max_iter, - const float kmean_tolerance, - const int kmean_max_iter, - VT* clustering) except + - - cdef void analyzeClustering_modularity[VT,ET,WT]( - const GraphCSRView[VT,ET,WT] &graph, - const int n_clusters, - const VT* clustering, - WT* score) except + - - cdef void analyzeClustering_edge_cut[VT,ET,WT]( - const GraphCSRView[VT,ET,WT] &graph, - const int n_clusters, - const VT* clustering, - WT* score) except + - - cdef void analyzeClustering_ratio_cut[VT,ET,WT]( - const GraphCSRView[VT,ET,WT] &graph, - const int n_clusters, - const VT* clustering, - WT* score) except + diff --git a/python/cugraph/cugraph/community/spectral_clustering.py b/python/cugraph/cugraph/community/spectral_clustering.py index 3951d42ade3..5116d4c8fdf 100644 --- a/python/cugraph/cugraph/community/spectral_clustering.py +++ b/python/cugraph/cugraph/community/spectral_clustering.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,11 +11,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
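Note (illustrative, not part of the patch): the spectral clustering hunks below move spectralBalancedCutClustering, spectralModularityMaximizationClustering and the analyzeClustering_* scoring helpers onto the pylibcugraph implementations, and they now require the graph's vertex columns (and the clustering's 'vertex' and cluster columns) to be int32. A rough usage sketch under those assumptions; the keyword names for the scoring call follow the defaults referenced in the diff:

>>> import cudf
>>> import cugraph
>>> edges = cudf.DataFrame({
...     "src": cudf.Series([0, 1, 2], dtype="int32"),
...     "dst": cudf.Series([1, 3, 3], dtype="int32"),
...     "wgt": [5.0, 0.1, 5.0],
... })
>>> G = cugraph.Graph()
>>> G.from_cudf_edgelist(edges, source="src", destination="dst", edge_attr="wgt")
>>> clusters = cugraph.spectralBalancedCutClustering(G, num_clusters=2)
>>> clusters["cluster"] = clusters["cluster"].astype("int32")  # int32 is now enforced
>>> score = cugraph.analyzeClustering_edge_cut(
...     G, 2, clusters, vertex_col_name="vertex", cluster_col_name="cluster"
... )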
-from cugraph.community import spectral_clustering_wrapper from cugraph.utilities import ( ensure_cugraph_obj_for_nx, df_score_to_dictionary, ) +from pylibcugraph import ( + balanced_cut_clustering as pylibcugraph_balanced_cut_clustering, + spectral_modularity_maximization as pylibcugraph_spectral_modularity_maximization, + analyze_clustering_modularity as pylibcugraph_analyze_clustering_modularity, + analyze_clustering_edge_cut as pylibcugraph_analyze_clustering_edge_cut, + analyze_clustering_ratio_cut as pylibcugraph_analyze_clustering_ratio_cut, +) +from pylibcugraph import ResourceHandle +import cudf +import numpy as np def spectralBalancedCutClustering( @@ -81,17 +90,31 @@ def spectralBalancedCutClustering( # Error checking in C++ code G, isNx = ensure_cugraph_obj_for_nx(G) - - df = spectral_clustering_wrapper.spectralBalancedCutClustering( - G, + # Check if vertex type is "int32" + if ( + G.edgelist.edgelist_df.dtypes[0] != np.int32 + or G.edgelist.edgelist_df.dtypes[1] != np.int32 + ): + raise ValueError( + "'spectralBalancedCutClustering' requires the input graph's vertex to be " + "of type 'int32'" + ) + vertex, partition = pylibcugraph_balanced_cut_clustering( + ResourceHandle(), + G._plc_graph, num_clusters, num_eigen_vects, evs_tolerance, evs_max_iter, kmean_tolerance, kmean_max_iter, + do_expensive_check=False, ) + df = cudf.DataFrame() + df["vertex"] = vertex + df["cluster"] = partition + if G.renumbered: df = G.unrenumber(df, "vertex") @@ -161,20 +184,32 @@ def spectralModularityMaximizationClustering( """ - # Error checking in C++ code - G, isNx = ensure_cugraph_obj_for_nx(G) + if ( + G.edgelist.edgelist_df.dtypes[0] != np.int32 + or G.edgelist.edgelist_df.dtypes[1] != np.int32 + ): + raise ValueError( + "'spectralModularityMaximizationClustering' requires the input graph's " + "vertex to be of type 'int32'" + ) - df = spectral_clustering_wrapper.spectralModularityMaximizationClustering( - G, + vertex, partition = pylibcugraph_spectral_modularity_maximization( + ResourceHandle(), + G._plc_graph, num_clusters, num_eigen_vects, evs_tolerance, evs_max_iter, kmean_tolerance, kmean_max_iter, + do_expensive_check=False, ) + df = cudf.DataFrame() + df["vertex"] = vertex + df["cluster"] = partition + if G.renumbered: df = G.unrenumber(df, "vertex") @@ -235,16 +270,32 @@ def analyzeClustering_modularity( raise Exception("cluster_col_name must be a string") G, isNx = ensure_cugraph_obj_for_nx(G) + if ( + G.edgelist.edgelist_df.dtypes[0] != np.int32 + or G.edgelist.edgelist_df.dtypes[1] != np.int32 + ): + raise ValueError( + "'analyzeClustering_modularity' requires the input graph's " + "vertex to be of type 'int32'" + ) if G.renumbered: clustering = G.add_internal_vertex_id( clustering, "vertex", vertex_col_name, drop=True ) - clustering = clustering.sort_values("vertex") + if clustering.dtypes[0] != np.int32 or clustering.dtypes[1] != np.int32: + raise ValueError( + "'analyzeClustering_modularity' requires both the clustering 'vertex' " + "and 'cluster' to be of type 'int32'" + ) - score = spectral_clustering_wrapper.analyzeClustering_modularity( - G, n_clusters, clustering[cluster_col_name] + score = pylibcugraph_analyze_clustering_modularity( + ResourceHandle(), + G._plc_graph, + n_clusters, + clustering["vertex"], + clustering[cluster_col_name], ) return score @@ -302,15 +353,32 @@ def analyzeClustering_edge_cut( G, isNx = ensure_cugraph_obj_for_nx(G) + if ( + G.edgelist.edgelist_df.dtypes[0] != np.int32 + or G.edgelist.edgelist_df.dtypes[1] != np.int32 + ): + raise ValueError( + 
"'analyzeClustering_edge_cut' requires the input graph's vertex to be " + "of type 'int32'" + ) + if G.renumbered: clustering = G.add_internal_vertex_id( clustering, "vertex", vertex_col_name, drop=True ) - clustering = clustering.sort_values("vertex").reset_index(drop=True) + if clustering.dtypes[0] != np.int32 or clustering.dtypes[1] != np.int32: + raise ValueError( + "'analyzeClustering_edge_cut' requires both the clustering 'vertex' " + "and 'cluster' to be of type 'int32'" + ) - score = spectral_clustering_wrapper.analyzeClustering_edge_cut( - G, n_clusters, clustering[cluster_col_name] + score = pylibcugraph_analyze_clustering_edge_cut( + ResourceHandle(), + G._plc_graph, + n_clusters, + clustering["vertex"], + clustering[cluster_col_name], ) return score @@ -369,10 +437,18 @@ def analyzeClustering_ratio_cut( clustering, "vertex", vertex_col_name, drop=True ) - clustering = clustering.sort_values("vertex") + if clustering.dtypes[0] != np.int32 or clustering.dtypes[1] != np.int32: + raise ValueError( + "'analyzeClustering_ratio_cut' requires both the clustering 'vertex' " + "and 'cluster' to be of type 'int32'" + ) - score = spectral_clustering_wrapper.analyzeClustering_ratio_cut( - G, n_clusters, clustering[cluster_col_name] + score = pylibcugraph_analyze_clustering_ratio_cut( + ResourceHandle(), + G._plc_graph, + n_clusters, + clustering["vertex"], + clustering[cluster_col_name], ) return score diff --git a/python/cugraph/cugraph/community/spectral_clustering_wrapper.pyx b/python/cugraph/cugraph/community/spectral_clustering_wrapper.pyx deleted file mode 100644 index 8a04e2c1017..00000000000 --- a/python/cugraph/cugraph/community/spectral_clustering_wrapper.pyx +++ /dev/null @@ -1,341 +0,0 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# cython: profile=False -# distutils: language = c++ -# cython: embedsignature = True -# cython: language_level = 3 - -from cugraph.community.spectral_clustering cimport balancedCutClustering as c_balanced_cut_clustering -from cugraph.community.spectral_clustering cimport spectralModularityMaximization as c_spectral_modularity_maximization -from cugraph.community.spectral_clustering cimport analyzeClustering_modularity as c_analyze_clustering_modularity -from cugraph.community.spectral_clustering cimport analyzeClustering_edge_cut as c_analyze_clustering_edge_cut -from cugraph.community.spectral_clustering cimport analyzeClustering_ratio_cut as c_analyze_clustering_ratio_cut -from cugraph.structure.graph_primtypes cimport * -from cugraph.structure import graph_primtypes_wrapper -from libc.stdint cimport uintptr_t -import cugraph -import cudf -import numpy as np - - -def spectralBalancedCutClustering(input_graph, - num_clusters, - num_eigen_vects=2, - evs_tolerance=.00001, - evs_max_iter=100, - kmean_tolerance=.00001, - kmean_max_iter=100): - """ - Call balancedCutClustering_nvgraph - """ - if isinstance(input_graph, cugraph.Graph): - if input_graph.is_directed(): - raise ValueError("directed graphs are not supported") - else: - raise TypeError(f"only cugraph.Graph objects are supported, got: {type(input_graph)}") - if not input_graph.adjlist: - input_graph.view_adj_list() - - weights = None - - [offsets, indices] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.offsets, input_graph.adjlist.indices], [np.int32]) - - num_verts = input_graph.number_of_vertices() - num_edges = input_graph.number_of_edges(directed_edges=True) - - if input_graph.adjlist.weights is not None: - [weights] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.weights], [np.float32, np.float64]) - else: - weights = cudf.Series(np.full(num_edges, 1.0, dtype=np.float32)) - - # Create the output dataframe - df = cudf.DataFrame() - df['vertex'] = cudf.Series(np.zeros(num_verts, dtype=np.int32)) - df['cluster'] = cudf.Series(np.zeros(num_verts, dtype=np.int32)) - - cdef uintptr_t c_offsets = offsets.__cuda_array_interface__['data'][0] - cdef uintptr_t c_indices = indices.__cuda_array_interface__['data'][0] - cdef uintptr_t c_identifier = df['vertex'].__cuda_array_interface__['data'][0] - cdef uintptr_t c_cluster = df['cluster'].__cuda_array_interface__['data'][0] - cdef uintptr_t c_weights = weights.__cuda_array_interface__['data'][0] - - cdef GraphCSRView[int,int,float] graph_float - cdef GraphCSRView[int,int,double] graph_double - - if weights.dtype == np.float32: - graph_float = GraphCSRView[int,int,float](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - graph_float.get_vertex_identifiers(c_identifier) - c_balanced_cut_clustering(graph_float, - num_clusters, - num_eigen_vects, - evs_tolerance, - evs_max_iter, - kmean_tolerance, - kmean_max_iter, - c_cluster) - else: - graph_double = GraphCSRView[int,int,double](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - graph_double.get_vertex_identifiers(c_identifier) - c_balanced_cut_clustering(graph_double, - num_clusters, - num_eigen_vects, - evs_tolerance, - evs_max_iter, - kmean_tolerance, - kmean_max_iter, - c_cluster) - - return df - -def spectralModularityMaximizationClustering(input_graph, - num_clusters, - num_eigen_vects=2, - evs_tolerance=.00001, - evs_max_iter=100, - kmean_tolerance=.00001, - kmean_max_iter=100): - """ - Call spectralModularityMaximization_nvgraph - """ - if isinstance(input_graph, cugraph.Graph): - if 
input_graph.is_directed(): - raise ValueError("directed graphs are not supported") - else: - raise TypeError(f"only cugraph.Graph objects are supported, got: {type(input_graph)}") - if not input_graph.adjlist: - input_graph.view_adj_list() - - if input_graph.adjlist.weights is None: - raise Exception("spectral modularity maximization must be called on a graph with weights") - - [offsets, indices] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.offsets, input_graph.adjlist.indices], [np.int32]) - [weights] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.weights], [np.float32, np.float64]) - - num_verts = input_graph.number_of_vertices() - num_edges = input_graph.number_of_edges(directed_edges=True) - - # Create the output dataframe - df = cudf.DataFrame() - df['vertex'] = cudf.Series(np.zeros(num_verts, dtype=np.int32)) - df['cluster'] = cudf.Series(np.zeros(num_verts, dtype=np.int32)) - - cdef uintptr_t c_offsets = offsets.__cuda_array_interface__['data'][0] - cdef uintptr_t c_indices = indices.__cuda_array_interface__['data'][0] - cdef uintptr_t c_weights = weights.__cuda_array_interface__['data'][0] - cdef uintptr_t c_identifier = df['vertex'].__cuda_array_interface__['data'][0] - cdef uintptr_t c_cluster = df['cluster'].__cuda_array_interface__['data'][0] - - cdef GraphCSRView[int,int,float] graph_float - cdef GraphCSRView[int,int,double] graph_double - - if weights.dtype == np.float32: - graph_float = GraphCSRView[int,int,float](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - graph_float.get_vertex_identifiers(c_identifier) - c_spectral_modularity_maximization(graph_float, - num_clusters, - num_eigen_vects, - evs_tolerance, - evs_max_iter, - kmean_tolerance, - kmean_max_iter, - c_cluster) - else: - graph_double = GraphCSRView[int,int,double](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - graph_double.get_vertex_identifiers(c_identifier) - c_spectral_modularity_maximization(graph_double, - num_clusters, - num_eigen_vects, - evs_tolerance, - evs_max_iter, - kmean_tolerance, - kmean_max_iter, - c_cluster) - - return df - -def analyzeClustering_modularity(input_graph, n_clusters, clustering): - """ - Call analyzeClustering_modularity_nvgraph - """ - if isinstance(input_graph, cugraph.Graph): - if input_graph.is_directed(): - raise ValueError("directed graphs are not supported") - else: - raise TypeError(f"only cugraph.Graph objects are supported, got: {type(input_graph)}") - if not input_graph.adjlist: - input_graph.view_adj_list() - - [offsets, indices] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.offsets, input_graph.adjlist.indices], [np.int32]) - [weights] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.weights], [np.float32, np.float64]) - - score = None - num_verts = input_graph.number_of_vertices() - num_edges = input_graph.number_of_edges(directed_edges=True) - - if input_graph.adjlist.weights is None: - raise Exception("analyze clustering modularity must be called on a graph with weights") - if input_graph.adjlist.weights is not None: - [weights] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.weights], [np.float32, np.float64]) - else: - weights = cudf.Series(np.full(num_edges, 1.0, dtype=np.float32)) - - cdef uintptr_t c_offsets = offsets.__cuda_array_interface__['data'][0] - cdef uintptr_t c_indices = indices.__cuda_array_interface__['data'][0] - cdef uintptr_t c_weights = weights.__cuda_array_interface__['data'][0] - cdef uintptr_t c_cluster = 
clustering.__cuda_array_interface__['data'][0] - - cdef GraphCSRView[int,int,float] graph_float - cdef GraphCSRView[int,int,double] graph_double - cdef float score_float - cdef double score_double - - if weights.dtype == np.float32: - graph_float = GraphCSRView[int,int,float](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - c_analyze_clustering_modularity(graph_float, - n_clusters, - c_cluster, - &score_float) - - score = score_float - else: - graph_double = GraphCSRView[int,int,double](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - c_analyze_clustering_modularity(graph_double, - n_clusters, - c_cluster, - &score_double) - score = score_double - - return score - -def analyzeClustering_edge_cut(input_graph, n_clusters, clustering): - """ - Call analyzeClustering_edge_cut_nvgraph - """ - if isinstance(input_graph, cugraph.Graph): - if input_graph.is_directed(): - raise ValueError("directed graphs are not supported") - else: - raise TypeError(f"only cugraph.Graph objects are supported, got: {type(input_graph)}") - if not input_graph.adjlist: - input_graph.view_adj_list() - - [offsets, indices] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.offsets, input_graph.adjlist.indices], [np.int32]) - - score = None - num_verts = input_graph.number_of_vertices() - num_edges = input_graph.number_of_edges(directed_edges=True) - - if input_graph.adjlist.weights is not None: - [weights] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.weights], [np.float32, np.float64]) - else: - weights = cudf.Series(np.full(num_edges, 1.0, dtype=np.float32)) - - cdef uintptr_t c_offsets = offsets.__cuda_array_interface__['data'][0] - cdef uintptr_t c_indices = indices.__cuda_array_interface__['data'][0] - cdef uintptr_t c_weights = weights.__cuda_array_interface__['data'][0] - cdef uintptr_t c_cluster = clustering.__cuda_array_interface__['data'][0] - - cdef GraphCSRView[int,int,float] graph_float - cdef GraphCSRView[int,int,double] graph_double - cdef float score_float - cdef double score_double - - if weights.dtype == np.float32: - graph_float = GraphCSRView[int,int,float](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - c_analyze_clustering_edge_cut(graph_float, - n_clusters, - c_cluster, - &score_float) - - score = score_float - else: - graph_double = GraphCSRView[int,int,double](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - c_analyze_clustering_edge_cut(graph_double, - n_clusters, - c_cluster, - &score_double) - score = score_double - - return score - -def analyzeClustering_ratio_cut(input_graph, n_clusters, clustering): - """ - Call analyzeClustering_ratio_cut_nvgraph - """ - if isinstance(input_graph, cugraph.Graph): - if input_graph.is_directed(): - raise ValueError("directed graphs are not supported") - else: - raise TypeError(f"only cugraph.Graph objects are supported, got: {type(input_graph)}") - if not input_graph.adjlist: - input_graph.view_adj_list() - - [offsets, indices] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.offsets, input_graph.adjlist.indices], [np.int32]) - - score = None - num_verts = input_graph.number_of_vertices() - num_edges = input_graph.number_of_edges(directed_edges=True) - - if input_graph.adjlist.weights is not None: - [weights] = graph_primtypes_wrapper.datatype_cast([input_graph.adjlist.weights], [np.float32, np.float64]) - else: - weights = cudf.Series(np.full(num_edges, 1.0, dtype=np.float32)) - - cdef uintptr_t c_offsets = offsets.__cuda_array_interface__['data'][0] - cdef uintptr_t 
c_indices = indices.__cuda_array_interface__['data'][0] - cdef uintptr_t c_weights = weights.__cuda_array_interface__['data'][0] - cdef uintptr_t c_cluster = clustering.__cuda_array_interface__['data'][0] - - cdef GraphCSRView[int,int,float] graph_float - cdef GraphCSRView[int,int,double] graph_double - cdef float score_float - cdef double score_double - - if weights.dtype == np.float32: - graph_float = GraphCSRView[int,int,float](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - c_analyze_clustering_ratio_cut(graph_float, - n_clusters, - c_cluster, - &score_float) - - score = score_float - else: - graph_double = GraphCSRView[int,int,double](c_offsets, c_indices, - c_weights, num_verts, num_edges) - - c_analyze_clustering_ratio_cut(graph_double, - n_clusters, - c_cluster, - &score_double) - score = score_double - - return score diff --git a/python/cugraph/cugraph/community/subgraph_extraction.pxd b/python/cugraph/cugraph/community/subgraph_extraction.pxd deleted file mode 100644 index 583e220327d..00000000000 --- a/python/cugraph/cugraph/community/subgraph_extraction.pxd +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2019-2021, NVIDIA CORPORATION. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# cython: profile=False -# distutils: language = c++ -# cython: embedsignature = True -# cython: language_level = 3 - -from cugraph.structure.graph_primtypes cimport * -from libcpp.memory cimport unique_ptr - - -cdef extern from "cugraph/algorithms.hpp" namespace "cugraph::subgraph": - - cdef unique_ptr[GraphCOO[VT,ET,WT]] extract_subgraph_vertex[VT,ET,WT]( - const GraphCOOView[VT,ET,WT] &graph, - const VT *vertices, - ET num_vertices) except + diff --git a/python/cugraph/cugraph/community/subgraph_extraction.py b/python/cugraph/cugraph/community/subgraph_extraction.py index 05d61db4132..efd2f49829f 100644 --- a/python/cugraph/cugraph/community/subgraph_extraction.py +++ b/python/cugraph/cugraph/community/subgraph_extraction.py @@ -11,16 +11,26 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Union +import warnings + import cudf + +import cugraph from cugraph.structure import Graph -from cugraph.community import subgraph_extraction_wrapper -from cugraph.utilities import ( - ensure_cugraph_obj_for_nx, - cugraph_to_nx, -) +from cugraph.utilities.utils import import_optional + +# FIXME: the networkx.Graph type used in the type annotation for subgraph() is +# specified using a string literal to avoid depending on and importing +# networkx. Instead, networkx is imported optionally, which may cause a problem +# for a type checker if run in an environment where networkx is not installed. +networkx = import_optional("networkx") -def subgraph(G, vertices): +def subgraph( + G, + vertices: Union[cudf.Series, cudf.DataFrame], +) -> Union[Graph, "networkx.Graph"]: """ Compute a subgraph of the existing graph including only the specified vertices. 
This algorithm works with both directed and undirected graphs @@ -28,10 +38,12 @@ def subgraph(G, vertices): edges that are incident on vertices that are both contained in the vertices list. + If no subgraph can be extracted from the vertices provided, a 'None' value + will be returned. + Parameters ---------- - G : cugraph.Graph - cuGraph graph descriptor + G : cugraph.Graph or networkx.Graph The current implementation only supports weighted graphs. vertices : cudf.Series or cudf.DataFrame @@ -40,7 +52,7 @@ def subgraph(G, vertices): Returns ------- - Sg : cugraph.Graph + Sg : cugraph.Graph or networkx.Graph A graph object containing the subgraph induced by the given vertex set. Examples @@ -56,33 +68,11 @@ def subgraph(G, vertices): """ - G, isNx = ensure_cugraph_obj_for_nx(G) - directed = G.is_directed() - - if G.renumbered: - if isinstance(vertices, cudf.DataFrame): - vertices = G.lookup_internal_vertex_id(vertices, vertices.columns) - else: - vertices = G.lookup_internal_vertex_id(vertices) - - result_graph = Graph(directed=directed) - - df = subgraph_extraction_wrapper.subgraph(G, vertices) - src_names = "src" - dst_names = "dst" - - if G.renumbered: - df, src_names = G.unrenumber(df, src_names, get_column_names=True) - df, dst_names = G.unrenumber(df, dst_names, get_column_names=True) - - if G.edgelist.weights: - result_graph.from_cudf_edgelist( - df, source=src_names, destination=dst_names, edge_attr="weight" - ) - else: - result_graph.from_cudf_edgelist(df, source=src_names, destination=dst_names) + warning_msg = ( + "This call is deprecated. Please call 'cugraph.induced_subgraph()' instead." + ) + warnings.warn(warning_msg, DeprecationWarning) - if isNx is True: - result_graph = cugraph_to_nx(result_graph) + result_graph, _ = cugraph.induced_subgraph(G, vertices) return result_graph diff --git a/python/cugraph/cugraph/community/subgraph_extraction_wrapper.pyx b/python/cugraph/cugraph/community/subgraph_extraction_wrapper.pyx deleted file mode 100644 index 46dc5c07eaf..00000000000 --- a/python/cugraph/cugraph/community/subgraph_extraction_wrapper.pyx +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (c) 2019-2021, NVIDIA CORPORATION. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
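Note (illustrative, not part of the patch): with the rewrite above, cugraph.subgraph() becomes a thin deprecated shim that forwards to cugraph.induced_subgraph() and returns only the extracted graph (the seed offsets are discarded). A short sketch of the migration path:

>>> import cudf
>>> import cugraph
>>> from cugraph.experimental.datasets import karate
>>> G = karate.get_graph(fetch=True)
>>> verts = cudf.Series([0, 1, 2], dtype="int32")
>>> Sg_old = cugraph.subgraph(G, verts)                          # now emits DeprecationWarning
>>> Sg_new, seeds_offsets = cugraph.induced_subgraph(G, verts)   # preferred call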
- -# cython: profile=False -# distutils: language = c++ -# cython: embedsignature = True -# cython: language_level = 3 - -from cugraph.community.subgraph_extraction cimport extract_subgraph_vertex as c_extract_subgraph_vertex -from cugraph.structure.graph_primtypes cimport * -from cugraph.structure import graph_primtypes_wrapper -from libc.stdint cimport uintptr_t -import cudf -import numpy as np - - -def subgraph(input_graph, vertices): - """ - Call extract_subgraph_vertex - """ - src = None - dst = None - weights = None - use_float = True - - if not input_graph.edgelist: - input_graph.view_edge_list() - - [src, dst] = graph_primtypes_wrapper.datatype_cast([input_graph.edgelist.edgelist_df['src'], input_graph.edgelist.edgelist_df['dst']], [np.int32]) - - if input_graph.edgelist.weights: - [weights] = graph_primtypes_wrapper.datatype_cast([input_graph.edgelist.edgelist_df['weights']], [np.float32, np.float64]) - if weights.dtype == np.float64: - use_float = False - - num_verts = input_graph.number_of_vertices() - num_edges = len(src) - num_input_vertices = len(vertices) - - cdef GraphCOOView[int,int,float] in_graph_float - cdef GraphCOOView[int,int,double] in_graph_double - cdef unique_ptr[GraphCOO[int,int,float]] out_graph_float - cdef unique_ptr[GraphCOO[int,int,double]] out_graph_double - - cdef uintptr_t c_src = src.__cuda_array_interface__['data'][0] - cdef uintptr_t c_dst = dst.__cuda_array_interface__['data'][0] - cdef uintptr_t c_weights = NULL - - if weights is not None: - c_weights = weights.__cuda_array_interface__['data'][0] - - [vertices] = graph_primtypes_wrapper.datatype_cast([vertices], [np.int32]) - cdef uintptr_t c_vertices = vertices.__cuda_array_interface__['data'][0] - - if use_float: - in_graph_float = GraphCOOView[int,int,float](c_src, c_dst, c_weights, num_verts, num_edges); - df = coo_to_df(move(c_extract_subgraph_vertex(in_graph_float, c_vertices, num_input_vertices))); - else: - in_graph_double = GraphCOOView[int,int,double](c_src, c_dst, c_weights, num_verts, num_edges); - df = coo_to_df(move(c_extract_subgraph_vertex(in_graph_double, c_vertices, num_input_vertices))); - - # renumber vertices to match original input - vertices_df = cudf.DataFrame() - vertices_df['v'] = vertices - vertices_df = vertices_df.reset_index(drop=True).reset_index() - - df = df.merge(vertices_df, left_on='src', right_on='index', how='left').drop(columns=['src', 'index']).rename(columns={'v': 'src'}, copy=False) - df = df.merge(vertices_df, left_on='dst', right_on='index', how='left').drop(columns=['dst', 'index']).rename(columns={'v': 'dst'}, copy=False) - - return df diff --git a/python/cugraph/cugraph/community/triangle_count.py b/python/cugraph/cugraph/community/triangle_count.py index ff8504e3c07..306cdb2333f 100644 --- a/python/cugraph/cugraph/community/triangle_count.py +++ b/python/cugraph/cugraph/community/triangle_count.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -18,6 +18,29 @@ from pylibcugraph import ResourceHandle +import warnings + + +# FIXME: Move this function to the utility module so that it can be +# shared by other algos +def ensure_valid_dtype(input_graph, start_list): + vertex_dtype = input_graph.edgelist.edgelist_df.dtypes[0] + if isinstance(start_list, cudf.Series): + start_list_dtypes = start_list.dtype + else: + start_list_dtypes = start_list.dtypes[0] + + if start_list_dtypes != vertex_dtype: + warning_msg = ( + "Triangle_count requires 'start_list' to match the graph's 'vertex' type. " + f"input graph's vertex type is: {vertex_dtype} and got " + f"'start_list' of type: {start_list_dtypes}." + ) + warnings.warn(warning_msg, UserWarning) + start_list = start_list.astype(vertex_dtype) + + return start_list + def triangle_count(G, start_list=None): """ @@ -72,6 +95,7 @@ def triangle_count(G, start_list=None): f"'start_list' must be either a list or a cudf.Series," f"got: {start_list.dtype}" ) + start_list = ensure_valid_dtype(G, start_list) if G.renumbered is True: if isinstance(start_list, cudf.DataFrame): diff --git a/python/cugraph/cugraph/components/connectivity.py b/python/cugraph/cugraph/components/connectivity.py index 0863fff7067..3b12c8cb5e0 100644 --- a/python/cugraph/cugraph/components/connectivity.py +++ b/python/cugraph/cugraph/components/connectivity.py @@ -291,6 +291,20 @@ def strongly_connected_components( (G, input_type) = ensure_cugraph_obj( G, nx_weight_attr="weight", matrix_graph_type=Graph(directed=directed) ) + # Renumber the vertices so that they are contiguous (required) + # FIXME: Remove 'renumbering' once the algo leverage the CAPI graph + if not G.renumbered: + edgelist = G.edgelist.edgelist_df + renumbered_edgelist_df, renumber_map = G.renumber_map.renumber( + edgelist, ["src"], ["dst"] + ) + renumbered_src_col_name = renumber_map.renumbered_src_col_name + renumbered_dst_col_name = renumber_map.renumbered_dst_col_name + G.edgelist.edgelist_df = renumbered_edgelist_df.rename( + columns={renumbered_src_col_name: "src", renumbered_dst_col_name: "dst"} + ) + G.properties.renumbered = True + G.renumber_map = renumber_map df = connectivity_wrapper.strongly_connected_components(G) diff --git a/python/cugraph/cugraph/dask/__init__.py b/python/cugraph/cugraph/dask/__init__.py index ef9503b4349..63b171d8ee6 100644 --- a/python/cugraph/cugraph/dask/__init__.py +++ b/python/cugraph/cugraph/dask/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -20,12 +20,14 @@ from .community.louvain import louvain from .community.triangle_count import triangle_count from .community.egonet import ego_graph +from .community.induced_subgraph import induced_subgraph from .centrality.katz_centrality import katz_centrality from .components.connectivity import weakly_connected_components from .sampling.uniform_neighbor_sample import uniform_neighbor_sample from .sampling.random_walks import random_walks from .centrality.eigenvector_centrality import eigenvector_centrality from .cores.core_number import core_number +from .centrality.betweenness_centrality import betweenness_centrality from .cores.k_core import k_core from .link_prediction.jaccard import jaccard from .link_prediction.sorensen import sorensen diff --git a/python/cugraph/cugraph/dask/centrality/betweenness_centrality.py b/python/cugraph/cugraph/dask/centrality/betweenness_centrality.py new file mode 100644 index 00000000000..e048c91f34d --- /dev/null +++ b/python/cugraph/cugraph/dask/centrality/betweenness_centrality.py @@ -0,0 +1,232 @@ +# Copyright (c) 2022-2023, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from dask.distributed import wait, get_client +from pylibcugraph import ( + ResourceHandle, + betweenness_centrality as pylibcugraph_betweenness_centrality, +) +import cugraph.dask.comms.comms as Comms +from cugraph.dask.common.input_utils import get_distributed_data +import dask_cudf +import cudf +import cupy as cp +import warnings +import dask +from typing import Union + + +def convert_to_cudf(cp_arrays: cp.ndarray) -> cudf.DataFrame: + """ + create a cudf DataFrame from cupy arrays + """ + cupy_vertices, cupy_values = cp_arrays + df = cudf.DataFrame() + df["vertex"] = cupy_vertices + df["betweenness_centrality"] = cupy_values + return df + + +def _call_plc_betweenness_centrality( + mg_graph_x, + sID: bytes, + k: Union[int, cudf.Series], + random_state: int, + normalized: bool, + endpoints: bool, + do_expensive_check: bool, +) -> cudf.DataFrame: + + cp_arrays = pylibcugraph_betweenness_centrality( + resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()), + graph=mg_graph_x, + k=k, + random_state=random_state, + normalized=normalized, + include_endpoints=endpoints, + do_expensive_check=do_expensive_check, + ) + return convert_to_cudf(cp_arrays) + + +def _mg_call_plc_betweenness_centrality( + input_graph, + client: dask.distributed.client.Client, + sID: bytes, + k: dict, + random_state: int, + normalized: bool, + endpoints: bool, + do_expensive_check: bool, +) -> dask_cudf.DataFrame: + + result = [ + client.submit( + _call_plc_betweenness_centrality, + input_graph._plc_graph[w], + sID, + k if isinstance(k, (int, type(None))) else k[w][0], + hash((random_state, i)), + normalized, + endpoints, + do_expensive_check, + workers=[w], + allow_other_workers=False, + pure=False, + ) + for i, w in enumerate(Comms.get_workers()) + ] + + ddf = dask_cudf.from_delayed(result, verify_meta=False).persist() + wait(ddf) + 
wait([r.release() for r in result]) + return ddf + + +def betweenness_centrality( + input_graph, + k: Union[ + int, list, cudf.Series, cudf.DataFrame, dask_cudf.Series, dask_cudf.DataFrame + ] = None, + normalized: bool = True, + endpoints: bool = False, + random_state: int = None, +) -> dask_cudf.DataFrame: + """ + Compute the betweenness centrality for all vertices of the graph G. + Betweenness centrality is a measure of the number of shortest paths that + pass through a vertex. A vertex with a high betweenness centrality score + has more paths passing through it and is therefore believed to be more + important. + + To improve performance. rather than doing an all-pair shortest path, + a sample of k starting vertices can be used. + + CuGraph does not currently support the 'endpoints' and 'weight' parameters + as seen in the corresponding networkX call. + + Parameters + ---------- + input_graph: cuGraph.Graph + The graph can be either directed (Graph(directed=True)) or undirected. + Weights in the graph are ignored, the current implementation uses a parallel + variation of the Brandes Algorithm (2001) to compute exact or approximate + betweenness. If weights are provided in the edgelist, they will not be + used. + + k : int, list or (dask)cudf object or None, optional (default=None) + If k is not None, use k node samples to estimate betweenness. Higher + values give better approximation. If k is either a list or a (dask)cudf, + use its content for estimation: it contain vertex identifiers. If k is None + (the default), all the vertices are used to estimate betweenness. Vertices + obtained through sampling or defined as a list will be used as sources for + traversals inside the algorithm. + + normalized : bool, optional (default=True) + If True normalize the resulting betweenness centrality values + + endpoints : bool, optional (default=False) + If true, include the endpoints in the shortest path counts. + + random_state : int, optional (default=None) + if k is specified and k is an integer, use random_state to initialize the + random number generator. + Using None defaults to a hash of process id, time, and hostname + If k is either None or list or cudf objects: random_state parameter is + ignored. + + Returns + ------- + betweenness_centrality : dask_cudf.DataFrame + GPU distributed data frame containing two dask_cudf.Series of size V: + the vertex identifiers and the corresponding betweenness centrality values. + + ddf['vertex'] : dask_cudf.Series + Contains the vertex identifiers + ddf['betweenness_centrality'] : dask_cudf.Series + Contains the betweenness centrality of vertices + + Examples + -------- + >>> import cugraph.dask as dcg + >>> import dask_cudf + >>> # ... Init a DASK Cluster + >>> # see https://docs.rapids.ai/api/cugraph/stable/dask-cugraph.html + >>> # Download dataset from https://github.com/rapidsai/cugraph/datasets/.. + >>> chunksize = dcg.get_chunksize(datasets_path / "karate.csv") + >>> ddf = dask_cudf.read_csv(datasets_path / "karate.csv", + ... chunksize=chunksize, delimiter=" ", + ... names=["src", "dst", "value"], + ... 
dtype=["int32", "int32", "float32"]) + >>> dg = cugraph.Graph(directed=True) + >>> dg.from_dask_cudf_edgelist(ddf, source='src', destination='dst') + >>> pr = dcg.betweenness_centrality(dg) + + """ + + if input_graph.store_transposed is True: + warning_msg = ( + "Betweenness centrality expects the 'store_transposed' flag " + "to be set to 'False' for optimal performance during " + "the graph creation" + ) + warnings.warn(warning_msg, UserWarning) + + if not isinstance(k, (dask_cudf.DataFrame, dask_cudf.Series)): + if isinstance(k, (cudf.DataFrame, cudf.Series, list)): + if isinstance(k, list): + k_dtype = input_graph.nodes().dtype + k = cudf.Series(k, dtype=k_dtype) + + if isinstance(k, (cudf.Series, cudf.DataFrame)): + splits = cp.array_split(cp.arange(len(k)), len(Comms.get_workers())) + k = {w: [k.iloc[splits[i]]] for i, w in enumerate(Comms.get_workers())} + + else: + if k is not None: + k = get_distributed_data(k) + wait(k) + k = k.worker_to_parts + + if input_graph.renumbered: + if isinstance(k, dask_cudf.DataFrame): + tmp_col_names = k.columns + + elif isinstance(k, dask_cudf.Series): + tmp_col_names = None + + if isinstance(k, (dask_cudf.DataFrame, dask_cudf.Series)): + k = input_graph.lookup_internal_vertex_id(k, tmp_col_names) + + # FIXME: should we add this parameter as an option? + do_expensive_check = False + + client = get_client() + + ddf = _mg_call_plc_betweenness_centrality( + input_graph, + client, + Comms.get_session_id(), + k, + random_state, + normalized, + endpoints, + do_expensive_check, + ) + + if input_graph.renumbered: + return input_graph.unrenumber(ddf, "vertex") + + return ddf diff --git a/python/cugraph/cugraph/dask/community/__init__.py b/python/cugraph/cugraph/dask/community/__init__.py index e1447a3aa34..b963edfa1cc 100644 --- a/python/cugraph/cugraph/dask/community/__init__.py +++ b/python/cugraph/cugraph/dask/community/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -13,3 +13,4 @@ from .louvain import louvain from .triangle_count import triangle_count +from .induced_subgraph import induced_subgraph diff --git a/python/cugraph/cugraph/dask/community/egonet.py b/python/cugraph/cugraph/dask/community/egonet.py index 44e66a08614..2d0d07b59ce 100644 --- a/python/cugraph/cugraph/dask/community/egonet.py +++ b/python/cugraph/cugraph/dask/community/egonet.py @@ -80,7 +80,7 @@ def ego_graph(input_graph, n, radius=1, center=True): Parameters ---------- - input_graph : cugraph.Graph, networkx.Graph + input_graph : cugraph.Graph Graph or matrix object, which should contain the connectivity information. Edge weights, if present, should be single or double precision floating point values. @@ -176,4 +176,8 @@ def ego_graph(input_graph, n, radius=1, center=True): ddf = ddf.drop(columns="labels") + if input_graph.renumbered: + ddf = input_graph.unrenumber(ddf, "src") + ddf = input_graph.unrenumber(ddf, "dst") + return ddf, offsets diff --git a/python/cugraph/cugraph/dask/community/induced_subgraph.py b/python/cugraph/cugraph/dask/community/induced_subgraph.py new file mode 100644 index 00000000000..5d902f667a4 --- /dev/null +++ b/python/cugraph/cugraph/dask/community/induced_subgraph.py @@ -0,0 +1,215 @@ +# Copyright (c) 2022-2023, NVIDIA CORPORATION. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from dask.distributed import wait, default_client + +import cugraph.dask.comms.comms as Comms +import dask_cudf +import cudf +import cupy as cp +from cugraph.dask.common.input_utils import get_distributed_data +from typing import Union, Tuple + +from pylibcugraph import ( + ResourceHandle, + induced_subgraph as pylibcugraph_induced_subgraph, +) + + +def _call_induced_subgraph( + sID: bytes, + mg_graph_x, + vertices: cudf.Series, + offsets: cudf.Series, + do_expensive_check: bool, +) -> Tuple[cp.ndarray, cp.ndarray, cp.ndarray, cp.ndarray]: + return pylibcugraph_induced_subgraph( + resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()), + graph=mg_graph_x, + subgraph_vertices=vertices, + subgraph_offsets=offsets, + do_expensive_check=do_expensive_check, + ) + + +def consolidate_results(df: cudf.DataFrame, offsets: cudf.Series) -> cudf.DataFrame: + """ + Each rank returns its induced_subgraph dataframe with its corresponding + offsets array. This is ideal if the user operates on distributed memory + but when attempting to bring the result into a single machine, + the induced_subgraph dataframes generated from each seed cannot be extracted + using the offsets array. This function consolidate the final result by + performing segmented copies. + + Returns: consolidated induced_subgraph dataframe + """ + for i in range(len(offsets) - 1): + df_tmp = df[offsets[i] : offsets[i + 1]] + df_tmp["labels"] = i + if i == 0: + df_consolidate = df_tmp + else: + df_consolidate = cudf.concat([df_consolidate, df_tmp]) + return df_consolidate + + +def convert_to_cudf(cp_arrays: cp.ndarray) -> cudf.DataFrame: + cp_src, cp_dst, cp_weight, cp_offsets = cp_arrays + + df = cudf.DataFrame() + df["src"] = cp_src + df["dst"] = cp_dst + df["weight"] = cp_weight + + offsets = cudf.Series(cp_offsets) + + return consolidate_results(df, offsets) + + +def induced_subgraph( + input_graph, + vertices: Union[cudf.Series, cudf.DataFrame], + offsets: Union[list, cudf.Series] = None, +) -> Tuple[dask_cudf.DataFrame, dask_cudf.Series]: + """ + Compute a subgraph of the existing graph including only the specified + vertices. This algorithm works with both directed and undirected graphs + and does not actually traverse the edges, but instead simply pulls out any + edges that are incident on vertices that are both contained in the vertices + list. + + If no subgraph can be extracted from the vertices provided, a 'None' value + will be returned. + + Parameters + ---------- + input_graph : cugraph.Graph + Graph or matrix object, which should contain the connectivity + information. Edge weights, if present, should be single or double + precision floating point values. + + vertices : cudf.Series or cudf.DataFrame + Specifies the vertices of the induced subgraph. For multi-column + vertices, vertices should be provided as a cudf.DataFrame + + offsets : list or cudf.Series, optional + Specifies the subgraph offsets into the subgraph vertices. 
+ If no offsets array is provided, a default array [0, len(vertices)] + will be used. + + Returns + ------- + ego_edge_lists : dask_cudf.DataFrame + Distributed GPU data frame containing all induced sources identifiers, + destination identifiers, edge weights + seeds_offsets: dask_cudf.Series + Distributed Series containing the starting offset in the returned edge list + for each seed. + + """ + + # Initialize dask client + client = default_client() + + if isinstance(vertices, (int, list)): + vertices = cudf.Series(vertices) + elif not isinstance( + vertices, (cudf.Series, dask_cudf.Series, cudf.DataFrame, dask_cudf.DataFrame) + ): + raise TypeError( + f"'vertices' must be either an integer or a list or a " + f"cudf or dask_cudf Series or DataFrame, got: {type(vertices)}" + ) + + if isinstance(offsets, list): + offsets = cudf.Series(offsets) + + if offsets is None: + offsets = cudf.Series([0, len(vertices)]) + + if not isinstance(offsets, cudf.Series): + raise TypeError( + f"'offsets' must be either 'None', a list or a " + f"cudf Series, got: {type(offsets)}" + ) + + # vertices uses "external" vertex IDs, but since the graph has been + # renumbered, the node ID must also be renumbered. + if input_graph.renumbered: + vertices = input_graph.lookup_internal_vertex_id(vertices) + vertices_type = input_graph.edgelist.edgelist_df.dtypes[0] + else: + vertices_type = input_graph.input_df.dtypes[0] + + if isinstance(vertices, (cudf.Series, cudf.DataFrame)): + vertices = dask_cudf.from_cudf( + vertices, npartitions=min(input_graph._npartitions, len(vertices)) + ) + vertices = vertices.astype(vertices_type) + + vertices = get_distributed_data(vertices) + wait(vertices) + + vertices = vertices.worker_to_parts + + do_expensive_check = False + + result = [ + client.submit( + _call_induced_subgraph, + Comms.get_session_id(), + input_graph._plc_graph[w], + vertices[w][0], + offsets, + do_expensive_check, + workers=[w], + allow_other_workers=False, + ) + for w in Comms.get_workers() + ] + wait(result) + + cudf_result = [client.submit(convert_to_cudf, cp_arrays) for cp_arrays in result] + + wait(cudf_result) + + ddf = dask_cudf.from_delayed(cudf_result).persist() + wait(ddf) + + if len(ddf) == 0: + return None, None + + wait([(r.release(), c_r.release()) for r, c_r in zip(result, cudf_result)]) + + ddf = ddf.sort_values("labels") + + # extract offsets from segmented induced_subgraph dataframes + offsets = ddf["labels"].value_counts().compute().sort_index() + offsets = cudf.concat([cudf.Series(0), offsets]) + offsets = ( + dask_cudf.from_cudf( + offsets, npartitions=min(input_graph._npartitions, len(vertices)) + ) + .cumsum() + .astype(vertices_type) + ) + + ddf = ddf.drop(columns="labels") + + if input_graph.renumbered: + ddf = input_graph.unrenumber(ddf, "src") + ddf = input_graph.unrenumber(ddf, "dst") + + return ddf, offsets diff --git a/python/cugraph/cugraph/dask/link_analysis/pagerank.py b/python/cugraph/cugraph/dask/link_analysis/pagerank.py index 820a46692a5..75d5b6d16c6 100644 --- a/python/cugraph/cugraph/dask/link_analysis/pagerank.py +++ b/python/cugraph/cugraph/dask/link_analysis/pagerank.py @@ -44,21 +44,38 @@ def convert_to_cudf(cp_arrays): # shared by other algos def ensure_valid_dtype(input_graph, input_df, input_df_name): if input_graph.properties.weighted is False: - edge_attr_dtype = np.float64 + # If the graph is not weighted, an artificial weight column + # of type 'float32' is added and it must match the user + # personalization/nstart values. 
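Returning to the multi-GPU induced_subgraph helper introduced above, the following sketch shows how a caller might slice the returned edge list with the seed offsets. It assumes an initialized Dask cluster with cugraph comms and an existing MG graph `dg`; the seed values are made up.

import cudf
import cugraph.dask as dcg

# Two seed groups: vertices [0, 1] and [2, 33].
seeds = cudf.Series([0, 1, 2, 33], dtype="int32")
group_offsets = cudf.Series([0, 2, 4])

edges_ddf, seed_offsets = dcg.induced_subgraph(dg, seeds, group_offsets)

if edges_ddf is not None:
    edges = edges_ddf.compute()
    bounds = seed_offsets.compute().values_host
    # Rows belonging to the i-th seed group sit between consecutive offsets.
    first_group_edges = edges.iloc[int(bounds[0]):int(bounds[1])]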
+ edge_attr_dtype = np.float32 else: edge_attr_dtype = input_graph.input_df["value"].dtype - input_df_dtype = input_df["values"].dtype - if input_df_dtype != edge_attr_dtype: + if "values" in input_df.columns: + input_df_values_dtype = input_df["values"].dtype + if input_df_values_dtype != edge_attr_dtype: + warning_msg = ( + f"PageRank requires '{input_df_name}' values " + "to match the graph's 'edge_attr' type. " + f"edge_attr type is: {edge_attr_dtype} and got " + f"'{input_df_name}' values of type: " + f"{input_df_values_dtype}." + ) + warnings.warn(warning_msg, UserWarning) + input_df = input_df.astype({"values": edge_attr_dtype}) + + vertex_dtype = input_graph.edgelist.edgelist_df.dtypes[0] + input_df_vertex_dtype = input_df["vertex"].dtype + if input_df_vertex_dtype != vertex_dtype: warning_msg = ( - f"PageRank requires '{input_df_name}' values " - "to match the graph's 'edge_attr' type. " - f"edge_attr type is: {edge_attr_dtype} and got " - f"'{input_df_name}' values of type: " - f"{input_df_dtype}." + f"PageRank requires '{input_df_name}' vertex " + "to match the graph's 'vertex' type. " + f"input graph's vertex type is: {vertex_dtype} and got " + f"'{input_df_name}' vertex of type: " + f"{input_df_vertex_dtype}." ) warnings.warn(warning_msg, UserWarning) - input_df = input_df.astype({"values": edge_attr_dtype}) + input_df = input_df.astype({"vertex": vertex_dtype}) return input_df @@ -263,6 +280,9 @@ def pagerank( precomputed_vertex_out_weight = renumber_vertices( input_graph, precomputed_vertex_out_weight ) + precomputed_vertex_out_weight = ensure_valid_dtype( + input_graph, precomputed_vertex_out_weight, "precomputed_vertex_out_weight" + ) precomputed_vertex_out_weight_vertices = precomputed_vertex_out_weight["vertex"] precomputed_vertex_out_weight_sums = precomputed_vertex_out_weight["sums"] diff --git a/python/cugraph/cugraph/dask/sampling/uniform_neighbor_sample.py b/python/cugraph/cugraph/dask/sampling/uniform_neighbor_sample.py index 0778fe14403..15d109452eb 100644 --- a/python/cugraph/cugraph/dask/sampling/uniform_neighbor_sample.py +++ b/python/cugraph/cugraph/dask/sampling/uniform_neighbor_sample.py @@ -87,8 +87,8 @@ def create_empty_df_with_edge_props(indices_t, weight_t, return_offsets=False): weight_n: numpy.empty(shape=0, dtype=weight_t), edge_id_n: numpy.empty(shape=0, dtype=indices_t), edge_type_n: numpy.empty(shape=0, dtype="int32"), - batch_id_n: numpy.empty(shape=0, dtype="int32"), hop_id_n: numpy.empty(shape=0, dtype="int32"), + batch_id_n: numpy.empty(shape=0, dtype="int32"), } ) return df @@ -263,19 +263,16 @@ def uniform_neighbor_sample( Does neighborhood sampling, which samples nodes from a graph based on the current node's neighbors, with a corresponding fanout value at each hop. - Note: This is a pylibcugraph-enabled algorithm, which requires that the - graph was created with legacy_renum_only=True. - Parameters ---------- input_graph : cugraph.Graph cuGraph graph, which contains connectivity information as dask cudf edge list dataframe - start_list : list or cudf.Series (int32) + start_list : int, list, cudf.Series, or dask_cudf.Series (int32 or int64) a list of starting vertices for sampling - fanout_vals : list (int32) + fanout_vals : list List of branching out (fan-out) degrees per starting vertex for each hop level. @@ -286,15 +283,16 @@ def uniform_neighbor_sample( Flag to specify whether to return edge properties (weight, edge id, edge type, batch id, hop id) with the sampled edges. 
- batch_id_list: list (int32), optional (default=None) + batch_id_list: cudf.Series or dask_cudf.Series (int32), optional (default=None) List of batch ids that will be returned with the sampled edges if with_edge_properties is set to True. - label_list: list (int32), optional (default=None) + label_list: cudf.Series or dask_cudf.Series (int32), optional (default=None) List of unique batch id labels. Used along with label_to_output_comm_rank to assign batch ids to GPUs. - label_to_out_comm_rank (int32), optional (default=None) + label_to_out_comm_rank: cudf.Series or dask_cudf.Series (int32), + optional (default=None) List of output GPUs (by rank) corresponding to batch id labels in the label list. Used to assign each batch id to a GPU. @@ -396,23 +394,41 @@ def uniform_neighbor_sample( else: indices_t = numpy.int32 - if input_graph.renumbered: - start_list = input_graph.lookup_internal_vertex_id(start_list) - - start_list = start_list.rename(start_col_name).to_frame() + start_list = start_list.rename(start_col_name) if batch_id_list is not None: - ddf = start_list.join(batch_id_list.rename(batch_col_name)) + batch_id_list = batch_id_list.rename(batch_col_name) + if hasattr(start_list, "compute"): + # mg input + start_list = start_list.to_frame() + batch_id_list = batch_id_list.to_frame() + ddf = start_list.merge( + batch_id_list, + how="left", + left_index=True, + right_index=True, + ) + else: + # sg input + ddf = cudf.concat( + [ + start_list, + batch_id_list, + ], + axis=1, + ) else: - ddf = start_list + ddf = start_list.to_frame() - if isinstance(ddf, cudf.DataFrame): - splits = cp.array_split(cp.arange(len(ddf)), len(Comms.get_workers())) - ddf = {w: [ddf.iloc[splits[i]]] for i, w in enumerate(Comms.get_workers())} + if input_graph.renumbered: + ddf = input_graph.lookup_internal_vertex_id(ddf, column_name=start_col_name) - else: + if hasattr(ddf, "compute"): ddf = get_distributed_data(ddf) wait(ddf) ddf = ddf.worker_to_parts + else: + splits = cp.array_split(cp.arange(len(ddf)), len(Comms.get_workers())) + ddf = {w: [ddf.iloc[splits[i]]] for i, w in enumerate(Comms.get_workers())} client = get_client() session_id = Comms.get_session_id() diff --git a/python/cugraph/cugraph/dask/structure/mg_property_graph.py b/python/cugraph/cugraph/dask/structure/mg_property_graph.py index c4b4c7d3ac4..d81d40597dc 100644 --- a/python/cugraph/cugraph/dask/structure/mg_property_graph.py +++ b/python/cugraph/cugraph/dask/structure/mg_property_graph.py @@ -1428,21 +1428,6 @@ def edge_props_to_graph( f"cannot be represented with the {msg}" ) - # FIXME: This forces the renumbering code to run a python-only - # renumbering without the newer C++ renumbering step. This is - # required since the newest graph algos which are using the - # pylibcugraph library will crash if passed data renumbered using the - # C++ renumbering. The consequence of this is that these extracted - # subgraphs can only be used with newer pylibcugraph-based MG algos. - # - # NOTE: if the vertices are integers (int32 or int64), renumbering is - # actually skipped with the assumption that the C renumbering will - # take place. The C renumbering only occurs for pylibcugraph algos, - # hence the reason these extracted subgraphs only work with PLC algos. 
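The start-vertex and batch-id pairing performed by the rework above can be pictured with a small single-GPU sketch; the column names and values here are only illustrative, the wrapper uses internal names.

import cudf

start_list = cudf.Series([0, 1, 2, 5], name="start")
batch_id_list = cudf.Series([0, 0, 1, 1], dtype="int32", name="batch_id")

# Single-GPU path: align the two Series column-wise, one row per seed.
paired = cudf.concat([start_list, batch_id_list], axis=1)

# The multi-GPU path does the same alignment with an index-based merge on
# dask_cudf frames before renumbering and distributing the rows to workers.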
- if renumber_graph is False: - raise ValueError("currently, renumber_graph must be set to True for MG") - legacy_renum_only = True - col_names = [self.src_col_name, self.dst_col_name] if edge_attr is not None: col_names.append(edge_attr) @@ -1460,7 +1445,6 @@ def edge_props_to_graph( destination=self.dst_col_name, edge_attr=edge_attr, renumber=renumber_graph, - legacy_renum_only=legacy_renum_only, ) if add_edge_data: @@ -1530,7 +1514,11 @@ def renumber_vertices_by_type(self, prev_id_column=None): # Include self.vertex_col_name when sorting by values to ensure we can # evenly distribute the data across workers. df = df.reset_index().persist() - df = df.sort_values(by=[TCN, self.vertex_col_name], ignore_index=True).persist() + if len(cat_dtype.categories) > 1 and len(self.vertex_types) > 1: + # `self.vertex_types` is currently not cheap, b/c it looks at edge df + df = df.sort_values( + by=[TCN, self.vertex_col_name], ignore_index=True + ).persist() if self.__edge_prop_dataframe is not None: new_name = f"new_{self.vertex_col_name}" df[new_name] = 1 @@ -1621,9 +1609,10 @@ def renumber_edges_by_type(self, prev_id_column=None): # Include self.edge_id_col_name when sorting by values to ensure we can # evenly distribute the data across workers. df = df.reset_index().persist() - df = df.sort_values( - by=[self.type_col_name, self.edge_id_col_name], ignore_index=True - ).persist() + if len(cat_dtype.categories) > 1 and len(self.edge_types) > 1: + df = df.sort_values( + by=[self.type_col_name, self.edge_id_col_name], ignore_index=True + ).persist() if prev_id_column is not None: df[prev_id_column] = df[self.edge_id_col_name] @@ -1639,8 +1628,8 @@ def renumber_edges_by_type(self, prev_id_column=None): # FIXME DASK_CUDF: https://github.com/rapidsai/cudf/issues/11795 df = self._edge_type_value_counts - assert df.index.dtype == cat_dtype - df.index = df.index.astype(str) + if df.index.dtype == cat_dtype: + df.index = df.index.astype(str) # self._edge_type_value_counts rv = df.sort_index().cumsum().to_frame("stop") diff --git a/python/cugraph/cugraph/dask/traversal/bfs.py b/python/cugraph/cugraph/dask/traversal/bfs.py index 6aa80784ac5..cf467aaa18f 100644 --- a/python/cugraph/cugraph/dask/traversal/bfs.py +++ b/python/cugraph/cugraph/dask/traversal/bfs.py @@ -62,9 +62,6 @@ def bfs(input_graph, start, depth_limit=None, return_distances=True, check_start The input graph must contain edge list as a dask-cudf dataframe with one partition per GPU. - Note: This is a pylibcugraph-enabled algorithm, which requires that the - graph was created with legacy_renum_only=True. - Parameters ---------- input_graph : cugraph.Graph diff --git a/python/cugraph/cugraph/experimental/link_prediction/jaccard.py b/python/cugraph/cugraph/experimental/link_prediction/jaccard.py index eba487eb8e5..29f2f3ffe16 100644 --- a/python/cugraph/cugraph/experimental/link_prediction/jaccard.py +++ b/python/cugraph/cugraph/experimental/link_prediction/jaccard.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -17,6 +17,7 @@ renumber_vertex_pair, ) import cudf +import warnings from pylibcugraph.experimental import ( jaccard_coefficients as pylibcugraph_jaccard_coefficients, @@ -24,6 +25,25 @@ from pylibcugraph import ResourceHandle +# FIXME: Move this function to the utility module so that it can be +# shared by other algos +def ensure_valid_dtype(input_graph, vertex_pair): + + vertex_dtype = input_graph.edgelist.edgelist_df.dtypes[0] + vertex_pair_dtypes = vertex_pair.dtypes + + if vertex_pair_dtypes[0] != vertex_dtype or vertex_pair_dtypes[1] != vertex_dtype: + warning_msg = ( + "Jaccard requires 'vertex_pair' to match the graph's 'vertex' type. " + f"input graph's vertex type is: {vertex_dtype} and got " + f"'vertex_pair' of type: {vertex_pair_dtypes}." + ) + warnings.warn(warning_msg, UserWarning) + vertex_pair = vertex_pair.astype(vertex_dtype) + + return vertex_pair + + def EXPERIMENTAL__jaccard(G, vertex_pair=None, use_weight=False): """ Compute the Jaccard similarity between each pair of vertices connected by @@ -133,6 +153,7 @@ def EXPERIMENTAL__jaccard(G, vertex_pair=None, use_weight=False): if isinstance(vertex_pair, cudf.DataFrame): vertex_pair = renumber_vertex_pair(G, vertex_pair) + vertex_pair = ensure_valid_dtype(G, vertex_pair) src_col_name = vertex_pair.columns[0] dst_col_name = vertex_pair.columns[1] first = vertex_pair[src_col_name] diff --git a/python/cugraph/cugraph/experimental/link_prediction/overlap.py b/python/cugraph/cugraph/experimental/link_prediction/overlap.py index b587dc3bfed..f0c320be26b 100644 --- a/python/cugraph/cugraph/experimental/link_prediction/overlap.py +++ b/python/cugraph/cugraph/experimental/link_prediction/overlap.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -17,6 +17,7 @@ renumber_vertex_pair, ) import cudf +import warnings from pylibcugraph.experimental import ( overlap_coefficients as pylibcugraph_overlap_coefficients, @@ -24,6 +25,25 @@ from pylibcugraph import ResourceHandle +# FIXME: Move this function to the utility module so that it can be +# shared by other algos +def ensure_valid_dtype(input_graph, vertex_pair): + + vertex_dtype = input_graph.edgelist.edgelist_df.dtypes[0] + vertex_pair_dtypes = vertex_pair.dtypes + + if vertex_pair_dtypes[0] != vertex_dtype or vertex_pair_dtypes[1] != vertex_dtype: + warning_msg = ( + "Overlap requires 'vertex_pair' to match the graph's 'vertex' type. " + f"input graph's vertex type is: {vertex_dtype} and got " + f"'vertex_pair' of type: {vertex_pair_dtypes}." + ) + warnings.warn(warning_msg, UserWarning) + vertex_pair = vertex_pair.astype(vertex_dtype) + + return vertex_pair + + def EXPERIMENTAL__overlap_coefficient(G, ebunch=None, use_weight=False): """ For NetworkX Compatability. 
See `overlap` @@ -168,6 +188,7 @@ def EXPERIMENTAL__overlap(G, vertex_pair=None, use_weight=False): if isinstance(vertex_pair, cudf.DataFrame): vertex_pair = renumber_vertex_pair(G, vertex_pair) + vertex_pair = ensure_valid_dtype(G, vertex_pair) src_col_name = vertex_pair.columns[0] dst_col_name = vertex_pair.columns[1] first = vertex_pair[src_col_name] diff --git a/python/cugraph/cugraph/experimental/link_prediction/sorensen.py b/python/cugraph/cugraph/experimental/link_prediction/sorensen.py index 7908127cac5..c6fdc1ee422 100644 --- a/python/cugraph/cugraph/experimental/link_prediction/sorensen.py +++ b/python/cugraph/cugraph/experimental/link_prediction/sorensen.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2022, NVIDIA CORPORATION. +# Copyright (c) 2021-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -17,12 +17,32 @@ renumber_vertex_pair, ) import cudf +import warnings from pylibcugraph.experimental import ( sorensen_coefficients as pylibcugraph_sorensen_coefficients, ) from pylibcugraph import ResourceHandle +# FIXME: Move this function to the utility module so that it can be +# shared by other algos +def ensure_valid_dtype(input_graph, vertex_pair): + + vertex_dtype = input_graph.edgelist.edgelist_df.dtypes[0] + vertex_pair_dtypes = vertex_pair.dtypes + + if vertex_pair_dtypes[0] != vertex_dtype or vertex_pair_dtypes[1] != vertex_dtype: + warning_msg = ( + "Sorensen requires 'vertex_pair' to match the graph's 'vertex' type. " + f"input graph's vertex type is: {vertex_dtype} and got " + f"'vertex_pair' of type: {vertex_pair_dtypes}." + ) + warnings.warn(warning_msg, UserWarning) + vertex_pair = vertex_pair.astype(vertex_dtype) + + return vertex_pair + + def EXPERIMENTAL__sorensen(G, vertex_pair=None, use_weight=False): """ Compute the Sorensen coefficient between each pair of vertices connected by @@ -101,6 +121,7 @@ def EXPERIMENTAL__sorensen(G, vertex_pair=None, use_weight=False): if isinstance(vertex_pair, cudf.DataFrame): vertex_pair = renumber_vertex_pair(G, vertex_pair) + vertex_pair = ensure_valid_dtype(G, vertex_pair) src_col_name = vertex_pair.columns[0] dst_col_name = vertex_pair.columns[1] first = vertex_pair[src_col_name] diff --git a/python/cugraph/cugraph/generators/rmat.py b/python/cugraph/cugraph/generators/rmat.py index 828f54459a4..2c9167a5217 100644 --- a/python/cugraph/cugraph/generators/rmat.py +++ b/python/cugraph/cugraph/generators/rmat.py @@ -283,7 +283,7 @@ def rmat( Probability of the second partition c : float - Probability of the thrid partition + Probability of the third partition seed : int Seed value for the random number generator diff --git a/python/cugraph/cugraph/gnn/data_loading/bulk_sampler.py b/python/cugraph/cugraph/gnn/data_loading/bulk_sampler.py index a4d1467a259..95fab240eb2 100644 --- a/python/cugraph/cugraph/gnn/data_loading/bulk_sampler.py +++ b/python/cugraph/cugraph/gnn/data_loading/bulk_sampler.py @@ -57,6 +57,19 @@ def __init__( kwargs: kwargs Keyword arguments to be passed to the sampler (i.e. fanout). 
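The ensure_valid_dtype helpers added above for the experimental jaccard, overlap and sorensen coefficients all coerce the vertex pairs to the graph's vertex dtype. A minimal sketch of the mismatch they guard against, with a made-up edge list:

import cudf
import cugraph

edges = cudf.DataFrame(
    {"src": cudf.Series([0, 1, 2], dtype="int32"),
     "dst": cudf.Series([1, 2, 0], dtype="int32")}
)
G = cugraph.Graph()
G.from_cudf_edgelist(edges, source="src", destination="dst")

# int64 pairs against an int32 graph: the helpers warn and cast.
vertex_pair = cudf.DataFrame(
    {"first": cudf.Series([0, 1], dtype="int64"),
     "second": cudf.Series([1, 2], dtype="int64")}
)
vertex_dtype = G.edgelist.edgelist_df.dtypes[0]
vertex_pair = vertex_pair.astype(vertex_dtype)  # same cast the helpers apply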
""" + + max_batches_per_partition = seeds_per_call // batch_size + if batches_per_partition > max_batches_per_partition: + import warnings + + warnings.warn( + f"batches_per_partition ({batches_per_partition}) is >" + f" seeds_per_call / batch size ({max_batches_per_partition})" + "; automatically setting batches_per_partition to " + "{max_batches_per_partition}" + ) + batches_per_partition = max_batches_per_partition + self.__batch_size = batch_size self.__output_path = output_path self.__graph = graph @@ -142,7 +155,11 @@ def add_batches( self.__batches = df else: if isinstance(df, type(self.__batches)): - self.__batches = self.__batches.append(df) + if isinstance(df, dask_cudf.DataFrame): + concat_fn = dask_cudf.concat + else: + concat_fn = cudf.concat + self.__batches = concat_fn([self.__batches, df]) else: raise TypeError( "Provided batches must match the dataframe" @@ -158,6 +175,7 @@ def flush(self) -> None: """ if self.size == 0: return + self.__batches.reset_index(drop=True) min_batch_id = self.__batches[self.batch_col_name].min() if isinstance(self.__batches, dask_cudf.DataFrame): @@ -173,11 +191,11 @@ def flush(self) -> None: max_batch_id = min_batch_id + npartitions * self.batches_per_partition - 1 batch_id_filter = self.__batches[self.batch_col_name] <= max_batch_id - sample_fn = ( - cugraph.uniform_neighbor_sample - if isinstance(self.__graph._plc_graph, pylibcugraph.graphs.SGGraph) - else cugraph.dask.uniform_neighbor_sample - ) + if isinstance(self.__graph._plc_graph, pylibcugraph.graphs.SGGraph): + sample_fn = cugraph.uniform_neighbor_sample + else: + sample_fn = cugraph.dask.uniform_neighbor_sample + self.__sample_call_args["_multiple_clients"] = True samples = sample_fn( self.__graph, diff --git a/python/cugraph/cugraph/gnn/dgl_extensions/utils/sampling.py b/python/cugraph/cugraph/gnn/dgl_extensions/utils/sampling.py index 64374f5b071..92b900cdb8e 100644 --- a/python/cugraph/cugraph/gnn/dgl_extensions/utils/sampling.py +++ b/python/cugraph/cugraph/gnn/dgl_extensions/utils/sampling.py @@ -51,8 +51,6 @@ def get_subgraph_and_src_range_from_edgelist(edge_list, is_mg, reverse_edges=Fal destination=dst_n, edge_attr=eid_n, renumber=renumber, - # FIXME: renumber=False is not supported for MNMG algos - legacy_renum_only=True, ) if hasattr(subgraph, "input_df"): subgraph.input_df = None diff --git a/python/cugraph/cugraph/layout/force_atlas2_wrapper.pyx b/python/cugraph/cugraph/layout/force_atlas2_wrapper.pyx index 73de7415971..4258be3ef71 100644 --- a/python/cugraph/cugraph/layout/force_atlas2_wrapper.pyx +++ b/python/cugraph/cugraph/layout/force_atlas2_wrapper.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -56,7 +56,9 @@ def force_atlas2(input_graph, if not input_graph.edgelist: input_graph.view_edge_list() - num_verts = input_graph.number_of_vertices() + # FIXME: This implementation assumes that the number of vertices + # is the max vertex ID + 1 which is not always the case. 
+ num_verts = input_graph.nodes().max() + 1 num_edges = len(input_graph.edgelist.edgelist_df['src']) cdef GraphCOOView[int,int,float] graph_float diff --git a/python/cugraph/cugraph/link_analysis/pagerank.py b/python/cugraph/cugraph/link_analysis/pagerank.py index 057ba0383c3..83b8af35e4c 100644 --- a/python/cugraph/cugraph/link_analysis/pagerank.py +++ b/python/cugraph/cugraph/link_analysis/pagerank.py @@ -40,21 +40,38 @@ def renumber_vertices(input_graph, input_df): # shared by other algos def ensure_valid_dtype(input_graph, input_df, input_df_name): if input_graph.edgelist.weights is False: + # If the graph is not weighted, an artificial weight column + # of type 'float32' is added and it must match the user + # personalization/nstart values. edge_attr_dtype = np.float32 else: edge_attr_dtype = input_graph.edgelist.edgelist_df["weights"].dtype - input_df_dtype = input_df["values"].dtype - if input_df_dtype != edge_attr_dtype: + if "values" in input_df.columns: + input_df_values_dtype = input_df["values"].dtype + if input_df_values_dtype != edge_attr_dtype: + warning_msg = ( + f"PageRank requires '{input_df_name}' values " + "to match the graph's 'edge_attr' type. " + f"edge_attr type is: {edge_attr_dtype} and got " + f"'{input_df_name}' values of type: " + f"{input_df_values_dtype}." + ) + warnings.warn(warning_msg, UserWarning) + input_df = input_df.astype({"values": edge_attr_dtype}) + + vertex_dtype = input_graph.edgelist.edgelist_df.dtypes[0] + input_df_vertex_dtype = input_df["vertex"].dtype + if input_df_vertex_dtype != vertex_dtype: warning_msg = ( - f"PageRank requires '{input_df_name}' values " - "to match the graph's 'edge_attr' type. " - f"edge_attr type is: {edge_attr_dtype} and got " - f"'{input_df_name}' values of type: " - f"{input_df_dtype}." + f"PageRank requires '{input_df_name}' vertex " + "to match the graph's 'vertex' type. " + f"input graph's vertex type is: {vertex_dtype} and got " + f"'{input_df_name}' vertex of type: " + f"{input_df_vertex_dtype}." ) warnings.warn(warning_msg, UserWarning) - input_df = input_df.astype({"values": edge_attr_dtype}) + input_df = input_df.astype({"vertex": vertex_dtype}) return input_df @@ -196,6 +213,9 @@ def pagerank( precomputed_vertex_out_weight = renumber_vertices( G, precomputed_vertex_out_weight ) + precomputed_vertex_out_weight = ensure_valid_dtype( + G, precomputed_vertex_out_weight, "precomputed_vertex_out_weight" + ) pre_vtx_o_wgt_vertices = precomputed_vertex_out_weight["vertex"] pre_vtx_o_wgt_sums = precomputed_vertex_out_weight["sums"] @@ -207,7 +227,7 @@ def pagerank( if G.renumbered is True: personalization = renumber_vertices(G, personalization) - personalization = ensure_valid_dtype(G, personalization, "personalization") + personalization = ensure_valid_dtype(G, personalization, "personalization") vertex, pagerank_values = pylibcugraph_p_pagerank( resource_handle=ResourceHandle(), diff --git a/python/cugraph/cugraph/sampling/node2vec.py b/python/cugraph/cugraph/sampling/node2vec.py index 2f94fa9dceb..247989648f3 100644 --- a/python/cugraph/cugraph/sampling/node2vec.py +++ b/python/cugraph/cugraph/sampling/node2vec.py @@ -25,9 +25,6 @@ def node2vec(G, start_vertices, max_depth=1, compress_result=True, p=1.0, q=1.0) Computes random walks for each node in 'start_vertices', under the node2vec sampling framework. - Note: This is a pylibcugraph-enabled algorithm, which requires that the - graph was created with legacy_renum_only=True. 
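For the single-GPU pagerank dtype checks above, a personalization frame that already satisfies them looks roughly like this; the graph and values are illustrative only.

import cudf
import cugraph

edges = cudf.DataFrame(
    {"src": cudf.Series([0, 1, 2, 3], dtype="int32"),
     "dst": cudf.Series([1, 2, 3, 0], dtype="int32")}
)
G = cugraph.Graph(directed=True)
G.from_cudf_edgelist(edges, source="src", destination="dst")

# 'vertex' matches the graph's vertex dtype; 'values' matches the edge weight
# dtype, which is float32 when the graph carries no explicit weights.
personalization = cudf.DataFrame(
    {"vertex": cudf.Series([0, 3], dtype="int32"),
     "values": cudf.Series([0.5, 0.5], dtype="float32")}
)
pr = cugraph.pagerank(G, personalization=personalization)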
- References ---------- diff --git a/python/cugraph/cugraph/sampling/random_walks.py b/python/cugraph/cugraph/sampling/random_walks.py index 565acd857a6..653bccccabd 100644 --- a/python/cugraph/cugraph/sampling/random_walks.py +++ b/python/cugraph/cugraph/sampling/random_walks.py @@ -92,7 +92,8 @@ def random_walks( >>> from cugraph.experimental.datasets import karate >>> M = karate.get_edgelist(fetch=True) >>> G = karate.get_graph() - >>> _, _, _ = cugraph.random_walks(G, "uniform", M, 3) + >>> start_vertices = G.nodes()[:4] + >>> _, _, _ = cugraph.random_walks(G, "uniform", start_vertices, 3) """ if legacy_result_type: @@ -117,7 +118,10 @@ def random_walks( start_vertices = [start_vertices] if isinstance(start_vertices, list): - start_vertices = cudf.Series(start_vertices) + # Ensure the 'start_vertices' have the same dtype as the edge list. + # Failing to do that may produce erroneous results. + vertex_dtype = G.edgelist.edgelist_df.dtypes[0] + start_vertices = cudf.Series(start_vertices, dtype=vertex_dtype) if G.renumbered is True: if isinstance(start_vertices, cudf.DataFrame): @@ -135,6 +139,8 @@ def random_walks( else: raise ValueError("Only 'uniform' random walks is currently supported") + vertex_paths = cudf.Series(vertex_paths) + if G.renumbered: df_ = cudf.DataFrame() df_["vertex_paths"] = vertex_paths diff --git a/python/cugraph/cugraph/sampling/uniform_neighbor_sample.py b/python/cugraph/cugraph/sampling/uniform_neighbor_sample.py index 99ff60b33ac..a7dad6c01a6 100644 --- a/python/cugraph/cugraph/sampling/uniform_neighbor_sample.py +++ b/python/cugraph/cugraph/sampling/uniform_neighbor_sample.py @@ -20,6 +20,7 @@ import cudf import cupy as cp +import warnings from typing import Union, Tuple, Sequence, List from typing import TYPE_CHECKING @@ -28,6 +29,27 @@ from cugraph import Graph +# FIXME: Move this function to the utility module so that it can be +# shared by other algos +def ensure_valid_dtype(input_graph, start_list): + vertex_dtype = input_graph.edgelist.edgelist_df.dtypes[0] + if isinstance(start_list, cudf.Series): + start_list_dtypes = start_list.dtype + else: + start_list_dtypes = start_list.dtypes[0] + + if start_list_dtypes != vertex_dtype: + warning_msg = ( + "Uniform neighbor sample requires 'start_list' to match the graph's " + f"'vertex' type. input graph's vertex type is: {vertex_dtype} and got " + f"'start_list' of type: {start_list_dtypes}." + ) + warnings.warn(warning_msg, UserWarning) + start_list = start_list.astype(vertex_dtype) + + return start_list + + def uniform_neighbor_sample( G: Graph, start_list: Sequence, @@ -42,9 +64,6 @@ def uniform_neighbor_sample( Does neighborhood sampling, which samples nodes from a graph based on the current node's neighbors, with a corresponding fanout value at each hop. - Note: This is a pylibcugraph-enabled algorithm, which requires that the - graph was created with legacy_renum_only=True. 
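The random_walks change above casts a plain Python start list to the graph's vertex dtype before renumbering; a small sketch of the same alignment done explicitly on the caller side (toy graph, positional call mirrors the doctest above):

import cudf
import cugraph

edges = cudf.DataFrame(
    {"src": cudf.Series([0, 1, 2, 3], dtype="int32"),
     "dst": cudf.Series([1, 2, 3, 0], dtype="int32"),
     "wgt": cudf.Series([1.0, 1.0, 1.0, 1.0], dtype="float32")}
)
G = cugraph.Graph()
G.from_cudf_edgelist(edges, source="src", destination="dst", edge_attr="wgt")

vertex_dtype = G.edgelist.edgelist_df.dtypes[0]
start_vertices = cudf.Series([0, 2], dtype=vertex_dtype)

# Three return values, as in the docstring example above; the names here are
# descriptive guesses rather than the library's own.
paths, path_weights, sizes = cugraph.random_walks(G, "uniform", start_vertices, 3)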
- Parameters ---------- G : cugraph.Graph @@ -152,6 +171,8 @@ def uniform_neighbor_sample( else: weight_t = "float32" + start_list = ensure_valid_dtype(G, start_list) + if G.renumbered is True: if isinstance(start_list, cudf.DataFrame): start_list = G.lookup_internal_vertex_id(start_list, start_list.columns) diff --git a/python/cugraph/cugraph/structure/CMakeLists.txt b/python/cugraph/cugraph/structure/CMakeLists.txt index 609878410c2..f8385a08afd 100644 --- a/python/cugraph/cugraph/structure/CMakeLists.txt +++ b/python/cugraph/cugraph/structure/CMakeLists.txt @@ -1,5 +1,5 @@ # ============================================================================= -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at @@ -12,7 +12,7 @@ # the License. # ============================================================================= -set(cython_sources graph_primtypes_wrapper.pyx graph_primtypes.pyx renumber_wrapper.pyx utils_wrapper.pyx) +set(cython_sources graph_primtypes_wrapper.pyx graph_primtypes.pyx utils_wrapper.pyx) set(linked_libraries cugraph::cugraph) rapids_cython_create_modules( CXX diff --git a/python/cugraph/cugraph/structure/__init__.py b/python/cugraph/cugraph/structure/__init__.py index a3a64432ded..d7e0ff62358 100644 --- a/python/cugraph/cugraph/structure/__init__.py +++ b/python/cugraph/cugraph/structure/__init__.py @@ -39,4 +39,3 @@ from_adjlist, ) from cugraph.structure.hypergraph import hypergraph -from cugraph.structure.shuffle import shuffle diff --git a/python/cugraph/cugraph/structure/graph_classes.py b/python/cugraph/cugraph/structure/graph_classes.py index 162efbece4c..5fd398124b8 100644 --- a/python/cugraph/cugraph/structure/graph_classes.py +++ b/python/cugraph/cugraph/structure/graph_classes.py @@ -153,6 +153,8 @@ def from_cudf_edgelist( pylibcugraph algorithms. Must be false for algorithms not yet converted to the pylibcugraph C API. + This parameter is deprecated and will be removed. + Examples -------- >>> df = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ', @@ -293,9 +295,11 @@ def from_dask_cudf_edgelist( If True, skips the C++ renumbering step. Must be true for pylibcugraph algorithms. Must be false for algorithms not yet converted to the pylibcugraph C API. + + This parameter is deprecated and will be removed. + """ - if renumber is False: - raise ValueError("'renumber' must be set to 'True' for MNMG algos") + if self._Impl is None: self._Impl = simpleDistributedGraphImpl(self.graph_properties) elif type(self._Impl) is not simpleDistributedGraphImpl: @@ -825,6 +829,8 @@ def from_cudf_edgelist( pylibcugraph algorithms. Must be false for algorithms not yet converted to the pylibcugraph C API. + This parameter is deprecated and will be removed. + Examples -------- >>> df = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ', @@ -893,6 +899,8 @@ def from_dask_cudf_edgelist( If True, skips the C++ renumbering step. Must be true for pylibcugraph algorithms. Must be false for algorithms not yet converted to the pylibcugraph C API. + + This parameter is deprecated and will be removed. 
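A single-GPU uniform_neighbor_sample call that respects the start_list dtype handling added above might look like the following; parameter names follow the multi-GPU docstring earlier in this patch and the data is made up.

import cudf
import cugraph

edges = cudf.DataFrame(
    {"src": cudf.Series([0, 1, 2, 3, 0], dtype="int32"),
     "dst": cudf.Series([1, 2, 3, 0, 2], dtype="int32")}
)
G = cugraph.Graph(directed=True)
G.from_cudf_edgelist(edges, source="src", destination="dst")

# start_list already matches the graph's vertex dtype, so no warning is
# expected; fanout_vals gives the fan-out per hop (here 2 neighbors, 2 hops).
start_list = cudf.Series([0, 1], dtype="int32")
samples = cugraph.uniform_neighbor_sample(G, start_list, fanout_vals=[2, 2])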
""" raise TypeError("Distributed N-partite graph not supported") diff --git a/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py b/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py index 699b5efa06b..4b33cf4c847 100644 --- a/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py +++ b/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py @@ -17,7 +17,12 @@ from cugraph.structure.symmetrize import symmetrize import cupy import cudf +import warnings import dask_cudf +import cupy as cp +import dask +from typing import Union +import numpy as np from pylibcugraph import ( MGGraph, @@ -27,7 +32,10 @@ from dask.distributed import wait, default_client from cugraph.dask.common.input_utils import get_distributed_data -from pylibcugraph import get_two_hop_neighbors as pylibcugraph_get_two_hop_neighbors +from pylibcugraph import ( + get_two_hop_neighbors as pylibcugraph_get_two_hop_neighbors, + select_random_vertices as pylibcugraph_select_random_vertices, +) import cugraph.dask.comms.comms as Comms @@ -64,7 +72,6 @@ def __init__(self, properties): # Structure self.edgelist = None self.renumber_map = None - self.aggregate_segment_offsets = None self.properties = simpleDistributedGraphImpl.Properties(properties) self.source_columns = None self.destination_columns = None @@ -122,9 +129,18 @@ def __from_edgelist( store_transposed=False, legacy_renum_only=False, ): + if not isinstance(input_ddf, dask_cudf.DataFrame): raise TypeError("input should be a dask_cudf dataFrame") + if renumber is False: + if type(source) is list and type(destination) is list: + raise ValueError("set renumber to True for multi column ids") + elif input_ddf[source].dtype not in [np.int32, np.int64] or input_ddf[ + destination + ].dtype not in [np.int32, np.int64]: + raise ValueError("set renumber to True for non integer columns ids") + s_col = source d_col = destination if not isinstance(s_col, list): @@ -181,13 +197,6 @@ def __from_edgelist( "types are not permitted for an " "undirected graph." ) - if not legacy_renum_only: - raise ValueError( - "User-provided edge ids and edge " - "types are only permitted when " - "from_edgelist is called with " - "legacy_renum_only=True." 
- ) source_col, dest_col, value_col = symmetrize( input_ddf, @@ -245,13 +254,24 @@ def __from_edgelist( transposed=store_transposed, legacy_renum_only=legacy_renum_only ) - self.properties.renumbered = self.renumber_map.implementation.numbered + if renumber is False: + self.properties.renumbered = False + src_col_name = self.source_columns + dst_col_name = self.destination_columns + + else: + # If 'renumber' is set to 'True', an extra renumbering (python) + # occurs if there are non-integer or multi-columns vertices + self.properties.renumbered = self.renumber_map.is_renumbered + + src_col_name = self.renumber_map.renumbered_src_col_name + dst_col_name = self.renumber_map.renumbered_dst_col_name + ddf = self.edgelist.edgelist_df num_edges = len(ddf) edge_data = get_distributed_data(ddf) - src_col_name = self.renumber_map.renumbered_src_col_name - dst_col_name = self.renumber_map.renumbered_dst_col_name + graph_props = GraphProperties( is_multigraph=self.properties.multi_edge, is_symmetric=not self.properties.directed, @@ -761,6 +781,92 @@ def convert_to_cudf(cp_arrays): return ddf + def select_random_vertices( + self, random_state: int = None, num_vertices: int = None + ) -> Union[dask_cudf.Series, dask_cudf.DataFrame]: + """ + Select random vertices from the graph + + Parameters + ---------- + random_state : int , optional(default=None) + Random state to use when generating samples. Optional argument, + defaults to a hash of process id, time, and hostname. + + num_vertices : int, optional(default=None) + Number of vertices to sample. If None, all vertices will be selected + + Returns + ------- + return random vertices from the graph as a dask object + """ + + _client = default_client() + + def convert_to_cudf(cp_arrays: cp.ndarray) -> cudf.Series: + """ + Creates a cudf Series from cupy arrays + """ + vertices = cudf.Series(cp_arrays) + + return vertices + + def _call_plc_select_random_vertices( + mg_graph_x, sID: bytes, random_state: int, num_vertices: int + ) -> cudf.Series: + + cp_arrays = pylibcugraph_select_random_vertices( + graph=mg_graph_x, + resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()), + random_state=random_state, + num_vertices=num_vertices, + ) + return convert_to_cudf(cp_arrays) + + def _mg_call_plc_select_random_vertices( + input_graph, + client: dask.distributed.client.Client, + sID: bytes, + random_state: int, + num_vertices: int, + ) -> dask_cudf.Series: + + result = [ + client.submit( + _call_plc_select_random_vertices, + input_graph._plc_graph[w], + sID, + hash((random_state, i)), + num_vertices, + workers=[w], + allow_other_workers=False, + pure=False, + ) + for i, w in enumerate(Comms.get_workers()) + ] + ddf = dask_cudf.from_delayed(result, verify_meta=False).persist() + wait(ddf) + wait([r.release() for r in result]) + return ddf + + ddf = _mg_call_plc_select_random_vertices( + self, + _client, + Comms.get_session_id(), + random_state, + num_vertices, + ) + + if self.properties.renumbered: + vertices = ddf.rename("vertex").to_frame() + vertices = self.renumber_map.unrenumber(vertices, "vertex") + if len(vertices.columns) == 1: + vertices = vertices["vertex"] + else: + vertices = ddf + + return vertices + def to_directed(self, G): """ Return a directed representation of the graph. @@ -947,8 +1053,16 @@ def compute_renumber_edge_list(self, transposed=False, legacy_renum_only=False): if True, The C++ renumbering will not be triggered. 
This parameter is added for new algos following the C/Pylibcugraph path + + This parameter is deprecated and will be removed. """ + if legacy_renum_only: + warning_msg = ( + "The parameter 'legacy_renum_only' is deprecated and will be removed." + ) + warnings.warn(warning_msg, DeprecationWarning) + if not self.properties.renumber: self.edgelist = self.EdgeList(self.input_df) self.renumber_map = None @@ -962,11 +1076,7 @@ def compute_renumber_edge_list(self, transposed=False, legacy_renum_only=False): del self.edgelist - ( - renumbered_ddf, - number_map, - aggregate_segment_offsets, - ) = NumberMap.renumber_and_segment( + (renumbered_ddf, number_map,) = NumberMap.renumber_and_segment( self.input_df, self.source_columns, self.destination_columns, @@ -976,7 +1086,6 @@ def compute_renumber_edge_list(self, transposed=False, legacy_renum_only=False): self.edgelist = self.EdgeList(renumbered_ddf) self.renumber_map = number_map - self.aggregate_segment_offsets = aggregate_segment_offsets self.properties.store_transposed = transposed def vertex_column_size(self): diff --git a/python/cugraph/cugraph/structure/graph_implementation/simpleGraph.py b/python/cugraph/cugraph/structure/graph_implementation/simpleGraph.py index 567b5aff307..7ad694e62f5 100644 --- a/python/cugraph/cugraph/structure/graph_implementation/simpleGraph.py +++ b/python/cugraph/cugraph/structure/graph_implementation/simpleGraph.py @@ -22,8 +22,13 @@ import cugraph.dask.comms.comms as Comms import pandas as pd import numpy as np +import warnings from cugraph.dask.structure import replication -from pylibcugraph import get_two_hop_neighbors as pylibcugraph_get_two_hop_neighbors +from typing import Union +from pylibcugraph import ( + get_two_hop_neighbors as pylibcugraph_get_two_hop_neighbors, + select_random_vertices as pylibcugraph_select_random_vertices, +) from pylibcugraph import ( ResourceHandle, @@ -114,6 +119,11 @@ def __from_edgelist( legacy_renum_only=True, store_transposed=False, ): + if legacy_renum_only: + warning_msg = ( + "The parameter 'legacy_renum_only' is deprecated and will be removed." + ) + warnings.warn(warning_msg, DeprecationWarning) # Verify column names present in input DataFrame s_col = source @@ -159,13 +169,6 @@ def __from_edgelist( "types are not permitted for an " "undirected graph." ) - if not legacy_renum_only: - raise ValueError( - "User-provided edge ids and edge " - "types are only permitted when " - "from_edgelist is called with " - "legacy_renum_only=True." - ) input_df = input_df[df_columns] # FIXME: check if the consolidated graph fits on the @@ -189,6 +192,8 @@ def __from_edgelist( "input should be a cudf.DataFrame or " "a dask_cudf dataFrame" ) + # Original, unmodified input dataframe. 
+ self.input_df = elist # Renumbering self.renumber_map = None self.store_transposed = store_transposed @@ -203,10 +208,8 @@ def __from_edgelist( ) source = renumber_map.renumbered_src_col_name destination = renumber_map.renumbered_dst_col_name - # Use renumber_map to figure out if renumbering was skipped or not - # This was added to handle 'legacy_renum_only' which will skip the - # old C++ renumbering when running the pylibcugraph/C algos - self.properties.renumbered = renumber_map.implementation.numbered + # Use renumber_map to figure out if the python renumbering occured + self.properties.renumbered = renumber_map.is_renumbered self.renumber_map = renumber_map else: if type(source) is list and type(destination) is list: @@ -306,8 +309,8 @@ def to_pandas_adjacency(self): np_array_data = self.to_numpy_array() pdf = pd.DataFrame(np_array_data) - if self.properties.renumbered: - nodes = self.renumber_map.implementation.df["0"].values_host.tolist() + + nodes = self.nodes().values_host.tolist() pdf.columns = nodes pdf.index = nodes return pdf @@ -321,10 +324,12 @@ def to_numpy_array(self): elen = self.number_of_edges() df = self.edgelist.edgelist_df np_array = np.full((nlen, nlen), 0.0) + nodes = self.nodes() for i in range(0, elen): - np_array[ - df[simpleGraphImpl.srcCol].iloc[i], df[simpleGraphImpl.dstCol].iloc[i] - ] = df[self.edgeWeightCol].iloc[i] + # Map vertices to consecutive integers + idx_src = nodes[nodes == df[simpleGraphImpl.srcCol].iloc[i]].index[0] + idx_dst = nodes[nodes == df[simpleGraphImpl.dstCol].iloc[i]].index[0] + np_array[idx_src, idx_dst] = df[self.edgeWeightCol].iloc[i] return np_array def to_numpy_matrix(self): @@ -634,6 +639,43 @@ def get_two_hop_neighbors(self, start_vertices=None): return df + def select_random_vertices( + self, + random_state: int = None, + num_vertices: int = None, + ) -> Union[cudf.Series, cudf.DataFrame]: + """ + Select random vertices from the graph + + Parameters + ---------- + random_state : int , optional(default=None) + Random state to use when generating samples. Optional argument, + defaults to a hash of process id, time, and hostname. + + num_vertices : int, optional(default=None) + Number of vertices to sample. If None, all vertices will be selected + + Returns + ------- + return random vertices from the graph as a cudf + """ + vertices = pylibcugraph_select_random_vertices( + resource_handle=ResourceHandle(), + graph=self._plc_graph, + random_state=random_state, + num_vertices=num_vertices, + ) + + vertices = cudf.Series(vertices) + if self.properties.renumbered is True: + df_ = cudf.DataFrame() + df_["vertex"] = vertices + df_ = self.renumber_map.unrenumber(df_, "vertex") + vertices = df_["vertex"] + + return vertices + def number_of_vertices(self): """ Get the number of nodes in the graph. 
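Illustrative single-GPU usage of the select_random_vertices() method added above (a sketch only, not part of the patch; the edge list below is made up):

# Sketch: exercising the new select_random_vertices() on a small
# single-GPU graph. The edge list here is illustrative only.
import cudf
import cugraph

edges = cudf.DataFrame({
    "src": [0, 1, 2, 3, 4, 0],
    "dst": [1, 2, 3, 4, 0, 2],
    "wgt": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
})
G = cugraph.Graph()
G.from_cudf_edgelist(edges, source="src", destination="dst", edge_attr="wgt")

# Reproducibly draw 3 vertices; when the graph was renumbered the result
# is mapped back to the original (external) vertex ids before returning.
verts = G.select_random_vertices(random_state=42, num_vertices=3)
print(verts)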
@@ -644,10 +686,7 @@ def number_of_vertices(self): elif self.transposedadjlist is not None: self.properties.node_count = len(self.transposedadjlist.offsets) - 1 elif self.edgelist is not None: - df = self.edgelist.edgelist_df[ - [simpleGraphImpl.srcCol, simpleGraphImpl.dstCol] - ] - self.properties.node_count = df.max().max() + 1 + self.properties.node_count = len(self.nodes()) else: raise RuntimeError("Graph is Empty") return self.properties.node_count @@ -721,7 +760,9 @@ def in_degree(self, vertex_subset=None): >>> df = G.in_degree([0,9,12]) """ - return self._degree(vertex_subset, direction=Direction.IN) + in_degree = self._degree(vertex_subset, direction=Direction.IN) + + return in_degree def out_degree(self, vertex_subset=None): """ @@ -759,7 +800,8 @@ def out_degree(self, vertex_subset=None): >>> df = G.out_degree([0,9,12]) """ - return self._degree(vertex_subset, direction=Direction.OUT) + out_degree = self._degree(vertex_subset, direction=Direction.OUT) + return out_degree def degree(self, vertex_subset=None): """ @@ -849,12 +891,28 @@ def degrees(self, vertex_subset=None): df["in_degree"] = in_degree_col df["out_degree"] = out_degree_col - if self.properties.renumbered is True: - df = self.renumber_map.unrenumber(df, "vertex") + if self.properties.renumbered: + # Get the internal vertex IDs + nodes = self.renumber_map.df_internal_to_external["id"] + else: + nodes = self.nodes() + # If the vertex IDs are not contiguous, remove results for the + # isolated vertices + df = df[df["vertex"].isin(nodes.to_cupy())] if vertex_subset is not None: + if not isinstance(vertex_subset, cudf.Series): + vertex_subset = cudf.Series(vertex_subset) + if self.properties.renumbered: + vertex_subset = self.renumber_map.to_internal_vertex_id( + vertex_subset + ) + vertex_subset = vertex_subset.to_cupy() df = df[df["vertex"].isin(vertex_subset)] + if self.properties.renumbered: + df = self.renumber_map.unrenumber(df, "vertex") + return df def _degree(self, vertex_subset, direction=Direction.ALL): @@ -863,12 +921,28 @@ def _degree(self, vertex_subset, direction=Direction.ALL): df["vertex"] = vertex_col df["degree"] = degree_col - if self.properties.renumbered is True: - df = self.renumber_map.unrenumber(df, "vertex") + if self.properties.renumbered: + # Get the internal vertex IDs + nodes = self.renumber_map.df_internal_to_external["id"] + else: + nodes = self.nodes() + # If the vertex IDs are not contiguous, remove results for the + # isolated vertices + df = df[df["vertex"].isin(nodes.to_cupy())] if vertex_subset is not None: + if not isinstance(vertex_subset, cudf.Series): + vertex_subset = cudf.Series(vertex_subset) + if self.properties.renumbered: + vertex_subset = self.renumber_map.to_internal_vertex_id( + vertex_subset + ) + vertex_subset = vertex_subset.to_cupy() df = df[df["vertex"].isin(vertex_subset)] + if self.properties.renumbered: + df = self.renumber_map.unrenumber(df, "vertex") + return df def _make_plc_graph(self, value_col=None, store_transposed=False, renumber=True): @@ -1071,10 +1145,8 @@ def nodes(self): if self.edgelist is not None: df = self.edgelist.edgelist_df if self.properties.renumbered: - # FIXME: This relies on current implementation - # of NumberMap, should not really expose - # this, perhaps add a method to NumberMap - df = self.renumber_map.implementation.df.drop(columns="id") + df = self.renumber_map.df_internal_to_external.drop(columns="id") + if len(df.columns) > 1: return df else: diff --git a/python/cugraph/cugraph/structure/number_map.py 
b/python/cugraph/cugraph/structure/number_map.py index 80f2436c09e..481f99b9060 100644 --- a/python/cugraph/cugraph/structure/number_map.py +++ b/python/cugraph/cugraph/structure/number_map.py @@ -15,37 +15,10 @@ from collections.abc import Iterable -from dask.distributed import wait, default_client import dask_cudf import numpy as np import cudf - -from cugraph.dask.common.input_utils import get_distributed_data -from cugraph.structure import renumber_wrapper as c_renumber -import cugraph.dask.comms.comms as Comms - - -def call_renumber( - sID, - data, - renumbered_src_col_name, - renumbered_dst_col_name, - num_edges, - is_mnmg, - store_transposed, -): - wid = Comms.get_worker_id(sID) - handle = Comms.get_handle(sID) - return c_renumber.renumber( - data[0], - renumbered_src_col_name, - renumbered_dst_col_name, - num_edges, - wid, - handle, - is_mnmg, - store_transposed, - ) +import warnings class NumberMap: @@ -266,15 +239,25 @@ def indirection_map(self, ddf, src_col_names, dst_col_names): self.ddf = tmp_ddf return tmp_ddf - def __init__(self, renumber_id_type=np.int32, unrenumbered_id_type=np.int32): + def __init__( + self, + renumber_id_type=np.int32, + unrenumbered_id_type=np.int32, + is_renumbered=False, + ): self.implementation = None self.renumber_id_type = renumber_id_type self.unrenumbered_id_type = unrenumbered_id_type + self.is_renumbered = is_renumbered # The default src/dst column names in the resulting renumbered # dataframe. These may be updated by the renumbering methods if the # input dataframe uses the default names. self.renumbered_src_col_name = "renumbered_src" self.renumbered_dst_col_name = "renumbered_dst" + # This dataframe maps internal to external vertex IDs. + # The column name 'id' contains the renumbered vertices and the other column(s) + # contain the original vertices + self.df_internal_to_external = None @staticmethod def compute_vals_types(df, column_names): @@ -481,7 +464,19 @@ def renumber_and_segment( store_transposed=False, legacy_renum_only=False, ): - renumbered = True + """ + Given an input dataframe with its column names, this function returns the + renumbered dataframe(if renumbering occured) along with a mapping from internal + to external vertex IDs. the parameter 'preserve_order' ensures that the order + of the edges is preserved during renumbering. + """ + if legacy_renum_only: + warning_msg = ( + "The parameter 'legacy_renum_only' is deprecated and will be removed." + ) + warnings.warn(warning_msg, DeprecationWarning) + + renumbered = False # For columns with mismatch dtypes, set the renumbered # id_type to either 'int32' or 'int64' @@ -497,29 +492,17 @@ def renumber_and_segment( # renumber the edgelist to 'int32' renumber_id_type = np.int32 - # FIXME: Drop the renumber_type 'experimental' once all the - # algos follow the C/Pylibcugraph path - - # The renumber_type 'legacy' runs both the python and the - # C++ renumbering. + # Renumbering occurs only if: + # 1) The column names are lists (multi-column vertices) if isinstance(src_col_names, list): - renumber_type = "legacy" - + renumbered = True + # 2) There are non-integer vertices elif not ( df[src_col_names].dtype == np.int32 or df[src_col_names].dtype == np.int64 ): - renumber_type = "legacy" - else: - # The renumber_type 'experimental' only runs the C++ - # renumbering - renumber_type = "experimental" + renumbered = True - if legacy_renum_only and renumber_type == "experimental": - # The original dataframe will be returned. 
- renumber_type = "skip_renumbering" - renumbered = False - - renumber_map = NumberMap(renumber_id_type, unrenumbered_id_type) + renumber_map = NumberMap(renumber_id_type, unrenumbered_id_type, renumbered) if not isinstance(src_col_names, list): src_col_names = [src_col_names] dst_col_names = [dst_col_names] @@ -548,12 +531,15 @@ def renumber_and_segment( else: raise TypeError("df must be cudf.DataFrame or dask_cudf.DataFrame") - renumber_map.implementation.numbered = renumbered - - if renumber_type == "legacy": - indirection_map = renumber_map.implementation.indirection_map( + if renumbered: + renumber_map.implementation.indirection_map( df, src_col_names, dst_col_names ) + if isinstance(df, dask_cudf.DataFrame): + renumber_map.df_internal_to_external = renumber_map.implementation.ddf + else: + renumber_map.df_internal_to_external = renumber_map.implementation.df + df = renumber_map.add_internal_vertex_id( df, renumber_map.renumbered_src_col_name, @@ -568,143 +554,14 @@ def renumber_and_segment( drop=True, preserve_order=preserve_order, ) - elif renumber_type == "skip_renumbering": + + else: # Update the renumbered source and destination column name # with the original input's source and destination name renumber_map.renumbered_src_col_name = src_col_names[0] renumber_map.renumbered_dst_col_name = dst_col_names[0] - else: - df = df.rename( - columns={ - src_col_names[0]: renumber_map.renumbered_src_col_name, - dst_col_names[0]: renumber_map.renumbered_dst_col_name, - } - ) - num_edges = len(df) - - if isinstance(df, dask_cudf.DataFrame): - is_mnmg = True - else: - is_mnmg = False - - if is_mnmg: - # Do not renumber the algos following the C/Pylibcugraph path - if renumber_type in ["legacy", "experimental"]: - client = default_client() - data = get_distributed_data(df) - result = [ - ( - client.submit( - call_renumber, - Comms.get_session_id(), - wf[1], - renumber_map.renumbered_src_col_name, - renumber_map.renumbered_dst_col_name, - num_edges, - is_mnmg, - store_transposed, - workers=[wf[0]], - ), - wf[0], - ) - for idx, wf in enumerate(data.worker_to_parts.items()) - ] - wait(result) - - def get_renumber_map(id_type, data): - return data[0].astype(id_type) - - def get_segment_offsets(data): - return data[1] - - def get_renumbered_df(id_type, data): - data[2][renumber_map.renumbered_src_col_name] = data[2][ - renumber_map.renumbered_src_col_name - ].astype(id_type) - data[2][renumber_map.renumbered_dst_col_name] = data[2][ - renumber_map.renumbered_dst_col_name - ].astype(id_type) - return data[2] - - id_type = df[renumber_map.renumbered_src_col_name].dtype - renumbering_map = dask_cudf.from_delayed( - [ - client.submit(get_renumber_map, id_type, data, workers=[wf]) - for (data, wf) in result - ] - ) - - list_of_segment_offsets = client.gather( - [ - client.submit(get_segment_offsets, data, workers=[wf]) - for (data, wf) in result - ] - ) - aggregate_segment_offsets = [] - for segment_offsets in list_of_segment_offsets: - aggregate_segment_offsets.extend(segment_offsets) - - renumbered_df = dask_cudf.from_delayed( - [ - client.submit(get_renumbered_df, id_type, data, workers=[wf]) - for (data, wf) in result - ] - ) - - if renumber_type == "legacy": - renumber_map.implementation.ddf = ( - indirection_map.merge( - renumbering_map, - right_on="original_ids", - left_on="global_id", - how="right", - ) - .drop(columns=["global_id", "original_ids"]) - .rename(columns={"new_ids": "global_id"}) - ) - else: - renumber_map.implementation.ddf = renumbering_map.rename( - columns={"original_ids": "0", 
"new_ids": "global_id"} - ) - return renumbered_df, renumber_map, aggregate_segment_offsets - - else: - # There is no aggregate_segment_offsets since the - # C++ renumbering is skipped - return df, renumber_map, None - - else: - # Do not renumber the algos following the C/Pylibcugraph path - if renumber_type in ["legacy", "experimental"]: - renumbering_map, segment_offsets, renumbered_df = c_renumber.renumber( - df, - renumber_map.renumbered_src_col_name, - renumber_map.renumbered_dst_col_name, - num_edges, - 0, - Comms.get_default_handle(), - is_mnmg, - store_transposed, - ) - if renumber_type == "legacy": - renumber_map.implementation.df = ( - indirection_map.merge( - renumbering_map, right_on="original_ids", left_on="id" - ) - .drop(columns=["id", "original_ids"]) - .rename(columns={"new_ids": "id"}, copy=False) - ) - else: - renumber_map.implementation.df = renumbering_map.rename( - columns={"original_ids": "0", "new_ids": "id"}, copy=False - ) - - return renumbered_df, renumber_map, segment_offsets - else: - # There is no aggregate_segment_offsets since the - # C++ renumbering is skipped - return df, renumber_map, None + return df, renumber_map @staticmethod def renumber( @@ -768,6 +625,8 @@ def unrenumber(self, df, column_name, preserve_order=False, get_column_names=Fal >>> df = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ', ... dtype=['int32', 'int32', 'float32'], ... header=None) + >>> df['0'] = df['0'].astype(str) + >>> df['1'] = df['1'].astype(str) >>> df, number_map = number_map.NumberMap.renumber(df, '0', '1') >>> G = cugraph.Graph() >>> G.from_cudf_edgelist(df, diff --git a/python/cugraph/cugraph/structure/property_graph.py b/python/cugraph/cugraph/structure/property_graph.py index 1c2ab8ff483..3bf7faea6cc 100644 --- a/python/cugraph/cugraph/structure/property_graph.py +++ b/python/cugraph/cugraph/structure/property_graph.py @@ -2074,7 +2074,11 @@ def renumber_vertices_by_type(self, prev_id_column=None): ].astype(cat_dtype) index_dtype = self.__vertex_prop_dataframe.index.dtype - df = self.__vertex_prop_dataframe.reset_index().sort_values(by=TCN) + df = self.__vertex_prop_dataframe.reset_index() + if len(df.dtypes[TCN].categories) > 1 and len(self.vertex_types) > 1: + # Avoid `sort_values` if we know there is only one type + # `self.vertex_types` is currently not cheap, b/c it looks at edge df + df = df.sort_values(by=TCN, ignore_index=True) df.index = df.index.astype(index_dtype) if self.__edge_prop_dataframe is not None: mapper = self.__series_type(df.index, index=df[self.vertex_col_name]) @@ -2164,9 +2168,15 @@ def renumber_edges_by_type(self, prev_id_column=None): df = self.__edge_prop_dataframe index_dtype = df.index.dtype if prev_id_column is None: - df = df.sort_values(by=TCN, ignore_index=True) + if len(df.dtypes[TCN].categories) > 1 and len(self.edge_types) > 1: + # Avoid `sort_values` if we know there is only one type + df = df.sort_values(by=TCN, ignore_index=True) + else: + df.reset_index(drop=True, inplace=True) else: - df = df.sort_values(by=TCN) + if len(df.dtypes[TCN].categories) > 1 and len(self.edge_types) > 1: + # Avoid `sort_values` if we know there is only one type + df = df.sort_values(by=TCN) df.index.name = prev_id_column df.reset_index(inplace=True) df.index = df.index.astype(index_dtype) diff --git a/python/cugraph/cugraph/structure/renumber_wrapper.pyx b/python/cugraph/cugraph/structure/renumber_wrapper.pyx deleted file mode 100644 index 58c64104f65..00000000000 --- a/python/cugraph/cugraph/structure/renumber_wrapper.pyx +++ /dev/null @@ 
-1,605 +0,0 @@ -# -# Copyright (c) 2020-2022, NVIDIA CORPORATION. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import numpy as np -from cython.operator cimport dereference as deref -from libc.stdint cimport uintptr_t -from libcpp cimport bool -from libcpp.memory cimport unique_ptr, make_unique -from libcpp.utility cimport move, pair -from libcpp.vector cimport vector - -import cudf -from rmm._lib.device_buffer cimport device_buffer - -from pylibraft.common.handle cimport handle_t -from cugraph.structure.graph_utilities cimport (shuffled_vertices_t, - major_minor_weights_t, - renum_tuple_t, - call_shuffle, - call_renumber, - ) -from cugraph.structure.graph_primtypes cimport move_device_buffer_to_series - - -cdef renumber_helper(shuffled_vertices_t* ptr_maj_min_w, vertex_t, weights): - # extract shuffled result: - # - cdef pair[unique_ptr[device_buffer], size_t] pair_s_major = deref(ptr_maj_min_w).get_major_wrap() - cdef pair[unique_ptr[device_buffer], size_t] pair_s_minor = deref(ptr_maj_min_w).get_minor_wrap() - cdef pair[unique_ptr[device_buffer], size_t] pair_s_weights = deref(ptr_maj_min_w).get_weights_wrap() - - shuffled_major_series = move_device_buffer_to_series( - move(pair_s_major.first), vertex_t, "shuffled_major") - - shuffled_minor_series = move_device_buffer_to_series( - move(pair_s_minor.first), vertex_t, "shuffled_minor") - - shuffled_df = cudf.DataFrame() - # Some workers might have no data therefore ensure the empty column have the appropriate - # vertex_t or weight_t. Failing to do that will create am empty column of type object - # which is not supported by '__cuda_array_interface__' - if shuffled_major_series is None: - shuffled_df['major_vertices'] = cudf.Series(dtype=vertex_t) - else: - shuffled_df['major_vertices']= shuffled_major_series - if shuffled_minor_series is None: - shuffled_df['minor_vertices'] = cudf.Series(dtype=vertex_t) - else: - shuffled_df['minor_vertices']= shuffled_minor_series - - if weights is not None: - weight_t = weights.dtype - shuffled_weights_series = move_device_buffer_to_series( - move(pair_s_weights.first), weight_t, "shuffled_weights") - if shuffled_weights_series is None: - shuffled_df['value']= cudf.Series(dtype=weight_t) - else: - shuffled_df['value']= shuffled_weights_series - - return shuffled_df - - -def renumber(input_df, # maybe use cpdef ? - renumbered_src_col_name, - renumbered_dst_col_name, - num_global_edges, - rank, - handle, - is_multi_gpu, - transposed): - """ - Call MNMG renumber - """ - cdef size_t handle_size_t = handle.getHandle() - # TODO: get handle_t out of handle... - handle_ptr = handle_size_t - - # FIXME: call_shuffle currently works on major/minor while call_renumber is updated to work on - # source/destination. We'd better update call_shuffle to work on source/destination as well to - # avoid switching between major/minor & source/destination. Deferring this work at this moment - # expecting this legacy code path will be replaced with the new pylibcugrpah & C API based path. 
- - if not transposed: - major_vertices = input_df[renumbered_src_col_name] - minor_vertices = input_df[renumbered_dst_col_name] - else: - major_vertices = input_df[renumbered_dst_col_name] - minor_vertices = input_df[renumbered_src_col_name] - - cdef uintptr_t c_edge_weights = NULL # set below... - - vertex_t = major_vertices.dtype - if num_global_edges > (2**31 - 1): - edge_t = np.dtype("int64") - else: - edge_t = vertex_t - if "value" in input_df.columns: - weights = input_df['value'] - weight_t = weights.dtype - c_edge_weights = weights.__cuda_array_interface__['data'][0] - else: - weights = None - weight_t = np.dtype("float32") - - if (vertex_t != np.dtype("int32") and vertex_t != np.dtype("int64")): - raise Exception("Incorrect vertex_t type.") - if (edge_t != np.dtype("int32") and edge_t != np.dtype("int64")): - raise Exception("Incorrect edge_t type.") - if (weight_t != np.dtype("float32") and weight_t != np.dtype("float64")): - raise Exception("Incorrect weight_t type.") - if (vertex_t != np.dtype("int32") and edge_t != np.dtype("int64")): - raise Exception("Incompatible vertex_t and edge_t types.") - - # FIXME: needs to be edge_t type not int - cdef int num_local_edges = len(major_vertices) - - cdef uintptr_t c_major_vertices = major_vertices.__cuda_array_interface__['data'][0] - cdef uintptr_t c_minor_vertices = minor_vertices.__cuda_array_interface__['data'][0] - - cdef uintptr_t shuffled_src = NULL - cdef uintptr_t shuffled_dst = NULL - - # FIXME: Fix fails when do_check = True - cdef bool do_check = False # ? for now... - cdef bool mg_flag = is_multi_gpu # run Single-GPU or MNMG - - cdef pair[unique_ptr[device_buffer], size_t] pair_original - - # tparams: vertex_t, edge_t, weight_t: - # - cdef unique_ptr[major_minor_weights_t[int, int, float]] ptr_shuffled_32_32_32 - cdef unique_ptr[major_minor_weights_t[int, int, double]] ptr_shuffled_32_32_64 - cdef unique_ptr[major_minor_weights_t[int, long, float]] ptr_shuffled_32_64_32 - cdef unique_ptr[major_minor_weights_t[int, long, double]] ptr_shuffled_32_64_64 - cdef unique_ptr[major_minor_weights_t[long, long, float]] ptr_shuffled_64_64_32 - cdef unique_ptr[major_minor_weights_t[long, long, double]] ptr_shuffled_64_64_64 - - # tparams: vertex_t, edge_t: - # - cdef unique_ptr[renum_tuple_t[int, int]] ptr_renum_tuple_32_32 - cdef unique_ptr[renum_tuple_t[int, long]] ptr_renum_tuple_32_64 - cdef unique_ptr[renum_tuple_t[long, long]] ptr_renum_tuple_64_64 - - # tparam: vertex_t: - # - cdef unique_ptr[vector[int]] edge_counts_32 - cdef unique_ptr[vector[long]] edge_counts_64 - - # tparam: vertex_t: - # - cdef unique_ptr[vector[int]] uniq_partition_vector_32 - cdef unique_ptr[vector[long]] uniq_partition_vector_64 - - # tparam: vertex_t: - # - cdef unique_ptr[vector[int]] uniq_segment_vector_32 - cdef unique_ptr[vector[long]] uniq_segment_vector_64 - - cdef size_t rank_indx = rank - - if (vertex_t == np.dtype("int32")): - if ( edge_t == np.dtype("int32")): - if( weight_t == np.dtype("float32")): - if(is_multi_gpu): - ptr_shuffled_32_32_32.reset(call_shuffle[int, int, float](deref(handle_ptr), - c_major_vertices, - c_minor_vertices, - c_edge_weights, - num_local_edges, - weights is not None).release()) - shuffled_df = renumber_helper(ptr_shuffled_32_32_32.get(), vertex_t, weights) - major_vertices = shuffled_df['major_vertices'] - minor_vertices = shuffled_df['minor_vertices'] - num_local_edges = len(shuffled_df) - if not transposed: - major = renumbered_src_col_name; minor = renumbered_dst_col_name - else: - major = 
renumbered_dst_col_name; minor = renumbered_src_col_name - shuffled_df = shuffled_df.rename(columns={'major_vertices':major, 'minor_vertices':minor}, copy=False) - edge_counts_32 = move(ptr_shuffled_32_32_32.get().get_edge_counts_wrap()) - else: - shuffled_df = input_df - edge_counts_32 = make_unique[vector[int]](1, num_local_edges) - - if not transposed: - shuffled_src = major_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = minor_vertices.__cuda_array_interface__['data'][0] - else: - shuffled_src = minor_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = major_vertices.__cuda_array_interface__['data'][0] - - ptr_renum_tuple_32_32.reset(call_renumber[int, int](deref(handle_ptr), - shuffled_src, - shuffled_dst, - deref(edge_counts_32.get()), - transposed, - do_check, - mg_flag).release()) - - pair_original = ptr_renum_tuple_32_32.get().get_dv_wrap() # original vertices: see helper - - original_series = move_device_buffer_to_series( - move(pair_original.first), vertex_t, "original") - - # extract unique_ptr[partition_offsets]: - # - uniq_partition_vector_32 = move(ptr_renum_tuple_32_32.get().get_partition_offsets_wrap()) - - # create series out of a partition range from rank to rank+1: - # - if is_multi_gpu: - new_series = cudf.Series(np.arange(uniq_partition_vector_32.get()[0].at(rank_indx), - uniq_partition_vector_32.get()[0].at(rank_indx+1)), - dtype=vertex_t) - else: - new_series = cudf.Series(np.arange(0, ptr_renum_tuple_32_32.get().get_num_vertices()), - dtype=vertex_t) - # create new cudf df - # - # and add the previous series to it: - # - renumbered_map = cudf.DataFrame() - renumbered_map['original_ids'] = original_series - renumbered_map['new_ids'] = new_series - - uniq_segment_vector_32 = move(ptr_renum_tuple_32_32.get().get_segment_offsets_wrap()) - segment_offsets = [None] * (deref(uniq_segment_vector_32).size()) - for i in range(len(segment_offsets)): - segment_offsets[i] = deref(uniq_segment_vector_32)[i] - - return renumbered_map, segment_offsets, shuffled_df - - elif( weight_t == np.dtype("float64")): - if(is_multi_gpu): - ptr_shuffled_32_32_64.reset(call_shuffle[int, int, double](deref(handle_ptr), - c_major_vertices, - c_minor_vertices, - c_edge_weights, - num_local_edges, - weights is not None).release()) - - shuffled_df = renumber_helper(ptr_shuffled_32_32_64.get(), vertex_t, weights) - major_vertices = shuffled_df['major_vertices'] - minor_vertices = shuffled_df['minor_vertices'] - num_local_edges = len(shuffled_df) - if not transposed: - major = renumbered_src_col_name; minor = renumbered_dst_col_name - else: - major = renumbered_dst_col_name; minor = renumbered_src_col_name - shuffled_df = shuffled_df.rename(columns={'major_vertices':major, 'minor_vertices':minor}, copy=False) - edge_counts_32 = move(ptr_shuffled_32_32_64.get().get_edge_counts_wrap()) - else: - shuffled_df = input_df - edge_counts_32 = make_unique[vector[int]](1, num_local_edges) - - if not transposed: - shuffled_src = major_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = minor_vertices.__cuda_array_interface__['data'][0] - else: - shuffled_src = minor_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = major_vertices.__cuda_array_interface__['data'][0] - - ptr_renum_tuple_32_32.reset(call_renumber[int, int](deref(handle_ptr), - shuffled_src, - shuffled_dst, - deref(edge_counts_32.get()), - transposed, - do_check, - mg_flag).release()) - - pair_original = ptr_renum_tuple_32_32.get().get_dv_wrap() # original vertices: see helper - - original_series = 
move_device_buffer_to_series( - move(pair_original.first), vertex_t, "original") - - # extract unique_ptr[partition_offsets]: - # - uniq_partition_vector_32 = move(ptr_renum_tuple_32_32.get().get_partition_offsets_wrap()) - - # create series out of a partition range from rank to rank+1: - # - if is_multi_gpu: - new_series = cudf.Series(np.arange(uniq_partition_vector_32.get()[0].at(rank_indx), - uniq_partition_vector_32.get()[0].at(rank_indx+1)), - dtype=vertex_t) - else: - new_series = cudf.Series(np.arange(0, ptr_renum_tuple_32_32.get().get_num_vertices()), - dtype=vertex_t) - - # create new cudf df - # - # and add the previous series to it: - # - renumbered_map = cudf.DataFrame() - renumbered_map['original_ids'] = original_series - renumbered_map['new_ids'] = new_series - - uniq_segment_vector_32 = move(ptr_renum_tuple_32_32.get().get_segment_offsets_wrap()) - segment_offsets = [None] * (deref(uniq_segment_vector_32).size()) - for i in range(len(segment_offsets)): - segment_offsets[i] = deref(uniq_segment_vector_32)[i] - - return renumbered_map, segment_offsets, shuffled_df - - elif ( edge_t == np.dtype("int64")): - if( weight_t == np.dtype("float32")): - if(is_multi_gpu): - ptr_shuffled_32_64_32.reset(call_shuffle[int, long, float](deref(handle_ptr), - c_major_vertices, - c_minor_vertices, - c_edge_weights, - num_local_edges, - weights is not None).release()) - - shuffled_df = renumber_helper(ptr_shuffled_32_64_32.get(), vertex_t, weights) - major_vertices = shuffled_df['major_vertices'] - minor_vertices = shuffled_df['minor_vertices'] - num_local_edges = len(shuffled_df) - if not transposed: - major = renumbered_src_col_name; minor = renumbered_dst_col_name - else: - major = renumbered_dst_col_name; minor = renumbered_src_col_name - shuffled_df = shuffled_df.rename(columns={'major_vertices':major, 'minor_vertices':minor}, copy=False) - edge_counts_64 = move(ptr_shuffled_32_64_32.get().get_edge_counts_wrap()) - else: - shuffled_df = input_df - edge_counts_64 = make_unique[vector[long]](1, num_local_edges) - - if not transposed: - shuffled_src = major_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = minor_vertices.__cuda_array_interface__['data'][0] - else: - shuffled_src = minor_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = major_vertices.__cuda_array_interface__['data'][0] - - ptr_renum_tuple_32_64.reset(call_renumber[int, long](deref(handle_ptr), - shuffled_src, - shuffled_dst, - deref(edge_counts_64.get()), - transposed, - do_check, - mg_flag).release()) - - pair_original = ptr_renum_tuple_32_64.get().get_dv_wrap() # original vertices: see helper - - original_series = move_device_buffer_to_series( - move(pair_original.first), vertex_t, "original") - - # extract unique_ptr[partition_offsets]: - # - uniq_partition_vector_32 = move(ptr_renum_tuple_32_64.get().get_partition_offsets_wrap()) - - # create series out of a partition range from rank to rank+1: - # - if is_multi_gpu: - new_series = cudf.Series(np.arange(uniq_partition_vector_32.get()[0].at(rank_indx), - uniq_partition_vector_32.get()[0].at(rank_indx+1)), - dtype=vertex_t) - else: - new_series = cudf.Series(np.arange(0, ptr_renum_tuple_32_64.get().get_num_vertices()), - dtype=vertex_t) - - # create new cudf df - # - # and add the previous series to it: - # - renumbered_map = cudf.DataFrame() - renumbered_map['original_ids'] = original_series - renumbered_map['new_ids'] = new_series - - uniq_segment_vector_32 = move(ptr_renum_tuple_32_64.get().get_segment_offsets_wrap()) - segment_offsets = [None] * 
(deref(uniq_segment_vector_32).size()) - for i in range(len(segment_offsets)): - segment_offsets[i] = deref(uniq_segment_vector_32)[i] - - return renumbered_map, segment_offsets, shuffled_df - elif( weight_t == np.dtype("float64")): - if(is_multi_gpu): - ptr_shuffled_32_64_64.reset(call_shuffle[int, long, double](deref(handle_ptr), - c_major_vertices, - c_minor_vertices, - c_edge_weights, - num_local_edges, - weights is not None).release()) - - shuffled_df = renumber_helper(ptr_shuffled_32_64_64.get(), vertex_t, weights) - major_vertices = shuffled_df['major_vertices'] - minor_vertices = shuffled_df['minor_vertices'] - num_local_edges = len(shuffled_df) - if not transposed: - major = renumbered_src_col_name; minor = renumbered_dst_col_name - else: - major = renumbered_dst_col_name; minor = renumbered_src_col_name - shuffled_df = shuffled_df.rename(columns={'major_vertices':major, 'minor_vertices':minor}, copy=False) - edge_counts_64 = move(ptr_shuffled_32_64_64.get().get_edge_counts_wrap()) - else: - shuffled_df = input_df - edge_counts_64 = make_unique[vector[long]](1, num_local_edges) - - if not transposed: - shuffled_src = major_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = minor_vertices.__cuda_array_interface__['data'][0] - else: - shuffled_src = minor_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = major_vertices.__cuda_array_interface__['data'][0] - - ptr_renum_tuple_32_64.reset(call_renumber[int, long](deref(handle_ptr), - shuffled_src, - shuffled_dst, - deref(edge_counts_64.get()), - transposed, - do_check, - mg_flag).release()) - - pair_original = ptr_renum_tuple_32_64.get().get_dv_wrap() # original vertices: see helper - - original_series = move_device_buffer_to_series( - move(pair_original.first), vertex_t, "original") - - # extract unique_ptr[partition_offsets]: - # - uniq_partition_vector_32 = move(ptr_renum_tuple_32_64.get().get_partition_offsets_wrap()) - - # create series out of a partition range from rank to rank+1: - # - if is_multi_gpu: - new_series = cudf.Series(np.arange(uniq_partition_vector_32.get()[0].at(rank_indx), - uniq_partition_vector_32.get()[0].at(rank_indx+1)), - dtype=vertex_t) - else: - new_series = cudf.Series(np.arange(0, ptr_renum_tuple_32_64.get().get_num_vertices()), - dtype=vertex_t) - # create new cudf df - # - # and add the previous series to it: - # - renumbered_map = cudf.DataFrame() - renumbered_map['original_ids'] = original_series - renumbered_map['new_ids'] = new_series - - uniq_segment_vector_32 = move(ptr_renum_tuple_32_64.get().get_segment_offsets_wrap()) - segment_offsets = [None] * (deref(uniq_segment_vector_32).size()) - for i in range(len(segment_offsets)): - segment_offsets[i] = deref(uniq_segment_vector_32)[i] - - return renumbered_map, segment_offsets, shuffled_df - - elif (vertex_t == np.dtype("int64")): - if ( edge_t == np.dtype("int64")): - if( weight_t == np.dtype("float32")): - if(is_multi_gpu): - ptr_shuffled_64_64_32.reset(call_shuffle[long, long, float](deref(handle_ptr), - c_major_vertices, - c_minor_vertices, - c_edge_weights, - num_local_edges, - weights is not None).release()) - - shuffled_df = renumber_helper(ptr_shuffled_64_64_32.get(), vertex_t, weights) - major_vertices = shuffled_df['major_vertices'] - minor_vertices = shuffled_df['minor_vertices'] - num_local_edges = len(shuffled_df) - if not transposed: - major = renumbered_src_col_name; minor = renumbered_dst_col_name - else: - major = renumbered_dst_col_name; minor = renumbered_src_col_name - shuffled_df = 
shuffled_df.rename(columns={'major_vertices':major, 'minor_vertices':minor}, copy=False) - edge_counts_64 = move(ptr_shuffled_64_64_32.get().get_edge_counts_wrap()) - else: - shuffled_df = input_df - edge_counts_64 = make_unique[vector[long]](1, num_local_edges) - - if not transposed: - shuffled_src = major_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = minor_vertices.__cuda_array_interface__['data'][0] - else: - shuffled_src = minor_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = major_vertices.__cuda_array_interface__['data'][0] - - ptr_renum_tuple_64_64.reset(call_renumber[long, long](deref(handle_ptr), - shuffled_src, - shuffled_dst, - deref(edge_counts_64.get()), - transposed, - do_check, - mg_flag).release()) - - pair_original = ptr_renum_tuple_64_64.get().get_dv_wrap() # original vertices: see helper - - original_series = move_device_buffer_to_series( - move(pair_original.first), vertex_t, "original") - - # extract unique_ptr[partition_offsets]: - # - uniq_partition_vector_64 = move(ptr_renum_tuple_64_64.get().get_partition_offsets_wrap()) - - # create series out of a partition range from rank to rank+1: - # - if is_multi_gpu: - new_series = cudf.Series(np.arange(uniq_partition_vector_64.get()[0].at(rank_indx), - uniq_partition_vector_64.get()[0].at(rank_indx+1)), - dtype=vertex_t) - else: - new_series = cudf.Series(np.arange(0, ptr_renum_tuple_64_64.get().get_num_vertices()), - dtype=vertex_t) - - # create new cudf df - # - # and add the previous series to it: - # - renumbered_map = cudf.DataFrame() - renumbered_map['original_ids'] = original_series - renumbered_map['new_ids'] = new_series - - uniq_segment_vector_64 = move(ptr_renum_tuple_64_64.get().get_segment_offsets_wrap()) - segment_offsets = [None] * (deref(uniq_segment_vector_64).size()) - for i in range(len(segment_offsets)): - segment_offsets[i] = deref(uniq_segment_vector_64)[i] - - return renumbered_map, segment_offsets, shuffled_df - - elif( weight_t == np.dtype("float64")): - if(is_multi_gpu): - ptr_shuffled_64_64_64.reset(call_shuffle[long, long, double](deref(handle_ptr), - c_major_vertices, - c_minor_vertices, - c_edge_weights, - num_local_edges, - weights is not None).release()) - - shuffled_df = renumber_helper(ptr_shuffled_64_64_64.get(), vertex_t, weights) - major_vertices = shuffled_df['major_vertices'] - minor_vertices = shuffled_df['minor_vertices'] - num_local_edges = len(shuffled_df) - if not transposed: - major = renumbered_src_col_name; minor = renumbered_dst_col_name - else: - major = renumbered_dst_col_name; minor = renumbered_src_col_name - shuffled_df = shuffled_df.rename(columns={'major_vertices':major, 'minor_vertices':minor}, copy=False) - edge_counts_64 = move(ptr_shuffled_64_64_64.get().get_edge_counts_wrap()) - else: - shuffled_df = input_df - edge_counts_64 = make_unique[vector[long]](1, num_local_edges) - - if not transposed: - shuffled_src = major_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = minor_vertices.__cuda_array_interface__['data'][0] - else: - shuffled_src = minor_vertices.__cuda_array_interface__['data'][0] - shuffled_dst = major_vertices.__cuda_array_interface__['data'][0] - - ptr_renum_tuple_64_64.reset(call_renumber[long, long](deref(handle_ptr), - shuffled_src, - shuffled_dst, - deref(edge_counts_64.get()), - transposed, - do_check, - mg_flag).release()) - - pair_original = ptr_renum_tuple_64_64.get().get_dv_wrap() # original vertices: see helper - - original_series = move_device_buffer_to_series( - move(pair_original.first), 
vertex_t, "original") - - # extract unique_ptr[partition_offsets]: - # - uniq_partition_vector_64 = move(ptr_renum_tuple_64_64.get().get_partition_offsets_wrap()) - - # create series out of a partition range from rank to rank+1: - # - if is_multi_gpu: - new_series = cudf.Series(np.arange(uniq_partition_vector_64.get()[0].at(rank_indx), - uniq_partition_vector_64.get()[0].at(rank_indx+1)), - dtype=vertex_t) - else: - new_series = cudf.Series(np.arange(0, ptr_renum_tuple_64_64.get().get_num_vertices()), - dtype=vertex_t) - - # create new cudf df - # - # and add the previous series to it: - # - renumbered_map = cudf.DataFrame() - renumbered_map['original_ids'] = original_series - renumbered_map['new_ids'] = new_series - - uniq_segment_vector_64 = move(ptr_renum_tuple_64_64.get().get_segment_offsets_wrap()) - segment_offsets = [None] * (deref(uniq_segment_vector_64).size()) - for i in range(len(segment_offsets)): - segment_offsets[i] = deref(uniq_segment_vector_64)[i] - - return renumbered_map, segment_offsets, shuffled_df diff --git a/python/cugraph/cugraph/structure/shuffle.py b/python/cugraph/cugraph/structure/shuffle.py deleted file mode 100644 index 792af1c3bd5..00000000000 --- a/python/cugraph/cugraph/structure/shuffle.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from dask.dataframe.shuffle import rearrange_by_column -import cudf -import cugraph.dask.comms.comms as Comms - - -def _set_partitions_pre( - df, - vertex_row_partitions, - vertex_col_partitions, - prows, - pcols, - transposed, - partition_type, -): - if transposed: - r = df["dst"] - c = df["src"] - else: - r = df["src"] - c = df["dst"] - r_div = vertex_row_partitions.searchsorted(r, side="right") - 1 - c_div = vertex_col_partitions.searchsorted(c, side="right") - 1 - - if partition_type == 1: - partitions = r_div * pcols + c_div - else: - partitions = r_div % prows + c_div * prows - return partitions - - -def shuffle(dg, transposed=False): - """ - Shuffles the renumbered input distributed graph edgelist into ngpu - partitions. The number of processes/gpus P = prows*pcols. The 2D - partitioning divides the matrix into P*pcols rectangular partitions - as per vertex partitioning performed in renumbering, and then shuffles - these partitions into P gpus. 
- - Parameters - ---------- - transposed : bool, optional (default=False) - """ - - ddf = dg.edgelist.edgelist_df - ngpus = Comms.get_n_workers() - prows, pcols, partition_type = Comms.get_2D_partition() - - renumber_vertex_count = dg.renumber_map.implementation.ddf.map_partitions( - len - ).compute() - renumber_vertex_cumsum = renumber_vertex_count.cumsum() - - if transposed: - row_dtype = ddf["dst"].dtype - col_dtype = ddf["src"].dtype - else: - row_dtype = ddf["src"].dtype - col_dtype = ddf["dst"].dtype - - vertex_partition_offsets = cudf.Series([0], dtype=row_dtype) - vertex_partition_offsets = vertex_partition_offsets.append( - cudf.Series(renumber_vertex_cumsum, dtype=row_dtype) - ) - num_verts = vertex_partition_offsets.iloc[-1] - if partition_type == 1: - vertex_row_partitions = [] - for i in range(prows + 1): - vertex_row_partitions.append(vertex_partition_offsets.iloc[i * pcols]) - vertex_row_partitions = cudf.Series(vertex_row_partitions, dtype=row_dtype) - else: - vertex_row_partitions = vertex_partition_offsets - vertex_col_partitions = [] - for i in range(pcols + 1): - vertex_col_partitions.append(vertex_partition_offsets.iloc[i * prows]) - vertex_col_partitions = cudf.Series(vertex_col_partitions, dtype=col_dtype) - - meta = ddf._meta._constructor_sliced([0]) - partitions = ddf.map_partitions( - _set_partitions_pre, - vertex_row_partitions=vertex_row_partitions, - vertex_col_partitions=vertex_col_partitions, - prows=prows, - pcols=pcols, - transposed=transposed, - partition_type=partition_type, - meta=meta, - ) - ddf2 = ddf.assign(_partitions=partitions) - ddf3 = rearrange_by_column( - ddf2, - "_partitions", - max_branch=None, - npartitions=ngpus, - shuffle="tasks", - ignore_index=True, - ).drop(columns=["_partitions"]) - - partition_row_size = pcols - partition_col_size = prows - - return ( - ddf3, - num_verts, - partition_row_size, - partition_col_size, - vertex_partition_offsets, - ) diff --git a/python/cugraph/cugraph/tests/centrality/test_betweenness_centrality.py b/python/cugraph/cugraph/tests/centrality/test_betweenness_centrality.py index d4b52d01046..759ed01a7eb 100644 --- a/python/cugraph/cugraph/tests/centrality/test_betweenness_centrality.py +++ b/python/cugraph/cugraph/tests/centrality/test_betweenness_centrality.py @@ -161,6 +161,7 @@ def _calc_bc_subset(G, Gnx, normalized, weight, endpoints, k, seed, result_dtype # We first mimic acquisition of the nodes to compare with same sources random.seed(seed) # It will be called again in nx's call sources = random.sample(list(Gnx.nodes()), k) + print("\nsources are ", sources) df = cugraph.betweenness_centrality( G, k=sources, @@ -304,7 +305,7 @@ def compare_scores(sorted_df, first_key, second_key, epsilon=DEFAULT_EPSILON): # ============================================================================= @pytest.mark.sg @pytest.mark.parametrize("graph_file", DATASETS_SMALL) -@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS) +@pytest.mark.parametrize("directed", [False, True]) @pytest.mark.parametrize("subset_size", SUBSET_SIZE_OPTIONS) @pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS) @pytest.mark.parametrize("weight", [None]) @@ -312,7 +313,7 @@ def compare_scores(sorted_df, first_key, second_key, epsilon=DEFAULT_EPSILON): @pytest.mark.parametrize("subset_seed", SUBSET_SEED_OPTIONS) @pytest.mark.parametrize("result_dtype", RESULT_DTYPE_OPTIONS) @pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS) -def test_betweenness_centrality( +def test_betweenness_centrality_0( graph_file, directed, 
subset_size, diff --git a/python/cugraph/cugraph/tests/centrality/test_betweenness_centrality_mg.py b/python/cugraph/cugraph/tests/centrality/test_betweenness_centrality_mg.py new file mode 100644 index 00000000000..e36e50c91aa --- /dev/null +++ b/python/cugraph/cugraph/tests/centrality/test_betweenness_centrality_mg.py @@ -0,0 +1,191 @@ +# Copyright (c) 2022-2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import cugraph.dask as dcg +import gc +import pytest +import cugraph +import dask_cudf +import cupy +import cudf + + +# from cugraph.dask.common.mg_utils import is_single_gpu +from cugraph.testing import utils +from pylibcugraph.testing import gen_fixture_params_product + + +# ============================================================================= +# Pytest Setup / Teardown - called for each test function +# ============================================================================= + + +def setup_function(): + gc.collect() + + +IS_DIRECTED = [True, False] + + +# ============================================================================= +# Pytest fixtures +# ============================================================================= + +datasets = utils.DATASETS_UNDIRECTED + +fixture_params = gen_fixture_params_product( + (datasets, "graph_file"), + ([False, True], "normalized"), + ([False, True], "endpoints"), + ([42, None], "subset_seed"), + ([None, 15], "subset_size"), + (IS_DIRECTED, "directed"), + ([list, cudf], "vertex_list_type"), +) + + +@pytest.fixture(scope="module", params=fixture_params) +def input_combo(request): + """ + Simply return the current combination of params as a dictionary for use in + tests or other parameterized fixtures. + """ + parameters = dict( + zip( + ( + "graph_file", + "normalized", + "endpoints", + "subset_seed", + "subset_size", + "directed", + "vertex_list_type", + ), + request.param, + ) + ) + + return parameters + + +@pytest.fixture(scope="module") +def input_expected_output(input_combo): + """ + This fixture returns the inputs and expected results from the + betweenness_centrality algo based on cuGraph betweenness_centrality) which can + be used for validation. + """ + + input_data_path = input_combo["graph_file"] + normalized = input_combo["normalized"] + endpoints = input_combo["endpoints"] + random_state = input_combo["subset_seed"] + subset_size = input_combo["subset_size"] + directed = input_combo["directed"] + vertex_list_type = input_combo["vertex_list_type"] + + G = utils.generate_cugraph_graph_from_file(input_data_path, directed=directed) + + if subset_size is None: + k = subset_size + elif isinstance(subset_size, int): + # Select random vertices + k = G.select_random_vertices( + random_state=random_state, num_vertices=subset_size + ) + if vertex_list_type is list: + k = k.to_arrow().to_pylist() + + print("the seeds are \n", k) + if vertex_list_type is int: + # This internally sample k vertices in betweenness centrality. 
+ # Since the nodes that will be sampled by each implementation will + # be random, therefore sample all vertices which will make the test + # consistent. + k = len(G.nodes()) + + input_combo["k"] = k + + sg_cugraph_bc = cugraph.betweenness_centrality( + G, k=k, normalized=normalized, endpoints=endpoints, random_state=random_state + ) + # Save the results back to the input_combo dictionary to prevent redundant + # cuGraph runs. Other tests using the input_combo fixture will look for + # them, and if not present they will have to re-run the same cuGraph call. + sg_cugraph_bc = sg_cugraph_bc.sort_values("vertex").reset_index(drop=True) + + input_combo["sg_cugraph_results"] = sg_cugraph_bc + chunksize = dcg.get_chunksize(input_data_path) + ddf = dask_cudf.read_csv( + input_data_path, + chunksize=chunksize, + delimiter=" ", + names=["src", "dst", "value"], + dtype=["int32", "int32", "float32"], + ) + + dg = cugraph.Graph(directed=directed) + dg.from_dask_cudf_edgelist( + ddf, + source="src", + destination="dst", + edge_attr="value", + renumber=True, + store_transposed=True, + ) + + input_combo["MGGraph"] = dg + + return input_combo + + +# ============================================================================= +# Tests +# ============================================================================= + + +# @pytest.mark.skipif( +# is_single_gpu(), reason="skipping MG testing on Single GPU system" +# ) + + +@pytest.mark.mg +def test_dask_betweenness_centrality(dask_client, benchmark, input_expected_output): + + dg = input_expected_output["MGGraph"] + k = input_expected_output["k"] + endpoints = input_expected_output["endpoints"] + normalized = input_expected_output["normalized"] + random_state = input_expected_output["subset_seed"] + mg_bc_results = benchmark( + dcg.betweenness_centrality, + dg, + k=k, + normalized=normalized, + endpoints=endpoints, + random_state=random_state, + ) + + mg_bc_results = ( + mg_bc_results.compute().sort_values("vertex").reset_index(drop=True) + )["betweenness_centrality"].to_cupy() + + sg_bc_results = ( + input_expected_output["sg_cugraph_results"] + .sort_values("vertex") + .reset_index(drop=True) + )["betweenness_centrality"].to_cupy() + + diff = cupy.isclose(mg_bc_results, sg_bc_results) + + assert diff.all() diff --git a/python/cugraph/cugraph/tests/centrality/test_eigenvector_centrality_mg.py b/python/cugraph/cugraph/tests/centrality/test_eigenvector_centrality_mg.py index 02a4532f864..f91ac418ef0 100644 --- a/python/cugraph/cugraph/tests/centrality/test_eigenvector_centrality_mg.py +++ b/python/cugraph/cugraph/tests/centrality/test_eigenvector_centrality_mg.py @@ -50,9 +50,7 @@ def test_dask_eigenvector_centrality(dask_client, directed, input_data_path): dtype=["int32", "int32", "float32"], ) dg = cugraph.Graph(directed=True) - dg.from_dask_cudf_edgelist( - ddf, "src", "dst", legacy_renum_only=True, store_transposed=True - ) + dg.from_dask_cudf_edgelist(ddf, "src", "dst", store_transposed=True) mg_res = dcg.eigenvector_centrality(dg, tol=1e-6) mg_res = mg_res.compute() import networkx as nx @@ -101,9 +99,7 @@ def test_dask_eigenvector_centrality_transposed_false(dask_client): ) dg = cugraph.Graph(directed=True) - dg.from_dask_cudf_edgelist( - ddf, "src", "dst", legacy_renum_only=True, store_transposed=False - ) + dg.from_dask_cudf_edgelist(ddf, "src", "dst", store_transposed=False) warning_msg = ( "Eigenvector centrality expects the 'store_transposed' " diff --git a/python/cugraph/cugraph/tests/centrality/test_katz_centrality_mg.py 
b/python/cugraph/cugraph/tests/centrality/test_katz_centrality_mg.py index d320a862902..9a6ee2d2668 100644 --- a/python/cugraph/cugraph/tests/centrality/test_katz_centrality_mg.py +++ b/python/cugraph/cugraph/tests/centrality/test_katz_centrality_mg.py @@ -52,9 +52,7 @@ def test_dask_katz_centrality(dask_client, directed): ) dg = cugraph.Graph(directed=True) - dg.from_dask_cudf_edgelist( - ddf, "src", "dst", legacy_renum_only=True, store_transposed=True - ) + dg.from_dask_cudf_edgelist(ddf, "src", "dst", store_transposed=True) degree_max = dg.degree()["degree"].max().compute() katz_alpha = 1 / (degree_max) @@ -111,9 +109,7 @@ def test_dask_katz_centrality_nstart(dask_client, directed): ) dg = cugraph.Graph(directed=True) - dg.from_dask_cudf_edgelist( - ddf, "src", "dst", legacy_renum_only=True, store_transposed=True - ) + dg.from_dask_cudf_edgelist(ddf, "src", "dst", store_transposed=True) mg_res = dcg.katz_centrality(dg, max_iter=50, tol=1e-6) mg_res = mg_res.compute() @@ -158,9 +154,7 @@ def test_dask_katz_centrality_transposed_false(dask_client): ) dg = cugraph.Graph(directed=True) - dg.from_dask_cudf_edgelist( - ddf, "src", "dst", legacy_renum_only=True, store_transposed=False - ) + dg.from_dask_cudf_edgelist(ddf, "src", "dst", store_transposed=False) warning_msg = ( "Katz centrality expects the 'store_transposed' " diff --git a/python/cugraph/cugraph/tests/community/test_balanced_cut.py b/python/cugraph/cugraph/tests/community/test_balanced_cut.py index 56f58cd04aa..5beca07dfb7 100644 --- a/python/cugraph/cugraph/tests/community/test_balanced_cut.py +++ b/python/cugraph/cugraph/tests/community/test_balanced_cut.py @@ -43,6 +43,7 @@ def random_call(G, partitions): assign_cu = cudf.DataFrame(assignment, columns=["cluster"]) assign_cu["vertex"] = assign_cu.index + assign_cu = assign_cu.astype("int32") score += cugraph.analyzeClustering_edge_cut(G, partitions, assign_cu) @@ -100,7 +101,7 @@ def test_edge_cut_clustering_with_edgevals(graph_file, partitions): @pytest.mark.sg -@pytest.mark.parametrize("graph_file", DATASETS) +@pytest.mark.parametrize("graph_file", [DATASETS[2]]) @pytest.mark.parametrize("partitions", PARTITIONS) def test_edge_cut_clustering_with_edgevals_nx(graph_file, partitions): gc.collect() @@ -125,6 +126,8 @@ def test_edge_cut_clustering_with_edgevals_nx(graph_file, partitions): pdf.columns = ["vertex", "cluster"] gdf = cudf.from_pandas(pdf) + gdf = gdf.astype("int32") + cu_score = cugraph.analyzeClustering_edge_cut( G, partitions, gdf, "vertex", "cluster" ) diff --git a/python/cugraph/cugraph/tests/community/test_ecg.py b/python/cugraph/cugraph/tests/community/test_ecg.py index 9d8c76737a0..5536d562402 100644 --- a/python/cugraph/cugraph/tests/community/test_ecg.py +++ b/python/cugraph/cugraph/tests/community/test_ecg.py @@ -79,6 +79,7 @@ def test_ecg_clustering(graph_file, min_weight, ensemble_size): @pytest.mark.parametrize("min_weight", MIN_WEIGHTS) @pytest.mark.parametrize("ensemble_size", ENSEMBLE_SIZES) def test_ecg_clustering_nx(graph_file, min_weight, ensemble_size): + gc.collect() dataset_path = graph_file.get_path() # Read in the graph and get a NetworkX graph diff --git a/python/cugraph/cugraph/tests/community/test_induced_subgraph_mg.py b/python/cugraph/cugraph/tests/community/test_induced_subgraph_mg.py new file mode 100644 index 00000000000..34cbf73aae6 --- /dev/null +++ b/python/cugraph/cugraph/tests/community/test_induced_subgraph_mg.py @@ -0,0 +1,174 @@ +# Copyright (c) 2022-2023, NVIDIA CORPORATION. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import pytest + +import dask_cudf +from cudf.testing.testing import assert_frame_equal +from pylibcugraph.testing import gen_fixture_params_product + +import cugraph +import cugraph.dask as dcg +from cugraph.testing import utils +from cugraph.dask.common.mg_utils import is_single_gpu +import cudf + + +# ============================================================================= +# Pytest Setup / Teardown - called for each test function +# ============================================================================= + + +def setup_function(): + gc.collect() + + +IS_DIRECTED = [True, False] +NUM_SEEDS = [2, 5, 10, 20] + +# FIXME: This parameter will be tested in the next release when updating the +# SG implementation +OFFSETS = [None] + + +# ============================================================================= +# Pytest fixtures +# ============================================================================= + +datasets = utils.DATASETS_UNDIRECTED + [ + utils.RAPIDS_DATASET_ROOT_DIR_PATH / "email-Eu-core.csv" +] + +fixture_params = gen_fixture_params_product( + (datasets, "graph_file"), + (IS_DIRECTED, "directed"), + (NUM_SEEDS, "num_seeds"), + (OFFSETS, "offsets"), +) + + +@pytest.fixture(scope="module", params=fixture_params) +def input_combo(request): + """ + Simply return the current combination of params as a dictionary for use in + tests or other parameterized fixtures. + """ + parameters = dict( + zip(("graph_file", "directed", "seeds", "offsets"), request.param) + ) + + return parameters + + +@pytest.fixture(scope="module") +def input_expected_output(input_combo): + """ + This fixture returns the inputs and expected results from the induced_subgraph algo. + (based on cuGraph subgraph) which can be used for validation. + """ + + input_data_path = input_combo["graph_file"] + directed = input_combo["directed"] + num_seeds = input_combo["seeds"] + + # FIXME: This parameter is not tested + # offsets= input_combo["offsets"] + G = utils.generate_cugraph_graph_from_file( + input_data_path, directed=directed, edgevals=True + ) + + # Sample k vertices from the cuGraph graph + # FIXME: Leverage the method 'select_random_vertices' instead + srcs = G.view_edge_list()["src"] + dsts = G.view_edge_list()["dst"] + vertices = cudf.concat([srcs, dsts]).drop_duplicates() + vertices = vertices.sample(num_seeds).astype("int32") + + # print randomly sample n seeds from the graph + print("\nvertices: \n", vertices) + + input_combo["vertices"] = vertices + + sg_induced_subgraph, _ = cugraph.induced_subgraph(G, vertices=vertices) + + # Save the results back to the input_combo dictionary to prevent redundant + # cuGraph runs. Other tests using the input_combo fixture will look for + # them, and if not present they will have to re-run the same cuGraph call. 
+ + input_combo["sg_cugraph_results"] = sg_induced_subgraph + chunksize = dcg.get_chunksize(input_data_path) + ddf = dask_cudf.read_csv( + input_data_path, + chunksize=chunksize, + delimiter=" ", + names=["src", "dst", "value"], + dtype=["int32", "int32", "float32"], + ) + + dg = cugraph.Graph(directed=directed) + dg.from_dask_cudf_edgelist( + ddf, + source="src", + destination="dst", + edge_attr="value", + renumber=True, + store_transposed=True, + ) + + input_combo["MGGraph"] = dg + + return input_combo + + +# ============================================================================= +# Tests +# ============================================================================= + + +@pytest.mark.mg +@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system") +def test_mg_induced_subgraph(dask_client, benchmark, input_expected_output): + + dg = input_expected_output["MGGraph"] + vertices = input_expected_output["vertices"] + + result_induced_subgraph = benchmark( + dcg.induced_subgraph, + dg, + vertices, + input_expected_output["offsets"], + ) + + mg_df, mg_offsets = result_induced_subgraph + + # mg_offsets = mg_offsets.compute().reset_index(drop=True) + + sg = input_expected_output["sg_cugraph_results"] + + if mg_df is not None and sg is not None: + # FIXME: 'edges()' or 'view_edgelist()' takes half the edges out if + # 'directed=False'. + sg_result = sg.input_df + + sg_df = sg_result.sort_values(["src", "dst"]).reset_index(drop=True) + mg_df = mg_df.compute().sort_values(["src", "dst"]).reset_index(drop=True) + + assert_frame_equal(sg_df, mg_df, check_dtype=False, check_like=True) + + else: + # There is no edges between the vertices provided + # FIXME: Once k-hop neighbors is implemented, find one hop neighbors + # of all the vertices and ensure that there is None + assert sg is None + assert mg_df is None diff --git a/python/cugraph/cugraph/tests/community/test_leiden.py b/python/cugraph/cugraph/tests/community/test_leiden.py index 45ca28e8c17..4a04eac3500 100644 --- a/python/cugraph/cugraph/tests/community/test_leiden.py +++ b/python/cugraph/cugraph/tests/community/test_leiden.py @@ -71,7 +71,10 @@ def test_leiden(graph_file): louvain_parts, louvain_mod = cugraph_louvain(G) # Calculating modularity scores for comparison - assert leiden_mod >= (0.99 * louvain_mod) + # FIXME: If the datasets is not renumbered, the leiden parts will + # also include isolated vertices which will be reflected in the modularity + # score. + assert leiden_mod >= (0.97 * louvain_mod) @pytest.mark.sg @@ -88,7 +91,10 @@ def test_leiden_nx(graph_file): louvain_parts, louvain_mod = cugraph_louvain(G) # Calculating modularity scores for comparison - assert leiden_mod >= (0.99 * louvain_mod) + # FIXME: If the datasets is not renumbered, the leiden parts will + # also include isolated vertices which will be reflected in the modularity + # score. 
+ assert leiden_mod >= (0.97 * louvain_mod) @pytest.mark.sg diff --git a/python/cugraph/cugraph/tests/community/test_modularity.py b/python/cugraph/cugraph/tests/community/test_modularity.py index 8e275b31d60..07fa2718ee1 100644 --- a/python/cugraph/cugraph/tests/community/test_modularity.py +++ b/python/cugraph/cugraph/tests/community/test_modularity.py @@ -29,6 +29,7 @@ def cugraph_call(G, partitions): df = cugraph.spectralModularityMaximizationClustering( G, partitions, num_eigen_vects=(partitions - 1) ) + score = cugraph.analyzeClustering_modularity(G, partitions, df, "vertex", "cluster") return score @@ -42,6 +43,7 @@ def random_call(G, partitions): assignment_cu = cudf.DataFrame(assignment, columns=["cluster"]) assignment_cu["vertex"] = assignment_cu.index + assignment_cu = assignment_cu.astype("int32") score = cugraph.analyzeClustering_modularity( G, partitions, assignment_cu, "vertex", "cluster" diff --git a/python/cugraph/cugraph/tests/community/test_subgraph_extraction.py b/python/cugraph/cugraph/tests/community/test_subgraph_extraction.py index a76b0894bc5..5b115be81e0 100644 --- a/python/cugraph/cugraph/tests/community/test_subgraph_extraction.py +++ b/python/cugraph/cugraph/tests/community/test_subgraph_extraction.py @@ -45,6 +45,9 @@ def cugraph_call(M, verts, directed=True): cu_M = cudf.from_pandas(M) + # FIXME: Add the column name in a list to trigger the python renumbering + # Drop this requirement when 'subgraph_extraction' leverages the CAPI graph + # which calls renumbering G.from_cudf_edgelist(cu_M, source="0", destination="1", edge_attr="weight") cu_verts = cudf.Series(verts) @@ -91,7 +94,7 @@ def test_subgraph_extraction_Graph(graph_file): @pytest.mark.sg -@pytest.mark.parametrize("graph_file", DATASETS) +@pytest.mark.parametrize("graph_file", [DATASETS[2]]) def test_subgraph_extraction_Graph_nx(graph_file): directed = False verts = np.zeros(3, dtype=np.int32) @@ -163,7 +166,9 @@ def test_subgraph_extraction_multi_column(graph_file): # FIXME: the coverage provided by this test could probably be handled by # another test that also checks using renumber=False +# FIXME: Drop this test as 'subgraph_extraction' requires renumbering @pytest.mark.sg +@pytest.mark.skip("obsolete") def test_subgraph_extraction_graph_not_renumbered(): """ Ensure subgraph() works with a Graph that has not been renumbered diff --git a/python/cugraph/cugraph/tests/core/test_core_number_mg.py b/python/cugraph/cugraph/tests/core/test_core_number_mg.py index 4523272ac2d..cff2ae11ef3 100644 --- a/python/cugraph/cugraph/tests/core/test_core_number_mg.py +++ b/python/cugraph/cugraph/tests/core/test_core_number_mg.py @@ -86,12 +86,7 @@ def input_expected_output(dask_client, input_combo): dg = cugraph.Graph(directed=False) dg.from_dask_cudf_edgelist( - ddf, - source="src", - destination="dst", - edge_attr="value", - renumber=True, - legacy_renum_only=True, + ddf, source="src", destination="dst", edge_attr="value", renumber=True ) input_combo["MGGraph"] = dg @@ -161,7 +156,6 @@ def test_core_number_invalid_input(input_expected_output): destination="dst", edge_attr="value", renumber=True, - legacy_renum_only=True, ) invalid_degree_type = 3 diff --git a/python/cugraph/cugraph/tests/core/test_k_core_mg.py b/python/cugraph/cugraph/tests/core/test_k_core_mg.py index 216eb026c53..d8e7ef98d24 100644 --- a/python/cugraph/cugraph/tests/core/test_k_core_mg.py +++ b/python/cugraph/cugraph/tests/core/test_k_core_mg.py @@ -109,7 +109,6 @@ def input_expected_output(dask_client, input_combo): destination="dst", 
edge_attr="value", renumber=True, - legacy_renum_only=True, ) input_combo["MGGraph"] = dg @@ -172,7 +171,6 @@ def test_dask_k_core_invalid_input(dask_client): destination="dst", edge_attr="value", renumber=True, - legacy_renum_only=True, store_transposed=True, ) with pytest.raises(ValueError): @@ -184,7 +182,6 @@ def test_dask_k_core_invalid_input(dask_client): source="src", destination="dst", edge_attr="value", - legacy_renum_only=True, store_transposed=True, ) diff --git a/python/cugraph/cugraph/tests/data_store/test_property_graph.py b/python/cugraph/cugraph/tests/data_store/test_property_graph.py index a87274b3344..c5c382df2eb 100644 --- a/python/cugraph/cugraph/tests/data_store/test_property_graph.py +++ b/python/cugraph/cugraph/tests/data_store/test_property_graph.py @@ -1177,8 +1177,14 @@ def test_extract_subgraph_vertex_prop_condition_only( expected_edgelist = cudf.DataFrame( {"src": [89216, 78634], "dst": [78634, 89216], "weights": [99, 8]} ) - actual_edgelist = G.unrenumber(G.edgelist.edgelist_df, "src", preserve_order=True) - actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + + if G.renumbered: + actual_edgelist = G.unrenumber( + G.edgelist.edgelist_df, "src", preserve_order=True + ) + actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + else: + actual_edgelist = G.edgelist.edgelist_df assert G.is_directed() # check_like=True ignores differences in column/index ordering @@ -1208,8 +1214,14 @@ def test_extract_subgraph_vertex_edge_prop_condition( ) expected_edgelist = cudf.DataFrame({"src": [78634], "dst": [32431], "weights": [4]}) - actual_edgelist = G.unrenumber(G.edgelist.edgelist_df, "src", preserve_order=True) - actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + + if G.renumbered: + actual_edgelist = G.unrenumber( + G.edgelist.edgelist_df, "src", preserve_order=True + ) + actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + else: + actual_edgelist = G.edgelist.edgelist_df assert G.is_directed() assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True) @@ -1237,8 +1249,13 @@ def test_extract_subgraph_edge_prop_condition_only(dataset1_PropertyGraph, as_pg expected_edgelist = cudf.DataFrame({"src": srcs, "dst": dsts}) expected_edgelist = expected_edgelist.sort_values(by="src", ignore_index=True) - actual_edgelist = G.unrenumber(G.edgelist.edgelist_df, "src", preserve_order=True) - actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + if G.renumbered: + actual_edgelist = G.unrenumber( + G.edgelist.edgelist_df, "src", preserve_order=True + ) + actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + else: + actual_edgelist = G.edgelist.edgelist_df actual_edgelist = actual_edgelist.sort_values(by="src", ignore_index=True) assert G.is_directed() @@ -1295,8 +1312,13 @@ def test_extract_subgraph_specific_query(dataset1_PropertyGraph, as_pg_first): ) expected_edgelist = cudf.DataFrame({"src": [89216], "dst": [4], "weights": [8832]}) - actual_edgelist = G.unrenumber(G.edgelist.edgelist_df, "src", preserve_order=True) - actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + if G.renumbered: + actual_edgelist = G.unrenumber( + G.edgelist.edgelist_df, "src", preserve_order=True + ) + actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + else: + actual_edgelist = G.edgelist.edgelist_df assert G.is_directed() assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True) @@ 
-1330,8 +1352,14 @@ def test_select_vertices_from_previous_selection(dataset1_PropertyGraph, as_pg_f G = pG.extract_subgraph(create_using=DiGraph_inst, selection=selection) expected_edgelist = cudf.DataFrame({"src": [89216], "dst": [78634]}) - actual_edgelist = G.unrenumber(G.edgelist.edgelist_df, "src", preserve_order=True) - actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + + if G.renumbered: + actual_edgelist = G.unrenumber( + G.edgelist.edgelist_df, "src", preserve_order=True + ) + actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + else: + actual_edgelist = G.edgelist.edgelist_df assert G.is_directed() assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True) @@ -1385,8 +1413,14 @@ def test_extract_subgraph_graph_without_vert_props(as_pg_first): expected_edgelist = cudf.DataFrame( {"src": [89216, 89216, 89216], "dst": [4, 89021, 32431], "weights": [0, 9, 9]} ) - actual_edgelist = G.unrenumber(G.edgelist.edgelist_df, "src", preserve_order=True) - actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + + if G.renumbered: + actual_edgelist = G.unrenumber( + G.edgelist.edgelist_df, "src", preserve_order=True + ) + actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + else: + actual_edgelist = G.edgelist.edgelist_df assert G.is_directed() assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True) @@ -1532,8 +1566,13 @@ def test_extract_subgraph_default_edge_weight(dataset1_PropertyGraph): expected_edgelist = cudf.DataFrame({"src": srcs, "dst": dsts, "weights": weights}) expected_edgelist = expected_edgelist.sort_values(by="src", ignore_index=True) - actual_edgelist = G.unrenumber(G.edgelist.edgelist_df, "src", preserve_order=True) - actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + if G.renumbered: + actual_edgelist = G.unrenumber( + G.edgelist.edgelist_df, "src", preserve_order=True + ) + actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True) + else: + actual_edgelist = G.edgelist.edgelist_df actual_edgelist = actual_edgelist.sort_values(by="src", ignore_index=True) assert G.is_directed() diff --git a/python/cugraph/cugraph/tests/data_store/test_property_graph_mg.py b/python/cugraph/cugraph/tests/data_store/test_property_graph_mg.py index e973e21d77a..bc526380957 100644 --- a/python/cugraph/cugraph/tests/data_store/test_property_graph_mg.py +++ b/python/cugraph/cugraph/tests/data_store/test_property_graph_mg.py @@ -600,19 +600,12 @@ def test_extract_subgraph_nonrenumbered_noedgedata( dataset2_simple_MGPropertyGraph, as_pg_first ): """ - Ensure a subgraph can be extracted that contains no edge_data. Also ensure - renumber cannot be False since that is currently not allowed for MG. + Ensure a subgraph can be extracted that contains no edge_data. """ from cugraph import Graph (pG, data) = dataset2_simple_MGPropertyGraph - # renumber=False is currently not allowed for MG. 
- with pytest.raises(ValueError): - G = pG.extract_subgraph( - create_using=Graph(directed=True), renumber_graph=False, add_edge_data=False - ) - if as_pg_first: G = pG.extract_subgraph(create_using=pG).extract_subgraph( create_using=Graph(directed=True), add_edge_data=False @@ -1484,6 +1477,37 @@ def test_types_from_numerals(dask_client): ] +@pytest.mark.mg +def test_renumber_by_type_only_default_type(dask_client): + from cugraph.experimental import MGPropertyGraph + + pG = MGPropertyGraph() + df = cudf.DataFrame( + { + "src": cp.array([0, 0, 1, 2, 2, 3], dtype="int32"), + "dst": cp.array([1, 2, 4, 3, 4, 1], dtype="int32"), + } + ) + ddf = dask_cudf.from_cudf(df, npartitions=2) + pG.add_edge_data(ddf, vertex_col_names=["src", "dst"]) + + df2 = cudf.DataFrame( + { + "prop1": [100, 200, 300, 400, 500], + "prop2": [5, 4, 3, 2, 1], + "id": cp.array([0, 1, 2, 3, 4], dtype="int32"), + } + ) + ddf2 = dask_cudf.from_cudf(df2, npartitions=2) + pG.add_vertex_data(ddf2, vertex_col_name="id") + pG.renumber_vertices_by_type() + got = pG.get_vertex_data().compute() + assert got[pG.vertex_col_name].to_arrow().to_pylist() == list(range(len(got))) + pG.renumber_edges_by_type() + got = pG.get_edge_data().compute() + assert got[pG.edge_id_col_name].to_arrow().to_pylist() == list(range(len(got))) + + # ============================================================================= # Benchmarks # ============================================================================= diff --git a/python/cugraph/cugraph/tests/internals/test_renumber.py b/python/cugraph/cugraph/tests/internals/test_renumber.py index 728389875b2..4526770ec2a 100644 --- a/python/cugraph/cugraph/tests/internals/test_renumber.py +++ b/python/cugraph/cugraph/tests/internals/test_renumber.py @@ -25,51 +25,6 @@ from cugraph.experimental.datasets import DATASETS -@pytest.mark.sg -def test_renumber_ips(): - source_list = [ - "192.168.1.1", - "172.217.5.238", - "216.228.121.209", - "192.16.31.23", - ] - dest_list = [ - "172.217.5.238", - "216.228.121.209", - "192.16.31.23", - "192.168.1.1", - ] - - pdf = pd.DataFrame({"source_list": source_list, "dest_list": dest_list}) - - gdf = cudf.from_pandas(pdf) - - gdf["source_as_int"] = gdf["source_list"].str.ip2int() - gdf["dest_as_int"] = gdf["dest_list"].str.ip2int() - - renumbered_gdf, renumber_map = NumberMap.renumber( - gdf, "source_as_int", "dest_as_int", preserve_order=True - ) - - input_check = renumbered_gdf.merge(gdf, on=["source_list", "dest_list"]) - - output_check = renumber_map.from_internal_vertex_id( - renumbered_gdf, - renumber_map.renumbered_src_col_name, - external_column_names=["check_src"], - ) - output_check = renumber_map.from_internal_vertex_id( - output_check, - renumber_map.renumbered_dst_col_name, - external_column_names=["check_dst"], - ) - - merged = output_check.merge(input_check, on=["source_list", "dest_list"]) - - assert_series_equal(merged["check_src"], merged["source_as_int"], check_names=False) - assert_series_equal(merged["check_dst"], merged["dest_as_int"], check_names=False) - - @pytest.mark.sg def test_renumber_ips_cols(): @@ -93,6 +48,7 @@ def test_renumber_ips_cols(): gdf["source_as_int"] = gdf["source_list"].str.ip2int() gdf["dest_as_int"] = gdf["dest_list"].str.ip2int() + # Brackets are added to the column names to trigger the python renumebring renumbered_gdf, renumber_map = NumberMap.renumber( gdf, ["source_as_int"], ["dest_as_int"], preserve_order=True ) @@ -116,40 +72,6 @@ def test_renumber_ips_cols(): assert_series_equal(merged["check_dst"], 
merged["dest_as_int"], check_names=False) -@pytest.mark.sg -def test_renumber_negative(): - source_list = [4, 6, 8, -20, 1] - dest_list = [1, 29, 35, 0, 77] - - df = pd.DataFrame({"source_list": source_list, "dest_list": dest_list}) - - gdf = cudf.DataFrame.from_pandas(df[["source_list", "dest_list"]]) - gdf["original_src"] = gdf["source_list"] - gdf["original_dst"] = gdf["dest_list"] - - renumbered_gdf, renumber_map = NumberMap.renumber( - gdf, "source_list", "dest_list", preserve_order=True - ) - - input_check = renumbered_gdf.merge(gdf, on=["original_src", "original_dst"]) - - output_check = renumber_map.from_internal_vertex_id( - renumbered_gdf, - renumber_map.renumbered_src_col_name, - external_column_names=["check_src"], - ) - output_check = renumber_map.from_internal_vertex_id( - output_check, - renumber_map.renumbered_dst_col_name, - external_column_names=["check_dst"], - ) - - merged = output_check.merge(input_check, on=["original_src", "original_dst"]) - - assert_series_equal(merged["check_src"], merged["original_src"], check_names=False) - assert_series_equal(merged["check_dst"], merged["original_dst"], check_names=False) - - @pytest.mark.sg def test_renumber_negative_col(): source_list = [4, 6, 8, -20, 1] @@ -161,6 +83,7 @@ def test_renumber_negative_col(): gdf["original_src"] = gdf["source_list"] gdf["original_dst"] = gdf["dest_list"] + # Brackets are added to the column names to trigger the python renumebring renumbered_gdf, renumber_map = NumberMap.renumber( gdf, ["source_list"], ["dest_list"], preserve_order=True ) @@ -184,47 +107,6 @@ def test_renumber_negative_col(): assert_series_equal(merged["check_dst"], merged["original_dst"], check_names=False) -@pytest.mark.sg -@pytest.mark.parametrize("graph_file", DATASETS) -def test_renumber_files(graph_file): - gc.collect() - dataset_path = graph_file.get_path() - M = utils.read_csv_for_nx(dataset_path) - sources = cudf.Series(M["0"]) - destinations = cudf.Series(M["1"]) - - translate = 1000 - - df = cudf.DataFrame() - df["src"] = cudf.Series([x + translate for x in sources.values_host]) - df["dst"] = cudf.Series([x + translate for x in destinations.values_host]) - - exp_src = cudf.Series([x + translate for x in sources.values_host]) - exp_dst = cudf.Series([x + translate for x in destinations.values_host]) - - renumbered_df, renumber_map = NumberMap.renumber( - df, "src", "dst", preserve_order=True - ) - - unrenumbered_df = renumber_map.unrenumber( - renumbered_df, renumber_map.renumbered_src_col_name, preserve_order=True - ) - unrenumbered_df = renumber_map.unrenumber( - unrenumbered_df, renumber_map.renumbered_dst_col_name, preserve_order=True - ) - - assert_series_equal( - exp_src, - unrenumbered_df[renumber_map.renumbered_src_col_name], - check_names=False, - ) - assert_series_equal( - exp_dst, - unrenumbered_df[renumber_map.renumbered_dst_col_name], - check_names=False, - ) - - @pytest.mark.sg @pytest.mark.parametrize("graph_file", DATASETS) def test_renumber_files_col(graph_file): @@ -243,6 +125,7 @@ def test_renumber_files_col(graph_file): exp_src = cudf.Series([x + translate for x in sources.values_host]) exp_dst = cudf.Series([x + translate for x in destinations.values_host]) + # Brackets are added to the column names to trigger the python renumebring renumbered_df, renumber_map = NumberMap.renumber( gdf, ["src"], ["dst"], preserve_order=True ) @@ -283,6 +166,7 @@ def test_renumber_files_multi_col(graph_file): gdf["src"] = sources + translate gdf["dst"] = destinations + translate + # Brackets are added to the column 
names to trigger the python renumebring renumbered_df, renumber_map = NumberMap.renumber( gdf, ["src", "src_old"], ["dst", "dst_old"], preserve_order=True ) @@ -364,7 +248,8 @@ def test_renumber_unrenumber_non_default_vert_names(): } ) - renumbered_df, number_map = NumberMap.renumber(input_gdf, "col_a", "col_b") + # Brackets are added to the column names to trigger the python renumebring + renumbered_df, number_map = NumberMap.renumber(input_gdf, ["col_a"], ["col_b"]) some_result_gdf = cudf.DataFrame({"vertex": [0, 1, 2, 3]}) expected_values = [99, 199, 2, 32] diff --git a/python/cugraph/cugraph/tests/internals/test_renumber_mg.py b/python/cugraph/cugraph/tests/internals/test_renumber_mg.py index 4e5455f0bb8..f1c503ab6f7 100644 --- a/python/cugraph/cugraph/tests/internals/test_renumber_mg.py +++ b/python/cugraph/cugraph/tests/internals/test_renumber_mg.py @@ -179,52 +179,6 @@ def test_dask_pagerank(dask_client, directed): assert err == 0 -@pytest.mark.mg -@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system") -@pytest.mark.parametrize("renumber", [False]) -@pytest.mark.parametrize("directed", IS_DIRECTED) -def test_graph_renumber_false(renumber, dask_client, directed): - input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix() - chunksize = dcg.get_chunksize(input_data_path) - - ddf = dask_cudf.read_csv( - input_data_path, - chunksize=chunksize, - delimiter=" ", - names=["src", "dst", "value"], - dtype=["int32", "int32", "float32"], - ) - dg = cugraph.Graph(directed=directed) - - with pytest.raises(ValueError): - dg.from_dask_cudf_edgelist(ddf, "src", "dst", renumber=renumber) - - -@pytest.mark.mg -@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system") -@pytest.mark.parametrize("renumber", [False]) -@pytest.mark.parametrize("directed", IS_DIRECTED) -def test_multi_graph_renumber_false(renumber, dask_client, directed): - input_data_path = ( - RAPIDS_DATASET_ROOT_DIR_PATH / "karate_multi_edge.csv" - ).as_posix() - chunksize = dcg.get_chunksize(input_data_path) - - ddf = dask_cudf.read_csv( - input_data_path, - chunksize=chunksize, - delimiter=" ", - names=["src", "dst", "value"], - dtype=["int32", "int32", "float32"], - ) - dg = cugraph.MultiGraph(directed=directed) - - # ValueError always thrown since renumber must be True with - # MNMG algorithms - with pytest.raises(ValueError): - dg.from_dask_cudf_edgelist(ddf, "src", "dst", renumber=renumber) - - @pytest.mark.mg @pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system") @pytest.mark.parametrize( diff --git a/python/cugraph/cugraph/tests/link_analysis/test_hits_mg.py b/python/cugraph/cugraph/tests/link_analysis/test_hits_mg.py index 74169d45113..9885d47b24a 100644 --- a/python/cugraph/cugraph/tests/link_analysis/test_hits_mg.py +++ b/python/cugraph/cugraph/tests/link_analysis/test_hits_mg.py @@ -96,7 +96,6 @@ def input_expected_output(input_combo): destination="dst", edge_attr="value", renumber=True, - legacy_renum_only=True, store_transposed=True, ) @@ -158,7 +157,7 @@ def test_dask_hits(dask_client, benchmark, input_expected_output): @pytest.mark.mg -def test_dask_hots_transposed_false(dask_client): +def test_dask_hits_transposed_false(dask_client): input_data_path = (utils.RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix() chunksize = dcg.get_chunksize(input_data_path) @@ -172,9 +171,7 @@ def test_dask_hots_transposed_false(dask_client): ) dg = cugraph.Graph(directed=True) - dg.from_dask_cudf_edgelist( - ddf, "src", 
"dst", legacy_renum_only=True, store_transposed=False - ) + dg.from_dask_cudf_edgelist(ddf, "src", "dst", store_transposed=False) warning_msg = ( "HITS expects the 'store_transposed' " diff --git a/python/cugraph/cugraph/tests/link_analysis/test_pagerank.py b/python/cugraph/cugraph/tests/link_analysis/test_pagerank.py index 1cbc1bf66f8..ba136963b60 100644 --- a/python/cugraph/cugraph/tests/link_analysis/test_pagerank.py +++ b/python/cugraph/cugraph/tests/link_analysis/test_pagerank.py @@ -392,18 +392,33 @@ def test_pagerank_invalid_personalization_dtype(): store_transposed=True, ) - personalization_vec = cudf.DataFrame() - personalization_vec["vertex"] = [17, 26] - personalization_vec["values"] = [0.5, 0.75] + personalization = cudf.DataFrame() + personalization["vertex"] = [17, 26] + personalization["values"] = [0.5, 0.75] + + # cu_M["weights"] is of type 'float32' and personalization["values"] of type + # 'float64'. The python code should enforce that both types match nd raise the + # following warning. warning_msg = ( "PageRank requires 'personalization' values to match the " "graph's 'edge_attr' type. edge_attr type is: " "float32 and got 'personalization' values " "of type: float64." ) + with pytest.warns(UserWarning, match=warning_msg): + cugraph.pagerank(G, personalization=personalization) + # cu_M["src"] is of type 'int32' and personalization["vertex"] of type + # 'int64'. The python code should enforce that both types match and raise the + # following warning. + warning_msg = ( + "PageRank requires 'personalization' vertex to match the " + "graph's 'vertex' type. input graph's vertex type is: " + "int32 and got 'personalization' vertex " + "of type: int64." + ) with pytest.warns(UserWarning, match=warning_msg): - cugraph.pagerank(G, personalization=personalization_vec) + cugraph.pagerank(G, personalization=personalization) @pytest.mark.sg diff --git a/python/cugraph/cugraph/tests/link_analysis/test_pagerank_mg.py b/python/cugraph/cugraph/tests/link_analysis/test_pagerank_mg.py index 97729c5ef85..941974eea4f 100644 --- a/python/cugraph/cugraph/tests/link_analysis/test_pagerank_mg.py +++ b/python/cugraph/cugraph/tests/link_analysis/test_pagerank_mg.py @@ -210,9 +210,7 @@ def test_dask_pagerank_transposed_false(dask_client): ) dg = cugraph.Graph(directed=True) - dg.from_dask_cudf_edgelist( - ddf, "src", "dst", legacy_renum_only=True, store_transposed=False - ) + dg.from_dask_cudf_edgelist(ddf, "src", "dst", store_transposed=False) warning_msg = ( "Pagerank expects the 'store_transposed' " diff --git a/python/cugraph/cugraph/tests/link_prediction/test_jaccard_mg.py b/python/cugraph/cugraph/tests/link_prediction/test_jaccard_mg.py index 34b5d47fe50..af1a1a35ec7 100644 --- a/python/cugraph/cugraph/tests/link_prediction/test_jaccard_mg.py +++ b/python/cugraph/cugraph/tests/link_prediction/test_jaccard_mg.py @@ -105,7 +105,6 @@ def input_expected_output(input_combo): source="src", destination="dst", renumber=True, - legacy_renum_only=True, store_transposed=True, ) @@ -174,7 +173,6 @@ def test_dask_weighted_jaccard(): destination="dst", edge_attr="value", renumber=True, - legacy_renum_only=True, store_transposed=True, ) with pytest.raises(ValueError): @@ -186,7 +184,6 @@ def test_dask_weighted_jaccard(): source="src", destination="dst", edge_attr="value", - legacy_renum_only=True, store_transposed=True, ) diff --git a/python/cugraph/cugraph/tests/link_prediction/test_overlap_mg.py b/python/cugraph/cugraph/tests/link_prediction/test_overlap_mg.py index c4c61ffbdd9..541e3123e78 100644 --- 
a/python/cugraph/cugraph/tests/link_prediction/test_overlap_mg.py +++ b/python/cugraph/cugraph/tests/link_prediction/test_overlap_mg.py @@ -105,7 +105,6 @@ def input_expected_output(input_combo): source="src", destination="dst", renumber=True, - legacy_renum_only=True, store_transposed=True, ) @@ -176,7 +175,6 @@ def test_dask_weighted_overlap(): destination="dst", edge_attr="value", renumber=True, - legacy_renum_only=True, store_transposed=True, ) with pytest.raises(ValueError): @@ -188,7 +186,6 @@ def test_dask_weighted_overlap(): source="src", destination="dst", edge_attr="value", - legacy_renum_only=True, store_transposed=True, ) diff --git a/python/cugraph/cugraph/tests/link_prediction/test_sorensen_mg.py b/python/cugraph/cugraph/tests/link_prediction/test_sorensen_mg.py index 31da7c742b5..4627b749426 100644 --- a/python/cugraph/cugraph/tests/link_prediction/test_sorensen_mg.py +++ b/python/cugraph/cugraph/tests/link_prediction/test_sorensen_mg.py @@ -106,7 +106,6 @@ def input_expected_output(input_combo): source="src", destination="dst", renumber=True, - legacy_renum_only=True, store_transposed=True, ) @@ -175,7 +174,6 @@ def test_dask_weighted_sorensen(): destination="dst", edge_attr="value", renumber=True, - legacy_renum_only=True, store_transposed=True, ) with pytest.raises(ValueError): @@ -187,7 +185,6 @@ def test_dask_weighted_sorensen(): source="src", destination="dst", edge_attr="value", - legacy_renum_only=True, store_transposed=True, ) diff --git a/python/cugraph/cugraph/tests/sampling/test_bulk_sampler.py b/python/cugraph/cugraph/tests/sampling/test_bulk_sampler.py index cac9cc965bc..bc801cab0a2 100644 --- a/python/cugraph/cugraph/tests/sampling/test_bulk_sampler.py +++ b/python/cugraph/cugraph/tests/sampling/test_bulk_sampler.py @@ -35,7 +35,6 @@ def test_bulk_sampler_simple(): source="src", destination="dst", edge_attr=["wgt", "eid", "etp"], - legacy_renum_only=True, ) tempdir_object = tempfile.TemporaryDirectory() @@ -64,7 +63,6 @@ def test_bulk_sampler_simple(): @pytest.mark.sg -@pytest.mark.skip("work in progress") def test_bulk_sampler_remainder(): el = karate.get_edgelist().reset_index().rename(columns={"index": "eid"}) el["eid"] = el["eid"].astype("int32") @@ -76,7 +74,6 @@ def test_bulk_sampler_remainder(): source="src", destination="dst", edge_attr=["wgt", "eid", "etp"], - legacy_renum_only=True, ) tempdir_object = tempfile.TemporaryDirectory() @@ -117,11 +114,11 @@ def test_bulk_sampler_remainder(): subdir = f"{x}-{x+1}" df = cudf.read_parquet(os.path.join(tld, f"batch={subdir}.parquet")) - assert x in df.batch_id.values_host.tolist() - assert (x + 1) in df.batch_id.values_host.tolist() + assert ((df.batch_id == x) | (df.batch_id == (x + 1))).all() + assert ((df.hop_id == 0) | (df.hop_id == 1)).all() assert ( - cudf.read_parquet(os.path.join(tld, "batch=6-7.parquet")).batch_id == 6 + cudf.read_parquet(os.path.join(tld, "batch=6-6.parquet")).batch_id == 6 ).all() @@ -138,7 +135,6 @@ def test_bulk_sampler_large_batch_size(): source="src", destination="dst", edge_attr=["wgt", "eid", "etp"], - legacy_renum_only=True, ) tempdir_object = tempfile.TemporaryDirectory() diff --git a/python/cugraph/cugraph/tests/sampling/test_bulk_sampler_mg.py b/python/cugraph/cugraph/tests/sampling/test_bulk_sampler_mg.py index 25fe978da49..8bb16e03252 100644 --- a/python/cugraph/cugraph/tests/sampling/test_bulk_sampler_mg.py +++ b/python/cugraph/cugraph/tests/sampling/test_bulk_sampler_mg.py @@ -35,7 +35,6 @@ def test_bulk_sampler_simple(dask_client): source="src", destination="dst", 
edge_attr=["wgt", "eid", "etp"], - legacy_renum_only=True, ) tempdir_object = tempfile.TemporaryDirectory() @@ -67,7 +66,6 @@ def test_bulk_sampler_simple(dask_client): @pytest.mark.mg -@pytest.mark.skip("broken") def test_bulk_sampler_remainder(dask_client): el = karate.get_edgelist().reset_index().rename(columns={"index": "eid"}) el["eid"] = el["eid"].astype("int32") @@ -79,7 +77,6 @@ def test_bulk_sampler_remainder(dask_client): source="src", destination="dst", edge_attr=["wgt", "eid", "etp"], - legacy_renum_only=True, ) tempdir_object = tempfile.TemporaryDirectory() @@ -123,11 +120,11 @@ def test_bulk_sampler_remainder(dask_client): subdir = f"{x}-{x+1}" df = cudf.read_parquet(os.path.join(tld, f"batch={subdir}.parquet")) - assert x in df.batch_id.values_host.tolist() - assert (x + 1) in df.batch_id.values_host.tolist() + assert ((df.batch_id == x) | (df.batch_id == (x + 1))).all() + assert ((df.hop_id == 0) | (df.hop_id == 1)).all() assert ( - cudf.read_parquet(os.path.join(tld, "batch=6-7.parquet")).batch_id == 6 + cudf.read_parquet(os.path.join(tld, "batch=6-6.parquet")).batch_id == 6 ).all() @@ -143,7 +140,6 @@ def test_bulk_sampler_mg_graph_sg_input(dask_client): source="src", destination="dst", edge_attr=["wgt", "eid", "etp"], - legacy_renum_only=True, ) tempdir_object = tempfile.TemporaryDirectory() diff --git a/python/cugraph/cugraph/tests/sampling/test_egonet_mg.py b/python/cugraph/cugraph/tests/sampling/test_egonet_mg.py index 5ea5411fab9..96b5ec2ac3a 100644 --- a/python/cugraph/cugraph/tests/sampling/test_egonet_mg.py +++ b/python/cugraph/cugraph/tests/sampling/test_egonet_mg.py @@ -101,7 +101,6 @@ def input_expected_output(input_combo): destination="dst", edge_attr="value", renumber=True, - legacy_renum_only=True, store_transposed=True, ) diff --git a/python/cugraph/cugraph/tests/sampling/test_random_walks_mg.py b/python/cugraph/cugraph/tests/sampling/test_random_walks_mg.py index 4db9ae91601..df1db0a95a9 100644 --- a/python/cugraph/cugraph/tests/sampling/test_random_walks_mg.py +++ b/python/cugraph/cugraph/tests/sampling/test_random_walks_mg.py @@ -195,7 +195,6 @@ def input_graph(request): destination="dst", edge_attr="value", renumber=True, - legacy_renum_only=True, store_transposed=True, ) diff --git a/python/cugraph/cugraph/tests/sampling/test_uniform_neighbor_sample.py b/python/cugraph/cugraph/tests/sampling/test_uniform_neighbor_sample.py index 074cebc2091..6fe16d97713 100644 --- a/python/cugraph/cugraph/tests/sampling/test_uniform_neighbor_sample.py +++ b/python/cugraph/cugraph/tests/sampling/test_uniform_neighbor_sample.py @@ -71,9 +71,7 @@ def input_combo(request): ) G = cugraph.Graph(directed=directed) - G.from_cudf_edgelist( - df, source="src", destination="dst", edge_attr="value", legacy_renum_only=True - ) + G.from_cudf_edgelist(df, source="src", destination="dst", edge_attr="value") parameters["Graph"] = G @@ -233,7 +231,7 @@ def test_uniform_neighbor_sample_tree(directed): ) G = cugraph.Graph(directed=directed) - G.from_cudf_edgelist(df, "src", "dst", "value", legacy_renum_only=True) + G.from_cudf_edgelist(df, "src", "dst", "value") # # Make sure the old C++ renumbering was skipped because: @@ -328,7 +326,6 @@ def test_uniform_neighbor_sample_edge_properties(return_offsets): source="src", destination="dst", edge_attr=["w", "eid", "etp"], - legacy_renum_only=True, ) sampling_results = uniform_neighbor_sample( @@ -388,7 +385,6 @@ def test_uniform_neighbor_sample_edge_properties_self_loops(): source="src", destination="dst", edge_attr=["w", "eid", "etp"], - 
legacy_renum_only=True, ) sampling_results = cugraph.uniform_neighbor_sample( @@ -442,7 +438,6 @@ def test_uniform_neighbor_sample_empty_start_list(): source="src", destination="dst", edge_attr=["w", "eid", "etp"], - legacy_renum_only=True, ) sampling_results = cugraph.uniform_neighbor_sample( diff --git a/python/cugraph/cugraph/tests/sampling/test_uniform_neighbor_sample_mg.py b/python/cugraph/cugraph/tests/sampling/test_uniform_neighbor_sample_mg.py index 3cb33d68ee7..76657eb634f 100644 --- a/python/cugraph/cugraph/tests/sampling/test_uniform_neighbor_sample_mg.py +++ b/python/cugraph/cugraph/tests/sampling/test_uniform_neighbor_sample_mg.py @@ -15,6 +15,7 @@ import os import pytest +import cupy import cudf import dask_cudf from pylibcugraph.testing.utils import gen_fixture_params_product @@ -95,7 +96,6 @@ def input_combo(request): destination="dst", edge_attr="value", store_transposed=False, - legacy_renum_only=True, ) parameters["MGGraph"] = dg @@ -219,9 +219,7 @@ def test_mg_uniform_neighbor_sample_tree(dask_client, directed): ) G = cugraph.Graph(directed=directed) - G.from_dask_cudf_edgelist( - ddf, "src", "dst", "value", store_transposed=False, legacy_renum_only=True - ) + G.from_dask_cudf_edgelist(ddf, "src", "dst", "value", store_transposed=False) # TODO: Incomplete, include more testing for tree graph as well as # for larger graphs @@ -275,9 +273,7 @@ def test_mg_uniform_neighbor_sample_unweighted(dask_client): df = dask_cudf.from_cudf(df, npartitions=2) G = cugraph.Graph() - G.from_dask_cudf_edgelist( - df, source="src", destination="dst", legacy_renum_only=True - ) + G.from_dask_cudf_edgelist(df, source="src", destination="dst") start_list = cudf.Series([0], dtype="int32") fanout_vals = [-1] @@ -311,7 +307,7 @@ def test_mg_uniform_neighbor_sample_ensure_no_duplicates(dask_client): mg_G = cugraph.MultiGraph(directed=True) mg_G.from_dask_cudf_edgelist( - dask_df, source="src", destination="dst", renumber=True, legacy_renum_only=True + dask_df, source="src", destination="dst", renumber=True ) output_df = cugraph.dask.uniform_neighbor_sample( @@ -349,7 +345,6 @@ def test_uniform_neighbor_sample_edge_properties(dask_client, return_offsets): source="src", destination="dst", edge_attr=["w", "eid", "etp"], - legacy_renum_only=True, ) dest_rank = [0, 1] @@ -422,7 +417,7 @@ def test_uniform_neighbor_sample_edge_properties(dask_client, return_offsets): @pytest.mark.mg -def test_uniform_neighbor_sample_edge_properties_self_loops(): +def test_uniform_neighbor_sample_edge_properties_self_loops(dask_client): df = dask_cudf.from_cudf( cudf.DataFrame( { @@ -442,7 +437,6 @@ def test_uniform_neighbor_sample_edge_properties_self_loops(): source="src", destination="dst", edge_attr=["w", "eid", "etp"], - legacy_renum_only=True, ) sampling_results = cugraph.dask.uniform_neighbor_sample( @@ -484,7 +478,9 @@ def test_uniform_neighbor_sample_edge_properties_self_loops(): @pytest.mark.skipif( int(os.getenv("DASK_NUM_WORKERS", 2)) < 2, reason="too few workers to test" ) -def test_uniform_neighbor_edge_properties_sample_small_start_list(with_replacement): +def test_uniform_neighbor_edge_properties_sample_small_start_list( + dask_client, with_replacement +): df = dask_cudf.from_cudf( cudf.DataFrame( { @@ -504,7 +500,6 @@ def test_uniform_neighbor_edge_properties_sample_small_start_list(with_replaceme source="src", destination="dst", edge_attr=["w", "eid", "etp"], - legacy_renum_only=True, ) cugraph.dask.uniform_neighbor_sample( @@ -518,7 +513,7 @@ def 
test_uniform_neighbor_edge_properties_sample_small_start_list(with_replaceme @pytest.mark.mg -def test_uniform_neighbor_sample_without_dask_inputs(): +def test_uniform_neighbor_sample_without_dask_inputs(dask_client): df = dask_cudf.from_cudf( cudf.DataFrame( { @@ -538,7 +533,6 @@ def test_uniform_neighbor_sample_without_dask_inputs(): source="src", destination="dst", edge_attr=["w", "eid", "etp"], - legacy_renum_only=True, ) sampling_results = cugraph.dask.uniform_neighbor_sample( @@ -573,6 +567,65 @@ def test_uniform_neighbor_sample_without_dask_inputs(): assert sorted(sampling_results.hop_id.values_host.tolist()) == [0, 0, 0, 1, 1, 1] +@pytest.mark.mg +@pytest.mark.parametrize("dataset", datasets) +@pytest.mark.parametrize("input_df", [cudf.DataFrame, dask_cudf.DataFrame]) +@pytest.mark.parametrize("max_batches", [2, 8, 16, 32]) +def test_uniform_neighbor_sample_batched(dask_client, dataset, input_df, max_batches): + num_workers = len(dask_client.scheduler_info()["workers"]) + + df = dataset.get_edgelist() + df["eid"] = cupy.arange(len(df), dtype=df["src"].dtype) + df["etp"] = cupy.zeros_like(df["eid"].to_cupy()) + ddf = dask_cudf.from_cudf(df, npartitions=num_workers) + + G = cugraph.Graph(directed=True) + G.from_dask_cudf_edgelist( + ddf, + source="src", + destination="dst", + edge_attr=["wgt", "eid", "etp"], + legacy_renum_only=True, + ) + + input_vertices = dask_cudf.concat([df.src, df.dst]).unique().compute() + assert isinstance(input_vertices, cudf.Series) + + input_vertices.index = cupy.random.permutation(len(input_vertices)) + + input_batch = cudf.Series( + cupy.random.randint(0, max_batches, len(input_vertices)), dtype="int32" + ) + input_batch.index = cupy.random.permutation(len(input_vertices)) + + if input_df == dask_cudf.DataFrame: + input_batch = dask_cudf.from_cudf(input_batch, npartitions=num_workers) + input_vertices = dask_cudf.from_cudf(input_vertices, npartitions=num_workers) + + sampling_results = cugraph.dask.uniform_neighbor_sample( + G, + start_list=input_vertices, + batch_id_list=input_batch, + fanout_vals=[5, 5], + with_replacement=False, + with_edge_properties=True, + ) + + for batch_id in range(max_batches): + output_starts_per_batch = ( + sampling_results[ + (sampling_results.batch_id == batch_id) & (sampling_results.hop_id == 0) + ] + .sources.nunique() + .compute() + ) + + input_starts_per_batch = len(input_batch[input_batch == batch_id]) + + # Should be <= to account for starts without outgoing edges + assert output_starts_per_batch <= input_starts_per_batch + + # ============================================================================= # Benchmarks # ============================================================================= @@ -581,7 +634,7 @@ def test_uniform_neighbor_sample_without_dask_inputs(): @pytest.mark.mg @pytest.mark.slow @pytest.mark.parametrize("n_samples", [1_000, 5_000, 10_000]) -def bench_uniform_neigbour_sample_email_eu_core(gpubenchmark, dask_client, n_samples): +def bench_uniform_neighbor_sample_email_eu_core(gpubenchmark, dask_client, n_samples): input_data_path = email_Eu_core.get_path() chunksize = dcg.get_chunksize(input_data_path) @@ -600,7 +653,6 @@ def bench_uniform_neigbour_sample_email_eu_core(gpubenchmark, dask_client, n_sam destination="dst", edge_attr="value", store_transposed=False, - legacy_renum_only=True, ) # Partition the dataframe to add in chunks srcs = dg.input_df["src"] diff --git a/python/cugraph/cugraph/tests/structure/test_graph.py b/python/cugraph/cugraph/tests/structure/test_graph.py index 
8a06a75e5ce..3734f6c9586 100644 --- a/python/cugraph/cugraph/tests/structure/test_graph.py +++ b/python/cugraph/cugraph/tests/structure/test_graph.py @@ -23,6 +23,7 @@ from cudf.testing.testing import assert_frame_equal import cugraph from cugraph.testing import utils +from cudf.testing import assert_series_equal import cupy @@ -456,59 +457,64 @@ def test_degree_functionality(graph_file): Gnx = nx.from_pandas_edgelist(M, source="0", target="1", create_using=nx.DiGraph()) - df_in_degree = G.in_degree() - df_out_degree = G.out_degree() - df_degree = G.degree() - - nx_in_degree = Gnx.in_degree() - nx_out_degree = Gnx.out_degree() - nx_degree = Gnx.degree() - - err_in_degree = 0 - err_out_degree = 0 - err_degree = 0 - for i in range(len(df_degree)): - in_deg = df_in_degree["degree"][i] - out_deg = df_out_degree["degree"][i] - if in_deg != nx_in_degree[df_in_degree["vertex"][i]]: - err_in_degree = err_in_degree + 1 - if out_deg != nx_out_degree[df_out_degree["vertex"][i]]: - err_out_degree = err_out_degree + 1 - if df_degree["degree"][i] != nx_degree[df_degree["vertex"][i]]: - err_degree = err_degree + 1 - assert err_in_degree == 0 - assert err_out_degree == 0 - assert err_degree == 0 + cu_in_degree = G.in_degree().sort_values(by="vertex", ignore_index=True) + cu_out_degree = G.out_degree().sort_values(by="vertex", ignore_index=True) + cu_degree = G.degree().sort_values(by="vertex", ignore_index=True) + cu_results = cu_degree + cu_results["in_degree"] = cu_in_degree["degree"] + cu_results["out_degree"] = cu_out_degree["degree"] -# Test -@pytest.mark.sg -@pytest.mark.parametrize("graph_file", utils.DATASETS) -def test_degrees_functionality(graph_file): - M = utils.read_csv_for_nx(graph_file) - cu_M = utils.read_csv_file(graph_file) + nx_in_degree = list(Gnx.in_degree()) + nx_out_degree = list(Gnx.out_degree()) + nx_degree = list(Gnx.degree()) - G = cugraph.Graph(directed=True) - G.from_cudf_edgelist(cu_M, source="0", destination="1", edge_attr="2") + nx_in_degree.sort(key=lambda v: v[0]) + nx_out_degree.sort(key=lambda v: v[0]) + nx_degree.sort(key=lambda v: v[0]) - Gnx = nx.from_pandas_edgelist(M, source="0", target="1", create_using=nx.DiGraph()) + nx_results = cudf.DataFrame() + nx_results["vertex"] = dict(nx_degree).keys() + nx_results["degree"] = dict(nx_degree).values() + nx_results["in_degree"] = dict(nx_in_degree).values() + nx_results["out_degree"] = dict(nx_out_degree).values() - df = G.degrees() + assert_series_equal( + cu_results["in_degree"], + nx_results["in_degree"], + check_names=False, + check_dtype=False, + ) - nx_in_degree = Gnx.in_degree() - nx_out_degree = Gnx.out_degree() + assert_series_equal( + cu_results["out_degree"], + nx_results["out_degree"], + check_names=False, + check_dtype=False, + ) - err_in_degree = 0 - err_out_degree = 0 + assert_series_equal( + cu_results["degree"], + nx_results["degree"], + check_names=False, + check_dtype=False, + ) - for i in range(len(df)): - if df["in_degree"][i] != nx_in_degree[df["vertex"][i]]: - err_in_degree = err_in_degree + 1 - if df["out_degree"][i] != nx_out_degree[df["vertex"][i]]: - err_out_degree = err_out_degree + 1 + # testing degrees functionality + df = G.degrees().sort_values(by="vertex", ignore_index=True) + assert_series_equal( + df["in_degree"], + nx_results["in_degree"], + check_names=False, + check_dtype=False, + ) - assert err_in_degree == 0 - assert err_out_degree == 0 + assert_series_equal( + df["out_degree"], + nx_results["out_degree"], + check_names=False, + check_dtype=False, + ) # Test @@ -719,7 +725,7 @@ def 
test_graph_init_with_multigraph(): @pytest.mark.parametrize("graph_file", utils.DATASETS) def test_create_sg_graph(graph_file): el = utils.read_csv_file(graph_file) - G = cugraph.from_cudf_edgelist(el, source="0", destination="1", edge_attr="2") + G = cugraph.from_cudf_edgelist(el, source=["0"], destination=["1"], edge_attr="2") # ensure graph exists assert G._plc_graph is not None @@ -758,7 +764,6 @@ def test_create_graph_with_edge_ids(graph_file): source="0", destination="1", edge_attr=["2", "id", "etype"], - legacy_renum_only=True, ) G = cugraph.Graph(directed=True) @@ -767,7 +772,6 @@ def test_create_graph_with_edge_ids(graph_file): source="0", destination="1", edge_attr=["2", "id", "etype"], - legacy_renum_only=True, ) H = G.to_undirected() @@ -775,6 +779,41 @@ def test_create_graph_with_edge_ids(graph_file): assert not H.is_directed() +@pytest.mark.sg +@pytest.mark.parametrize("graph_file", utils.DATASETS) +def test_create_graph_with_edge_ids_check_renumbering(graph_file): + el = utils.read_csv_file(graph_file) + el = el.rename(columns={"0": "0_src", "1": "0_dst", "2": "weights"}) + el["1_src"] = el["0_src"] + 1000 + el["1_dst"] = el["0_dst"] + 1000 + + el["edge_id"] = cupy.random.permutation(len(el)) + el["edge_id"] = el["edge_id"].astype(el["1_dst"].dtype) + el["edge_type"] = cupy.random.random_integers(4, size=len(el)) + el["edge_type"] = el["edge_type"].astype("int32") + + G = cugraph.Graph(directed=True) + G.from_cudf_edgelist( + el, + source=["0_src", "1_src"], + destination=["0_dst", "1_dst"], + edge_attr=["weights", "edge_id", "edge_type"], + ) + assert G.renumbered is True + + renumbered_df = G.edgelist.edgelist_df + + unrenumbered_df = G.unrenumber(renumbered_df, "src") + unrenumbered_df = G.unrenumber(unrenumbered_df, "dst") + + assert_frame_equal( + el.sort_values(by=["0_src", "0_dst"]).reset_index(drop=True), + unrenumbered_df.sort_values(by=["0_src", "0_dst"]).reset_index(drop=True), + check_dtype=False, + check_like=True, + ) + + # Test @pytest.mark.sg @pytest.mark.parametrize("graph_file", utils.DATASETS) @@ -794,3 +833,33 @@ def test_density(graph_file): M_G = cugraph.MultiGraph() with pytest.raises(TypeError): M_G.density() + + +# Test +@pytest.mark.sg +@pytest.mark.parametrize("graph_file", utils.DATASETS_SMALL) +@pytest.mark.parametrize("random_state", [42, None]) +@pytest.mark.parametrize("num_vertices", [5, None]) +def test_select_random_vertices(graph_file, random_state, num_vertices): + cu_M = utils.read_csv_file(graph_file) + + G = cugraph.Graph(directed=True) + G.from_cudf_edgelist(cu_M, source="0", destination="1", edge_attr="2") + + if num_vertices is None: + # Select all vertices + num_vertices = G.number_of_nodes() + + sampled_vertices = G.select_random_vertices(random_state, num_vertices) + + original_vertices_df = cudf.DataFrame() + sampled_vertices_df = cudf.DataFrame() + + sampled_vertices_df["sampled_vertices"] = sampled_vertices + original_vertices_df["original_vertices"] = G.nodes() + + join = sampled_vertices_df.merge( + original_vertices_df, left_on="sampled_vertices", right_on="original_vertices" + ) + + assert len(join) == len(sampled_vertices) diff --git a/python/cugraph/cugraph/tests/structure/test_graph_mg.py b/python/cugraph/cugraph/tests/structure/test_graph_mg.py index 9b947c511f3..b1b8d65c5a6 100644 --- a/python/cugraph/cugraph/tests/structure/test_graph_mg.py +++ b/python/cugraph/cugraph/tests/structure/test_graph_mg.py @@ -21,6 +21,7 @@ from pylibcugraph import bfs as pylibcugraph_bfs from pylibcugraph import ResourceHandle from 
pylibcugraph.testing.utils import gen_fixture_params_product +from cudf.testing.testing import assert_frame_equal import cugraph import cugraph.dask as dcg @@ -45,9 +46,7 @@ def setup_function(): datasets = utils.DATASETS_UNDIRECTED + utils.DATASETS_UNRENUMBERED fixture_params = gen_fixture_params_product( - (datasets, "graph_file"), - (IS_DIRECTED, "directed"), - ([True, False], "legacy_renum_only"), + (datasets, "graph_file"), (IS_DIRECTED, "directed") ) @@ -57,13 +56,10 @@ def input_combo(request): Simply return the current combination of params as a dictionary for use in tests or other parameterized fixtures. """ - parameters = dict( - zip(("graph_file", "directed", "legacy_renum_only"), request.param) - ) + parameters = dict(zip(("graph_file", "directed"), request.param)) input_data_path = parameters["graph_file"] directed = parameters["directed"] - legacy_renum_only = parameters["legacy_renum_only"] chunksize = dcg.get_chunksize(input_data_path) ddf = dask_cudf.read_csv( @@ -76,13 +72,7 @@ def input_combo(request): parameters["input_df"] = ddf dg = cugraph.Graph(directed=directed) - dg.from_dask_cudf_edgelist( - ddf, - source="src", - destination="dst", - edge_attr="value", - legacy_renum_only=legacy_renum_only, - ) + dg.from_dask_cudf_edgelist(ddf, source="src", destination="dst", edge_attr="value") parameters["MGGraph"] = dg @@ -234,7 +224,47 @@ def test_create_graph_with_edge_ids(dask_client, graph_file): source="0", destination="1", edge_attr=["2", "id", "etype"], - legacy_renum_only=True, + ) + + +@pytest.mark.mg +@pytest.mark.parametrize("graph_file", utils.DATASETS) +def test_create_graph_with_edge_ids_check_renumbering(dask_client, graph_file): + el = utils.read_csv_file(graph_file) + el = el.rename(columns={"0": "0_src", "1": "0_dst", "2": "value"}) + el["1_src"] = el["0_src"] + 1000 + el["1_dst"] = el["0_dst"] + 1000 + + el["edge_id"] = cupy.random.permutation(len(el)) + el["edge_id"] = el["edge_id"].astype(el["1_dst"].dtype) + el["edge_type"] = cupy.random.random_integers(4, size=len(el)) + el["edge_type"] = el["edge_type"].astype("int32") + + num_workers = len(Comms.get_workers()) + el = dask_cudf.from_cudf(el, npartitions=num_workers) + + G = cugraph.Graph(directed=True) + G.from_dask_cudf_edgelist( + el, + source=["0_src", "1_src"], + destination=["0_dst", "1_dst"], + edge_attr=["value", "edge_id", "edge_type"], + ) + assert G.renumbered is True + + renumbered_df = G.edgelist.edgelist_df + unrenumbered_df = G.unrenumber(renumbered_df, "renumbered_src") + unrenumbered_df = G.unrenumber(unrenumbered_df, "renumbered_dst") + + unrenumbered_df.columns = unrenumbered_df.columns.str.replace(r"renumbered_", "") + + assert_frame_equal( + el.compute().sort_values(by=["0_src", "0_dst"]).reset_index(drop=True), + unrenumbered_df.compute() + .sort_values(by=["0_src", "0_dst"]) + .reset_index(drop=True), + check_dtype=False, + check_like=True, ) @@ -278,3 +308,30 @@ def test_mg_graph_copy(): G = cugraph.MultiGraph(directed=True) G_c = copy.deepcopy(G) assert type(G) == type(G_c) + + +@pytest.mark.mg +@pytest.mark.parametrize("random_state", [42, None]) +@pytest.mark.parametrize("num_vertices", [5, None]) +def test_mg_select_random_vertices( + dask_client, input_combo, random_state, num_vertices +): + G = input_combo["MGGraph"] + + if num_vertices is None: + # Select all vertices + num_vertices = len(G.nodes()) + + sampled_vertices = G.select_random_vertices(random_state, num_vertices).compute() + + original_vertices_df = cudf.DataFrame() + sampled_vertices_df = cudf.DataFrame() + + 
sampled_vertices_df["sampled_vertices"] = sampled_vertices + original_vertices_df["original_vertices"] = G.nodes().compute() + + join = sampled_vertices_df.merge( + original_vertices_df, left_on="sampled_vertices", right_on="original_vertices" + ) + + assert len(join) == len(sampled_vertices) diff --git a/python/cugraph/cugraph/tests/traversal/test_sssp_mg.py b/python/cugraph/cugraph/tests/traversal/test_sssp_mg.py index 5d640542c7e..0a138fd95ed 100644 --- a/python/cugraph/cugraph/tests/traversal/test_sssp_mg.py +++ b/python/cugraph/cugraph/tests/traversal/test_sssp_mg.py @@ -65,7 +65,7 @@ def test_dask_sssp(dask_client, directed): g.from_cudf_edgelist(df, "src", "dst", "value", renumber=True) dg = cugraph.Graph(directed=directed) - dg.from_dask_cudf_edgelist(ddf, "src", "dst", "value", legacy_renum_only=True) + dg.from_dask_cudf_edgelist(ddf, "src", "dst", "value") expected_dist = cugraph.sssp(g, 0) print(expected_dist) @@ -106,7 +106,6 @@ def test_dask_unweighted_sssp(dask_client): ddf, source="src", destination="dst", - legacy_renum_only=True, store_transposed=True, ) diff --git a/python/cugraph/cugraph/traversal/bfs.py b/python/cugraph/cugraph/traversal/bfs.py index 96e8eb02a77..a200ba9b5d8 100644 --- a/python/cugraph/cugraph/traversal/bfs.py +++ b/python/cugraph/cugraph/traversal/bfs.py @@ -128,9 +128,6 @@ def bfs( Find the distances and predecessors for a breadth first traversal of a graph. - Note: This is a pylibcugraph-enabled algorithm, which requires that the - graph was created with legacy_renum_only=True. - Parameters ---------- G : cugraph.Graph, networkx.Graph, CuPy or SciPy sparse matrix diff --git a/python/cugraph/cugraph/traversal/sssp.py b/python/cugraph/cugraph/traversal/sssp.py index c7aa0c2939c..9557650cbbc 100644 --- a/python/cugraph/cugraph/traversal/sssp.py +++ b/python/cugraph/cugraph/traversal/sssp.py @@ -223,15 +223,15 @@ def sssp( ) warnings.warn(warning_msg, PendingDeprecationWarning) + if not G.has_node(source): + raise ValueError("Graph does not contain source vertex") + if G.renumbered: if isinstance(source, cudf.DataFrame): source = G.lookup_internal_vertex_id(source, source.columns).iloc[0] else: source = G.lookup_internal_vertex_id(cudf.Series([source]))[0] - if source is cudf.NA: - raise ValueError("Starting vertex should be between 0 to number of vertices") - if cutoff is None: cutoff = np.inf diff --git a/python/cugraph/cugraph/utilities/nx_factory.py b/python/cugraph/cugraph/utilities/nx_factory.py index 8763f0dd453..d712e902df2 100644 --- a/python/cugraph/cugraph/utilities/nx_factory.py +++ b/python/cugraph/cugraph/utilities/nx_factory.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -22,25 +22,29 @@ from .utils import import_optional import cudf from cudf import from_pandas +from cudf.api.types import is_integer_dtype # nx will be a MissingModule instance if NetworkX is not installed (any # attribute access on a MissingModule instance results in a RuntimeError). 
nx = import_optional("networkx") -def convert_unweighted_to_gdf(NX_G): +def convert_unweighted_to_gdf(NX_G, vertex_type="int32"): _edges = NX_G.edges(data=False) src = [s for s, _ in _edges] dst = [d for _, d in _edges] _gdf = cudf.DataFrame() - _gdf["src"] = src - _gdf["dst"] = dst + _gdf["src"] = cudf.Series(src) + _gdf["dst"] = cudf.Series(dst) + + if is_integer_dtype(_gdf["src"]) or is_integer_dtype(_gdf["dst"]): + _gdf = _gdf.astype(vertex_type) return _gdf -def convert_weighted_named_to_gdf(NX_G, weight): +def convert_weighted_named_to_gdf(NX_G, weight, vertex_type="int32"): _edges = NX_G.edges(data=weight) src = [s for s, _, _ in _edges] @@ -48,18 +52,22 @@ def convert_weighted_named_to_gdf(NX_G, weight): wt = [w for _, _, w in _edges] _gdf = cudf.DataFrame() - _gdf["src"] = src - _gdf["dst"] = dst + _gdf["src"] = cudf.Series(src) + _gdf["dst"] = cudf.Series(dst) _gdf["weight"] = wt + if is_integer_dtype(_gdf["src"]) or is_integer_dtype(_gdf["dst"]): + _gdf = _gdf.astype({"src": vertex_type, "dst": vertex_type}) + # FIXME: The weight dtype is hardcoded. _gdf = _gdf.astype({"weight": "float32"}) return _gdf -def convert_weighted_unnamed_to_gdf(NX_G): +def convert_weighted_unnamed_to_gdf(NX_G, vertex_type="int32"): _pdf = nx.to_pandas_edgelist(NX_G) + nx_col = ["source", "target"] wt_col = [col for col in _pdf.columns if col not in nx_col] if len(wt_col) != 1: @@ -69,10 +77,16 @@ def convert_weighted_unnamed_to_gdf(NX_G): _pdf.rename(columns={wt_col[0]: "weight"}) _gdf = from_pandas(_pdf) + + if is_integer_dtype(_gdf["source"]) or is_integer_dtype(_gdf["target"]): + _gdf = _gdf.astype({"source": vertex_type, "target": vertex_type}) + return _gdf -def convert_from_nx(nxG, weight=None, do_renumber=True, store_transposed=False): +def convert_from_nx( + nxG, weight=None, do_renumber=True, store_transposed=False, vertex_type="int32" +): """ Convert a NetworkX Graph into a cuGraph Graph. This might not be the most effecient way since the @@ -93,6 +107,9 @@ def convert_from_nx(nxG, weight=None, do_renumber=True, store_transposed=False): store_transposed : boolean, defaukt is False should the cuGraph Graph store the transpose of the graph + vertex_type : str, default is "int32" + Vertex type + Returns ------- G : cuGraph Graph @@ -111,7 +128,7 @@ def convert_from_nx(nxG, weight=None, do_renumber=True, store_transposed=False): is_weighted = nx.is_weighted(nxG) if is_weighted is False: - _gdf = convert_unweighted_to_gdf(nxG) + _gdf = convert_unweighted_to_gdf(nxG, vertex_type) G.from_cudf_edgelist( _gdf, source="src", @@ -122,7 +139,7 @@ def convert_from_nx(nxG, weight=None, do_renumber=True, store_transposed=False): ) else: if weight is None: - _gdf = convert_weighted_unnamed_to_gdf(nxG) + _gdf = convert_weighted_unnamed_to_gdf(nxG, vertex_type) G.from_cudf_edgelist( _gdf, source="source", @@ -132,7 +149,7 @@ def convert_from_nx(nxG, weight=None, do_renumber=True, store_transposed=False): store_transposed=store_transposed, ) else: - _gdf = convert_weighted_named_to_gdf(nxG, weight) + _gdf = convert_weighted_named_to_gdf(nxG, weight, vertex_type) G.from_cudf_edgelist( _gdf, source="src", diff --git a/python/cugraph/cugraph/utilities/utils.py b/python/cugraph/cugraph/utilities/utils.py index 8cbdf6a8c21..5384175d201 100644 --- a/python/cugraph/cugraph/utilities/utils.py +++ b/python/cugraph/cugraph/utilities/utils.py @@ -320,7 +320,9 @@ def ensure_cugraph_obj(obj, nx_weight_attr=None, matrix_graph_type=None): # Nx graphs may be needed. 
From the Nx docs: # | Many NetworkX algorithms designed for weighted graphs use # | an edge attribute (by default `weight`) to hold a numerical value. -def ensure_cugraph_obj_for_nx(obj, nx_weight_attr="weight", store_transposed=False): +def ensure_cugraph_obj_for_nx( + obj, nx_weight_attr="weight", store_transposed=False, vertex_type="int32" +): """ Ensures a cuGraph Graph-type obj is returned for either cuGraph or Nx Graph-type objs. If obj is a Nx type, @@ -332,7 +334,10 @@ def ensure_cugraph_obj_for_nx(obj, nx_weight_attr="weight", store_transposed=Fal if is_nx_graph_type(input_type): return ( convert_from_nx( - obj, weight=nx_weight_attr, store_transposed=store_transposed + obj, + weight=nx_weight_attr, + store_transposed=store_transposed, + vertex_type=vertex_type, ), True, ) diff --git a/python/cugraph/pyproject.toml b/python/cugraph/pyproject.toml index dd76b792923..f41e9774f08 100644 --- a/python/cugraph/pyproject.toml +++ b/python/cugraph/pyproject.toml @@ -3,16 +3,16 @@ [build-system] requires = [ - "wheel", - "setuptools", - "cython>=0.29,<0.30", - "scikit-build>=0.13.1", "cmake>=3.23.1,!=3.25.0", + "cython>=0.29,<0.30", "ninja", - "rmm==23.6.*", - "pylibraft==23.6.*", "pylibcugraph==23.6.*", -] + "pylibraft==23.6.*", + "rmm==23.6.*", + "scikit-build>=0.13.1", + "setuptools", + "wheel", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. build-backend = "setuptools.build_meta" [tool.pytest.ini_options] @@ -29,15 +29,18 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.8" dependencies = [ - "numba", - "dask-cuda==23.6.*", - "rmm==23.6.*", "cudf==23.6.*", - "raft-dask==23.6.*", + "cupy-cuda11x>=9.5.0,<12.0.0a0", + "dask-cuda==23.6.*", "dask-cudf==23.6.*", + "dask==2023.3.2", + "distributed==2023.3.2.1", + "numba>=0.56.2", "pylibcugraph==23.6.*", - "cupy-cuda11x", -] + "raft-dask==23.6.*", + "rmm==23.6.*", + "ucx-py==0.32.*", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ "Intended Audience :: Developers", "Programming Language :: Python", @@ -47,19 +50,20 @@ classifiers = [ [project.optional-dependencies] test = [ + "aiohttp", + "fsspec[http]>=0.6.0", + "networkx>=2.5.1", + "numpy>=1.21", + "pandas", "pytest", - "pytest-xdist", "pytest-benchmark", - "scipy", - "numpy", - "pandas", - "networkx>=2.5.1", - "scikit-learn>=0.23.1", + "pytest-cov", + "pytest-xdist", "python-louvain", - # cudf will use fsspec but is protocol independent. cugraph tests - # specifically require http for the test files it asks cudf to read. - "fsspec[http]>=0.6.0", -] + "requests", + "scikit-learn>=0.23.1", + "scipy", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. 
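The nx_factory changes earlier in this patch thread a vertex_type argument (default "int32") through each NetworkX-to-cudf conversion helper, casting integer vertex columns instead of keeping whatever dtype cudf infers from the Python lists. A small sketch of the resulting helper behaviour, assuming NetworkX is installed (the path_graph input is illustrative):

    import networkx as nx
    from cugraph.utilities.nx_factory import convert_unweighted_to_gdf

    nxG = nx.path_graph(4)                                         # integer vertex ids
    gdf_default = convert_unweighted_to_gdf(nxG)                   # src/dst cast to int32
    gdf_wide = convert_unweighted_to_gdf(nxG, vertex_type="int64") # opt into 64-bit ids
    print(gdf_default.dtypes)
    print(gdf_wide.dtypes)

The same vertex_type value flows through convert_from_nx and ensure_cugraph_obj_for_nx, so algorithm wrappers can request a wider vertex dtype without touching the conversion code.
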
[project.urls] Homepage = "https://github.com/rapidsai/cugraph" diff --git a/python/pylibcugraph/pylibcugraph/CMakeLists.txt b/python/pylibcugraph/pylibcugraph/CMakeLists.txt index b1b52128d99..f4a9f40431d 100644 --- a/python/pylibcugraph/pylibcugraph/CMakeLists.txt +++ b/python/pylibcugraph/pylibcugraph/CMakeLists.txt @@ -17,13 +17,20 @@ add_subdirectory(internal_types) add_subdirectory(testing) set(cython_sources + analyze_clustering_edge_cut.pyx + analyze_clustering_modularity.pyx + analyze_clustering_ratio_cut.pyx + balanced_cut_clustering.pyx + betweenness_centrality.pyx bfs.pyx core_number.pyx + ecg.pyx egonet.pyx eigenvector_centrality.pyx graph_properties.pyx graphs.pyx hits.pyx + induced_subgraph.pyx k_core.pyx jaccard_coefficients.pyx sorensen_coefficients.pyx @@ -35,6 +42,8 @@ set(cython_sources personalized_pagerank.pyx random.pyx resource_handle.pyx + spectral_modularity_maximization.pyx + select_random_vertices.pyx sssp.pyx triangle_count.pyx two_hop_neighbors.pyx diff --git a/python/pylibcugraph/pylibcugraph/__init__.py b/python/pylibcugraph/pylibcugraph/__init__.py index 0337e6a4fcf..6c04e260579 100644 --- a/python/pylibcugraph/pylibcugraph/__init__.py +++ b/python/pylibcugraph/pylibcugraph/__init__.py @@ -57,7 +57,26 @@ from pylibcugraph.uniform_random_walks import uniform_random_walks +from pylibcugraph.betweenness_centrality import betweenness_centrality + +from pylibcugraph.induced_subgraph import induced_subgraph + +from pylibcugraph.ecg import ecg + +from pylibcugraph.balanced_cut_clustering import balanced_cut_clustering + +from pylibcugraph.spectral_modularity_maximization import ( + spectral_modularity_maximization, +) + +from pylibcugraph.analyze_clustering_modularity import analyze_clustering_modularity + +from pylibcugraph.analyze_clustering_edge_cut import analyze_clustering_edge_cut + +from pylibcugraph.analyze_clustering_ratio_cut import analyze_clustering_ratio_cut + from pylibcugraph.random import CuGraphRandomState +from pylibcugraph.select_random_vertices import select_random_vertices __version__ = "23.06.00" diff --git a/python/pylibcugraph/pylibcugraph/_cugraph_c/centrality_algorithms.pxd b/python/pylibcugraph/pylibcugraph/_cugraph_c/centrality_algorithms.pxd index 8e8b1c8e923..06838256f30 100644 --- a/python/pylibcugraph/pylibcugraph/_cugraph_c/centrality_algorithms.pxd +++ b/python/pylibcugraph/pylibcugraph/_cugraph_c/centrality_algorithms.pxd @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
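The __init__.py additions above re-export every new algorithm at the top of the pylibcugraph namespace. A quick import smoke check of the surface added by this patch, assuming a build that compiled the new Cython sources listed in CMakeLists.txt:

    from pylibcugraph import (
        analyze_clustering_edge_cut,
        analyze_clustering_modularity,
        analyze_clustering_ratio_cut,
        balanced_cut_clustering,
        betweenness_centrality,
        ecg,
        induced_subgraph,
        select_random_vertices,
        spectral_modularity_maximization,
    )
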
# You may obtain a copy of the License at @@ -98,7 +98,7 @@ cdef extern from "cugraph_c/centrality_algorithms.h": cugraph_centrality_result_t** result, cugraph_error_t** error ) - + ########################################################################### # katz centrality cdef cugraph_error_code_t \ @@ -124,17 +124,17 @@ cdef extern from "cugraph_c/centrality_algorithms.h": cugraph_hits_result_get_vertices( cugraph_hits_result_t* result ) - + cdef cugraph_type_erased_device_array_view_t* \ cugraph_hits_result_get_hubs( cugraph_hits_result_t* result ) - + cdef cugraph_type_erased_device_array_view_t* \ cugraph_hits_result_get_authorities( cugraph_hits_result_t* result ) - + cdef void \ cugraph_hits_result_free( cugraph_hits_result_t* result @@ -153,3 +153,18 @@ cdef extern from "cugraph_c/centrality_algorithms.h": cugraph_hits_result_t** result, cugraph_error_t** error ) + + ########################################################################### + # betweenness centrality + + cdef cugraph_error_code_t \ + cugraph_betweenness_centrality( + const cugraph_resource_handle_t* handle, + cugraph_graph_t* graph, + const cugraph_type_erased_device_array_view_t* vertex_list, + bool_t normalized, + bool_t include_endpoints, + bool_t do_expensive_check, + cugraph_centrality_result_t** result, + cugraph_error_t** error + ) \ No newline at end of file diff --git a/python/pylibcugraph/pylibcugraph/_cugraph_c/community_algorithms.pxd b/python/pylibcugraph/pylibcugraph/_cugraph_c/community_algorithms.pxd index 584912ec8ba..be58072e1d9 100644 --- a/python/pylibcugraph/pylibcugraph/_cugraph_c/community_algorithms.pxd +++ b/python/pylibcugraph/pylibcugraph/_cugraph_c/community_algorithms.pxd @@ -112,3 +112,105 @@ cdef extern from "cugraph_c/community_algorithms.h": cugraph_induced_subgraph_result_t** result, cugraph_error_t** error ) + + ########################################################################### + # ECG + cdef cugraph_error_code_t \ + cugraph_ecg( + const cugraph_resource_handle_t* handle, + cugraph_graph_t* graph, + double min_weight, + size_t ensemble_size, + bool_t do_expensive_check, + cugraph_hierarchical_clustering_result_t** result, + cugraph_error_t** error + ) + + ########################################################################### + # Clustering + ctypedef struct cugraph_clustering_result_t: + pass + + cdef cugraph_type_erased_device_array_view_t* \ + cugraph_clustering_result_get_vertices( + cugraph_clustering_result_t* result + ) + + cdef cugraph_type_erased_device_array_view_t* \ + cugraph_clustering_result_get_clusters( + cugraph_clustering_result_t* result + ) + + cdef void \ + cugraph_clustering_result_free( + cugraph_clustering_result_t* result + ) + + # Balanced cut clustering + cdef cugraph_error_code_t \ + cugraph_balanced_cut_clustering( + const cugraph_resource_handle_t* handle, + cugraph_graph_t* graph, + size_t n_clusters, + size_t n_eigenvectors, + double evs_tolerance, + int evs_max_iterations, + double k_means_tolerance, + int k_means_max_iterations, + bool_t do_expensive_check, + cugraph_clustering_result_t** result, + cugraph_error_t** error + ) + + # Spectral modularity maximization + cdef cugraph_error_code_t \ + cugraph_spectral_modularity_maximization( + const cugraph_resource_handle_t* handle, + cugraph_graph_t* graph, + size_t n_clusters, + size_t n_eigenvectors, + double evs_tolerance, + int evs_max_iterations, + double k_means_tolerance, + int k_means_max_iterations, + bool_t do_expensive_check, + cugraph_clustering_result_t** result, + 
cugraph_error_t** error + ) + + # Analyze clustering modularity + cdef cugraph_error_code_t \ + cugraph_analyze_clustering_modularity( + const cugraph_resource_handle_t* handle, + cugraph_graph_t* graph, + size_t n_clusters, + const cugraph_type_erased_device_array_view_t* vertices, + const cugraph_type_erased_device_array_view_t* clusters, + double* score, + cugraph_error_t** error + ) + + # Analyze clustering edge cut + cdef cugraph_error_code_t \ + cugraph_analyze_clustering_edge_cut( + const cugraph_resource_handle_t* handle, + cugraph_graph_t* graph, + size_t n_clusters, + const cugraph_type_erased_device_array_view_t* vertices, + const cugraph_type_erased_device_array_view_t* clusters, + double* score, + cugraph_error_t** error + ) + + # Analyze clustering ratio cut + cdef cugraph_error_code_t \ + cugraph_analyze_clustering_ratio_cut( + const cugraph_resource_handle_t* handle, + cugraph_graph_t* graph, + size_t n_clusters, + const cugraph_type_erased_device_array_view_t* vertices, + const cugraph_type_erased_device_array_view_t* clusters, + double* score, + cugraph_error_t** error + ) + diff --git a/python/pylibcugraph/pylibcugraph/_cugraph_c/sampling_algorithms.pxd b/python/pylibcugraph/pylibcugraph/_cugraph_c/sampling_algorithms.pxd index 53c97119fe3..ad8a8cd33a0 100644 --- a/python/pylibcugraph/pylibcugraph/_cugraph_c/sampling_algorithms.pxd +++ b/python/pylibcugraph/pylibcugraph/_cugraph_c/sampling_algorithms.pxd @@ -35,6 +35,9 @@ from pylibcugraph._cugraph_c.algorithms cimport ( from pylibcugraph._cugraph_c.random cimport ( cugraph_rng_state_t, ) +from pylibcugraph._cugraph_c.array cimport ( + cugraph_type_erased_device_array_t, +) cdef extern from "cugraph_c/sampling_algorithms.h": ########################################################################### @@ -66,3 +69,14 @@ cdef extern from "cugraph_c/sampling_algorithms.h": cugraph_sample_result_t** result, cugraph_error_t** error ) + + # random vertices selection + cdef cugraph_error_code_t \ + cugraph_select_random_vertices( + const cugraph_resource_handle_t* handle, + const cugraph_graph_t* graph, + cugraph_rng_state_t* rng_state, + size_t num_vertices, + cugraph_type_erased_device_array_t** vertices, + cugraph_error_t** error + ) diff --git a/python/pylibcugraph/pylibcugraph/analyze_clustering_edge_cut.pyx b/python/pylibcugraph/pylibcugraph/analyze_clustering_edge_cut.pyx new file mode 100644 index 00000000000..60613f27a0d --- /dev/null +++ b/python/pylibcugraph/pylibcugraph/analyze_clustering_edge_cut.pyx @@ -0,0 +1,138 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
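The clustering entry points declared in the .pxd files above surface in Python through the wrappers added in the rest of this patch. Their docstring examples are still marked FIXME, so the following is a hedged, corrected sketch of the intended call sequence rather than verified output; the toy two-community edge list and the small solver settings are illustrative assumptions:

    import cupy
    import numpy
    import pylibcugraph

    srcs = cupy.asarray([0, 1, 2, 3, 4, 5], dtype=numpy.int32)
    dsts = cupy.asarray([1, 2, 0, 4, 5, 3], dtype=numpy.int32)
    wgts = cupy.asarray([1.0] * 6, dtype=numpy.float32)

    handle = pylibcugraph.ResourceHandle()
    props = pylibcugraph.GraphProperties(is_symmetric=True, is_multigraph=False)
    G = pylibcugraph.SGGraph(handle, props, srcs, dsts, wgts,
                             store_transposed=True, renumber=False,
                             do_expensive_check=False)

    # Partition the graph, then score the partition it produced.
    vertex, cluster = pylibcugraph.spectral_modularity_maximization(
        handle, G, num_clusters=2, num_eigen_vects=2,
        evs_tolerance=0.00001, evs_max_iter=100,
        kmean_tolerance=0.00001, kmean_max_iter=100,
        do_expensive_check=False)

    score = pylibcugraph.analyze_clustering_modularity(
        handle, G, num_clusters=2, vertex=vertex, cluster=cluster)
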
+ +# Have cython use python 3 syntax +# cython: language_level = 3 + + +from pylibcugraph._cugraph_c.resource_handle cimport ( + cugraph_resource_handle_t, +) +from pylibcugraph._cugraph_c.error cimport ( + cugraph_error_code_t, + cugraph_error_t, +) +from pylibcugraph._cugraph_c.array cimport ( + cugraph_type_erased_device_array_view_t, + cugraph_type_erased_device_array_view_free, +) +from pylibcugraph._cugraph_c.graph cimport ( + cugraph_graph_t, +) +from pylibcugraph._cugraph_c.community_algorithms cimport ( + cugraph_analyze_clustering_edge_cut, +) + +from pylibcugraph.resource_handle cimport ( + ResourceHandle, +) +from pylibcugraph.graphs cimport ( + _GPUGraph, +) +from pylibcugraph.utils cimport ( + assert_success, + create_cugraph_type_erased_device_array_view_from_py_obj +) + + +def analyze_clustering_edge_cut(ResourceHandle resource_handle, + _GPUGraph graph, + size_t num_clusters, + vertex, + cluster, + ): + """ + Compute edge cut score of the specified clustering. + + Parameters + ---------- + resource_handle : ResourceHandle + Handle to the underlying device resources needed for referencing data + and running algorithms. + + graph : SGGraph + The input graph. + + num_clusters : size_t + Specifies the number of clusters to find, must be greater than 1. + + vertex : device array type + Vertex ids from the clustering to analyze. + + cluster : device array type + Cluster ids from the clustering to analyze. + + Returns + ------- + The edge cut score of the specified clustering. + + Examples + -------- + >>> import pylibcugraph, cupy, numpy + >>> srcs = cupy.asarray([0, 1, 2], dtype=numpy.int32) + >>> dsts = cupy.asarray([1, 2, 0], dtype=numpy.int32) + >>> weights = cupy.asarray([1.0, 1.0, 1.0], dtype=numpy.float32) + >>> resource_handle = pylibcugraph.ResourceHandle() + >>> graph_props = pylibcugraph.GraphProperties( + ... is_symmetric=True, is_multigraph=False) + >>> G = pylibcugraph.SGGraph( + ... resource_handle, graph_props, srcs, dsts, weights, + ... store_transposed=True, renumber=False, do_expensive_check=False) + >>> (vertex, cluster) = pylibcugraph.spectral_modularity_maximization( + ... resource_handle, G, num_clusters=5, num_eigen_vects=2, evs_tolerance=0.00001 + ... evs_max_iter=100, kmean_tolerance=0.00001, kmean_max_iter=100) + # FIXME: Fix docstring result. + >>> vertices + ############ + >>> clusters + ############ + >>> score = pylibcugraph.analyze_clustering_edge_cut( + ... 
resource_handle, G, num_clusters=5, vertex=vertex, cluster=cluster) + >>> score + ############ + + """ + + cdef double score = 0 + + cdef cugraph_resource_handle_t* c_resource_handle_ptr = \ + resource_handle.c_resource_handle_ptr + cdef cugraph_graph_t* c_graph_ptr = graph.c_graph_ptr + cdef cugraph_error_code_t error_code + cdef cugraph_error_t* error_ptr + + cdef cugraph_type_erased_device_array_view_t* \ + vertex_view_ptr = \ + create_cugraph_type_erased_device_array_view_from_py_obj( + vertex) + + cdef cugraph_type_erased_device_array_view_t* \ + cluster_view_ptr = \ + create_cugraph_type_erased_device_array_view_from_py_obj( + cluster) + + error_code = cugraph_analyze_clustering_edge_cut(c_resource_handle_ptr, + c_graph_ptr, + num_clusters, + vertex_view_ptr, + cluster_view_ptr, + &score, + &error_ptr) + assert_success(error_code, error_ptr, "cugraph_analyze_clustering_edge_cut") + + if vertex is not None: + cugraph_type_erased_device_array_view_free(vertex_view_ptr) + if cluster is not None: + cugraph_type_erased_device_array_view_free(cluster_view_ptr) + + return score diff --git a/python/pylibcugraph/pylibcugraph/analyze_clustering_modularity.pyx b/python/pylibcugraph/pylibcugraph/analyze_clustering_modularity.pyx new file mode 100644 index 00000000000..76ba48f52b7 --- /dev/null +++ b/python/pylibcugraph/pylibcugraph/analyze_clustering_modularity.pyx @@ -0,0 +1,142 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Have cython use python 3 syntax +# cython: language_level = 3 + + +from pylibcugraph._cugraph_c.resource_handle cimport ( + cugraph_resource_handle_t, +) +from pylibcugraph._cugraph_c.error cimport ( + cugraph_error_code_t, + cugraph_error_t, +) +from pylibcugraph._cugraph_c.array cimport ( + cugraph_type_erased_device_array_view_t, + cugraph_type_erased_device_array_view_free, +) +from pylibcugraph._cugraph_c.graph cimport ( + cugraph_graph_t, +) +from pylibcugraph._cugraph_c.community_algorithms cimport ( + cugraph_clustering_result_t, + cugraph_analyze_clustering_modularity, +) + +from pylibcugraph.resource_handle cimport ( + ResourceHandle, +) +from pylibcugraph.graphs cimport ( + _GPUGraph, +) +from pylibcugraph.utils cimport ( + assert_success, + create_cugraph_type_erased_device_array_view_from_py_obj +) + + +def analyze_clustering_modularity(ResourceHandle resource_handle, + _GPUGraph graph, + size_t num_clusters, + vertex, + cluster, + ): + """ + Compute modularity score of the specified clustering. + + Parameters + ---------- + resource_handle : ResourceHandle + Handle to the underlying device resources needed for referencing data + and running algorithms. + + graph : SGGraph + The input graph. + + num_clusters : size_t + Specifies the number of clusters to find, must be greater than 1. + + vertex : device array type + Vertex ids from the clustering to analyze. + + cluster : device array type + Cluster ids from the clustering to analyze. + + Returns + ------- + The modularity score of the specified clustering. 
+ + Examples + -------- + >>> import pylibcugraph, cupy, numpy + >>> srcs = cupy.asarray([0, 1, 2], dtype=numpy.int32) + >>> dsts = cupy.asarray([1, 2, 0], dtype=numpy.int32) + >>> weights = cupy.asarray([1.0, 1.0, 1.0], dtype=numpy.float32) + >>> resource_handle = pylibcugraph.ResourceHandle() + >>> graph_props = pylibcugraph.GraphProperties( + ... is_symmetric=True, is_multigraph=False) + >>> G = pylibcugraph.SGGraph( + ... resource_handle, graph_props, srcs, dsts, weights, + ... store_transposed=True, renumber=False, do_expensive_check=False) + >>> (vertex, cluster) = pylibcugraph.spectral_modularity_maximization( + ... resource_handle, G, num_clusters=5, num_eigen_vects=2, evs_tolerance=0.00001 + ... evs_max_iter=100, kmean_tolerance=0.00001, kmean_max_iter=100) + # FIXME: Fix docstring result. + >>> vertices + ############ + >>> clusters + ############ + >>> score = pylibcugraph.analyze_clustering_modularity( + ... resource_handle, G, num_clusters=5, vertex=vertex, cluster=cluster) + >>> score + ############ + + + """ + + cdef double score = 0 + + cdef cugraph_resource_handle_t* c_resource_handle_ptr = \ + resource_handle.c_resource_handle_ptr + cdef cugraph_graph_t* c_graph_ptr = graph.c_graph_ptr + cdef cugraph_clustering_result_t* result_ptr + cdef cugraph_error_code_t error_code + cdef cugraph_error_t* error_ptr + + cdef cugraph_type_erased_device_array_view_t* \ + vertex_view_ptr = \ + create_cugraph_type_erased_device_array_view_from_py_obj( + vertex) + + cdef cugraph_type_erased_device_array_view_t* \ + cluster_view_ptr = \ + create_cugraph_type_erased_device_array_view_from_py_obj( + cluster) + + + error_code = cugraph_analyze_clustering_modularity(c_resource_handle_ptr, + c_graph_ptr, + num_clusters, + vertex_view_ptr, + cluster_view_ptr, + &score, + &error_ptr) + assert_success(error_code, error_ptr, "cugraph_analyze_clustering_modularity") + + if vertex is not None: + cugraph_type_erased_device_array_view_free(vertex_view_ptr) + if cluster is not None: + cugraph_type_erased_device_array_view_free(cluster_view_ptr) + + return score diff --git a/python/pylibcugraph/pylibcugraph/analyze_clustering_ratio_cut.pyx b/python/pylibcugraph/pylibcugraph/analyze_clustering_ratio_cut.pyx new file mode 100644 index 00000000000..39b317e107d --- /dev/null +++ b/python/pylibcugraph/pylibcugraph/analyze_clustering_ratio_cut.pyx @@ -0,0 +1,138 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
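Both analysis wrappers completed above accept any device arrays of vertex ids and cluster assignments, so a clustering does not have to come from one of the spectral routines; a hand-built assignment can be scored directly. A minimal sketch, assuming two three-vertex communities (the graph and the assignment are illustrative):

    import cupy
    import numpy
    import pylibcugraph

    srcs = cupy.asarray([0, 1, 2, 3, 4, 5], dtype=numpy.int32)
    dsts = cupy.asarray([1, 2, 0, 4, 5, 3], dtype=numpy.int32)
    wgts = cupy.asarray([1.0] * 6, dtype=numpy.float32)
    handle = pylibcugraph.ResourceHandle()
    props = pylibcugraph.GraphProperties(is_symmetric=True, is_multigraph=False)
    G = pylibcugraph.SGGraph(handle, props, srcs, dsts, wgts,
                             store_transposed=True, renumber=False,
                             do_expensive_check=False)

    # Hand-written 2-way assignment: vertices 0-2 in cluster 0, 3-5 in cluster 1.
    vertex = cupy.arange(6, dtype=numpy.int32)
    cluster = cupy.asarray([0, 0, 0, 1, 1, 1], dtype=numpy.int32)

    modularity = pylibcugraph.analyze_clustering_modularity(
        handle, G, num_clusters=2, vertex=vertex, cluster=cluster)
    edge_cut = pylibcugraph.analyze_clustering_edge_cut(
        handle, G, num_clusters=2, vertex=vertex, cluster=cluster)
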
+ +# Have cython use python 3 syntax +# cython: language_level = 3 + + +from pylibcugraph._cugraph_c.resource_handle cimport ( + cugraph_resource_handle_t, +) +from pylibcugraph._cugraph_c.error cimport ( + cugraph_error_code_t, + cugraph_error_t, +) +from pylibcugraph._cugraph_c.array cimport ( + cugraph_type_erased_device_array_view_t, + cugraph_type_erased_device_array_view_free, +) +from pylibcugraph._cugraph_c.graph cimport ( + cugraph_graph_t, +) +from pylibcugraph._cugraph_c.community_algorithms cimport ( + cugraph_analyze_clustering_ratio_cut, +) + +from pylibcugraph.resource_handle cimport ( + ResourceHandle, +) +from pylibcugraph.graphs cimport ( + _GPUGraph, +) +from pylibcugraph.utils cimport ( + assert_success, + create_cugraph_type_erased_device_array_view_from_py_obj +) + + +def analyze_clustering_ratio_cut(ResourceHandle resource_handle, + _GPUGraph graph, + size_t num_clusters, + vertex, + cluster, + ): + """ + Compute ratio cut score of the specified clustering. + + Parameters + ---------- + resource_handle : ResourceHandle + Handle to the underlying device resources needed for referencing data + and running algorithms. + + graph : SGGraph + The input graph. + + num_clusters : size_t + Specifies the number of clusters to find, must be greater than 1. + + vertex : device array type + Vertex ids from the clustering to analyze. + + cluster : device array type + Cluster ids from the clustering to analyze. + + Returns + ------- + The ratio cut score of the specified clustering. + + Examples + -------- + >>> import pylibcugraph, cupy, numpy + >>> srcs = cupy.asarray([0, 1, 2], dtype=numpy.int32) + >>> dsts = cupy.asarray([1, 2, 0], dtype=numpy.int32) + >>> weights = cupy.asarray([1.0, 1.0, 1.0], dtype=numpy.float32) + >>> resource_handle = pylibcugraph.ResourceHandle() + >>> graph_props = pylibcugraph.GraphProperties( + ... is_symmetric=True, is_multigraph=False) + >>> G = pylibcugraph.SGGraph( + ... resource_handle, graph_props, srcs, dsts, weights, + ... store_transposed=True, renumber=False, do_expensive_check=False) + >>> (vertex, cluster) = pylibcugraph.spectral_modularity_maximization( + ... resource_handle, G, num_clusters=5, num_eigen_vects=2, evs_tolerance=0.00001 + ... evs_max_iter=100, kmean_tolerance=0.00001, kmean_max_iter=100) + # FIXME: Fix docstring result. + >>> vertices + ############ + >>> clusters + ############ + >>> score = pylibcugraph.analyze_clustering_ratio_cut( + ... 
resource_handle, G, num_clusters=5, vertex=vertex, cluster=cluster) + >>> score + ############ + + """ + + cdef double score = 0 + + cdef cugraph_resource_handle_t* c_resource_handle_ptr = \ + resource_handle.c_resource_handle_ptr + cdef cugraph_graph_t* c_graph_ptr = graph.c_graph_ptr + cdef cugraph_error_code_t error_code + cdef cugraph_error_t* error_ptr + + cdef cugraph_type_erased_device_array_view_t* \ + vertex_view_ptr = \ + create_cugraph_type_erased_device_array_view_from_py_obj( + vertex) + + cdef cugraph_type_erased_device_array_view_t* \ + cluster_view_ptr = \ + create_cugraph_type_erased_device_array_view_from_py_obj( + cluster) + + error_code = cugraph_analyze_clustering_ratio_cut(c_resource_handle_ptr, + c_graph_ptr, + num_clusters, + vertex_view_ptr, + cluster_view_ptr, + &score, + &error_ptr) + assert_success(error_code, error_ptr, "cugraph_analyze_clustering_ratio_cut") + + if vertex is not None: + cugraph_type_erased_device_array_view_free(vertex_view_ptr) + if cluster is not None: + cugraph_type_erased_device_array_view_free(cluster_view_ptr) + + return score diff --git a/python/pylibcugraph/pylibcugraph/balanced_cut_clustering.pyx b/python/pylibcugraph/pylibcugraph/balanced_cut_clustering.pyx new file mode 100644 index 00000000000..5a61f9e0dd7 --- /dev/null +++ b/python/pylibcugraph/pylibcugraph/balanced_cut_clustering.pyx @@ -0,0 +1,157 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Have cython use python 3 syntax +# cython: language_level = 3 + + +from pylibcugraph._cugraph_c.resource_handle cimport ( + bool_t, + cugraph_resource_handle_t, +) +from pylibcugraph._cugraph_c.error cimport ( + cugraph_error_code_t, + cugraph_error_t, +) +from pylibcugraph._cugraph_c.array cimport ( + cugraph_type_erased_device_array_view_t, +) +from pylibcugraph._cugraph_c.graph cimport ( + cugraph_graph_t, +) +from pylibcugraph._cugraph_c.community_algorithms cimport ( + cugraph_clustering_result_t, + cugraph_balanced_cut_clustering, + cugraph_clustering_result_get_vertices, + cugraph_clustering_result_get_clusters, + cugraph_clustering_result_free, +) + +from pylibcugraph.resource_handle cimport ( + ResourceHandle, +) +from pylibcugraph.graphs cimport ( + _GPUGraph, +) +from pylibcugraph.utils cimport ( + assert_success, + copy_to_cupy_array, +) + + +def balanced_cut_clustering(ResourceHandle resource_handle, + _GPUGraph graph, + num_clusters, + num_eigen_vects, + evs_tolerance, + evs_max_iter, + kmean_tolerance, + kmean_max_iter, + bool_t do_expensive_check + ): + """ + Compute a clustering/partitioning of the given graph using the spectral + balanced cut method. + + Parameters + ---------- + resource_handle : ResourceHandle + Handle to the underlying device resources needed for referencing data + and running algorithms. + + graph : SGGraph + The input graph. + + num_clusters : size_t + Specifies the number of clusters to find, must be greater than 1. + + num_eigen_vects : size_t + Specifies the number of eigenvectors to use. 
Must be lower or equal to + num_clusters. + + evs_tolerance: double + Specifies the tolerance to use in the eigensolver. + + evs_max_iter: size_t + Specifies the maximum number of iterations for the eigensolver. + + kmean_tolerance: double + Specifies the tolerance to use in the k-means solver. + + kmean_max_iter: size_t + Specifies the maximum number of iterations for the k-means solver. + + do_expensive_check : bool_t + If True, performs more extensive tests on the inputs to ensure + validitity, at the expense of increased run time. + + Returns + ------- + A tuple containing the clustering vertices, clusters + + Examples + -------- + >>> import pylibcugraph, cupy, numpy + >>> srcs = cupy.asarray([0, 1, 2], dtype=numpy.int32) + >>> dsts = cupy.asarray([1, 2, 0], dtype=numpy.int32) + >>> weights = cupy.asarray([1.0, 1.0, 1.0], dtype=numpy.float32) + >>> resource_handle = pylibcugraph.ResourceHandle() + >>> graph_props = pylibcugraph.GraphProperties( + ... is_symmetric=True, is_multigraph=False) + >>> G = pylibcugraph.SGGraph( + ... resource_handle, graph_props, srcs, dsts, weights, + ... store_transposed=True, renumber=False, do_expensive_check=False) + >>> (vertices, clusters) = pylibcugraph.balanced_cut_clustering( + ... resource_handle, G, num_clusters=5, num_eigen_vects=2, evs_tolerance=0.00001 + ... evs_max_iter=100, kmean_tolerance=0.00001, kmean_max_iter=100) + # FIXME: Fix docstring results. + >>> vertices + ############ + >>> clusters + ############ + + """ + + cdef cugraph_resource_handle_t* c_resource_handle_ptr = \ + resource_handle.c_resource_handle_ptr + cdef cugraph_graph_t* c_graph_ptr = graph.c_graph_ptr + cdef cugraph_clustering_result_t* result_ptr + cdef cugraph_error_code_t error_code + cdef cugraph_error_t* error_ptr + + error_code = cugraph_balanced_cut_clustering(c_resource_handle_ptr, + c_graph_ptr, + num_clusters, + num_eigen_vects, + evs_tolerance, + evs_max_iter, + kmean_tolerance, + kmean_max_iter, + do_expensive_check, + &result_ptr, + &error_ptr) + assert_success(error_code, error_ptr, "cugraph_balanced_cut_clustering") + + # Extract individual device array pointers from result and copy to cupy + # arrays for returning. + cdef cugraph_type_erased_device_array_view_t* vertices_ptr = \ + cugraph_clustering_result_get_vertices(result_ptr) + cdef cugraph_type_erased_device_array_view_t* clusters_ptr = \ + cugraph_clustering_result_get_clusters(result_ptr) + + cupy_vertices = copy_to_cupy_array(c_resource_handle_ptr, vertices_ptr) + cupy_clusters = copy_to_cupy_array(c_resource_handle_ptr, clusters_ptr) + + cugraph_clustering_result_free(result_ptr) + + return (cupy_vertices, cupy_clusters) diff --git a/python/pylibcugraph/pylibcugraph/betweenness_centrality.pyx b/python/pylibcugraph/pylibcugraph/betweenness_centrality.pyx new file mode 100644 index 00000000000..5087314c725 --- /dev/null +++ b/python/pylibcugraph/pylibcugraph/betweenness_centrality.pyx @@ -0,0 +1,160 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Have cython use python 3 syntax +# cython: language_level = 3 + +from libc.stdint cimport uintptr_t + +from pylibcugraph._cugraph_c.resource_handle cimport ( + bool_t, + cugraph_resource_handle_t, +) +from pylibcugraph._cugraph_c.error cimport ( + cugraph_error_code_t, + cugraph_error_t, +) +from pylibcugraph._cugraph_c.array cimport ( + cugraph_type_erased_device_array_view_t, + cugraph_type_erased_device_array_view_free, +) +from pylibcugraph._cugraph_c.graph cimport ( + cugraph_graph_t, +) +from pylibcugraph._cugraph_c.centrality_algorithms cimport ( + cugraph_centrality_result_t, + cugraph_betweenness_centrality, + cugraph_centrality_result_get_vertices, + cugraph_centrality_result_get_values, + cugraph_centrality_result_free, +) +from pylibcugraph.resource_handle cimport ( + ResourceHandle, +) +from pylibcugraph.graphs cimport ( + _GPUGraph, +) +from pylibcugraph.utils cimport ( + assert_success, + copy_to_cupy_array, + assert_CAI_type, + create_cugraph_type_erased_device_array_view_from_py_obj, +) +from pylibcugraph.select_random_vertices import ( + select_random_vertices +) + + +def betweenness_centrality(ResourceHandle resource_handle, + _GPUGraph graph, + k, + random_state, + bool_t normalized, + bool_t include_endpoints, + bool_t do_expensive_check): + """ + Compute the betweenness centrality for all vertices of the graph G. + Betweenness centrality is a measure of the number of shortest paths that + pass through a vertex. A vertex with a high betweenness centrality score + has more paths passing through it and is therefore believed to be more + important. + + Parameters + ---------- + resource_handle : ResourceHandle + Handle to the underlying device resources needed for referencing data + and running algorithms. + + graph : SGGraph or MGGraph + The input graph, for either Single or Multi-GPU operations. + + k : int or device array type or None, optional (default=None) + If k is not None, use k node samples to estimate betweenness. Higher + values give better approximation. If k is a device array type, + use the content of the list for estimation: the list should contain + vertex identifiers. If k is None (the default), all the vertices are + used to estimate betweenness. Vertices obtained through sampling or + defined as a list will be used as sources for traversals inside the + algorithm. + + random_state : int, optional (default=None) + if k is specified and k is an integer, use random_state to initialize the + random number generator. + Using None defaults to a hash of process id, time, and hostname + If k is either None or list or cudf objects: random_state parameter is + ignored. + + normalized : bool_t + Normalization will ensure that values are in [0, 1]. + + include_endpoints : bool_t + If true, include the endpoints in the shortest path counts. + + do_expensive_check : bool_t + A flag to run expensive checks for input arguments if True. 
+ + Returns + ------- + + Examples + -------- + + """ + + if isinstance(k, int): + # randomly select vertices + + #'select_random_vertices' internally creates a + # 'pylibcugraph.random.CuGraphRandomState' + vertex_list = select_random_vertices( + resource_handle, graph, random_state, k) + else: + vertex_list = k + + cdef cugraph_resource_handle_t* c_resource_handle_ptr = \ + resource_handle.c_resource_handle_ptr + cdef cugraph_graph_t* c_graph_ptr = graph.c_graph_ptr + + cdef cugraph_centrality_result_t* result_ptr + cdef cugraph_error_code_t error_code + cdef cugraph_error_t* error_ptr + + cdef cugraph_type_erased_device_array_view_t* \ + vertex_list_view_ptr = \ + create_cugraph_type_erased_device_array_view_from_py_obj( + vertex_list) + + error_code = cugraph_betweenness_centrality(c_resource_handle_ptr, + c_graph_ptr, + vertex_list_view_ptr, + normalized, + include_endpoints, + do_expensive_check, + &result_ptr, + &error_ptr) + assert_success(error_code, error_ptr, "cugraph_betweenness_centrality") + + # Extract individual device array pointers from result and copy to cupy + # arrays for returning. + cdef cugraph_type_erased_device_array_view_t* vertices_ptr = \ + cugraph_centrality_result_get_vertices(result_ptr) + cdef cugraph_type_erased_device_array_view_t* values_ptr = \ + cugraph_centrality_result_get_values(result_ptr) + + cupy_vertices = copy_to_cupy_array(c_resource_handle_ptr, vertices_ptr) + cupy_values = copy_to_cupy_array(c_resource_handle_ptr, values_ptr) + + cugraph_centrality_result_free(result_ptr) + cugraph_type_erased_device_array_view_free(vertex_list_view_ptr) + + return (cupy_vertices, cupy_values) diff --git a/python/pylibcugraph/pylibcugraph/bfs.pyx b/python/pylibcugraph/pylibcugraph/bfs.pyx index 6886e6b059a..8af3f48736b 100644 --- a/python/pylibcugraph/pylibcugraph/bfs.pyx +++ b/python/pylibcugraph/pylibcugraph/bfs.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -68,26 +68,33 @@ def bfs(ResourceHandle handle, _GPUGraph graph, handle: ResourceHandle The resource handle responsible for managing device resources that this algorithm will use + graph: SGGraph or MGGraph The graph to operate upon + sources: cudf.Series The vertices to start the breadth-first search from. Should match the numbering of the provided graph. All workers must have a unique set of sources. Empty sets are allowed as long as at least one worker has a source. + direction_optimizing: bool_t Whether to treat the graph as undirected (should only be called on a symmetric graph) + depth_limit: int32_t The depth limit at which the traversal will be stopped. If this is a negative number, the traversal will run without a depth limit. + compute_predecessors: bool_t Whether to compute the predecessors. If left blank, -1 will be returned instead of the correct predecessor of each vertex. + do_expensive_check : bool_t If True, performs more extensive tests on the inputs to ensure validitity, at the expense of increased run time. + Returns ------- A tuple of device arrays (cupy arrays) of the form diff --git a/python/pylibcugraph/pylibcugraph/ecg.pyx b/python/pylibcugraph/pylibcugraph/ecg.pyx new file mode 100644 index 00000000000..c5c1fe2eda7 --- /dev/null +++ b/python/pylibcugraph/pylibcugraph/ecg.pyx @@ -0,0 +1,148 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. 
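The betweenness_centrality wrapper above ships with an empty Examples section; a hedged sketch of its two sampling modes (exact, and k-sample with a seed) follows, using an illustrative toy graph rather than output verified against this patch:

    import cupy
    import numpy
    import pylibcugraph

    srcs = cupy.asarray([0, 1, 1, 2, 2, 3], dtype=numpy.int32)
    dsts = cupy.asarray([1, 2, 3, 3, 4, 4], dtype=numpy.int32)
    wgts = cupy.asarray([1.0] * 6, dtype=numpy.float32)
    handle = pylibcugraph.ResourceHandle()
    props = pylibcugraph.GraphProperties(is_symmetric=False, is_multigraph=False)
    G = pylibcugraph.SGGraph(handle, props, srcs, dsts, wgts,
                             store_transposed=False, renumber=False,
                             do_expensive_check=False)

    # Exact betweenness: k=None means every vertex is used as a source.
    vertices, values = pylibcugraph.betweenness_centrality(
        handle, G, k=None, random_state=None,
        normalized=True, include_endpoints=False, do_expensive_check=False)

    # Approximate betweenness: sample 3 sources, seeded for repeatability.
    vertices_est, values_est = pylibcugraph.betweenness_centrality(
        handle, G, k=3, random_state=42,
        normalized=True, include_endpoints=False, do_expensive_check=False)
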
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Have cython use python 3 syntax +# cython: language_level = 3 + + +from pylibcugraph._cugraph_c.resource_handle cimport ( + bool_t, + cugraph_resource_handle_t, +) +from pylibcugraph._cugraph_c.error cimport ( + cugraph_error_code_t, + cugraph_error_t, +) +from pylibcugraph._cugraph_c.array cimport ( + cugraph_type_erased_device_array_view_t, +) +from pylibcugraph._cugraph_c.graph cimport ( + cugraph_graph_t, +) +from pylibcugraph._cugraph_c.community_algorithms cimport ( + cugraph_hierarchical_clustering_result_t, + cugraph_ecg, + cugraph_hierarchical_clustering_result_get_vertices, + cugraph_hierarchical_clustering_result_get_clusters, + cugraph_hierarchical_clustering_result_free, +) + +from pylibcugraph.resource_handle cimport ( + ResourceHandle, +) +from pylibcugraph.graphs cimport ( + _GPUGraph, +) +from pylibcugraph.utils cimport ( + assert_success, + copy_to_cupy_array, +) + + +def ecg(ResourceHandle resource_handle, + _GPUGraph graph, + min_weight, + size_t ensemble_size, + bool_t do_expensive_check + ): + """ + Compute the Ensemble Clustering for Graphs (ECG) partition of the input + graph. ECG runs truncated Louvain on an ensemble of permutations of the + input graph, then uses the ensemble partitions to determine weights for + the input graph. The final result is found by running full Louvain on + the input graph using the determined weights. + + See https://arxiv.org/abs/1809.05578 for further information. + + Parameters + ---------- + resource_handle : ResourceHandle + Handle to the underlying device resources needed for referencing data + and running algorithms. + + graph : SGGraph + The input graph. + + min_weight : double, optional (default=0.5) + The minimum value to assign as an edgeweight in the ECG algorithm. + It should be a value in the range [0,1] usually left as the default + value of .05 + + ensemble_size : size_t, optional (default=16) + The number of graph permutations to use for the ensemble. + The default value is 16, larger values may produce higher quality + partitions for some graphs. + + do_expensive_check : bool_t + If True, performs more extensive tests on the inputs to ensure + validitity, at the expense of increased run time. + + Returns + ------- + A tuple containing the hierarchical clustering vertices, clusters + + Examples + -------- + >>> import pylibcugraph, cupy, numpy + >>> srcs = cupy.asarray([0, 1, 2], dtype=numpy.int32) + >>> dsts = cupy.asarray([1, 2, 0], dtype=numpy.int32) + >>> weights = cupy.asarray([1.0, 1.0, 1.0], dtype=numpy.float32) + >>> resource_handle = pylibcugraph.ResourceHandle() + >>> graph_props = pylibcugraph.GraphProperties( + ... is_symmetric=True, is_multigraph=False) + >>> G = pylibcugraph.SGGraph( + ... resource_handle, graph_props, srcs, dsts, weights, + ... 
store_transposed=True, renumber=False, do_expensive_check=False) + >>> (vertices, clusters) = pylibcugraph.ecg(resource_handle, G) + # FIXME: Check this docstring example + >>> vertices + [0, 1, 2] + >>> clusters + [0, 0, 0] + + """ + + if min_weight is None: + min_weight = 0.5 + if ensemble_size is None: + ensemble_size = 16 + + cdef cugraph_resource_handle_t* c_resource_handle_ptr = \ + resource_handle.c_resource_handle_ptr + cdef cugraph_graph_t* c_graph_ptr = graph.c_graph_ptr + cdef cugraph_hierarchical_clustering_result_t* result_ptr + cdef cugraph_error_code_t error_code + cdef cugraph_error_t* error_ptr + + error_code = cugraph_ecg(c_resource_handle_ptr, + c_graph_ptr, + min_weight, + ensemble_size, + do_expensive_check, + &result_ptr, + &error_ptr) + assert_success(error_code, error_ptr, "cugraph_ecg") + + # Extract individual device array pointers from result and copy to cupy + # arrays for returning. + cdef cugraph_type_erased_device_array_view_t* vertices_ptr = \ + cugraph_hierarchical_clustering_result_get_vertices(result_ptr) + cdef cugraph_type_erased_device_array_view_t* clusters_ptr = \ + cugraph_hierarchical_clustering_result_get_clusters(result_ptr) + + cupy_vertices = copy_to_cupy_array(c_resource_handle_ptr, vertices_ptr) + cupy_clusters = copy_to_cupy_array(c_resource_handle_ptr, clusters_ptr) + + cugraph_hierarchical_clustering_result_free(result_ptr) + + return (cupy_vertices, cupy_clusters) diff --git a/python/pylibcugraph/pylibcugraph/induced_subgraph.pyx b/python/pylibcugraph/pylibcugraph/induced_subgraph.pyx new file mode 100644 index 00000000000..aab36d3d5e0 --- /dev/null +++ b/python/pylibcugraph/pylibcugraph/induced_subgraph.pyx @@ -0,0 +1,169 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
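The ecg wrapper above substitutes min_weight=0.5 and ensemble_size=16 when None is passed for either argument (its docstring prose mentions .05, but the code falls back to 0.5). A hedged usage sketch with those defaults written out explicitly, on the same illustrative triangle graph used in the docstring:

    import cupy
    import numpy
    import pylibcugraph

    srcs = cupy.asarray([0, 1, 2], dtype=numpy.int32)
    dsts = cupy.asarray([1, 2, 0], dtype=numpy.int32)
    wgts = cupy.asarray([1.0, 1.0, 1.0], dtype=numpy.float32)
    handle = pylibcugraph.ResourceHandle()
    props = pylibcugraph.GraphProperties(is_symmetric=True, is_multigraph=False)
    G = pylibcugraph.SGGraph(handle, props, srcs, dsts, wgts,
                             store_transposed=True, renumber=False,
                             do_expensive_check=False)

    vertices, clusters = pylibcugraph.ecg(
        handle, G, min_weight=0.5, ensemble_size=16, do_expensive_check=False)
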
+ +# Have cython use python 3 syntax +# cython: language_level = 3 + + +from pylibcugraph._cugraph_c.resource_handle cimport ( + bool_t, + cugraph_resource_handle_t, +) +from pylibcugraph._cugraph_c.error cimport ( + cugraph_error_code_t, + cugraph_error_t, +) +from pylibcugraph._cugraph_c.array cimport ( + cugraph_type_erased_device_array_view_t, +) +from pylibcugraph._cugraph_c.graph cimport ( + cugraph_graph_t, +) +from pylibcugraph._cugraph_c.graph_functions cimport ( + cugraph_induced_subgraph_result_t, + cugraph_extract_induced_subgraph, + cugraph_induced_subgraph_get_sources, + cugraph_induced_subgraph_get_destinations, + cugraph_induced_subgraph_get_edge_weights, + cugraph_induced_subgraph_get_subgraph_offsets, + cugraph_induced_subgraph_result_free, +) + +from pylibcugraph.resource_handle cimport ( + ResourceHandle, +) +from pylibcugraph.graphs cimport ( + _GPUGraph, +) +from pylibcugraph.utils cimport ( + assert_success, + copy_to_cupy_array, + create_cugraph_type_erased_device_array_view_from_py_obj, +) + + +def induced_subgraph(ResourceHandle resource_handle, + _GPUGraph graph, + subgraph_vertices, + subgraph_offsets, + bool_t do_expensive_check): + """ + extract a list of edges that represent the subgraph + containing only the specified vertex ids. + + Parameters + ---------- + resource_handle : ResourceHandle + Handle to the underlying device resources needed for referencing data + and running algorithms. + + graph : SGGraph or MGGraph + The input graph. + + subgraph_vertices : cupy array + array of vertices to include in extracted subgraph. + + subgraph_offsets : cupy array + array of subgraph offsets into subgraph_vertices. + + do_expensive_check : bool_t + If True, performs more extensive tests on the inputs to ensure + validitity, at the expense of increased run time. + + Returns + ------- + A tuple of device arrays containing the sources, destinations, edge_weights + and the subgraph_offsets(if there are more than one seeds) + + Examples + -------- + >>> import pylibcugraph, cupy, numpy + >>> srcs = cupy.asarray([0, 1, 1, 2, 2, 2, 3, 4], dtype=numpy.int32) + >>> dsts = cupy.asarray([1, 3, 4, 0, 1, 3, 5, 5], dtype=numpy.int32) + >>> weights = cupy.asarray( + ... [0.1, 2.1, 1.1, 5.1, 3.1, 4.1, 7.2, 3.2], dtype=numpy.float32) + >>> subgraph_vertices = cupy.asarray([0, 1, 2, 3], dtype=numpy.int32) + >>> subgraph_offsets = cupy.asarray([0, 4], dtype=numpy.int32) + >>> resource_handle = pylibcugraph.ResourceHandle() + >>> graph_props = pylibcugraph.GraphProperties( + ... is_symmetric=False, is_multigraph=False) + >>> G = pylibcugraph.SGGraph( + ... resource_handle, graph_props, srcs, dsts, weights, + ... store_transposed=False, renumber=False, do_expensive_check=False) + >>> (sources, destinations, edge_weights, subgraph_offsets) = + ... pylibcugraph.induced_subgraph( + ... 
resource_handle, G, subgraph_vertices, subgraph_offsets, False) + >>> sources + [0, 1, 2, 2, 2] + >>> destinations + [1, 3, 0, 1, 3] + >>> edge_weights + [0.1, 2.1, 5.1, 3.1, 4.1] + >>> subgraph_offsets + [0, 5] + + """ + + cdef cugraph_resource_handle_t* c_resource_handle_ptr = \ + resource_handle.c_resource_handle_ptr + cdef cugraph_graph_t* c_graph_ptr = graph.c_graph_ptr + cdef cugraph_induced_subgraph_result_t* result_ptr + cdef cugraph_error_code_t error_code + cdef cugraph_error_t* error_ptr + + cdef cugraph_type_erased_device_array_view_t* \ + subgraph_offsets_view_ptr = \ + create_cugraph_type_erased_device_array_view_from_py_obj( + subgraph_offsets) + + cdef cugraph_type_erased_device_array_view_t* \ + subgraph_vertices_view_ptr = \ + create_cugraph_type_erased_device_array_view_from_py_obj( + subgraph_vertices) + + error_code = cugraph_extract_induced_subgraph(c_resource_handle_ptr, + c_graph_ptr, + subgraph_offsets_view_ptr, + subgraph_vertices_view_ptr, + do_expensive_check, + &result_ptr, + &error_ptr) + assert_success(error_code, error_ptr, "cugraph_extract_induced_subgraph") + + # Extract individual device array pointers from result and copy to cupy + # arrays for returning. + cdef cugraph_type_erased_device_array_view_t* sources_ptr = \ + cugraph_induced_subgraph_get_sources(result_ptr) + cdef cugraph_type_erased_device_array_view_t* destinations_ptr = \ + cugraph_induced_subgraph_get_destinations(result_ptr) + cdef cugraph_type_erased_device_array_view_t* edge_weights_ptr = \ + cugraph_induced_subgraph_get_edge_weights(result_ptr) + cdef cugraph_type_erased_device_array_view_t* subgraph_offsets_ptr = \ + cugraph_induced_subgraph_get_subgraph_offsets(result_ptr) + + # FIXME: Get ownership of the result data instead of performing a copy + # for perfomance improvement + cupy_sources = copy_to_cupy_array( + c_resource_handle_ptr, sources_ptr) + cupy_destinations = copy_to_cupy_array( + c_resource_handle_ptr, destinations_ptr) + cupy_edge_weights = copy_to_cupy_array( + c_resource_handle_ptr, edge_weights_ptr) + cupy_subgraph_offsets = copy_to_cupy_array( + c_resource_handle_ptr, subgraph_offsets_ptr) + + # Free pointer + cugraph_induced_subgraph_result_free(result_ptr) + + return (cupy_sources, cupy_destinations, + cupy_edge_weights, cupy_subgraph_offsets) diff --git a/python/pylibcugraph/pylibcugraph/select_random_vertices.pyx b/python/pylibcugraph/pylibcugraph/select_random_vertices.pyx new file mode 100644 index 00000000000..7964f101058 --- /dev/null +++ b/python/pylibcugraph/pylibcugraph/select_random_vertices.pyx @@ -0,0 +1,122 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
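The induced_subgraph docstring above exercises only a single seed set; the subgraph_offsets argument also allows several vertex lists to be extracted in one call, with the returned offsets delimiting each result. A hedged sketch (the two seed sets and the slicing are illustrative assumptions built on the docstring's example graph):

    import cupy
    import numpy
    import pylibcugraph

    srcs = cupy.asarray([0, 1, 1, 2, 2, 2, 3, 4], dtype=numpy.int32)
    dsts = cupy.asarray([1, 3, 4, 0, 1, 3, 5, 5], dtype=numpy.int32)
    wgts = cupy.asarray([0.1, 2.1, 1.1, 5.1, 3.1, 4.1, 7.2, 3.2], dtype=numpy.float32)
    handle = pylibcugraph.ResourceHandle()
    props = pylibcugraph.GraphProperties(is_symmetric=False, is_multigraph=False)
    G = pylibcugraph.SGGraph(handle, props, srcs, dsts, wgts,
                             store_transposed=False, renumber=False,
                             do_expensive_check=False)

    # Two seed sets packed back to back: [0, 1, 2] and [3, 4, 5].
    seed_vertices = cupy.asarray([0, 1, 2, 3, 4, 5], dtype=numpy.int32)
    seed_offsets = cupy.asarray([0, 3, 6], dtype=numpy.int32)

    sources, destinations, weights, offsets = pylibcugraph.induced_subgraph(
        handle, G, seed_vertices, seed_offsets, False)

    # offsets[i]:offsets[i + 1] bounds the edges of the i-th induced subgraph.
    first = slice(int(offsets[0]), int(offsets[1]))
    second = slice(int(offsets[1]), int(offsets[2]))
    print(sources[first], destinations[first])
    print(sources[second], destinations[second])
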
+ +# Have cython use python 3 syntax +# cython: language_level = 3 + + +from pylibcugraph._cugraph_c.resource_handle cimport ( + cugraph_resource_handle_t, +) +from pylibcugraph._cugraph_c.error cimport ( + cugraph_error_code_t, + cugraph_error_t, +) +from pylibcugraph._cugraph_c.array cimport ( + cugraph_type_erased_device_array_view_t, +) +from pylibcugraph._cugraph_c.graph_functions cimport ( + cugraph_two_hop_neighbors, + cugraph_vertex_pairs_t, + cugraph_vertex_pairs_get_first, + cugraph_vertex_pairs_get_second, + cugraph_vertex_pairs_free, +) +from pylibcugraph._cugraph_c.graph cimport ( + cugraph_graph_t, +) +from pylibcugraph.resource_handle cimport ( + ResourceHandle, +) +from pylibcugraph.graphs cimport ( + _GPUGraph, +) +from pylibcugraph.utils cimport ( + assert_success, + copy_to_cupy_array, +) +from pylibcugraph._cugraph_c.random cimport ( + cugraph_rng_state_t +) +from pylibcugraph.random cimport ( + CuGraphRandomState +) +from pylibcugraph._cugraph_c.array cimport ( + cugraph_type_erased_device_array_t, + cugraph_type_erased_device_array_view +) +from pylibcugraph._cugraph_c.sampling_algorithms cimport ( + cugraph_select_random_vertices +) + + +def select_random_vertices(ResourceHandle resource_handle, + _GPUGraph graph, + random_state, + size_t num_vertices, + ): + """ + Select random vertices from the graph + + Parameters + ---------- + resource_handle : ResourceHandle + Handle to the underlying device resources needed for referencing data + and running algorithms. + + graph : SGGraph or MGGraph + The input graph, for either Single or Multi-GPU operations. + + random_state : int , optional + Random state to use when generating samples. Optional argument, + defaults to a hash of process id, time, and hostname. + (See pylibcugraph.random.CuGraphRandomState) + + num_vertices : size_t , optional + Number of vertices to sample. Optional argument, defaults to the + total number of vertices. + + Returns + ------- + return random vertices from the graph + """ + + cdef cugraph_resource_handle_t* c_resource_handle_ptr = \ + resource_handle.c_resource_handle_ptr + cdef cugraph_graph_t* c_graph_ptr = graph.c_graph_ptr + + cdef cugraph_type_erased_device_array_t* vertices_ptr + cdef cugraph_error_code_t error_code + cdef cugraph_error_t* error_ptr + + cg_rng_state = CuGraphRandomState(resource_handle, random_state) + + cdef cugraph_rng_state_t* rng_state_ptr = \ + cg_rng_state.rng_state_ptr + + error_code = cugraph_select_random_vertices(c_resource_handle_ptr, + c_graph_ptr, + rng_state_ptr, + num_vertices, + &vertices_ptr, + &error_ptr) + assert_success(error_code, error_ptr, "select_random_vertices") + + cdef cugraph_type_erased_device_array_view_t* \ + vertices_view_ptr = \ + cugraph_type_erased_device_array_view( + vertices_ptr) + + cupy_vertices = copy_to_cupy_array(c_resource_handle_ptr, vertices_view_ptr) + + return cupy_vertices diff --git a/python/pylibcugraph/pylibcugraph/spectral_modularity_maximization.pyx b/python/pylibcugraph/pylibcugraph/spectral_modularity_maximization.pyx new file mode 100644 index 00000000000..c74b1f0db41 --- /dev/null +++ b/python/pylibcugraph/pylibcugraph/spectral_modularity_maximization.pyx @@ -0,0 +1,157 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
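select_random_vertices above is also what betweenness_centrality calls internally when k is an integer; invoking it directly with a fixed random_state should give a repeatable sample. A hedged sketch on an illustrative six-vertex cycle:

    import cupy
    import numpy
    import pylibcugraph

    srcs = cupy.asarray([0, 1, 2, 3, 4, 5], dtype=numpy.int32)
    dsts = cupy.asarray([1, 2, 3, 4, 5, 0], dtype=numpy.int32)
    wgts = cupy.asarray([1.0] * 6, dtype=numpy.float32)
    handle = pylibcugraph.ResourceHandle()
    props = pylibcugraph.GraphProperties(is_symmetric=False, is_multigraph=False)
    G = pylibcugraph.SGGraph(handle, props, srcs, dsts, wgts,
                             store_transposed=False, renumber=False,
                             do_expensive_check=False)

    sample_a = pylibcugraph.select_random_vertices(handle, G, 42, 3)
    sample_b = pylibcugraph.select_random_vertices(handle, G, 42, 3)
    # With the same random_state the two samples are expected to match.
    print(sample_a, sample_b)
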
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Have cython use python 3 syntax +# cython: language_level = 3 + + +from pylibcugraph._cugraph_c.resource_handle cimport ( + bool_t, + cugraph_resource_handle_t, +) +from pylibcugraph._cugraph_c.error cimport ( + cugraph_error_code_t, + cugraph_error_t, +) +from pylibcugraph._cugraph_c.array cimport ( + cugraph_type_erased_device_array_view_t, +) +from pylibcugraph._cugraph_c.graph cimport ( + cugraph_graph_t, +) +from pylibcugraph._cugraph_c.community_algorithms cimport ( + cugraph_clustering_result_t, + cugraph_spectral_modularity_maximization, + cugraph_clustering_result_get_vertices, + cugraph_clustering_result_get_clusters, + cugraph_clustering_result_free, +) + +from pylibcugraph.resource_handle cimport ( + ResourceHandle, +) +from pylibcugraph.graphs cimport ( + _GPUGraph, +) +from pylibcugraph.utils cimport ( + assert_success, + copy_to_cupy_array, +) + + +def spectral_modularity_maximization(ResourceHandle resource_handle, + _GPUGraph graph, + num_clusters, + num_eigen_vects, + evs_tolerance, + evs_max_iter, + kmean_tolerance, + kmean_max_iter, + bool_t do_expensive_check + ): + """ + Compute a clustering/partitioning of the given graph using the spectral + modularity maximization method. + + Parameters + ---------- + resource_handle : ResourceHandle + Handle to the underlying device resources needed for referencing data + and running algorithms. + + graph : SGGraph + The input graph. + + num_clusters : size_t + Specifies the number of clusters to find, must be greater than 1 + + num_eigen_vects : size_t + Specifies the number of eigenvectors to use. Must be lower or equal to + num_clusters. + + evs_tolerance: double + Specifies the tolerance to use in the eigensolver. + + evs_max_iter: size_t + Specifies the maximum number of iterations for the eigensolver. + + kmean_tolerance: double + Specifies the tolerance to use in the k-means solver. + + kmean_max_iter: size_t + Specifies the maximum number of iterations for the k-means solver. + + do_expensive_check : bool_t + If True, performs more extensive tests on the inputs to ensure + validitity, at the expense of increased run time. + + Returns + ------- + A tuple containing the clustering vertices, clusters + + Examples + -------- + >>> import pylibcugraph, cupy, numpy + >>> srcs = cupy.asarray([0, 1, 2], dtype=numpy.int32) + >>> dsts = cupy.asarray([1, 2, 0], dtype=numpy.int32) + >>> weights = cupy.asarray([1.0, 1.0, 1.0], dtype=numpy.float32) + >>> resource_handle = pylibcugraph.ResourceHandle() + >>> graph_props = pylibcugraph.GraphProperties( + ... is_symmetric=True, is_multigraph=False) + >>> G = pylibcugraph.SGGraph( + ... resource_handle, graph_props, srcs, dsts, weights, + ... store_transposed=True, renumber=False, do_expensive_check=False) + >>> (vertices, clusters) = pylibcugraph.spectral_modularity_maximization( + ... resource_handle, G, num_clusters=5, num_eigen_vects=2, evs_tolerance=0.00001 + ... evs_max_iter=100, kmean_tolerance=0.00001, kmean_max_iter=100) + # FIXME: Fix docstring result. 
+ >>> vertices + ############ + >>> clusters + ############ + + """ + + cdef cugraph_resource_handle_t* c_resource_handle_ptr = \ + resource_handle.c_resource_handle_ptr + cdef cugraph_graph_t* c_graph_ptr = graph.c_graph_ptr + cdef cugraph_clustering_result_t* result_ptr + cdef cugraph_error_code_t error_code + cdef cugraph_error_t* error_ptr + + error_code = cugraph_spectral_modularity_maximization(c_resource_handle_ptr, + c_graph_ptr, + num_clusters, + num_eigen_vects, + evs_tolerance, + evs_max_iter, + kmean_tolerance, + kmean_max_iter, + do_expensive_check, + &result_ptr, + &error_ptr) + assert_success(error_code, error_ptr, "cugraph_spectral_modularity_maximization") + + # Extract individual device array pointers from result and copy to cupy + # arrays for returning. + cdef cugraph_type_erased_device_array_view_t* vertices_ptr = \ + cugraph_clustering_result_get_vertices(result_ptr) + cdef cugraph_type_erased_device_array_view_t* clusters_ptr = \ + cugraph_clustering_result_get_clusters(result_ptr) + + cupy_vertices = copy_to_cupy_array(c_resource_handle_ptr, vertices_ptr) + cupy_clusters = copy_to_cupy_array(c_resource_handle_ptr, clusters_ptr) + + cugraph_clustering_result_free(result_ptr) + + return (cupy_vertices, cupy_clusters) diff --git a/python/pylibcugraph/pyproject.toml b/python/pylibcugraph/pyproject.toml index 89ef0509092..9101d78bb28 100644 --- a/python/pylibcugraph/pyproject.toml +++ b/python/pylibcugraph/pyproject.toml @@ -3,15 +3,15 @@ [build-system] requires = [ - "wheel", - "setuptools", - "cython>=0.29,<0.30", - "scikit-build>=0.13.1", "cmake>=3.23.1,!=3.25.0", + "cython>=0.29,<0.30", "ninja", - "rmm==23.6.*", "pylibraft==23.6.*", -] + "rmm==23.6.*", + "scikit-build>=0.13.1", + "setuptools", + "wheel", +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. build-backend = "setuptools.build_meta" [tool.pytest.ini_options] @@ -30,7 +30,7 @@ requires-python = ">=3.8" dependencies = [ "pylibraft==23.6.*", "rmm==23.6.*", -] +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ "Intended Audience :: Developers", "Programming Language :: Python", @@ -40,15 +40,16 @@ classifiers = [ [project.optional-dependencies] test = [ + "cudf==23.6.*", + "networkx>=2.5.1", + "numpy>=1.21", + "pandas", "pytest", - "pytest-xdist", "pytest-benchmark", + "pytest-cov", + "pytest-xdist", "scipy", - "pandas", - "numpy", - "networkx>=2.5.1", - "cudf==23.6.*", -] +] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. [project.urls] Homepage = "https://github.com/rapidsai/cugraph" diff --git a/readme_pages/SOURCEBUILD.md b/readme_pages/SOURCEBUILD.md index 2a8cb9aa625..122b8b0d0ee 100644 --- a/readme_pages/SOURCEBUILD.md +++ b/readme_pages/SOURCEBUILD.md @@ -89,6 +89,7 @@ build.sh [ ...] [ ...] cugraph-service - build the cugraph-service_client and cugraph-service_server Python package cpp-mgtests - build libcugraph and libcugraph_etl MG tests. Builds MPI communicator, adding MPI as a dependency. 
cugraph-dgl - build the cugraph-dgl extensions for DGL + cugraph-pyg - build the cugraph-dgl extensions for PyG docs - build the docs and is: -v - verbose build mode From 012d3a48b104fd490e1811290a332d000a926a2e Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Mon, 10 Apr 2023 12:49:09 -0700 Subject: [PATCH 08/90] Remove uses-setup-env-vars (#3463) This setting now matches the default behavior of the shared-action-workflows repo Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - AJ Schmidt (https://github.com/ajschmidt8) URL: https://github.com/rapidsai/cugraph/pull/3463 --- .github/workflows/build.yaml | 2 -- .github/workflows/pr.yaml | 2 -- 2 files changed, 4 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index f1cb664fe0d..2a3261a1dfe 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -82,7 +82,6 @@ jobs: extra-repo-deploy-key: CUGRAPH_OPS_SSH_PRIVATE_DEPLOY_KEY skbuild-configure-options: "-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=/project/cugraph-ops/" - uses-setup-env-vars: false wheel-publish-pylibcugraph: needs: wheel-build-pylibcugraph secrets: inherit @@ -113,7 +112,6 @@ jobs: extra-repo-deploy-key: CUGRAPH_OPS_SSH_PRIVATE_DEPLOY_KEY skbuild-configure-options: "-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=/project/cugraph-ops/" - uses-setup-env-vars: false wheel-publish-cugraph: needs: wheel-build-cugraph secrets: inherit diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 4bcf22586e6..b5fe3a9183a 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -87,7 +87,6 @@ jobs: extra-repo-sha: branch-23.06 extra-repo-deploy-key: CUGRAPH_OPS_SSH_PRIVATE_DEPLOY_KEY skbuild-configure-options: "-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=/project/cugraph-ops/" - uses-setup-env-vars: false wheel-tests-pylibcugraph: needs: wheel-build-pylibcugraph secrets: inherit @@ -112,7 +111,6 @@ jobs: extra-repo-deploy-key: CUGRAPH_OPS_SSH_PRIVATE_DEPLOY_KEY before-wheel: "RAPIDS_PY_WHEEL_NAME=pylibcugraph_cu11 rapids-download-wheels-from-s3 ./local-wheelhouse" skbuild-configure-options: "-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=/project/cugraph-ops/" - uses-setup-env-vars: false wheel-tests-cugraph: needs: wheel-build-cugraph secrets: inherit From e231693e7b02a9238ef18a23607fcd956412a22d Mon Sep 17 00:00:00 2001 From: Chuck Hastings <45364586+ChuckHastings@users.noreply.github.com> Date: Mon, 10 Apr 2023 19:10:59 -0400 Subject: [PATCH 09/90] Remove legacy implementation of induce subgraph (#3464) No longer used since #2537 was merged. 
Closes #2538 Authors: - Chuck Hastings (https://github.com/ChuckHastings) Approvers: - Seunghwa Kang (https://github.com/seunghwak) - Joseph Nke (https://github.com/jnke2016) URL: https://github.com/rapidsai/cugraph/pull/3464 --- cpp/CMakeLists.txt | 1 - .../legacy/extract_subgraph_by_vertex.cu | 146 ------------------ 2 files changed, 147 deletions(-) delete mode 100644 cpp/src/community/legacy/extract_subgraph_by_vertex.cu diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 8a47defed35..d592b45609c 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -216,7 +216,6 @@ set(CUGRAPH_SOURCES src/community/legacy/leiden.cu src/community/legacy/ktruss.cu src/community/legacy/ecg.cu - src/community/legacy/extract_subgraph_by_vertex.cu src/community/egonet_sg.cu src/community/egonet_mg.cu src/sampling/random_walks.cu diff --git a/cpp/src/community/legacy/extract_subgraph_by_vertex.cu b/cpp/src/community/legacy/extract_subgraph_by_vertex.cu deleted file mode 100644 index 481b1fa33e9..00000000000 --- a/cpp/src/community/legacy/extract_subgraph_by_vertex.cu +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include - -#include -#include -#include - -#include -#include -#include - -namespace { - -template -std::unique_ptr> extract_subgraph_by_vertices( - cugraph::legacy::GraphCOOView const& graph, - vertex_t const* vertices, - vertex_t num_vertices, - cudaStream_t stream) -{ - edge_t graph_num_verts = graph.number_of_vertices; - - rmm::device_vector error_count_v{1, 0}; - rmm::device_vector vertex_used_v{graph_num_verts, num_vertices}; - - vertex_t* d_vertex_used = vertex_used_v.data().get(); - int64_t* d_error_count = error_count_v.data().get(); - - thrust::for_each( - rmm::exec_policy(stream), - thrust::make_counting_iterator(0), - thrust::make_counting_iterator(num_vertices), - [vertices, d_vertex_used, d_error_count, graph_num_verts] __device__(vertex_t idx) { - vertex_t v = vertices[idx]; - if ((v >= 0) && (v < graph_num_verts)) { - d_vertex_used[v] = idx; - } else { - atomicAdd(d_error_count, int64_t{1}); - } - }); - - CUGRAPH_EXPECTS(error_count_v[0] == 0, - "Input error... 
vertices specifies vertex id out of range"); - - vertex_t* graph_src = graph.src_indices; - vertex_t* graph_dst = graph.dst_indices; - weight_t* graph_weight = graph.edge_data; - - // iterate over the edges and count how many make it into the output - int64_t count = thrust::count_if( - rmm::exec_policy(stream), - thrust::make_counting_iterator(0), - thrust::make_counting_iterator(graph.number_of_edges), - [graph_src, graph_dst, d_vertex_used, num_vertices] __device__(edge_t e) { - vertex_t s = graph_src[e]; - vertex_t d = graph_dst[e]; - return ((d_vertex_used[s] < num_vertices) && (d_vertex_used[d] < num_vertices)); - }); - - if (count > 0) { - auto result = std::make_unique>( - num_vertices, count, has_weight); - - vertex_t* d_new_src = result->src_indices(); - vertex_t* d_new_dst = result->dst_indices(); - weight_t* d_new_weight = result->edge_data(); - - // reusing error_count as a vertex counter... - thrust::for_each(rmm::exec_policy(stream), - thrust::make_counting_iterator(0), - thrust::make_counting_iterator(graph.number_of_edges), - [graph_src, - graph_dst, - graph_weight, - d_vertex_used, - num_vertices, - d_error_count, - d_new_src, - d_new_dst, - d_new_weight] __device__(edge_t e) { - vertex_t s = graph_src[e]; - vertex_t d = graph_dst[e]; - if ((d_vertex_used[s] < num_vertices) && (d_vertex_used[d] < num_vertices)) { - // NOTE: Could avoid atomic here by doing a inclusive sum, but that would - // require 2*|E| temporary memory. If this becomes important perhaps - // we make 2 implementations and pick one based on the number of - // vertices in the subgraph set. - auto pos = atomicAdd(d_error_count, int64_t{1}); - d_new_src[pos] = d_vertex_used[s]; - d_new_dst[pos] = d_vertex_used[d]; - if (has_weight) d_new_weight[pos] = graph_weight[e]; - } - }); - - return result; - } else { - return std::make_unique>( - 0, 0, has_weight); - } -} -} // namespace - -namespace cugraph { -namespace subgraph { - -template -std::unique_ptr> extract_subgraph_vertex( - legacy::GraphCOOView const& graph, VT const* vertices, VT num_vertices) -{ - CUGRAPH_EXPECTS(vertices != nullptr, "Invalid input argument: vertices must be non null"); - - cudaStream_t stream{0}; - - if (graph.edge_data == nullptr) { - return extract_subgraph_by_vertices(graph, vertices, num_vertices, stream); - } else { - return extract_subgraph_by_vertices(graph, vertices, num_vertices, stream); - } -} - -template std::unique_ptr> -extract_subgraph_vertex( - legacy::GraphCOOView const&, int32_t const*, int32_t); -template std::unique_ptr> -extract_subgraph_vertex( - legacy::GraphCOOView const&, int32_t const*, int32_t); - -} // namespace subgraph -} // namespace cugraph From 16bb39df8d3369660d9958a80e1afb4332cbbf05 Mon Sep 17 00:00:00 2001 From: Chuck Hastings <45364586+ChuckHastings@users.noreply.github.com> Date: Tue, 11 Apr 2023 12:03:35 -0400 Subject: [PATCH 10/90] Remove legacy renumber and shuffle calls from cython.cu (#3467) With the merge of PR #2949, we no longer need these calls in `cython.cu` Closes #3466 Authors: - Chuck Hastings (https://github.com/ChuckHastings) Approvers: - Joseph Nke (https://github.com/jnke2016) - Naim (https://github.com/naimnv) - Alex Barghi (https://github.com/alexbarghi-nv) - Rick Ratzel (https://github.com/rlratzel) - Seunghwa Kang (https://github.com/seunghwak) URL: https://github.com/rapidsai/cugraph/pull/3467 --- cpp/include/cugraph/utilities/cython.hpp | 204 +-------------- cpp/src/utilities/cython.cu | 238 ------------------ .../cugraph/structure/graph_utilities.pxd | 147 +---------- 3 
files changed, 2 insertions(+), 587 deletions(-) diff --git a/cpp/include/cugraph/utilities/cython.hpp b/cpp/include/cugraph/utilities/cython.hpp index 91dbe2c701e..2573752cb98 100644 --- a/cpp/include/cugraph/utilities/cython.hpp +++ b/cpp/include/cugraph/utilities/cython.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,189 +25,11 @@ namespace cugraph { namespace cython { -enum class numberTypeEnum : int { int32Type, int64Type, floatType, doubleType }; - -// replacement for std::tuple<,,>, since std::tuple is not -// supported in cython -// -template -struct major_minor_weights_t { - explicit major_minor_weights_t(raft::handle_t const& handle) - : shuffled_major_vertices_(0, handle.get_stream()), - shuffled_minor_vertices_(0, handle.get_stream()), - shuffled_weights_(0, handle.get_stream()) - { - } - - rmm::device_uvector& get_major(void) { return shuffled_major_vertices_; } - - rmm::device_uvector& get_minor(void) { return shuffled_minor_vertices_; } - - rmm::device_uvector& get_weights(void) { return shuffled_weights_; } - - std::vector& get_edge_counts(void) { return edge_counts_; } - - std::pair, size_t> get_major_wrap( - void) // const: triggers errors in Cython autogen-ed C++ - { - return std::make_pair(std::make_unique(shuffled_major_vertices_.release()), - sizeof(vertex_t)); - } - - std::pair, size_t> get_minor_wrap(void) // const - { - return std::make_pair(std::make_unique(shuffled_minor_vertices_.release()), - sizeof(vertex_t)); - } - - std::pair, size_t> get_weights_wrap(void) // const - { - return std::make_pair(std::make_unique(shuffled_weights_.release()), - sizeof(weight_t)); - } - - std::unique_ptr> get_edge_counts_wrap(void) // const - { - return std::make_unique>(edge_counts_); - } - - private: - rmm::device_uvector shuffled_major_vertices_; - rmm::device_uvector shuffled_minor_vertices_; - rmm::device_uvector shuffled_weights_; - std::vector edge_counts_{}; -}; - struct graph_generator_t { std::unique_ptr d_source; std::unique_ptr d_destination; }; -// wrapper for renumber_edgelist() return -// (unrenumbering maps, etc.) 
-// -template -struct renum_tuple_t { - explicit renum_tuple_t(raft::handle_t const& handle) : dv_(0, handle.get_stream()), part_() {} - - rmm::device_uvector& get_dv(void) { return dv_; } - - std::pair, size_t> get_dv_wrap( - void) // const: see above explanation - { - return std::make_pair(std::make_unique(dv_.release()), sizeof(vertex_t)); - } - - cugraph::partition_t& get_partition(void) { return part_; } - vertex_t& get_num_vertices(void) { return nv_; } - edge_t& get_num_edges(void) { return ne_; } - - std::vector& get_segment_offsets(void) { return segment_offsets_; } - - std::unique_ptr> get_segment_offsets_wrap() - { // const - return std::make_unique>(segment_offsets_); - } - - // `partition_t` pass-through getters - // - int get_part_row_size() const { return part_.row_comm_size(); } - - int get_part_col_size() const { return part_.col_comm_size(); } - - int get_part_comm_rank() const { return part_.comm_rank(); } - - // FIXME: part_.vertex_partition_offsets() returns a std::vector - // - std::unique_ptr> get_partition_offsets_wrap(void) // const - { - return std::make_unique>(part_.vertex_partition_range_offsets()); - } - - std::pair get_part_local_vertex_range() const - { - auto tpl_v = part_.local_vertex_partition_range(); - return std::make_pair(std::get<0>(tpl_v), std::get<1>(tpl_v)); - } - - vertex_t get_part_local_vertex_first() const - { - return part_.local_vertex_partition_range_first(); - } - - vertex_t get_part_local_vertex_last() const { return part_.local_vertex_partition_range_last(); } - - std::pair get_part_vertex_partition_range(size_t vertex_partition_idx) const - { - auto tpl_v = part_.vertex_partition_range(vertex_partition_idx); - return std::make_pair(std::get<0>(tpl_v), std::get<1>(tpl_v)); - } - - vertex_t get_part_vertex_partition_first(size_t vertex_partition_idx) const - { - return part_.vertex_partition_range_first(vertex_partition_idx); - } - - vertex_t get_part_vertex_partition_last(size_t vertex_partition_idx) const - { - return part_.vertex_partition_range_last(vertex_partition_idx); - } - - vertex_t get_part_vertex_partition_size(size_t vertex_partition_idx) const - { - return part_.vertex_partition_range_size(vertex_partition_idx); - } - - size_t get_part_number_of_matrix_partitions() const - { - return part_.number_of_local_edgex_partitions(); - } - - std::pair get_part_matrix_partition_major_range(size_t partition_idx) const - { - auto tpl_v = part_.local_edgex_partition_major_range(partition_idx); - return std::make_pair(std::get<0>(tpl_v), std::get<1>(tpl_v)); - } - - vertex_t get_part_matrix_partition_major_first(size_t partition_idx) const - { - return part_.local_edge_partition_major_first(partition_idx); - } - - vertex_t get_part_matrix_partition_major_last(size_t partition_idx) const - { - return part_.local_edge_partition_major_range_last(partition_idx); - } - - vertex_t get_part_matrix_partition_major_value_start_offset(size_t partition_idx) const - { - return part_.local_edge_partition_major_value_start_offset(partition_idx); - } - - std::pair get_part_matrix_partition_minor_range() const - { - auto tpl_v = part_.local_edge_partition_minor_range(); - return std::make_pair(std::get<0>(tpl_v), std::get<1>(tpl_v)); - } - - vertex_t get_part_matrix_partition_minor_first() const - { - return part_.local_edge_partition_minor_range_first(); - } - - vertex_t get_part_matrix_partition_minor_last() const - { - return part_.local_edge_partition_minor_range_last(); - } - - private: - rmm::device_uvector dv_; - cugraph::partition_t part_; - 
vertex_t nv_{0}; - edge_t ne_{0}; - std::vector segment_offsets_; -}; - // Wrapper for calling graph generator template std::unique_ptr call_generate_rmat_edgelist(raft::handle_t const& handle, @@ -232,30 +54,6 @@ call_generate_rmat_edgelists(raft::handle_t const& handle, bool clip_and_flip, bool scramble_vertex_ids); -// wrapper for shuffling: -// -template -std::unique_ptr> call_shuffle( - raft::handle_t const& handle, - vertex_t* - edgelist_major_vertices, // [IN / OUT]: groupby_gpu_id_and_shuffle_values() sorts in-place - vertex_t* edgelist_minor_vertices, // [IN / OUT] - weight_t* edgelist_weights, // [IN / OUT] - edge_t num_edgelist_edges, - bool is_weighted); - -// Wrapper for calling renumber_edgelist() inplace: -// -template -std::unique_ptr> call_renumber( - raft::handle_t const& handle, - vertex_t* shuffled_edgelist_src_vertices /* [INOUT] */, - vertex_t* shuffled_edgelist_dst_vertices /* [INOUT] */, - std::vector const& edge_counts, - bool store_transposed, - bool do_expensive_check, - bool multi_gpu); - // Helper for setting up subcommunicators, typically called as part of the // user-initiated comms initialization in Python. // diff --git a/cpp/src/utilities/cython.cu b/cpp/src/utilities/cython.cu index c1ff9c33568..72ea93ffcf1 100644 --- a/cpp/src/utilities/cython.cu +++ b/cpp/src/utilities/cython.cu @@ -14,22 +14,15 @@ * limitations under the License. */ -#include - -#include #include #include #include #include -#include #include #include -#include -#include - #include #include @@ -112,243 +105,12 @@ call_generate_rmat_edgelists(raft::handle_t const& handle, return gg_vec; } -// wrapper for shuffling: -// -template -std::unique_ptr> call_shuffle( - raft::handle_t const& handle, - vertex_t* - edgelist_major_vertices, // [IN / OUT]: groupby_gpu_id_and_shuffle_values() sorts in-place - vertex_t* edgelist_minor_vertices, // [IN / OUT] - weight_t* edgelist_weights, // [IN / OUT] - edge_t num_edgelist_edges, - bool is_weighted) -{ - auto& comm = handle.get_comms(); - auto const comm_size = comm.get_size(); - auto& major_comm = handle.get_subcomm(cugraph::partition_manager::major_comm_name()); - auto const major_comm_size = major_comm.get_size(); - auto& minor_comm = handle.get_subcomm(cugraph::partition_manager::minor_comm_name()); - auto const minor_comm_size = minor_comm.get_size(); - - std::unique_ptr> ptr_ret = - std::make_unique>(handle); - - if (is_weighted) { - auto zip_edge = thrust::make_zip_iterator( - thrust::make_tuple(edgelist_major_vertices, edgelist_minor_vertices, edgelist_weights)); - - std::forward_as_tuple( - std::tie(ptr_ret->get_major(), ptr_ret->get_minor(), ptr_ret->get_weights()), - std::ignore) = - cugraph::groupby_gpu_id_and_shuffle_values( - comm, // handle.get_comms(), - zip_edge, - zip_edge + num_edgelist_edges, - [key_func = - cugraph::detail::compute_gpu_id_from_ext_edge_endpoints_t{ - comm_size, major_comm_size, minor_comm_size}] __device__(auto val) { - return key_func(thrust::get<0>(val), thrust::get<1>(val)); - }, - handle.get_stream()); - } else { - auto zip_edge = thrust::make_zip_iterator( - thrust::make_tuple(edgelist_major_vertices, edgelist_minor_vertices)); - - std::forward_as_tuple(std::tie(ptr_ret->get_major(), ptr_ret->get_minor()), - std::ignore) = - cugraph::groupby_gpu_id_and_shuffle_values( - comm, // handle.get_comms(), - zip_edge, - zip_edge + num_edgelist_edges, - [key_func = - cugraph::detail::compute_gpu_id_from_ext_edge_endpoints_t{ - comm_size, major_comm_size, minor_comm_size}] __device__(auto val) { - return 
key_func(thrust::get<0>(val), thrust::get<1>(val)); - }, - handle.get_stream()); - } - - auto local_partition_id_op = - cugraph::detail::compute_local_edge_partition_id_from_ext_edge_endpoints_t{ - comm_size, major_comm_size, minor_comm_size}; - - auto pair_first = thrust::make_zip_iterator( - thrust::make_tuple(ptr_ret->get_major().data(), ptr_ret->get_minor().data())); - - auto edge_counts = (is_weighted) - ? cugraph::groupby_and_count(pair_first, - pair_first + ptr_ret->get_major().size(), - ptr_ret->get_weights().data(), - local_partition_id_op, - minor_comm_size, - false, - handle.get_stream()) - : cugraph::groupby_and_count(pair_first, - pair_first + ptr_ret->get_major().size(), - local_partition_id_op, - minor_comm_size, - false, - handle.get_stream()); - - std::vector h_edge_counts(edge_counts.size()); - raft::update_host( - h_edge_counts.data(), edge_counts.data(), edge_counts.size(), handle.get_stream()); - handle.sync_stream(); - - ptr_ret->get_edge_counts().resize(h_edge_counts.size()); - for (size_t i = 0; i < h_edge_counts.size(); ++i) { - ptr_ret->get_edge_counts()[i] = static_cast(h_edge_counts[i]); - } - - return ptr_ret; // RVO-ed -} - -// Wrapper for calling renumber_edeglist() inplace: -// TODO: check if return type needs further handling... -// -template -std::unique_ptr> call_renumber( - raft::handle_t const& handle, - vertex_t* shuffled_edgelist_src_vertices /* [INOUT] */, - vertex_t* shuffled_edgelist_dst_vertices /* [INOUT] */, - std::vector const& edge_counts, - bool store_transposed, - bool do_expensive_check, - bool multi_gpu) // bc. cython cannot take non-type template params -{ - // caveat: return values have different types on the 2 branches below: - // - std::unique_ptr> p_ret = - std::make_unique>(handle); - - if (multi_gpu) { - std::vector displacements(edge_counts.size(), edge_t{0}); - std::partial_sum(edge_counts.begin(), edge_counts.end() - 1, displacements.begin() + 1); - std::vector src_ptrs(edge_counts.size()); - std::vector dst_ptrs(src_ptrs.size()); - for (size_t i = 0; i < edge_counts.size(); ++i) { - src_ptrs[i] = shuffled_edgelist_src_vertices + displacements[i]; - dst_ptrs[i] = shuffled_edgelist_dst_vertices + displacements[i]; - } - - cugraph::renumber_meta_t meta{}; - std::tie(p_ret->get_dv(), meta) = - cugraph::renumber_edgelist(handle, - std::nullopt, - src_ptrs, - dst_ptrs, - edge_counts, - std::nullopt, - store_transposed, - do_expensive_check); - p_ret->get_num_vertices() = meta.number_of_vertices; - p_ret->get_num_edges() = meta.number_of_edges; - p_ret->get_partition() = meta.partition; - p_ret->get_segment_offsets() = meta.edge_partition_segment_offsets; - } else { - cugraph::renumber_meta_t meta{}; - std::tie(p_ret->get_dv(), meta) = - cugraph::renumber_edgelist(handle, - std::nullopt, - shuffled_edgelist_src_vertices, - shuffled_edgelist_dst_vertices, - edge_counts[0], - store_transposed, - do_expensive_check); - - p_ret->get_num_vertices() = static_cast(p_ret->get_dv().size()); - p_ret->get_num_edges() = edge_counts[0]; - p_ret->get_partition() = cugraph::partition_t{}; // dummy - p_ret->get_segment_offsets() = meta.segment_offsets; - } - - return p_ret; // RVO-ed (copy ellision) -} - // Helper for setting up subcommunicators void init_subcomms(raft::handle_t& handle, size_t row_comm_size) { partition_manager::init_subcomm(handle, row_comm_size); } -template std::unique_ptr> call_shuffle( - raft::handle_t const& handle, - int32_t* edgelist_major_vertices, - int32_t* edgelist_minor_vertices, - float* edgelist_weights, - int32_t 
num_edgelist_edges, - bool is_weighted); - -template std::unique_ptr> call_shuffle( - raft::handle_t const& handle, - int32_t* edgelist_major_vertices, - int32_t* edgelist_minor_vertices, - float* edgelist_weights, - int64_t num_edgelist_edges, - bool is_weighted); - -template std::unique_ptr> call_shuffle( - raft::handle_t const& handle, - int32_t* edgelist_major_vertices, - int32_t* edgelist_minor_vertices, - double* edgelist_weights, - int32_t num_edgelist_edges, - bool is_weighted); - -template std::unique_ptr> call_shuffle( - raft::handle_t const& handle, - int32_t* edgelist_major_vertices, - int32_t* edgelist_minor_vertices, - double* edgelist_weights, - int64_t num_edgelist_edges, - bool is_weighted); - -template std::unique_ptr> call_shuffle( - raft::handle_t const& handle, - int64_t* edgelist_major_vertices, - int64_t* edgelist_minor_vertices, - float* edgelist_weights, - int64_t num_edgelist_edges, - bool is_weighted); - -template std::unique_ptr> call_shuffle( - raft::handle_t const& handle, - int64_t* edgelist_major_vertices, - int64_t* edgelist_minor_vertices, - double* edgelist_weights, - int64_t num_edgelist_edges, - bool is_weighted); - -// TODO: add the remaining relevant EIDIr's: -// -template std::unique_ptr> call_renumber( - raft::handle_t const& handle, - int32_t* shuffled_edgelist_src_vertices /* [INOUT] */, - int32_t* shuffled_edgelist_dst_vertices /* [INOUT] */, - std::vector const& edge_counts, - bool store_transposed, - bool do_expensive_check, - bool multi_gpu); - -template std::unique_ptr> call_renumber( - raft::handle_t const& handle, - int32_t* shuffled_edgelist_src_vertices /* [INOUT] */, - int32_t* shuffled_edgelist_dst_vertices /* [INOUT] */, - std::vector const& edge_counts, - bool store_transposed, - bool do_expensive_check, - bool multi_gpu); - -template std::unique_ptr> call_renumber( - raft::handle_t const& handle, - int64_t* shuffled_edgelist_src_vertices /* [INOUT] */, - int64_t* shuffled_edgelist_dst_vertices /* [INOUT] */, - std::vector const& edge_counts, - bool store_transposed, - bool do_expensive_check, - bool multi_gpu); - template std::unique_ptr call_generate_rmat_edgelist( raft::handle_t const& handle, size_t scale, diff --git a/python/cugraph/cugraph/structure/graph_utilities.pxd b/python/cugraph/cugraph/structure/graph_utilities.pxd index 74edb61fafa..0bf0f829d1b 100644 --- a/python/cugraph/cugraph/structure/graph_utilities.pxd +++ b/python/cugraph/cugraph/structure/graph_utilities.pxd @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -28,64 +28,6 @@ from pylibraft.common.handle cimport handle_t # C++ graph utilities cdef extern from "cugraph/utilities/cython.hpp" namespace "cugraph::cython": - - ctypedef enum numberTypeEnum: - int32Type "cugraph::cython::numberTypeEnum::int32Type" - int64Type "cugraph::cython::numberTypeEnum::int64Type" - floatType "cugraph::cython::numberTypeEnum::floatType" - doubleType "cugraph::cython::numberTypeEnum::doubleType" - - cdef cppclass graph_container_t: - pass - - cdef void populate_graph_container( - graph_container_t &graph_container, - handle_t &handle, - void *src_vertices, - void *dst_vertices, - void *weights, - void *vertex_partition_offsets, - void *segment_offsets, - size_t num_segments, - numberTypeEnum vertexType, - numberTypeEnum edgeType, - numberTypeEnum weightType, - size_t num_local_edges, - size_t num_global_vertices, - size_t num_global_edges, - bool is_weighted, - bool is_symmetric, - bool transposed, - bool multi_gpu) except + - - ctypedef enum graphTypeEnum: - LegacyCSR "cugraph::cython::graphTypeEnum::LegacyCSR" - LegacyCSC "cugraph::cython::graphTypeEnum::LegacyCSC" - LegacyCOO "cugraph::cython::graphTypeEnum::LegacyCOO" - - cdef cppclass cy_multi_edgelists_t: - size_t number_of_vertices - size_t number_of_edges - size_t number_of_subgraph - unique_ptr[device_buffer] src_indices - unique_ptr[device_buffer] dst_indices - unique_ptr[device_buffer] edge_data - unique_ptr[device_buffer] subgraph_offsets - - cdef cppclass random_walk_ret_t: - size_t coalesced_sz_v_ - size_t coalesced_sz_w_ - size_t num_paths_ - size_t max_depth_ - unique_ptr[device_buffer] d_coalesced_v_ - unique_ptr[device_buffer] d_coalesced_w_ - unique_ptr[device_buffer] d_sizes_ - - cdef cppclass random_walk_path_t: - unique_ptr[device_buffer] d_v_offsets - unique_ptr[device_buffer] d_w_sizes - unique_ptr[device_buffer] d_w_offsets - cdef cppclass graph_generator_t: unique_ptr[device_buffer] d_source unique_ptr[device_buffer] d_destination @@ -93,91 +35,4 @@ cdef extern from "cugraph/utilities/cython.hpp" namespace "cugraph::cython": cdef extern from "" namespace "std" nogil: cdef device_buffer move(device_buffer) cdef unique_ptr[device_buffer] move(unique_ptr[device_buffer]) - cdef cy_multi_edgelists_t move(cy_multi_edgelists_t) - cdef unique_ptr[cy_multi_edgelists_t] move(unique_ptr[cy_multi_edgelists_t]) - -# renumber_edgelist() interface utilities: -# -# -# 1. `cdef extern partition_t`: -# -cdef extern from "cugraph/graph_view.hpp" namespace "cugraph": - - cdef cppclass partition_t[vertex_t]: - pass - - -# 2. return type for shuffle: -# -cdef extern from "cugraph/utilities/cython.hpp" namespace "cugraph::cython": - - cdef cppclass major_minor_weights_t[vertex_t, edge_t, weight_t]: - major_minor_weights_t(const handle_t &handle) - pair[unique_ptr[device_buffer], size_t] get_major_wrap() - pair[unique_ptr[device_buffer], size_t] get_minor_wrap() - pair[unique_ptr[device_buffer], size_t] get_weights_wrap() - unique_ptr[vector[edge_t]] get_edge_counts_wrap() - - -ctypedef fused shuffled_vertices_t: - major_minor_weights_t[int, int, float] - major_minor_weights_t[int, int, double] - major_minor_weights_t[int, long, float] - major_minor_weights_t[int, long, double] - major_minor_weights_t[long, long, float] - major_minor_weights_t[long, long, double] - -# 3. 
return type for renumber: -# -cdef extern from "cugraph/utilities/cython.hpp" namespace "cugraph::cython": - - cdef cppclass renum_tuple_t[vertex_t, edge_t]: - renum_tuple_t(const handle_t &handle) - pair[unique_ptr[device_buffer], size_t] get_dv_wrap() - vertex_t& get_num_vertices() - edge_t& get_num_edges() - vector[vertex_t]& get_segment_offsets() - unique_ptr[vector[vertex_t]] get_segment_offsets_wrap() - int get_part_row_size() - int get_part_col_size() - int get_part_comm_rank() - unique_ptr[vector[vertex_t]] get_partition_offsets_wrap() - pair[vertex_t, vertex_t] get_part_local_vertex_range() - vertex_t get_part_local_vertex_first() - vertex_t get_part_local_vertex_last() - pair[vertex_t, vertex_t] get_part_vertex_partition_range(size_t vertex_partition_idx) - vertex_t get_part_vertex_partition_first(size_t vertex_partition_idx) - vertex_t get_part_vertex_partition_last(size_t vertex_partition_idx) - vertex_t get_part_vertex_partition_size(size_t vertex_partition_idx) - size_t get_part_number_of_matrix_partitions() - vertex_t get_part_matrix_partition_major_first(size_t partition_idx) - vertex_t get_part_matrix_partition_major_last(size_t partition_idx) - vertex_t get_part_matrix_partition_major_value_start_offset(size_t partition_idx) - pair[vertex_t, vertex_t] get_part_matrix_partition_minor_range() - vertex_t get_part_matrix_partition_minor_first() - vertex_t get_part_matrix_partition_minor_last() - -# 4. `sort_and_shuffle_values()` wrapper: -# -cdef extern from "cugraph/utilities/cython.hpp" namespace "cugraph::cython": - - cdef unique_ptr[major_minor_weights_t[vertex_t, edge_t, weight_t]] call_shuffle[vertex_t, edge_t, weight_t]( - const handle_t &handle, - vertex_t *edgelist_major_vertices, - vertex_t *edgelist_minor_vertices, - weight_t* edgelist_weights, - edge_t num_edges, - bool is_weighted) except + - -# 5. `renumber_edgelist()` wrapper -# -cdef extern from "cugraph/utilities/cython.hpp" namespace "cugraph::cython": - cdef unique_ptr[renum_tuple_t[vertex_t, edge_t]] call_renumber[vertex_t, edge_t]( - const handle_t &handle, - vertex_t *edgelist_major_vertices, - vertex_t *edgelist_minor_vertices, - const vector[edge_t]& edge_counts, - bool store_transposed, - bool do_check, - bool multi_gpu) except + From bdc8d9df943a0e7028132a339e22bd2d9b133c48 Mon Sep 17 00:00:00 2001 From: Chuck Hastings <45364586+ChuckHastings@users.noreply.github.com> Date: Wed, 12 Apr 2023 14:16:48 -0400 Subject: [PATCH 11/90] Fix issue with latest rapids-make (#3481) https://github.com/rapidsai/rapids-cmake/commit/fb7033e8860594beb5b0351c204068c12f1d6a0a removed a deprecated feature that we were inadvertently still using. This PR updates the second reference to `CMAKE_CUDA_ARCHITECTURES` as `ALL` Authors: - Chuck Hastings (https://github.com/ChuckHastings) Approvers: - Brad Rees (https://github.com/BradReesWork) URL: https://github.com/rapidsai/cugraph/pull/3481 --- build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.sh b/build.sh index 063f881020d..a8e97d924c6 100755 --- a/build.sh +++ b/build.sh @@ -272,7 +272,7 @@ if buildAll || hasArg libcugraph_etl; then CUGRAPH_CMAKE_CUDA_ARCHITECTURES="NATIVE" echo "Building for the architecture of the GPU in the system..." else - CUGRAPH_CMAKE_CUDA_ARCHITECTURES="ALL" + CUGRAPH_CMAKE_CUDA_ARCHITECTURES="RAPIDS" echo "Building for *ALL* supported GPU architectures..." 
fi
 
     mkdir -p ${LIBCUGRAPH_ETL_BUILD_DIR}

From fbef6f7fac5cfc7be95f41fafa27b22da5bc14de Mon Sep 17 00:00:00 2001
From: Brad Rees <34135411+BradReesWork@users.noreply.github.com>
Date: Thu, 13 Apr 2023 12:03:28 -0400
Subject: [PATCH 12/90] Sphinx updates (#3468)

Sphinx is very picky and looks for indentation and blank lines. Also,
during the build, if there is an error, Sphinx just ignores the function
that caused the error rather than terminating the build.
Some docstrings also used the reserved characters * and _, which are used
for formatting.

This PR:
addresses Sphinx Warnings and Errors
addresses issues where rst or md is not added to the output
and any other issues

You will see a lot of additional blank lines added and some text
clean-up for readability.
Seems like the PR also picked up all the 23.06 updates - so half
the files are not really for this PR

Authors:
  - Brad Rees (https://github.com/BradReesWork)

Approvers:
  - Don Acosta (https://github.com/acostadon)
  - AJ Schmidt (https://github.com/ajschmidt8)
  - Rick Ratzel (https://github.com/rlratzel)

URL: https://github.com/rapidsai/cugraph/pull/3468
---
 .../source/api_docs/cugraph/centrality.rst    | 38 +++++--
 .../source/api_docs/cugraph/components.rst    |  1 -
 .../api_docs/cugraph/helper_functions.rst     |  1 -
 .../cugraph/{cugraph_top.rst => index.rst}    |  0
 .../source/api_docs/cugraph/layout.rst        |  1 -
 .../api_docs/cugraph/linear_assignment.rst    |  1 -
 .../source/api_docs/cugraph/link_analysis.rst |  1 -
 .../api_docs/cugraph/link_prediction.rst      |  1 -
 .../source/api_docs/cugraph/structure.rst     |  7 +-
 .../source/api_docs/cugraph/traversal.rst     |  1 -
 docs/cugraph/source/api_docs/cugraph/tree.rst |  1 -
 docs/cugraph/source/api_docs/index.rst        | 10 +-
 .../cugraph/source/api_docs/service/index.rst | 10 ++
 docs/cugraph/source/api_docs/structure.rst    | 104 ------------------
 docs/cugraph/source/basics/index.rst          |  1 -
 docs/cugraph/source/index.rst                 |  3 +
 .../source/tutorials/cugraph_notebooks.md     |  4 +-
 .../cugraph_pyg/data/cugraph_store.py         | 21 ++--
 .../client/cugraph_service_client/types.py    |  2 +-
 .../cugraph_service_server/cugraph_handler.py |  4 +-
 .../cugraph/centrality/degree_centrality.py   |  4 +-
 .../centrality/eigenvector_centrality.py      |  4 +-
 .../cugraph/centrality/katz_centrality.py     |  3 +-
 .../dask/centrality/eigenvector_centrality.py |  1 +
 .../cugraph/dask/link_analysis/pagerank.py    |  7 ++
 .../cugraph/cugraph/link_analysis/pagerank.py |  7 ++
 .../cugraph/link_prediction/sorensen.py       |  4 +-
 .../cugraph/link_prediction/woverlap.py       |  6 +-
 .../cugraph/link_prediction/wsorensen.py      |  5 +-
 .../cugraph/structure/graph_classes.py        |  4 +-
 .../graph_implementation/simpleGraph.py       | 12 ++
 python/pylibcugraph/pylibcugraph/bfs.pyx      | 80 +++++++-------
 32 files changed, 154 insertions(+), 195 deletions(-)
 rename docs/cugraph/source/api_docs/cugraph/{cugraph_top.rst => index.rst} (100%)
 create mode 100644 docs/cugraph/source/api_docs/service/index.rst
 delete mode 100644 docs/cugraph/source/api_docs/structure.rst

diff --git a/docs/cugraph/source/api_docs/cugraph/centrality.rst b/docs/cugraph/source/api_docs/cugraph/centrality.rst
index c3b026597d9..344c95195b7 100644
--- a/docs/cugraph/source/api_docs/cugraph/centrality.rst
+++ b/docs/cugraph/source/api_docs/cugraph/centrality.rst
@@ -7,42 +7,64 @@ Centrality
 Betweenness Centrality
 ----------------------
+single-GPU
+^^^^^^^^^^
 ..
autosummary:: :toctree: ../api/cugraph/ - cugraph.betweenness_centrality - cugraph.edge_betweenness_centrality + cugraph.centrality.betweenness_centrality + cugraph.centrality.edge_betweenness_centrality + +multi-GPU +^^^^^^^^^^ +.. autosummary:: + :toctree: ../api/cugraph/ + + cugraph.dask.centrality.betweenness_centrality + + Katz Centrality --------------- +single-GPU +^^^^^^^^^^ .. autosummary:: :toctree: ../api/cugraph/ - cugraph.katz_centrality + cugraph.centrality.katz_centrality -Katz Centrality (MG) --------------------- +multi-GPU +^^^^^^^^^^ .. autosummary:: :toctree: ../api/cugraph/ cugraph.dask.centrality.katz_centrality.katz_centrality + Degree Centrality ----------------- +single-GPU +^^^^^^^^^^ .. autosummary:: :toctree: ../api/cugraph/ - cugraph.degree_centrality + cugraph.centrality.degree_centrality + +multi-GPU +^^^^^^^^^^ + Eigenvector Centrality ---------------------- +single-GPU +^^^^^^^^^^ .. autosummary:: :toctree: ../api/cugraph/ cugraph.centrality.eigenvector_centrality -Eigenvector Centrality (MG) ---------------------------- +multi-GPU +^^^^^^^^^^ .. autosummary:: :toctree: ../api/cugraph/ diff --git a/docs/cugraph/source/api_docs/cugraph/components.rst b/docs/cugraph/source/api_docs/cugraph/components.rst index 5835972cd4d..560aa1f8ca0 100644 --- a/docs/cugraph/source/api_docs/cugraph/components.rst +++ b/docs/cugraph/source/api_docs/cugraph/components.rst @@ -4,7 +4,6 @@ Components .. currentmodule:: cugraph - Connected Components -------------------- .. autosummary:: diff --git a/docs/cugraph/source/api_docs/cugraph/helper_functions.rst b/docs/cugraph/source/api_docs/cugraph/helper_functions.rst index ec3248bfa27..02cb599ae55 100644 --- a/docs/cugraph/source/api_docs/cugraph/helper_functions.rst +++ b/docs/cugraph/source/api_docs/cugraph/helper_functions.rst @@ -4,7 +4,6 @@ DASK MG Helper functions .. currentmodule:: cugraph - Methods ------- .. autosummary:: diff --git a/docs/cugraph/source/api_docs/cugraph/cugraph_top.rst b/docs/cugraph/source/api_docs/cugraph/index.rst similarity index 100% rename from docs/cugraph/source/api_docs/cugraph/cugraph_top.rst rename to docs/cugraph/source/api_docs/cugraph/index.rst diff --git a/docs/cugraph/source/api_docs/cugraph/layout.rst b/docs/cugraph/source/api_docs/cugraph/layout.rst index 1c097346b6c..ed97caf549f 100644 --- a/docs/cugraph/source/api_docs/cugraph/layout.rst +++ b/docs/cugraph/source/api_docs/cugraph/layout.rst @@ -4,7 +4,6 @@ Layout .. currentmodule:: cugraph - Force Atlas 2 ------------- .. autosummary:: diff --git a/docs/cugraph/source/api_docs/cugraph/linear_assignment.rst b/docs/cugraph/source/api_docs/cugraph/linear_assignment.rst index dfdf6da96db..e0b0b4d11bd 100644 --- a/docs/cugraph/source/api_docs/cugraph/linear_assignment.rst +++ b/docs/cugraph/source/api_docs/cugraph/linear_assignment.rst @@ -4,7 +4,6 @@ Linear Assignment .. currentmodule:: cugraph - Hungarian --------- .. autosummary:: diff --git a/docs/cugraph/source/api_docs/cugraph/link_analysis.rst b/docs/cugraph/source/api_docs/cugraph/link_analysis.rst index 5f977b47724..48b5ec1176f 100644 --- a/docs/cugraph/source/api_docs/cugraph/link_analysis.rst +++ b/docs/cugraph/source/api_docs/cugraph/link_analysis.rst @@ -4,7 +4,6 @@ Link Analysis .. currentmodule:: cugraph - HITS ---- .. 
autosummary:: diff --git a/docs/cugraph/source/api_docs/cugraph/link_prediction.rst b/docs/cugraph/source/api_docs/cugraph/link_prediction.rst index b2134fba9f9..f05dce6f721 100644 --- a/docs/cugraph/source/api_docs/cugraph/link_prediction.rst +++ b/docs/cugraph/source/api_docs/cugraph/link_prediction.rst @@ -4,7 +4,6 @@ Link Prediction .. currentmodule:: cugraph - Jaccard Coefficient ------------------- .. autosummary:: diff --git a/docs/cugraph/source/api_docs/cugraph/structure.rst b/docs/cugraph/source/api_docs/cugraph/structure.rst index 5114cb57b47..6369e1bb3fd 100644 --- a/docs/cugraph/source/api_docs/cugraph/structure.rst +++ b/docs/cugraph/source/api_docs/cugraph/structure.rst @@ -10,8 +10,6 @@ Constructors Graph MultiGraph - BiPartiteGraph - Adding Data @@ -19,7 +17,6 @@ Adding Data .. autosummary:: :toctree: ../api/cugraph/ - Graph.from_cudf_adjlist Graph.from_cudf_edgelist Graph.from_dask_cudf_edgelist @@ -35,7 +32,7 @@ Adding Data Checks ------ .. autosummary:: - :toctree: ../api/cugraph/ + :toctree: ../api/cugraph/ Graph.has_isolated_vertices Graph.is_bipartite @@ -82,8 +79,6 @@ NumberMap :toctree: ../api/cugraph/ cugraph.structure.NumberMap - cugraph.structure.NumberMap.MultiGPU - cugraph.structure.NumberMap.SingleGPU cugraph.structure.NumberMap.from_internal_vertex_id cugraph.structure.NumberMap.to_internal_vertex_id cugraph.structure.NumberMap.add_internal_vertex_id diff --git a/docs/cugraph/source/api_docs/cugraph/traversal.rst b/docs/cugraph/source/api_docs/cugraph/traversal.rst index c8fcc6b721c..31296f3b850 100644 --- a/docs/cugraph/source/api_docs/cugraph/traversal.rst +++ b/docs/cugraph/source/api_docs/cugraph/traversal.rst @@ -4,7 +4,6 @@ Traversal .. currentmodule:: cugraph - Breadth-first-search -------------------- .. autosummary:: diff --git a/docs/cugraph/source/api_docs/cugraph/tree.rst b/docs/cugraph/source/api_docs/cugraph/tree.rst index 38bfbad7d62..5ba2242ebb6 100644 --- a/docs/cugraph/source/api_docs/cugraph/tree.rst +++ b/docs/cugraph/source/api_docs/cugraph/tree.rst @@ -4,7 +4,6 @@ Tree .. currentmodule:: cugraph - Minimum Spanning Tree --------------------- .. autosummary:: diff --git a/docs/cugraph/source/api_docs/index.rst b/docs/cugraph/source/api_docs/index.rst index f6307d5ac36..229bea4517a 100644 --- a/docs/cugraph/source/api_docs/index.rst +++ b/docs/cugraph/source/api_docs/index.rst @@ -1,16 +1,16 @@ Python API reference ==================== -This page provides a list of all publicly accessible modules, methods and classes through -``cugraph.*`` namespace. +This page provides a list of all publicly accessible Python modules, +methods and classes through ``cugraph.*`` namespace. .. toctree:: :maxdepth: 2 :caption: Python API Documentation - cugraph/cugraph_top.rst + cugraph/index.rst plc/pylibcugraph.rst cugraph-dgl/cugraph_dgl.rst cugraph-pyg/cugraph_pyg.rst - service/cugraph_service_client.rst - service/cugraph_service_server.rst + service/index.rst + diff --git a/docs/cugraph/source/api_docs/service/index.rst b/docs/cugraph/source/api_docs/service/index.rst new file mode 100644 index 00000000000..a58cf207456 --- /dev/null +++ b/docs/cugraph/source/api_docs/service/index.rst @@ -0,0 +1,10 @@ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +cugraph-service API Reference +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
toctree:: + :maxdepth: 2 + :caption: cugraph-service API Documentation + + cugraph_service_client + cugraph_service_server \ No newline at end of file diff --git a/docs/cugraph/source/api_docs/structure.rst b/docs/cugraph/source/api_docs/structure.rst deleted file mode 100644 index 0d6e287927a..00000000000 --- a/docs/cugraph/source/api_docs/structure.rst +++ /dev/null @@ -1,104 +0,0 @@ -============= -Graph Classes -============= -.. currentmodule:: cugraph - -Constructors ------------- -.. autosummary:: - :toctree: api/ - - Graph - MultiGraph - BiPartiteGraph - - - -Adding Data ------------ -.. autosummary:: - :toctree: api/ - - - Graph.from_cudf_adjlist - Graph.from_cudf_edgelist - Graph.from_dask_cudf_edgelist - Graph.from_pandas_adjacency - Graph.from_pandas_edgelist - Graph.from_numpy_array - Graph.from_numpy_matrix - Graph.add_internal_vertex_id - Graph.add_nodes_from - Graph.clear - Graph.unrenumber - -Checks ------- -.. autosummary:: - :toctree: api/ - - Graph.has_isolated_vertices - Graph.is_bipartite - Graph.is_directed - Graph.is_multigraph - Graph.is_multipartite - Graph.is_renumbered - Graph.is_weighted - Graph.lookup_internal_vertex_id - Graph.to_directed - Graph.to_undirected - - -Symmetrize ----------- -.. autosummary:: - :toctree: api/ - - cugraph.symmetrize - cugraph.symmetrize_ddf - cugraph.symmetrize_df - - -Conversion from Other Formats ------------------------------ -.. autosummary:: - :toctree: api/ - - cugraph.from_adjlist - cugraph.from_cudf_edgelist - cugraph.from_edgelist - cugraph.from_numpy_array - cugraph.from_numpy_matrix - cugraph.from_pandas_adjacency - cugraph.from_pandas_edgelist - cugraph.to_numpy_array - cugraph.to_numpy_matrix - cugraph.to_pandas_adjacency - cugraph.to_pandas_edgelist - -NumberMap ------------------------------ -.. autosummary:: - :toctree: api/ - - cugraph.structure.NumberMap - cugraph.structure.NumberMap.MultiGPU - cugraph.structure.NumberMap.SingleGPU - cugraph.structure.NumberMap.from_internal_vertex_id - cugraph.structure.NumberMap.to_internal_vertex_id - cugraph.structure.NumberMap.add_internal_vertex_id - cugraph.structure.NumberMap.compute_vals - cugraph.structure.NumberMap.compute_vals_types - cugraph.structure.NumberMap.generate_unused_column_name - cugraph.structure.NumberMap.renumber - cugraph.structure.NumberMap.renumber_and_segment - cugraph.structure.NumberMap.set_renumbered_col_names - cugraph.structure.NumberMap.unrenumber - cugraph.structure.NumberMap.vertex_column_size - -Other ------------------------------ -.. autosummary:: - :toctree: api/ - - cugraph.hypergraph diff --git a/docs/cugraph/source/basics/index.rst b/docs/cugraph/source/basics/index.rst index 1875ac22bd8..7bba301b657 100644 --- a/docs/cugraph/source/basics/index.rst +++ b/docs/cugraph/source/basics/index.rst @@ -7,6 +7,5 @@ Basics :maxdepth: 2 cugraph_intro - cugraph_toc.md nx_transition cugraph_cascading diff --git a/docs/cugraph/source/index.rst b/docs/cugraph/source/index.rst index 12bc74aea2b..e6731ed51da 100644 --- a/docs/cugraph/source/index.rst +++ b/docs/cugraph/source/index.rst @@ -5,6 +5,9 @@ into the RAPIDS data science ecosystem and allows the data scientist to easily call graph algorithms using data stored in GPU DataFrames, NetworkX Graphs, or even CuPy or SciPy sparse Matrices. +Note: We are redoing all of our documents, please be patient as we update +the docs and links + .. 
toctree:: :maxdepth: 2 :caption: Contents: diff --git a/docs/cugraph/source/tutorials/cugraph_notebooks.md b/docs/cugraph/source/tutorials/cugraph_notebooks.md index 1624ef10aa5..ff9d6b199a2 100644 --- a/docs/cugraph/source/tutorials/cugraph_notebooks.md +++ b/docs/cugraph/source/tutorials/cugraph_notebooks.md @@ -1,6 +1,6 @@ # cuGraph Notebooks -![GraphAnalyticsFigure](img/GraphAnalyticsFigure.jpg) +![GraphAnalyticsFigure](https://github.com/rapidsai/cugraph/tree/main/img/GraphAnalyticsFigure.jpg) This repository contains a collection of Jupyter Notebooks that outline how to run various cuGraph analytics. The notebooks do not address a complete data science problem. The notebooks are simply examples of how to run the graph analytics. Manipulation of the data before or after the graph analytic is not covered here. Extended, more problem focused, notebooks are being created and available https://github.com/rapidsai/notebooks-extended @@ -74,4 +74,4 @@ Unless required by applicable law or agreed to in writing, software distributed -![RAPIDS](img/rapids_logo.png) \ No newline at end of file +![RAPIDS](https://github.com/rapidsai/cugraph/tree/main/img/rapids_logo.png) \ No newline at end of file diff --git a/python/cugraph-pyg/cugraph_pyg/data/cugraph_store.py b/python/cugraph-pyg/cugraph_pyg/data/cugraph_store.py index 300c56fe6a7..7c8767e2a70 100644 --- a/python/cugraph-pyg/cugraph_pyg/data/cugraph_store.py +++ b/python/cugraph-pyg/cugraph_pyg/data/cugraph_store.py @@ -216,20 +216,25 @@ def __init__( G: dict[str, tuple[TensorType]] or dict[str, int] (Required) Dictionary of edge indices. Option 1 (graph in memory): - Pass the edge indices - i.e. { - ('author', 'writes', 'paper'): [[0,1,2],[2,0,1]], - ('author', 'affiliated', 'institution'): [[0,1],[0,1]] + + Pass the edge indices: i.e. + { + ('author', 'writes', 'paper'): [[0,1,2],[2,0,1]], + ('author', 'affiliated', 'institution'): [[0,1],[0,1]] } + + Option 2 (graph not in memory): - Pass the number of edges - i.e. { - ('author', 'writes', 'paper'): 2, - ('author', 'affiliated', 'institution'): 2 + + Pass the number of edges: i.e. + { + ('author', 'writes', 'paper'): 2, + ('author', 'affiliated', 'institution'): 2 } If the graph is not in memory, manipulating the edge indices or calling sampling is not possible. This is for cases where sampling has already been done and samples were written to disk. + Note: the internal cugraph representation will use offsetted vertex and edge ids. diff --git a/python/cugraph-service/client/cugraph_service_client/types.py b/python/cugraph-service/client/cugraph_service_client/types.py index a78e06169ad..509508042b4 100644 --- a/python/cugraph-service/client/cugraph_service_client/types.py +++ b/python/cugraph-service/client/cugraph_service_client/types.py @@ -35,7 +35,7 @@ class UnionWrapper: """ Provides easy conversions between py objs and Thrift "unions". This is used - as a base class for the "*Wrapper" classes below. Together with the derived + as a base class for the "Wrapper" classes below. Together with the derived classes below, these objects allow the caller to go from py objects/Thrift unions to Thrift unions/py objects. 
""" diff --git a/python/cugraph-service/server/cugraph_service_server/cugraph_handler.py b/python/cugraph-service/server/cugraph_service_server/cugraph_handler.py index 6e9559dab56..6cdf0d793d4 100644 --- a/python/cugraph-service/server/cugraph_service_server/cugraph_handler.py +++ b/python/cugraph-service/server/cugraph_service_server/cugraph_handler.py @@ -222,7 +222,7 @@ def get_server_info(self): def load_graph_creation_extensions(self, extension_dir_or_mod_path): """ - Loads ("imports") all modules matching the pattern *_extension.py in the + Loads ("imports") all modules matching the pattern '_extension.py' in the directory specified by extension_dir_or_mod_path. extension_dir_or_mod_path can be either a path to a directory on disk, or a python import path to a package. @@ -257,7 +257,7 @@ def load_graph_creation_extensions(self, extension_dir_or_mod_path): def load_extensions(self, extension_dir_or_mod_path): """ - Loads ("imports") all modules matching the pattern *_extension.py in the + Loads ("imports") all modules matching the pattern _extension.py in the directory specified by extension_dir_or_mod_path. extension_dir_or_mod_path can be either a path to a directory on disk, or a python import path to a package. diff --git a/python/cugraph/cugraph/centrality/degree_centrality.py b/python/cugraph/cugraph/centrality/degree_centrality.py index 5d6a0a02bab..66946afded2 100644 --- a/python/cugraph/cugraph/centrality/degree_centrality.py +++ b/python/cugraph/cugraph/centrality/degree_centrality.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -36,8 +36,10 @@ def degree_centrality(G, normalized=True): df : cudf.DataFrame or Dictionary if using NetworkX GPU data frame containing two cudf.Series of size V: the vertex identifiers and the corresponding degree centrality values. + df['vertex'] : cudf.Series Contains the vertex identifiers + df['degree_centrality'] : cudf.Series Contains the degree centrality of vertices diff --git a/python/cugraph/cugraph/centrality/eigenvector_centrality.py b/python/cugraph/cugraph/centrality/eigenvector_centrality.py index ef2f4104cc4..07cbfefaaf1 100644 --- a/python/cugraph/cugraph/centrality/eigenvector_centrality.py +++ b/python/cugraph/cugraph/centrality/eigenvector_centrality.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -59,8 +59,10 @@ def eigenvector_centrality(G, max_iter=100, tol=1.0e-6): df : cudf.DataFrame or Dictionary if using NetworkX GPU data frame containing two cudf.Series of size V: the vertex identifiers and the corresponding eigenvector centrality values. + df['vertex'] : cudf.Series Contains the vertex identifiers + df['eigenvector_centrality'] : cudf.Series Contains the eigenvector centrality of vertices diff --git a/python/cugraph/cugraph/centrality/katz_centrality.py b/python/cugraph/cugraph/centrality/katz_centrality.py index 7a6b240ba24..ffede18b5d2 100644 --- a/python/cugraph/cugraph/centrality/katz_centrality.py +++ b/python/cugraph/cugraph/centrality/katz_centrality.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. 
+# Copyright (c) 2019-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -97,6 +97,7 @@ def katz_centrality( df : cudf.DataFrame or Dictionary if using NetworkX GPU data frame containing two cudf.Series of size V: the vertex identifiers and the corresponding katz centrality values. + df['vertex'] : cudf.Series Contains the vertex identifiers df['katz_centrality'] : cudf.Series diff --git a/python/cugraph/cugraph/dask/centrality/eigenvector_centrality.py b/python/cugraph/cugraph/dask/centrality/eigenvector_centrality.py index f2ac8cebdc6..0dcd2b38546 100644 --- a/python/cugraph/cugraph/dask/centrality/eigenvector_centrality.py +++ b/python/cugraph/cugraph/dask/centrality/eigenvector_centrality.py @@ -89,6 +89,7 @@ def eigenvector_centrality(input_graph, max_iter=100, tol=1.0e-6): df : dask_cudf.DataFrame GPU data frame containing two cudf.Series of size V: the vertex identifiers and the corresponding eigenvector centrality values. + df['vertex'] : cudf.Series Contains the vertex identifiers df['eigenvector_centrality'] : cudf.Series diff --git a/python/cugraph/cugraph/dask/link_analysis/pagerank.py b/python/cugraph/cugraph/dask/link_analysis/pagerank.py index 75d5b6d16c6..4aba5725c1b 100644 --- a/python/cugraph/cugraph/dask/link_analysis/pagerank.py +++ b/python/cugraph/cugraph/dask/link_analysis/pagerank.py @@ -178,8 +178,10 @@ def pagerank( personalization : cudf.Dataframe, optional (default=None) GPU Dataframe containing the personalization information. (a performance optimization) + personalization['vertex'] : cudf.Series Subset of vertices of graph for personalization + personalization['values'] : cudf.Series Personalization values for vertices @@ -187,8 +189,10 @@ def pagerank( GPU Dataframe containing the precomputed vertex out weight (a performance optimization) information. + precomputed_vertex_out_weight['vertex'] : cudf.Series Subset of vertices of graph for precomputed_vertex_out_weight + precomputed_vertex_out_weight['sums'] : cudf.Series Corresponding precomputed sum of outgoing vertices weight @@ -211,8 +215,10 @@ def pagerank( nstart : cudf.Dataframe, optional (default=None) GPU Dataframe containing the initial guess for pagerank. (a performance optimization) + nstart['vertex'] : cudf.Series Subset of vertices of graph for initial guess for pagerank values + nstart['values'] : cudf.Series Pagerank values for vertices @@ -234,6 +240,7 @@ def pagerank( ddf['vertex'] : dask_cudf.Series Contains the vertex identifiers + ddf['pagerank'] : dask_cudf.Series Contains the PageRank score diff --git a/python/cugraph/cugraph/link_analysis/pagerank.py b/python/cugraph/cugraph/link_analysis/pagerank.py index 83b8af35e4c..6696512dcf0 100644 --- a/python/cugraph/cugraph/link_analysis/pagerank.py +++ b/python/cugraph/cugraph/link_analysis/pagerank.py @@ -112,16 +112,20 @@ def pagerank( personalization : cudf.Dataframe, optional (default=None) GPU Dataframe containing the personalization information. (a performance optimization) + personalization['vertex'] : cudf.Series Subset of vertices of graph for personalization + personalization['values'] : cudf.Series Personalization values for vertices precomputed_vertex_out_weight : cudf.Dataframe, optional (default=None) GPU Dataframe containing the precomputed vertex out weight information(a performance optimization). 
+ precomputed_vertex_out_weight['vertex'] : cudf.Series Subset of vertices of graph for precomputed_vertex_out_weight + precomputed_vertex_out_weight['sums'] : cudf.Series Corresponding precomputed sum of outgoing vertices weight @@ -144,8 +148,10 @@ def pagerank( nstart : cudf.Dataframe, optional (default=None) GPU Dataframe containing the initial guess for pagerank. (a performance optimization). + nstart['vertex'] : cudf.Series Subset of vertices of graph for initial guess for pagerank values + nstart['values'] : cudf.Series Pagerank values for vertices @@ -175,6 +181,7 @@ def pagerank( df['vertex'] : cudf.Series Contains the vertex identifiers + df['pagerank'] : cudf.Series Contains the PageRank score diff --git a/python/cugraph/cugraph/link_prediction/sorensen.py b/python/cugraph/cugraph/link_prediction/sorensen.py index 03db9b74db0..20238e10464 100644 --- a/python/cugraph/cugraph/link_prediction/sorensen.py +++ b/python/cugraph/cugraph/link_prediction/sorensen.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2022, NVIDIA CORPORATION. +# Copyright (c) 2021-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -60,9 +60,11 @@ def sorensen(input_graph, vertex_pair=None): df['first'] : cudf.Series The first vertex ID of each pair (will be identical to first if specified) + df['second'] : cudf.Series The second vertex ID of each pair (will be identical to second if specified) + df['sorensen_coeff'] : cudf.Series The computed Sorensen coefficient between the source and destination vertices diff --git a/python/cugraph/cugraph/link_prediction/woverlap.py b/python/cugraph/cugraph/link_prediction/woverlap.py index 87498c72e51..c7d4f56a428 100644 --- a/python/cugraph/cugraph/link_prediction/woverlap.py +++ b/python/cugraph/cugraph/link_prediction/woverlap.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -42,6 +42,7 @@ def overlap_w(input_graph, weights, vertex_pair=None): weights['vertex'] : cudf.Series Contains the vertex identifiers + weights['weight'] : cudf.Series Contains the weights of vertices @@ -60,8 +61,10 @@ def overlap_w(input_graph, weights, vertex_pair=None): df['first'] : cudf.Series The first vertex ID of each pair. + df['second'] : cudf.Series The second vertex ID of each pair. + df['overlap_coeff'] : cudf.Series The computed weighted Overlap coefficient between the first and the second vertex ID. @@ -84,7 +87,6 @@ def overlap_w(input_graph, weights, vertex_pair=None): >>> weights['weight'] = [random.random() for w in range( ... len(weights['vertex']))] >>> df = cugraph.overlap_w(G, weights) - """ if type(vertex_pair) == cudf.DataFrame: diff --git a/python/cugraph/cugraph/link_prediction/wsorensen.py b/python/cugraph/cugraph/link_prediction/wsorensen.py index 00c89370106..c017463a294 100644 --- a/python/cugraph/cugraph/link_prediction/wsorensen.py +++ b/python/cugraph/cugraph/link_prediction/wsorensen.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2022, NVIDIA CORPORATION. +# Copyright (c) 2021-2023, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -38,6 +38,7 @@ def sorensen_w(input_graph, weights, vertex_pair=None): weights['vertex'] : cudf.Series Contains the vertex identifiers + weights['weight'] : cudf.Series Contains the weights of vertices @@ -56,8 +57,10 @@ def sorensen_w(input_graph, weights, vertex_pair=None): df['first'] : cudf.Series The first vertex ID of each pair. + df['second'] : cudf.Series The second vertex ID of each pair. + df['sorensen_coeff'] : cudf.Series The computed weighted Sorensen coefficient between the first and the second vertex ID. diff --git a/python/cugraph/cugraph/structure/graph_classes.py b/python/cugraph/cugraph/structure/graph_classes.py index 5fd398124b8..a7fb9e0c0dd 100644 --- a/python/cugraph/cugraph/structure/graph_classes.py +++ b/python/cugraph/cugraph/structure/graph_classes.py @@ -497,7 +497,7 @@ def lookup_internal_vertex_id(self, df, column_name=None): Name of the column containing the external vertex ids Returns - --------- + ------- series : cudf.Series or dask_cudf.Series The internal vertex identifiers """ @@ -536,7 +536,7 @@ def add_internal_vertex_id( Preserve the order of the data frame (requires an extra sort) Returns - --------- + ------- df : cudf.DataFrame or dask_cudf.DataFrame Original DataFrame with new column containing internal vertex id diff --git a/python/cugraph/cugraph/structure/graph_implementation/simpleGraph.py b/python/cugraph/cugraph/structure/graph_implementation/simpleGraph.py index 7ad694e62f5..f1f134af404 100644 --- a/python/cugraph/cugraph/structure/graph_implementation/simpleGraph.py +++ b/python/cugraph/cugraph/structure/graph_implementation/simpleGraph.py @@ -361,10 +361,13 @@ def view_edge_list(self): ------- df : cudf.DataFrame This cudf.DataFrame wraps source, destination and weight + df[src] : cudf.Series contains the source index for each edge + df[dst] : cudf.Series contains the destination index for each edge + df[weight] : cusd.Series Column is only present for weighted Graph, then containing the weight value for each edge @@ -745,9 +748,11 @@ def in_degree(self, vertex_subset=None): vertices (vertex_subset) containing the in_degree. The ordering is relative to the adjacency list, or that given by the specified vertex_subset. + df[vertex] : cudf.Series The vertex IDs (will be identical to vertex_subset if specified). + df[degree] : cudf.Series The computed in-degree of the corresponding vertex. @@ -785,9 +790,11 @@ def out_degree(self, vertex_subset=None): vertices (vertex_subset) containing the out_degree. The ordering is relative to the adjacency list, or that given by the specified vertex_subset. + df[vertex] : cudf.Series The vertex IDs (will be identical to vertex_subset if specified). + df[degree] : cudf.Series The computed out-degree of the corresponding vertex. @@ -824,9 +831,11 @@ def degree(self, vertex_subset=None): vertices (vertex_subset) containing the degree. The ordering is relative to the adjacency list, or that given by the specified vertex_subset. + df['vertex'] : cudf.Series The vertex IDs (will be identical to vertex_subset if specified). + df['degree'] : cudf.Series The computed degree of the corresponding vertex. @@ -863,11 +872,14 @@ def degrees(self, vertex_subset=None): vertices (vertex_subset) containing the degrees. The ordering is relative to the adjacency list, or that given by the specified vertex_subset. + df['vertex'] : cudf.Series The vertex IDs (will be identical to vertex_subset if specified). + df['in_degree'] : cudf.Series The in-degree of the vertex. 
+
 df['out_degree'] : cudf.Series
     The out-degree of the vertex.
diff --git a/python/pylibcugraph/pylibcugraph/bfs.pyx b/python/pylibcugraph/pylibcugraph/bfs.pyx
index 8af3f48736b..b9d17f15cc5 100644
--- a/python/pylibcugraph/pylibcugraph/bfs.pyx
+++ b/python/pylibcugraph/pylibcugraph/bfs.pyx
@@ -102,47 +102,45 @@ def bfs(ResourceHandle handle, _GPUGraph graph,
 Examples
 --------
-
- M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
- dtype=['int32', 'int32', 'float32'], header=None)
- G = cugraph.Graph()
- G.from_cudf_edgelist(M, source='0', destination='1', edge_attr='2')
-
- handle = ResourceHandle()
-
- srcs = G.edgelist.edgelist_df['src']
- dsts = G.edgelist.edgelist_df['dst']
- weights = G.edgelist.edgelist_df['weights']
-
- sg = SGGraph(
- resource_handle = handle,
- graph_properties = GraphProperties(is_multigraph=G.is_multigraph()),
- src_array = srcs,
- dst_array = dsts,
- weight_array = weights,
- store_transposed=False,
- renumber=False,
- do_expensive_check=do_expensive_check
- )
-
- res = pylibcugraph_bfs(
- handle,
- sg,
- cudf.Series([0], dtype='int32'),
- False,
- 10,
- True,
- False
- )
-
- distances, predecessors, vertices = res
-
- final_results = cudf.DataFrame({
- 'distance': cudf.Series(distances),
- 'vertex': cudf.Series(vertices),
- 'predecessor': cudf.Series(predecessors),
- })
-
+ >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
+ >>> dtype=['int32', 'int32', 'float32'], header=None)
+ >>> G = cugraph.Graph()
+ >>> G.from_cudf_edgelist(M, source='0', destination='1', edge_attr='2')
+ >>>
+ >>> handle = ResourceHandle()
+ >>>
+ >>> srcs = G.edgelist.edgelist_df['src']
+ >>> dsts = G.edgelist.edgelist_df['dst']
+ >>> weights = G.edgelist.edgelist_df['weights']
+ >>>
+ >>> sg = SGGraph(
+ >>> resource_handle = handle,
+ >>> graph_properties = GraphProperties(is_multigraph=G.is_multigraph()),
+ >>> src_array = srcs,
+ >>> dst_array = dsts,
+ >>> weight_array = weights,
+ >>> store_transposed=False,
+ >>> renumber=False,
+ >>> do_expensive_check=do_expensive_check
+ >>> )
+ >>>
+ >>> res = pylibcugraph_bfs(
+ >>> handle,
+ >>> sg,
+ >>> cudf.Series([0], dtype='int32'),
+ >>> False,
+ >>> 10,
+ >>> True,
+ >>> False
+ >>> )
+ >>>
+ >>> distances, predecessors, vertices = res
+ >>>
+ >>> final_results = cudf.DataFrame({
+ >>> 'distance': cudf.Series(distances),
+ >>> 'vertex': cudf.Series(vertices),
+ >>> 'predecessor': cudf.Series(predecessors),
+ >>> })
 """
 try:

From 8c794e2ec9bd052f3bdf677f8f1b1cc31cb38869 Mon Sep 17 00:00:00 2001
From: Naim <110031745+naimnv@users.noreply.github.com>
Date: Mon, 17 Apr 2023 20:10:26 +0200
Subject: [PATCH 13/90] Fix graph_properties_t's members order (#3484)

Fixes a bug where the braced-initializer arguments of graph_properties_t were passed in the wrong order.
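For context (illustration only, not part of the upstream patch): graph_properties_t is an aggregate of two bools, and braced initialization assigns arguments in member declaration order. The sketch below assumes the members are declared is_symmetric first and is_multigraph second; the struct here is a hypothetical stand-in for the cugraph type, written only to show why swapping the arguments matters.

```cpp
#include <cassert>

// Illustrative stand-in for cugraph's graph_properties_t; assumed member
// declaration order: is_symmetric first, then is_multigraph.
struct graph_properties_t {
  bool is_symmetric{false};
  bool is_multigraph{false};
};

int main()
{
  bool is_multigraph = false;  // property of the input graph

  // Wrong: the first braced argument initializes is_symmetric, so this
  // stores the multigraph flag in the symmetry field and vice versa.
  graph_properties_t wrong{is_multigraph, true};

  // Right: a symmetrized graph is symmetric, so is_symmetric comes first.
  graph_properties_t right{true, is_multigraph};

  assert(right.is_symmetric == true);
  assert(right.is_multigraph == is_multigraph);
  assert(wrong.is_symmetric == is_multigraph);  // demonstrates the swap
  return 0;
}
```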
Authors:
  - Naim (https://github.com/naimnv)

Approvers:
  - Seunghwa Kang (https://github.com/seunghwak)

URL: https://github.com/rapidsai/cugraph/pull/3484
---
 cpp/src/structure/symmetrize_graph_impl.cuh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cpp/src/structure/symmetrize_graph_impl.cuh b/cpp/src/structure/symmetrize_graph_impl.cuh
index 7ad24aef01a..4afa4122a06 100644
--- a/cpp/src/structure/symmetrize_graph_impl.cuh
+++ b/cpp/src/structure/symmetrize_graph_impl.cuh
@@ -111,7 +111,7 @@ symmetrize_graph_impl(
 std::move(edgelist_weights),
 std::nullopt,
 std::nullopt,
- graph_properties_t{is_multigraph, true},
+ graph_properties_t{true, is_multigraph},
 true);
 return std::make_tuple(
@@ -205,7 +205,7 @@ symmetrize_graph_impl(
 std::move(edgelist_weights),
 std::nullopt,
 std::nullopt,
- graph_properties_t{is_multigraph, true},
+ graph_properties_t{true, is_multigraph},
 renumber);
 return std::make_tuple(

From 3f965f57cc126c19b5c0bcadd9b64044f1d93ba4 Mon Sep 17 00:00:00 2001
From: Jordan Jacobelli
Date: Mon, 17 Apr 2023 20:23:46 +0200
Subject: [PATCH 14/90] Use ARC V2 self-hosted runners for GPU jobs (#3483)

This PR updates the runner labels to use ARC V2 self-hosted runners for GPU jobs. This is needed to resolve the auto-scaling issues.

Authors:
  - Jordan Jacobelli (https://github.com/jjacobelli)

Approvers:
  - AJ Schmidt (https://github.com/ajschmidt8)

URL: https://github.com/rapidsai/cugraph/pull/3483
---
 .github/workflows/build.yaml | 2 +-
 .github/workflows/pr.yaml | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 2a3261a1dfe..80dd5de1343 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -59,7 +59,7 @@ jobs:
 uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.06
 with:
 build_type: branch
- node_type: "gpu-latest-1"
+ node_type: "gpu-v100-latest-1"
 arch: "amd64"
 container_image: "rapidsai/ci:latest"
 run_script: "ci/build_docs.sh"
diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml
index b5fe3a9183a..93e5b656f17 100644
--- a/.github/workflows/pr.yaml
+++ b/.github/workflows/pr.yaml
@@ -61,7 +61,7 @@ jobs:
 uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.06
 with:
 build_type: pull-request
- node_type: "gpu-latest-1"
+ node_type: "gpu-v100-latest-1"
 arch: "amd64"
 container_image: "rapidsai/ci:latest"
 run_script: "ci/test_notebooks.sh"
@@ -71,7 +71,7 @@ jobs:
 uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.06
 with:
 build_type: pull-request
- node_type: "gpu-latest-1"
+ node_type: "gpu-v100-latest-1"
 arch: "amd64"
 container_image: "rapidsai/ci:latest"
 run_script: "ci/build_docs.sh"

From 88e7366c76833861a0872429557054317d271e42 Mon Sep 17 00:00:00 2001
From: Bradley Dice
Date: Mon, 17 Apr 2023 15:15:39 -0500
Subject: [PATCH 15/90] Update clang-format to 16.0.1. (#3485)

This PR updates the clang-format version used by pre-commit.
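As an aside (illustration only, not part of the diff): most of the mechanical churn in the C++ headers touched below comes from the new clang-format collapsing empty class bodies onto a single line. A minimal before/after sketch, using a hypothetical trait name modeled on the ones reformatted in this PR:

```cpp
#include <type_traits>

// Formatting produced by the previous clang-format configuration:
// template <typename T>
// struct is_thrust_tuple_example : std::false_type {
// };

// Equivalent formatting after the move to clang-format 16:
template <typename T>
struct is_thrust_tuple_example : std::false_type {};
```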
Authors: - Bradley Dice (https://github.com/bdice) Approvers: - Chuck Hastings (https://github.com/ChuckHastings) - Rick Ratzel (https://github.com/rlratzel) URL: https://github.com/rapidsai/cugraph/pull/3485 --- .pre-commit-config.yaml | 5 +- cpp/include/cugraph/edge_src_dst_property.hpp | 4 +- cpp/include/cugraph/graph.hpp | 15 ++-- cpp/include/cugraph/graph_mask.hpp | 6 +- cpp/include/cugraph/legacy/graph.hpp | 14 ++- .../cugraph/utilities/thrust_tuple_utils.hpp | 36 +++----- cpp/src/community/egonet_impl.cuh | 4 +- cpp/src/community/legacy/louvain.cuh | 4 +- cpp/src/components/legacy/scc_matrix.cuh | 4 +- cpp/src/cores/core_number_impl.cuh | 4 +- cpp/src/linear_assignment/legacy/hungarian.cu | 4 +- .../detail/extract_transform_v_frontier_e.cuh | 12 +-- ...r_v_random_select_transform_outgoing_e.cuh | 4 +- ...m_reduce_dst_key_aggregated_outgoing_e.cuh | 4 +- cpp/src/prims/property_op_utils.cuh | 3 +- cpp/src/prims/reduce_op.cuh | 12 +-- ...t_nbr_intersection_of_e_endpoints_by_v.cuh | 4 +- cpp/src/prims/update_v_frontier.cuh | 6 +- cpp/src/sampling/random_walks.cuh | 18 ++-- cpp/src/sampling/rw_traversals.hpp | 4 +- cpp/src/structure/detail/structure_utils.cuh | 7 +- cpp/src/structure/induced_subgraph_impl.cuh | 2 +- cpp/src/structure/renumber_edgelist_impl.cuh | 2 +- cpp/src/traversal/bfs_impl.cuh | 4 +- cpp/src/traversal/legacy/bfs_kernels.cuh | 6 +- cpp/src/utilities/graph_utils.cuh | 4 +- cpp/tests/components/scc_test.cu | 5 +- cpp/tests/generators/erdos_renyi_test.cpp | 5 +- cpp/tests/generators/generators_test.cpp | 3 +- cpp/tests/linear_assignment/hungarian_test.cu | 9 +- cpp/tests/prims/mg_extract_transform_e.cu | 4 +- cpp/tests/structure/streams.cu | 3 +- cpp/tests/traversal/mg_sssp_test.cpp | 2 +- cpp/tests/traversal/sssp_test.cpp | 4 +- cpp/tests/utilities/test_utilities.hpp | 2 +- .../cugraph/internals/callbacks_implems.hpp | 90 +++++++++---------- readme_pages/CONTRIBUTING.md | 22 ++--- 37 files changed, 152 insertions(+), 189 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3c2f5fe2cfb..0f05aedf1a1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,6 +2,7 @@ # # Before first use: `pre-commit install` # To run: `pre-commit run --all-files` +exclude: '^thirdparty' repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 @@ -32,13 +33,13 @@ repos: additional_dependencies: - flake8==6.0.0 - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v11.1.0 + rev: v16.0.1 hooks: - id: clang-format exclude: | (?x)^( cpp/libcugraph_etl| - cpp/tests/c_api/.* + cpp/tests/c_api ) types_or: [c, c++, cuda] args: ["-fallback-style=none", "-style=file", "-i"] diff --git a/cpp/include/cugraph/edge_src_dst_property.hpp b/cpp/include/cugraph/edge_src_dst_property.hpp index f894d3d1a60..9b7139ff8f4 100644 --- a/cpp/include/cugraph/edge_src_dst_property.hpp +++ b/cpp/include/cugraph/edge_src_dst_property.hpp @@ -562,8 +562,8 @@ auto view_concat(detail::edge_major_property_view_t const&... view template auto view_concat(detail::edge_minor_property_view_t const&... 
views) { - using concat_value_iterator = decltype( - thrust::make_zip_iterator(thrust_tuple_cat(to_thrust_iterator_tuple(views.value_first())...))); + using concat_value_iterator = decltype(thrust::make_zip_iterator( + thrust_tuple_cat(to_thrust_iterator_tuple(views.value_first())...))); concat_value_iterator edge_partition_concat_value_first{}; diff --git a/cpp/include/cugraph/graph.hpp b/cpp/include/cugraph/graph.hpp index 233824049f3..60b9f1a4054 100644 --- a/cpp/include/cugraph/graph.hpp +++ b/cpp/include/cugraph/graph.hpp @@ -310,27 +310,22 @@ template struct invalid_idx< T, typename std::enable_if_t::value && std::is_signed::value>> - : std::integral_constant { -}; + : std::integral_constant {}; template struct invalid_idx< T, typename std::enable_if_t::value && std::is_unsigned::value>> - : std::integral_constant::max()> { -}; + : std::integral_constant::max()> {}; template -struct invalid_vertex_id : invalid_idx { -}; +struct invalid_vertex_id : invalid_idx {}; template -struct invalid_edge_id : invalid_idx { -}; +struct invalid_edge_id : invalid_idx {}; template -struct invalid_component_id : invalid_idx { -}; +struct invalid_component_id : invalid_idx {}; template __host__ __device__ std::enable_if_t::value, bool> is_valid_vertex( diff --git a/cpp/include/cugraph/graph_mask.hpp b/cpp/include/cugraph/graph_mask.hpp index af5b9b01764..2048d3692c7 100644 --- a/cpp/include/cugraph/graph_mask.hpp +++ b/cpp/include/cugraph/graph_mask.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -128,7 +128,7 @@ struct graph_mask_view_t { ~graph_mask_view_t() = default; graph_mask_view_t(graph_mask_view_t&&) noexcept = default; - graph_mask_view_t& operator=(graph_mask_view_t&&) noexcept = default; + graph_mask_view_t& operator=(graph_mask_view_t&&) noexcept = default; graph_mask_view_t& operator=(graph_mask_view_t const& other) = default; /** @@ -231,7 +231,7 @@ struct graph_mask_t { { } - graph_mask_t& operator=(graph_mask_t&&) noexcept = default; + graph_mask_t& operator=(graph_mask_t&&) noexcept = default; graph_mask_t& operator=(graph_mask_t const& other) = default; /** diff --git a/cpp/include/cugraph/legacy/graph.hpp b/cpp/include/cugraph/legacy/graph.hpp index d207a0a1603..8276853ce7e 100644 --- a/cpp/include/cugraph/legacy/graph.hpp +++ b/cpp/include/cugraph/legacy/graph.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -553,23 +553,19 @@ template struct invalid_idx< T, typename std::enable_if_t::value && std::is_signed::value>> - : std::integral_constant { -}; + : std::integral_constant {}; template struct invalid_idx< T, typename std::enable_if_t::value && std::is_unsigned::value>> - : std::integral_constant::max()> { -}; + : std::integral_constant::max()> {}; template -struct invalid_vertex_id : invalid_idx { -}; +struct invalid_vertex_id : invalid_idx {}; template -struct invalid_edge_id : invalid_idx { -}; +struct invalid_edge_id : invalid_idx {}; } // namespace legacy } // namespace cugraph diff --git a/cpp/include/cugraph/utilities/thrust_tuple_utils.hpp b/cpp/include/cugraph/utilities/thrust_tuple_utils.hpp index cb3b8146153..6b78f104542 100644 --- a/cpp/include/cugraph/utilities/thrust_tuple_utils.hpp +++ b/cpp/include/cugraph/utilities/thrust_tuple_utils.hpp @@ -96,16 +96,13 @@ constexpr TupleType thrust_tuple_of_arithmetic_numeric_limits_max(TupleType t, } // namespace detail template -struct is_thrust_tuple : std::false_type { -}; +struct is_thrust_tuple : std::false_type {}; template -struct is_thrust_tuple> : std::true_type { -}; +struct is_thrust_tuple> : std::true_type {}; template -struct is_thrust_tuple_of_arithmetic : std::false_type { -}; +struct is_thrust_tuple_of_arithmetic : std::false_type {}; template struct is_thrust_tuple_of_arithmetic> { @@ -118,25 +115,20 @@ struct is_thrust_tuple_of_arithmetic> { }; template -struct is_std_tuple : std::false_type { -}; +struct is_std_tuple : std::false_type {}; template -struct is_std_tuple> : std::true_type { -}; +struct is_std_tuple> : std::true_type {}; template typename Vector> -struct is_arithmetic_vector : std::false_type { -}; +struct is_arithmetic_vector : std::false_type {}; template