diff --git a/README.md b/README.md
index fd81257bb..d406bbe6c 100644
--- a/README.md
+++ b/README.md
@@ -105,6 +105,8 @@ Below is the list of algorithms that are currently supported in nx-cugraph.
 bipartite
+ ├─ centrality
+ │   └─ betweenness_centrality
  └─ generators
      └─ complete_bipartite_graph
 centrality
diff --git a/_nx_cugraph/__init__.py b/_nx_cugraph/__init__.py
index 6b905e8db..a52583d4d 100644
--- a/_nx_cugraph/__init__.py
+++ b/_nx_cugraph/__init__.py
@@ -60,6 +60,7 @@
         "bfs_successors",
         "bfs_tree",
         "bidirectional_shortest_path",
+        "bipartite_betweenness_centrality",
         "bull_graph",
         "caveman_graph",
         "chvatal_graph",
diff --git a/benchmarks/pytest-based/bench_algos.py b/benchmarks/pytest-based/bench_algos.py
index 6549c7a61..3a9a171cf 100644
--- a/benchmarks/pytest-based/bench_algos.py
+++ b/benchmarks/pytest-based/bench_algos.py
@@ -875,6 +875,27 @@ def bench_ego_graph(benchmark, graph_obj, backend_wrapper):
     assert type(result) is type(G)
 
 
+def bench_bipartite_BC_n1000_m3000_k100000(benchmark, backend_wrapper):
+    # Example of how to run:
+    # $ pytest -sv -k "bench_bipartite_BC" \
+    #   --benchmark-json="logs/None__bipartite_BC__None.json" \
+    #   bench_algos.py
+    n = 1000
+    m = 3000
+    k = 100000
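+    # gnmk_random_graph(n, m, k) returns a random bipartite graph with n top
+    # nodes, m bottom nodes, and k edges.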
+    graph_obj = nx.bipartite.generators.gnmk_random_graph(n, m, k)
+    G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
+    nodes = list(range(n))
+    result = benchmark.pedantic(
+        target=backend_wrapper(nx.bipartite.betweenness_centrality),
+        args=(G, nodes),
+        rounds=rounds,
+        iterations=iterations,
+        warmup_rounds=warmup_rounds,
+    )
+    assert type(result) is dict
+
+
 @pytest.mark.skip(reason="benchmark not implemented")
 def bench_complete_bipartite_graph(benchmark, graph_obj, backend_wrapper):
     pass
diff --git a/nx_cugraph/algorithms/bipartite/__init__.py b/nx_cugraph/algorithms/bipartite/__init__.py
index bfc7f1d4d..ce5e47ead 100644
--- a/nx_cugraph/algorithms/bipartite/__init__.py
+++ b/nx_cugraph/algorithms/bipartite/__init__.py
@@ -10,4 +10,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+from .centrality import *
 from .generators import *
diff --git a/nx_cugraph/algorithms/bipartite/centrality.py b/nx_cugraph/algorithms/bipartite/centrality.py
new file mode 100644
index 000000000..7e4b0baa9
--- /dev/null
+++ b/nx_cugraph/algorithms/bipartite/centrality.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2024, NVIDIA CORPORATION.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cupy as cp
+import pylibcugraph as plc
+
+from nx_cugraph.convert import _to_graph
+from nx_cugraph.utils import networkx_algorithm
+
+__all__ = ["betweenness_centrality"]
+
+
+@networkx_algorithm(
+    name="bipartite_betweenness_centrality",
+    version_added="24.12",
+    _plc="betweenness_centrality",
+)
+def betweenness_centrality(G, nodes):
+    G = _to_graph(G)
+
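+    # Unnormalized betweenness for every node, computed via pylibcugraph;
+    # the bipartite normalization is applied per node set below.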
+    node_ids, values = plc.betweenness_centrality(
+        resource_handle=plc.ResourceHandle(),
+        graph=G._get_plc_graph(),
+        k=None,
+        random_state=None,
+        normalized=False,
+        include_endpoints=False,
+        do_expensive_check=False,
+    )
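+    # `nodes` identifies the top bipartite set; the remaining node ids form
+    # the bottom set.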
+    top_node_ids = G._nodekeys_to_nodearray(set(nodes))
+    bottom_node_ids = cp.delete(cp.arange(G._N, dtype=top_node_ids.dtype), top_node_ids)
+    n = top_node_ids.size
+    m = bottom_node_ids.size
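+    # Maximum betweenness attainable by a node in the top and bottom sets,
+    # respectively; each set is normalized by its own maximum (the same
+    # normalization used by networkx.bipartite.betweenness_centrality).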
+    s, t = divmod(n - 1, m)
+    bet_max_top = (
+        ((m**2) * ((s + 1) ** 2))
+        + (m * (s + 1) * (2 * t - s - 1))
+        - (t * ((2 * s) - t + 3))
+    ) / 2.0
+    p, r = divmod(m - 1, n)
+    bet_max_bot = (
+        ((n**2) * ((p + 1) ** 2))
+        + (n * (p + 1) * (2 * r - p - 1))
+        - (r * ((2 * p) - r + 3))
+    ) / 2.0
+
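+    # Reorder results so `values` is indexed by node id, then scale each
+    # bipartite set by its maximum.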
+    values = values[cp.argsort(node_ids)]
+
+    values[top_node_ids] /= bet_max_top
+    values[bottom_node_ids] /= bet_max_bot
+
+    return G._nodearray_to_dict(values)
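
Once merged, the new code path can be exercised through NetworkX's backend dispatch. A minimal sketch, assuming a NetworkX version that dispatches `bipartite.betweenness_centrality` and an environment where nx-cugraph is registered as the `cugraph` backend (graph sizes are illustrative):

```python
import networkx as nx

# Random bipartite graph: 50 top nodes, 100 bottom nodes, 500 edges.
G = nx.bipartite.gnmk_random_graph(50, 100, 500, seed=42)
top_nodes = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0]

# Dispatch to the GPU implementation added by this change.
bc = nx.bipartite.betweenness_centrality(G, top_nodes, backend="cugraph")
print(sorted(bc.items())[:5])
```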