Migrate NVText Normalizing APIs to Pylibcudf #17072

Merged · 24 commits · Oct 17, 2024
@@ -9,3 +9,4 @@ nvtext
 jaccard
 minhash
 ngrams_tokenize
+normalize
@@ -0,0 +1,6 @@
=========
normalize
=========

.. automodule:: pylibcudf.nvtext.normalize
:members:
13 changes: 7 additions & 6 deletions python/cudf/cudf/_lib/nvtext/ngrams_tokenize.pyx
@@ -14,10 +14,11 @@ def ngrams_tokenize(
     object py_delimiter,
     object py_separator
 ):
-    result = nvtext.ngrams_tokenize.ngrams_tokenize(
-        input.to_pylibcudf(mode="read"),
-        ngrams,
-        py_delimiter.device_value.c_value,
-        py_separator.device_value.c_value
+    return Column.from_pylibcudf(
+        nvtext.ngrams_tokenize.ngrams_tokenize(
+            input.to_pylibcudf(mode="read"),
+            ngrams,
+            py_delimiter.device_value.c_value,
+            py_separator.device_value.c_value
+        )
     )
-    return Column.from_pylibcudf(result)
38 changes: 13 additions & 25 deletions python/cudf/cudf/_lib/nvtext/normalize.pyx
@@ -3,36 +3,24 @@
 from cudf.core.buffer import acquire_spill_lock

 from libcpp cimport bool
-from libcpp.memory cimport unique_ptr
-from libcpp.utility cimport move
-
-from pylibcudf.libcudf.column.column cimport column
-from pylibcudf.libcudf.column.column_view cimport column_view
-from pylibcudf.libcudf.nvtext.normalize cimport (
-    normalize_characters as cpp_normalize_characters,
-    normalize_spaces as cpp_normalize_spaces,
-)
+
+from pylibcudf import nvtext

 from cudf._lib.column cimport Column


 @acquire_spill_lock()
-def normalize_spaces(Column strings):
-    cdef column_view c_strings = strings.view()
-    cdef unique_ptr[column] c_result
-
-    with nogil:
-        c_result = move(cpp_normalize_spaces(c_strings))
-
-    return Column.from_unique_ptr(move(c_result))
+def normalize_spaces(Column input):
+    result = nvtext.normalize.normalize_spaces(
+        input.to_pylibcudf(mode="read")
+    )
+    return Column.from_pylibcudf(result)


 @acquire_spill_lock()
-def normalize_characters(Column strings, bool do_lower=True):
-    cdef column_view c_strings = strings.view()
-    cdef unique_ptr[column] c_result
-
-    with nogil:
-        c_result = move(cpp_normalize_characters(c_strings, do_lower))
-
-    return Column.from_unique_ptr(move(c_result))
+def normalize_characters(Column input, bool do_lower=True):
+    result = nvtext.normalize.normalize_characters(
+        input.to_pylibcudf(mode="read"),
+        do_lower,
+    )
+    return Column.from_pylibcudf(result)
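With this change the cudf wrappers delegate to pylibcudf instead of calling libcudf through cimported symbols. A minimal sketch of how the user-facing API exercises these wrappers, assuming the existing cudf Series.str accessor names (normalize_spaces, normalize_characters), which are not part of this diff:

    import cudf

    s = cudf.Series(["a  b", " c  d\n", "e \t f "])

    # Collapse runs of whitespace into single spaces and trim the ends.
    print(s.str.normalize_spaces())               # ["a b", "c d", "e f"]

    # Lower-case, strip accents, and pad punctuation with spaces.
    print(s.str.normalize_characters(do_lower=True))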
2 changes: 1 addition & 1 deletion python/pylibcudf/pylibcudf/nvtext/CMakeLists.txt
@@ -13,7 +13,7 @@
 # =============================================================================

 set(cython_sources edit_distance.pyx generate_ngrams.pyx jaccard.pyx minhash.pyx
-    ngrams_tokenize.pyx
+    ngrams_tokenize.pyx normalize.pyx
 )

 set(linked_libraries cudf::cudf)
4 changes: 3 additions & 1 deletion python/pylibcudf/pylibcudf/nvtext/__init__.pxd
@@ -6,12 +6,14 @@ from . cimport (
     jaccard,
     minhash,
     ngrams_tokenize,
+    normalize,
 )

 __all__ = [
     "edit_distance",
     "generate_ngrams",
     "jaccard",
     "minhash",
-    "ngrams_tokenize"
+    "ngrams_tokenize",
+    "normalize",
 ]
10 changes: 9 additions & 1 deletion python/pylibcudf/pylibcudf/nvtext/__init__.py
@@ -1,11 +1,19 @@
 # Copyright (c) 2024, NVIDIA CORPORATION.

-from . import edit_distance, generate_ngrams, jaccard, minhash, ngrams_tokenize
+from . import (
+    edit_distance,
+    generate_ngrams,
+    jaccard,
+    minhash,
+    ngrams_tokenize,
+    normalize,
+)

 __all__ = [
     "edit_distance",
     "generate_ngrams",
     "jaccard",
     "minhash",
     "ngrams_tokenize",
+    "normalize",
 ]
9 changes: 9 additions & 0 deletions python/pylibcudf/pylibcudf/nvtext/normalize.pxd
@@ -0,0 +1,9 @@
# Copyright (c) 2024, NVIDIA CORPORATION.

from libcpp cimport bool
from pylibcudf.column cimport Column


cpdef Column normalize_spaces(Column input)

cpdef Column normalize_characters(Column input, bool do_lower_case)
64 changes: 64 additions & 0 deletions python/pylibcudf/pylibcudf/nvtext/normalize.pyx
@@ -0,0 +1,64 @@
# Copyright (c) 2024, NVIDIA CORPORATION.

from libcpp cimport bool
from libcpp.memory cimport unique_ptr
from libcpp.utility cimport move
from pylibcudf.column cimport Column
from pylibcudf.libcudf.column.column cimport column
from pylibcudf.libcudf.nvtext.normalize cimport (
normalize_characters as cpp_normalize_characters,
normalize_spaces as cpp_normalize_spaces,
)


cpdef Column normalize_spaces(Column input):
"""
Returns a new strings column by normalizing the whitespace in
each string in the input column.

For details, see :cpp:func:`normalize_spaces`

Parameters
----------
input : Column
Input strings

Returns
-------
Column
        New strings column of normalized strings.
"""
cdef unique_ptr[column] c_result

with nogil:
c_result = cpp_normalize_spaces(input.view())

return Column.from_libcudf(move(c_result))


cpdef Column normalize_characters(Column input, bool do_lower_case):
"""
    Normalizes string characters for tokenizing.

For details, see :cpp:func:`normalize_characters`

Parameters
----------
input : Column
Input strings
do_lower_case : bool
If true, upper-case characters are converted to lower-case
and accents are stripped from those characters. If false,
accented and upper-case characters are not transformed.

Returns
-------
Column
Normalized strings column
"""
cdef unique_ptr[column] c_result

with nogil:
c_result = cpp_normalize_characters(input.view(), do_lower_case)

return Column.from_libcudf(move(c_result))
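The new functions can also be driven directly through pylibcudf's Arrow interop layer, which is how the tests below exercise them. A minimal sketch (the sample strings here are illustrative only, not part of this diff):

    import pyarrow as pa
    import pylibcudf as plc

    # Move host strings onto the device as a pylibcudf Column.
    col = plc.interop.from_arrow(pa.array([" Héllo \t wörld ", "[a,b]"]))

    # Collapse runs of whitespace and trim leading/trailing whitespace.
    spaces = plc.nvtext.normalize.normalize_spaces(col)

    # Lower-case, strip accents, and pad punctuation for tokenization.
    chars = plc.nvtext.normalize.normalize_characters(col, True)

    # Copy the results back to host Arrow arrays for inspection.
    print(plc.interop.to_arrow(spaces))
    print(plc.interop.to_arrow(chars))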
42 changes: 42 additions & 0 deletions python/pylibcudf/pylibcudf/tests/test_nvtext_normalize.py
@@ -0,0 +1,42 @@
# Copyright (c) 2024, NVIDIA CORPORATION.

import pyarrow as pa
import pylibcudf as plc
import pytest
from utils import assert_column_eq


@pytest.fixture(scope="module")
def norm_spaces_input_data():
arr = ["a b", " c d\n", "e \t f "]
return pa.array(arr)


@pytest.fixture(scope="module")
def norm_chars_input_data():
arr = ["éâîô\teaio", "ĂĆĖÑÜ", "ACENU", "$24.08", "[a,bb]"]
return pa.array(arr)


def test_normalize_spaces(norm_spaces_input_data):
result = plc.nvtext.normalize.normalize_spaces(
plc.interop.from_arrow(norm_spaces_input_data)
)
expected = pa.array(["a b", "c d", "e f"])
assert_column_eq(result, expected)


@pytest.mark.parametrize("do_lower", [True, False])
def test_normalize_characters(norm_chars_input_data, do_lower):
result = plc.nvtext.normalize.normalize_characters(
plc.interop.from_arrow(norm_chars_input_data),
do_lower,
)
expected = pa.array(
["eaio eaio", "acenu", "acenu", " $ 24 . 08", " [ a , bb ] "]
)
if not do_lower:
expected = pa.array(
["éâîô eaio", "ĂĆĖÑÜ", "ACENU", " $ 24 . 08", " [ a , bb ] "]
)
assert_column_eq(result, expected)