Skip to content

Commit

Permalink
rename
Browse files — browse the repository at this point in the history
  • Loading branch information
galipremsagar committed May 23, 2022
1 parent e840947 commit 378b104
Show file tree
Hide file tree
Showing 7 changed files with 14 additions and 14 deletions.
2 changes: 1 addition & 1 deletion python/cudf/cudf/io/avro.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def read_avro(
"`read_avro` does not yet support reading multiple files"
)

filepath_or_buffer, compression = ioutils.get_reader_path_or_buf(
filepath_or_buffer, compression = ioutils.get_reader_filepath_or_buffer(
path_or_data=filepath_or_buffer, compression=None, **kwargs
)
if compression is not None:
Expand Down
4 changes: 2 additions & 2 deletions python/cudf/cudf/io/csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ def read_csv(
"`read_csv` does not yet support reading multiple files"
)

filepath_or_buffer, compression = ioutils.get_reader_path_or_buf(
filepath_or_buffer, compression = ioutils.get_reader_filepath_or_buffer(
path_or_data=filepath_or_buffer,
compression=compression,
iotypes=(BytesIO, StringIO, NativeFile),
Expand Down Expand Up @@ -146,7 +146,7 @@ def to_csv(
path_or_buf = StringIO()
return_as_string = True

path_or_buf = ioutils.get_writer_path_or_buf(
path_or_buf = ioutils.get_writer_filepath_or_buffer(
path_or_data=path_or_buf, mode="w", **kwargs
)

Expand Down
4 changes: 2 additions & 2 deletions python/cudf/cudf/io/json.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def read_json(
source = ioutils.stringify_pathlike(source)
source = fs.sep.join([source, "*.json"])

tmp_source, compression = ioutils.get_reader_path_or_buf(
tmp_source, compression = ioutils.get_reader_filepath_or_buffer(
path_or_data=source,
compression=compression,
iotypes=(BytesIO, StringIO),
Expand Down Expand Up @@ -74,7 +74,7 @@ def read_json(
"multiple files via pandas"
)

path_or_buf, compression = ioutils.get_reader_path_or_buf(
path_or_buf, compression = ioutils.get_reader_filepath_or_buffer(
path_or_data=path_or_buf,
compression=compression,
iotypes=(BytesIO, StringIO),
Expand Down
8 changes: 4 additions & 4 deletions python/cudf/cudf/io/orc.py
Original file line number Diff line number Diff line change
Expand Up @@ -171,7 +171,7 @@ def read_orc_statistics(
files_statistics = []
stripes_statistics = []
for source in filepaths_or_buffers:
(filepath_or_buffer, compression,) = ioutils.get_reader_path_or_buf(
(path_or_buff, compression,) = ioutils.get_reader_filepath_or_buffer(
path_or_data=source, compression=None, **kwargs
)
if compression is not None:
Expand All @@ -182,7 +182,7 @@ def read_orc_statistics(
column_names,
raw_file_statistics,
raw_stripes_statistics,
) = liborc.read_raw_orc_statistics(filepath_or_buffer)
) = liborc.read_raw_orc_statistics(path_or_buff)

# Parse column names
column_names = [
Expand Down Expand Up @@ -323,7 +323,7 @@ def read_orc(
source = stringify_path(source)
source = fs.sep.join([source, "*.orc"])

tmp_source, compression = ioutils.get_reader_path_or_buf(
tmp_source, compression = ioutils.get_reader_filepath_or_buffer(
path_or_data=source,
compression=None,
use_python_file_object=use_python_file_object,
Expand Down Expand Up @@ -422,7 +422,7 @@ def to_orc(
"Categorical columns."
)

path_or_buf = ioutils.get_writer_path_or_buf(
path_or_buf = ioutils.get_writer_filepath_or_buffer(
path_or_data=fname, mode="wb", **kwargs
)
if ioutils.is_fsspec_open_file(path_or_buf):
Expand Down
4 changes: 2 additions & 2 deletions python/cudf/cudf/io/parquet.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ def _write_parquet(
ValueError("paths must be list-like when partitions_info provided")

paths_or_bufs = [
ioutils.get_writer_path_or_buf(path, mode="wb", **kwargs)
ioutils.get_writer_filepath_or_buffer(path, mode="wb", **kwargs)
for path in paths
]
common_args = {
Expand Down Expand Up @@ -435,7 +435,7 @@ def read_parquet(
fs=fs,
)
for i, source in enumerate(filepath_or_buffer):
tmp_source, compression = ioutils.get_reader_path_or_buf(
tmp_source, compression = ioutils.get_reader_filepath_or_buffer(
path_or_data=source,
compression=None,
fs=fs,
Expand Down
2 changes: 1 addition & 1 deletion python/cudf/cudf/io/text.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ def read_text(
):
"""{docstring}"""

filepath_or_buffer, compression = ioutils.get_reader_path_or_buf(
filepath_or_buffer, compression = ioutils.get_reader_filepath_or_buffer(
path_or_data=filepath_or_buffer,
compression=None,
iotypes=(BytesIO, StringIO),
Expand Down
4 changes: 2 additions & 2 deletions python/cudf/cudf/utils/ioutils.py
Original file line number Diff line number Diff line change
Expand Up @@ -1319,7 +1319,7 @@ def _open_remote_files(
]


def get_reader_path_or_buf(
def get_reader_filepath_or_buffer(
path_or_data,
compression,
mode="rb",
Expand Down Expand Up @@ -1428,7 +1428,7 @@ def get_reader_path_or_buf(
return path_or_data, compression


def get_writer_path_or_buf(path_or_data, mode, **kwargs):
def get_writer_filepath_or_buffer(path_or_data, mode, **kwargs):
"""
Return either a filepath string to data,
or a open file object to the output filesystem
Expand Down

0 comments on commit 378b104

Please sign in to comment.