diff --git a/cytotable/convert.py b/cytotable/convert.py
index f5d2eb6..d729242 100644
--- a/cytotable/convert.py
+++ b/cytotable/convert.py
@@ -173,6 +173,106 @@ def _prep_cast_column_data_types(
     return columns
 
 
+@python_app
+def _set_tablenumber(
+    sources: Dict[str, List[Dict[str, Any]]],
+    add_tablenumber: Optional[bool] = None,
+) -> Dict[str, List[Dict[str, Any]]]:
+    """
+    Gathers a "TableNumber" from the image table (if CSV) or
+    SQLite file (if SQLite source) which is a unique identifier
+    intended to help differentiate between ImageNumber values
+    to create distinct records for single-cell profiles
+    referenced across multiple source data exports.
+    For example, ImageNumber column values from CellProfiler
+    will repeat across exports, meaning we may lose distinction
+    when combining multiple export files together through CytoTable.
+
+    Note:
+    - If using CSV data sources, the image.csv table is used for the checksum.
+    - If using SQLite data sources, the entire SQLite database is used for the checksum.
+
+    Args:
+        sources: Dict[str, List[Dict[str, Any]]]
+            Contains metadata about data tables and related contents.
+        add_tablenumber: Optional[bool]
+            Whether to add a calculated tablenumber.
+            Note: when False, adds None as the tablenumber.
+
+    Returns:
+        Dict[str, List[Dict[str, Any]]]
+            New source groups with added TableNumber details.
+    """
+
+    from cloudpathlib import AnyPath
+
+    from cytotable.utils import _gather_tablenumber_checksum
+
+    image_table_groups = {
+        # create a data structure with the common parent for each dataset
+        # and the calculated checksum from the image table.
+        # note: the source_path parent is used for non-SQLite files
+        # whereas the direct source path is used for SQLite files.
+        (
+            str(source["source_path"].parent)
+            if source["source_path"].suffix != ".sqlite"
+            else source["source_path"]
+        ): source["source_path"]
+        for source_group_name, source_group_vals in sources.items()
+        # use the image table references only as the basis for
+        # these calculations.
+        if any(
+            value in str(AnyPath(source_group_name).stem).lower()
+            for value in ["image", "per_image"]
+        )
+        for source in source_group_vals
+    }
+
+    # determine if we need to add tablenumber data
+    if (
+        # case for detecting multiple image tables which need to be differentiated
+        add_tablenumber is None
+        and (len(image_table_groups) <= 1)
+    ) or (
+        # case for explicitly set no tablenumbers
+        add_tablenumber
+        is False
+    ):
+        return {
+            source_group_name: [
+                dict(
+                    source,
+                    **{
+                        "tablenumber": None,
+                    },
+                )
+                for source in source_group_vals
+            ]
+            for source_group_name, source_group_vals in sources.items()
+        }
+
+    # gather the image table from the source_group
+    tablenumber_table = {
+        # create a data structure with the common parent for each dataset
+        # and the calculated checksum from the image table
+        group: _gather_tablenumber_checksum(path)
+        for group, path in image_table_groups.items()
+    }
+
+    # return a modified sources data structure with the tablenumber added
+    return {
+        source_group_name: [
+            dict(
+                source,
+                **{"tablenumber": tablenumber_table[str(source["source_path"].parent)]},
+            )
+            for source in source_group_vals
+            if str(source["source_path"].parent) in list(tablenumber_table.keys())
+        ]
+        for source_group_name, source_group_vals in sources.items()
+    }
+
+
 @python_app
 def _get_table_keyset_pagination_sets(
     chunk_size: int,
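The branching above collapses to a small decision rule; a minimal sketch follows (the helper name here is hypothetical and not part of the patch):

```python
# Minimal sketch of the decision rule in _set_tablenumber (helper name is
# hypothetical): TableNumber is skipped when explicitly disabled, or when
# left as None with at most one image table group detected.
from typing import Optional


def should_add_tablenumber(add_tablenumber: Optional[bool], image_groups: int) -> bool:
    if add_tablenumber is False:
        return False
    if add_tablenumber is None and image_groups <= 1:
        return False
    return True


assert not should_add_tablenumber(None, 1)   # single dataset: column omitted
assert should_add_tablenumber(None, 2)       # multiple datasets: differentiate
assert not should_add_tablenumber(False, 2)  # explicit opt-out always wins
assert should_add_tablenumber(True, 1)       # explicit opt-in always adds
```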
@@ -310,6 +410,18 @@ def _source_pageset_to_parquet(
     )
     pathlib.Path(source_dest_path).mkdir(parents=True, exist_ok=True)
 
+    # build tablenumber segment addition (if necessary)
+    tablenumber_sql = (
+        # to become tablenumber in sql select later with bigint (8-byte integer)
+        # we cast here to bigint to avoid concat or join conflicts later due to
+        # misaligned automatic data typing.
+        f"CAST({source['tablenumber']} AS BIGINT) as TableNumber, "
+        if source["tablenumber"] is not None
+        # don't introduce the column if we aren't supposed to add tablenumber
+        # as per parameter.
+        else ""
+    )
+
     # add source table columns
     casted_source_cols = [
         # here we cast the column to the specified type to ensure the colname remains the same
@@ -317,8 +429,8 @@
         for column in source["columns"]
     ]
 
-    # create selection statement from lists above
-    select_columns = ",".join(
+    # create selection statement from tablenumber_sql + lists above
+    select_columns = tablenumber_sql + ",".join(
         # if we should sort the output, add the metadata_cols
         casted_source_cols
         if sort_output
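For reference, a sketch of the SELECT fragment this assembles (the checksum and column values below are hypothetical): the BIGINT-cast TableNumber is prepended ahead of the casted source columns, or omitted entirely when no tablenumber is present.

```python
# Sketch of the assembled SELECT fragment (values are hypothetical).
tablenumber = 1097039936  # hypothetical checksum value
tablenumber_sql = (
    f"CAST({tablenumber} AS BIGINT) as TableNumber, "
    if tablenumber is not None
    else ""
)
casted_source_cols = ['CAST("ImageNumber" AS BIGINT) AS "ImageNumber"']
select_columns = tablenumber_sql + ",".join(casted_source_cols)
print(f"SELECT {select_columns} FROM source")
# SELECT CAST(1097039936 AS BIGINT) as TableNumber,
#     CAST("ImageNumber" AS BIGINT) AS "ImageNumber" FROM source
```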
+ else "" + ) + # add source table columns casted_source_cols = [ # here we cast the column to the specified type ensure the colname remains the same @@ -317,8 +429,8 @@ def _source_pageset_to_parquet( for column in source["columns"] ] - # create selection statement from lists above - select_columns = ",".join( + # create selection statement from tablenumber_sql + lists above + select_columns = tablenumber_sql + ",".join( # if we should sort the output, add the metadata_cols casted_source_cols if sort_output @@ -376,6 +488,7 @@ def _source_pageset_to_parquet( page_key=source["page_key"], pageset=pageset, sort_output=sort_output, + tablenumber=source["tablenumber"], ), where=result_filepath, ) @@ -994,6 +1107,7 @@ def _to_parquet( # pylint: disable=too-many-arguments, too-many-locals sort_output: bool, page_keys: Dict[str, str], data_type_cast_map: Optional[Dict[str, str]] = None, + add_tablenumber: Optional[bool] = None, **kwargs, ) -> Union[Dict[str, List[Dict[str, Any]]], List[Any], str]: """ @@ -1137,6 +1251,12 @@ def _to_parquet( # pylint: disable=too-many-arguments, too-many-locals for source_group_name, source_group_vals in invalid_files_dropped.items() } + # add tablenumber details, appending None if not add_tablenumber + tablenumber_prepared = _set_tablenumber( + sources=evaluate_futures(column_names_and_types_gathered), + add_tablenumber=add_tablenumber, + ).result() + results = { source_group_name: [ dict( @@ -1165,7 +1285,7 @@ def _to_parquet( # pylint: disable=too-many-arguments, too-many-locals for source in source_group_vals ] for source_group_name, source_group_vals in evaluate_futures( - column_names_and_types_gathered + tablenumber_prepared ).items() } @@ -1277,6 +1397,7 @@ def convert( # pylint: disable=too-many-arguments,too-many-locals infer_common_schema: bool = True, drop_null: bool = False, data_type_cast_map: Optional[Dict[str, str]] = None, + add_tablenumber: Optional[bool] = None, page_keys: Optional[Dict[str, str]] = None, sort_output: bool = True, preset: Optional[str] = "cellprofiler_csv", @@ -1326,6 +1447,11 @@ def convert( # pylint: disable=too-many-arguments,too-many-locals A dictionary mapping data type groups to specific types. Roughly includes Arrow data types language from: https://arrow.apache.org/docs/python/api/datatypes.html + add_tablenumber: Optional[bool] + Whether to add a calculated tablenumber which helps differentiate + various repeated values (such as ObjectNumber) within source data. + Useful for processing multiple SQLite or CSV data sources together + to retain distinction from each dataset. page_keys: str: The table and column names to be used for key pagination. Uses the form: {"table_name":"column_name"}. @@ -1466,6 +1592,7 @@ def convert( # pylint: disable=too-many-arguments,too-many-locals infer_common_schema=infer_common_schema, drop_null=drop_null, data_type_cast_map=data_type_cast_map, + add_tablenumber=add_tablenumber, sort_output=sort_output, page_keys=cast(dict, page_keys), **kwargs, diff --git a/cytotable/utils.py b/cytotable/utils.py index c65f62d..16b562f 100644 --- a/cytotable/utils.py +++ b/cytotable/utils.py @@ -182,6 +182,7 @@ def _sqlite_mixed_type_query_to_parquet( page_key: str, pageset: Tuple[Union[int, float], Union[int, float]], sort_output: bool, + tablenumber: Optional[int] = None, ) -> str: """ Performs SQLite table data extraction where one or many @@ -201,6 +202,9 @@ def _sqlite_mixed_type_query_to_parquet( Specifies whether to sort cytotable output or not. 
diff --git a/cytotable/utils.py b/cytotable/utils.py
index c65f62d..16b562f 100644
--- a/cytotable/utils.py
+++ b/cytotable/utils.py
@@ -182,6 +182,7 @@ def _sqlite_mixed_type_query_to_parquet(
     page_key: str,
     pageset: Tuple[Union[int, float], Union[int, float]],
     sort_output: bool,
+    tablenumber: Optional[int] = None,
 ) -> str:
     """
     Performs SQLite table data extraction where one or many
@@ -201,6 +202,9 @@
             Specifies whether to sort cytotable output or not.
         add_cytotable_meta: bool, default=False:
             Whether to add CytoTable metadata fields or not
+        tablenumber: Optional[int], default=None:
+            An optional table number to append to the results.
+            Defaults to None.
 
     Returns:
         pyarrow.Table:
@@ -256,9 +260,19 @@ def _sqlite_affinity_data_type_lookup(col_type: str) -> str:
         # return the translated type for use in SQLite
         return translated_type[0]
 
+    # build tablenumber segment addition (if necessary)
+    tablenumber_sql = (
+        # to become tablenumber in sql select later with integer
+        f"CAST({tablenumber} AS INTEGER) as TableNumber, "
+        if tablenumber is not None
+        # if we don't have a tablenumber value, don't introduce the column
+        else ""
+    )
+
     # create cases for mixed-type handling in each column discovered above
-    query_parts = [
-        f"""
+    query_parts = tablenumber_sql + ", ".join(
+        [
+            f"""
         CASE
             /* when the storage class type doesn't match the column, return nulltype */
             WHEN typeof({col['column_name']}) !=
@@ -267,13 +281,14 @@
             ELSE {col['column_name']}
         END AS {col['column_name']}
         """
-        for col in column_info
-    ]
+            for col in column_info
+        ]
+    )
 
     # perform the select using the cases built above and using chunksize + offset
     sql_stmt = f"""
         SELECT
-            {', '.join(query_parts)}
+            {query_parts}
         FROM {table_name}
         WHERE {page_key} BETWEEN {pageset[0]} AND {pageset[1]}
         {"ORDER BY " + page_key if sort_output else ""};
@@ -482,6 +497,47 @@ def _write_parquet_table_with_metadata(table: pa.Table, **kwargs) -> None:
     )
 
 
+def _gather_tablenumber_checksum(pathname: str, buffer_size: int = 1048576) -> int:
+    """
+    Build and return a checksum for use as a unique identifier across datasets
+    referenced from cytominer-database:
+    https://github.com/cytomining/cytominer-database/blob/master/cytominer_database/ingest_variable_engine.py#L129
+
+    Args:
+        pathname: str:
+            A path to a file from which to generate the checksum.
+        buffer_size: int:
+            Buffer size to use for reading data.
+
+    Returns:
+        int
+            An integer representing the checksum of the pathname file.
+    """
+
+    import os
+    import zlib
+
+    # check whether the buffer size is larger than the file_size
+    file_size = os.path.getsize(pathname)
+    if file_size < buffer_size:
+        buffer_size = file_size
+
+    # open file
+    with open(str(pathname), "rb") as stream:
+        # begin result formation
+        result = zlib.crc32(bytes(0))
+        while True:
+            # read data from stream using buffer size
+            buffer = stream.read(buffer_size)
+            if not buffer:
+                # if we have no more data to use, break while loop
+                break
+            # use buffer read data to form checksum
+            result = zlib.crc32(buffer, result)
+
+    return result & 0xFFFFFFFF
+
+
 def _unwrap_value(val: Union[parsl.dataflow.futures.AppFuture, Any]) -> Any:
     """
     Helper function to unwrap futures from values or return values
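A quick sketch of the checksum's behavior (the temporary file contents are hypothetical): for inputs smaller than the buffer, the streamed CRC32 reduces to a single `zlib.crc32` over the file bytes, masked to an unsigned 32-bit value.

```python
# Equivalence sketch: streamed CRC32 vs. a one-shot zlib.crc32 call for a
# small (sub-buffer) file; contents are hypothetical.
import tempfile
import zlib

from cytotable.utils import _gather_tablenumber_checksum

with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as handle:
    handle.write(b"ImageNumber,FileName\n1,a.tif\n")

with open(handle.name, "rb") as stream:
    expected = zlib.crc32(stream.read()) & 0xFFFFFFFF

assert _gather_tablenumber_checksum(handle.name) == expected
```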
diff --git a/docs/source/architecture.data.md b/docs/source/architecture.data.md
index 3591dd0..876b706 100644
--- a/docs/source/architecture.data.md
+++ b/docs/source/architecture.data.md
@@ -25,6 +25,7 @@ Data are organized into tables of generally two categories:
 
 Identifying or key fields for image and compartment tables may include the following:
 
+- __TableNumber__: Provides a unique number based on the source file referenced to build CytoTable output, helping to distinguish repeated values in ImageNumber, ObjectNumber, or other referenced metadata columns. Typically useful when using multiple SQLite or CSV-based source datasets.
 - __ImageNumber__: Provides specificity on what image is being referenced (there may be many).
 - __ObjectNumber__: Provides specificity for a specific compartment object within an ImageNumber.
 - __Parent_Cells__: Provides a related Cell compartment ObjectNumber. This field is canonically referenced from the Cytoplasm compartment for joining Cytoplasm and Cell compartment data. (see [Cytoplasm Compartment Data Relationships](architecture.data.md#cytoplasm-compartment-data-relationships) below for greater detail)
diff --git a/docs/source/python-api.md b/docs/source/python-api.md
index ab1fc62..0e9986c 100644
--- a/docs/source/python-api.md
+++ b/docs/source/python-api.md
@@ -45,6 +45,10 @@ Convert
 
 |
 
+.. autofunction:: _set_tablenumber
+
+|
+
 .. autofunction:: _prepend_column_name
 
 |
diff --git a/poetry.lock b/poetry.lock
index 3b12005..9356592 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -171,46 +171,6 @@ files = [
     {file = "backports.weakref-1.0.post1.tar.gz", hash = "sha256:bc4170a29915f8b22c9e7c4939701859650f2eb84184aee80da329ac0b9825c2"},
 ]
 
-[[package]]
-name = "bcrypt"
-version = "4.1.2"
-description = "Modern password hashing for your software and your servers"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "bcrypt-4.1.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e"},
-    {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea505c97a5c465ab8c3ba75c0805a102ce526695cd6818c6de3b1a38f6f60da1"},
-    {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57fa9442758da926ed33a91644649d3e340a71e2d0a5a8de064fb621fd5a3326"},
-    {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:eb3bd3321517916696233b5e0c67fd7d6281f0ef48e66812db35fc963a422a1c"},
-    {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6cad43d8c63f34b26aef462b6f5e44fdcf9860b723d2453b5d391258c4c8e966"},
-    {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:44290ccc827d3a24604f2c8bcd00d0da349e336e6503656cb8192133e27335e2"},
-    {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:732b3920a08eacf12f93e6b04ea276c489f1c8fb49344f564cca2adb663b3e4c"},
-    {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:1c28973decf4e0e69cee78c68e30a523be441972c826703bb93099868a8ff5b5"},
-    {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b8df79979c5bae07f1db22dcc49cc5bccf08a0380ca5c6f391cbb5790355c0b0"},
-    {file = "bcrypt-4.1.2-cp37-abi3-win32.whl", hash = "sha256:fbe188b878313d01b7718390f31528be4010fed1faa798c5a1d0469c9c48c369"},
-    {file = "bcrypt-4.1.2-cp37-abi3-win_amd64.whl", hash = "sha256:9800ae5bd5077b13725e2e3934aa3c9c37e49d3ea3d06318010aa40f54c63551"},
-    {file = "bcrypt-4.1.2-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:71b8be82bc46cedd61a9f4ccb6c1a493211d031415a34adde3669ee1b0afbb63"},
-    {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e3c6642077b0c8092580c819c1684161262b2e30c4f45deb000c38947bf483"},
-    {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:387e7e1af9a4dd636b9505a465032f2f5cb8e61ba1120e79a0e1cd0b512f3dfc"},
-    {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f70d9c61f9c4ca7d57f3bfe88a5ccf62546ffbadf3681bb1e268d9d2e41c91a7"},
"sha256:2a298db2a8ab20056120b45e86c00a0a5eb50ec4075b6142db35f593b97cb3fb"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ba55e40de38a24e2d78d34c2d36d6e864f93e0d79d0b6ce915e4335aa81d01b1"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3566a88234e8de2ccae31968127b0ecccbb4cddb629da744165db72b58d88ca4"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b90e216dc36864ae7132cb151ffe95155a37a14e0de3a8f64b49655dd959ff9c"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:69057b9fc5093ea1ab00dd24ede891f3e5e65bee040395fb1e66ee196f9c9b4a"}, - {file = "bcrypt-4.1.2-cp39-abi3-win32.whl", hash = "sha256:02d9ef8915f72dd6daaef40e0baeef8a017ce624369f09754baf32bb32dba25f"}, - {file = "bcrypt-4.1.2-cp39-abi3-win_amd64.whl", hash = "sha256:be3ab1071662f6065899fe08428e45c16aa36e28bc42921c4901a191fda6ee42"}, - {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d75fc8cd0ba23f97bae88a6ec04e9e5351ff3c6ad06f38fe32ba50cbd0d11946"}, - {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a97e07e83e3262599434816f631cc4c7ca2aa8e9c072c1b1a7fec2ae809a1d2d"}, - {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e51c42750b7585cee7892c2614be0d14107fad9581d1738d954a262556dd1aab"}, - {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba4e4cc26610581a6329b3937e02d319f5ad4b85b074846bf4fef8a8cf51e7bb"}, - {file = "bcrypt-4.1.2.tar.gz", hash = "sha256:33313a1200a3ae90b75587ceac502b048b840fc69e7f7a0905b5f87fac7a1258"}, -] - -[package.extras] -tests = ["pytest (>=3.2.1,!=3.3.0)"] -typecheck = ["mypy"] - [[package]] name = "boto3" version = "1.34.63" @@ -1549,27 +1509,6 @@ sql-other = ["SQLAlchemy (>=1.4.16)"] test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.6.3)"] -[[package]] -name = "paramiko" -version = "3.4.0" -description = "SSH2 protocol library" -optional = false -python-versions = ">=3.6" -files = [ - {file = "paramiko-3.4.0-py3-none-any.whl", hash = "sha256:43f0b51115a896f9c00f59618023484cb3a14b98bbceab43394a39c6739b7ee7"}, - {file = "paramiko-3.4.0.tar.gz", hash = "sha256:aac08f26a31dc4dffd92821527d1682d99d52f9ef6851968114a8728f3c274d3"}, -] - -[package.dependencies] -bcrypt = ">=3.2" -cryptography = ">=3.3" -pynacl = ">=1.5" - -[package.extras] -all = ["gssapi (>=1.4.1)", "invoke (>=2.0)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] -gssapi = ["gssapi (>=1.4.1)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] -invoke = ["invoke (>=2.0)"] - [[package]] name = "parsedatetime" version = "2.6" @@ -1583,20 +1522,19 @@ files = [ [[package]] name = "parsl" -version = "2024.6.17" +version = "2024.10.7" description = "Simple data dependent workflows in Python" optional = false python-versions = ">=3.8.0" files = [ - {file = "parsl-2024.6.17-py3-none-any.whl", hash = "sha256:95cdbff3657efbe61d0b0dc501b10bd8d23b238d527e0a6327d58639d24f7e87"}, - {file = "parsl-2024.6.17.tar.gz", hash = "sha256:78e31ec46dcb9f665c0b5c50090dbf755b8cddb43b2bc39d467e50ce9c7eabfc"}, + {file = "parsl-2024.10.7-py3-none-any.whl", hash = "sha256:79b0f1e0e1854d261f548ddd79daca254fdd345610daf72d0866004093fdf93e"}, + {file = "parsl-2024.10.7.tar.gz", hash = "sha256:ab155f51b92f62bd2a2ca82abe70ea608d280d79b7beaa29fd8ce871d41ae59e"}, ] [package.dependencies] dill = "*" filelock = ">=3.13,<4" globus-sdk = "*" -paramiko = "*" psutil = ">=5.5.1" pyzmq = 
">=17.1.2" requests = "*" @@ -1606,7 +1544,7 @@ typeguard = ">=2.10,<3.dev0 || ==4.*" typing-extensions = ">=4.6,<5" [package.extras] -all = ["Flask (>=1.0.2)", "azure (<=4)", "boto3", "cffi", "flask-sqlalchemy", "google-api-python-client", "google-auth", "ipython (<=8.6.0)", "jsonschema", "kubernetes", "msrestazure", "nbsphinx", "networkx (>=2.5,<2.6)", "oauth-ssh (>=0.9)", "pandas (<2.2)", "plotly", "proxystore", "pydot", "python-daemon", "python-gssapi", "pyyaml", "radical.pilot (==1.60)", "radical.utils (==1.60)", "sphinx (>=7.1,<7.2)", "sphinx-rtd-theme", "sqlalchemy (>=1.4,<2)", "work-queue"] +all = ["Flask (>=1.0.2)", "azure (<=4)", "boto3", "cffi", "flask-sqlalchemy", "google-api-python-client", "google-auth", "ipython (<=8.6.0)", "jsonschema", "kubernetes", "msrestazure", "nbsphinx", "networkx (>=2.5,<2.6)", "oauth-ssh (>=0.9)", "pandas (<2.2)", "paramiko", "plotly", "proxystore", "pydot", "python-daemon", "python-gssapi", "pyyaml", "radical.pilot (==1.60)", "radical.utils (==1.60)", "sphinx (>=7.1,<7.2)", "sphinx-rtd-theme", "sqlalchemy (>=1.4,<2)", "work-queue"] aws = ["boto3"] azure = ["azure (<=4)", "msrestazure"] docs = ["ipython (<=8.6.0)", "nbsphinx", "sphinx (>=7.1,<7.2)", "sphinx-rtd-theme"] @@ -1618,6 +1556,7 @@ monitoring = ["sqlalchemy (>=1.4,<2)"] oauth-ssh = ["oauth-ssh (>=0.9)"] proxystore = ["proxystore"] radical-pilot = ["radical.pilot (==1.60)", "radical.utils (==1.60)"] +ssh = ["paramiko"] visualization = ["Flask (>=1.0.2)", "flask-sqlalchemy", "networkx (>=2.5,<2.6)", "pandas (<2.2)", "plotly", "pydot", "python-daemon"] workqueue = ["work-queue"] @@ -1826,32 +1765,6 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] -[[package]] -name = "pynacl" -version = "1.5.0" -description = "Python binding to the Networking and Cryptography (NaCl) library" -optional = false -python-versions = ">=3.6" -files = [ - {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"}, - {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"}, - {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"}, - {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"}, - {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, - {file = "PyNaCl-1.5.0.tar.gz", hash = 
"sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, -] - -[package.dependencies] -cffi = ">=1.4.1" - -[package.extras] -docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] -tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] - [[package]] name = "pytest" version = "7.4.4" diff --git a/tests/conftest.py b/tests/conftest.py index 6045ace..cf6fb8b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -214,6 +214,98 @@ def cytominerdatabase_to_pycytominer_merge_single_cells_parquet( return output_paths +@pytest.fixture() +def cytominerdatabase_to_manual_join_parquet( + fx_tempdir: str, + cytominerdatabase_sqlite_static: List[str], +) -> List[str]: + """ + Processed cytominer-database test sqlite data as + pycytominer merged single cell parquet files + """ + + output_paths = [] + for sqlite_file in cytominerdatabase_sqlite_static: + destination_path = ( + f"{fx_tempdir}/manual_join.{pathlib.Path(sqlite_file).name}.parquet" + ) + df_cytominerdatabase = ( + pd.read_sql( + sql=""" + WITH Image_Filtered AS ( + SELECT + TableNumber, + ImageNumber + FROM + Image + ), + /* gather unique objectnumber column names from each + compartment so as to retain differentiation */ + Cytoplasm_renamed AS ( + SELECT + ObjectNumber AS Cytoplasm_ObjectNumber, + * + FROM Cytoplasm + ), + Cells_renamed AS ( + SELECT + ObjectNumber AS Cells_ObjectNumber, + * + FROM Cells + ), + Nuclei_renamed AS ( + SELECT + ObjectNumber AS Nuclei_ObjectNumber, + * + FROM Nuclei + ) + SELECT * + FROM Cytoplasm_renamed cytoplasm + LEFT JOIN Cells_renamed cells ON + cells.ImageNumber = cytoplasm.ImageNumber + AND cells.TableNumber = cytoplasm.TableNumber + AND cells.Cells_Number_Object_Number = cytoplasm.Cytoplasm_Parent_Cells + LEFT JOIN Nuclei_renamed nuclei ON + nuclei.ImageNumber = cytoplasm.ImageNumber + AND nuclei.TableNumber = cytoplasm.TableNumber + AND nuclei.Nuclei_Number_Object_Number = cytoplasm.Cytoplasm_Parent_Nuclei + LEFT JOIN Image_Filtered image ON + image.ImageNumber = cytoplasm.ImageNumber + AND image.TableNumber = cytoplasm.TableNumber + """, + con=sqlite_file, + ) + # replacing 'nan' strings with None + .replace(to_replace="nan", value=None) + # renaming columns as appropriate + .rename( + columns={ + "ImageNumber": "Metadata_ImageNumber", + "TableNumber": "Metadata_TableNumber", + "Cytoplasm_Parent_Cells": "Metadata_Cytoplasm_Parent_Cells", + "Cytoplasm_Parent_Nuclei": "Metadata_Cytoplasm_Parent_Nuclei", + "Cells_Parent_Nuclei": "Metadata_Cells_Parent_Nuclei", + } + # drop generic objectnumber column gathered from each compartment + # (we'll rely on the compartment prefixed name instead for comparisons) + ).drop(columns="ObjectNumber") + ) + + # drop duplicate column names + df_cytominerdatabase = df_cytominerdatabase.loc[ + :, ~df_cytominerdatabase.columns.duplicated() + ].copy() + + # sort the columns and export to parquet + df_cytominerdatabase[ + sorted(sorted(df_cytominerdatabase.columns.tolist()), key=_column_sort) + ].to_parquet(destination_path) + + output_paths.append(destination_path) + + return output_paths + + @pytest.fixture(name="example_tables") def fixture_example_tables() -> Tuple[pa.Table, ...]: """ diff --git a/tests/test_convert_threaded.py b/tests/test_convert_threaded.py index b513562..f4340d7 100644 --- a/tests/test_convert_threaded.py +++ b/tests/test_convert_threaded.py @@ -6,6 +6,7 @@ import pathlib +from typing import List import pandas as pd import pyarrow as pa @@ -161,6 +162,83 @@ def test_get_source_filepaths( assert len(set(single_dir_result.keys())) 
diff --git a/tests/test_convert_threaded.py b/tests/test_convert_threaded.py
index b513562..f4340d7 100644
--- a/tests/test_convert_threaded.py
+++ b/tests/test_convert_threaded.py
@@ -6,6 +6,7 @@
 """
 
 import pathlib
+from typing import List
 
 import pandas as pd
 import pyarrow as pa
@@ -161,6 +162,83 @@ def test_get_source_filepaths(
     assert len(set(single_dir_result.keys())) == 4
 
 
+def test_gather_tablenumber(
+    load_parsl_threaded: None,
+    fx_tempdir: str,
+    data_dirs_cytominerdatabase: List[str],
+    cytominerdatabase_to_manual_join_parquet: List[str],
+):
+    """
+    Tests _set_tablenumber and _gather_tablenumber_checksum
+    """
+
+    for unprocessed_cytominerdatabase, processed_cytominerdatabase in zip(
+        data_dirs_cytominerdatabase, cytominerdatabase_to_manual_join_parquet
+    ):
+        test_table = parquet.read_table(
+            source=convert(
+                source_path=unprocessed_cytominerdatabase,
+                dest_path=(
+                    f"{fx_tempdir}/{pathlib.Path(unprocessed_cytominerdatabase).name}.test_table.parquet"
+                ),
+                dest_datatype="parquet",
+                source_datatype="csv",
+                join=True,
+                joins="""
+                    WITH Image_Filtered AS (
+                        SELECT
+                            Metadata_TableNumber,
+                            Metadata_ImageNumber
+                        FROM
+                            read_parquet('image.parquet')
+                    )
+                    SELECT
+                        image.*,
+                        cytoplasm.* EXCLUDE (Metadata_TableNumber, Metadata_ImageNumber),
+                        nuclei.* EXCLUDE (Metadata_TableNumber, Metadata_ImageNumber),
+                        cells.* EXCLUDE (Metadata_TableNumber, Metadata_ImageNumber)
+                    FROM
+                        read_parquet('cytoplasm.parquet') AS cytoplasm
+                    LEFT JOIN read_parquet('cells.parquet') AS cells ON
+                        cells.Metadata_TableNumber = cytoplasm.Metadata_TableNumber
+                        AND cells.Metadata_ImageNumber = cytoplasm.Metadata_ImageNumber
+                        AND cells.Cells_ObjectNumber = cytoplasm.Metadata_Cytoplasm_Parent_Cells
+                    LEFT JOIN read_parquet('nuclei.parquet') AS nuclei ON
+                        nuclei.Metadata_TableNumber = cytoplasm.Metadata_TableNumber
+                        AND nuclei.Metadata_ImageNumber = cytoplasm.Metadata_ImageNumber
+                        AND nuclei.Nuclei_ObjectNumber = cytoplasm.Metadata_Cytoplasm_Parent_Nuclei
+                    LEFT JOIN Image_Filtered AS image ON
+                        image.Metadata_TableNumber = cytoplasm.Metadata_TableNumber
+                        AND image.Metadata_ImageNumber = cytoplasm.Metadata_ImageNumber
+                """,
+                preset="cell-health-cellprofiler-to-cytominer-database",
+            )
+        )
+        control_table = parquet.read_table(source=processed_cytominerdatabase)
+
+        control_unique_tablenumbers = pc.unique(control_table["Metadata_TableNumber"])
+
+        # use pandas to assert a test of equality to help with differences in how
+        # data may be rounded by CytoTable vs cytominer-database (which use different
+        # data parsers and related conversions).
+        # See here for more information: https://github.com/cytomining/CytoTable/issues/187
+        pd.testing.assert_frame_equal(
+            test_table.filter(
+                # we use only those tablenumbers which appear in cytominer-database
+                # related results to help compare. CytoTable only removes datasets
+                # which have no image table whereas cytominer-database removes any
+                # dataset which has no image table or problematic compartment tables
+                # (any compartment table with errors triggers the entire dataset
+                # being removed).
+                pc.field("Metadata_TableNumber").isin(control_unique_tablenumbers)
+            )
+            .sort_by([(name, "ascending") for name in test_table.column_names])
+            .to_pandas(),
+            control_table.sort_by(
+                [(name, "ascending") for name in control_table.column_names]
+            ).to_pandas(),
+        )
+
+
 def test_avoid_na_row_output(
     load_parsl_threaded: None, fx_tempdir: str, data_dir_cellprofiler: str
 ):
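As a follow-on sketch (the output path is hypothetical), the added `Metadata_TableNumber` column can be used to partition rows per source dataset after conversion:

```python
# Sketch (path hypothetical): once two export sets are converted together,
# Metadata_TableNumber separates rows that would otherwise collide on
# ImageNumber/ObjectNumber alone.
import duckdb

rows = duckdb.connect().execute(
    "SELECT Metadata_TableNumber, COUNT(*) "
    "FROM read_parquet('converted.parquet') "
    "GROUP BY Metadata_TableNumber"
).fetchall()
print(rows)  # e.g. one row per source dataset
```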