diff --git a/.coveragerc b/.coveragerc index 280c5674f5bd..ae05ce469e67 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,6 +1,7 @@ [report] omit = */_generated/*.py + */_generated_v2/*.py show_missing = True exclude_lines = # Re-enable the standard pragma diff --git a/.gitignore b/.gitignore index feb24cb93c97..a84250e78819 100644 --- a/.gitignore +++ b/.gitignore @@ -57,3 +57,4 @@ scripts/pylintrc_reduced generated_python/ cloud-bigtable-client/ googleapis-pb/ +grpc_python_venv/ diff --git a/Makefile b/Makefile.bigtable_v1 similarity index 61% rename from Makefile rename to Makefile.bigtable_v1 index 3e7e63e6e26a..d6e6a247a06f 100644 --- a/Makefile +++ b/Makefile.bigtable_v1 @@ -1,10 +1,11 @@ +GRPCIO_VIRTUALENV=$(shell pwd)/grpc_python_venv GENERATED_DIR=$(shell pwd)/generated_python -BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/_generated -DATASTORE_DIR=$(shell pwd)/gcloud/datastore/_generated -GRPC_PLUGIN=grpc_python_plugin -PROTOC_CMD=protoc -BIGTABLE_PROTOS_DIR=$(shell pwd)/cloud-bigtable-client/bigtable-protos/src/main/proto +GENERATED_SUBDIR=_generated +BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/$(GENERATED_SUBDIR) +PROTOC_CMD=$(GRPCIO_VIRTUALENV)/bin/python -m grpc.tools.protoc GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb +BIGTABLE_CHECKOUT_DIR=$(shell pwd)/cloud-bigtable-client +BIGTABLE_PROTOS_DIR=$(BIGTABLE_CHECKOUT_DIR)/bigtable-client-core-parent/bigtable-protos/src/main/proto help: @echo 'Makefile for gcloud-python Bigtable protos ' @@ -14,19 +15,22 @@ help: @echo ' make clean Clean generated files ' generate: + # Ensure we have a virtualenv w/ up-to-date grpcio/grpcio-tools + [ -d $(GRPCIO_VIRTUALENV) ] || python2.7 -m virtualenv $(GRPCIO_VIRTUALENV) + $(GRPCIO_VIRTUALENV)/bin/pip install --upgrade grpcio grpcio-tools # Retrieve git repos that have our *.proto files. - [ -d cloud-bigtable-client ] || git clone https://github.com/GoogleCloudPlatform/cloud-bigtable-client --depth=1 - cd cloud-bigtable-client && git pull origin master - [ -d googleapis-pb ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1 - cd googleapis-pb && git pull origin master + [ -d $(BIGTABLE_CHECKOUT_DIR) ] || git clone https://github.com/GoogleCloudPlatform/cloud-bigtable-client --depth=1 + cd $(BIGTABLE_CHECKOUT_DIR) && git pull origin master + [ -d $(GOOGLEAPIS_PROTOS_DIR) ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1 + cd $(GOOGLEAPIS_PROTOS_DIR) && git pull origin master # Make the directory where our *_pb2.py files will go. mkdir -p $(GENERATED_DIR) # Generate all *_pb2.py files that require gRPC. 
$(PROTOC_CMD) \ --proto_path=$(BIGTABLE_PROTOS_DIR) \ + --proto_path=$(GOOGLEAPIS_PROTOS_DIR) \ --python_out=$(GENERATED_DIR) \ - --plugin=protoc-gen-grpc=$(GRPC_PLUGIN) \ - --grpc_out=$(GENERATED_DIR) \ + --grpc_python_out=$(GENERATED_DIR) \ $(BIGTABLE_PROTOS_DIR)/google/bigtable/v1/bigtable_service.proto \ $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto \ $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/bigtable_table_service.proto @@ -41,23 +45,17 @@ generate: $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto \ $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/bigtable_table_data.proto \ $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto \ - $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/datastore.proto \ - $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/entity.proto \ - $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/query.proto # Move the newly generated *_pb2.py files into our library. - mv $(GENERATED_DIR)/google/bigtable/v1/* $(BIGTABLE_DIR) - mv $(GENERATED_DIR)/google/bigtable/admin/cluster/v1/* $(BIGTABLE_DIR) - mv $(GENERATED_DIR)/google/bigtable/admin/table/v1/* $(BIGTABLE_DIR) - mv $(GENERATED_DIR)/google/datastore/v1beta3/* $(DATASTORE_DIR) + cp $(GENERATED_DIR)/google/bigtable/v1/* $(BIGTABLE_DIR) + cp $(GENERATED_DIR)/google/bigtable/admin/cluster/v1/* $(BIGTABLE_DIR) + cp $(GENERATED_DIR)/google/bigtable/admin/table/v1/* $(BIGTABLE_DIR) # Remove all existing *.proto files before we replace rm -f $(BIGTABLE_DIR)/*.proto - rm -f $(DATASTORE_DIR)/*.proto # Copy over the *.proto files into our library. cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/v1/*.proto $(BIGTABLE_DIR) cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/cluster/v1/*.proto $(BIGTABLE_DIR) cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/*.proto $(BIGTABLE_DIR) - cp $(BIGTABLE_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR) - cp $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/*.proto $(DATASTORE_DIR) + cp $(GOOGLEAPIS_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR) # Rename all *.proto files in our library with an # underscore and remove executable bit. cd $(BIGTABLE_DIR) && \ @@ -65,25 +63,19 @@ generate: chmod -x $$filename ; \ mv $$filename _$$filename ; \ done - cd $(DATASTORE_DIR) && \ - for filename in *.proto; do \ - chmod -x $$filename ; \ - mv $$filename _$$filename ; \ - done # Separate the gRPC parts of the operations service from the # non-gRPC parts so that the protos from `googleapis-common-protos` # can be used without gRPC. - python scripts/make_operations_grpc.py - # Separate the gRPC parts of the datastore service from the - # non-gRPC parts so that the protos can be used without gRPC. - python scripts/make_datastore_grpc.py + GRPCIO_VIRTUALENV="$(GRPCIO_VIRTUALENV)" \ + GENERATED_SUBDIR=$(GENERATED_SUBDIR) \ + python scripts/make_operations_grpc.py # Rewrite the imports in the generated *_pb2.py files. 
- python scripts/rewrite_imports.py + python scripts/rewrite_imports.py $(BIGTABLE_DIR)/*pb2.py check_generate: python scripts/check_generate.py clean: - rm -fr cloud-bigtable-client $(GENERATED_DIR) + rm -fr $(GRPCIO_VIRTUALENV) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR) .PHONY: generate check_generate clean diff --git a/Makefile.bigtable_v2 b/Makefile.bigtable_v2 new file mode 100644 index 000000000000..05681b1d55ed --- /dev/null +++ b/Makefile.bigtable_v2 @@ -0,0 +1,71 @@ +GRPCIO_VIRTUALENV=$(shell pwd)/grpc_python_venv +GENERATED_DIR=$(shell pwd)/generated_python +GENERATED_SUBDIR=_generated_v2 +BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/$(GENERATED_SUBDIR) +PROTOC_CMD=$(GRPCIO_VIRTUALENV)/bin/python -m grpc.tools.protoc +GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb + +help: + @echo 'Makefile for gcloud-python Bigtable protos ' + @echo ' ' + @echo ' make generate Generates the protobuf modules ' + @echo ' make check_generate Checks that generate succeeded ' + @echo ' make clean Clean generated files ' + +generate: + # Ensure we have a virtualenv w/ up-to-date grpcio/grpcio-tools + [ -d $(GRPCIO_VIRTUALENV) ] || python2.7 -m virtualenv $(GRPCIO_VIRTUALENV) + $(GRPCIO_VIRTUALENV)/bin/pip install --upgrade grpcio grpcio-tools + # Retrieve git repos that have our *.proto files. + [ -d googleapis-pb ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1 + cd googleapis-pb && git pull origin master + # Make the directory where our *_pb2.py files will go. + mkdir -p $(GENERATED_DIR) + # Generate all *_pb2.py files that require gRPC. + $(PROTOC_CMD) \ + --proto_path=$(GOOGLEAPIS_PROTOS_DIR) \ + --python_out=$(GENERATED_DIR) \ + --grpc_python_out=$(GENERATED_DIR) \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/v2/bigtable.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_instance_admin.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_table_admin.proto + # Generate all *_pb2.py files that do not require gRPC. + $(PROTOC_CMD) \ + --proto_path=$(GOOGLEAPIS_PROTOS_DIR) \ + --python_out=$(GENERATED_DIR) \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/v2/data.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/common.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/instance.proto \ + $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/table.proto \ + # Move the newly generated *_pb2.py files into our library. + cp $(GENERATED_DIR)/google/bigtable/v2/* $(BIGTABLE_DIR) + cp $(GENERATED_DIR)/google/bigtable/admin/v2/* $(BIGTABLE_DIR) + # Remove all existing *.proto files before we replace + rm -f $(BIGTABLE_DIR)/*.proto + # Copy over the *.proto files into our library. + cp $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/v2/*.proto $(BIGTABLE_DIR) + cp $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/*.proto $(BIGTABLE_DIR) + cp $(GOOGLEAPIS_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR) + # Rename all *.proto files in our library with an + # underscore and remove executable bit. + cd $(BIGTABLE_DIR) && \ + for filename in *.proto; do \ + chmod -x $$filename ; \ + mv $$filename _$$filename ; \ + done + # Separate the gRPC parts of the operations service from the + # non-gRPC parts so that the protos from `googleapis-common-protos` + # can be used without gRPC. + GRPCIO_VIRTUALENV="$(GRPCIO_VIRTUALENV)" \ + GENERATED_SUBDIR=$(GENERATED_SUBDIR) \ + python scripts/make_operations_grpc.py + # Rewrite the imports in the generated *_pb2.py files. 
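+	# As a hypothetical illustration, an import line emitted by the code
+	# generator such as
+	#     from google.bigtable.v2 import data_pb2
+	# is expected to be rewritten to point at the vendored copy, e.g.
+	#     from gcloud.bigtable._generated_v2 import data_pb2
+	# so the generated modules can import one another from inside this package.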
+	python scripts/rewrite_imports.py $(BIGTABLE_DIR)/*pb2.py
+
+check_generate:
+	python scripts/check_generate.py
+
+clean:
+	rm -fr $(GRPCIO_VIRTUALENV) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR)
+
+.PHONY: generate check_generate clean
diff --git a/Makefile.datastore b/Makefile.datastore
new file mode 100644
index 000000000000..73665ef5f542
--- /dev/null
+++ b/Makefile.datastore
@@ -0,0 +1,57 @@
+GRPCIO_VIRTUALENV=$(shell pwd)/grpc_python_venv
+GENERATED_DIR=$(shell pwd)/generated_python
+DATASTORE_DIR=$(shell pwd)/gcloud/datastore/_generated
+PROTOC_CMD=$(GRPCIO_VIRTUALENV)/bin/python -m grpc.tools.protoc
+GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb
+
+help:
+	@echo 'Makefile for gcloud-python Datastore protos '
+	@echo ' '
+	@echo ' make generate Generates the protobuf modules '
+	@echo ' make check_generate Checks that generate succeeded '
+	@echo ' make clean Clean generated files '
+
+generate:
+	# Ensure we have a virtualenv w/ up-to-date grpcio/grpcio-tools
+	[ -d $(GRPCIO_VIRTUALENV) ] || python2.7 -m virtualenv $(GRPCIO_VIRTUALENV)
+	$(GRPCIO_VIRTUALENV)/bin/pip install --upgrade grpcio grpcio-tools
+	# Retrieve git repos that have our *.proto files.
+	[ -d googleapis-pb ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1
+	cd googleapis-pb && git pull origin master
+	# Make the directory where our *_pb2.py files will go.
+	mkdir -p $(GENERATED_DIR)
+	# Generate all *_pb2.py files that do not require gRPC.
+	$(PROTOC_CMD) \
+	--proto_path=$(GOOGLEAPIS_PROTOS_DIR) \
+	--python_out=$(GENERATED_DIR) \
+	$(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/datastore.proto \
+	$(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/entity.proto \
+	$(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/query.proto
+	# Move the newly generated *_pb2.py files into our library.
+	cp $(GENERATED_DIR)/google/datastore/v1beta3/* $(DATASTORE_DIR)
+	# Remove all existing *.proto files before we replace
+	rm -f $(DATASTORE_DIR)/*.proto
+	# Copy over the *.proto files into our library.
+	cp $(GOOGLEAPIS_PROTOS_DIR)/google/datastore/v1beta3/*.proto $(DATASTORE_DIR)
+	# Rename all *.proto files in our library with an
+	# underscore and remove executable bit.
+	cd $(DATASTORE_DIR) && \
+	for filename in *.proto; do \
+	chmod -x $$filename ; \
+	mv $$filename _$$filename ; \
+	done
+	# Separate the gRPC parts of the datastore service from the
+	# non-gRPC parts so that the protos can be used without gRPC.
+	GRPCIO_VIRTUALENV="$(GRPCIO_VIRTUALENV)" \
+	GENERATED_SUBDIR=$(GENERATED_SUBDIR) \
+	python scripts/make_datastore_grpc.py
+	# Rewrite the imports in the generated *_pb2.py files.
+	python scripts/rewrite_imports.py $(DATASTORE_DIR)/*pb2.py
+
+check_generate:
+	python scripts/check_generate.py
+
+clean:
+	rm -fr $(GENERATED_DIR)
+
+.PHONY: generate check_generate clean
diff --git a/docs/bigtable-client-intro.rst b/docs/bigtable-client-intro.rst
index 55111ad1dfb5..db04ffa0e0c1 100644
--- a/docs/bigtable-client-intro.rst
+++ b/docs/bigtable-client-intro.rst
@@ -63,7 +63,7 @@ Configuration
 Admin API Access
 ----------------
 
-If you'll be using your client to make `Cluster Admin`_ and `Table Admin`_
+If you'll be using your client to make `Instance Admin`_ and `Table Admin`_
 API requests, you'll need to pass the ``admin`` argument:
 
 .. code:: python
@@ -89,10 +89,10 @@ Next Step
 ---------
 
 After a :class:`Client `, the next highest-level
-object is a :class:`Cluster `. You'll need
+object is an :class:`Instance `. You'll need
 one before you can interact with tables or data.
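+
+For example, a minimal sketch of getting from a client to an instance
+(``'my-instance-id'`` and the display name are placeholder values; the
+project and credentials are assumed to be picked up from the environment):
+
+.. code:: python
+
+    from gcloud.bigtable.client import Client
+
+    client = Client(admin=True)
+    instance = client.instance('my-instance-id',
+                               display_name='My Instance')
+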
-Head next to learn about the :doc:`bigtable-cluster-api`. +Head next to learn about the :doc:`bigtable-instance-api`. -.. _Cluster Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1 +.. _Instance Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1 .. _Table Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1 diff --git a/docs/bigtable-cluster-api.rst b/docs/bigtable-cluster-api.rst deleted file mode 100644 index 1266fa8e893a..000000000000 --- a/docs/bigtable-cluster-api.rst +++ /dev/null @@ -1,187 +0,0 @@ -Cluster Admin API -================= - -.. warning:: - - gRPC is required for using the Cloud Bigtable API. As of May 2016, - ``grpcio`` is only supported in Python 2.7, so importing - :mod:`gcloud.bigtable` in other versions of Python will fail. - -After creating a :class:`Client `, you can -interact with individual clusters, groups of clusters or available -zones for a project. - -List Clusters -------------- - -If you want a comprehensive list of all existing clusters, make a -`ListClusters`_ API request with -:meth:`Client.list_clusters() `: - -.. code:: python - - clusters = client.list_clusters() - -List Zones ----------- - -If you aren't sure which ``zone`` to create a cluster in, find out -which zones your project has access to with a `ListZones`_ API request -with :meth:`Client.list_zones() `: - -.. code:: python - - zones = client.list_zones() - -You can choose a :class:`string ` from among the result to pass to -the :class:`Cluster ` constructor. - -The available zones (as of February 2016) are - -.. code:: python - - >>> zones - [u'asia-east1-b', u'europe-west1-c', u'us-central1-c', u'us-central1-b'] - -Cluster Factory ---------------- - -To create a :class:`Cluster ` object: - -.. code:: python - - cluster = client.cluster(zone, cluster_id, - display_name=display_name, - serve_nodes=3) - -Both ``display_name`` and ``serve_nodes`` are optional. When not provided, -``display_name`` defaults to the ``cluster_id`` value and ``serve_nodes`` -defaults to the minimum allowed: -:data:`DEFAULT_SERVE_NODES `. - -Even if this :class:`Cluster ` already -has been created with the API, you'll want this object to use as a -parent of a :class:`Table ` just as the -:class:`Client ` is used as the parent of -a :class:`Cluster `. - -Create a new Cluster --------------------- - -After creating the cluster object, make a `CreateCluster`_ API request -with :meth:`create() `: - -.. code:: python - - cluster.display_name = 'My very own cluster' - cluster.create() - -If you would like more than the minimum number of nodes -(:data:`DEFAULT_SERVE_NODES `) -in your cluster: - -.. code:: python - - cluster.serve_nodes = 10 - cluster.create() - -Check on Current Operation --------------------------- - -.. note:: - - When modifying a cluster (via a `CreateCluster`_, `UpdateCluster`_ or - `UndeleteCluster`_ request), the Bigtable API will return a - `long-running operation`_ and a corresponding - :class:`Operation ` object - will be returned by each of - :meth:`create() `, - :meth:`update() ` and - :meth:`undelete() `. 
- -You can check if a long-running operation (for a -:meth:`create() `, -:meth:`update() ` or -:meth:`undelete() `) has finished -by making a `GetOperation`_ request with -:meth:`Operation.finished() `: - -.. code:: python - - >>> operation = cluster.create() - >>> operation.finished() - True - -.. note:: - - Once an :class:`Operation ` object - has returned :data:`True` from - :meth:`finished() `, the - object should not be re-used. Subsequent calls to - :meth:`finished() ` - will result in a :class:`ValueError `. - -Get metadata for an existing Cluster ------------------------------------- - -After creating the cluster object, make a `GetCluster`_ API request -with :meth:`reload() `: - -.. code:: python - - cluster.reload() - -This will load ``serve_nodes`` and ``display_name`` for the existing -``cluster`` in addition to the ``cluster_id``, ``zone`` and ``project`` -already set on the :class:`Cluster ` object. - -Update an existing Cluster --------------------------- - -After creating the cluster object, make an `UpdateCluster`_ API request -with :meth:`update() `: - -.. code:: python - - client.display_name = 'New display_name' - cluster.update() - -Delete an existing Cluster --------------------------- - -Make a `DeleteCluster`_ API request with -:meth:`delete() `: - -.. code:: python - - cluster.delete() - -Undelete a deleted Cluster --------------------------- - -Make an `UndeleteCluster`_ API request with -:meth:`undelete() `: - -.. code:: python - - cluster.undelete() - -Next Step ---------- - -Now we go down the hierarchy from -:class:`Cluster ` to a -:class:`Table `. - -Head next to learn about the :doc:`bigtable-table-api`. - -.. _Cluster Admin API: https://cloud.google.com/bigtable/docs/creating-cluster -.. _CreateCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L66-L68 -.. _GetCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L38-L40 -.. _UpdateCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L93-L95 -.. _DeleteCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L109-L111 -.. _ListZones: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L33-L35 -.. _ListClusters: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L44-L46 -.. _GetOperation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L43-L45 -.. 
_UndeleteCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L126-L128
-.. _long-running operation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L73-L102
diff --git a/docs/bigtable-instance-api.rst b/docs/bigtable-instance-api.rst
new file mode 100644
index 000000000000..c2fd1402a97b
--- /dev/null
+++ b/docs/bigtable-instance-api.rst
@@ -0,0 +1,133 @@
+Instance Admin API
+==================
+
+.. warning::
+
+    gRPC is required for using the Cloud Bigtable API. As of May 2016,
+    ``grpcio`` is only supported in Python 2.7, so importing
+    :mod:`gcloud.bigtable` in other versions of Python will fail.
+
+After creating a :class:`Client `, you can
+interact with individual instances for a project.
+
+List Instances
+--------------
+
+If you want a comprehensive list of all existing instances, make a
+`ListInstances`_ API request with
+:meth:`Client.list_instances() `:
+
+.. code:: python
+
+    instances = client.list_instances()
+
+Instance Factory
+----------------
+
+To create an :class:`Instance ` object:
+
+.. code:: python
+
+    instance = client.instance(instance_id, display_name=display_name)
+
+``display_name`` is optional. When not provided,
+``display_name`` defaults to the ``instance_id`` value.
+
+Even if this :class:`Instance ` has already
+been created with the API, you'll want to use this object as the
+parent of a :class:`Table `, just as the
+:class:`Client ` is used as the parent of
+an :class:`Instance `.
+
+Create a new Instance
+---------------------
+
+After creating the instance object, make a `CreateInstance`_ API request
+with :meth:`create() `:
+
+.. code:: python
+
+    instance.display_name = 'My very own instance'
+    instance.create()
+
+Check on Current Operation
+--------------------------
+
+.. note::
+
+    When modifying an instance (via a `CreateInstance`_ request), the Bigtable
+    API will return a `long-running operation`_ and a corresponding
+    :class:`Operation ` object
+    will be returned by
+    :meth:`create() `.
+
+You can check if a long-running operation (for a
+:meth:`create() `) has finished
+by making a `GetOperation`_ request with
+:meth:`Operation.finished() `:
+
+.. code:: python
+
+    >>> operation = instance.create()
+    >>> operation.finished()
+    True
+
+.. note::
+
+    Once an :class:`Operation ` object
+    has returned :data:`True` from
+    :meth:`finished() `, the
+    object should not be re-used. Subsequent calls to
+    :meth:`finished() `
+    will result in a :class:`ValueError `.
+
+Get metadata for an existing Instance
+-------------------------------------
+
+After creating the instance object, make a `GetInstance`_ API request
+with :meth:`reload() `:
+
+.. code:: python
+
+    instance.reload()
+
+This will load ``display_name`` for the existing ``instance`` object.
+
+Update an existing Instance
+---------------------------
+
+After creating the instance object, make an `UpdateInstance`_ API request
+with :meth:`update() `:
+
+.. code:: python
+
+    instance.display_name = 'New display_name'
+    instance.update()
+
+Delete an existing Instance
+---------------------------
+
+Make a `DeleteInstance`_ API request with
+:meth:`delete() `:
+
+.. code:: python
+
+    instance.delete()
+
+Next Step
+---------
+
+Now we go down the hierarchy from
+:class:`Instance ` to a
+:class:`Table `.
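+
+Putting these calls together, a minimal end-to-end sketch (the instance ID and
+display names are placeholders; the project and credentials are assumed to be
+picked up from the environment):
+
+.. code:: python
+
+    import time
+
+    from gcloud.bigtable.client import Client
+
+    client = Client(admin=True)
+    instance = client.instance('my-instance-id')
+
+    operation = instance.create()
+    while not operation.finished():
+        time.sleep(5)
+
+    instance.reload()
+    instance.display_name = 'New display_name'
+    instance.update()
+
+    instance.delete()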
+ +Head next to learn about the :doc:`bigtable-table-api`. + +.. _Instance Admin API: https://cloud.google.com/bigtable/docs/creating-instance +.. _CreateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L66-L68 +.. _GetInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L38-L40 +.. _UpdateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L93-L95 +.. _DeleteInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L109-L111 +.. _ListInstances: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L44-L46 +.. _GetOperation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L43-L45 +.. _long-running operation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L73-L102 diff --git a/docs/bigtable-instance.rst b/docs/bigtable-instance.rst new file mode 100644 index 000000000000..7ba1c15d8df3 --- /dev/null +++ b/docs/bigtable-instance.rst @@ -0,0 +1,12 @@ +Instance +~~~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.instance + :members: + :show-inheritance: diff --git a/docs/bigtable-table-api.rst b/docs/bigtable-table-api.rst index 6ef4dba1e7e0..554b157031f9 100644 --- a/docs/bigtable-table-api.rst +++ b/docs/bigtable-table-api.rst @@ -7,20 +7,20 @@ Table Admin API ``grpcio`` is only supported in Python 2.7, so importing :mod:`gcloud.bigtable` in other versions of Python will fail. -After creating a :class:`Cluster `, you can +After creating a :class:`Instance `, you can interact with individual tables, groups of tables or column families within a table. List Tables ----------- -If you want a comprehensive list of all existing tables in a cluster, make a +If you want a comprehensive list of all existing tables in a instance, make a `ListTables`_ API request with -:meth:`Cluster.list_tables() `: +:meth:`Instance.list_tables() `: .. code:: python - >>> cluster.list_tables() + >>> instance.list_tables() [, ] @@ -31,7 +31,7 @@ To create a :class:`Table ` object: .. 
code:: python - table = cluster.table(table_id) + table = instance.table(table_id) Even if this :class:`Table ` already has been created with the API, you'll want this object to use as a @@ -65,17 +65,6 @@ Make a `DeleteTable`_ API request with table.delete() -Rename an existing Table ------------------------- - -Though the `RenameTable`_ API request is listed in the service -definition, requests to that method return:: - - BigtableTableService.RenameTable is not yet implemented - -We have implemented :meth:`rename() ` -but it will not work unless the backend supports the method. - List Column Families in a Table ------------------------------- diff --git a/docs/index.rst b/docs/index.rst index df0aa0ea9980..b263dba70531 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -63,10 +63,11 @@ bigtable-usage HappyBase bigtable-client-intro - bigtable-cluster-api + bigtable-instance-api bigtable-table-api bigtable-data-api Client + bigtable-instance bigtable-cluster bigtable-table bigtable-column-family diff --git a/gcloud/bigtable/_generated_v2/__init__.py b/gcloud/bigtable/_generated_v2/__init__.py new file mode 100644 index 000000000000..ad35adcf05ae --- /dev/null +++ b/gcloud/bigtable/_generated_v2/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generated protobuf modules for Google Cloud Bigtable API.""" diff --git a/gcloud/bigtable/_generated_v2/_bigtable.proto b/gcloud/bigtable/_generated_v2/_bigtable.proto new file mode 100644 index 000000000000..49e27ca2ff5f --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_bigtable.proto @@ -0,0 +1,321 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/v2/data.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableProto"; +option java_package = "com.google.bigtable.v2"; + + +// Service for reading from and writing to existing Bigtable tables. +service Bigtable { + // Streams back the contents of all requested rows, optionally + // applying the same Reader filter to each. Depending on their size, + // rows and cells may be broken up across multiple responses, but + // atomicity of each row will still be preserved. See the + // ReadRowsResponse documentation for details. 
+ rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" body: "*" }; + } + + // Returns a sample of row keys in the table. The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. + rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { + option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" }; + } + + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by `mutation`. + rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" body: "*" }; + } + + // Mutates multiple rows in a batch. Each individual row is mutated + // atomically as in MutateRow, but the entire batch is not executed + // atomically. + rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" body: "*" }; + } + + // Mutates a row atomically based on the output of a predicate Reader filter. + rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" }; + } + + // Modifies a row atomically. The method reads the latest existing timestamp + // and value from the specified columns and writes a new entry based on + // pre-defined read/modify/write rules. The new value for the timestamp is the + // greater of the existing timestamp or the current server time. The method + // returns the new contents of all modified cells. + rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" }; + } +} + +// Request message for Bigtable.ReadRows. +message ReadRowsRequest { + // The unique name of the table from which to read. + // Values are of the form + // projects/<project>/instances/<instance>/tables/<table> + string table_name = 1; + + // The row keys and/or ranges to read. If not specified, reads from all rows. + RowSet rows = 2; + + // The filter to apply to the contents of the specified row(s). If unset, + // reads the entirety of each row. + RowFilter filter = 3; + + // The read will terminate after committing to N rows' worth of results. The + // default (zero) is to return all results. + int64 rows_limit = 4; +} + +// Response message for Bigtable.ReadRows. +message ReadRowsResponse { + // Specifies a piece of a row's contents returned as part of the read + // response stream. + message CellChunk { + // The row key for this chunk of data. If the row key is empty, + // this CellChunk is a continuation of the same row as the previous + // CellChunk in the response stream, even if that CellChunk was in a + // previous ReadRowsResponse message. + bytes row_key = 1; + + // The column family name for this chunk of data. If this message + // is not present this CellChunk is a continuation of the same column + // family as the previous CellChunk. 
The empty string can occur as a + // column family name in a response so clients must check + // explicitly for the presence of this message, not just for + // `family_name.value` being non-empty. + google.protobuf.StringValue family_name = 2; + + // The column qualifier for this chunk of data. If this message + // is not present, this CellChunk is a continuation of the same column + // as the previous CellChunk. Column qualifiers may be empty so + // clients must check for the presence of this message, not just + // for `qualifier.value` being non-empty. + google.protobuf.BytesValue qualifier = 3; + + // The cell's stored timestamp, which also uniquely identifies it + // within its column. Values are always expressed in + // microseconds, but individual tables may set a coarser + // granularity to further restrict the allowed values. For + // example, a table which specifies millisecond granularity will + // only allow values of `timestamp_micros` which are multiples of + // 1000. Timestamps are only set in the first CellChunk per cell + // (for cells split into multiple chunks). + int64 timestamp_micros = 4; + + // Labels applied to the cell by a + // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set + // on the first CellChunk per cell. + repeated string labels = 5; + + // The value stored in the cell. Cell values can be split across + // multiple CellChunks. In that case only the value field will be + // set in CellChunks after the first: the timestamp and labels + // will only be present in the first CellChunk, even if the first + // CellChunk came in a previous ReadRowsResponse. + bytes value = 6; + + // If this CellChunk is part of a chunked cell value and this is + // not the final chunk of that cell, value_size will be set to the + // total length of the cell value. The client can use this size + // to pre-allocate memory to hold the full cell value. + int32 value_size = 7; + + oneof row_status { + // Indicates that the client should drop all previous chunks for + // `row_key`, as it will be re-read from the beginning. + bool reset_row = 8; + + // Indicates that the client can safely process all previous chunks for + // `row_key`, as its data has been fully read. + bool commit_row = 9; + } + } + + repeated CellChunk chunks = 1; + + // Optionally the server might return the row key of the last row it + // has scanned. The client can use this to construct a more + // efficient retry request if needed: any row keys or portions of + // ranges less than this row key can be dropped from the request. + // This is primarily useful for cases where the server has read a + // lot of data that was filtered out since the last committed row + // key, allowing the client to skip that work on a retry. + bytes last_scanned_row_key = 2; +} + +// Request message for Bigtable.SampleRowKeys. +message SampleRowKeysRequest { + // The unique name of the table from which to sample row keys. + // Values are of the form + // projects/<project>/instances/<instance>/tables/<table> + string table_name = 1; +} + +// Response message for Bigtable.SampleRowKeys. +message SampleRowKeysResponse { + // Sorted streamed sequence of sample row keys in the table. The table might + // have contents before the first row key in the list and after the last one, + // but a key containing the empty string indicates "end of table" and will be + // the last response given, if present. 
+ // Note that row keys in this list may not have ever been written to or read + // from, and users should therefore not make any assumptions about the row key + // structure that are specific to their use case. + bytes row_key = 1; + + // Approximate total storage space used by all rows in the table which precede + // `row_key`. Buffering the contents of all rows between two subsequent + // samples would require space roughly equal to the difference in their + // `offset_bytes` fields. + int64 offset_bytes = 2; +} + +// Request message for Bigtable.MutateRow. +message MutateRowRequest { + // The unique name of the table to which the mutation should be applied. + // Values are of the form + // projects/<project>/instances/<instance>/tables/<table> + string table_name = 1; + + // The key of the row to which the mutation should be applied. + bytes row_key = 2; + + // Changes to be atomically applied to the specified row. Entries are applied + // in order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry and at most 100000. + repeated Mutation mutations = 3; +} + +// Response message for Bigtable.MutateRow. +message MutateRowResponse { + +} + +// Request message for BigtableService.MutateRows. +message MutateRowsRequest { + message Entry { + // The key of the row to which the `mutations` should be applied. + bytes row_key = 1; + + // Changes to be atomically applied to the specified row. Mutations are + // applied in order, meaning that earlier mutations can be masked by + // later ones. + // You must specify at least one mutation. + repeated Mutation mutations = 2; + } + + // The unique name of the table to which the mutations should be applied. + string table_name = 1; + + // The row keys and corresponding mutations to be applied in bulk. + // Each entry is applied as an atomic mutation, but the entries may be + // applied in arbitrary order (even between entries for the same row). + // At least one entry must be specified, and in total the entries can + // contain at most 100000 mutations. + repeated Entry entries = 2; +} + +// Response message for BigtableService.MutateRows. +message MutateRowsResponse { + message Entry { + // The index into the original request's `entries` list of the Entry + // for which a result is being reported. + int64 index = 1; + + // The result of the request Entry identified by `index`. + // Depending on how requests are batched during execution, it is possible + // for one Entry to fail due to an error with another Entry. In the event + // that this occurs, the same error will be reported for both entries. + google.rpc.Status status = 2; + } + + // One or more results for Entries from the batch request. + repeated Entry entries = 1; +} + +// Request message for Bigtable.CheckAndMutateRow. +message CheckAndMutateRowRequest { + // The unique name of the table to which the conditional mutation should be + // applied. + // Values are of the form + // projects/<project>/instances/<instance>/tables/<table> + string table_name = 1; + + // The key of the row to which the conditional mutation should be applied. + bytes row_key = 2; + + // The filter to be applied to the contents of the specified row. Depending + // on whether or not any results are yielded, either `true_mutations` or + // `false_mutations` will be executed. If unset, checks that the row contains + // any values at all. 
+ RowFilter predicate_filter = 6; + + // Changes to be atomically applied to the specified row if `predicate_filter` + // yields at least one cell when applied to `row_key`. Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if `false_mutations` is empty, and at most + // 100000. + repeated Mutation true_mutations = 4; + + // Changes to be atomically applied to the specified row if `predicate_filter` + // does not yield any cells when applied to `row_key`. Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if `true_mutations` is empty, and at most + // 100000. + repeated Mutation false_mutations = 5; +} + +// Response message for Bigtable.CheckAndMutateRow. +message CheckAndMutateRowResponse { + // Whether or not the request's `predicate_filter` yielded any results for + // the specified row. + bool predicate_matched = 1; +} + +// Request message for Bigtable.ReadModifyWriteRow. +message ReadModifyWriteRowRequest { + // The unique name of the table to which the read/modify/write rules should be + // applied. + // Values are of the form + // projects/<project>/instances/<instance>/tables/<table> + string table_name = 1; + + // The key of the row to which the read/modify/write rules should be applied. + bytes row_key = 2; + + // Rules specifying how the specified row's contents are to be transformed + // into writes. Entries are applied in order, meaning that earlier rules will + // affect the results of later ones. + repeated ReadModifyWriteRule rules = 3; +} + +// Response message for Bigtable.ReadModifyWriteRow. +message ReadModifyWriteRowResponse { + // A Row containing the new contents of all cells modified by the request. + Row row = 1; +} diff --git a/gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto b/gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto new file mode 100644 index 000000000000..bda5d2163532 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto @@ -0,0 +1,232 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/instance.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableInstanceAdminProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// Service for creating, configuring, and deleting Cloud Bigtable Instances and +// Clusters. Provides access to the Instance and Cluster schemas only, not the +// tables metadata or data stored in those tables. +service BigtableInstanceAdmin { + // Create an instance within a project. 
+ rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { post: "/v2/{parent=projects/*}/instances" body: "*" }; + } + + // Gets information about an instance. + rpc GetInstance(GetInstanceRequest) returns (Instance) { + option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}" }; + } + + // Lists information about instances in a project. + rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { + option (google.api.http) = { get: "/v2/{parent=projects/*}/instances" }; + } + + // Updates an instance within a project. + rpc UpdateInstance(Instance) returns (Instance) { + option (google.api.http) = { put: "/v2/{name=projects/*/instances/*}" body: "*" }; + } + + // Delete an instance from a project. + rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*}" }; + } + + // Creates a cluster within an instance. + rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/clusters" body: "cluster" }; + } + + // Gets information about a cluster. + rpc GetCluster(GetClusterRequest) returns (Cluster) { + option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/clusters/*}" }; + } + + // Lists information about clusters in an instance. + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/clusters" }; + } + + // Updates a cluster within an instance. + rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { + option (google.api.http) = { put: "/v2/{name=projects/*/instances/*/clusters/*}" body: "*" }; + } + + // Deletes a cluster from an instance. + rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/clusters/*}" }; + } +} + +// Request message for BigtableInstanceAdmin.CreateInstance. +message CreateInstanceRequest { + // The unique name of the project in which to create the new instance. + // Values are of the form projects/ + string parent = 1; + + // The id to be used when referring to the new instance within its project, + // e.g. just the "myinstance" section of the full name + // "projects/myproject/instances/myinstance" + string instance_id = 2; + + // The instance to create. + // Fields marked "@OutputOnly" must be left blank. + Instance instance = 3; + + // The clusters to be created within the instance, mapped by desired + // cluster ID (e.g. just the "mycluster" part of the full name + // "projects/myproject/instances/myinstance/clusters/mycluster"). + // Fields marked "@OutputOnly" must be left blank. + // Currently exactly one cluster must be specified. + map clusters = 4; +} + +// Request message for BigtableInstanceAdmin.GetInstance. +message GetInstanceRequest { + // The unique name of the requested instance. Values are of the form + // projects//instances/ + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListInstances. +message ListInstancesRequest { + // The unique name of the project for which a list of instances is requested. + // Values are of the form projects/ + string parent = 1; + + // The value of `next_page_token` returned by a previous call. + string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListInstances. 
+message ListInstancesResponse { + // The list of requested instances. + repeated Instance instances = 1; + + // Locations from which Instance information could not be retrieved, + // due to an outage or some other transient condition. + // Instances whose Clusters are all in one of the failed locations + // may be missing from 'instances', and Instances with at least one + // Cluster in a failed location may only have partial information returned. + repeated string failed_locations = 2; + + // Set if not all instances could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + string next_page_token = 3; +} + +// Request message for BigtableInstanceAdmin.DeleteInstance. +message DeleteInstanceRequest { + // The unique name of the instance to be deleted. + // Values are of the form projects//instances/ + string name = 1; +} + +// Request message for BigtableInstanceAdmin.CreateCluster. +message CreateClusterRequest { + // The unique name of the instance in which to create the new cluster. + // Values are of the form + // projects//instances//clusters/[a-z][-a-z0-9]* + string parent = 1; + + // The id to be used when referring to the new cluster within its instance, + // e.g. just the "mycluster" section of the full name + // "projects/myproject/instances/myinstance/clusters/mycluster" + string cluster_id = 2; + + // The cluster to be created. + // Fields marked "@OutputOnly" must be left blank. + Cluster cluster = 3; +} + +// Request message for BigtableInstanceAdmin.GetCluster. +message GetClusterRequest { + // The unique name of the requested cluster. Values are of the form + // projects//instances//clusters/ + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListClusters. +message ListClustersRequest { + // The unique name of the instance for which a list of clusters is requested. + // Values are of the form projects//instances/ + // Use = '-' to list Clusters for all Instances in a project, + // for example "projects/myproject/instances/-" + string parent = 1; + + // The value of `next_page_token` returned by a previous call. + string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListClusters. +message ListClustersResponse { + // The list of requested clusters. + repeated Cluster clusters = 1; + + // Locations from which Cluster information could not be retrieved, + // due to an outage or some other transient condition. + // Clusters from these locations may be missing from 'clusters', + // or may only have partial information returned. + repeated string failed_locations = 2; + + // Set if not all clusters could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + string next_page_token = 3; +} + +// Request message for BigtableInstanceAdmin.DeleteCluster. +message DeleteClusterRequest { + // The unique name of the cluster to be deleted. Values are of the form + // projects//instances//clusters/ + string name = 1; +} + +// The metadata for the Operation returned by CreateInstance. +message CreateInstanceMetadata { + // The request that prompted the initiation of this CreateInstance operation. + CreateInstanceRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. 
+ google.protobuf.Timestamp finish_time = 3; +} + +// The metadata for the Operation returned by UpdateCluster. +message UpdateClusterMetadata { + // The request that prompted the initiation of this UpdateCluster operation. + Cluster original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} diff --git a/gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto b/gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto new file mode 100644 index 000000000000..0a39e298359c --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto @@ -0,0 +1,195 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/table.proto"; +import "google/protobuf/empty.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableTableAdminProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// Service for creating, configuring, and deleting Cloud Bigtable tables. +// Provides access to the table schemas only, not the data stored within +// the tables. +service BigtableTableAdmin { + // Creates a new table in the specified instance. + // The table can be created with a full set of initial column families, + // specified in the request. + rpc CreateTable(CreateTableRequest) returns (Table) { + option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/tables" body: "*" }; + } + + // Lists all tables served from a specified instance. + rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { + option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/tables" }; + } + + // Gets metadata information about the specified table. + rpc GetTable(GetTableRequest) returns (Table) { + option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/tables/*}" }; + } + + // Permanently deletes a specified table and all of its data. + rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/tables/*}" }; + } + + // Atomically performs a series of column family modifications + // on the specified table. + rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { + option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" body: "*" }; + } + + // Permanently drop/delete a row range from a specified table. The request can + // specify whether to delete all rows in a table, or only those that match a + // particular prefix. 
+ rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" body: "*" }; + } +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] +message CreateTableRequest { + // An initial split point for a newly created table. + message Split { + // Row key to use as an initial tablet boundary. + bytes key = 1; + } + + // The unique name of the instance in which to create the table. + // Values are of the form projects//instances/ + string parent = 1; + + // The name by which the new table should be referred to within the parent + // instance, e.g. "foobar" rather than "/tables/foobar". + string table_id = 2; + + // The Table to create. + Table table = 3; + + // The optional list of row keys that will be used to initially split the + // table into several tablets (Tablets are similar to HBase regions). + // Given two split keys, "s1" and "s2", three tablets will be created, + // spanning the key ranges: [, s1), [s1, s2), [s2, ). + // + // Example: + // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", + // "other", "zz"] + // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] + // * Key assignment: + // - Tablet 1 [, apple) => {"a"}. + // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. + // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. + // - Tablet 4 [customer_2, other) => {"customer_2"}. + // - Tablet 5 [other, ) => {"other", "zz"}. + repeated Split initial_splits = 4; +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] +message DropRowRangeRequest { + // The unique name of the table on which to drop a range of rows. + // Values are of the form projects//instances//tables/ + string name = 1; + + oneof target { + // Delete all rows that start with this row key prefix. Prefix cannot be + // zero length. + bytes row_key_prefix = 2; + + // Delete all rows in the table. Setting this to false is a no-op. + bool delete_all_data_from_table = 3; + } +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +message ListTablesRequest { + // The unique name of the instance for which tables should be listed. + // Values are of the form projects//instances/ + string parent = 1; + + // The view to be applied to the returned tables' fields. + // Defaults to NAME_ONLY if unspecified (no others are currently supported). + Table.View view = 2; + + // The value of `next_page_token` returned by a previous call. + string page_token = 3; +} + +// Response message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +message ListTablesResponse { + // The tables present in the requested cluster. + repeated Table tables = 1; + + // Set if not all tables could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + string next_page_token = 2; +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] +message GetTableRequest { + // The unique name of the requested table. + // Values are of the form projects//instances//tables/
+ string name = 1; + + // The view to be applied to the returned table's fields. + // Defaults to SCHEMA_ONLY if unspecified. + Table.View view = 2; +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] +message DeleteTableRequest { + // The unique name of the table to be deleted. + // Values are of the form projects//instances//tables/
+ string name = 1; +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] +message ModifyColumnFamiliesRequest { + // A create, update, or delete of a particular column family. + message Modification { + // The ID of the column family to be modified. + string id = 1; + + oneof mod { + // Create a new column family with the specified schema, or fail if + // one already exists with the given ID. + ColumnFamily create = 2; + + // Update an existing column family to the specified schema, or fail + // if no column family exists with the given ID. + ColumnFamily update = 3; + + // Drop (delete) the column family with the given ID, or fail if no such + // family exists. + bool drop = 4; + } + } + + // The unique name of the table whose families should be modified. + // Values are of the form projects//instances//tables/
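Each Modification pairs a column family ID with one arm of the `mod` oneof; a hedged Python sketch of a request that creates one family and drops another follows (module paths assumed as above, and GcRule/ColumnFamily come from the table.proto added later in this diff).

    from gcloud.bigtable._generated_v2 import bigtable_table_admin_pb2 as table_admin_v2_pb2
    from gcloud.bigtable._generated_v2 import table_pb2

    Modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification

    request = table_admin_v2_pb2.ModifyColumnFamiliesRequest(
        name='projects/my-project/instances/my-instance/tables/my-table',
        modifications=[
            # Create "cf1", keeping at most three versions per cell.
            Modification(
                id='cf1',
                create=table_pb2.ColumnFamily(
                    gc_rule=table_pb2.GcRule(max_num_versions=3)),
            ),
            # Drop "cf_old" entirely; entries are applied in order.
            Modification(id='cf_old', drop=True),
        ],
    )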
+ string name = 1; + + // Modifications to be atomically applied to the specified table's families. + // Entries are applied in order, meaning that earlier modifications can be + // masked by later ones (in the case of repeated updates to the same family, + // for example). + repeated Modification modifications = 2; +} diff --git a/gcloud/bigtable/_generated_v2/_common.proto b/gcloud/bigtable/_generated_v2/_common.proto new file mode 100644 index 000000000000..1912e03e0446 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_common.proto @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/protobuf/timestamp.proto"; + +option java_multiple_files = true; +option java_outer_classname = "CommonProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// Storage media types for persisting Bigtable data. +enum StorageType { + // The user did not specify a storage type. + STORAGE_TYPE_UNSPECIFIED = 0; + + // Flash (SSD) storage should be used. + SSD = 1; + + // Magnetic drive (HDD) storage should be used. + HDD = 2; +} diff --git a/gcloud/bigtable/_generated_v2/_data.proto b/gcloud/bigtable/_generated_v2/_data.proto new file mode 100644 index 000000000000..720f48279b8f --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_data.proto @@ -0,0 +1,532 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.v2; + +option java_multiple_files = true; +option java_outer_classname = "DataProto"; +option java_package = "com.google.bigtable.v2"; + + +// Specifies the complete (requested) contents of a single row of a table. +// Rows which exceed 256MiB in size cannot be read in full. +message Row { + // The unique key which identifies this row within its table. This is the same + // key that's used to identify the row in, for example, a MutateRowRequest. + // May contain any non-empty byte string up to 4KiB in length. + bytes key = 1; + + // May be empty, but only if the entire row is empty. + // The mutual ordering of column families is not specified. + repeated Family families = 2; +} + +// Specifies (some of) the contents of a single row/column family intersection +// of a table. +message Family { + // The unique key which identifies this family within its row. 
This is the
+  // same key that's used to identify the family in, for example, a RowFilter
+  // which sets its "family_name_regex_filter" field.
+  // Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may
+  // produce cells in a sentinel family with an empty name.
+  // Must be no greater than 64 characters in length.
+  string name = 1;
+
+  // Must not be empty. Sorted in order of increasing "qualifier".
+  repeated Column columns = 2;
+}
+
+// Specifies (some of) the contents of a single row/column intersection of a
+// table.
+message Column {
+  // The unique key which identifies this column within its family. This is the
+  // same key that's used to identify the column in, for example, a RowFilter
+  // which sets its `column_qualifier_regex_filter` field.
+  // May contain any byte string, including the empty string, up to 16kiB in
+  // length.
+  bytes qualifier = 1;
+
+  // Must not be empty. Sorted in order of decreasing "timestamp_micros".
+  repeated Cell cells = 2;
+}
+
+// Specifies (some of) the contents of a single row/column/timestamp of a table.
+message Cell {
+  // The cell's stored timestamp, which also uniquely identifies it within
+  // its column.
+  // Values are always expressed in microseconds, but individual tables may set
+  // a coarser granularity to further restrict the allowed values. For
+  // example, a table which specifies millisecond granularity will only allow
+  // values of `timestamp_micros` which are multiples of 1000.
+  int64 timestamp_micros = 1;
+
+  // The value stored in the cell.
+  // May contain any byte string, including the empty string, up to 100MiB in
+  // length.
+  bytes value = 2;
+
+  // Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter].
+  repeated string labels = 3;
+}
+
+// Specifies a contiguous range of rows.
+message RowRange {
+  // The row key at which to start the range.
+  // If neither field is set, interpreted as the empty string, inclusive.
+  oneof start_key {
+    // Used when giving an inclusive lower bound for the range.
+    bytes start_key_closed = 1;
+
+    // Used when giving an exclusive lower bound for the range.
+    bytes start_key_open = 2;
+  }
+
+  // The row key at which to end the range.
+  // If neither field is set, interpreted as the infinite row key, exclusive.
+  oneof end_key {
+    // Used when giving an exclusive upper bound for the range.
+    bytes end_key_open = 3;
+
+    // Used when giving an inclusive upper bound for the range.
+    bytes end_key_closed = 4;
+  }
+}
+
+// Specifies a non-contiguous set of rows.
+message RowSet {
+  // Single rows included in the set.
+  repeated bytes row_keys = 1;
+
+  // Contiguous row ranges included in the set.
+  repeated RowRange row_ranges = 2;
+}
+
+// Specifies a contiguous range of columns within a single column family.
+// The range spans from <column_family>:<start_qualifier> to
+// <column_family>:<end_qualifier>, where both bounds can be either
+// inclusive or exclusive.
+message ColumnRange {
+  // The name of the column family within which this range falls.
+  string family_name = 1;
+
+  // The column qualifier at which to start the range (within `column_family`).
+  // If neither field is set, interpreted as the empty string, inclusive.
+  oneof start_qualifier {
+    // Used when giving an inclusive lower bound for the range.
+    bytes start_qualifier_closed = 2;
+
+    // Used when giving an exclusive lower bound for the range.
+    bytes start_qualifier_open = 3;
+  }
+
+  // The column qualifier at which to end the range (within `column_family`).
+ // If neither field is set, interpreted as the infinite string, exclusive. + oneof end_qualifier { + // Used when giving an inclusive upper bound for the range. + bytes end_qualifier_closed = 4; + + // Used when giving an exclusive upper bound for the range. + bytes end_qualifier_open = 5; + } +} + +// Specified a contiguous range of microsecond timestamps. +message TimestampRange { + // Inclusive lower bound. If left empty, interpreted as 0. + int64 start_timestamp_micros = 1; + + // Exclusive upper bound. If left empty, interpreted as infinity. + int64 end_timestamp_micros = 2; +} + +// Specifies a contiguous range of raw byte values. +message ValueRange { + // The value at which to start the range. + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_value { + // Used when giving an inclusive lower bound for the range. + bytes start_value_closed = 1; + + // Used when giving an exclusive lower bound for the range. + bytes start_value_open = 2; + } + + // The value at which to end the range. + // If neither field is set, interpreted as the infinite string, exclusive. + oneof end_value { + // Used when giving an inclusive upper bound for the range. + bytes end_value_closed = 3; + + // Used when giving an exclusive upper bound for the range. + bytes end_value_open = 4; + } +} + +// Takes a row as input and produces an alternate view of the row based on +// specified rules. For example, a RowFilter might trim down a row to include +// just the cells from columns matching a given regular expression, or might +// return all the cells of a row but not their values. More complicated filters +// can be composed out of these components to express requests such as, "within +// every column of a particular family, give just the two most recent cells +// which are older than timestamp X." +// +// There are two broad categories of RowFilters (true filters and transformers), +// as well as two ways to compose simple filters into more complex ones +// (chains and interleaves). They work as follows: +// +// * True filters alter the input row by excluding some of its cells wholesale +// from the output row. An example of a true filter is the `value_regex_filter`, +// which excludes cells whose values don't match the specified pattern. All +// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) +// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An +// important point to keep in mind is that `RE2(.)` is equivalent by default to +// `RE2([^\n])`, meaning that it does not match newlines. When attempting to +// match an arbitrary byte, you should therefore use the escape sequence `\C`, +// which may need to be further escaped as `\\C` in your client language. +// +// * Transformers alter the input row by changing the values of some of its +// cells in the output, without excluding them completely. Currently, the only +// supported transformer is the `strip_value_transformer`, which replaces every +// cell's value with the empty string. +// +// * Chains and interleaves are described in more detail in the +// RowFilter.Chain and RowFilter.Interleave documentation. +// +// The total serialized size of a RowFilter message must not +// exceed 4096 bytes, and RowFilters may not be nested within each other +// (in Chains or Interleaves) to a depth of more than 20. +message RowFilter { + // A RowFilter which sends rows through several RowFilters in sequence. 
+ message Chain { + // The elements of "filters" are chained together to process the input row: + // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + // The full chain is executed atomically. + repeated RowFilter filters = 1; + } + + // A RowFilter which sends each row to each of several component + // RowFilters and interleaves the results. + message Interleave { + // The elements of "filters" all process a copy of the input row, and the + // results are pooled, sorted, and combined into a single output row. + // If multiple cells are produced with the same column and timestamp, + // they will all appear in the output row in an unspecified mutual order. + // Consider the following example, with three filters: + // + // input row + // | + // ----------------------------------------------------- + // | | | + // f(0) f(1) f(2) + // | | | + // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + // 2: foo,blah,11,z far,blah,5,x far,blah,5,x + // | | | + // ----------------------------------------------------- + // | + // 1: foo,bar,10,z // could have switched with #2 + // 2: foo,bar,10,x // could have switched with #1 + // 3: foo,blah,11,z + // 4: far,bar,7,a + // 5: far,blah,5,x // identical to #6 + // 6: far,blah,5,x // identical to #5 + // + // All interleaved filters are executed atomically. + repeated RowFilter filters = 1; + } + + // A RowFilter which evaluates one of two possible RowFilters, depending on + // whether or not a predicate RowFilter outputs any cells from the input row. + // + // IMPORTANT NOTE: The predicate filter does not execute atomically with the + // true and false filters, which may lead to inconsistent or unexpected + // results. Additionally, Condition filters have poor performance, especially + // when filters are set for the false condition. + message Condition { + // If `predicate_filter` outputs any cells, then `true_filter` will be + // evaluated on the input row. Otherwise, `false_filter` will be evaluated. + RowFilter predicate_filter = 1; + + // The filter to apply to the input row if `predicate_filter` returns any + // results. If not provided, no results will be returned in the true case. + RowFilter true_filter = 2; + + // The filter to apply to the input row if `predicate_filter` does not + // return any results. If not provided, no results will be returned in the + // false case. + RowFilter false_filter = 3; + } + + // Which of the possible RowFilter types to apply. If none are set, this + // RowFilter returns all cells in the input row. + oneof filter { + // Applies several RowFilters to the data in sequence, progressively + // narrowing the results. + Chain chain = 1; + + // Applies several RowFilters to the data in parallel and combines the + // results. + Interleave interleave = 2; + + // Applies one of two possible RowFilters to the data based on the output of + // a predicate RowFilter. + Condition condition = 3; + + // ADVANCED USE ONLY. + // Hook for introspection into the RowFilter. Outputs all cells directly to + // the output of the read rather than to any parent filter. 
Consider the + // following example: + // + // Chain( + // FamilyRegex("A"), + // Interleave( + // All(), + // Chain(Label("foo"), Sink()) + // ), + // QualifierRegex("B") + // ) + // + // A,A,1,w + // A,B,2,x + // B,B,4,z + // | + // FamilyRegex("A") + // | + // A,A,1,w + // A,B,2,x + // | + // +------------+-------------+ + // | | + // All() Label(foo) + // | | + // A,A,1,w A,A,1,w,labels:[foo] + // A,B,2,x A,B,2,x,labels:[foo] + // | | + // | Sink() --------------+ + // | | | + // +------------+ x------+ A,A,1,w,labels:[foo] + // | A,B,2,x,labels:[foo] + // A,A,1,w | + // A,B,2,x | + // | | + // QualifierRegex("B") | + // | | + // A,B,2,x | + // | | + // +--------------------------------+ + // | + // A,A,1,w,labels:[foo] + // A,B,2,x,labels:[foo] // could be switched + // A,B,2,x // could be switched + // + // Despite being excluded by the qualifier filter, a copy of every cell + // that reaches the sink is present in the final result. + // + // As with an [Interleave][google.bigtable.v2.RowFilter.Interleave], + // duplicate cells are possible, and appear in an unspecified mutual order. + // In this case we have a duplicate with column "A:B" and timestamp 2, + // because one copy passed through the all filter while the other was + // passed through the label and sink. Note that one copy has label "foo", + // while the other does not. + // + // Cannot be used within the `predicate_filter`, `true_filter`, or + // `false_filter` of a [Condition][google.bigtable.v2.RowFilter.Condition]. + bool sink = 16; + + // Matches all cells, regardless of input. Functionally equivalent to + // leaving `filter` unset, but included for completeness. + bool pass_all_filter = 17; + + // Does not match any cells, regardless of input. Useful for temporarily + // disabling just part of a filter. + bool block_all_filter = 18; + + // Matches only cells from rows whose keys satisfy the given RE2 regex. In + // other words, passes through the entire row when the key matches, and + // otherwise produces an empty row. + // Note that, since row keys can contain arbitrary bytes, the `\C` escape + // sequence must be used if a true wildcard is desired. The `.` character + // will not match the new line character `\n`, which may be present in a + // binary key. + bytes row_key_regex_filter = 4; + + // Matches all cells from a row with probability p, and matches no cells + // from the row with probability 1-p. + double row_sample_filter = 14; + + // Matches only cells from columns whose families satisfy the given RE2 + // regex. For technical reasons, the regex must not contain the `:` + // character, even if it is not being used as a literal. + // Note that, since column families cannot contain the new line character + // `\n`, it is sufficient to use `.` as a full wildcard when matching + // column family names. + string family_name_regex_filter = 5; + + // Matches only cells from columns whose qualifiers satisfy the given RE2 + // regex. + // Note that, since column qualifiers can contain arbitrary bytes, the `\C` + // escape sequence must be used if a true wildcard is desired. The `.` + // character will not match the new line character `\n`, which may be + // present in a binary qualifier. + bytes column_qualifier_regex_filter = 6; + + // Matches only cells from columns within the given range. + ColumnRange column_range_filter = 7; + + // Matches only cells with timestamps within the given range. 
+ TimestampRange timestamp_range_filter = 8; + + // Matches only cells with values that satisfy the given regular expression. + // Note that, since cell values can contain arbitrary bytes, the `\C` escape + // sequence must be used if a true wildcard is desired. The `.` character + // will not match the new line character `\n`, which may be present in a + // binary value. + bytes value_regex_filter = 9; + + // Matches only cells with values that fall within the given range. + ValueRange value_range_filter = 15; + + // Skips the first N cells of each row, matching all subsequent cells. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_row_offset_filter = 10; + + // Matches only the first N cells of each row. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_row_limit_filter = 11; + + // Matches only the most recent N cells within each column. For example, + // if N=2, this filter would match column `foo:bar` at timestamps 10 and 9, + // skip all earlier cells in `foo:bar`, and then begin matching again in + // column `foo:bar2`. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_column_limit_filter = 12; + + // Replaces each cell's value with the empty string. + bool strip_value_transformer = 13; + + // Applies the given label to all cells in the output row. This allows + // the client to determine which results were produced from which part of + // the filter. + // + // Values must be at most 15 characters in length, and match the RE2 + // pattern `[a-z0-9\\-]+` + // + // Due to a technical limitation, it is not currently possible to apply + // multiple labels to a cell. As a result, a Chain may have no more than + // one sub-filter which contains a `apply_label_transformer`. It is okay for + // an Interleave to contain multiple `apply_label_transformers`, as they + // will be applied to separate copies of the input. This may be relaxed in + // the future. + string apply_label_transformer = 19; + } +} + +// Specifies a particular change to be made to the contents of a row. +message Mutation { + // A Mutation which sets the value of the specified cell. + message SetCell { + // The name of the family into which new data should be written. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + + // The qualifier of the column into which new data should be written. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The timestamp of the cell into which new data should be written. + // Use -1 for current Bigtable server time. + // Otherwise, the client should set this value itself, noting that the + // default value is a timestamp of zero if the field is left unspecified. + // Values must match the granularity of the table (e.g. micros, millis). + int64 timestamp_micros = 3; + + // The value to be written into the specified cell. + bytes value = 4; + } + + // A Mutation which deletes cells from the specified column, optionally + // restricting the deletions to a given timestamp range. + message DeleteFromColumn { + // The name of the family from which cells should be deleted. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + + // The qualifier of the column from which cells should be deleted. + // Can be any byte string, including the empty string. 
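The filter composition described above (chains, interleaves, conditions, and the leaf filters) can be exercised directly with the generated data_pb2 messages. A minimal sketch follows; the data_pb2 import path mirrors the other _generated_v2 modules in this diff, and the family/qualifier names are placeholders.

    from gcloud.bigtable._generated_v2 import data_pb2

    # Chain: keep only family "cf1", then limit to the two most recent cells
    # in each remaining column.
    chain_filter = data_pb2.RowFilter(
        chain=data_pb2.RowFilter.Chain(
            filters=[
                data_pb2.RowFilter(family_name_regex_filter='cf1'),
                data_pb2.RowFilter(cells_per_column_limit_filter=2),
            ],
        ),
    )

    # Condition: if any cell has qualifier "flag", strip all values;
    # otherwise pass every cell through unchanged.
    cond_filter = data_pb2.RowFilter(
        condition=data_pb2.RowFilter.Condition(
            predicate_filter=data_pb2.RowFilter(
                column_qualifier_regex_filter=b'flag'),
            true_filter=data_pb2.RowFilter(strip_value_transformer=True),
            false_filter=data_pb2.RowFilter(pass_all_filter=True),
        ),
    )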
+ bytes column_qualifier = 2; + + // The range of timestamps within which cells should be deleted. + TimestampRange time_range = 3; + } + + // A Mutation which deletes all cells from the specified column family. + message DeleteFromFamily { + // The name of the family from which cells should be deleted. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + } + + // A Mutation which deletes all cells from the containing row. + message DeleteFromRow { + + } + + // Which of the possible Mutation types to apply. + oneof mutation { + // Set a cell's value. + SetCell set_cell = 1; + + // Deletes cells from a column. + DeleteFromColumn delete_from_column = 2; + + // Deletes cells from a column family. + DeleteFromFamily delete_from_family = 3; + + // Deletes cells from the entire row. + DeleteFromRow delete_from_row = 4; + } +} + +// Specifies an atomic read/modify/write operation on the latest value of the +// specified column. +message ReadModifyWriteRule { + // The name of the family to which the read/modify/write should be applied. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + + // The qualifier of the column to which the read/modify/write should be + // applied. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The rule used to determine the column's new latest value from its current + // latest value. + oneof rule { + // Rule specifying that `append_value` be appended to the existing value. + // If the targeted cell is unset, it will be treated as containing the + // empty string. + bytes append_value = 3; + + // Rule specifying that `increment_amount` be added to the existing value. + // If the targeted cell is unset, it will be treated as containing a zero. + // Otherwise, the targeted cell must contain an 8-byte value (interpreted + // as a 64-bit big-endian signed integer), or the entire request will fail. + int64 increment_amount = 4; + } +} diff --git a/gcloud/bigtable/_generated_v2/_instance.proto b/gcloud/bigtable/_generated_v2/_instance.proto new file mode 100644 index 000000000000..4aa3f9d06dd3 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_instance.proto @@ -0,0 +1,113 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/common.proto"; + +option java_multiple_files = true; +option java_outer_classname = "InstanceProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and +// the resources that serve them. +// All tables in an instance are served from a single +// [Cluster][google.bigtable.admin.v2.Cluster]. +message Instance { + // Possible states of an instance. + enum State { + // The state of the instance could not be determined. + STATE_NOT_KNOWN = 0; + + // The instance has been successfully created and can serve requests + // to its tables. 
+ READY = 1; + + // The instance is currently being created, and may be destroyed + // if the creation process encounters an error. + CREATING = 2; + } + + // @OutputOnly + // The unique name of the instance. Values are of the form + // projects//instances/[a-z][a-z0-9\\-]+[a-z0-9] + string name = 1; + + // The descriptive name for this instance as it appears in UIs. + // Can be changed at any time, but should be kept globally unique + // to avoid confusion. + string display_name = 2; + + // + // The current state of the instance. + State state = 3; +} + +// A resizable group of nodes in a particular cloud location, capable +// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent +// [Instance][google.bigtable.admin.v2.Instance]. +message Cluster { + // Possible states of a cluster. + enum State { + // The state of the cluster could not be determined. + STATE_NOT_KNOWN = 0; + + // The cluster has been successfully created and is ready to serve requests. + READY = 1; + + // The cluster is currently being created, and may be destroyed + // if the creation process encounters an error. + // A cluster may not be able to serve requests while being created. + CREATING = 2; + + // The cluster is currently being resized, and may revert to its previous + // node count if the process encounters an error. + // A cluster is still capable of serving requests while being resized, + // but may exhibit performance as if its number of allocated nodes is + // between the starting and requested states. + RESIZING = 3; + + // The cluster has no backing nodes. The data (tables) still + // exist, but no operations can be performed on the cluster. + DISABLED = 4; + } + + // @OutputOnly + // The unique name of the cluster. Values are of the form + // projects//instances//clusters/[a-z][-a-z0-9]* + string name = 1; + + // @CreationOnly + // The location where this cluster's nodes and storage reside. For best + // performance, clients should be located as close as possible to this cluster. + // Currently only zones are supported, e.g. projects/*/locations/us-central1-b + string location = 2; + + // @OutputOnly + // The current state of the cluster. + State state = 3; + + // The number of nodes allocated to this cluster. More nodes enable higher + // throughput and more consistent performance. + int32 serve_nodes = 4; + + // @CreationOnly + // The type of storage used by this cluster to serve its + // parent instance's tables, unless explicitly overridden. + StorageType default_storage_type = 5; +} diff --git a/gcloud/bigtable/_generated_v2/_operations.proto b/gcloud/bigtable/_generated_v2/_operations.proto new file mode 100644 index 000000000000..a358d0a38787 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_operations.proto @@ -0,0 +1,144 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.longrunning; + +import "google/api/annotations.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; +import "google/rpc/status.proto"; + +option java_multiple_files = true; +option java_outer_classname = "OperationsProto"; +option java_package = "com.google.longrunning"; + + +// Manages long-running operations with an API service. +// +// When an API method normally takes long time to complete, it can be designed +// to return [Operation][google.longrunning.Operation] to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or using `google.watcher.v1.Watcher` interface to watch +// the response, or pass the operation resource to another API (such as Google +// Cloud Pub/Sub API) to receive the response. Any API service that returns +// long-running operations should implement the `Operations` interface so +// developers can have a consistent client experience. +service Operations { + // Gets the latest state of a long-running operation. Clients may use this + // method to poll the operation result at intervals as recommended by the API + // service. + rpc GetOperation(GetOperationRequest) returns (Operation) { + option (google.api.http) = { get: "/v1/{name=operations/**}" }; + } + + // Lists operations that match the specified filter in the request. If the + // server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. + rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) { + option (google.api.http) = { get: "/v1/{name=operations}" }; + } + + // Starts asynchronous cancellation on a long-running operation. The server + // makes a best effort to cancel the operation, but success is not + // guaranteed. If the server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. Clients may use + // [Operations.GetOperation] or other methods to check whether the + // cancellation succeeded or the operation completed despite cancellation. + rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" }; + } + + // Deletes a long-running operation. It indicates the client is no longer + // interested in the operation result. It does not cancel the operation. + rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v1/{name=operations/**}" }; + } +} + +// This resource represents a long-running operation that is the result of a +// network API call. +message Operation { + // The name of the operation resource, which is only unique within the same + // service that originally returns it. + string name = 1; + + // Some service-specific metadata associated with the operation. It typically + // contains progress information and common metadata such as create time. + // Some services may not provide such metadata. Any method that returns a + // long-running operation should document the metadata type, if any. + google.protobuf.Any metadata = 2; + + // If the value is false, it means the operation is still in progress. + // If true, the operation is completed and the `result` is available. + bool done = 3; + + oneof result { + // The error result of the operation in case of failure. + google.rpc.Status error = 4; + + // The normal response of the operation in case of success. 
If the original + // method returns no data on success, such as `Delete`, the response will be + // `google.protobuf.Empty`. If the original method is standard + // `Get`/`Create`/`Update`, the response should be the resource. For other + // methods, the response should have the type `XxxResponse`, where `Xxx` + // is the original method name. For example, if the original method name + // is `TakeSnapshot()`, the inferred response type will be + // `TakeSnapshotResponse`. + google.protobuf.Any response = 5; + } +} + +// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. +message GetOperationRequest { + // The name of the operation resource. + string name = 1; +} + +// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +message ListOperationsRequest { + // The name of the operation collection. + string name = 4; + + // The standard List filter. + string filter = 1; + + // The standard List page size. + int32 page_size = 2; + + // The standard List page token. + string page_token = 3; +} + +// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +message ListOperationsResponse { + // A list of operations that match the specified filter in the request. + repeated Operation operations = 1; + + // The standard List next-page token. + string next_page_token = 2; +} + +// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. +message CancelOperationRequest { + // The name of the operation resource to be cancelled. + string name = 1; +} + +// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. +message DeleteOperationRequest { + // The name of the operation resource to be deleted. + string name = 1; +} diff --git a/gcloud/bigtable/_generated_v2/_table.proto b/gcloud/bigtable/_generated_v2/_table.proto new file mode 100644 index 000000000000..63e41103e42f --- /dev/null +++ b/gcloud/bigtable/_generated_v2/_table.proto @@ -0,0 +1,115 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; + +option java_multiple_files = true; +option java_outer_classname = "TableProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// A collection of user data indexed by row, column, and timestamp. +// Each table is served using the resources of its parent cluster. +message Table { + // Possible timestamp granularities to use when keeping multiple versions + // of data in a table. + enum TimestampGranularity { + // The user did not specify a granularity. Should not be returned. + // When specified during table creation, MILLIS will be used. + TIMESTAMP_GRANULARITY_UNSPECIFIED = 0; + + // The table keeps data versioned at a granularity of 1ms. 
+    MILLIS = 1;
+  }
+
+  // Defines a view over a table's fields.
+  enum View {
+    // Uses the default view for each method as documented in its request.
+    VIEW_UNSPECIFIED = 0;
+
+    // Only populates `name`.
+    NAME_ONLY = 1;
+
+    // Only populates `name` and fields related to the table's schema.
+    SCHEMA_VIEW = 2;
+
+    // Populates all fields.
+    FULL = 4;
+  }
+
+  // The unique name of the table. Values are of the form
+  // projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*
+  // Views: NAME_ONLY, SCHEMA_VIEW, REPLICATION_VIEW, FULL
+  // @OutputOnly
+  string name = 1;
+
+  // The column families configured for this table, mapped by column family ID.
+  // Views: SCHEMA_VIEW, FULL
+  // @CreationOnly
+  map<string, ColumnFamily> column_families = 3;
+
+  // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in
+  // this table. Timestamps not matching the granularity will be rejected.
+  // If unspecified at creation time, the value will be set to MILLIS.
+  // Views: SCHEMA_VIEW, FULL
+  // @CreationOnly
+  TimestampGranularity granularity = 4;
+}
+
+// A set of columns within a table which share a common configuration.
+message ColumnFamily {
+  // Garbage collection rule specified as a protobuf.
+  // Must serialize to at most 500 bytes.
+  //
+  // NOTE: Garbage collection executes opportunistically in the background, and
+  // so it's possible for reads to return a cell even if it matches the active
+  // GC expression for its family.
+  GcRule gc_rule = 1;
+}
+
+// Rule for determining which cells to delete during garbage collection.
+message GcRule {
+  // A GcRule which deletes cells matching all of the given rules.
+  message Intersection {
+    // Only delete cells which would be deleted by every element of `rules`.
+    repeated GcRule rules = 1;
+  }
+
+  // A GcRule which deletes cells matching any of the given rules.
+  message Union {
+    // Delete cells which would be deleted by any element of `rules`.
+    repeated GcRule rules = 1;
+  }
+
+  oneof rule {
+    // Delete all cells in a column except the most recent N.
+    int32 max_num_versions = 1;
+
+    // Delete cells in a column older than the given age.
+    // Values must be at least one millisecond, and will be truncated to
+    // microsecond granularity.
+    google.protobuf.Duration max_age = 2;
+
+    // Delete cells that would be deleted by every nested rule.
+    Intersection intersection = 3;
+
+    // Delete cells that would be deleted by any nested rule.
+    Union union = 4;
+  }
+}
diff --git a/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py
new file mode 100644
index 000000000000..9da2364b7866
--- /dev/null
+++ b/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py
@@ -0,0 +1,1061 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/bigtable/admin/v2/bigtable_instance_admin.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from gcloud.bigtable._generated_v2 import instance_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2 +from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/bigtable_instance_admin.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n6google/bigtable/admin/v2/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\'google/bigtable/admin/v2/instance.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01\"\"\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xdb\x0b\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation\",\x82\xd3\xe4\x93\x02&\"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a\".google.bigtable.admin.v2.Instance\")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse\")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12\".google.bigtable.admin.v2.Instance\x1a\".google.bigtable.admin.v2.Instance\",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty\")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation\"=\x82\xd3\xe4\x93\x02\x37\",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation\"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty\"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}B<\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_CREATEINSTANCEREQUEST_CLUSTERSENTRY = _descriptor.Descriptor( + name='ClustersEntry', + full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + 
oneofs=[ + ], + serialized_start=452, + serialized_end=534, +) + +_CREATEINSTANCEREQUEST = _descriptor.Descriptor( + name='CreateInstanceRequest', + full_name='google.bigtable.admin.v2.CreateInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.CreateInstanceRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='instance_id', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='instance', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='clusters', full_name='google.bigtable.admin.v2.CreateInstanceRequest.clusters', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=255, + serialized_end=534, +) + + +_GETINSTANCEREQUEST = _descriptor.Descriptor( + name='GetInstanceRequest', + full_name='google.bigtable.admin.v2.GetInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetInstanceRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=536, + serialized_end=570, +) + + +_LISTINSTANCESREQUEST = _descriptor.Descriptor( + name='ListInstancesRequest', + full_name='google.bigtable.admin.v2.ListInstancesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.ListInstancesRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListInstancesRequest.page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=572, + serialized_end=630, +) + + +_LISTINSTANCESRESPONSE = _descriptor.Descriptor( + name='ListInstancesResponse', + full_name='google.bigtable.admin.v2.ListInstancesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='instances', full_name='google.bigtable.admin.v2.ListInstancesResponse.instances', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='failed_locations', full_name='google.bigtable.admin.v2.ListInstancesResponse.failed_locations', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListInstancesResponse.next_page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=633, + serialized_end=762, +) + + +_DELETEINSTANCEREQUEST = _descriptor.Descriptor( + name='DeleteInstanceRequest', + full_name='google.bigtable.admin.v2.DeleteInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteInstanceRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=764, + serialized_end=801, +) + + +_CREATECLUSTERREQUEST = _descriptor.Descriptor( + name='CreateClusterRequest', + full_name='google.bigtable.admin.v2.CreateClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.CreateClusterRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cluster_id', full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cluster', 
full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=803, + serialized_end=913, +) + + +_GETCLUSTERREQUEST = _descriptor.Descriptor( + name='GetClusterRequest', + full_name='google.bigtable.admin.v2.GetClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetClusterRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=915, + serialized_end=948, +) + + +_LISTCLUSTERSREQUEST = _descriptor.Descriptor( + name='ListClustersRequest', + full_name='google.bigtable.admin.v2.ListClustersRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.ListClustersRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListClustersRequest.page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=950, + serialized_end=1007, +) + + +_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( + name='ListClustersResponse', + full_name='google.bigtable.admin.v2.ListClustersResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='clusters', full_name='google.bigtable.admin.v2.ListClustersResponse.clusters', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='failed_locations', full_name='google.bigtable.admin.v2.ListClustersResponse.failed_locations', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListClustersResponse.next_page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1009, + serialized_end=1135, +) + + +_DELETECLUSTERREQUEST = _descriptor.Descriptor( + name='DeleteClusterRequest', + full_name='google.bigtable.admin.v2.DeleteClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteClusterRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1137, + serialized_end=1173, +) + + +_CREATEINSTANCEMETADATA = _descriptor.Descriptor( + name='CreateInstanceMetadata', + full_name='google.bigtable.admin.v2.CreateInstanceMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='finish_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1176, + serialized_end=1374, +) + + +_UPDATECLUSTERMETADATA = _descriptor.Descriptor( + name='UpdateClusterMetadata', + full_name='google.bigtable.admin.v2.UpdateClusterMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='finish_time', 
full_name='google.bigtable.admin.v2.UpdateClusterMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1377, + serialized_end=1560, +) + +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name['value'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST +_CREATEINSTANCEREQUEST.fields_by_name['instance'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._INSTANCE +_CREATEINSTANCEREQUEST.fields_by_name['clusters'].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY +_LISTINSTANCESRESPONSE.fields_by_name['instances'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._INSTANCE +_CREATECLUSTERREQUEST.fields_by_name['cluster'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_CREATEINSTANCEMETADATA.fields_by_name['original_request'].message_type = _CREATEINSTANCEREQUEST +_CREATEINSTANCEMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEINSTANCEMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name['original_request'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_UPDATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +DESCRIPTOR.message_types_by_name['CreateInstanceRequest'] = _CREATEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['GetInstanceRequest'] = _GETINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['ListInstancesRequest'] = _LISTINSTANCESREQUEST +DESCRIPTOR.message_types_by_name['ListInstancesResponse'] = _LISTINSTANCESRESPONSE +DESCRIPTOR.message_types_by_name['DeleteInstanceRequest'] = _DELETEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST +DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST +DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST +DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE +DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST +DESCRIPTOR.message_types_by_name['CreateInstanceMetadata'] = _CREATEINSTANCEMETADATA +DESCRIPTOR.message_types_by_name['UpdateClusterMetadata'] = _UPDATECLUSTERMETADATA + +CreateInstanceRequest = _reflection.GeneratedProtocolMessageType('CreateInstanceRequest', (_message.Message,), dict( + + ClustersEntry = _reflection.GeneratedProtocolMessageType('ClustersEntry', (_message.Message,), dict( + DESCRIPTOR = _CREATEINSTANCEREQUEST_CLUSTERSENTRY, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) + )) + , + DESCRIPTOR = _CREATEINSTANCEREQUEST, + 
__module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) + )) +_sym_db.RegisterMessage(CreateInstanceRequest) +_sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) + +GetInstanceRequest = _reflection.GeneratedProtocolMessageType('GetInstanceRequest', (_message.Message,), dict( + DESCRIPTOR = _GETINSTANCEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) + )) +_sym_db.RegisterMessage(GetInstanceRequest) + +ListInstancesRequest = _reflection.GeneratedProtocolMessageType('ListInstancesRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTINSTANCESREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) + )) +_sym_db.RegisterMessage(ListInstancesRequest) + +ListInstancesResponse = _reflection.GeneratedProtocolMessageType('ListInstancesResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTINSTANCESRESPONSE, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) + )) +_sym_db.RegisterMessage(ListInstancesResponse) + +DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType('DeleteInstanceRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETEINSTANCEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) + )) +_sym_db.RegisterMessage(DeleteInstanceRequest) + +CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _CREATECLUSTERREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) + )) +_sym_db.RegisterMessage(CreateClusterRequest) + +GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _GETCLUSTERREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) + )) +_sym_db.RegisterMessage(GetClusterRequest) + +ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTCLUSTERSREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) + )) +_sym_db.RegisterMessage(ListClustersRequest) + +ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTCLUSTERSRESPONSE, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) + )) +_sym_db.RegisterMessage(ListClustersResponse) + +DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETECLUSTERREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) + )) +_sym_db.RegisterMessage(DeleteClusterRequest) + 
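+# --- Illustrative note (editorial sketch, not produced by protoc) -----------
+# The request/response classes registered above behave like ordinary protobuf
+# messages. As a rough usage sketch, assuming a placeholder resource path, a
+# request can be constructed with keyword fields, serialized, and parsed back:
+#
+#     request = GetClusterRequest(
+#         name='projects/<project>/instances/<instance>/clusters/<cluster>')
+#     payload = request.SerializeToString()
+#     assert GetClusterRequest.FromString(payload).name == request.name
+#
+# The '<project>/<instance>/<cluster>' segments are hypothetical placeholders
+# for illustration only; they are not values defined in this module.
+# -----------------------------------------------------------------------------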
+CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType('CreateInstanceMetadata', (_message.Message,), dict( + DESCRIPTOR = _CREATEINSTANCEMETADATA, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) + )) +_sym_db.RegisterMessage(CreateInstanceMetadata) + +UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterMetadata', (_message.Message,), dict( + DESCRIPTOR = _UPDATECLUSTERMETADATA, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) + )) +_sym_db.RegisterMessage(UpdateClusterMetadata) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001')) +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.has_options = True +_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) + +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + + +class BigtableInstanceAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.CreateInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance', + request_serializer=CreateInstanceRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance', + request_serializer=GetInstanceRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + ) + self.ListInstances = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances', + request_serializer=ListInstancesRequest.SerializeToString, + response_deserializer=ListInstancesResponse.FromString, + ) + self.UpdateInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance', + request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + ) + self.DeleteInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance', + request_serializer=DeleteInstanceRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.CreateCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster', + request_serializer=CreateClusterRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster', + request_serializer=GetClusterRequest.SerializeToString, + 
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + ) + self.ListClusters = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters', + request_serializer=ListClustersRequest.SerializeToString, + response_deserializer=ListClustersResponse.FromString, + ) + self.UpdateCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster', + request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.DeleteCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster', + request_serializer=DeleteClusterRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + + +class BigtableInstanceAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ + + def CreateInstance(self, request, context): + """Create an instance within a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetInstance(self, request, context): + """Gets information about an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListInstances(self, request, context): + """Lists information about instances in a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateInstance(self, request, context): + """Updates an instance within a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteInstance(self, request, context): + """Delete an instance from a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateCluster(self, request, context): + """Creates a cluster within an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetCluster(self, request, context): + """Gets information about a cluster. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListClusters(self, request, context): + """Lists information about clusters in an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateCluster(self, request, context): + """Updates a cluster within an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteCluster(self, request, context): + """Deletes a cluster from an instance. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableInstanceAdminServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateInstance': grpc.unary_unary_rpc_method_handler( + servicer.CreateInstance, + request_deserializer=CreateInstanceRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'GetInstance': grpc.unary_unary_rpc_method_handler( + servicer.GetInstance, + request_deserializer=GetInstanceRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + ), + 'ListInstances': grpc.unary_unary_rpc_method_handler( + servicer.ListInstances, + request_deserializer=ListInstancesRequest.FromString, + response_serializer=ListInstancesResponse.SerializeToString, + ), + 'UpdateInstance': grpc.unary_unary_rpc_method_handler( + servicer.UpdateInstance, + request_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + ), + 'DeleteInstance': grpc.unary_unary_rpc_method_handler( + servicer.DeleteInstance, + request_deserializer=DeleteInstanceRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'CreateCluster': grpc.unary_unary_rpc_method_handler( + servicer.CreateCluster, + request_deserializer=CreateClusterRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'GetCluster': grpc.unary_unary_rpc_method_handler( + servicer.GetCluster, + request_deserializer=GetClusterRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + ), + 'ListClusters': grpc.unary_unary_rpc_method_handler( + servicer.ListClusters, + request_deserializer=ListClustersRequest.FromString, + response_serializer=ListClustersResponse.SerializeToString, + ), + 'UpdateCluster': grpc.unary_unary_rpc_method_handler( + servicer.UpdateCluster, + request_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'DeleteCluster': grpc.unary_unary_rpc_method_handler( + servicer.DeleteCluster, + request_deserializer=DeleteClusterRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.admin.v2.BigtableInstanceAdmin', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class BetaBigtableInstanceAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ + def CreateInstance(self, request, context): + """Create an instance within a project. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetInstance(self, request, context): + """Gets information about an instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListInstances(self, request, context): + """Lists information about instances in a project. 
+ """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateInstance(self, request, context): + """Updates an instance within a project. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteInstance(self, request, context): + """Delete an instance from a project. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CreateCluster(self, request, context): + """Creates a cluster within an instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetCluster(self, request, context): + """Gets information about a cluster. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListClusters(self, request, context): + """Lists information about clusters in an instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateCluster(self, request, context): + """Updates a cluster within an instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteCluster(self, request, context): + """Deletes a cluster from an instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + +class BetaBigtableInstanceAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ + def CreateInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Create an instance within a project. + """ + raise NotImplementedError() + CreateInstance.future = None + def GetInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets information about an instance. + """ + raise NotImplementedError() + GetInstance.future = None + def ListInstances(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists information about instances in a project. + """ + raise NotImplementedError() + ListInstances.future = None + def UpdateInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Updates an instance within a project. + """ + raise NotImplementedError() + UpdateInstance.future = None + def DeleteInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Delete an instance from a project. + """ + raise NotImplementedError() + DeleteInstance.future = None + def CreateCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Creates a cluster within an instance. + """ + raise NotImplementedError() + CreateCluster.future = None + def GetCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets information about a cluster. + """ + raise NotImplementedError() + GetCluster.future = None + def ListClusters(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists information about clusters in an instance. + """ + raise NotImplementedError() + ListClusters.future = None + def UpdateCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Updates a cluster within an instance. + """ + raise NotImplementedError() + UpdateCluster.future = None + def DeleteCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Deletes a cluster from an instance. 
+ """ + raise NotImplementedError() + DeleteCluster.future = None + + +def beta_create_BigtableInstanceAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + request_deserializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): CreateClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): CreateInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): DeleteClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): DeleteInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): GetClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): GetInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + } + response_serializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + } + method_implementations = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): face_utilities.unary_unary_inline(servicer.CreateCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): face_utilities.unary_unary_inline(servicer.CreateInstance), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): face_utilities.unary_unary_inline(servicer.DeleteCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): face_utilities.unary_unary_inline(servicer.DeleteInstance), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): face_utilities.unary_unary_inline(servicer.GetCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): face_utilities.unary_unary_inline(servicer.GetInstance), 
+ ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): face_utilities.unary_unary_inline(servicer.ListClusters), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): face_utilities.unary_unary_inline(servicer.ListInstances), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): face_utilities.unary_unary_inline(servicer.UpdateCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): face_utilities.unary_unary_inline(servicer.UpdateInstance), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + +def beta_create_BigtableInstanceAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + request_serializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): CreateClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): CreateInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): DeleteClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): DeleteInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): GetClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): GetInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + } + response_deserializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersResponse.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesResponse.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + } + 
cardinalities = { + 'CreateCluster': cardinality.Cardinality.UNARY_UNARY, + 'CreateInstance': cardinality.Cardinality.UNARY_UNARY, + 'DeleteCluster': cardinality.Cardinality.UNARY_UNARY, + 'DeleteInstance': cardinality.Cardinality.UNARY_UNARY, + 'GetCluster': cardinality.Cardinality.UNARY_UNARY, + 'GetInstance': cardinality.Cardinality.UNARY_UNARY, + 'ListClusters': cardinality.Cardinality.UNARY_UNARY, + 'ListInstances': cardinality.Cardinality.UNARY_UNARY, + 'UpdateCluster': cardinality.Cardinality.UNARY_UNARY, + 'UpdateInstance': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.v2.BigtableInstanceAdmin', cardinalities, options=stub_options) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/bigtable_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_pb2.py new file mode 100644 index 000000000000..606b3c826942 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/bigtable_pb2.py @@ -0,0 +1,1100 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/bigtable/v2/bigtable.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from gcloud.bigtable._generated_v2 import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/v2/bigtable.proto', + package='google.bigtable.v2', + syntax='proto3', + serialized_pb=_b('\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\x92\x01\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"*\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 
\x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"h\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x13\n\x11MutateRowResponse\"\xb0\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xe5\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"x\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRule\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xad\x08\n\x08\x42igtable\x12\x9d\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"D\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*0\x01\x12\xae\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"F\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys0\x01\x12\x9f\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"E\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x12\xa5\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"F\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*0\x01\x12\xbf\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"M\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x12\xc3\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"N\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*B)\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_v2_dot_data__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_READROWSREQUEST = _descriptor.Descriptor( + name='ReadRowsRequest', + full_name='google.bigtable.v2.ReadRowsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.ReadRowsRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + 
has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows', full_name='google.bigtable.v2.ReadRowsRequest.rows', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='filter', full_name='google.bigtable.v2.ReadRowsRequest.filter', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows_limit', full_name='google.bigtable.v2.ReadRowsRequest.rows_limit', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=176, + serialized_end=322, +) + + +_READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor( + name='CellChunk', + full_name='google.bigtable.v2.ReadRowsResponse.CellChunk', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.family_name', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='qualifier', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_micros', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.labels', index=4, + number=5, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value', index=5, + number=6, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value_size', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value_size', index=6, + number=7, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reset_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row', index=7, + number=8, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='commit_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row', index=8, + number=9, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='row_status', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_status', + index=0, containing_type=None, fields=[]), + ], + serialized_start=440, + serialized_end=701, +) + +_READROWSRESPONSE = _descriptor.Descriptor( + name='ReadRowsResponse', + full_name='google.bigtable.v2.ReadRowsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='chunks', full_name='google.bigtable.v2.ReadRowsResponse.chunks', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='last_scanned_row_key', full_name='google.bigtable.v2.ReadRowsResponse.last_scanned_row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_READROWSRESPONSE_CELLCHUNK, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=325, + serialized_end=701, +) + + +_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( + name='SampleRowKeysRequest', + full_name='google.bigtable.v2.SampleRowKeysRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.SampleRowKeysRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=703, + serialized_end=745, +) + + +_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( + name='SampleRowKeysResponse', + full_name='google.bigtable.v2.SampleRowKeysResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.SampleRowKeysResponse.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset_bytes', full_name='google.bigtable.v2.SampleRowKeysResponse.offset_bytes', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=747, + serialized_end=809, +) + + +_MUTATEROWREQUEST = _descriptor.Descriptor( + name='MutateRowRequest', + full_name='google.bigtable.v2.MutateRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.MutateRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.MutateRowRequest.row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mutations', full_name='google.bigtable.v2.MutateRowRequest.mutations', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=811, + serialized_end=915, +) + + +_MUTATEROWRESPONSE = _descriptor.Descriptor( + name='MutateRowResponse', + full_name='google.bigtable.v2.MutateRowResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=917, + serialized_end=936, +) + + +_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( + name='Entry', + full_name='google.bigtable.v2.MutateRowsRequest.Entry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.MutateRowsRequest.Entry.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mutations', full_name='google.bigtable.v2.MutateRowsRequest.Entry.mutations', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1042, + serialized_end=1115, +) + +_MUTATEROWSREQUEST = _descriptor.Descriptor( + name='MutateRowsRequest', + full_name='google.bigtable.v2.MutateRowsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.MutateRowsRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='entries', full_name='google.bigtable.v2.MutateRowsRequest.entries', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MUTATEROWSREQUEST_ENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=939, + serialized_end=1115, +) + + +_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor( + name='Entry', + full_name='google.bigtable.v2.MutateRowsResponse.Entry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='index', full_name='google.bigtable.v2.MutateRowsResponse.Entry.index', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='status', full_name='google.bigtable.v2.MutateRowsResponse.Entry.status', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1203, + serialized_end=1261, +) + +_MUTATEROWSRESPONSE = _descriptor.Descriptor( + name='MutateRowsResponse', + full_name='google.bigtable.v2.MutateRowsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='entries', full_name='google.bigtable.v2.MutateRowsResponse.entries', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MUTATEROWSRESPONSE_ENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1118, + serialized_end=1261, +) + + +_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( + name='CheckAndMutateRowRequest', + full_name='google.bigtable.v2.CheckAndMutateRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.CheckAndMutateRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, 
label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.CheckAndMutateRowRequest.row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='predicate_filter', full_name='google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter', index=2, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='true_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.true_mutations', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='false_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.false_mutations', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1264, + serialized_end=1493, +) + + +_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( + name='CheckAndMutateRowResponse', + full_name='google.bigtable.v2.CheckAndMutateRowResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='predicate_matched', full_name='google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1495, + serialized_end=1549, +) + + +_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( + name='ReadModifyWriteRowRequest', + full_name='google.bigtable.v2.ReadModifyWriteRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rules', 
full_name='google.bigtable.v2.ReadModifyWriteRowRequest.rules', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1551, + serialized_end=1671, +) + + +_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor( + name='ReadModifyWriteRowResponse', + full_name='google.bigtable.v2.ReadModifyWriteRowResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row', full_name='google.bigtable.v2.ReadModifyWriteRowResponse.row', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1673, + serialized_end=1739, +) + +_READROWSREQUEST.fields_by_name['rows'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWSET +_READROWSREQUEST.fields_by_name['filter'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWFILTER +_READROWSRESPONSE_CELLCHUNK.fields_by_name['family_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE +_READROWSRESPONSE_CELLCHUNK.fields_by_name['qualifier'].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE +_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row']) +_READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row']) +_READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] +_READROWSRESPONSE.fields_by_name['chunks'].message_type = _READROWSRESPONSE_CELLCHUNK +_MUTATEROWREQUEST.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST +_MUTATEROWSREQUEST.fields_by_name['entries'].message_type = _MUTATEROWSREQUEST_ENTRY +_MUTATEROWSRESPONSE_ENTRY.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS +_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE +_MUTATEROWSRESPONSE.fields_by_name['entries'].message_type = _MUTATEROWSRESPONSE_ENTRY +_CHECKANDMUTATEROWREQUEST.fields_by_name['predicate_filter'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWFILTER +_CHECKANDMUTATEROWREQUEST.fields_by_name['true_mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_CHECKANDMUTATEROWREQUEST.fields_by_name['false_mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_READMODIFYWRITEROWREQUEST.fields_by_name['rules'].message_type = 
google_dot_bigtable_dot_v2_dot_data__pb2._READMODIFYWRITERULE +_READMODIFYWRITEROWRESPONSE.fields_by_name['row'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROW +DESCRIPTOR.message_types_by_name['ReadRowsRequest'] = _READROWSREQUEST +DESCRIPTOR.message_types_by_name['ReadRowsResponse'] = _READROWSRESPONSE +DESCRIPTOR.message_types_by_name['SampleRowKeysRequest'] = _SAMPLEROWKEYSREQUEST +DESCRIPTOR.message_types_by_name['SampleRowKeysResponse'] = _SAMPLEROWKEYSRESPONSE +DESCRIPTOR.message_types_by_name['MutateRowRequest'] = _MUTATEROWREQUEST +DESCRIPTOR.message_types_by_name['MutateRowResponse'] = _MUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name['MutateRowsRequest'] = _MUTATEROWSREQUEST +DESCRIPTOR.message_types_by_name['MutateRowsResponse'] = _MUTATEROWSRESPONSE +DESCRIPTOR.message_types_by_name['CheckAndMutateRowRequest'] = _CHECKANDMUTATEROWREQUEST +DESCRIPTOR.message_types_by_name['CheckAndMutateRowResponse'] = _CHECKANDMUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name['ReadModifyWriteRowRequest'] = _READMODIFYWRITEROWREQUEST +DESCRIPTOR.message_types_by_name['ReadModifyWriteRowResponse'] = _READMODIFYWRITEROWRESPONSE + +ReadRowsRequest = _reflection.GeneratedProtocolMessageType('ReadRowsRequest', (_message.Message,), dict( + DESCRIPTOR = _READROWSREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) + )) +_sym_db.RegisterMessage(ReadRowsRequest) + +ReadRowsResponse = _reflection.GeneratedProtocolMessageType('ReadRowsResponse', (_message.Message,), dict( + + CellChunk = _reflection.GeneratedProtocolMessageType('CellChunk', (_message.Message,), dict( + DESCRIPTOR = _READROWSRESPONSE_CELLCHUNK, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) + )) + , + DESCRIPTOR = _READROWSRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) + )) +_sym_db.RegisterMessage(ReadRowsResponse) +_sym_db.RegisterMessage(ReadRowsResponse.CellChunk) + +SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType('SampleRowKeysRequest', (_message.Message,), dict( + DESCRIPTOR = _SAMPLEROWKEYSREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) + )) +_sym_db.RegisterMessage(SampleRowKeysRequest) + +SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType('SampleRowKeysResponse', (_message.Message,), dict( + DESCRIPTOR = _SAMPLEROWKEYSRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) + )) +_sym_db.RegisterMessage(SampleRowKeysResponse) + +MutateRowRequest = _reflection.GeneratedProtocolMessageType('MutateRowRequest', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) + )) +_sym_db.RegisterMessage(MutateRowRequest) + +MutateRowResponse = _reflection.GeneratedProtocolMessageType('MutateRowResponse', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) + )) +_sym_db.RegisterMessage(MutateRowResponse) + +MutateRowsRequest = _reflection.GeneratedProtocolMessageType('MutateRowsRequest', 
(_message.Message,), dict( + + Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWSREQUEST_ENTRY, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) + )) + , + DESCRIPTOR = _MUTATEROWSREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) + )) +_sym_db.RegisterMessage(MutateRowsRequest) +_sym_db.RegisterMessage(MutateRowsRequest.Entry) + +MutateRowsResponse = _reflection.GeneratedProtocolMessageType('MutateRowsResponse', (_message.Message,), dict( + + Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWSRESPONSE_ENTRY, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) + )) + , + DESCRIPTOR = _MUTATEROWSRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) + )) +_sym_db.RegisterMessage(MutateRowsResponse) +_sym_db.RegisterMessage(MutateRowsResponse.Entry) + +CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowRequest', (_message.Message,), dict( + DESCRIPTOR = _CHECKANDMUTATEROWREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) + )) +_sym_db.RegisterMessage(CheckAndMutateRowRequest) + +CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowResponse', (_message.Message,), dict( + DESCRIPTOR = _CHECKANDMUTATEROWRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) + )) +_sym_db.RegisterMessage(CheckAndMutateRowResponse) + +ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowRequest', (_message.Message,), dict( + DESCRIPTOR = _READMODIFYWRITEROWREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) + )) +_sym_db.RegisterMessage(ReadModifyWriteRowRequest) + +ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowResponse', (_message.Message,), dict( + DESCRIPTOR = _READMODIFYWRITEROWRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) + )) +_sym_db.RegisterMessage(ReadModifyWriteRowResponse) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\rBigtableProtoP\001')) + +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + + +class BigtableStub(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.ReadRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadRows', + request_serializer=ReadRowsRequest.SerializeToString, + response_deserializer=ReadRowsResponse.FromString, + ) + self.SampleRowKeys = channel.unary_stream( + '/google.bigtable.v2.Bigtable/SampleRowKeys', + request_serializer=SampleRowKeysRequest.SerializeToString, + response_deserializer=SampleRowKeysResponse.FromString, + ) + self.MutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/MutateRow', + request_serializer=MutateRowRequest.SerializeToString, + response_deserializer=MutateRowResponse.FromString, + ) + self.MutateRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/MutateRows', + request_serializer=MutateRowsRequest.SerializeToString, + response_deserializer=MutateRowsResponse.FromString, + ) + self.CheckAndMutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + request_serializer=CheckAndMutateRowRequest.SerializeToString, + response_deserializer=CheckAndMutateRowResponse.FromString, + ) + self.ReadModifyWriteRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + request_serializer=ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=ReadModifyWriteRowResponse.FromString, + ) + + +class BigtableServicer(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def ReadRows(self, request, context): + """Streams back the contents of all requested rows, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically. 
The method reads the latest existing timestamp + and value from the specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for the timestamp is the + greater of the existing timestamp or the current server time. The method + returns the new contents of all modified cells. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ReadRows': grpc.unary_stream_rpc_method_handler( + servicer.ReadRows, + request_deserializer=ReadRowsRequest.FromString, + response_serializer=ReadRowsResponse.SerializeToString, + ), + 'SampleRowKeys': grpc.unary_stream_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=SampleRowKeysRequest.FromString, + response_serializer=SampleRowKeysResponse.SerializeToString, + ), + 'MutateRow': grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=MutateRowRequest.FromString, + response_serializer=MutateRowResponse.SerializeToString, + ), + 'MutateRows': grpc.unary_stream_rpc_method_handler( + servicer.MutateRows, + request_deserializer=MutateRowsRequest.FromString, + response_serializer=MutateRowsResponse.SerializeToString, + ), + 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=CheckAndMutateRowRequest.FromString, + response_serializer=CheckAndMutateRowResponse.SerializeToString, + ), + 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=ReadModifyWriteRowRequest.FromString, + response_serializer=ReadModifyWriteRowResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.v2.Bigtable', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class BetaBigtableServicer(object): + """Service for reading from and writing to existing Bigtable tables. + """ + def ReadRows(self, request, context): + """Streams back the contents of all requested rows, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. 
+ """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically. The method reads the latest existing timestamp + and value from the specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for the timestamp is the + greater of the existing timestamp or the current server time. The method + returns the new contents of all modified cells. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + +class BetaBigtableStub(object): + """Service for reading from and writing to existing Bigtable tables. + """ + def ReadRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Streams back the contents of all requested rows, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + raise NotImplementedError() + def SampleRowKeys(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + raise NotImplementedError() + def MutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + raise NotImplementedError() + MutateRow.future = None + def MutateRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + raise NotImplementedError() + def CheckAndMutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + raise NotImplementedError() + CheckAndMutateRow.future = None + def ReadModifyWriteRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Modifies a row atomically. The method reads the latest existing timestamp + and value from the specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for the timestamp is the + greater of the existing timestamp or the current server time. The method + returns the new contents of all modified cells. 
+ """ + raise NotImplementedError() + ReadModifyWriteRow.future = None + + +def beta_create_Bigtable_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + request_deserializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.FromString, + } + response_serializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysResponse.SerializeToString, + } + method_implementations = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): face_utilities.unary_unary_inline(servicer.CheckAndMutateRow), + ('google.bigtable.v2.Bigtable', 'MutateRow'): face_utilities.unary_unary_inline(servicer.MutateRow), + ('google.bigtable.v2.Bigtable', 'MutateRows'): face_utilities.unary_stream_inline(servicer.MutateRows), + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): face_utilities.unary_unary_inline(servicer.ReadModifyWriteRow), + ('google.bigtable.v2.Bigtable', 'ReadRows'): face_utilities.unary_stream_inline(servicer.ReadRows), + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): face_utilities.unary_stream_inline(servicer.SampleRowKeys), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + +def beta_create_Bigtable_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + request_serializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.SerializeToString, + } + response_deserializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): 
SampleRowKeysResponse.FromString, + } + cardinalities = { + 'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY, + 'MutateRow': cardinality.Cardinality.UNARY_UNARY, + 'MutateRows': cardinality.Cardinality.UNARY_STREAM, + 'ReadModifyWriteRow': cardinality.Cardinality.UNARY_UNARY, + 'ReadRows': cardinality.Cardinality.UNARY_STREAM, + 'SampleRowKeys': cardinality.Cardinality.UNARY_STREAM, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.v2.Bigtable', cardinalities, options=stub_options) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py new file mode 100644 index 000000000000..c929b222b78b --- /dev/null +++ b/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py @@ -0,0 +1,784 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/bigtable/admin/v2/bigtable_table_admin.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from gcloud.bigtable._generated_v2 import table_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/bigtable_table_admin.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n3google/bigtable/admin/v2/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a$google/bigtable/admin/v2/table.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c\"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target\"k\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x12\n\npage_token\x18\x03 \x01(\t\"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 
\x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod2\xb8\x07\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"5\x82\xd3\xe4\x93\x02/\"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table\"J\x82\xd3\xe4\x93\x02\x44\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty\"B\x82\xd3\xe4\x93\x02<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*B9\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( + name='Split', + full_name='google.bigtable.admin.v2.CreateTableRequest.Split', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.CreateTableRequest.Split.key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=359, + serialized_end=379, +) + +_CREATETABLEREQUEST = _descriptor.Descriptor( + name='CreateTableRequest', + full_name='google.bigtable.admin.v2.CreateTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.CreateTableRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='table_id', full_name='google.bigtable.admin.v2.CreateTableRequest.table_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='table', full_name='google.bigtable.admin.v2.CreateTableRequest.table', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='initial_splits', full_name='google.bigtable.admin.v2.CreateTableRequest.initial_splits', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CREATETABLEREQUEST_SPLIT, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=179, + serialized_end=379, +) + + +_DROPROWRANGEREQUEST = _descriptor.Descriptor( + name='DropRowRangeRequest', + full_name='google.bigtable.admin.v2.DropRowRangeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DropRowRangeRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key_prefix', full_name='google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_all_data_from_table', full_name='google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='target', full_name='google.bigtable.admin.v2.DropRowRangeRequest.target', + index=0, containing_type=None, fields=[]), + ], + serialized_start=381, + serialized_end=490, +) + + +_LISTTABLESREQUEST = _descriptor.Descriptor( + name='ListTablesRequest', + full_name='google.bigtable.admin.v2.ListTablesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.ListTablesRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='view', full_name='google.bigtable.admin.v2.ListTablesRequest.view', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='page_token', 
full_name='google.bigtable.admin.v2.ListTablesRequest.page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=492, + serialized_end=599, +) + + +_LISTTABLESRESPONSE = _descriptor.Descriptor( + name='ListTablesResponse', + full_name='google.bigtable.admin.v2.ListTablesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='tables', full_name='google.bigtable.admin.v2.ListTablesResponse.tables', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListTablesResponse.next_page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=601, + serialized_end=695, +) + + +_GETTABLEREQUEST = _descriptor.Descriptor( + name='GetTableRequest', + full_name='google.bigtable.admin.v2.GetTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetTableRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='view', full_name='google.bigtable.admin.v2.GetTableRequest.view', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=697, + serialized_end=780, +) + + +_DELETETABLEREQUEST = _descriptor.Descriptor( + name='DeleteTableRequest', + full_name='google.bigtable.admin.v2.DeleteTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteTableRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=782, + serialized_end=816, +) + + +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor( + 
name='Modification', + full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='id', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='create', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='update', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='drop', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='mod', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod', + index=0, containing_type=None, fields=[]), + ], + serialized_start=956, + serialized_end=1121, +) + +_MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( + name='ModifyColumnFamiliesRequest', + full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modifications', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=819, + serialized_end=1121, +) + +_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST +_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE +_CREATETABLEREQUEST.fields_by_name['initial_splits'].message_type = _CREATETABLEREQUEST_SPLIT +_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( + 
_DROPROWRANGEREQUEST.fields_by_name['row_key_prefix']) +_DROPROWRANGEREQUEST.fields_by_name['row_key_prefix'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] +_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( + _DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table']) +_DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] +_LISTTABLESREQUEST.fields_by_name['view'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE_VIEW +_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE +_GETTABLEREQUEST.fields_by_name['view'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE_VIEW +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._COLUMNFAMILY +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._COLUMNFAMILY +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name['modifications'].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION +DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST +DESCRIPTOR.message_types_by_name['DropRowRangeRequest'] = _DROPROWRANGEREQUEST +DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST +DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE +DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST +DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST +DESCRIPTOR.message_types_by_name['ModifyColumnFamiliesRequest'] = _MODIFYCOLUMNFAMILIESREQUEST + +CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict( + + Split = _reflection.GeneratedProtocolMessageType('Split', (_message.Message,), dict( + DESCRIPTOR = _CREATETABLEREQUEST_SPLIT, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) + )) + , + DESCRIPTOR = _CREATETABLEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) + )) +_sym_db.RegisterMessage(CreateTableRequest) +_sym_db.RegisterMessage(CreateTableRequest.Split) + +DropRowRangeRequest = _reflection.GeneratedProtocolMessageType('DropRowRangeRequest', 
(_message.Message,), dict( + DESCRIPTOR = _DROPROWRANGEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) + )) +_sym_db.RegisterMessage(DropRowRangeRequest) + +ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTTABLESREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) + )) +_sym_db.RegisterMessage(ListTablesRequest) + +ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTTABLESRESPONSE, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) + )) +_sym_db.RegisterMessage(ListTablesResponse) + +GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict( + DESCRIPTOR = _GETTABLEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) + )) +_sym_db.RegisterMessage(GetTableRequest) + +DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETETABLEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) + )) +_sym_db.RegisterMessage(DeleteTableRequest) + +ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType('ModifyColumnFamiliesRequest', (_message.Message,), dict( + + Modification = _reflection.GeneratedProtocolMessageType('Modification', (_message.Message,), dict( + DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) + )) + , + DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) + )) +_sym_db.RegisterMessage(ModifyColumnFamiliesRequest) +_sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001')) + +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + + +class BigtableTableAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreateTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable', + request_serializer=CreateTableRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ) + self.ListTables = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ListTables', + request_serializer=ListTablesRequest.SerializeToString, + response_deserializer=ListTablesResponse.FromString, + ) + self.GetTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetTable', + request_serializer=GetTableRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ) + self.DeleteTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable', + request_serializer=DeleteTableRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.ModifyColumnFamilies = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies', + request_serializer=ModifyColumnFamiliesRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ) + self.DropRowRange = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange', + request_serializer=DropRowRangeRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + + +class BigtableTableAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. + """ + + def CreateTable(self, request, context): + """Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListTables(self, request, context): + """Lists all tables served from a specified instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetTable(self, request, context): + """Gets metadata information about the specified table. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteTable(self, request, context): + """Permanently deletes a specified table and all of its data. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ModifyColumnFamilies(self, request, context): + """Atomically performs a series of column family modifications + on the specified table. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DropRowRange(self, request, context): + """Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableTableAdminServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateTable': grpc.unary_unary_rpc_method_handler( + servicer.CreateTable, + request_deserializer=CreateTableRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ), + 'ListTables': grpc.unary_unary_rpc_method_handler( + servicer.ListTables, + request_deserializer=ListTablesRequest.FromString, + response_serializer=ListTablesResponse.SerializeToString, + ), + 'GetTable': grpc.unary_unary_rpc_method_handler( + servicer.GetTable, + request_deserializer=GetTableRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ), + 'DeleteTable': grpc.unary_unary_rpc_method_handler( + servicer.DeleteTable, + request_deserializer=DeleteTableRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'ModifyColumnFamilies': grpc.unary_unary_rpc_method_handler( + servicer.ModifyColumnFamilies, + request_deserializer=ModifyColumnFamiliesRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ), + 'DropRowRange': grpc.unary_unary_rpc_method_handler( + servicer.DropRowRange, + request_deserializer=DropRowRangeRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.admin.v2.BigtableTableAdmin', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class BetaBigtableTableAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. + """ + def CreateTable(self, request, context): + """Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListTables(self, request, context): + """Lists all tables served from a specified instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetTable(self, request, context): + """Gets metadata information about the specified table. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteTable(self, request, context): + """Permanently deletes a specified table and all of its data. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ModifyColumnFamilies(self, request, context): + """Atomically performs a series of column family modifications + on the specified table. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DropRowRange(self, request, context): + """Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + +class BetaBigtableTableAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. 
+ """ + def CreateTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + """ + raise NotImplementedError() + CreateTable.future = None + def ListTables(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists all tables served from a specified instance. + """ + raise NotImplementedError() + ListTables.future = None + def GetTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets metadata information about the specified table. + """ + raise NotImplementedError() + GetTable.future = None + def DeleteTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Permanently deletes a specified table and all of its data. + """ + raise NotImplementedError() + DeleteTable.future = None + def ModifyColumnFamilies(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Atomically performs a series of column family modifications + on the specified table. + """ + raise NotImplementedError() + ModifyColumnFamilies.future = None + def DropRowRange(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + """ + raise NotImplementedError() + DropRowRange.future = None + + +def beta_create_BigtableTableAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + request_deserializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): CreateTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): DeleteTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): DropRowRangeRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): GetTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): ModifyColumnFamiliesRequest.FromString, + } + response_serializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + } + method_implementations = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): face_utilities.unary_unary_inline(servicer.DeleteTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): 
face_utilities.unary_unary_inline(servicer.DropRowRange), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): face_utilities.unary_unary_inline(servicer.GetTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): face_utilities.unary_unary_inline(servicer.ListTables), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): face_utilities.unary_unary_inline(servicer.ModifyColumnFamilies), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + +def beta_create_BigtableTableAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + request_serializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): CreateTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): DeleteTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): DropRowRangeRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): GetTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): ModifyColumnFamiliesRequest.SerializeToString, + } + response_deserializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesResponse.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + } + cardinalities = { + 'CreateTable': cardinality.Cardinality.UNARY_UNARY, + 'DeleteTable': cardinality.Cardinality.UNARY_UNARY, + 'DropRowRange': cardinality.Cardinality.UNARY_UNARY, + 'GetTable': cardinality.Cardinality.UNARY_UNARY, + 'ListTables': cardinality.Cardinality.UNARY_UNARY, + 'ModifyColumnFamilies': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.v2.BigtableTableAdmin', cardinalities, options=stub_options) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/common_pb2.py b/gcloud/bigtable/_generated_v2/common_pb2.py new file mode 100644 index 000000000000..298130452971 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/common_pb2.py @@ -0,0 +1,67 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/admin/v2/common.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/common.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n%google/bigtable/admin/v2/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42-\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +_STORAGETYPE = _descriptor.EnumDescriptor( + name='StorageType', + full_name='google.bigtable.admin.v2.StorageType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STORAGE_TYPE_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SSD', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HDD', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=130, + serialized_end=191, +) +_sym_db.RegisterEnumDescriptor(_STORAGETYPE) + +StorageType = enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE) +STORAGE_TYPE_UNSPECIFIED = 0 +SSD = 1 +HDD = 2 + + +DESCRIPTOR.enum_types_by_name['StorageType'] = _STORAGETYPE + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001')) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/data_pb2.py b/gcloud/bigtable/_generated_v2/data_pb2.py new file mode 100644 index 000000000000..6db08fbd12c3 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/data_pb2.py @@ -0,0 +1,1260 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/v2/data.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/v2/data.proto', + package='google.bigtable.v2', + syntax='proto3', + serialized_pb=_b('\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b 
\x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB%\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01\x62\x06proto3') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_ROW = _descriptor.Descriptor( + name='Row', + full_name='google.bigtable.v2.Row', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.v2.Row.key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='families', full_name='google.bigtable.v2.Row.families', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=53, + serialized_end=117, +) + + +_FAMILY = _descriptor.Descriptor( + name='Family', + full_name='google.bigtable.v2.Family', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.v2.Family.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='columns', 
full_name='google.bigtable.v2.Family.columns', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=119, + serialized_end=186, +) + + +_COLUMN = _descriptor.Descriptor( + name='Column', + full_name='google.bigtable.v2.Column', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='qualifier', full_name='google.bigtable.v2.Column.qualifier', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells', full_name='google.bigtable.v2.Column.cells', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=188, + serialized_end=256, +) + + +_CELL = _descriptor.Descriptor( + name='Cell', + full_name='google.bigtable.v2.Cell', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='timestamp_micros', full_name='google.bigtable.v2.Cell.timestamp_micros', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.v2.Cell.value', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.bigtable.v2.Cell.labels', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=258, + serialized_end=321, +) + + +_ROWRANGE = _descriptor.Descriptor( + name='RowRange', + full_name='google.bigtable.v2.RowRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='start_key_closed', full_name='google.bigtable.v2.RowRange.start_key_closed', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_key_open', full_name='google.bigtable.v2.RowRange.start_key_open', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_key_open', full_name='google.bigtable.v2.RowRange.end_key_open', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_key_closed', full_name='google.bigtable.v2.RowRange.end_key_closed', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='start_key', full_name='google.bigtable.v2.RowRange.start_key', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='end_key', full_name='google.bigtable.v2.RowRange.end_key', + index=1, containing_type=None, fields=[]), + ], + serialized_start=324, + serialized_end=462, +) + + +_ROWSET = _descriptor.Descriptor( + name='RowSet', + full_name='google.bigtable.v2.RowSet', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_keys', full_name='google.bigtable.v2.RowSet.row_keys', index=0, + number=1, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_ranges', full_name='google.bigtable.v2.RowSet.row_ranges', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=464, + serialized_end=540, +) + + +_COLUMNRANGE = _descriptor.Descriptor( + name='ColumnRange', + full_name='google.bigtable.v2.ColumnRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.ColumnRange.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_qualifier_closed', full_name='google.bigtable.v2.ColumnRange.start_qualifier_closed', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_qualifier_open', full_name='google.bigtable.v2.ColumnRange.start_qualifier_open', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_qualifier_closed', 
full_name='google.bigtable.v2.ColumnRange.end_qualifier_closed', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_qualifier_open', full_name='google.bigtable.v2.ColumnRange.end_qualifier_open', index=4, + number=5, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='start_qualifier', full_name='google.bigtable.v2.ColumnRange.start_qualifier', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='end_qualifier', full_name='google.bigtable.v2.ColumnRange.end_qualifier', + index=1, containing_type=None, fields=[]), + ], + serialized_start=543, + serialized_end=741, +) + + +_TIMESTAMPRANGE = _descriptor.Descriptor( + name='TimestampRange', + full_name='google.bigtable.v2.TimestampRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='start_timestamp_micros', full_name='google.bigtable.v2.TimestampRange.start_timestamp_micros', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_timestamp_micros', full_name='google.bigtable.v2.TimestampRange.end_timestamp_micros', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=743, + serialized_end=821, +) + + +_VALUERANGE = _descriptor.Descriptor( + name='ValueRange', + full_name='google.bigtable.v2.ValueRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='start_value_closed', full_name='google.bigtable.v2.ValueRange.start_value_closed', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_value_open', full_name='google.bigtable.v2.ValueRange.start_value_open', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_value_closed', full_name='google.bigtable.v2.ValueRange.end_value_closed', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_value_open', 
full_name='google.bigtable.v2.ValueRange.end_value_open', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='start_value', full_name='google.bigtable.v2.ValueRange.start_value', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='end_value', full_name='google.bigtable.v2.ValueRange.end_value', + index=1, containing_type=None, fields=[]), + ], + serialized_start=824, + serialized_end=976, +) + + +_ROWFILTER_CHAIN = _descriptor.Descriptor( + name='Chain', + full_name='google.bigtable.v2.RowFilter.Chain', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='filters', full_name='google.bigtable.v2.RowFilter.Chain.filters', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1795, + serialized_end=1850, +) + +_ROWFILTER_INTERLEAVE = _descriptor.Descriptor( + name='Interleave', + full_name='google.bigtable.v2.RowFilter.Interleave', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='filters', full_name='google.bigtable.v2.RowFilter.Interleave.filters', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1852, + serialized_end=1912, +) + +_ROWFILTER_CONDITION = _descriptor.Descriptor( + name='Condition', + full_name='google.bigtable.v2.RowFilter.Condition', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='predicate_filter', full_name='google.bigtable.v2.RowFilter.Condition.predicate_filter', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='true_filter', full_name='google.bigtable.v2.RowFilter.Condition.true_filter', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='false_filter', full_name='google.bigtable.v2.RowFilter.Condition.false_filter', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + 
is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1915, + serialized_end=2088, +) + +_ROWFILTER = _descriptor.Descriptor( + name='RowFilter', + full_name='google.bigtable.v2.RowFilter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='chain', full_name='google.bigtable.v2.RowFilter.chain', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='interleave', full_name='google.bigtable.v2.RowFilter.interleave', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='condition', full_name='google.bigtable.v2.RowFilter.condition', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sink', full_name='google.bigtable.v2.RowFilter.sink', index=3, + number=16, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pass_all_filter', full_name='google.bigtable.v2.RowFilter.pass_all_filter', index=4, + number=17, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='block_all_filter', full_name='google.bigtable.v2.RowFilter.block_all_filter', index=5, + number=18, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key_regex_filter', full_name='google.bigtable.v2.RowFilter.row_key_regex_filter', index=6, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_sample_filter', full_name='google.bigtable.v2.RowFilter.row_sample_filter', index=7, + number=14, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='family_name_regex_filter', full_name='google.bigtable.v2.RowFilter.family_name_regex_filter', index=8, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_qualifier_regex_filter', full_name='google.bigtable.v2.RowFilter.column_qualifier_regex_filter', index=9, + number=6, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_range_filter', full_name='google.bigtable.v2.RowFilter.column_range_filter', index=10, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_range_filter', full_name='google.bigtable.v2.RowFilter.timestamp_range_filter', index=11, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value_regex_filter', full_name='google.bigtable.v2.RowFilter.value_regex_filter', index=12, + number=9, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value_range_filter', full_name='google.bigtable.v2.RowFilter.value_range_filter', index=13, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells_per_row_offset_filter', full_name='google.bigtable.v2.RowFilter.cells_per_row_offset_filter', index=14, + number=10, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells_per_row_limit_filter', full_name='google.bigtable.v2.RowFilter.cells_per_row_limit_filter', index=15, + number=11, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells_per_column_limit_filter', full_name='google.bigtable.v2.RowFilter.cells_per_column_limit_filter', index=16, + number=12, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strip_value_transformer', full_name='google.bigtable.v2.RowFilter.strip_value_transformer', index=17, + number=13, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='apply_label_transformer', full_name='google.bigtable.v2.RowFilter.apply_label_transformer', index=18, + number=19, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='filter', 
full_name='google.bigtable.v2.RowFilter.filter', + index=0, containing_type=None, fields=[]), + ], + serialized_start=979, + serialized_end=2098, +) + + +_MUTATION_SETCELL = _descriptor.Descriptor( + name='SetCell', + full_name='google.bigtable.v2.Mutation.SetCell', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.Mutation.SetCell.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_qualifier', full_name='google.bigtable.v2.Mutation.SetCell.column_qualifier', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_micros', full_name='google.bigtable.v2.Mutation.SetCell.timestamp_micros', index=2, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.v2.Mutation.SetCell.value', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2396, + serialized_end=2493, +) + +_MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( + name='DeleteFromColumn', + full_name='google.bigtable.v2.Mutation.DeleteFromColumn', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_qualifier', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.column_qualifier', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='time_range', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.time_range', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2495, + serialized_end=2616, +) + +_MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( + name='DeleteFromFamily', + full_name='google.bigtable.v2.Mutation.DeleteFromFamily', + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.Mutation.DeleteFromFamily.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2618, + serialized_end=2657, +) + +_MUTATION_DELETEFROMROW = _descriptor.Descriptor( + name='DeleteFromRow', + full_name='google.bigtable.v2.Mutation.DeleteFromRow', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2659, + serialized_end=2674, +) + +_MUTATION = _descriptor.Descriptor( + name='Mutation', + full_name='google.bigtable.v2.Mutation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='set_cell', full_name='google.bigtable.v2.Mutation.set_cell', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_from_column', full_name='google.bigtable.v2.Mutation.delete_from_column', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_from_family', full_name='google.bigtable.v2.Mutation.delete_from_family', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_from_row', full_name='google.bigtable.v2.Mutation.delete_from_row', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MUTATION_SETCELL, _MUTATION_DELETEFROMCOLUMN, _MUTATION_DELETEFROMFAMILY, _MUTATION_DELETEFROMROW, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='mutation', full_name='google.bigtable.v2.Mutation.mutation', + index=0, containing_type=None, fields=[]), + ], + serialized_start=2101, + serialized_end=2686, +) + + +_READMODIFYWRITERULE = _descriptor.Descriptor( + name='ReadModifyWriteRule', + full_name='google.bigtable.v2.ReadModifyWriteRule', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.ReadModifyWriteRule.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + _descriptor.FieldDescriptor( + name='column_qualifier', full_name='google.bigtable.v2.ReadModifyWriteRule.column_qualifier', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='append_value', full_name='google.bigtable.v2.ReadModifyWriteRule.append_value', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='increment_amount', full_name='google.bigtable.v2.ReadModifyWriteRule.increment_amount', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='rule', full_name='google.bigtable.v2.ReadModifyWriteRule.rule', + index=0, containing_type=None, fields=[]), + ], + serialized_start=2689, + serialized_end=2817, +) + +_ROW.fields_by_name['families'].message_type = _FAMILY +_FAMILY.fields_by_name['columns'].message_type = _COLUMN +_COLUMN.fields_by_name['cells'].message_type = _CELL +_ROWRANGE.oneofs_by_name['start_key'].fields.append( + _ROWRANGE.fields_by_name['start_key_closed']) +_ROWRANGE.fields_by_name['start_key_closed'].containing_oneof = _ROWRANGE.oneofs_by_name['start_key'] +_ROWRANGE.oneofs_by_name['start_key'].fields.append( + _ROWRANGE.fields_by_name['start_key_open']) +_ROWRANGE.fields_by_name['start_key_open'].containing_oneof = _ROWRANGE.oneofs_by_name['start_key'] +_ROWRANGE.oneofs_by_name['end_key'].fields.append( + _ROWRANGE.fields_by_name['end_key_open']) +_ROWRANGE.fields_by_name['end_key_open'].containing_oneof = _ROWRANGE.oneofs_by_name['end_key'] +_ROWRANGE.oneofs_by_name['end_key'].fields.append( + _ROWRANGE.fields_by_name['end_key_closed']) +_ROWRANGE.fields_by_name['end_key_closed'].containing_oneof = _ROWRANGE.oneofs_by_name['end_key'] +_ROWSET.fields_by_name['row_ranges'].message_type = _ROWRANGE +_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['start_qualifier_closed']) +_COLUMNRANGE.fields_by_name['start_qualifier_closed'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] +_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['start_qualifier_open']) +_COLUMNRANGE.fields_by_name['start_qualifier_open'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] +_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['end_qualifier_closed']) +_COLUMNRANGE.fields_by_name['end_qualifier_closed'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] +_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['end_qualifier_open']) +_COLUMNRANGE.fields_by_name['end_qualifier_open'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] +_VALUERANGE.oneofs_by_name['start_value'].fields.append( + _VALUERANGE.fields_by_name['start_value_closed']) +_VALUERANGE.fields_by_name['start_value_closed'].containing_oneof = 
_VALUERANGE.oneofs_by_name['start_value'] +_VALUERANGE.oneofs_by_name['start_value'].fields.append( + _VALUERANGE.fields_by_name['start_value_open']) +_VALUERANGE.fields_by_name['start_value_open'].containing_oneof = _VALUERANGE.oneofs_by_name['start_value'] +_VALUERANGE.oneofs_by_name['end_value'].fields.append( + _VALUERANGE.fields_by_name['end_value_closed']) +_VALUERANGE.fields_by_name['end_value_closed'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] +_VALUERANGE.oneofs_by_name['end_value'].fields.append( + _VALUERANGE.fields_by_name['end_value_open']) +_VALUERANGE.fields_by_name['end_value_open'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] +_ROWFILTER_CHAIN.fields_by_name['filters'].message_type = _ROWFILTER +_ROWFILTER_CHAIN.containing_type = _ROWFILTER +_ROWFILTER_INTERLEAVE.fields_by_name['filters'].message_type = _ROWFILTER +_ROWFILTER_INTERLEAVE.containing_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name['predicate_filter'].message_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name['true_filter'].message_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name['false_filter'].message_type = _ROWFILTER +_ROWFILTER_CONDITION.containing_type = _ROWFILTER +_ROWFILTER.fields_by_name['chain'].message_type = _ROWFILTER_CHAIN +_ROWFILTER.fields_by_name['interleave'].message_type = _ROWFILTER_INTERLEAVE +_ROWFILTER.fields_by_name['condition'].message_type = _ROWFILTER_CONDITION +_ROWFILTER.fields_by_name['column_range_filter'].message_type = _COLUMNRANGE +_ROWFILTER.fields_by_name['timestamp_range_filter'].message_type = _TIMESTAMPRANGE +_ROWFILTER.fields_by_name['value_range_filter'].message_type = _VALUERANGE +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['chain']) +_ROWFILTER.fields_by_name['chain'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['interleave']) +_ROWFILTER.fields_by_name['interleave'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['condition']) +_ROWFILTER.fields_by_name['condition'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['sink']) +_ROWFILTER.fields_by_name['sink'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['pass_all_filter']) +_ROWFILTER.fields_by_name['pass_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['block_all_filter']) +_ROWFILTER.fields_by_name['block_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['row_key_regex_filter']) +_ROWFILTER.fields_by_name['row_key_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['row_sample_filter']) +_ROWFILTER.fields_by_name['row_sample_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['family_name_regex_filter']) +_ROWFILTER.fields_by_name['family_name_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + 
_ROWFILTER.fields_by_name['column_qualifier_regex_filter']) +_ROWFILTER.fields_by_name['column_qualifier_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['column_range_filter']) +_ROWFILTER.fields_by_name['column_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['timestamp_range_filter']) +_ROWFILTER.fields_by_name['timestamp_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['value_regex_filter']) +_ROWFILTER.fields_by_name['value_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['value_range_filter']) +_ROWFILTER.fields_by_name['value_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['cells_per_row_offset_filter']) +_ROWFILTER.fields_by_name['cells_per_row_offset_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['cells_per_row_limit_filter']) +_ROWFILTER.fields_by_name['cells_per_row_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['cells_per_column_limit_filter']) +_ROWFILTER.fields_by_name['cells_per_column_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['strip_value_transformer']) +_ROWFILTER.fields_by_name['strip_value_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['apply_label_transformer']) +_ROWFILTER.fields_by_name['apply_label_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_MUTATION_SETCELL.containing_type = _MUTATION +_MUTATION_DELETEFROMCOLUMN.fields_by_name['time_range'].message_type = _TIMESTAMPRANGE +_MUTATION_DELETEFROMCOLUMN.containing_type = _MUTATION +_MUTATION_DELETEFROMFAMILY.containing_type = _MUTATION +_MUTATION_DELETEFROMROW.containing_type = _MUTATION +_MUTATION.fields_by_name['set_cell'].message_type = _MUTATION_SETCELL +_MUTATION.fields_by_name['delete_from_column'].message_type = _MUTATION_DELETEFROMCOLUMN +_MUTATION.fields_by_name['delete_from_family'].message_type = _MUTATION_DELETEFROMFAMILY +_MUTATION.fields_by_name['delete_from_row'].message_type = _MUTATION_DELETEFROMROW +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['set_cell']) +_MUTATION.fields_by_name['set_cell'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['delete_from_column']) +_MUTATION.fields_by_name['delete_from_column'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['delete_from_family']) +_MUTATION.fields_by_name['delete_from_family'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['delete_from_row']) +_MUTATION.fields_by_name['delete_from_row'].containing_oneof = 
_MUTATION.oneofs_by_name['mutation'] +_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( + _READMODIFYWRITERULE.fields_by_name['append_value']) +_READMODIFYWRITERULE.fields_by_name['append_value'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] +_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( + _READMODIFYWRITERULE.fields_by_name['increment_amount']) +_READMODIFYWRITERULE.fields_by_name['increment_amount'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] +DESCRIPTOR.message_types_by_name['Row'] = _ROW +DESCRIPTOR.message_types_by_name['Family'] = _FAMILY +DESCRIPTOR.message_types_by_name['Column'] = _COLUMN +DESCRIPTOR.message_types_by_name['Cell'] = _CELL +DESCRIPTOR.message_types_by_name['RowRange'] = _ROWRANGE +DESCRIPTOR.message_types_by_name['RowSet'] = _ROWSET +DESCRIPTOR.message_types_by_name['ColumnRange'] = _COLUMNRANGE +DESCRIPTOR.message_types_by_name['TimestampRange'] = _TIMESTAMPRANGE +DESCRIPTOR.message_types_by_name['ValueRange'] = _VALUERANGE +DESCRIPTOR.message_types_by_name['RowFilter'] = _ROWFILTER +DESCRIPTOR.message_types_by_name['Mutation'] = _MUTATION +DESCRIPTOR.message_types_by_name['ReadModifyWriteRule'] = _READMODIFYWRITERULE + +Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), dict( + DESCRIPTOR = _ROW, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) + )) +_sym_db.RegisterMessage(Row) + +Family = _reflection.GeneratedProtocolMessageType('Family', (_message.Message,), dict( + DESCRIPTOR = _FAMILY, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) + )) +_sym_db.RegisterMessage(Family) + +Column = _reflection.GeneratedProtocolMessageType('Column', (_message.Message,), dict( + DESCRIPTOR = _COLUMN, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) + )) +_sym_db.RegisterMessage(Column) + +Cell = _reflection.GeneratedProtocolMessageType('Cell', (_message.Message,), dict( + DESCRIPTOR = _CELL, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) + )) +_sym_db.RegisterMessage(Cell) + +RowRange = _reflection.GeneratedProtocolMessageType('RowRange', (_message.Message,), dict( + DESCRIPTOR = _ROWRANGE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange) + )) +_sym_db.RegisterMessage(RowRange) + +RowSet = _reflection.GeneratedProtocolMessageType('RowSet', (_message.Message,), dict( + DESCRIPTOR = _ROWSET, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet) + )) +_sym_db.RegisterMessage(RowSet) + +ColumnRange = _reflection.GeneratedProtocolMessageType('ColumnRange', (_message.Message,), dict( + DESCRIPTOR = _COLUMNRANGE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange) + )) +_sym_db.RegisterMessage(ColumnRange) + +TimestampRange = _reflection.GeneratedProtocolMessageType('TimestampRange', (_message.Message,), dict( + DESCRIPTOR = _TIMESTAMPRANGE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange) + )) +_sym_db.RegisterMessage(TimestampRange) + +ValueRange = _reflection.GeneratedProtocolMessageType('ValueRange', (_message.Message,), dict( + DESCRIPTOR = _VALUERANGE, + __module__ = 
'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange) + )) +_sym_db.RegisterMessage(ValueRange) + +RowFilter = _reflection.GeneratedProtocolMessageType('RowFilter', (_message.Message,), dict( + + Chain = _reflection.GeneratedProtocolMessageType('Chain', (_message.Message,), dict( + DESCRIPTOR = _ROWFILTER_CHAIN, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain) + )) + , + + Interleave = _reflection.GeneratedProtocolMessageType('Interleave', (_message.Message,), dict( + DESCRIPTOR = _ROWFILTER_INTERLEAVE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave) + )) + , + + Condition = _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), dict( + DESCRIPTOR = _ROWFILTER_CONDITION, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) + )) + , + DESCRIPTOR = _ROWFILTER, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) + )) +_sym_db.RegisterMessage(RowFilter) +_sym_db.RegisterMessage(RowFilter.Chain) +_sym_db.RegisterMessage(RowFilter.Interleave) +_sym_db.RegisterMessage(RowFilter.Condition) + +Mutation = _reflection.GeneratedProtocolMessageType('Mutation', (_message.Message,), dict( + + SetCell = _reflection.GeneratedProtocolMessageType('SetCell', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_SETCELL, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) + )) + , + + DeleteFromColumn = _reflection.GeneratedProtocolMessageType('DeleteFromColumn', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_DELETEFROMCOLUMN, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) + )) + , + + DeleteFromFamily = _reflection.GeneratedProtocolMessageType('DeleteFromFamily', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_DELETEFROMFAMILY, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) + )) + , + + DeleteFromRow = _reflection.GeneratedProtocolMessageType('DeleteFromRow', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_DELETEFROMROW, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) + )) + , + DESCRIPTOR = _MUTATION, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) + )) +_sym_db.RegisterMessage(Mutation) +_sym_db.RegisterMessage(Mutation.SetCell) +_sym_db.RegisterMessage(Mutation.DeleteFromColumn) +_sym_db.RegisterMessage(Mutation.DeleteFromFamily) +_sym_db.RegisterMessage(Mutation.DeleteFromRow) + +ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRule', (_message.Message,), dict( + DESCRIPTOR = _READMODIFYWRITERULE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) + )) +_sym_db.RegisterMessage(ReadModifyWriteRule) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\tDataProtoP\001')) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/instance_pb2.py 
b/gcloud/bigtable/_generated_v2/instance_pb2.py new file mode 100644 index 000000000000..2161bf33bf58 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/instance_pb2.py @@ -0,0 +1,222 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/bigtable/admin/v2/instance.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from gcloud.bigtable._generated_v2 import common_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/instance.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n\'google/bigtable/admin/v2/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a%google/bigtable/admin/v2/common.proto\"\x9e\x01\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\"\x8e\x02\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x36\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.State\x12\x13\n\x0bserve_nodes\x18\x04 \x01(\x05\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType\"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04\x42/\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + +_INSTANCE_STATE = _descriptor.EnumDescriptor( + name='State', + full_name='google.bigtable.admin.v2.Instance.State', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STATE_NOT_KNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='READY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CREATING', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=244, + serialized_end=297, +) +_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) + +_CLUSTER_STATE = _descriptor.EnumDescriptor( + name='State', + full_name='google.bigtable.admin.v2.Cluster.State', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STATE_NOT_KNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='READY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CREATING', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RESIZING', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + 
name='DISABLED', index=4, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=489, + serialized_end=570, +) +_sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) + + +_INSTANCE = _descriptor.Descriptor( + name='Instance', + full_name='google.bigtable.admin.v2.Instance', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Instance.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='display_name', full_name='google.bigtable.admin.v2.Instance.display_name', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='state', full_name='google.bigtable.admin.v2.Instance.state', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _INSTANCE_STATE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=139, + serialized_end=297, +) + + +_CLUSTER = _descriptor.Descriptor( + name='Cluster', + full_name='google.bigtable.admin.v2.Cluster', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Cluster.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='location', full_name='google.bigtable.admin.v2.Cluster.location', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='state', full_name='google.bigtable.admin.v2.Cluster.state', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='serve_nodes', full_name='google.bigtable.admin.v2.Cluster.serve_nodes', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='default_storage_type', full_name='google.bigtable.admin.v2.Cluster.default_storage_type', index=4, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _CLUSTER_STATE, + ], + options=None, + 
is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=300, + serialized_end=570, +) + +_INSTANCE.fields_by_name['state'].enum_type = _INSTANCE_STATE +_INSTANCE_STATE.containing_type = _INSTANCE +_CLUSTER.fields_by_name['state'].enum_type = _CLUSTER_STATE +_CLUSTER.fields_by_name['default_storage_type'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2._STORAGETYPE +_CLUSTER_STATE.containing_type = _CLUSTER +DESCRIPTOR.message_types_by_name['Instance'] = _INSTANCE +DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER + +Instance = _reflection.GeneratedProtocolMessageType('Instance', (_message.Message,), dict( + DESCRIPTOR = _INSTANCE, + __module__ = 'google.bigtable.admin.v2.instance_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) + )) +_sym_db.RegisterMessage(Instance) + +Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict( + DESCRIPTOR = _CLUSTER, + __module__ = 'google.bigtable.admin.v2.instance_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) + )) +_sym_db.RegisterMessage(Cluster) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001')) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py b/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py new file mode 100644 index 000000000000..5723e1d99fe0 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py @@ -0,0 +1,264 @@ +from google.longrunning.operations_pb2 import ( + CancelOperationRequest, + DeleteOperationRequest, + GetOperationRequest, + ListOperationsRequest, + ListOperationsResponse, + Operation, + google_dot_protobuf_dot_empty__pb2, +) +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + + +class OperationsStub(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
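As a minimal sketch of how this GA-style stub could be pointed at a channel (the endpoint below is a placeholder; the library itself wires its operations stub through ``beta_create_Operations_stub`` and ``_make_stub``, shown later in this diff, over a secure, authorized channel):

.. code:: python

    import grpc

    from gcloud.bigtable._generated_v2 import operations_grpc_pb2

    # Placeholder endpoint; real Bigtable traffic needs a secure channel.
    channel = grpc.insecure_channel('localhost:9999')
    stub = operations_grpc_pb2.OperationsStub(channel)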
+ """ + self.GetOperation = channel.unary_unary( + '/google.longrunning.Operations/GetOperation', + request_serializer=GetOperationRequest.SerializeToString, + response_deserializer=Operation.FromString, + ) + self.ListOperations = channel.unary_unary( + '/google.longrunning.Operations/ListOperations', + request_serializer=ListOperationsRequest.SerializeToString, + response_deserializer=ListOperationsResponse.FromString, + ) + self.CancelOperation = channel.unary_unary( + '/google.longrunning.Operations/CancelOperation', + request_serializer=CancelOperationRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.DeleteOperation = channel.unary_unary( + '/google.longrunning.Operations/DeleteOperation', + request_serializer=DeleteOperationRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + + +class OperationsServicer(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + + def GetOperation(self, request, context): + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListOperations(self, request, context): + """Lists operations that match the specified filter in the request. If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CancelOperation(self, request, context): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteOperation(self, request, context): + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_OperationsServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetOperation': grpc.unary_unary_rpc_method_handler( + servicer.GetOperation, + request_deserializer=GetOperationRequest.FromString, + response_serializer=Operation.SerializeToString, + ), + 'ListOperations': grpc.unary_unary_rpc_method_handler( + servicer.ListOperations, + request_deserializer=ListOperationsRequest.FromString, + response_serializer=ListOperationsResponse.SerializeToString, + ), + 'CancelOperation': grpc.unary_unary_rpc_method_handler( + servicer.CancelOperation, + request_deserializer=CancelOperationRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'DeleteOperation': grpc.unary_unary_rpc_method_handler( + servicer.DeleteOperation, + request_deserializer=DeleteOperationRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.longrunning.Operations', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class BetaOperationsServicer(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + def GetOperation(self, request, context): + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListOperations(self, request, context): + """Lists operations that match the specified filter in the request. If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CancelOperation(self, request, context): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteOperation(self, request, context): + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + +class BetaOperationsStub(object): + """Manages long-running operations with an API service. 
+ + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + def GetOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ + raise NotImplementedError() + GetOperation.future = None + def ListOperations(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists operations that match the specified filter in the request. If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + """ + raise NotImplementedError() + ListOperations.future = None + def CancelOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. + """ + raise NotImplementedError() + CancelOperation.future = None + def DeleteOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. 
+ """ + raise NotImplementedError() + DeleteOperation.future = None + + +def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + request_deserializers = { + ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.FromString, + ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.FromString, + ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.FromString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.FromString, + } + response_serializers = { + ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.longrunning.Operations', 'GetOperation'): Operation.SerializeToString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.SerializeToString, + } + method_implementations = { + ('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation), + ('google.longrunning.Operations', 'DeleteOperation'): face_utilities.unary_unary_inline(servicer.DeleteOperation), + ('google.longrunning.Operations', 'GetOperation'): face_utilities.unary_unary_inline(servicer.GetOperation), + ('google.longrunning.Operations', 'ListOperations'): face_utilities.unary_unary_inline(servicer.ListOperations), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + +def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + request_serializers = { + ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.SerializeToString, + } + response_deserializers = { + ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.longrunning.Operations', 'GetOperation'): Operation.FromString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.FromString, + } + cardinalities = { + 'CancelOperation': cardinality.Cardinality.UNARY_UNARY, + 'DeleteOperation': cardinality.Cardinality.UNARY_UNARY, + 'GetOperation': cardinality.Cardinality.UNARY_UNARY, + 'ListOperations': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.longrunning.Operations', cardinalities, options=stub_options) diff --git a/gcloud/bigtable/_generated_v2/table_pb2.py b/gcloud/bigtable/_generated_v2/table_pb2.py new file mode 100644 index 
000000000000..840076514cc7 --- /dev/null +++ b/gcloud/bigtable/_generated_v2/table_pb2.py @@ -0,0 +1,393 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/bigtable/admin/v2/table.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/table.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n$google/bigtable/admin/v2/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\"\xa0\x03\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01\"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01\"F\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x08\n\x04\x46ULL\x10\x04\"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule\"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04ruleB,\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + +_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( + name='TimestampGranularity', + full_name='google.bigtable.admin.v2.Table.TimestampGranularity', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='TIMESTAMP_GRANULARITY_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MILLIS', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=400, + serialized_end=473, +) +_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) + +_TABLE_VIEW = _descriptor.EnumDescriptor( + name='View', + full_name='google.bigtable.admin.v2.Table.View', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='VIEW_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='NAME_ONLY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCHEMA_VIEW', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FULL', index=3, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=475, + serialized_end=545, +) +_sym_db.RegisterEnumDescriptor(_TABLE_VIEW) + + +_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( + name='ColumnFamiliesEntry', + full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=305, + serialized_end=398, +) + +_TABLE = _descriptor.Descriptor( + name='Table', + full_name='google.bigtable.admin.v2.Table', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Table.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_families', full_name='google.bigtable.admin.v2.Table.column_families', index=1, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='granularity', full_name='google.bigtable.admin.v2.Table.granularity', index=2, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_TABLE_COLUMNFAMILIESENTRY, ], + enum_types=[ + _TABLE_TIMESTAMPGRANULARITY, + _TABLE_VIEW, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=129, + serialized_end=545, +) + + +_COLUMNFAMILY = _descriptor.Descriptor( + name='ColumnFamily', + full_name='google.bigtable.admin.v2.ColumnFamily', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='gc_rule', full_name='google.bigtable.admin.v2.ColumnFamily.gc_rule', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=547, + serialized_end=612, +) + + +_GCRULE_INTERSECTION = _descriptor.Descriptor( + name='Intersection', + full_name='google.bigtable.admin.v2.GcRule.Intersection', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rules', full_name='google.bigtable.admin.v2.GcRule.Intersection.rules', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=827, + serialized_end=890, +) + +_GCRULE_UNION = _descriptor.Descriptor( + name='Union', + full_name='google.bigtable.admin.v2.GcRule.Union', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rules', full_name='google.bigtable.admin.v2.GcRule.Union.rules', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=892, + serialized_end=948, +) + +_GCRULE = _descriptor.Descriptor( + name='GcRule', + full_name='google.bigtable.admin.v2.GcRule', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='max_num_versions', full_name='google.bigtable.admin.v2.GcRule.max_num_versions', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max_age', full_name='google.bigtable.admin.v2.GcRule.max_age', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='intersection', full_name='google.bigtable.admin.v2.GcRule.intersection', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='union', full_name='google.bigtable.admin.v2.GcRule.union', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='rule', full_name='google.bigtable.admin.v2.GcRule.rule', + index=0, containing_type=None, fields=[]), + ], + serialized_start=615, + 
serialized_end=956, +) + +_TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY +_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE +_TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY +_TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY +_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE +_TABLE_VIEW.containing_type = _TABLE +_COLUMNFAMILY.fields_by_name['gc_rule'].message_type = _GCRULE +_GCRULE_INTERSECTION.fields_by_name['rules'].message_type = _GCRULE +_GCRULE_INTERSECTION.containing_type = _GCRULE +_GCRULE_UNION.fields_by_name['rules'].message_type = _GCRULE +_GCRULE_UNION.containing_type = _GCRULE +_GCRULE.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_GCRULE.fields_by_name['intersection'].message_type = _GCRULE_INTERSECTION +_GCRULE.fields_by_name['union'].message_type = _GCRULE_UNION +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['max_num_versions']) +_GCRULE.fields_by_name['max_num_versions'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['max_age']) +_GCRULE.fields_by_name['max_age'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['intersection']) +_GCRULE.fields_by_name['intersection'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['union']) +_GCRULE.fields_by_name['union'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +DESCRIPTOR.message_types_by_name['Table'] = _TABLE +DESCRIPTOR.message_types_by_name['ColumnFamily'] = _COLUMNFAMILY +DESCRIPTOR.message_types_by_name['GcRule'] = _GCRULE + +Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict( + + ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict( + DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) + )) + , + DESCRIPTOR = _TABLE, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) + )) +_sym_db.RegisterMessage(Table) +_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) + +ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict( + DESCRIPTOR = _COLUMNFAMILY, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) + )) +_sym_db.RegisterMessage(ColumnFamily) + +GcRule = _reflection.GeneratedProtocolMessageType('GcRule', (_message.Message,), dict( + + Intersection = _reflection.GeneratedProtocolMessageType('Intersection', (_message.Message,), dict( + DESCRIPTOR = _GCRULE_INTERSECTION, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) + )) + , + + Union = _reflection.GeneratedProtocolMessageType('Union', (_message.Message,), dict( + DESCRIPTOR = _GCRULE_UNION, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) + )) + , + DESCRIPTOR = _GCRULE, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) + )) 
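As a minimal sketch of what these generated types provide (the resource name below is a placeholder), a ``Table`` message can be built directly with its ``column_families`` map field and nested ``TimestampGranularity`` enum:

.. code:: python

    from gcloud.bigtable._generated_v2 import table_pb2

    # Keep at most one cell version per column in this family.
    gc_rule = table_pb2.GcRule(max_num_versions=1)
    table = table_pb2.Table(
        name='projects/my-project/instances/my-instance/tables/my-table',
        column_families={'cf1': table_pb2.ColumnFamily(gc_rule=gc_rule)},
        granularity=table_pb2.Table.MILLIS,
    )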
+_sym_db.RegisterMessage(GcRule) +_sym_db.RegisterMessage(GcRule.Intersection) +_sym_db.RegisterMessage(GcRule.Union) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\nTableProtoP\001')) +_TABLE_COLUMNFAMILIESENTRY.has_options = True +_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index 49faf42c8a25..cf25d05f2a0d 100644 --- a/gcloud/bigtable/client.py +++ b/gcloud/bigtable/client.py @@ -18,8 +18,8 @@ In the hierarchy of API concepts -* a :class:`Client` owns a :class:`.Cluster` -* a :class:`.Cluster` owns a :class:`Table ` +* a :class:`Client` owns a :class:`.Instance` +* a :class:`.Instance` owns a :class:`Table ` * a :class:`Table ` owns a :class:`ColumnFamily <.column_family.ColumnFamily>` * a :class:`Table ` owns a :class:`Row <.row.Row>` @@ -31,40 +31,49 @@ from grpc.beta import implementations -from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as data_pb2 -from gcloud.bigtable._generated import bigtable_cluster_service_pb2 -from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) -from gcloud.bigtable._generated import bigtable_service_pb2 -from gcloud.bigtable._generated import bigtable_table_service_pb2 -from gcloud.bigtable._generated import operations_grpc_pb2 -from gcloud.bigtable.cluster import Cluster +from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as instance_admin_v2_pb2) +# V1 table admin service +from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) +# V1 data service +from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as data_v2_pb2) + +from gcloud.bigtable._generated_v2 import ( + operations_grpc_pb2 as operations_grpc_v2_pb2) + +from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES +from gcloud.bigtable.instance import Instance +from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID from gcloud.client import _ClientFactoryMixin from gcloud.client import _ClientProjectMixin from gcloud.credentials import get_credentials -TABLE_STUB_FACTORY = ( - bigtable_table_service_pb2.beta_create_BigtableTableService_stub) -TABLE_ADMIN_HOST = 'bigtabletableadmin.googleapis.com' +TABLE_STUB_FACTORY_V2 = ( + table_admin_v2_pb2.beta_create_BigtableTableAdmin_stub) +TABLE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com' """Table Admin API request host.""" -TABLE_ADMIN_PORT = 443 +TABLE_ADMIN_PORT_V2 = 443 """Table Admin API request port.""" -CLUSTER_STUB_FACTORY = ( - bigtable_cluster_service_pb2.beta_create_BigtableClusterService_stub) -CLUSTER_ADMIN_HOST = 'bigtableclusteradmin.googleapis.com' +INSTANCE_STUB_FACTORY_V2 = ( + instance_admin_v2_pb2.beta_create_BigtableInstanceAdmin_stub) +INSTANCE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com' """Cluster Admin API request host.""" -CLUSTER_ADMIN_PORT = 443 +INSTANCE_ADMIN_PORT_V2 = 443 """Cluster Admin API request port.""" -DATA_STUB_FACTORY = bigtable_service_pb2.beta_create_BigtableService_stub -DATA_API_HOST = 'bigtable.googleapis.com' +DATA_STUB_FACTORY_V2 = data_v2_pb2.beta_create_Bigtable_stub +DATA_API_HOST_V2 = 'bigtable.googleapis.com' """Data API request host.""" -DATA_API_PORT = 443 +DATA_API_PORT_V2 = 443 """Data API request port.""" -OPERATIONS_STUB_FACTORY = operations_grpc_pb2.beta_create_Operations_stub 
+OPERATIONS_STUB_FACTORY_V2 = operations_grpc_v2_pb2.beta_create_Operations_stub +OPERATIONS_API_HOST_V2 = INSTANCE_ADMIN_HOST_V2 +OPERATIONS_API_PORT_V2 = INSTANCE_ADMIN_PORT_V2 ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin' """Scope for interacting with the Cluster Admin and Table Admin APIs.""" @@ -91,14 +100,14 @@ class Client(_ClientFactoryMixin, _ClientProjectMixin): :type project: :class:`str` or :func:`unicode ` :param project: (Optional) The ID of the project which owns the - clusters, tables and data. If not provided, will + instances, tables and data. If not provided, will attempt to determine from the environment. :type credentials: :class:`OAuth2Credentials ` or :data:`NoneType ` :param credentials: (Optional) The OAuth2 Credentials to use for this - cluster. If not provided, defaults to the Google + client. If not provided, defaults to the Google Application Default Credentials. :type read_only: bool @@ -108,7 +117,7 @@ class Client(_ClientFactoryMixin, _ClientProjectMixin): :type admin: bool :param admin: (Optional) Boolean indicating if the client will be used to - interact with the Cluster Admin or Table Admin APIs. This + interact with the Instance Admin or Table Admin APIs. This requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. :type user_agent: str @@ -155,7 +164,7 @@ def __init__(self, project=None, credentials=None, # These will be set in start(). self._data_stub_internal = None - self._cluster_stub_internal = None + self._instance_stub_internal = None self._operations_stub_internal = None self._table_stub_internal = None @@ -191,7 +200,7 @@ def credentials(self): @property def project_name(self): - """Project name to be used with Cluster Admin API. + """Project name to be used with Instance Admin API. .. note:: @@ -222,8 +231,8 @@ def _data_stub(self): return self._data_stub_internal @property - def _cluster_stub(self): - """Getter for the gRPC stub used for the Cluster Admin API. + def _instance_stub(self): + """Getter for the gRPC stub used for the Instance Admin API. :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. @@ -233,9 +242,9 @@ def _cluster_stub(self): """ if not self._admin: raise ValueError('Client is not an admin client.') - if self._cluster_stub_internal is None: + if self._instance_stub_internal is None: raise ValueError('Client has not been started.') - return self._cluster_stub_internal + return self._instance_stub_internal @property def _operations_stub(self): @@ -275,29 +284,29 @@ def _make_data_stub(self): :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, DATA_STUB_FACTORY, - DATA_API_HOST, DATA_API_PORT) + return _make_stub(self, DATA_STUB_FACTORY_V2, + DATA_API_HOST_V2, DATA_API_PORT_V2) - def _make_cluster_stub(self): - """Creates gRPC stub to make requests to the Cluster Admin API. + def _make_instance_stub(self): + """Creates gRPC stub to make requests to the Instance Admin API. :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, CLUSTER_STUB_FACTORY, - CLUSTER_ADMIN_HOST, CLUSTER_ADMIN_PORT) + return _make_stub(self, INSTANCE_STUB_FACTORY_V2, + INSTANCE_ADMIN_HOST_V2, INSTANCE_ADMIN_PORT_V2) def _make_operations_stub(self): """Creates gRPC stub to make requests to the Operations API. - These are for long-running operations of the Cluster Admin API, + These are for long-running operations of the Instance Admin API, hence the host and port matching. 
:rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, OPERATIONS_STUB_FACTORY, - CLUSTER_ADMIN_HOST, CLUSTER_ADMIN_PORT) + return _make_stub(self, OPERATIONS_STUB_FACTORY_V2, + OPERATIONS_API_HOST_V2, OPERATIONS_API_PORT_V2) def _make_table_stub(self): """Creates gRPC stub to make requests to the Table Admin API. @@ -305,8 +314,8 @@ def _make_table_stub(self): :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, TABLE_STUB_FACTORY, - TABLE_ADMIN_HOST, TABLE_ADMIN_PORT) + return _make_stub(self, TABLE_STUB_FACTORY_V2, + TABLE_ADMIN_HOST_V2, TABLE_ADMIN_PORT_V2) def is_started(self): """Check if the client has been started. @@ -333,11 +342,11 @@ def start(self): self._data_stub_internal = self._make_data_stub() self._data_stub_internal.__enter__() if self._admin: - self._cluster_stub_internal = self._make_cluster_stub() + self._instance_stub_internal = self._make_instance_stub() self._operations_stub_internal = self._make_operations_stub() self._table_stub_internal = self._make_table_stub() - self._cluster_stub_internal.__enter__() + self._instance_stub_internal.__enter__() self._operations_stub_internal.__enter__() self._table_stub_internal.__enter__() @@ -355,12 +364,12 @@ def stop(self): # traceback to __exit__. self._data_stub_internal.__exit__(None, None, None) if self._admin: - self._cluster_stub_internal.__exit__(None, None, None) + self._instance_stub_internal.__exit__(None, None, None) self._operations_stub_internal.__exit__(None, None, None) self._table_stub_internal.__exit__(None, None, None) self._data_stub_internal = None - self._cluster_stub_internal = None + self._instance_stub_internal = None self._operations_stub_internal = None self._table_stub_internal = None @@ -368,78 +377,59 @@ def __exit__(self, exc_type, exc_val, exc_t): """Stops the client as a context manager.""" self.stop() - def cluster(self, zone, cluster_id, display_name=None, serve_nodes=3): - """Factory to create a cluster associated with this client. + def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID, + display_name=None, serve_nodes=DEFAULT_SERVE_NODES): + """Factory to create a instance associated with this client. - :type zone: str - :param zone: The name of the zone where the cluster resides. + :type instance_id: str + :param instance_id: The ID of the instance. - :type cluster_id: str - :param cluster_id: The ID of the cluster. + :type location: string + :param location: location name, in form + ``projects//locations/``; used to + set up the instance's cluster. :type display_name: str - :param display_name: (Optional) The display name for the cluster in the - Cloud Console UI. (Must be between 4 and 30 + :param display_name: (Optional) The display name for the instance in + the Cloud Console UI. (Must be between 4 and 30 characters.) If this value is not set in the - constructor, will fall back to the cluster ID. + constructor, will fall back to the instance ID. :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the cluster. - Defaults to 3. + :param serve_nodes: (Optional) The number of nodes in the instance's + cluster; used to set up the instance's cluster. - :rtype: :class:`.Cluster` - :returns: The cluster owned by this client. + :rtype: :class:`.Instance` + :returns: an instance owned by this client. 
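For example, a minimal sketch of the new factory together with ``list_instances`` (shown below); the project, instance and location IDs are placeholders:

.. code:: python

    from gcloud.bigtable.client import Client

    client = Client(project='my-project', admin=True)
    location = 'projects/my-project/locations/us-central1-b'
    instance = client.instance('my-instance', location)

    client.start()  # opens the data and admin gRPC stubs
    instances, failed_locations = client.list_instances()
    client.stop()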
""" - return Cluster(zone, cluster_id, self, - display_name=display_name, serve_nodes=serve_nodes) + return Instance(instance_id, self, location, + display_name=display_name, serve_nodes=serve_nodes) - def list_zones(self): - """Lists zones associated with project. - - :rtype: list - :returns: The names (as :class:`str`) of the zones - :raises: :class:`ValueError ` if one of the - zones is not in ``OK`` state. - """ - request_pb = messages_pb2.ListZonesRequest(name=self.project_name) - # We expect a `.messages_pb2.ListZonesResponse` - list_zones_response = self._cluster_stub.ListZones( - request_pb, self.timeout_seconds) - - result = [] - for zone in list_zones_response.zones: - if zone.status != data_pb2.Zone.OK: - raise ValueError('Zone %s not in OK state' % ( - zone.display_name,)) - result.append(zone.display_name) - return result - - def list_clusters(self): - """Lists clusters owned by the project. + def list_instances(self): + """List instances owned by the project. :rtype: tuple - :returns: A pair of results, the first is a list of :class:`.Cluster` s - returned and the second is a list of strings (the failed - zones in the request). + :returns: A pair of results, the first is a list of + :class:`.Instance` objects returned and the second is a + list of strings (the failed locations in the request). """ - request_pb = messages_pb2.ListClustersRequest(name=self.project_name) - # We expect a `.messages_pb2.ListClustersResponse` - list_clusters_response = self._cluster_stub.ListClusters( + request_pb = instance_admin_v2_pb2.ListInstancesRequest( + parent=self.project_name) + + response = self._instance_stub.ListInstances( request_pb, self.timeout_seconds) - failed_zones = [zone.display_name - for zone in list_clusters_response.failed_zones] - clusters = [Cluster.from_pb(cluster_pb, self) - for cluster_pb in list_clusters_response.clusters] - return clusters, failed_zones + instances = [Instance.from_pb(instance_pb, self) + for instance_pb in response.instances] + return instances, response.failed_locations class _MetadataPlugin(object): """Callable class to transform metadata for gRPC requests. :type client: :class:`.client.Client` - :param client: The client that owns the cluster. Provides authorization and - user agent. + :param client: The client that owns the instance. + Provides authorization and user agent. """ def __init__(self, client): @@ -462,8 +452,8 @@ def _make_stub(client, stub_factory, host, port): Uses / depends on the beta implementation of gRPC. :type client: :class:`.client.Client` - :param client: The client that owns the cluster. Provides authorization and - user agent. + :param client: The client that owns the instance. + Provides authorization and user agent. 
:type stub_factory: callable :param stub_factory: A factory which will create a gRPC stub for diff --git a/gcloud/bigtable/cluster.py b/gcloud/bigtable/cluster.py index 55e7a49b446d..7867cff82bc7 100644 --- a/gcloud/bigtable/cluster.py +++ b/gcloud/bigtable/cluster.py @@ -19,30 +19,21 @@ from google.longrunning import operations_pb2 -from gcloud._helpers import _pb_timestamp_to_datetime -from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as data_pb2 -from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) -from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as table_messages_pb2) -from gcloud.bigtable.table import Table +from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) _CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' - r'zones/(?P[^/]+)/clusters/' + r'instances/(?P[^/]+)/clusters/' r'(?P[a-z][-a-z0-9]*)$') -_OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/zones/([^/]+)/' - r'clusters/([a-z][-a-z0-9]*)/operations/' - r'(?P\d+)$') -_TYPE_URL_BASE = 'type.googleapis.com/google.bigtable.' -_ADMIN_TYPE_URL_BASE = _TYPE_URL_BASE + 'admin.cluster.v1.' -_CLUSTER_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'CreateClusterMetadata' -_UPDATE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UpdateClusterMetadata' -_UNDELETE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UndeleteClusterMetadata' +_OPERATION_NAME_RE = re.compile(r'^operations/' + r'projects/([^/]+)/' + r'instances/([^/]+)/' + r'clusters/([a-z][-a-z0-9]*)/' + r'operations/(?P\d+)$') _TYPE_URL_MAP = { - _CLUSTER_CREATE_METADATA: messages_pb2.CreateClusterMetadata, - _UPDATE_CREATE_METADATA: messages_pb2.UpdateClusterMetadata, - _UNDELETE_CREATE_METADATA: messages_pb2.UndeleteClusterMetadata, } DEFAULT_SERVE_NODES = 3 @@ -55,16 +46,13 @@ def _prepare_create_request(cluster): :type cluster: :class:`Cluster` :param cluster: The cluster to be created. - :rtype: :class:`.messages_pb2.CreateClusterRequest` + :rtype: :class:`.messages_v2_pb2.CreateClusterRequest` :returns: The CreateCluster request object containing the cluster info. """ - zone_full_name = ('projects/' + cluster._client.project + - '/zones/' + cluster.zone) - return messages_pb2.CreateClusterRequest( - name=zone_full_name, + return messages_v2_pb2.CreateClusterRequest( + parent=cluster._instance.name, cluster_id=cluster.cluster_id, - cluster=data_pb2.Cluster( - display_name=cluster.display_name, + cluster=data_v2_pb2.Cluster( serve_nodes=cluster.serve_nodes, ), ) @@ -100,9 +88,7 @@ def _process_operation(operation_pb): Create/Update/Undelete cluster request. :rtype: tuple - :returns: A pair of an integer and datetime stamp. The integer is the ID - of the operation (``operation_id``) and the timestamp when - the create operation began (``operation_begin``). + :returns: integer ID of the operation (``operation_id``). :raises: :class:`ValueError ` if the operation name doesn't match the :data:`_OPERATION_NAME_RE` regex. """ @@ -113,11 +99,7 @@ def _process_operation(operation_pb): operation_pb.name) operation_id = int(match.group('operation_id')) - request_metadata = _parse_pb_any_to_native(operation_pb.metadata) - operation_begin = _pb_timestamp_to_datetime( - request_metadata.request_time) - - return operation_id, operation_begin + return operation_id class Operation(object): @@ -133,17 +115,13 @@ class Operation(object): :type op_id: int :param op_id: The ID of the operation. 
- :type begin: :class:`datetime.datetime` - :param begin: The time when the operation was started. - :type cluster: :class:`Cluster` :param cluster: The cluster that created the operation. """ - def __init__(self, op_type, op_id, begin, cluster=None): + def __init__(self, op_type, op_id, cluster=None): self.op_type = op_type self.op_id = op_id - self.begin = begin self._cluster = cluster self._complete = False @@ -152,7 +130,6 @@ def __eq__(self, other): return False return (other.op_type == self.op_type and other.op_id == self.op_id and - other.begin == self.begin and other._cluster == self._cluster and other._complete == self._complete) @@ -174,8 +151,9 @@ def finished(self): '/operations/%d' % (self.op_id,)) request_pb = operations_pb2.GetOperationRequest(name=operation_name) # We expect a `google.longrunning.operations_pb2.Operation`. - operation_pb = self._cluster._client._operations_stub.GetOperation( - request_pb, self._cluster._client.timeout_seconds) + client = self._cluster._instance._client + operation_pb = client._operations_stub.GetOperation( + request_pb, client.timeout_seconds) if operation_pb.done: self._complete = True @@ -198,87 +176,67 @@ class Cluster(object): .. note:: For now, we leave out the ``default_storage_type`` (an enum) - which if not sent will end up as :data:`.data_pb2.STORAGE_SSD`. - - :type zone: str - :param zone: The name of the zone where the cluster resides. + which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`. :type cluster_id: str :param cluster_id: The ID of the cluster. - :type client: :class:`Client ` - :param client: The client that owns the cluster. Provides - authorization and a project ID. - - :type display_name: str - :param display_name: (Optional) The display name for the cluster in the - Cloud Console UI. (Must be between 4 and 30 - characters.) If this value is not set in the - constructor, will fall back to the cluster ID. + :type instance: :class:`.instance.Instance` + :param instance: The instance where the cluster resides. :type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the cluster. Defaults to :data:`DEFAULT_SERVE_NODES`. """ - def __init__(self, zone, cluster_id, client, - display_name=None, serve_nodes=DEFAULT_SERVE_NODES): - self.zone = zone + def __init__(self, cluster_id, instance, + serve_nodes=DEFAULT_SERVE_NODES): self.cluster_id = cluster_id - self.display_name = display_name or cluster_id + self._instance = instance self.serve_nodes = serve_nodes - self._client = client - - def table(self, table_id): - """Factory to create a table associated with this cluster. - - :type table_id: str - :param table_id: The ID of the table. - - :rtype: :class:`Table ` - :returns: The table owned by this cluster. - """ - return Table(table_id, self) + self.location = None def _update_from_pb(self, cluster_pb): """Refresh self from the server-provided protobuf. Helper for :meth:`from_pb` and :meth:`reload`. """ - if not cluster_pb.display_name: # Simple field (string) - raise ValueError('Cluster protobuf does not contain display_name') if not cluster_pb.serve_nodes: # Simple field (int32) raise ValueError('Cluster protobuf does not contain serve_nodes') - self.display_name = cluster_pb.display_name self.serve_nodes = cluster_pb.serve_nodes + self.location = cluster_pb.location @classmethod - def from_pb(cls, cluster_pb, client): + def from_pb(cls, cluster_pb, instance): """Creates a cluster instance from a protobuf. 
- :type cluster_pb: :class:`bigtable_cluster_data_pb2.Cluster` + :type cluster_pb: :class:`instance_pb2.Cluster` :param cluster_pb: A cluster protobuf object. - :type client: :class:`Client ` - :param client: The client that owns the cluster. + :type instance: :class:`.instance.Instance>` + :param instance: The instance that owns the cluster. :rtype: :class:`Cluster` :returns: The cluster parsed from the protobuf response. - :raises: :class:`ValueError ` if the cluster - name does not match - ``projects/{project}/zones/{zone}/clusters/{cluster_id}`` - or if the parsed project ID does not match the project ID - on the client. + :raises: + :class:`ValueError ` if the cluster + name does not match + ``projects/{project}/instances/{instance}/clusters/{cluster_id}`` + or if the parsed project ID does not match the project ID + on the client. """ match = _CLUSTER_NAME_RE.match(cluster_pb.name) if match is None: raise ValueError('Cluster protobuf name was not in the ' 'expected format.', cluster_pb.name) - if match.group('project') != client.project: + if match.group('project') != instance._client.project: raise ValueError('Project ID on cluster does not match the ' 'project ID on the client') + if match.group('instance') != instance.instance_id: + raise ValueError('Instance ID on cluster does not match the ' + 'instance ID on the client') - result = cls(match.group('zone'), match.group('cluster_id'), client) + result = cls(match.group('cluster_id'), instance) result._update_from_pb(cluster_pb) return result @@ -291,9 +249,8 @@ def copy(self): :rtype: :class:`.Cluster` :returns: A copy of the current cluster. """ - new_client = self._client.copy() - return self.__class__(self.zone, self.cluster_id, new_client, - display_name=self.display_name, + new_instance = self._instance.copy() + return self.__class__(self.cluster_id, new_instance, serve_nodes=self.serve_nodes) @property @@ -301,43 +258,41 @@ def name(self): """Cluster name used in requests. .. note:: - This property will not change if ``zone`` and ``cluster_id`` do not, - but the return value is not cached. + This property will not change if ``_instance`` and ``cluster_id`` + do not, but the return value is not cached. The cluster name is of the form - ``"projects/{project}/zones/{zone}/clusters/{cluster_id}"`` + ``"projects/{project}/instances/{instance}/clusters/{cluster_id}"`` :rtype: str :returns: The cluster name. """ - return (self._client.project_name + '/zones/' + self.zone + - '/clusters/' + self.cluster_id) + return self._instance.name + '/clusters/' + self.cluster_id def __eq__(self, other): if not isinstance(other, self.__class__): return False # NOTE: This does not compare the configuration values, such as - # the serve_nodes or display_name. Instead, it only compares - # identifying values zone, cluster ID and client. This is + # the serve_nodes. Instead, it only compares + # identifying values instance, cluster ID and client. This is # intentional, since the same cluster can be in different states - # if not synchronized. Clusters with similar zone/cluster + # if not synchronized. Clusters with similar instance/cluster # settings but different clients can't be used in the same way. 
- return (other.zone == self.zone and - other.cluster_id == self.cluster_id and - other._client == self._client) + return (other.cluster_id == self.cluster_id and + other._instance == self._instance) def __ne__(self, other): return not self.__eq__(other) def reload(self): """Reload the metadata for this cluster.""" - request_pb = messages_pb2.GetClusterRequest(name=self.name) - # We expect a `._generated.bigtable_cluster_data_pb2.Cluster`. - cluster_pb = self._client._cluster_stub.GetCluster( - request_pb, self._client.timeout_seconds) + request_pb = messages_v2_pb2.GetClusterRequest(name=self.name) + # We expect a `._generated_v2.instance_pb2.Cluster`. + cluster_pb = self._instance._client._instance_stub.GetCluster( + request_pb, self._instance._client.timeout_seconds) - # NOTE: _update_from_pb does not check that the project, zone and + # NOTE: _update_from_pb does not check that the project, instance and # cluster ID on the response match the request. self._update_from_pb(cluster_pb) @@ -346,14 +301,13 @@ def create(self): .. note:: - Uses the ``project``, ``zone`` and ``cluster_id`` on the current - :class:`Cluster` in addition to the ``display_name`` and - ``serve_nodes``. If you'd like to change them before creating, - reset the values via + Uses the ``project``, ``instance`` and ``cluster_id`` on the + current :class:`Cluster` in addition to the ``serve_nodes``. + To change them before creating, reset the values via .. code:: python - cluster.display_name = 'New display name' + cluster.serve_nodes = 8 cluster.cluster_id = 'i-changed-my-mind' before calling :meth:`create`. @@ -364,24 +318,23 @@ def create(self): """ request_pb = _prepare_create_request(self) # We expect a `google.longrunning.operations_pb2.Operation`. - cluster_pb = self._client._cluster_stub.CreateCluster( - request_pb, self._client.timeout_seconds) + operation_pb = self._instance._client._instance_stub.CreateCluster( + request_pb, self._instance._client.timeout_seconds) - op_id, op_begin = _process_operation(cluster_pb.current_operation) - return Operation('create', op_id, op_begin, cluster=self) + op_id = _process_operation(operation_pb) + return Operation('create', op_id, cluster=self) def update(self): """Update this cluster. .. note:: - Updates the ``display_name`` and ``serve_nodes``. If you'd like to + Updates the ``serve_nodes``. If you'd like to change them before updating, reset the values via .. code:: python - cluster.display_name = 'New display name' - cluster.serve_nodes = 3 + cluster.serve_nodes = 8 before calling :meth:`update`. @@ -389,17 +342,16 @@ def update(self): :returns: The long-running operation corresponding to the update operation. """ - request_pb = data_pb2.Cluster( + request_pb = data_v2_pb2.Cluster( name=self.name, - display_name=self.display_name, serve_nodes=self.serve_nodes, ) - # We expect a `._generated.bigtable_cluster_data_pb2.Cluster`. - cluster_pb = self._client._cluster_stub.UpdateCluster( - request_pb, self._client.timeout_seconds) + # Ignore expected `._generated_v2.instance_pb2.Cluster`. + operation_pb = self._instance._client._instance_stub.UpdateCluster( + request_pb, self._instance._client.timeout_seconds) - op_id, op_begin = _process_operation(cluster_pb.current_operation) - return Operation('update', op_id, op_begin, cluster=self) + op_id = _process_operation(operation_pb) + return Operation('update', op_id, cluster=self) def delete(self): """Delete this cluster. 
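Putting the pieces above together, a minimal sketch of creating a cluster and polling its long-running operation; ``instance`` is assumed to come from ``client.instance(...)`` and the cluster ID is a placeholder:

.. code:: python

    import time

    from gcloud.bigtable.cluster import Cluster

    cluster = Cluster('my-cluster', instance, serve_nodes=3)
    operation = cluster.create()
    # Operation.finished() issues a GetOperation call (see above) and
    # reports True once the server marks the operation done.
    while not operation.finished():
        time.sleep(2)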
@@ -426,64 +378,7 @@ def delete(self): irrevocably disappear from the API, and their data will be permanently deleted. """ - request_pb = messages_pb2.DeleteClusterRequest(name=self.name) + request_pb = messages_v2_pb2.DeleteClusterRequest(name=self.name) # We expect a `google.protobuf.empty_pb2.Empty` - self._client._cluster_stub.DeleteCluster( - request_pb, self._client.timeout_seconds) - - def undelete(self): - """Undelete this cluster. - - Cancels the scheduled deletion of an cluster and begins preparing it to - resume serving. The returned operation will also be embedded as the - cluster's ``current_operation``. - - Immediately upon completion of this request: - - * The cluster's ``delete_time`` field will be unset, protecting it from - automatic deletion. - - Until completion of the returned operation: - - * The operation cannot be cancelled. - - Upon completion of the returned operation: - - * Billing for the cluster's resources will resume. - * All tables within the cluster will be available. - - :rtype: :class:`Operation` - :returns: The long-running operation corresponding to the - undelete operation. - """ - request_pb = messages_pb2.UndeleteClusterRequest(name=self.name) - # We expect a `google.longrunning.operations_pb2.Operation`. - operation_pb2 = self._client._cluster_stub.UndeleteCluster( - request_pb, self._client.timeout_seconds) - - op_id, op_begin = _process_operation(operation_pb2) - return Operation('undelete', op_id, op_begin, cluster=self) - - def list_tables(self): - """List the tables in this cluster. - - :rtype: list of :class:`Table ` - :returns: The list of tables owned by the cluster. - :raises: :class:`ValueError ` if one of the - returned tables has a name that is not of the expected format. - """ - request_pb = table_messages_pb2.ListTablesRequest(name=self.name) - # We expect a `table_messages_pb2.ListTablesResponse` - table_list_pb = self._client._table_stub.ListTables( - request_pb, self._client.timeout_seconds) - - result = [] - for table_pb in table_list_pb.tables: - table_prefix = self.name + '/tables/' - if not table_pb.name.startswith(table_prefix): - raise ValueError('Table name %s not of expected format' % ( - table_pb.name,)) - table_id = table_pb.name[len(table_prefix):] - result.append(self.table(table_id)) - - return result + self._instance._client._instance_stub.DeleteCluster( + request_pb, self._instance._client.timeout_seconds) diff --git a/gcloud/bigtable/column_family.py b/gcloud/bigtable/column_family.py index c0d9060316a4..10127aa74961 100644 --- a/gcloud/bigtable/column_family.py +++ b/gcloud/bigtable/column_family.py @@ -20,9 +20,10 @@ from google.protobuf import duration_pb2 from gcloud._helpers import _total_seconds -from gcloud.bigtable._generated import bigtable_table_data_pb2 as data_pb2 -from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) +from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) def _timedelta_to_duration_pb(timedelta_val): @@ -110,10 +111,10 @@ def __eq__(self, other): def to_pb(self): """Converts the garbage collection rule to a protobuf. - :rtype: :class:`.data_pb2.GcRule` + :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object. 
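For instance, a minimal sketch of composing these rules with the higher-level helpers in this module (``GCRuleUnion`` appears just below; the constructor arguments shown are assumed):

.. code:: python

    import datetime

    from gcloud.bigtable.column_family import (
        GCRuleUnion, MaxAgeGCRule, MaxVersionsGCRule)

    # Expire cells once they are older than a week *and* beyond two versions.
    rule = GCRuleUnion(rules=[
        MaxVersionsGCRule(2),
        MaxAgeGCRule(datetime.timedelta(days=7)),
    ])
    gc_rule_pb = rule.to_pb()  # a table_v2_pb2.GcRule with ``union`` set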
""" - return data_pb2.GcRule(max_num_versions=self.max_num_versions) + return table_v2_pb2.GcRule(max_num_versions=self.max_num_versions) class MaxAgeGCRule(GarbageCollectionRule): @@ -134,11 +135,11 @@ def __eq__(self, other): def to_pb(self): """Converts the garbage collection rule to a protobuf. - :rtype: :class:`.data_pb2.GcRule` + :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object. """ max_age = _timedelta_to_duration_pb(self.max_age) - return data_pb2.GcRule(max_age=max_age) + return table_v2_pb2.GcRule(max_age=max_age) class GCRuleUnion(GarbageCollectionRule): @@ -159,12 +160,12 @@ def __eq__(self, other): def to_pb(self): """Converts the union into a single GC rule as a protobuf. - :rtype: :class:`.data_pb2.GcRule` + :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object. """ - union = data_pb2.GcRule.Union( + union = table_v2_pb2.GcRule.Union( rules=[rule.to_pb() for rule in self.rules]) - return data_pb2.GcRule(union=union) + return table_v2_pb2.GcRule(union=union) class GCRuleIntersection(GarbageCollectionRule): @@ -185,12 +186,12 @@ def __eq__(self, other): def to_pb(self): """Converts the intersection into a single GC rule as a protobuf. - :rtype: :class:`.data_pb2.GcRule` + :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object. """ - intersection = data_pb2.GcRule.Intersection( + intersection = table_v2_pb2.GcRule.Intersection( rules=[rule.to_pb() for rule in self.rules]) - return data_pb2.GcRule(intersection=intersection) + return table_v2_pb2.GcRule(intersection=intersection) class ColumnFamily(object): @@ -250,20 +251,22 @@ def __ne__(self, other): def create(self): """Create this column family.""" if self.gc_rule is None: - column_family = data_pb2.ColumnFamily() + column_family = table_v2_pb2.ColumnFamily() else: - column_family = data_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb()) - request_pb = messages_pb2.CreateColumnFamilyRequest( - name=self._table.name, - column_family_id=self.column_family_id, - column_family=column_family, + column_family = table_v2_pb2.ColumnFamily( + gc_rule=self.gc_rule.to_pb()) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + create=column_family, ) - client = self._table._cluster._client - # We expect a `.data_pb2.ColumnFamily`. We ignore it since the only + client = self._table._instance._client + # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only # data it contains are the GC rule and the column family ID already # stored on this instance. - client._table_stub.CreateColumnFamily(request_pb, - client.timeout_seconds) + client._table_stub.ModifyColumnFamilies(request_pb, + client.timeout_seconds) def update(self): """Update this column family. @@ -273,30 +276,40 @@ def update(self): Only the GC rule can be updated. By changing the column family ID, you will simply be referring to a different column family. """ - request_kwargs = {'name': self.name} - if self.gc_rule is not None: - request_kwargs['gc_rule'] = self.gc_rule.to_pb() - request_pb = data_pb2.ColumnFamily(**request_kwargs) - client = self._table._cluster._client - # We expect a `.data_pb2.ColumnFamily`. 
We ignore it since the only + if self.gc_rule is None: + column_family = table_v2_pb2.ColumnFamily() + else: + column_family = table_v2_pb2.ColumnFamily( + gc_rule=self.gc_rule.to_pb()) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + update=column_family) + client = self._table._instance._client + # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only # data it contains are the GC rule and the column family ID already # stored on this instance. - client._table_stub.UpdateColumnFamily(request_pb, - client.timeout_seconds) + client._table_stub.ModifyColumnFamilies(request_pb, + client.timeout_seconds) def delete(self): """Delete this column family.""" - request_pb = messages_pb2.DeleteColumnFamilyRequest(name=self.name) - client = self._table._cluster._client + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + drop=True) + client = self._table._instance._client # We expect a `google.protobuf.empty_pb2.Empty` - client._table_stub.DeleteColumnFamily(request_pb, - client.timeout_seconds) + client._table_stub.ModifyColumnFamilies(request_pb, + client.timeout_seconds) def _gc_rule_from_pb(gc_rule_pb): """Convert a protobuf GC rule to a native object. - :type gc_rule_pb: :class:`.data_pb2.GcRule` + :type gc_rule_pb: :class:`.table_v2_pb2.GcRule` :param gc_rule_pb: The GC rule to convert. :rtype: :class:`GarbageCollectionRule` or :data:`NoneType ` diff --git a/gcloud/bigtable/happybase/__init__.py b/gcloud/bigtable/happybase/__init__.py index 30955b8be936..076a28d7c4d4 100644 --- a/gcloud/bigtable/happybase/__init__.py +++ b/gcloud/bigtable/happybase/__init__.py @@ -84,9 +84,9 @@ * ``protocol`` * In order to make :class:`Connection ` - compatible with Cloud Bigtable, we add a ``cluster`` keyword argument to + compatible with Cloud Bigtable, we add a ``instance`` keyword argument to allow users to pass in their own - :class:`Cluster ` (which they can + :class:`Instance ` (which they can construct beforehand). For example: @@ -95,11 +95,11 @@ from gcloud.bigtable.client import Client client = Client(project=PROJECT_ID, admin=True) - cluster = client.cluster(zone, cluster_id) - cluster.reload() + instance = client.instance(instance_id, location_id) + instance.reload() from gcloud.bigtable.happybase import Connection - connection = Connection(cluster=cluster) + connection = Connection(instance=instance) * Any uses of the ``wal`` (Write Ahead Log) argument will result in a warning as well. This includes uses in: diff --git a/gcloud/bigtable/happybase/connection.py b/gcloud/bigtable/happybase/connection.py index bc57b3429eed..ebea84e93998 100644 --- a/gcloud/bigtable/happybase/connection.py +++ b/gcloud/bigtable/happybase/connection.py @@ -54,25 +54,25 @@ 'of enabled / disabled tables.') -def _get_cluster(timeout=None): - """Gets cluster for the default project. +def _get_instance(timeout=None): + """Gets instance for the default project. Creates a client with the inferred credentials and project ID from the local environment. Then uses - :meth:`.bigtable.client.Client.list_clusters` to - get the unique cluster owned by the project. + :meth:`.bigtable.client.Client.list_instances` to + get the unique instance owned by the project. 
- If the request fails for any reason, or if there isn't exactly one cluster + If the request fails for any reason, or if there isn't exactly one instance owned by the project, then this function will fail. :type timeout: int :param timeout: (Optional) The socket timeout in milliseconds. - :rtype: :class:`gcloud.bigtable.cluster.Cluster` - :returns: The unique cluster owned by the project inferred from + :rtype: :class:`gcloud.bigtable.instance.Instance` + :returns: The unique instance owned by the project inferred from the environment. :raises: :class:`ValueError ` if there is a failed - zone or any number of clusters other than one. + location or any number of instances other than one. """ client_kwargs = {'admin': True} if timeout is not None: @@ -80,20 +80,20 @@ def _get_cluster(timeout=None): client = Client(**client_kwargs) try: client.start() - clusters, failed_zones = client.list_clusters() + instances, failed_locations = client.list_instances() finally: client.stop() - if len(failed_zones) != 0: - raise ValueError('Determining cluster via ListClusters encountered ' - 'failed zones.') - if len(clusters) == 0: - raise ValueError('This client doesn\'t have access to any clusters.') - if len(clusters) > 1: - raise ValueError('This client has access to more than one cluster. ' - 'Please directly pass the cluster you\'d ' + if len(failed_locations) != 0: + raise ValueError('Determining instance via ListInstances encountered ' + 'failed locations.') + if len(instances) == 0: + raise ValueError('This client doesn\'t have access to any instances.') + if len(instances) > 1: + raise ValueError('This client has access to more than one instance. ' + 'Please directly pass the instance you\'d ' 'like to use.') - return clusters[0] + return instances[0] class Connection(object): @@ -101,10 +101,10 @@ class Connection(object): .. note:: - If you pass a ``cluster``, it will be :meth:`.Cluster.copy`-ed before + If you pass a ``instance``, it will be :meth:`.Instance.copy`-ed before being stored on the new connection. This also copies the :class:`Client ` that created the - :class:`Cluster ` instance and the + :class:`Instance ` instance and the :class:`Credentials ` stored on the client. @@ -127,8 +127,8 @@ class Connection(object): :param table_prefix_separator: (Optional) Separator used with ``table_prefix``. Defaults to ``_``. - :type cluster: :class:`Cluster ` - :param cluster: (Optional) A Cloud Bigtable cluster. The instance also + :type instance: :class:`Instance ` + :param instance: (Optional) A Cloud Bigtable instance. The instance also owns a client for making gRPC requests to the Cloud Bigtable API. If not passed in, defaults to creating client with ``admin=True`` and using the ``timeout`` here for the @@ -136,7 +136,7 @@ class Connection(object): :class:`Client ` constructor. The credentials for the client will be the implicit ones loaded from the environment. - Then that client is used to retrieve all the clusters + Then that client is used to retrieve all the instances owned by the client's project. :type kwargs: dict @@ -144,10 +144,10 @@ class Connection(object): compatibility. 
""" - _cluster = None + _instance = None def __init__(self, timeout=None, autoconnect=True, table_prefix=None, - table_prefix_separator='_', cluster=None, **kwargs): + table_prefix_separator='_', instance=None, **kwargs): self._handle_legacy_args(kwargs) if table_prefix is not None: if not isinstance(table_prefix, six.string_types): @@ -162,13 +162,13 @@ def __init__(self, timeout=None, autoconnect=True, table_prefix=None, self.table_prefix = table_prefix self.table_prefix_separator = table_prefix_separator - if cluster is None: - self._cluster = _get_cluster(timeout=timeout) + if instance is None: + self._instance = _get_instance(timeout=timeout) else: if timeout is not None: raise ValueError('Timeout cannot be used when an existing ' - 'cluster is passed') - self._cluster = cluster.copy() + 'instance is passed') + self._instance = instance.copy() if autoconnect: self.open() @@ -203,23 +203,23 @@ def open(self): This method opens the underlying HTTP/2 gRPC connection using a :class:`Client ` bound to the - :class:`Cluster ` owned by + :class:`Instance ` owned by this connection. """ - self._cluster._client.start() + self._instance._client.start() def close(self): """Close the underlying transport to Cloud Bigtable. This method closes the underlying HTTP/2 gRPC connection using a :class:`Client ` bound to the - :class:`Cluster ` owned by + :class:`Instance ` owned by this connection. """ - self._cluster._client.stop() + self._instance._client.stop() def __del__(self): - if self._cluster is not None: + if self._instance is not None: self.close() def _table_name(self, name): @@ -258,7 +258,7 @@ def tables(self): .. note:: - This lists every table in the cluster owned by this connection, + This lists every table in the instance owned by this connection, **not** every table that a given user may have access to. .. note:: @@ -269,7 +269,7 @@ def tables(self): :rtype: list :returns: List of string table names. """ - low_level_table_instances = self._cluster.list_tables() + low_level_table_instances = self._instance.list_tables() table_names = [table_instance.table_id for table_instance in low_level_table_instances] @@ -345,7 +345,7 @@ def create_table(self, name, families): # Create table instance and then make API calls. name = self._table_name(name) - low_level_table = _LowLevelTable(name, self._cluster) + low_level_table = _LowLevelTable(name, self._instance) try: low_level_table.create() except face.NetworkError as network_err: @@ -376,7 +376,7 @@ def delete_table(self, name, disable=False): _WARN(_DISABLE_DELETE_MSG) name = self._table_name(name) - _LowLevelTable(name, self._cluster).delete() + _LowLevelTable(name, self._instance).delete() def enable_table(self, name): """Enable the specified table. diff --git a/gcloud/bigtable/happybase/pool.py b/gcloud/bigtable/happybase/pool.py index ab84724740a2..1ed22cdd6c84 100644 --- a/gcloud/bigtable/happybase/pool.py +++ b/gcloud/bigtable/happybase/pool.py @@ -21,7 +21,7 @@ import six from gcloud.bigtable.happybase.connection import Connection -from gcloud.bigtable.happybase.connection import _get_cluster +from gcloud.bigtable.happybase.connection import _get_instance _MIN_POOL_SIZE = 1 @@ -45,7 +45,7 @@ class ConnectionPool(object): :class:`Connection <.happybase.connection.Connection>` constructor **except** for ``autoconnect``. This is because the ``open`` / ``closed`` status of a connection is managed by the pool. 
In addition, - if ``cluster`` is not passed, the default / inferred cluster is + if ``instance`` is not passed, the default / inferred instance is determined by the pool and then passed to each :class:`Connection <.happybase.connection.Connection>` that is created. @@ -75,8 +75,8 @@ def __init__(self, size, **kwargs): connection_kwargs = kwargs connection_kwargs['autoconnect'] = False - if 'cluster' not in connection_kwargs: - connection_kwargs['cluster'] = _get_cluster( + if 'instance' not in connection_kwargs: + connection_kwargs['instance'] = _get_instance( timeout=kwargs.get('timeout')) for _ in six.moves.range(size): diff --git a/gcloud/bigtable/happybase/table.py b/gcloud/bigtable/happybase/table.py index 3f87f953c026..e35bb8090494 100644 --- a/gcloud/bigtable/happybase/table.py +++ b/gcloud/bigtable/happybase/table.py @@ -109,13 +109,13 @@ class Table(object): def __init__(self, name, connection): self.name = name - # This remains as legacy for HappyBase, but only the cluster + # This remains as legacy for HappyBase, but only the instance # from the connection is needed. self.connection = connection self._low_level_table = None if self.connection is not None: self._low_level_table = _LowLevelTable(self.name, - self.connection._cluster) + self.connection._instance) def __repr__(self): return '' % (self.name,) @@ -378,42 +378,8 @@ def scan(self, row_start=None, row_stop=None, row_prefix=None, :class:`TypeError ` if a string ``filter`` is used. """ - filter_ = kwargs.pop('filter', None) - legacy_args = [] - for kw_name in ('batch_size', 'scan_batching', 'sorted_columns'): - if kw_name in kwargs: - legacy_args.append(kw_name) - kwargs.pop(kw_name) - if legacy_args: - legacy_args = ', '.join(legacy_args) - message = ('The HappyBase legacy arguments %s were used. These ' - 'arguments are unused by gcloud.' % (legacy_args,)) - _WARN(message) - if kwargs: - raise TypeError('Received unexpected arguments', kwargs.keys()) - - if limit is not None and limit < 1: - raise ValueError('limit must be positive') - if row_prefix is not None: - if row_start is not None or row_stop is not None: - raise ValueError('row_prefix cannot be combined with ' - 'row_start or row_stop') - row_start = row_prefix - row_stop = _string_successor(row_prefix) - - filters = [] - if isinstance(filter_, six.string_types): - raise TypeError('Specifying filters as a string is not supported ' - 'by Cloud Bigtable. Use a ' - 'gcloud.bigtable.row.RowFilter instead.') - elif filter_ is not None: - filters.append(filter_) - - if columns is not None: - filters.append(_columns_filter_helper(columns)) - # versions == 1 since we only want the latest. - filter_chain = _filter_chain_helper(versions=1, timestamp=timestamp, - filters=filters) + row_start, row_stop, filter_chain = _scan_filter_helper( + row_start, row_stop, row_prefix, columns, timestamp, limit, kwargs) partial_rows_data = self._low_level_table.read_rows( start_key=row_start, end_key=row_stop, @@ -424,11 +390,12 @@ def scan(self, row_start=None, row_stop=None, row_prefix=None, while True: try: partial_rows_data.consume_next() - row_key, curr_row_data = rows_dict.popitem() - # NOTE: We expect len(rows_dict) == 0, but don't check it. - curr_row_dict = _partial_row_to_dict( - curr_row_data, include_timestamp=include_timestamp) - yield (row_key, curr_row_dict) + for row_key in sorted(rows_dict): + curr_row_data = rows_dict.pop(row_key) + # NOTE: We expect len(rows_dict) == 0, but don't check it. 
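+                # Descriptive note (editorial, inferred from the surrounding
+                # change): draining every buffered row in sorted key order,
+                # instead of a single popitem() call, keeps scan() output
+                # deterministic when consume_next() buffers more than one row.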
+ curr_row_dict = _partial_row_to_dict( + curr_row_data, include_timestamp=include_timestamp) + yield (row_key, curr_row_dict) except StopIteration: break @@ -911,6 +878,49 @@ def _filter_chain_helper(column=None, versions=None, timestamp=None, return RowFilterChain(filters=filters) +def _scan_filter_helper(row_start, row_stop, row_prefix, columns, + timestamp, limit, kwargs): + """Helper for :meth:`scan`: build up a filter chain.""" + filter_ = kwargs.pop('filter', None) + legacy_args = [] + for kw_name in ('batch_size', 'scan_batching', 'sorted_columns'): + if kw_name in kwargs: + legacy_args.append(kw_name) + kwargs.pop(kw_name) + if legacy_args: + legacy_args = ', '.join(legacy_args) + message = ('The HappyBase legacy arguments %s were used. These ' + 'arguments are unused by gcloud.' % (legacy_args,)) + _WARN(message) + if kwargs: + raise TypeError('Received unexpected arguments', kwargs.keys()) + + if limit is not None and limit < 1: + raise ValueError('limit must be positive') + if row_prefix is not None: + if row_start is not None or row_stop is not None: + raise ValueError('row_prefix cannot be combined with ' + 'row_start or row_stop') + row_start = row_prefix + row_stop = _string_successor(row_prefix) + + filters = [] + if isinstance(filter_, six.string_types): + raise TypeError('Specifying filters as a string is not supported ' + 'by Cloud Bigtable. Use a ' + 'gcloud.bigtable.row.RowFilter instead.') + elif filter_ is not None: + filters.append(filter_) + + if columns is not None: + filters.append(_columns_filter_helper(columns)) + + # versions == 1 since we only want the latest. + filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, + filters=filters) + return row_start, row_stop, filter_ + + def _columns_filter_helper(columns): """Creates a union filter for a list of columns. diff --git a/gcloud/bigtable/happybase/test_connection.py b/gcloud/bigtable/happybase/test_connection.py index 2c96b9d1721b..6236539db71f 100644 --- a/gcloud/bigtable/happybase/test_connection.py +++ b/gcloud/bigtable/happybase/test_connection.py @@ -18,27 +18,27 @@ import unittest2 -class Test__get_cluster(unittest2.TestCase): +class Test__get_instance(unittest2.TestCase): def _callFUT(self, timeout=None): - from gcloud.bigtable.happybase.connection import _get_cluster - return _get_cluster(timeout=timeout) + from gcloud.bigtable.happybase.connection import _get_instance + return _get_instance(timeout=timeout) - def _helper(self, timeout=None, clusters=(), failed_zones=()): + def _helper(self, timeout=None, instances=(), failed_locations=()): from functools import partial from gcloud._testing import _Monkey from gcloud.bigtable.happybase import connection as MUT - client_with_clusters = partial(_Client, clusters=clusters, - failed_zones=failed_zones) - with _Monkey(MUT, Client=client_with_clusters): + client_with_instances = partial( + _Client, instances=instances, failed_locations=failed_locations) + with _Monkey(MUT, Client=client_with_instances): result = self._callFUT(timeout=timeout) # If we've reached this point, then _callFUT didn't fail, so we know - # there is exactly one cluster. - cluster, = clusters - self.assertEqual(result, cluster) - client = cluster.client + # there is exactly one instance. 
+ instance, = instances + self.assertEqual(result, instance) + client = instance.client self.assertEqual(client.args, ()) expected_kwargs = {'admin': True} if timeout is not None: @@ -48,28 +48,28 @@ def _helper(self, timeout=None, clusters=(), failed_zones=()): self.assertEqual(client.stop_calls, 1) def test_default(self): - cluster = _Cluster() - self._helper(clusters=[cluster]) + instance = _Instance() + self._helper(instances=[instance]) def test_with_timeout(self): - cluster = _Cluster() - self._helper(timeout=2103, clusters=[cluster]) + instance = _Instance() + self._helper(timeout=2103, instances=[instance]) - def test_with_no_clusters(self): + def test_with_no_instances(self): with self.assertRaises(ValueError): self._helper() - def test_with_too_many_clusters(self): - clusters = [_Cluster(), _Cluster()] + def test_with_too_many_instances(self): + instances = [_Instance(), _Instance()] with self.assertRaises(ValueError): - self._helper(clusters=clusters) + self._helper(instances=instances) - def test_with_failed_zones(self): - cluster = _Cluster() - failed_zone = 'us-central1-c' + def test_with_failed_locations(self): + instance = _Instance() + failed_location = 'us-central1-c' with self.assertRaises(ValueError): - self._helper(clusters=[cluster], - failed_zones=[failed_zone]) + self._helper(instances=[instance], + failed_locations=[failed_location]) class TestConnection(unittest2.TestCase): @@ -82,65 +82,65 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_constructor_defaults(self): - cluster = _Cluster() # Avoid implicit environ check. - self.assertEqual(cluster._client.start_calls, 0) - connection = self._makeOne(cluster=cluster) - self.assertEqual(cluster._client.start_calls, 1) - self.assertEqual(cluster._client.stop_calls, 0) + instance = _Instance() # Avoid implicit environ check. + self.assertEqual(instance._client.start_calls, 0) + connection = self._makeOne(instance=instance) + self.assertEqual(instance._client.start_calls, 1) + self.assertEqual(instance._client.stop_calls, 0) - self.assertEqual(connection._cluster, cluster) + self.assertEqual(connection._instance, instance) self.assertEqual(connection.table_prefix, None) self.assertEqual(connection.table_prefix_separator, '_') def test_constructor_no_autoconnect(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.start_calls, 0) - self.assertEqual(cluster._client.stop_calls, 0) + instance = _Instance() # Avoid implicit environ check. 
+ connection = self._makeOne(autoconnect=False, instance=instance) + self.assertEqual(instance._client.start_calls, 0) + self.assertEqual(instance._client.stop_calls, 0) self.assertEqual(connection.table_prefix, None) self.assertEqual(connection.table_prefix_separator, '_') - def test_constructor_missing_cluster(self): + def test_constructor_missing_instance(self): from gcloud._testing import _Monkey from gcloud.bigtable.happybase import connection as MUT - cluster = _Cluster() + instance = _Instance() timeout = object() - get_cluster_called = [] + get_instance_called = [] - def mock_get_cluster(timeout): - get_cluster_called.append(timeout) - return cluster + def mock_get_instance(timeout): + get_instance_called.append(timeout) + return instance - with _Monkey(MUT, _get_cluster=mock_get_cluster): - connection = self._makeOne(autoconnect=False, cluster=None, + with _Monkey(MUT, _get_instance=mock_get_instance): + connection = self._makeOne(autoconnect=False, instance=None, timeout=timeout) self.assertEqual(connection.table_prefix, None) self.assertEqual(connection.table_prefix_separator, '_') - self.assertEqual(connection._cluster, cluster) + self.assertEqual(connection._instance, instance) - self.assertEqual(get_cluster_called, [timeout]) + self.assertEqual(get_instance_called, [timeout]) def test_constructor_explicit(self): autoconnect = False table_prefix = 'table-prefix' table_prefix_separator = 'sep' - cluster_copy = _Cluster() - cluster = _Cluster(copies=[cluster_copy]) + instance_copy = _Instance() + instance = _Instance(copies=[instance_copy]) connection = self._makeOne( autoconnect=autoconnect, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - cluster=cluster) + instance=instance) self.assertEqual(connection.table_prefix, table_prefix) self.assertEqual(connection.table_prefix_separator, table_prefix_separator) def test_constructor_with_unknown_argument(self): - cluster = _Cluster() + instance = _Instance() with self.assertRaises(TypeError): - self._makeOne(cluster=cluster, unknown='foo') + self._makeOne(instance=instance, unknown='foo') def test_constructor_with_legacy_args(self): from gcloud._testing import _Monkey @@ -151,9 +151,9 @@ def test_constructor_with_legacy_args(self): def mock_warn(msg): warned.append(msg) - cluster = _Cluster() + instance = _Instance() with _Monkey(MUT, _WARN=mock_warn): - self._makeOne(cluster=cluster, host=object(), + self._makeOne(instance=instance, host=object(), port=object(), compat=object(), transport=object(), protocol=object()) @@ -164,10 +164,10 @@ def mock_warn(msg): self.assertIn('transport', warned[0]) self.assertIn('protocol', warned[0]) - def test_constructor_with_timeout_and_cluster(self): - cluster = _Cluster() + def test_constructor_with_timeout_and_instance(self): + instance = _Instance() with self.assertRaises(ValueError): - self._makeOne(cluster=cluster, timeout=object()) + self._makeOne(instance=instance, timeout=object()) def test_constructor_non_string_prefix(self): table_prefix = object() @@ -184,46 +184,46 @@ def test_constructor_non_string_prefix_separator(self): table_prefix_separator=table_prefix_separator) def test_open(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.start_calls, 0) + instance = _Instance() # Avoid implicit environ check. 
+ connection = self._makeOne(autoconnect=False, instance=instance) + self.assertEqual(instance._client.start_calls, 0) connection.open() - self.assertEqual(cluster._client.start_calls, 1) - self.assertEqual(cluster._client.stop_calls, 0) + self.assertEqual(instance._client.start_calls, 1) + self.assertEqual(instance._client.stop_calls, 0) def test_close(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.stop_calls, 0) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) + self.assertEqual(instance._client.stop_calls, 0) connection.close() - self.assertEqual(cluster._client.stop_calls, 1) - self.assertEqual(cluster._client.start_calls, 0) + self.assertEqual(instance._client.stop_calls, 1) + self.assertEqual(instance._client.start_calls, 0) - def test___del__with_cluster(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.stop_calls, 0) + def test___del__with_instance(self): + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) + self.assertEqual(instance._client.stop_calls, 0) connection.__del__() - self.assertEqual(cluster._client.stop_calls, 1) + self.assertEqual(instance._client.stop_calls, 1) - def test___del__no_cluster(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.stop_calls, 0) - del connection._cluster + def test___del__no_instance(self): + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) + self.assertEqual(instance._client.stop_calls, 0) + del connection._instance connection.__del__() - self.assertEqual(cluster._client.stop_calls, 0) + self.assertEqual(instance._client.stop_calls, 0) def test__table_name_with_prefix_set(self): table_prefix = 'table-prefix' table_prefix_separator = '<>' - cluster = _Cluster() + instance = _Instance() connection = self._makeOne( autoconnect=False, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - cluster=cluster) + instance=instance) name = 'some-name' prefixed = connection._table_name(name) @@ -231,9 +231,9 @@ def test__table_name_with_prefix_set(self): table_prefix + table_prefix_separator + name) def test__table_name_with_no_prefix_set(self): - cluster = _Cluster() + instance = _Instance() connection = self._makeOne(autoconnect=False, - cluster=cluster) + instance=instance) name = 'some-name' prefixed = connection._table_name(name) @@ -242,8 +242,8 @@ def test__table_name_with_no_prefix_set(self): def test_table_factory(self): from gcloud.bigtable.happybase.table import Table - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' table = connection.table(name) @@ -255,13 +255,13 @@ def test_table_factory(self): def _table_factory_prefix_helper(self, use_prefix=True): from gcloud.bigtable.happybase.table import Table - cluster = _Cluster() # Avoid implicit environ check. + instance = _Instance() # Avoid implicit environ check. 
table_prefix = 'table-prefix' table_prefix_separator = '<>' connection = self._makeOne( autoconnect=False, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - cluster=cluster) + instance=instance) name = 'table-name' table = connection.table(name, use_prefix=use_prefix) @@ -285,11 +285,11 @@ def test_tables(self): table_name1 = 'table-name1' table_name2 = 'table-name2' - cluster = _Cluster(list_tables_result=[ + instance = _Instance(list_tables_result=[ Table(table_name1, None), Table(table_name2, None), ]) - connection = self._makeOne(autoconnect=False, cluster=cluster) + connection = self._makeOne(autoconnect=False, instance=instance) result = connection.tables() self.assertEqual(result, [table_name1, table_name2]) @@ -303,12 +303,12 @@ def test_tables_with_prefix(self): table_name1 = (table_prefix + table_prefix_separator + unprefixed_table_name1) table_name2 = 'table-name2' - cluster = _Cluster(list_tables_result=[ + instance = _Instance(list_tables_result=[ Table(table_name1, None), Table(table_name2, None), ]) connection = self._makeOne( - autoconnect=False, cluster=cluster, table_prefix=table_prefix, + autoconnect=False, instance=instance, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator) result = connection.tables() self.assertEqual(result, [unprefixed_table_name1]) @@ -318,8 +318,8 @@ def test_create_table(self): from gcloud._testing import _Monkey from gcloud.bigtable.happybase import connection as MUT - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) mock_gc_rule = object() called_options = [] @@ -354,7 +354,7 @@ def make_table(*args, **kwargs): # Just one table would have been created. table_instance, = tables_created - self.assertEqual(table_instance.args, (name, cluster)) + self.assertEqual(table_instance.args, (name, instance)) self.assertEqual(table_instance.kwargs, {}) self.assertEqual(table_instance.create_calls, 1) @@ -380,8 +380,8 @@ def make_table(*args, **kwargs): self.assertEqual(col_fam_created[2].create_calls, 1) def test_create_table_bad_type(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' families = None @@ -389,8 +389,8 @@ def test_create_table_bad_type(self): connection.create_table(name, families) def test_create_table_bad_value(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' families = {} @@ -401,8 +401,8 @@ def _create_table_error_helper(self, err_val, err_type): from gcloud._testing import _Monkey from gcloud.bigtable.happybase import connection as MUT - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. 
+ connection = self._makeOne(autoconnect=False, instance=instance) tables_created = [] @@ -450,8 +450,8 @@ def _delete_table_helper(self, disable=False): from gcloud._testing import _Monkey from gcloud.bigtable.happybase import connection as MUT - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) tables_created = [] @@ -466,7 +466,7 @@ def make_table(*args, **kwargs): # Just one table would have been created. table_instance, = tables_created - self.assertEqual(table_instance.args, (name, cluster)) + self.assertEqual(table_instance.args, (name, instance)) self.assertEqual(table_instance.kwargs, {}) self.assertEqual(table_instance.delete_calls, 1) @@ -488,32 +488,32 @@ def mock_warn(msg): self.assertEqual(warned, [MUT._DISABLE_DELETE_MSG]) def test_enable_table(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' with self.assertRaises(NotImplementedError): connection.enable_table(name) def test_disable_table(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' with self.assertRaises(NotImplementedError): connection.disable_table(name) def test_is_table_enabled(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. + connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' with self.assertRaises(NotImplementedError): connection.is_table_enabled(name) def test_compact_table(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) + instance = _Instance() # Avoid implicit environ check. 
+ connection = self._makeOne(autoconnect=False, instance=instance) name = 'table-name' major = True @@ -608,10 +608,10 @@ def test_non_dictionary(self): class _Client(object): def __init__(self, *args, **kwargs): - self.clusters = kwargs.pop('clusters', []) - for cluster in self.clusters: - cluster.client = self - self.failed_zones = kwargs.pop('failed_zones', []) + self.instances = kwargs.pop('instances', []) + for instance in self.instances: + instance.client = self + self.failed_locations = kwargs.pop('failed_locations', []) self.args = args self.kwargs = kwargs self.start_calls = 0 @@ -623,11 +623,11 @@ def start(self): def stop(self): self.stop_calls += 1 - def list_clusters(self): - return self.clusters, self.failed_zones + def list_instances(self): + return self.instances, self.failed_locations -class _Cluster(object): +class _Instance(object): def __init__(self, copies=(), list_tables_result=()): self.copies = list(copies) diff --git a/gcloud/bigtable/happybase/test_pool.py b/gcloud/bigtable/happybase/test_pool.py index c3634681e45d..50212927c0b5 100644 --- a/gcloud/bigtable/happybase/test_pool.py +++ b/gcloud/bigtable/happybase/test_pool.py @@ -31,10 +31,10 @@ def test_constructor_defaults(self): from gcloud.bigtable.happybase.connection import Connection size = 11 - cluster_copy = _Cluster() - all_copies = [cluster_copy] * size - cluster = _Cluster(copies=all_copies) # Avoid implicit environ check. - pool = self._makeOne(size, cluster=cluster) + instance_copy = _Instance() + all_copies = [instance_copy] * size + instance = _Instance(all_copies) # Avoid implicit environ check. + pool = self._makeOne(size, instance=instance) self.assertTrue(isinstance(pool._lock, type(threading.Lock()))) self.assertTrue(isinstance(pool._thread_connections, threading.local)) @@ -46,17 +46,17 @@ def test_constructor_defaults(self): self.assertEqual(queue.maxsize, size) for connection in queue.queue: self.assertTrue(isinstance(connection, Connection)) - self.assertTrue(connection._cluster is cluster_copy) + self.assertTrue(connection._instance is instance_copy) def test_constructor_passes_kwargs(self): table_prefix = 'foo' table_prefix_separator = '<>' - cluster = _Cluster() # Avoid implicit environ check. + instance = _Instance() # Avoid implicit environ check. size = 1 pool = self._makeOne(size, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - cluster=cluster) + instance=instance) for connection in pool._queue.queue: self.assertEqual(connection.table_prefix, table_prefix) @@ -76,53 +76,52 @@ def open(self): self._open_called = True # First make sure the custom Connection class does as expected. 
- cluster_copy1 = _Cluster() - cluster_copy2 = _Cluster() - cluster_copy3 = _Cluster() - cluster = _Cluster( - copies=[cluster_copy1, cluster_copy2, cluster_copy3]) - connection = ConnectionWithOpen(autoconnect=False, cluster=cluster) + instance_copy1 = _Instance() + instance_copy2 = _Instance() + instance_copy3 = _Instance() + instance = _Instance([instance_copy1, instance_copy2, instance_copy3]) + connection = ConnectionWithOpen(autoconnect=False, instance=instance) self.assertFalse(connection._open_called) - self.assertTrue(connection._cluster is cluster_copy1) - connection = ConnectionWithOpen(autoconnect=True, cluster=cluster) + self.assertTrue(connection._instance is instance_copy1) + connection = ConnectionWithOpen(autoconnect=True, instance=instance) self.assertTrue(connection._open_called) - self.assertTrue(connection._cluster is cluster_copy2) + self.assertTrue(connection._instance is instance_copy2) # Then make sure autoconnect=True is ignored in a pool. size = 1 with _Monkey(MUT, Connection=ConnectionWithOpen): - pool = self._makeOne(size, autoconnect=True, cluster=cluster) + pool = self._makeOne(size, autoconnect=True, instance=instance) for connection in pool._queue.queue: self.assertTrue(isinstance(connection, ConnectionWithOpen)) - self.assertTrue(connection._cluster is cluster_copy3) + self.assertTrue(connection._instance is instance_copy3) self.assertFalse(connection._open_called) - def test_constructor_infers_cluster(self): + def test_constructor_infers_instance(self): from gcloud._testing import _Monkey from gcloud.bigtable.happybase.connection import Connection from gcloud.bigtable.happybase import pool as MUT size = 1 - cluster_copy = _Cluster() - all_copies = [cluster_copy] * size - cluster = _Cluster(copies=all_copies) - get_cluster_calls = [] + instance_copy = _Instance() + all_copies = [instance_copy] * size + instance = _Instance(all_copies) + get_instance_calls = [] - def mock_get_cluster(timeout=None): - get_cluster_calls.append(timeout) - return cluster + def mock_get_instance(timeout=None): + get_instance_calls.append(timeout) + return instance - with _Monkey(MUT, _get_cluster=mock_get_cluster): + with _Monkey(MUT, _get_instance=mock_get_instance): pool = self._makeOne(size) for connection in pool._queue.queue: self.assertTrue(isinstance(connection, Connection)) # We know that the Connection() constructor will - # call cluster.copy(). - self.assertTrue(connection._cluster is cluster_copy) + # call instance.copy(). + self.assertTrue(connection._instance is instance_copy) - self.assertEqual(get_cluster_calls, [None]) + self.assertEqual(get_instance_calls, [None]) def test_constructor_non_integer_size(self): size = None @@ -142,11 +141,11 @@ def _makeOneWithMockQueue(self, queue_return): from gcloud.bigtable.happybase import pool as MUT # We are going to use a fake queue, so we don't want any connections - # or clusters to be created in the constructor. + # or instances to be created in the constructor. 
size = -1 - cluster = object() + instance = object() with _Monkey(MUT, _MIN_POOL_SIZE=size): - pool = self._makeOne(size, cluster=cluster) + pool = self._makeOne(size, instance=instance) pool._queue = _Queue(queue_return) return pool @@ -230,7 +229,7 @@ def open(self): pass -class _Cluster(object): +class _Instance(object): def __init__(self, copies=()): self.copies = list(copies) diff --git a/gcloud/bigtable/happybase/test_table.py b/gcloud/bigtable/happybase/test_table.py index be18ec1bc014..7efa1864d91d 100644 --- a/gcloud/bigtable/happybase/test_table.py +++ b/gcloud/bigtable/happybase/test_table.py @@ -51,8 +51,8 @@ def test_constructor(self): from gcloud.bigtable.happybase import table as MUT name = 'table-name' - cluster = object() - connection = _Connection(cluster) + instance = object() + connection = _Connection(instance) tables_constructed = [] def make_low_level_table(*args, **kwargs): @@ -67,7 +67,7 @@ def make_low_level_table(*args, **kwargs): table_instance, = tables_constructed self.assertEqual(table._low_level_table, table_instance) - self.assertEqual(table_instance.args, (name, cluster)) + self.assertEqual(table_instance.args, (name, instance)) self.assertEqual(table_instance.kwargs, {}) def test_constructor_null_connection(self): @@ -1405,8 +1405,8 @@ def test_many_rows(self): class _Connection(object): - def __init__(self, cluster): - self._cluster = cluster + def __init__(self, instance): + self._instance = instance class _MockLowLevelColumnFamily(object): diff --git a/gcloud/bigtable/instance.py b/gcloud/bigtable/instance.py new file mode 100644 index 000000000000..dec6c9029744 --- /dev/null +++ b/gcloud/bigtable/instance.py @@ -0,0 +1,488 @@ +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""User friendly container for Google Cloud Bigtable Instance.""" + + +import re + +from google.longrunning import operations_pb2 + +from gcloud._helpers import _pb_timestamp_to_datetime +from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_messages_v2_pb2) +from gcloud.bigtable.cluster import Cluster +from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES +from gcloud.bigtable.table import Table + + +_EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' +_INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' + r'instances/(?P[a-z][-a-z0-9]*)$') +_OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/' + r'instances/([a-z][-a-z0-9]*)/' + r'locations/(?P[a-z][-a-z0-9]*)/' + r'operations/(?P\d+)$') +_TYPE_URL_BASE = 'type.googleapis.com/google.bigtable.' +_ADMIN_TYPE_URL_BASE = _TYPE_URL_BASE + 'admin.v2.' 
+_INSTANCE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'CreateInstanceMetadata' +_TYPE_URL_MAP = { + _INSTANCE_CREATE_METADATA: messages_v2_pb2.CreateInstanceMetadata, +} + + +def _prepare_create_request(instance): + """Creates a protobuf request for a CreateInstance request. + + :type instance: :class:`Instance` + :param instance: The instance to be created. + + :rtype: :class:`.messages_v2_pb2.CreateInstanceRequest` + :returns: The CreateInstance request object containing the instance info. + """ + parent_name = ('projects/' + instance._client.project) + message = messages_v2_pb2.CreateInstanceRequest( + parent=parent_name, + instance_id=instance.instance_id, + instance=data_v2_pb2.Instance( + display_name=instance.display_name, + ), + ) + cluster = message.clusters[instance.instance_id] + cluster.name = instance.name + '/clusters/' + instance.instance_id + cluster.location = ( + parent_name + '/locations/' + instance._cluster_location_id) + cluster.serve_nodes = instance._cluster_serve_nodes + return message + + +def _parse_pb_any_to_native(any_val, expected_type=None): + """Convert a serialized "google.protobuf.Any" value to actual type. + + :type any_val: :class:`google.protobuf.any_pb2.Any` + :param any_val: A serialized protobuf value container. + + :type expected_type: str + :param expected_type: (Optional) The type URL we expect ``any_val`` + to have. + + :rtype: object + :returns: The de-serialized object. + :raises: :class:`ValueError ` if the + ``expected_type`` does not match the ``type_url`` on the input. + """ + if expected_type is not None and expected_type != any_val.type_url: + raise ValueError('Expected type: %s, Received: %s' % ( + expected_type, any_val.type_url)) + container_class = _TYPE_URL_MAP[any_val.type_url] + return container_class.FromString(any_val.value) + + +def _process_operation(operation_pb): + """Processes a create protobuf response. + + :type operation_pb: :class:`google.longrunning.operations_pb2.Operation` + :param operation_pb: The long-running operation response from a + Create/Update/Undelete instance request. + + :rtype: (int, str, datetime) + :returns: (operation_id, location_id, operation_begin). + :raises: :class:`ValueError ` if the operation name + doesn't match the :data:`_OPERATION_NAME_RE` regex. + """ + match = _OPERATION_NAME_RE.match(operation_pb.name) + if match is None: + raise ValueError('Operation name was not in the expected ' + 'format after instance creation.', + operation_pb.name) + location_id = match.group('location_id') + operation_id = int(match.group('operation_id')) + + request_metadata = _parse_pb_any_to_native(operation_pb.metadata) + operation_begin = _pb_timestamp_to_datetime( + request_metadata.request_time) + + return operation_id, location_id, operation_begin + + +class Operation(object): + """Representation of a Google API Long-Running Operation. + + In particular, these will be the result of operations on + instances using the Cloud Bigtable API. + + :type op_type: str + :param op_type: The type of operation being performed. Expect + ``create``, ``update`` or ``undelete``. + + :type op_id: int + :param op_id: The ID of the operation. + + :type begin: :class:`datetime.datetime` + :param begin: The time when the operation was started. + + :type location_id: str + :param location_id: ID of the location in which the operation is running + + :type instance: :class:`Instance` + :param instance: The instance that created the operation. 
+ """ + + def __init__(self, op_type, op_id, begin, location_id, instance=None): + self.op_type = op_type + self.op_id = op_id + self.begin = begin + self.location_id = location_id + self._instance = instance + self._complete = False + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.op_type == self.op_type and + other.op_id == self.op_id and + other.begin == self.begin and + other.location_id == self.location_id and + other._instance == self._instance and + other._complete == self._complete) + + def __ne__(self, other): + return not self.__eq__(other) + + def finished(self): + """Check if the operation has finished. + + :rtype: bool + :returns: A boolean indicating if the current operation has completed. + :raises: :class:`ValueError ` if the operation + has already completed. + """ + if self._complete: + raise ValueError('The operation has completed.') + + operation_name = ( + 'operations/%s/locations/%s/operations/%d' % + (self._instance.name, self.location_id, self.op_id)) + request_pb = operations_pb2.GetOperationRequest(name=operation_name) + # We expect a `google.longrunning.operations_pb2.Operation`. + operation_pb = self._instance._client._operations_stub.GetOperation( + request_pb, self._instance._client.timeout_seconds) + + if operation_pb.done: + self._complete = True + return True + else: + return False + + +class Instance(object): + """Representation of a Google Cloud Bigtable Instance. + + We can use a :class:`Instance` to: + + * :meth:`reload` itself + * :meth:`create` itself + * :meth:`update` itself + * :meth:`delete` itself + * :meth:`undelete` itself + + .. note:: + + For now, we leave out the ``default_storage_type`` (an enum) + which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`. + + :type instance_id: str + :param instance_id: The ID of the instance. + + :type client: :class:`Client ` + :param client: The client that owns the instance. Provides + authorization and a project ID. + + :type location_id: str + :param location_id: ID of the location in which the instance will be + created. Required for instances which do not yet + exist. + + :type display_name: str + :param display_name: (Optional) The display name for the instance in the + Cloud Console UI. (Must be between 4 and 30 + characters.) If this value is not set in the + constructor, will fall back to the instance ID. + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the instance's + cluster; used to set up the instance's cluster. + """ + + def __init__(self, instance_id, client, + location_id=_EXISTING_INSTANCE_LOCATION_ID, + display_name=None, + serve_nodes=DEFAULT_SERVE_NODES): + self.instance_id = instance_id + self.display_name = display_name or instance_id + self._cluster_location_id = location_id + self._cluster_serve_nodes = serve_nodes + self._client = client + + def _update_from_pb(self, instance_pb): + """Refresh self from the server-provided protobuf. + + Helper for :meth:`from_pb` and :meth:`reload`. + """ + if not instance_pb.display_name: # Simple field (string) + raise ValueError('Instance protobuf does not contain display_name') + self.display_name = instance_pb.display_name + + @classmethod + def from_pb(cls, instance_pb, client): + """Creates a instance instance from a protobuf. + + :type instance_pb: :class:`instance_pb2.Instance` + :param instance_pb: A instance protobuf object. + + :type client: :class:`Client ` + :param client: The client that owns the instance. 
+ + :rtype: :class:`Instance` + :returns: The instance parsed from the protobuf response. + :raises: :class:`ValueError ` if the instance + name does not match + ``projects/{project}/instances/{instance_id}`` + or if the parsed project ID does not match the project ID + on the client. + """ + match = _INSTANCE_NAME_RE.match(instance_pb.name) + if match is None: + raise ValueError('Instance protobuf name was not in the ' + 'expected format.', instance_pb.name) + if match.group('project') != client.project: + raise ValueError('Project ID on instance does not match the ' + 'project ID on the client') + instance_id = match.group('instance_id') + + result = cls(instance_id, client, _EXISTING_INSTANCE_LOCATION_ID) + result._update_from_pb(instance_pb) + return result + + def copy(self): + """Make a copy of this instance. + + Copies the local data stored as simple types and copies the client + attached to this instance. + + :rtype: :class:`.Instance` + :returns: A copy of the current instance. + """ + new_client = self._client.copy() + return self.__class__(self.instance_id, new_client, + self._cluster_location_id, + display_name=self.display_name) + + @property + def name(self): + """Instance name used in requests. + + .. note:: + This property will not change if ``instance_id`` does not, + but the return value is not cached. + + The instance name is of the form + + ``"projects/{project}/instances/{instance_id}"`` + + :rtype: str + :returns: The instance name. + """ + return self._client.project_name + '/instances/' + self.instance_id + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + # NOTE: This does not compare the configuration values, such as + # the display_name. Instead, it only compares + # identifying values instance ID and client. This is + # intentional, since the same instance can be in different states + # if not synchronized. Instances with similar instance + # settings but different clients can't be used in the same way. + return (other.instance_id == self.instance_id and + other._client == self._client) + + def __ne__(self, other): + return not self.__eq__(other) + + def reload(self): + """Reload the metadata for this instance.""" + request_pb = messages_v2_pb2.GetInstanceRequest(name=self.name) + # We expect `data_v2_pb2.Instance`. + instance_pb = self._client._instance_stub.GetInstance( + request_pb, self._client.timeout_seconds) + + # NOTE: _update_from_pb does not check that the project and + # instance ID on the response match the request. + self._update_from_pb(instance_pb) + + def create(self): + """Create this instance. + + .. note:: + + Uses the ``project`` and ``instance_id`` on the current + :class:`Instance` in addition to the ``display_name``. + To change them before creating, reset the values via + + .. code:: python + + instance.display_name = 'New display name' + instance.instance_id = 'i-changed-my-mind' + + before calling :meth:`create`. + + :rtype: :class:`Operation` + :returns: The long-running operation corresponding to the + create operation. + """ + request_pb = _prepare_create_request(self) + # We expect a `google.longrunning.operations_pb2.Operation`. + operation_pb = self._client._instance_stub.CreateInstance( + request_pb, self._client.timeout_seconds) + + op_id, loc_id, op_begin = _process_operation(operation_pb) + return Operation('create', op_id, op_begin, loc_id, instance=self) + + def update(self): + """Update this instance. + + .. note:: + + Updates the ``display_name``. 
To change that value before + updating, reset its values via + + .. code:: python + + instance.display_name = 'New display name' + + before calling :meth:`update`. + """ + request_pb = data_v2_pb2.Instance( + name=self.name, + display_name=self.display_name, + ) + # Ignore the expected `data_v2_pb2.Instance`. + self._client._instance_stub.UpdateInstance( + request_pb, self._client.timeout_seconds) + + def delete(self): + """Delete this instance. + + Marks a instance and all of its tables for permanent deletion + in 7 days. + + Immediately upon completion of the request: + + * Billing will cease for all of the instance's reserved resources. + * The instance's ``delete_time`` field will be set 7 days in + the future. + + Soon afterward: + + * All tables within the instance will become unavailable. + + Prior to the instance's ``delete_time``: + + * The instance can be recovered with a call to ``UndeleteInstance``. + * All other attempts to modify or delete the instance will be rejected. + + At the instance's ``delete_time``: + + * The instance and **all of its tables** will immediately and + irrevocably disappear from the API, and their data will be + permanently deleted. + """ + request_pb = messages_v2_pb2.DeleteInstanceRequest(name=self.name) + # We expect a `google.protobuf.empty_pb2.Empty` + self._client._instance_stub.DeleteInstance( + request_pb, self._client.timeout_seconds) + + def cluster(self, cluster_id, serve_nodes=3): + """Factory to create a cluster associated with this client. + + :type cluster_id: str + :param cluster_id: The ID of the cluster. + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the cluster. + Defaults to 3. + + :rtype: :class:`.Cluster` + :returns: The cluster owned by this client. + """ + return Cluster(cluster_id, self, serve_nodes=serve_nodes) + + def list_clusters(self): + """Lists clusters in this instance. + + :rtype: tuple + :returns: A pair of results, the first is a list of :class:`.Cluster` s + returned and the second is a list of strings (the failed + locations in the request). + """ + request_pb = messages_v2_pb2.ListClustersRequest(parent=self.name) + # We expect a `.cluster_messages_v1_pb2.ListClustersResponse` + list_clusters_response = self._client._instance_stub.ListClusters( + request_pb, self._client.timeout_seconds) + + failed_locations = [ + location for location in list_clusters_response.failed_locations] + clusters = [Cluster.from_pb(cluster_pb, self) + for cluster_pb in list_clusters_response.clusters] + return clusters, failed_locations + + def table(self, table_id): + """Factory to create a table associated with this instance. + + :type table_id: str + :param table_id: The ID of the table. + + :rtype: :class:`Table ` + :returns: The table owned by this instance. + """ + return Table(table_id, self) + + def list_tables(self): + """List the tables in this instance. + + :rtype: list of :class:`Table ` + :returns: The list of tables owned by the instance. + :raises: :class:`ValueError ` if one of the + returned tables has a name that is not of the expected format. 
+ """ + request_pb = table_messages_v2_pb2.ListTablesRequest(parent=self.name) + # We expect a `table_messages_v2_pb2.ListTablesResponse` + table_list_pb = self._client._table_stub.ListTables( + request_pb, self._client.timeout_seconds) + + result = [] + for table_pb in table_list_pb.tables: + table_prefix = self.name + '/tables/' + if not table_pb.name.startswith(table_prefix): + raise ValueError('Table name %s not of expected format' % ( + table_pb.name,)) + table_id = table_pb.name[len(table_prefix):] + result.append(self.table(table_id)) + + return result diff --git a/gcloud/bigtable/read-rows-acceptance-test.json b/gcloud/bigtable/read-rows-acceptance-test.json new file mode 100644 index 000000000000..4973831f4979 --- /dev/null +++ b/gcloud/bigtable/read-rows-acceptance-test.json @@ -0,0 +1,1178 @@ +{ + "tests": [ + { + "name": "invalid - no commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before commit", + "chunks": [ + "commit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before value", + "chunks": [ + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new col family must specify qualifier", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "bare commit implies ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "simple row with timestamp", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "missing timestamp, implied ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "empty cell value", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n 
value: \"C\"\n\u003e\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "two unsplit cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two qualifiers", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two families", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "with labels", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nlabels: \"L_2\"\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "L_2", + "error": false + } + ] + }, + { + "name": "split cell, bare commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "split cell", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": 
false + } + ] + }, + { + "name": "split four ways", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"l\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"ue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "L", + "error": false + } + ] + }, + { + "name": "two split cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier multi-split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-family split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { 
+ "name": "invalid - no commit between rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no commit after first row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - last row missing commit", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - duplicate row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new row missing row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "two rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + 
"ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows implicit timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows empty value", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, one with multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"F\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "E", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "F", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells, multiple families", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: 
\u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"M\"\n\u003e\nqualifier: \u003c\n value: \"O\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"N\"\n\u003e\nqualifier: \u003c\n value: \"P\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "M", + "qual": "O", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "N", + "qual": "P", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, four cells, 2 labels", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nlabels: \"L_3\"\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "timestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "L_3", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows with splits, same timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - bare reset", + "chunks": [ + "reset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - bad reset, no commit", + "chunks": [ + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + 
"qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - missing key after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "no data after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n" + ], + "results": null + }, + { + "name": "simple reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new val", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new qual", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "reset with splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: 
\"value-VAL_2\"\ncommit_row: false\n", + "timestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two resets", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset then two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "B", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset in between chunks", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - reset with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\nreset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + 
"ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - commit with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "empty cell chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + } + ] +} \ No newline at end of file diff --git a/gcloud/bigtable/row.py b/gcloud/bigtable/row.py index cb9ce2e67e3d..aad7dbecad0e 100644 --- a/gcloud/bigtable/row.py +++ b/gcloud/bigtable/row.py @@ -22,9 +22,10 @@ from gcloud._helpers import _datetime_from_microseconds from gcloud._helpers import _microseconds_from_datetime from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 -from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) +from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) _PACK_I64 = struct.Struct('>q').pack @@ -133,13 +134,13 @@ def _set_cell(self, column_family_id, column, value, timestamp=None, # Truncate to millisecond granularity. timestamp_micros -= (timestamp_micros % 1000) - mutation_val = data_pb2.Mutation.SetCell( + mutation_val = data_v2_pb2.Mutation.SetCell( family_name=column_family_id, column_qualifier=column, timestamp_micros=timestamp_micros, value=value, ) - mutation_pb = data_pb2.Mutation(set_cell=mutation_val) + mutation_pb = data_v2_pb2.Mutation(set_cell=mutation_val) self._get_mutations(state).append(mutation_pb) def _delete(self, state=None): @@ -155,8 +156,8 @@ def _delete(self, state=None): :param state: (Optional) The state that is passed along to :meth:`_get_mutations`. 
""" - mutation_val = data_pb2.Mutation.DeleteFromRow() - mutation_pb = data_pb2.Mutation(delete_from_row=mutation_val) + mutation_val = data_v2_pb2.Mutation.DeleteFromRow() + mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val) self._get_mutations(state).append(mutation_pb) def _delete_cells(self, column_family_id, columns, time_range=None, @@ -187,10 +188,10 @@ def _delete_cells(self, column_family_id, columns, time_range=None, """ mutations_list = self._get_mutations(state) if columns is self.ALL_COLUMNS: - mutation_val = data_pb2.Mutation.DeleteFromFamily( + mutation_val = data_v2_pb2.Mutation.DeleteFromFamily( family_name=column_family_id, ) - mutation_pb = data_pb2.Mutation(delete_from_family=mutation_val) + mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val) mutations_list.append(mutation_pb) else: delete_kwargs = {} @@ -206,9 +207,9 @@ def _delete_cells(self, column_family_id, columns, time_range=None, family_name=column_family_id, column_qualifier=column, ) - mutation_val = data_pb2.Mutation.DeleteFromColumn( + mutation_val = data_v2_pb2.Mutation.DeleteFromColumn( **delete_kwargs) - mutation_pb = data_pb2.Mutation( + mutation_pb = data_v2_pb2.Mutation( delete_from_column=mutation_val) to_append.append(mutation_pb) @@ -388,13 +389,13 @@ def commit(self): if num_mutations > MAX_MUTATIONS: raise ValueError('%d total mutations exceed the maximum allowable ' '%d.' % (num_mutations, MAX_MUTATIONS)) - request_pb = messages_pb2.MutateRowRequest( + request_pb = messages_v2_pb2.MutateRowRequest( table_name=self._table.name, row_key=self._row_key, mutations=mutations_list, ) # We expect a `google.protobuf.empty_pb2.Empty` - client = self._table._cluster._client + client = self._table._instance._client client._data_stub.MutateRow(request_pb, client.timeout_seconds) self.clear() @@ -503,15 +504,15 @@ def commit(self): 'mutations and %d false mutations.' % ( MAX_MUTATIONS, num_true_mutations, num_false_mutations)) - request_pb = messages_pb2.CheckAndMutateRowRequest( + request_pb = messages_v2_pb2.CheckAndMutateRowRequest( table_name=self._table.name, row_key=self._row_key, predicate_filter=self._filter.to_pb(), true_mutations=true_mutations, false_mutations=false_mutations, ) - # We expect a `.messages_pb2.CheckAndMutateRowResponse` - client = self._table._cluster._client + # We expect a `.messages_v2_pb2.CheckAndMutateRowResponse` + client = self._table._instance._client resp = client._data_stub.CheckAndMutateRow( request_pb, client.timeout_seconds) self.clear() @@ -700,9 +701,10 @@ def append_cell_value(self, column_family_id, column, value): """ column = _to_bytes(column) value = _to_bytes(value) - rule_pb = data_pb2.ReadModifyWriteRule(family_name=column_family_id, - column_qualifier=column, - append_value=value) + rule_pb = data_v2_pb2.ReadModifyWriteRule( + family_name=column_family_id, + column_qualifier=column, + append_value=value) self._rule_pb_list.append(rule_pb) def increment_cell_value(self, column_family_id, column, int_value): @@ -736,9 +738,10 @@ def increment_cell_value(self, column_family_id, column, int_value): will fail. 
""" column = _to_bytes(column) - rule_pb = data_pb2.ReadModifyWriteRule(family_name=column_family_id, - column_qualifier=column, - increment_amount=int_value) + rule_pb = data_v2_pb2.ReadModifyWriteRule( + family_name=column_family_id, + column_qualifier=column, + increment_amount=int_value) self._rule_pb_list.append(rule_pb) def commit(self): @@ -791,13 +794,13 @@ def commit(self): if num_mutations > MAX_MUTATIONS: raise ValueError('%d total append mutations exceed the maximum ' 'allowable %d.' % (num_mutations, MAX_MUTATIONS)) - request_pb = messages_pb2.ReadModifyWriteRowRequest( + request_pb = messages_v2_pb2.ReadModifyWriteRowRequest( table_name=self._table.name, row_key=self._row_key, rules=self._rule_pb_list, ) - # We expect a `.data_pb2.Row` - client = self._table._cluster._client + # We expect a `.data_v2_pb2.Row` + client = self._table._instance._client row_response = client._data_stub.ReadModifyWriteRow( request_pb, client.timeout_seconds) @@ -811,7 +814,7 @@ def commit(self): def _parse_rmw_row_response(row_response): """Parses the response to a ``ReadModifyWriteRow`` request. - :type row_response: :class:`.data_pb2.Row` + :type row_response: :class:`.data_v2_pb2.Row` :param row_response: The response row (with only modified cells) from a ``ReadModifyWriteRow`` request. @@ -842,7 +845,7 @@ def _parse_rmw_row_response(row_response): } """ result = {} - for column_family in row_response.families: + for column_family in row_response.row.families: column_family_id, curr_family = _parse_family_pb(column_family) result[column_family_id] = curr_family return result @@ -851,7 +854,7 @@ def _parse_rmw_row_response(row_response): def _parse_family_pb(family_pb): """Parses a Family protobuf into a dictionary. - :type family_pb: :class:`._generated.bigtable_data_pb2.Family` + :type family_pb: :class:`._generated_v2.data_pb2.Family` :param family_pb: A protobuf :rtype: tuple diff --git a/gcloud/bigtable/row_data.py b/gcloud/bigtable/row_data.py index e64a242f8507..3f4490097e68 100644 --- a/gcloud/bigtable/row_data.py +++ b/gcloud/bigtable/row_data.py @@ -44,7 +44,7 @@ def __init__(self, value, timestamp, labels=()): def from_pb(cls, cell_pb): """Create a new cell from a Cell protobuf. - :type cell_pb: :class:`._generated.bigtable_data_pb2.Cell` + :type cell_pb: :class:`._generated_v2.data_pb2.Cell` :param cell_pb: The protobuf to convert. :rtype: :class:`Cell` @@ -67,6 +67,49 @@ def __ne__(self, other): return not self.__eq__(other) +class PartialCellData(object): + """Representation of partial cell in a Google Cloud Bigtable Table. + + These are expected to be updated directly from a + :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` + + :type row_key: bytes + :param row_key: The key for the row holding the (partial) cell. + + :type family_name: str + :param family_name: The family name of the (partial) cell. + + :type qualifier: bytes + :param qualifier: The column qualifier of the (partial) cell. + + :type timestamp_micros: int + :param timestamp_micros: The timestamp (in microsecods) of the + (partial) cell. + + :type labels: list of str + :param labels: labels assigned to the (partial) cell + + :type value: bytes + :param value: The (accumulated) value of the (partial) cell. 
+ """ + def __init__(self, row_key, family_name, qualifier, timestamp_micros, + labels=(), value=b''): + self.row_key = row_key + self.family_name = family_name + self.qualifier = qualifier + self.timestamp_micros = timestamp_micros + self.labels = labels + self.value = value + + def append_value(self, value): + """Append bytes from a new chunk to value. + + :type value: bytes + :param value: bytes to append + """ + self.value += value + + class PartialRowData(object): """Representation of partial row in a Google Cloud Bigtable Table. @@ -80,15 +123,11 @@ class PartialRowData(object): def __init__(self, row_key): self._row_key = row_key self._cells = {} - self._committed = False - self._chunks_encountered = False def __eq__(self, other): if not isinstance(other, self.__class__): return False return (other._row_key == self._row_key and - other._committed == self._committed and - other._chunks_encountered == self._chunks_encountered and other._cells == self._cells) def __ne__(self, other): @@ -132,119 +171,13 @@ def row_key(self): """ return self._row_key - @property - def committed(self): - """Getter for the committed status of the (partial) row. - - :rtype: bool - :returns: The committed status of the (partial) row. - """ - return self._committed - - def clear(self): - """Clears all cells that have been added.""" - self._committed = False - self._chunks_encountered = False - self._cells.clear() - - def _handle_commit_row(self, chunk, index, last_chunk_index): - """Handles a ``commit_row`` chunk. - - :type chunk: ``ReadRowsResponse.Chunk`` - :param chunk: The chunk being handled. - - :type index: int - :param index: The current index of the chunk. - :type last_chunk_index: int - :param last_chunk_index: The index of the last chunk. +class InvalidReadRowsResponse(RuntimeError): + """Exception raised to to invalid response data from back-end.""" - :raises: :class:`ValueError ` if the value of - ``commit_row`` is :data:`False` or if the chunk passed is not - the last chunk in a response. - """ - # NOTE: We assume the caller has checked that the ``ONEOF`` property - # for ``chunk`` is ``commit_row``. - if not chunk.commit_row: - raise ValueError('Received commit_row that was False.') - - if index != last_chunk_index: - raise ValueError('Commit row chunk was not the last chunk') - else: - self._committed = True - def _handle_reset_row(self, chunk): - """Handles a ``reset_row`` chunk. - - :type chunk: ``ReadRowsResponse.Chunk`` - :param chunk: The chunk being handled. - - :raises: :class:`ValueError ` if the value of - ``reset_row`` is :data:`False` - """ - # NOTE: We assume the caller has checked that the ``ONEOF`` property - # for ``chunk`` is ``reset_row``. - if not chunk.reset_row: - raise ValueError('Received reset_row that was False.') - - self.clear() - - def _handle_row_contents(self, chunk): - """Handles a ``row_contents`` chunk. - - :type chunk: ``ReadRowsResponse.Chunk`` - :param chunk: The chunk being handled. - """ - # NOTE: We assume the caller has checked that the ``ONEOF`` property - # for ``chunk`` is ``row_contents``. 
- - # chunk.row_contents is ._generated.bigtable_data_pb2.Family - column_family_id = chunk.row_contents.name - column_family_dict = self._cells.setdefault(column_family_id, {}) - for column in chunk.row_contents.columns: - cells = [Cell.from_pb(cell) for cell in column.cells] - - column_name = column.qualifier - column_cells = column_family_dict.setdefault(column_name, []) - column_cells.extend(cells) - - def update_from_read_rows(self, read_rows_response_pb): - """Updates the current row from a ``ReadRows`` response. - - :type read_rows_response_pb: - :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` - :param read_rows_response_pb: A response streamed back as part of a - ``ReadRows`` request. - - :raises: :class:`ValueError ` if the current - partial row has already been committed, if the row key on the - response doesn't match the current one or if there is a chunk - encountered with an unexpected ``ONEOF`` protobuf property. - """ - if self._committed: - raise ValueError('The row has been committed') - - if read_rows_response_pb.row_key != self.row_key: - raise ValueError('Response row key (%r) does not match current ' - 'one (%r).' % (read_rows_response_pb.row_key, - self.row_key)) - - last_chunk_index = len(read_rows_response_pb.chunks) - 1 - for index, chunk in enumerate(read_rows_response_pb.chunks): - chunk_property = chunk.WhichOneof('chunk') - if chunk_property == 'row_contents': - self._handle_row_contents(chunk) - elif chunk_property == 'reset_row': - self._handle_reset_row(chunk) - elif chunk_property == 'commit_row': - self._handle_commit_row(chunk, index, last_chunk_index) - else: - # NOTE: This includes chunk_property == None since we always - # want a value to be set - raise ValueError('Unexpected chunk property: %s' % ( - chunk_property,)) - - self._chunks_encountered = True +class InvalidChunk(RuntimeError): + """Exception raised to to invalid chunk data from back-end.""" class PartialRowsData(object): @@ -255,11 +188,27 @@ class PartialRowsData(object): :param response_iterator: A streaming iterator returned from a ``ReadRows`` request. """ + START = "Start" # No responses yet processed. + NEW_ROW = "New row" # No cells yet complete for row + ROW_IN_PROGRESS = "Row in progress" # Some cells complete for row + CELL_IN_PROGRESS = "Cell in progress" # Incomplete cell for row def __init__(self, response_iterator): - # We expect an iterator of `data_messages_pb2.ReadRowsResponse` self._response_iterator = response_iterator + # Fully-processed rows, keyed by `row_key` self._rows = {} + # Counter for responses pulled from iterator + self._counter = 0 + # Maybe cached from previous response + self._last_scanned_row_key = None + # In-progress row, unset until first response, after commit/reset + self._row = None + # Last complete row, unset until first commit + self._previous_row = None + # In-progress cell, unset until first response, after completion + self._cell = None + # Last complete cell, unset until first completion, after new row + self._previous_cell = None def __eq__(self, other): if not isinstance(other, self.__class__): @@ -269,12 +218,32 @@ def __eq__(self, other): def __ne__(self, other): return not self.__eq__(other) + @property + def state(self): + """State machine state. + + :rtype: str + :returns: name of state corresponding to currrent row / chunk + processing. 
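In normal use the state machine is driven indirectly through ``consume_next`` / ``consume_all`` rather than inspected. A hedged consumption sketch: ``response_iterator`` stands for the stream returned by a ``ReadRows`` gRPC call, and the ``cells`` property is assumed from the unchanged part of ``PartialRowData``.

.. code:: python

    rows_data = PartialRowsData(response_iterator)
    rows_data.consume_all()        # calls consume_next() until StopIteration
    for row_key, row in rows_data.rows.items():
        # row.cells: {family_name: {qualifier: [Cell, ...]}}
        print(row_key, row.cells)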
+ """ + if self._last_scanned_row_key is None: + return self.START + if self._row is None: + assert self._cell is None + assert self._previous_cell is None + return self.NEW_ROW + if self._cell is not None: + return self.CELL_IN_PROGRESS + if self._previous_cell is not None: + return self.ROW_IN_PROGRESS + return self.NEW_ROW # row added, no chunk yet processed + @property def rows(self): """Property returning all rows accumulated from the stream. :rtype: dict - :returns: Dictionary of :class:`PartialRowData`. + :returns: row_key -> :class:`PartialRowData`. """ # NOTE: To avoid duplicating large objects, this is just the # mutable private data. @@ -285,21 +254,55 @@ def cancel(self): self._response_iterator.cancel() def consume_next(self): - """Consumes the next ``ReadRowsResponse`` from the stream. + """Consume the next ``ReadRowsResponse`` from the stream. - Parses the response and stores it as a :class:`PartialRowData` - in a dictionary owned by this object. - - :raises: :class:`StopIteration ` if the - response iterator has no more responses to stream. + Parse the response and its chunks into a new/existing row in + :attr:`_rows` """ - read_rows_response = self._response_iterator.next() - row_key = read_rows_response.row_key - partial_row = self._rows.get(row_key) - if partial_row is None: - partial_row = self._rows[row_key] = PartialRowData(row_key) - # NOTE: This is not atomic in the case of failures. - partial_row.update_from_read_rows(read_rows_response) + response = six.next(self._response_iterator) + self._counter += 1 + + if self._last_scanned_row_key is None: # first response + if response.last_scanned_row_key: + raise InvalidReadRowsResponse() + + self._last_scanned_row_key = response.last_scanned_row_key + + row = self._row + cell = self._cell + + for chunk in response.chunks: + + self._validate_chunk(chunk) + + if chunk.reset_row: + row = self._row = None + cell = self._cell = self._previous_cell = None + continue + + if row is None: + row = self._row = PartialRowData(chunk.row_key) + + if cell is None: + cell = self._cell = PartialCellData( + chunk.row_key, + chunk.family_name.value, + chunk.qualifier.value, + chunk.timestamp_micros, + chunk.labels, + chunk.value) + self._copy_from_previous(cell) + else: + cell.append_value(chunk.value) + + if chunk.commit_row: + self._save_current_row() + row = cell = None + continue + + if chunk.value_size == 0: + self._save_current_cell() + cell = None def consume_all(self, max_loops=None): """Consume the streamed responses until there are no more. @@ -321,3 +324,119 @@ def consume_all(self, max_loops=None): self.consume_next() except StopIteration: break + + @staticmethod + def _validate_chunk_status(chunk): + """Helper for :meth:`_validate_chunk_row_in_progress`, etc.""" + # No reseet with other keys + if chunk.reset_row: + _raise_if(chunk.row_key) + _raise_if(chunk.HasField('family_name')) + _raise_if(chunk.HasField('qualifier')) + _raise_if(chunk.timestamp_micros) + _raise_if(chunk.labels) + _raise_if(chunk.value_size) + _raise_if(chunk.value) + # No commit with value size + _raise_if(chunk.commit_row and chunk.value_size > 0) + # No negative value_size (inferred as a general constraint). + _raise_if(chunk.value_size < 0) + + def _validate_chunk_new_row(self, chunk): + """Helper for :meth:`_validate_chunk`.""" + assert self.state == self.NEW_ROW + _raise_if(chunk.reset_row) + _raise_if(not chunk.row_key) + _raise_if(not chunk.family_name) + _raise_if(not chunk.qualifier) + # This constraint is not enforced in the Go example. 
+ _raise_if(chunk.value_size > 0 and chunk.commit_row is not False) + # This constraint is from the Go example, not the spec. + _raise_if(self._previous_row is not None and + chunk.row_key <= self._previous_row.row_key) + + def _same_as_previous(self, chunk): + """Helper for :meth:`_validate_chunk_row_in_progress`""" + previous = self._previous_cell + return (chunk.row_key == previous.row_key and + chunk.family_name == previous.family_name and + chunk.qualifier == previous.qualifier and + chunk.labels == previous.labels) + + def _validate_chunk_row_in_progress(self, chunk): + """Helper for :meth:`_validate_chunk`""" + assert self.state == self.ROW_IN_PROGRESS + self._validate_chunk_status(chunk) + if not chunk.HasField('commit_row') and not chunk.reset_row: + _raise_if(not chunk.timestamp_micros or not chunk.value) + _raise_if(chunk.row_key and + chunk.row_key != self._row.row_key) + _raise_if(chunk.HasField('family_name') and + not chunk.HasField('qualifier')) + previous = self._previous_cell + _raise_if(self._same_as_previous(chunk) and + chunk.timestamp_micros <= previous.timestamp_micros) + + def _validate_chunk_cell_in_progress(self, chunk): + """Helper for :meth:`_validate_chunk`""" + assert self.state == self.CELL_IN_PROGRESS + self._validate_chunk_status(chunk) + self._copy_from_current(chunk) + + def _validate_chunk(self, chunk): + """Helper for :meth:`consume_next`.""" + if self.state == self.NEW_ROW: + self._validate_chunk_new_row(chunk) + if self.state == self.ROW_IN_PROGRESS: + self._validate_chunk_row_in_progress(chunk) + if self.state == self.CELL_IN_PROGRESS: + self._validate_chunk_cell_in_progress(chunk) + + def _save_current_cell(self): + """Helper for :meth:`consume_next`.""" + row, cell = self._row, self._cell + family = row._cells.setdefault(cell.family_name, {}) + qualified = family.setdefault(cell.qualifier, []) + complete = Cell.from_pb(self._cell) + qualified.append(complete) + self._cell, self._previous_cell = None, cell + + def _copy_from_current(self, chunk): + """Helper for :meth:`consume_next`.""" + current = self._cell + if current is not None: + if not chunk.row_key: + chunk.row_key = current.row_key + if not chunk.HasField('family_name'): + chunk.family_name.value = current.family_name + if not chunk.HasField('qualifier'): + chunk.qualifier.value = current.qualifier + if not chunk.timestamp_micros: + chunk.timestamp_micros = current.timestamp_micros + if not chunk.labels: + chunk.labels.extend(current.labels) + + def _copy_from_previous(self, cell): + """Helper for :meth:`consume_next`.""" + previous = self._previous_cell + if previous is not None: + if not cell.row_key: + cell.row_key = previous.row_key + if not cell.family_name: + cell.family_name = previous.family_name + if not cell.qualifier: + cell.qualifier = previous.qualifier + + def _save_current_row(self): + """Helper for :meth:`consume_next`.""" + if self._cell: + self._save_current_cell() + self._rows[self._row.row_key] = self._row + self._row, self._previous_row = None, self._row + self._previous_cell = None + + +def _raise_if(predicate, *args): + """Helper for validation methods.""" + if predicate: + raise InvalidChunk(*args) diff --git a/gcloud/bigtable/row_filters.py b/gcloud/bigtable/row_filters.py index b7a1388b3a09..f76615ba5ea8 100644 --- a/gcloud/bigtable/row_filters.py +++ b/gcloud/bigtable/row_filters.py @@ -17,7 +17,8 @@ from gcloud._helpers import _microseconds_from_datetime from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 
+from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) class RowFilter(object): @@ -65,10 +66,10 @@ class SinkFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(sink=self.flag) + return data_v2_pb2.RowFilter(sink=self.flag) class PassAllFilter(_BoolFilter): @@ -83,10 +84,10 @@ class PassAllFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(pass_all_filter=self.flag) + return data_v2_pb2.RowFilter(pass_all_filter=self.flag) class BlockAllFilter(_BoolFilter): @@ -100,10 +101,10 @@ class BlockAllFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(block_all_filter=self.flag) + return data_v2_pb2.RowFilter(block_all_filter=self.flag) class _RegexFilter(RowFilter): @@ -153,10 +154,10 @@ class RowKeyRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(row_key_regex_filter=self.regex) + return data_v2_pb2.RowFilter(row_key_regex_filter=self.regex) class RowSampleFilter(RowFilter): @@ -178,10 +179,10 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(row_sample_filter=self.sample) + return data_v2_pb2.RowFilter(row_sample_filter=self.sample) class FamilyNameRegexFilter(_RegexFilter): @@ -202,10 +203,10 @@ class FamilyNameRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(family_name_regex_filter=self.regex) + return data_v2_pb2.RowFilter(family_name_regex_filter=self.regex) class ColumnQualifierRegexFilter(_RegexFilter): @@ -232,10 +233,10 @@ class ColumnQualifierRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(column_qualifier_regex_filter=self.regex) + return data_v2_pb2.RowFilter(column_qualifier_regex_filter=self.regex) class TimestampRange(object): @@ -266,7 +267,7 @@ def __ne__(self, other): def to_pb(self): """Converts the :class:`TimestampRange` to a protobuf. - :rtype: :class:`.data_pb2.TimestampRange` + :rtype: :class:`.data_v2_pb2.TimestampRange` :returns: The converted current object. 
""" timestamp_range_kwargs = {} @@ -276,7 +277,7 @@ def to_pb(self): if self.end is not None: timestamp_range_kwargs['end_timestamp_micros'] = ( _microseconds_from_datetime(self.end)) - return data_pb2.TimestampRange(**timestamp_range_kwargs) + return data_v2_pb2.TimestampRange(**timestamp_range_kwargs) class TimestampRangeFilter(RowFilter): @@ -300,10 +301,11 @@ def to_pb(self): First converts the ``range_`` on the current object to a protobuf and then uses it in the ``timestamp_range_filter`` field. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(timestamp_range_filter=self.range_.to_pb()) + return data_v2_pb2.RowFilter( + timestamp_range_filter=self.range_.to_pb()) class ColumnRangeFilter(RowFilter): @@ -375,28 +377,28 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - First converts to a :class:`.data_pb2.ColumnRange` and then uses it + First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it in the ``column_range_filter`` field. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ column_range_kwargs = {'family_name': self.column_family_id} if self.start_column is not None: if self.inclusive_start: - key = 'start_qualifier_inclusive' + key = 'start_qualifier_closed' else: - key = 'start_qualifier_exclusive' + key = 'start_qualifier_open' column_range_kwargs[key] = _to_bytes(self.start_column) if self.end_column is not None: if self.inclusive_end: - key = 'end_qualifier_inclusive' + key = 'end_qualifier_closed' else: - key = 'end_qualifier_exclusive' + key = 'end_qualifier_open' column_range_kwargs[key] = _to_bytes(self.end_column) - column_range = data_pb2.ColumnRange(**column_range_kwargs) - return data_pb2.RowFilter(column_range_filter=column_range) + column_range = data_v2_pb2.ColumnRange(**column_range_kwargs) + return data_v2_pb2.RowFilter(column_range_filter=column_range) class ValueRegexFilter(_RegexFilter): @@ -423,10 +425,10 @@ class ValueRegexFilter(_RegexFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(value_regex_filter=self.regex) + return data_v2_pb2.RowFilter(value_regex_filter=self.regex) class ValueRangeFilter(RowFilter): @@ -492,28 +494,28 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - First converts to a :class:`.data_pb2.ValueRange` and then uses + First converts to a :class:`.data_v2_pb2.ValueRange` and then uses it to create a row filter protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. 
""" value_range_kwargs = {} if self.start_value is not None: if self.inclusive_start: - key = 'start_value_inclusive' + key = 'start_value_closed' else: - key = 'start_value_exclusive' + key = 'start_value_open' value_range_kwargs[key] = _to_bytes(self.start_value) if self.end_value is not None: if self.inclusive_end: - key = 'end_value_inclusive' + key = 'end_value_closed' else: - key = 'end_value_exclusive' + key = 'end_value_open' value_range_kwargs[key] = _to_bytes(self.end_value) - value_range = data_pb2.ValueRange(**value_range_kwargs) - return data_pb2.RowFilter(value_range_filter=value_range) + value_range = data_v2_pb2.ValueRange(**value_range_kwargs) + return data_v2_pb2.RowFilter(value_range_filter=value_range) class _CellCountFilter(RowFilter): @@ -545,10 +547,11 @@ class CellsRowOffsetFilter(_CellCountFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(cells_per_row_offset_filter=self.num_cells) + return data_v2_pb2.RowFilter( + cells_per_row_offset_filter=self.num_cells) class CellsRowLimitFilter(_CellCountFilter): @@ -561,10 +564,10 @@ class CellsRowLimitFilter(_CellCountFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells) + return data_v2_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells) class CellsColumnLimitFilter(_CellCountFilter): @@ -579,10 +582,11 @@ class CellsColumnLimitFilter(_CellCountFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(cells_per_column_limit_filter=self.num_cells) + return data_v2_pb2.RowFilter( + cells_per_column_limit_filter=self.num_cells) class StripValueTransformerFilter(_BoolFilter): @@ -597,10 +601,10 @@ class StripValueTransformerFilter(_BoolFilter): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(strip_value_transformer=self.flag) + return data_v2_pb2.RowFilter(strip_value_transformer=self.flag) class ApplyLabelFilter(RowFilter): @@ -633,10 +637,10 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_pb2.RowFilter(apply_label_transformer=self.label) + return data_v2_pb2.RowFilter(apply_label_transformer=self.label) class _FilterCombination(RowFilter): @@ -675,12 +679,12 @@ class RowFilterChain(_FilterCombination): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. 
""" - chain = data_pb2.RowFilter.Chain( + chain = data_v2_pb2.RowFilter.Chain( filters=[row_filter.to_pb() for row_filter in self.filters]) - return data_pb2.RowFilter(chain=chain) + return data_v2_pb2.RowFilter(chain=chain) class RowFilterUnion(_FilterCombination): @@ -699,12 +703,12 @@ class RowFilterUnion(_FilterCombination): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - interleave = data_pb2.RowFilter.Interleave( + interleave = data_v2_pb2.RowFilter.Interleave( filters=[row_filter.to_pb() for row_filter in self.filters]) - return data_pb2.RowFilter(interleave=interleave) + return data_v2_pb2.RowFilter(interleave=interleave) class ConditionalRowFilter(RowFilter): @@ -752,7 +756,7 @@ def __eq__(self, other): def to_pb(self): """Converts the row filter to a protobuf. - :rtype: :class:`.data_pb2.RowFilter` + :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ condition_kwargs = {'predicate_filter': self.base_filter.to_pb()} @@ -760,5 +764,5 @@ def to_pb(self): condition_kwargs['true_filter'] = self.true_filter.to_pb() if self.false_filter is not None: condition_kwargs['false_filter'] = self.false_filter.to_pb() - condition = data_pb2.RowFilter.Condition(**condition_kwargs) - return data_pb2.RowFilter(condition=condition) + condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs) + return data_v2_pb2.RowFilter(condition=condition) diff --git a/gcloud/bigtable/table.py b/gcloud/bigtable/table.py index 5815086d7c00..3eef6fe2a5ad 100644 --- a/gcloud/bigtable/table.py +++ b/gcloud/bigtable/table.py @@ -14,19 +14,16 @@ """User friendly container for Google Cloud Bigtable Table.""" - from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 -from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) -from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as data_messages_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as data_messages_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_messages_v2_pb2) from gcloud.bigtable.column_family import _gc_rule_from_pb from gcloud.bigtable.column_family import ColumnFamily from gcloud.bigtable.row import AppendRow from gcloud.bigtable.row import ConditionalRow from gcloud.bigtable.row import DirectRow -from gcloud.bigtable.row_data import PartialRowData from gcloud.bigtable.row_data import PartialRowsData @@ -55,13 +52,13 @@ class Table(object): :type table_id: str :param table_id: The ID of the table. - :type cluster: :class:`Cluster <.cluster.Cluster>` - :param cluster: The cluster that owns the table. + :type instance: :class:`Cluster <.instance.Instance>` + :param instance: The instance that owns the table. """ - def __init__(self, table_id, cluster): + def __init__(self, table_id, instance): self.table_id = table_id - self._cluster = cluster + self._instance = instance @property def name(self): @@ -79,7 +76,7 @@ def name(self): :rtype: str :returns: The table name. """ - return self._cluster.name + '/tables/' + self.table_id + return self._instance.name + '/tables/' + self.table_id def column_family(self, column_family_id, gc_rule=None): """Factory to create a column family associated with this table. 
@@ -134,7 +131,7 @@ def __eq__(self, other): if not isinstance(other, self.__class__): return False return (other.table_id == self.table_id and - other._cluster == self._cluster) + other._instance == self._instance) def __ne__(self, other): return not self.__eq__(other) @@ -144,7 +141,7 @@ def create(self, initial_split_keys=None): .. note:: - Though a :class:`._generated.bigtable_table_data_pb2.Table` is also + Though a :class:`._generated_v2.table_pb2.Table` is also allowed (as the ``table`` property) in a create table request, we do not support it in this method. As mentioned in the :class:`Table` docstring, the name is the only useful property in @@ -153,7 +150,7 @@ def create(self, initial_split_keys=None): .. note:: A create request returns a - :class:`._generated.bigtable_table_data_pb2.Table` but we don't use + :class:`._generated_v2.table_pb2.Table` but we don't use this response. The proto definition allows for the inclusion of a ``current_operation`` in the response, but it does not appear that the Cloud Bigtable API returns any operation. @@ -167,49 +164,24 @@ def create(self, initial_split_keys=None): created, spanning the key ranges: ``[, s1)``, ``[s1, s2)``, ``[s2, )``. """ - request_pb = messages_pb2.CreateTableRequest( - initial_split_keys=initial_split_keys or [], - name=self._cluster.name, + split_pb = table_admin_messages_v2_pb2.CreateTableRequest.Split + if initial_split_keys is not None: + initial_split_keys = [ + split_pb(key=key) for key in initial_split_keys] + request_pb = table_admin_messages_v2_pb2.CreateTableRequest( + initial_splits=initial_split_keys or [], + parent=self._instance.name, table_id=self.table_id, ) - client = self._cluster._client - # We expect a `._generated.bigtable_table_data_pb2.Table` + client = self._instance._client + # We expect a `._generated_v2.table_pb2.Table` client._table_stub.CreateTable(request_pb, client.timeout_seconds) - def rename(self, new_table_id): - """Rename this table. - - .. note:: - - This cannot be used to move tables between clusters, - zones, or projects. - - .. note:: - - The Bigtable Table Admin API currently (``v1``) returns - - ``BigtableTableService.RenameTable is not yet implemented`` - - when this method is used. It's unclear when this method will - actually be supported by the API. - - :type new_table_id: str - :param new_table_id: The new name table ID. - """ - request_pb = messages_pb2.RenameTableRequest( - name=self.name, - new_id=new_table_id, - ) - client = self._cluster._client - # We expect a `google.protobuf.empty_pb2.Empty` - client._table_stub.RenameTable(request_pb, client.timeout_seconds) - - self.table_id = new_table_id - def delete(self): """Delete this table.""" - request_pb = messages_pb2.DeleteTableRequest(name=self.name) - client = self._cluster._client + request_pb = table_admin_messages_v2_pb2.DeleteTableRequest( + name=self.name) + client = self._instance._client # We expect a `google.protobuf.empty_pb2.Empty` client._table_stub.DeleteTable(request_pb, client.timeout_seconds) @@ -224,9 +196,10 @@ def list_column_families(self): family name from the response does not agree with the computed name from the column family ID. 
""" - request_pb = messages_pb2.GetTableRequest(name=self.name) - client = self._cluster._client - # We expect a `._generated.bigtable_table_data_pb2.Table` + request_pb = table_admin_messages_v2_pb2.GetTableRequest( + name=self.name) + client = self._instance._client + # We expect a `._generated_v2.table_pb2.Table` table_pb = client._table_stub.GetTable(request_pb, client.timeout_seconds) @@ -235,10 +208,6 @@ def list_column_families(self): gc_rule = _gc_rule_from_pb(value_pb.gc_rule) column_family = self.column_family(column_family_id, gc_rule=gc_rule) - if column_family.name != value_pb.name: - raise ValueError('Column family name %s does not agree with ' - 'name from request: %s.' % ( - column_family.name, value_pb.name)) result[column_family_id] = column_family return result @@ -260,24 +229,21 @@ def read_row(self, row_key, filter_=None): """ request_pb = _create_row_request(self.name, row_key=row_key, filter_=filter_) - client = self._cluster._client + client = self._instance._client response_iterator = client._data_stub.ReadRows(request_pb, client.timeout_seconds) - # We expect an iterator of `data_messages_pb2.ReadRowsResponse` - result = PartialRowData(row_key) - for read_rows_response in response_iterator: - result.update_from_read_rows(read_rows_response) + rows_data = PartialRowsData(response_iterator) + rows_data.consume_all() + if rows_data.state not in (rows_data.NEW_ROW, rows_data.START): + raise ValueError('The row remains partial / is not committed.') - # Make sure the result actually contains data. - if not result._chunks_encountered: + if len(rows_data.rows) == 0: return None - # Make sure the result was committed by the back-end. - if not result.committed: - raise ValueError('The row remains partial / is not committed.') - return result - def read_rows(self, start_key=None, end_key=None, - allow_row_interleaving=None, limit=None, filter_=None): + return rows_data.rows[row_key] + + def read_rows(self, start_key=None, end_key=None, limit=None, + filter_=None): """Read rows from this table. :type start_key: bytes @@ -290,26 +256,10 @@ def read_rows(self, start_key=None, end_key=None, The range will not include ``end_key``. If left empty, will be interpreted as an infinite string. - :type allow_row_interleaving: bool - :param allow_row_interleaving: (Optional) By default, rows are read - sequentially, producing results which - are guaranteed to arrive in increasing - row order. Setting - ``allow_row_interleaving`` to - :data:`True` allows multiple rows to be - interleaved in the response stream, - which increases throughput but breaks - this guarantee, and may force the - client to use more memory to buffer - partially-received rows. - :type limit: int :param limit: (Optional) The read will terminate after committing to N rows' worth of results. The default (zero) is to return - all results. Note that if ``allow_row_interleaving`` is - set to :data:`True`, partial results may be returned for - more than N rows. However, only N ``commit_row`` chunks - will be sent. + all results. 
:type filter_: :class:`.RowFilter` :param filter_: (Optional) The filter to apply to the contents of the @@ -322,11 +272,11 @@ def read_rows(self, start_key=None, end_key=None, """ request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, - allow_row_interleaving=allow_row_interleaving, limit=limit) - client = self._cluster._client + limit=limit) + client = self._instance._client response_iterator = client._data_stub.ReadRows(request_pb, client.timeout_seconds) - # We expect an iterator of `data_messages_pb2.ReadRowsResponse` + # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` return PartialRowsData(response_iterator) def sample_row_keys(self): @@ -360,16 +310,16 @@ def sample_row_keys(self): or by casting to a :class:`list` and can be cancelled by calling ``cancel()``. """ - request_pb = data_messages_pb2.SampleRowKeysRequest( + request_pb = data_messages_v2_pb2.SampleRowKeysRequest( table_name=self.name) - client = self._cluster._client + client = self._instance._client response_iterator = client._data_stub.SampleRowKeys( request_pb, client.timeout_seconds) return response_iterator def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, - filter_=None, allow_row_interleaving=None, limit=None): + filter_=None, limit=None): """Creates a request to read rows in a table. :type table_name: str @@ -392,28 +342,12 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, :param filter_: (Optional) The filter to apply to the contents of the specified row(s). If unset, reads the entire table. - :type allow_row_interleaving: bool - :param allow_row_interleaving: (Optional) By default, rows are read - sequentially, producing results which are - guaranteed to arrive in increasing row - order. Setting - ``allow_row_interleaving`` to - :data:`True` allows multiple rows to be - interleaved in the response stream, - which increases throughput but breaks - this guarantee, and may force the - client to use more memory to buffer - partially-received rows. - :type limit: int :param limit: (Optional) The read will terminate after committing to N rows' worth of results. The default (zero) is to return - all results. Note that if ``allow_row_interleaving`` is - set to :data:`True`, partial results may be returned for - more than N rows. However, only N ``commit_row`` chunks - will be sent. + all results. - :rtype: :class:`data_messages_pb2.ReadRowsRequest` + :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. 
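A minimal sketch of the v2 ``ReadRowsRequest`` shape that the rewritten ``_create_row_request`` body below produces: single keys and key ranges now live in the request's ``rows`` RowSet, and range bounds use the closed/open naming. The table name here is a placeholder:

    from gcloud.bigtable._generated_v2 import bigtable_pb2 as data_messages_v2_pb2

    request = data_messages_v2_pb2.ReadRowsRequest(
        table_name='projects/p/instances/i/tables/t',  # placeholder table name
        rows_limit=10,
    )
    # Single keys are appended to the RowSet; ranges are added with
    # closed (inclusive) / open (exclusive) bounds.
    request.rows.row_keys.append(b'exact-row-key')
    request.rows.row_ranges.add(start_key_closed=b'row-a', end_key_open=b'row-z')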
:raises: :class:`ValueError ` if both ``row_key`` and one of ``start_key`` and ``end_key`` are set @@ -423,21 +357,23 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, (start_key is not None or end_key is not None)): raise ValueError('Row key and row range cannot be ' 'set simultaneously') - if row_key is not None: - request_kwargs['row_key'] = _to_bytes(row_key) + range_kwargs = {} if start_key is not None or end_key is not None: - range_kwargs = {} if start_key is not None: - range_kwargs['start_key'] = _to_bytes(start_key) + range_kwargs['start_key_closed'] = _to_bytes(start_key) if end_key is not None: - range_kwargs['end_key'] = _to_bytes(end_key) - row_range = data_pb2.RowRange(**range_kwargs) - request_kwargs['row_range'] = row_range + range_kwargs['end_key_open'] = _to_bytes(end_key) if filter_ is not None: request_kwargs['filter'] = filter_.to_pb() - if allow_row_interleaving is not None: - request_kwargs['allow_row_interleaving'] = allow_row_interleaving if limit is not None: - request_kwargs['num_rows_limit'] = limit + request_kwargs['rows_limit'] = limit + + message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) + + if row_key is not None: + message.rows.row_keys.append(_to_bytes(row_key)) + + if range_kwargs: + message.rows.row_ranges.add(**range_kwargs) - return data_messages_pb2.ReadRowsRequest(**request_kwargs) + return message diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index bb424ad259b1..435798ecdf61 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -18,6 +18,12 @@ class TestClient(unittest2.TestCase): + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = 'display-name' + TIMEOUT_SECONDS = 80 + USER_AGENT = 'you-sir-age-int' + def _getTargetClass(self): from gcloud.bigtable.client import Client return Client @@ -33,8 +39,7 @@ def _constructor_test_helper(self, expected_scopes, creds, user_agent = user_agent or MUT.DEFAULT_USER_AGENT timeout_seconds = timeout_seconds or MUT.DEFAULT_TIMEOUT_SECONDS - PROJECT = 'PROJECT' - client = self._makeOne(project=PROJECT, credentials=creds, + client = self._makeOne(project=self.PROJECT, credentials=creds, read_only=read_only, admin=admin, user_agent=user_agent, timeout_seconds=timeout_seconds) @@ -44,12 +49,12 @@ def _constructor_test_helper(self, expected_scopes, creds, if expected_scopes is not None: self.assertEqual(client._credentials.scopes, expected_scopes) - self.assertEqual(client.project, PROJECT) + self.assertEqual(client.project, self.PROJECT) self.assertEqual(client.timeout_seconds, timeout_seconds) self.assertEqual(client.user_agent, user_agent) # Check stubs are set (but null) self.assertEqual(client._data_stub_internal, None) - self.assertEqual(client._cluster_stub_internal, None) + self.assertEqual(client._instance_stub_internal, None) self.assertEqual(client._operations_stub_internal, None) self.assertEqual(client._table_stub_internal, None) @@ -63,13 +68,13 @@ def test_constructor_default_scopes(self): def test_constructor_custom_user_agent_and_timeout(self): from gcloud.bigtable import client as MUT - timeout_seconds = 1337 - user_agent = 'custom-application' + CUSTOM_TIMEOUT_SECONDS = 1337 + CUSTOM_USER_AGENT = 'custom-application' expected_scopes = [MUT.DATA_SCOPE] creds = _Credentials() self._constructor_test_helper(expected_scopes, creds, - user_agent=user_agent, - timeout_seconds=timeout_seconds) + user_agent=CUSTOM_USER_AGENT, + timeout_seconds=CUSTOM_TIMEOUT_SECONDS) def 
test_constructor_with_admin(self): from gcloud.bigtable import client as MUT @@ -112,8 +117,7 @@ def test_constructor_credentials_wo_create_scoped(self): def _context_manager_helper(self): credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) + client = self._makeOne(project=self.PROJECT, credentials=credentials) def mock_start(): client._data_stub_internal = object() @@ -151,17 +155,17 @@ class DummyException(Exception): def _copy_test_helper(self, read_only=False, admin=False): credentials = _Credentials('value') - project = 'PROJECT' - timeout_seconds = 123 - user_agent = 'you-sir-age-int' - client = self._makeOne(project=project, credentials=credentials, - read_only=read_only, admin=admin, - timeout_seconds=timeout_seconds, - user_agent=user_agent) + client = self._makeOne( + project=self.PROJECT, + credentials=credentials, + read_only=read_only, + admin=admin, + timeout_seconds=self.TIMEOUT_SECONDS, + user_agent=self.USER_AGENT) # Put some fake stubs in place so that we can verify they # don't get copied. client._data_stub_internal = object() - client._cluster_stub_internal = object() + client._instance_stub_internal = object() client._operations_stub_internal = object() client._table_stub_internal = object() @@ -173,7 +177,7 @@ def _copy_test_helper(self, read_only=False, admin=False): self.assertEqual(new_client.timeout_seconds, client.timeout_seconds) # Make sure stubs are not preserved. self.assertEqual(new_client._data_stub_internal, None) - self.assertEqual(new_client._cluster_stub_internal, None) + self.assertEqual(new_client._instance_stub_internal, None) self.assertEqual(new_client._operations_stub_internal, None) self.assertEqual(new_client._table_stub_internal, None) @@ -213,29 +217,30 @@ def test_data_stub_failure(self): with self.assertRaises(ValueError): getattr(client, '_data_stub') - def test_cluster_stub_getter(self): + def test_instance_stub_getter(self): credentials = _Credentials() project = 'PROJECT' client = self._makeOne(project=project, credentials=credentials, admin=True) - client._cluster_stub_internal = object() - self.assertTrue(client._cluster_stub is client._cluster_stub_internal) + client._instance_stub_internal = object() + self.assertTrue( + client._instance_stub is client._instance_stub_internal) - def test_cluster_stub_non_admin_failure(self): + def test_instance_stub_non_admin_failure(self): credentials = _Credentials() project = 'PROJECT' client = self._makeOne(project=project, credentials=credentials, admin=False) with self.assertRaises(ValueError): - getattr(client, '_cluster_stub') + getattr(client, '_instance_stub') - def test_cluster_stub_unset_failure(self): + def test_instance_stub_unset_failure(self): credentials = _Credentials() project = 'PROJECT' client = self._makeOne(project=project, credentials=credentials, admin=True) with self.assertRaises(ValueError): - getattr(client, '_cluster_stub') + getattr(client, '_instance_stub') def test_operations_stub_getter(self): credentials = _Credentials() @@ -289,9 +294,9 @@ def test_table_stub_unset_failure(self): def test__make_data_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import DATA_API_HOST - from gcloud.bigtable.client import DATA_API_PORT - from gcloud.bigtable.client import DATA_STUB_FACTORY + from gcloud.bigtable.client import DATA_API_HOST_V2 + from gcloud.bigtable.client import DATA_API_PORT_V2 + from gcloud.bigtable.client import 
DATA_STUB_FACTORY_V2 credentials = _Credentials() project = 'PROJECT' @@ -311,18 +316,18 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - DATA_STUB_FACTORY, - DATA_API_HOST, - DATA_API_PORT, + DATA_STUB_FACTORY_V2, + DATA_API_HOST_V2, + DATA_API_PORT_V2, ), ]) - def test__make_cluster_stub(self): + def test__make_instance_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import CLUSTER_ADMIN_HOST - from gcloud.bigtable.client import CLUSTER_ADMIN_PORT - from gcloud.bigtable.client import CLUSTER_STUB_FACTORY + from gcloud.bigtable.client import INSTANCE_ADMIN_HOST_V2 + from gcloud.bigtable.client import INSTANCE_ADMIN_PORT_V2 + from gcloud.bigtable.client import INSTANCE_STUB_FACTORY_V2 credentials = _Credentials() project = 'PROJECT' @@ -336,24 +341,24 @@ def mock_make_stub(*args): return fake_stub with _Monkey(MUT, _make_stub=mock_make_stub): - result = client._make_cluster_stub() + result = client._make_instance_stub() self.assertTrue(result is fake_stub) self.assertEqual(make_stub_args, [ ( client, - CLUSTER_STUB_FACTORY, - CLUSTER_ADMIN_HOST, - CLUSTER_ADMIN_PORT, + INSTANCE_STUB_FACTORY_V2, + INSTANCE_ADMIN_HOST_V2, + INSTANCE_ADMIN_PORT_V2, ), ]) def test__make_operations_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import CLUSTER_ADMIN_HOST - from gcloud.bigtable.client import CLUSTER_ADMIN_PORT - from gcloud.bigtable.client import OPERATIONS_STUB_FACTORY + from gcloud.bigtable.client import OPERATIONS_API_HOST_V2 + from gcloud.bigtable.client import OPERATIONS_API_PORT_V2 + from gcloud.bigtable.client import OPERATIONS_STUB_FACTORY_V2 credentials = _Credentials() project = 'PROJECT' @@ -373,18 +378,18 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - OPERATIONS_STUB_FACTORY, - CLUSTER_ADMIN_HOST, - CLUSTER_ADMIN_PORT, + OPERATIONS_STUB_FACTORY_V2, + OPERATIONS_API_HOST_V2, + OPERATIONS_API_PORT_V2, ), ]) def test__make_table_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import TABLE_ADMIN_HOST - from gcloud.bigtable.client import TABLE_ADMIN_PORT - from gcloud.bigtable.client import TABLE_STUB_FACTORY + from gcloud.bigtable.client import TABLE_ADMIN_HOST_V2 + from gcloud.bigtable.client import TABLE_ADMIN_PORT_V2 + from gcloud.bigtable.client import TABLE_STUB_FACTORY_V2 credentials = _Credentials() project = 'PROJECT' @@ -404,9 +409,9 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - TABLE_STUB_FACTORY, - TABLE_ADMIN_HOST, - TABLE_ADMIN_PORT, + TABLE_STUB_FACTORY_V2, + TABLE_ADMIN_HOST_V2, + TABLE_ADMIN_PORT_V2, ), ]) @@ -443,13 +448,13 @@ def mock_make_stub(*args): self.assertTrue(client._data_stub_internal is stub) if admin: - self.assertTrue(client._cluster_stub_internal is stub) + self.assertTrue(client._instance_stub_internal is stub) self.assertTrue(client._operations_stub_internal is stub) self.assertTrue(client._table_stub_internal is stub) self.assertEqual(stub._entered, 4) self.assertEqual(len(make_stub_args), 4) else: - self.assertTrue(client._cluster_stub_internal is None) + self.assertTrue(client._instance_stub_internal is None) self.assertTrue(client._operations_stub_internal is None) self.assertTrue(client._table_stub_internal is None) self.assertEqual(stub._entered, 1) @@ -484,12 +489,12 @@ def _stop_method_helper(self, admin): stub1 = _FakeStub() stub2 = _FakeStub() 
client._data_stub_internal = stub1 - client._cluster_stub_internal = stub2 + client._instance_stub_internal = stub2 client._operations_stub_internal = stub2 client._table_stub_internal = stub2 client.stop() self.assertTrue(client._data_stub_internal is None) - self.assertTrue(client._cluster_stub_internal is None) + self.assertTrue(client._instance_stub_internal is None) self.assertTrue(client._operations_stub_internal is None) self.assertTrue(client._table_stub_internal is None) self.assertEqual(stub1._entered, 0) @@ -516,147 +521,117 @@ def test_stop_while_stopped(self): # This is a bit hacky. We set the cluster stub protected value # since it isn't used in is_started() and make sure that stop # doesn't reset this value to None. - client._cluster_stub_internal = cluster_stub = object() + client._instance_stub_internal = instance_stub = object() client.stop() # Make sure the cluster stub did not change. - self.assertEqual(client._cluster_stub_internal, cluster_stub) + self.assertEqual(client._instance_stub_internal, instance_stub) - def test_cluster_factory(self): - from gcloud.bigtable.cluster import Cluster + def test_instance_factory_defaults(self): + from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES + from gcloud.bigtable.instance import Instance + from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = 'display-name' credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) + client = self._makeOne(project=PROJECT, credentials=credentials) - zone = 'zone' - cluster_id = 'cluster-id' - display_name = 'display-name' - serve_nodes = 42 - cluster = client.cluster(zone, cluster_id, display_name=display_name, - serve_nodes=serve_nodes) - self.assertTrue(isinstance(cluster, Cluster)) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) - self.assertEqual(cluster.display_name, display_name) - self.assertEqual(cluster.serve_nodes, serve_nodes) - self.assertTrue(cluster._client is client) - - def _list_zones_helper(self, zone_status): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub + instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) - credentials = _Credentials() - project = 'PROJECT' - timeout_seconds = 281330 - client = self._makeOne(project=project, credentials=credentials, - admin=True, timeout_seconds=timeout_seconds) - - # Create request_pb - request_pb = messages_pb2.ListZonesRequest( - name='projects/' + project, - ) + self.assertTrue(isinstance(instance, Instance)) + self.assertEqual(instance.instance_id, INSTANCE_ID) + self.assertEqual(instance.display_name, DISPLAY_NAME) + self.assertEqual(instance._cluster_location_id, + _EXISTING_INSTANCE_LOCATION_ID) + self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES) + self.assertTrue(instance._client is client) - # Create response_pb - zone1 = 'foo' - zone2 = 'bar' - response_pb = messages_pb2.ListZonesResponse( - zones=[ - data_pb2.Zone(display_name=zone1, status=zone_status), - data_pb2.Zone(display_name=zone2, status=zone_status), - ], - ) + def test_instance_factory_w_explicit_serve_nodes(self): + from gcloud.bigtable.instance import Instance - # Patch the stub used by the API method. 
- client._cluster_stub_internal = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = [zone1, zone2] - - # Perform the method and check the result. - result = client.list_zones() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListZones', - (request_pb, timeout_seconds), - {}, - )]) - - def test_list_zones(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - self._list_zones_helper(data_pb2.Zone.OK) - - def test_list_zones_failure(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - with self.assertRaises(ValueError): - self._list_zones_helper(data_pb2.Zone.EMERGENCY_MAINENANCE) - - def test_list_clusters(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = 'display-name' + LOCATION_ID = 'locname' + SERVE_NODES = 5 + credentials = _Credentials() + client = self._makeOne(project=PROJECT, credentials=credentials) + + instance = client.instance( + INSTANCE_ID, display_name=DISPLAY_NAME, + location=LOCATION_ID, serve_nodes=SERVE_NODES) + + self.assertTrue(isinstance(instance, Instance)) + self.assertEqual(instance.instance_id, INSTANCE_ID) + self.assertEqual(instance.display_name, DISPLAY_NAME) + self.assertEqual(instance._cluster_location_id, LOCATION_ID) + self.assertEqual(instance._cluster_serve_nodes, SERVE_NODES) + self.assertTrue(instance._client is client) + + def test_list_instances(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) from gcloud.bigtable._testing import _FakeStub + LOCATION = 'projects/' + self.PROJECT + '/locations/locname' + FAILED_LOCATION = 'FAILED' + INSTANCE_ID1 = 'instance-id1' + INSTANCE_ID2 = 'instance-id2' + INSTANCE_NAME1 = ( + 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID1) + INSTANCE_NAME2 = ( + 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2) + credentials = _Credentials() - project = 'PROJECT' - timeout_seconds = 8004 - client = self._makeOne(project=project, credentials=credentials, - admin=True, timeout_seconds=timeout_seconds) + client = self._makeOne( + project=self.PROJECT, + credentials=credentials, + admin=True, + timeout_seconds=self.TIMEOUT_SECONDS, + ) # Create request_pb - request_pb = messages_pb2.ListClustersRequest( - name='projects/' + project, + request_pb = messages_v2_pb2.ListInstancesRequest( + parent='projects/' + self.PROJECT, ) # Create response_pb - zone = 'foo' - failed_zone = 'bar' - cluster_id1 = 'cluster-id1' - cluster_id2 = 'cluster-id2' - cluster_name1 = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id1) - cluster_name2 = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id2) - response_pb = messages_pb2.ListClustersResponse( - failed_zones=[ - data_pb2.Zone(display_name=failed_zone), + response_pb = messages_v2_pb2.ListInstancesResponse( + failed_locations=[ + FAILED_LOCATION, ], - clusters=[ - data_pb2.Cluster( - name=cluster_name1, - display_name=cluster_name1, - serve_nodes=3, + instances=[ + data_v2_pb2.Instance( + name=INSTANCE_NAME1, + display_name=INSTANCE_NAME1, ), - data_pb2.Cluster( - name=cluster_name2, - display_name=cluster_name2, - serve_nodes=3, + data_v2_pb2.Instance( + 
name=INSTANCE_NAME2, + display_name=INSTANCE_NAME2, ), ], ) # Patch the stub used by the API method. - client._cluster_stub_internal = stub = _FakeStub(response_pb) + client._instance_stub_internal = stub = _FakeStub(response_pb) # Create expected_result. - failed_zones = [failed_zone] - clusters = [ - client.cluster(zone, cluster_id1), - client.cluster(zone, cluster_id2), + failed_locations = [FAILED_LOCATION] + instances = [ + client.instance(INSTANCE_ID1, LOCATION), + client.instance(INSTANCE_ID2, LOCATION), ] - expected_result = (clusters, failed_zones) + expected_result = (instances, failed_locations) # Perform the method and check the result. - result = client.list_clusters() + result = client.list_instances() self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( - 'ListClusters', - (request_pb, timeout_seconds), + 'ListInstances', + (request_pb, self.TIMEOUT_SECONDS), {}, )]) @@ -673,15 +648,15 @@ def _makeOne(self, *args, **kwargs): def test_constructor(self): from gcloud.bigtable.client import Client from gcloud.bigtable.client import DATA_SCOPE + PROJECT = 'PROJECT' + USER_AGENT = 'USER_AGENT' credentials = _Credentials() - project = 'PROJECT' - user_agent = 'USER_AGENT' - client = Client(project=project, credentials=credentials, - user_agent=user_agent) + client = Client(project=PROJECT, credentials=credentials, + user_agent=USER_AGENT) transformer = self._makeOne(client) self.assertTrue(transformer._credentials is credentials) - self.assertEqual(transformer._user_agent, user_agent) + self.assertEqual(transformer._user_agent, USER_AGENT) self.assertEqual(credentials.scopes, [DATA_SCOPE]) def test___call__(self): diff --git a/gcloud/bigtable/test_cluster.py b/gcloud/bigtable/test_cluster.py index 427a4ec9126b..4f8da614f439 100644 --- a/gcloud/bigtable/test_cluster.py +++ b/gcloud/bigtable/test_cluster.py @@ -26,15 +26,12 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def _constructor_test_helper(self, cluster=None): - import datetime op_type = 'fake-op' op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) - operation = self._makeOne(op_type, op_id, begin, cluster=cluster) + operation = self._makeOne(op_type, op_id, cluster=cluster) self.assertEqual(operation.op_type, op_type) self.assertEqual(operation.op_id, op_id) - self.assertEqual(operation.begin, begin) self.assertEqual(operation._cluster, cluster) self.assertFalse(operation._complete) @@ -46,13 +43,11 @@ def test_constructor_explicit_cluster(self): self._constructor_test_helper(cluster=cluster) def test___eq__(self): - import datetime op_type = 'fake-op' op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) cluster = object() - operation1 = self._makeOne(op_type, op_id, begin, cluster=cluster) - operation2 = self._makeOne(op_type, op_id, begin, cluster=cluster) + operation1 = self._makeOne(op_type, op_id, cluster=cluster) + operation2 = self._makeOne(op_type, op_id, cluster=cluster) self.assertEqual(operation1, operation2) def test___eq__type_differ(self): @@ -61,13 +56,11 @@ def test___eq__type_differ(self): self.assertNotEqual(operation1, operation2) def test___ne__same_value(self): - import datetime op_type = 'fake-op' op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) cluster = object() - operation1 = self._makeOne(op_type, op_id, begin, cluster=cluster) - operation2 = self._makeOne(op_type, op_id, begin, cluster=cluster) + operation1 = self._makeOne(op_type, op_id, cluster=cluster) + operation2 = self._makeOne(op_type, op_id, 
cluster=cluster) comparison_val = (operation1 != operation2) self.assertFalse(comparison_val) @@ -83,27 +76,27 @@ def test_finished_without_operation(self): operation.finished() def _finished_helper(self, done): - import datetime from google.longrunning import operations_pb2 from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable.cluster import Cluster - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - op_type = 'fake-op' - op_id = 789 - begin = datetime.datetime(2015, 10, 22, 1, 1) + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + OP_TYPE = 'fake-op' + OP_ID = 789 timeout_seconds = 1 - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = Cluster(zone, cluster_id, client) - operation = self._makeOne(op_type, op_id, begin, cluster=cluster) + client = _Client(PROJECT, timeout_seconds=timeout_seconds) + instance = _Instance(INSTANCE_ID, client) + cluster = Cluster(CLUSTER_ID, instance) + operation = self._makeOne(OP_TYPE, OP_ID, cluster=cluster) # Create request_pb - op_name = ('operations/projects/' + project + '/zones/' + - zone + '/clusters/' + cluster_id + - '/operations/%d' % (op_id,)) + op_name = ('operations/projects/' + PROJECT + + '/instances/' + INSTANCE_ID + + '/clusters/' + CLUSTER_ID + + '/operations/%d' % (OP_ID,)) request_pb = operations_pb2.GetOperationRequest(name=op_name) # Create response_pb @@ -139,6 +132,14 @@ def test_finished_not_done(self): class TestCluster(unittest2.TestCase): + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + CLUSTER_NAME = ('projects/' + PROJECT + + '/instances/' + INSTANCE_ID + + '/clusters/' + CLUSTER_ID) + TIMEOUT_SECONDS = 123 + def _getTargetClass(self): from gcloud.bigtable.cluster import Cluster return Cluster @@ -147,292 +148,226 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_constructor_defaults(self): - zone = 'zone' - cluster_id = 'cluster-id' - client = object() + from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(zone, cluster_id, client) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) - self.assertEqual(cluster.display_name, cluster_id) - self.assertEqual(cluster.serve_nodes, 3) - self.assertTrue(cluster._client is client) + cluster = self._makeOne(self.CLUSTER_ID, instance) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertTrue(cluster._instance is instance) + self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) def test_constructor_non_default(self): - zone = 'zone' - cluster_id = 'cluster-id' - display_name = 'display_name' - serve_nodes = 8 - client = object() - - cluster = self._makeOne(zone, cluster_id, client, - display_name=display_name, - serve_nodes=serve_nodes) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) - self.assertEqual(cluster.display_name, display_name) - self.assertEqual(cluster.serve_nodes, serve_nodes) - self.assertTrue(cluster._client is client) + SERVE_NODES = 8 + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertTrue(cluster._instance is instance) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) def test_copy(self): - project = 'PROJECT' - zone = 'zone' 
- cluster_id = 'cluster-id' - display_name = 'display_name' - serve_nodes = 8 - - client = _Client(project) - cluster = self._makeOne(zone, cluster_id, client, - display_name=display_name, - serve_nodes=serve_nodes) + SERVE_NODES = 8 + + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) new_cluster = cluster.copy() # Make sure the client copy succeeded. - self.assertFalse(new_cluster._client is client) - self.assertEqual(new_cluster._client, client) + self.assertFalse(new_cluster._instance is instance) + self.assertEqual(new_cluster.serve_nodes, SERVE_NODES) # Make sure the client got copied to a new instance. self.assertFalse(cluster is new_cluster) self.assertEqual(cluster, new_cluster) - def test_table_factory(self): - from gcloud.bigtable.table import Table - - zone = 'zone' - cluster_id = 'cluster-id' - cluster = self._makeOne(zone, cluster_id, None) - - table_id = 'table_id' - table = cluster.table(table_id) - self.assertTrue(isinstance(table, Table)) - self.assertEqual(table.table_id, table_id) - self.assertEqual(table._cluster, cluster) - def test__update_from_pb_success(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - display_name = 'display_name' - serve_nodes = 8 - cluster_pb = data_pb2.Cluster( - display_name=display_name, - serve_nodes=serve_nodes, + SERVE_NODES = 8 + cluster_pb = _ClusterPB( + serve_nodes=SERVE_NODES, ) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) + cluster = self._makeOne(self.CLUSTER_ID, instance) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, display_name) - self.assertEqual(cluster.serve_nodes, serve_nodes) - - def test__update_from_pb_no_display_name(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - - cluster_pb = data_pb2.Cluster(serve_nodes=331) - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - with self.assertRaises(ValueError): - cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, None) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) def test__update_from_pb_no_serve_nodes(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - cluster_pb = data_pb2.Cluster(display_name='name') - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) + cluster_pb = _ClusterPB() + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._makeOne(self.CLUSTER_ID, instance) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) with self.assertRaises(ValueError): cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, None) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) def test_from_pb_success(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - client 
= _Client(project=project) - - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster_pb = data_pb2.Cluster( - name=cluster_name, - display_name=cluster_id, - serve_nodes=331, + SERVE_NODES = 331 + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster_pb = _ClusterPB( + name=self.CLUSTER_NAME, + serve_nodes=SERVE_NODES, ) klass = self._getTargetClass() - cluster = klass.from_pb(cluster_pb, client) + cluster = klass.from_pb(cluster_pb, instance) self.assertTrue(isinstance(cluster, klass)) - self.assertEqual(cluster._client, client) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) + self.assertTrue(cluster._instance is instance) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) def test_from_pb_bad_cluster_name(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - - cluster_name = 'INCORRECT_FORMAT' - cluster_pb = data_pb2.Cluster(name=cluster_name) + BAD_CLUSTER_NAME = 'INCORRECT_FORMAT' + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster_pb = _ClusterPB(name=BAD_CLUSTER_NAME) klass = self._getTargetClass() with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, None) + klass.from_pb(cluster_pb, instance) def test_from_pb_project_mistmatch(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) + ALT_PROJECT = 'ALT_PROJECT' + client = _Client(ALT_PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + self.assertNotEqual(self.PROJECT, ALT_PROJECT) + + cluster_pb = _ClusterPB(name=self.CLUSTER_NAME) - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - alt_project = 'ALT_PROJECT' - client = _Client(project=alt_project) + klass = self._getTargetClass() + with self.assertRaises(ValueError): + klass.from_pb(cluster_pb, instance) - self.assertNotEqual(project, alt_project) + def test_from_pb_instance_mistmatch(self): + ALT_INSTANCE_ID = 'ALT_INSTANCE_ID' + client = _Client(self.PROJECT) + instance = _Instance(ALT_INSTANCE_ID, client) - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster_pb = data_pb2.Cluster(name=cluster_name) + self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID) + + cluster_pb = _ClusterPB(name=self.CLUSTER_NAME) klass = self._getTargetClass() with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, client) + klass.from_pb(cluster_pb, instance) def test_name_property(self): - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - client = _Client(project=project) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(zone, cluster_id, client) - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - self.assertEqual(cluster.name, cluster_name) + cluster = self._makeOne(self.CLUSTER_ID, instance) + self.assertEqual(cluster.name, self.CLUSTER_NAME) def test___eq__(self): - zone = 'zone' - cluster_id = 'cluster_id' - client = object() - cluster1 = self._makeOne(zone, cluster_id, client) - cluster2 = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne(self.CLUSTER_ID, instance) + cluster2 = self._makeOne(self.CLUSTER_ID, instance) self.assertEqual(cluster1, cluster2) def test___eq__type_differ(self): - cluster1 = 
self._makeOne('zone', 'cluster_id', 'client') + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne(self.CLUSTER_ID, instance) cluster2 = object() self.assertNotEqual(cluster1, cluster2) def test___ne__same_value(self): - zone = 'zone' - cluster_id = 'cluster_id' - client = object() - cluster1 = self._makeOne(zone, cluster_id, client) - cluster2 = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne(self.CLUSTER_ID, instance) + cluster2 = self._makeOne(self.CLUSTER_ID, instance) comparison_val = (cluster1 != cluster2) self.assertFalse(comparison_val) def test___ne__(self): - cluster1 = self._makeOne('zone1', 'cluster_id1', 'client1') - cluster2 = self._makeOne('zone2', 'cluster_id2', 'client2') + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne('cluster_id1', instance) + cluster2 = self._makeOne('cluster_id2', instance) self.assertNotEqual(cluster1, cluster2) def test_reload(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 123 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) + SERVE_NODES = 31 + LOCATION = 'LOCATION' + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance) # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = messages_pb2.GetClusterRequest(name=cluster_name) + request_pb = _GetClusterRequestPB(name=self.CLUSTER_NAME) # Create response_pb - serve_nodes = 31 - display_name = u'hey-hi-hello' - response_pb = data_pb2.Cluster( - display_name=display_name, - serve_nodes=serve_nodes, + response_pb = _ClusterPB( + serve_nodes=SERVE_NODES, + location=LOCATION, ) # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) + client._instance_stub = stub = _FakeStub(response_pb) # Create expected_result. expected_result = None # reload() has no return value. # Check Cluster optional config values before. self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - self.assertEqual(cluster.display_name, cluster_id) # Perform the method and check the result. result = cluster.reload() self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'GetCluster', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) # Check Cluster optional config values before. 
- self.assertEqual(cluster.serve_nodes, serve_nodes) - self.assertEqual(cluster.display_name, display_name) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + self.assertEqual(cluster.location, LOCATION) def test_create(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import cluster as MUT - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 578 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance) # Create request_pb. Just a mock since we monkey patch # _prepare_create_request request_pb = object() # Create response_pb - op_id = 5678 - op_begin = object() - op_name = ('operations/projects/%s/zones/%s/clusters/%s/' - 'operations/%d' % (project, zone, cluster_id, op_id)) - current_op = operations_pb2.Operation(name=op_name) - response_pb = data_pb2.Cluster(current_operation=current_op) + OP_ID = 5678 + OP_NAME = ( + 'operations/projects/%s/instances/%s/clusters/%s/operations/%d' % + (self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) + response_pb = operations_pb2.Operation(name=OP_NAME) # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) + client._instance_stub = stub = _FakeStub(response_pb) # Create expected_result. - expected_result = MUT.Operation('create', op_id, op_begin, - cluster=cluster) + expected_result = MUT.Operation('create', OP_ID, cluster=cluster) # Create the mocks. prep_create_called = [] @@ -445,7 +380,7 @@ def mock_prep_create_req(cluster): def mock_process_operation(operation_pb): process_operation_called.append(operation_pb) - return op_id, op_begin + return OP_ID # Perform the method and check the result. 
with _Monkey(MUT, _prepare_create_request=mock_prep_create_req, @@ -455,60 +390,47 @@ def mock_process_operation(operation_pb): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'CreateCluster', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) self.assertEqual(prep_create_called, [cluster]) - self.assertEqual(process_operation_called, [current_op]) + self.assertEqual(process_operation_called, [response_pb]) def test_update(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import cluster as MUT - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - serve_nodes = 81 - display_name = 'display_name' - timeout_seconds = 9 + SERVE_NODES = 81 - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client, - display_name=display_name, - serve_nodes=serve_nodes) + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = data_pb2.Cluster( - name=cluster_name, - display_name=display_name, - serve_nodes=serve_nodes, + request_pb = _ClusterPB( + name=self.CLUSTER_NAME, + serve_nodes=SERVE_NODES, ) # Create response_pb - current_op = operations_pb2.Operation() - response_pb = data_pb2.Cluster(current_operation=current_op) + response_pb = operations_pb2.Operation() # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) + client._instance_stub = stub = _FakeStub(response_pb) # Create expected_result. - op_id = 5678 - op_begin = object() - expected_result = MUT.Operation('update', op_id, op_begin, - cluster=cluster) + OP_ID = 5678 + expected_result = MUT.Operation('update', OP_ID, cluster=cluster) # Create mocks process_operation_called = [] def mock_process_operation(operation_pb): process_operation_called.append(operation_pb) - return op_id, op_begin + return OP_ID # Perform the method and check the result. 
with _Monkey(MUT, _process_operation=mock_process_operation): @@ -517,35 +439,27 @@ def mock_process_operation(operation_pb): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'UpdateCluster', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) - self.assertEqual(process_operation_called, [current_op]) + self.assertEqual(process_operation_called, [response_pb]) def test_delete(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) from gcloud.bigtable._testing import _FakeStub - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 57 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance) # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = messages_pb2.DeleteClusterRequest(name=cluster_name) + request_pb = _DeleteClusterRequestPB(name=self.CLUSTER_NAME) # Create response_pb response_pb = empty_pb2.Empty() # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) + client._instance_stub = stub = _FakeStub(response_pb) # Create expected_result. expected_result = None # delete() has no return value. @@ -556,126 +470,9 @@ def test_delete(self): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'DeleteCluster', - (request_pb, timeout_seconds), - {}, - )]) - - def test_undelete(self): - from google.longrunning import operations_pb2 - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable import cluster as MUT - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 78 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) - - # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = messages_pb2.UndeleteClusterRequest(name=cluster_name) - - # Create response_pb - response_pb = operations_pb2.Operation() - - # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - op_id = 5678 - op_begin = object() - expected_result = MUT.Operation('undelete', op_id, op_begin, - cluster=cluster) - - # Create the mocks. - process_operation_called = [] - - def mock_process_operation(operation_pb): - process_operation_called.append(operation_pb) - return op_id, op_begin - - # Perform the method and check the result. 
- with _Monkey(MUT, _process_operation=mock_process_operation): - result = cluster.undelete() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'UndeleteCluster', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) - self.assertEqual(process_operation_called, [response_pb]) - - def _list_tables_helper(self, table_id, table_name=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as table_data_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as table_messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 45 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) - - # Create request_ - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = table_messages_pb2.ListTablesRequest(name=cluster_name) - - # Create response_pb - table_name = table_name or (cluster_name + '/tables/' + table_id) - response_pb = table_messages_pb2.ListTablesResponse( - tables=[ - table_data_pb2.Table(name=table_name), - ], - ) - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_table = cluster.table(table_id) - expected_result = [expected_table] - - # Perform the method and check the result. - result = cluster.list_tables() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListTables', - (request_pb, timeout_seconds), - {}, - )]) - - def test_list_tables(self): - table_id = 'table_id' - self._list_tables_helper(table_id) - - def test_list_tables_failure_bad_split(self): - with self.assertRaises(ValueError): - self._list_tables_helper(None, table_name='wrong-format') - - def test_list_tables_failure_name_bad_before(self): - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - - table_id = 'table_id' - bad_table_name = ('nonempty-section-before' + - 'projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id + '/tables/' + table_id) - with self.assertRaises(ValueError): - self._list_tables_helper(table_id, table_name=bad_table_name) class Test__prepare_create_request(unittest2.TestCase): @@ -685,30 +482,23 @@ def _callFUT(self, cluster): return _prepare_create_request(cluster) def test_it(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) from gcloud.bigtable.cluster import Cluster - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - display_name = u'DISPLAY_NAME' - serve_nodes = 8 - client = _Client(project) + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + SERVE_NODES = 8 + + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + cluster = Cluster(CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) - cluster = Cluster(zone, cluster_id, client, - display_name=display_name, serve_nodes=serve_nodes) request_pb = self._callFUT(cluster) - self.assertTrue(isinstance(request_pb, - messages_pb2.CreateClusterRequest)) - self.assertEqual(request_pb.cluster_id, cluster_id) - self.assertEqual(request_pb.name, - 'projects/' + project + '/zones/' + zone) - self.assertTrue(isinstance(request_pb.cluster, data_pb2.Cluster)) - 
self.assertEqual(request_pb.cluster.display_name, display_name) - self.assertEqual(request_pb.cluster.serve_nodes, serve_nodes) + + self.assertEqual(request_pb.cluster_id, CLUSTER_ID) + self.assertEqual(request_pb.parent, instance.name) + self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES) class Test__parse_pb_any_to_native(unittest2.TestCase): @@ -720,16 +510,16 @@ def _callFUT(self, any_val, expected_type=None): def test_with_known_type_url(self): from google.protobuf import any_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable import cluster as MUT - type_url = 'type.googleapis.com/' + data_pb2._CELL.full_name - fake_type_url_map = {type_url: data_pb2.Cell} - - cell = data_pb2.Cell( + cell = _CellPB( timestamp_micros=0, value=b'foobar', ) + + type_url = 'type.googleapis.com/' + cell.DESCRIPTOR.full_name + fake_type_url_map = {type_url: cell.__class__} + any_val = any_pb2.Any( type_url=type_url, value=cell.SerializeToString(), @@ -739,85 +529,6 @@ def test_with_known_type_url(self): self.assertEqual(result, cell) - def test_with_create_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - - type_url = ('type.googleapis.com/' + - messages_pb2._CREATECLUSTERMETADATA.full_name) - metadata = messages_pb2.CreateClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - original_request=messages_pb2.CreateClusterRequest( - name='foo', - cluster_id='bar', - cluster=data_pb2.Cluster( - display_name='quux', - serve_nodes=1337, - ), - ), - ) - - any_val = any_pb2.Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - result = self._callFUT(any_val) - self.assertEqual(result, metadata) - - def test_with_update_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - - type_url = ('type.googleapis.com/' + - messages_pb2._UPDATECLUSTERMETADATA.full_name) - metadata = messages_pb2.UpdateClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - cancel_time=Timestamp(seconds=100, nanos=76543), - original_request=data_pb2.Cluster( - display_name='the-end', - serve_nodes=42, - ), - ) - - any_val = any_pb2.Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - result = self._callFUT(any_val) - self.assertEqual(result, metadata) - - def test_with_undelete_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - - type_url = ('type.googleapis.com/' + - messages_pb2._UNDELETECLUSTERMETADATA.full_name) - metadata = messages_pb2.UndeleteClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - ) - - any_val = any_pb2.Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - 
result = self._callFUT(any_val) - self.assertEqual(result, metadata) - def test_unknown_type_url(self): from google.protobuf import any_pb2 from gcloud._testing import _Monkey @@ -851,58 +562,72 @@ def _callFUT(self, operation_pb): def test_it(self): from google.longrunning import operations_pb2 - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - from gcloud.bigtable import cluster as MUT - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - expected_operation_id = 234 - operation_name = ('operations/projects/%s/zones/%s/clusters/%s/' - 'operations/%d' % (project, zone, cluster_id, - expected_operation_id)) - - current_op = operations_pb2.Operation(name=operation_name) - # Create mocks. - request_metadata = messages_pb2.CreateClusterMetadata() - parse_pb_any_called = [] + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + EXPECTED_OPERATION_ID = 234 + OPERATION_NAME = ( + 'operations/projects/%s/instances/%s/clusters/%s/operations/%d' % + (PROJECT, INSTANCE_ID, CLUSTER_ID, EXPECTED_OPERATION_ID)) - def mock_parse_pb_any_to_native(any_val, expected_type=None): - parse_pb_any_called.append((any_val, expected_type)) - return request_metadata - - expected_operation_begin = object() - ts_to_dt_called = [] - - def mock_pb_timestamp_to_datetime(timestamp): - ts_to_dt_called.append(timestamp) - return expected_operation_begin + operation_pb = operations_pb2.Operation(name=OPERATION_NAME) # Execute method with mocks in place. - with _Monkey(MUT, _parse_pb_any_to_native=mock_parse_pb_any_to_native, - _pb_timestamp_to_datetime=mock_pb_timestamp_to_datetime): - operation_id, operation_begin = self._callFUT(current_op) + operation_id = self._callFUT(operation_pb) # Check outputs. - self.assertEqual(operation_id, expected_operation_id) - self.assertTrue(operation_begin is expected_operation_begin) - - # Check mocks were used correctly.
- self.assertEqual(parse_pb_any_called, [(current_op.metadata, None)]) - self.assertEqual(ts_to_dt_called, [request_metadata.request_time]) + self.assertEqual(operation_id, EXPECTED_OPERATION_ID) def test_op_name_parsing_failure(self): from google.longrunning import operations_pb2 - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - current_op = operations_pb2.Operation(name='invalid') - cluster = data_pb2.Cluster(current_operation=current_op) + operation_pb = operations_pb2.Operation(name='invalid') with self.assertRaises(ValueError): - self._callFUT(cluster) + self._callFUT(operation_pb) + + +def _CellPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Cell(*args, **kw) + + +def _ClusterPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as instance_v2_pb2) + return instance_v2_pb2.Cluster(*args, **kw) + + +def _DeleteClusterRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + return messages_v2_pb2.DeleteClusterRequest(*args, **kw) + + +def _GetClusterRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + return messages_v2_pb2.GetClusterRequest(*args, **kw) + + +class _Instance(object): + + def __init__(self, instance_id, client): + self.instance_id = instance_id + self._client = client + + @property + def name(self): + return 'projects/%s/instances/%s' % ( + self._client.project, self.instance_id) + + def copy(self): + return self.__class__(self.instance_id, self._client) + + def __eq__(self, other): + return (other.instance_id == self.instance_id and + other._client == self._client) class _Client(object): @@ -912,10 +637,6 @@ def __init__(self, project, timeout_seconds=None): self.project_name = 'projects/' + self.project self.timeout_seconds = timeout_seconds - def copy(self): - from copy import deepcopy - return deepcopy(self) - def __eq__(self, other): return (other.project == self.project and other.project_name == self.project_name and diff --git a/gcloud/bigtable/test_column_family.py b/gcloud/bigtable/test_column_family.py index 139a959e0a7b..d9deaf841fa0 100644 --- a/gcloud/bigtable/test_column_family.py +++ b/gcloud/bigtable/test_column_family.py @@ -107,13 +107,11 @@ def test___ne__same_value(self): self.assertFalse(comparison_val) def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) max_num_versions = 1337 gc_rule = self._makeOne(max_num_versions=max_num_versions) pb_val = gc_rule.to_pb() - self.assertEqual(pb_val, - data_pb2.GcRule(max_num_versions=max_num_versions)) + expected = _GcRulePB(max_num_versions=max_num_versions) + self.assertEqual(pb_val, expected) class TestMaxAgeGCRule(unittest2.TestCase): @@ -147,14 +145,12 @@ def test___ne__same_value(self): def test_to_pb(self): import datetime from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) max_age = datetime.timedelta(seconds=1) duration = duration_pb2.Duration(seconds=1) gc_rule = self._makeOne(max_age=max_age) pb_val = gc_rule.to_pb() - self.assertEqual(pb_val, data_pb2.GcRule(max_age=duration)) + self.assertEqual(pb_val, _GcRulePB(max_age=duration)) class TestGCRuleUnion(unittest2.TestCase): @@ -193,22 +189,21 @@ def test___ne__same_value(self): def test_to_pb(self): import datetime from google.protobuf import duration_pb2 - from 
gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) from gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import MaxVersionsGCRule max_num_versions = 42 rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = _GcRulePB( + max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_pb2.GcRule( - union=data_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2])) + pb_rule3 = _GcRulePB( + union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) gc_rule_pb = rule3.to_pb() self.assertEqual(gc_rule_pb, pb_rule3) @@ -216,30 +211,29 @@ def test_to_pb(self): def test_to_pb_nested(self): import datetime from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) from gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import MaxVersionsGCRule max_num_versions1 = 42 rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions1) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = _GcRulePB( + max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_pb2.GcRule( - union=data_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2])) + pb_rule3 = _GcRulePB( + union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) max_num_versions2 = 1337 rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = data_pb2.GcRule(max_num_versions=max_num_versions2) + pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) rule5 = self._makeOne(rules=[rule3, rule4]) - pb_rule5 = data_pb2.GcRule( - union=data_pb2.GcRule.Union(rules=[pb_rule3, pb_rule4])) + pb_rule5 = _GcRulePB( + union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4])) gc_rule_pb = rule5.to_pb() self.assertEqual(gc_rule_pb, pb_rule5) @@ -281,22 +275,21 @@ def test___ne__same_value(self): def test_to_pb(self): import datetime from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) from gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import MaxVersionsGCRule max_num_versions = 42 rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = _GcRulePB( + max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_pb2.GcRule( - intersection=data_pb2.GcRule.Intersection( + pb_rule3 = _GcRulePB( + intersection=_GcRuleIntersectionPB( rules=[pb_rule1, pb_rule2])) gc_rule_pb = rule3.to_pb() @@ -305,31 +298,30 @@ def test_to_pb(self): def test_to_pb_nested(self): import datetime from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) from gcloud.bigtable.column_family import MaxAgeGCRule from gcloud.bigtable.column_family import 
MaxVersionsGCRule max_num_versions1 = 42 rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions1) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = _GcRulePB( + max_age=duration_pb2.Duration(seconds=1)) rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_pb2.GcRule( - intersection=data_pb2.GcRule.Intersection( + pb_rule3 = _GcRulePB( + intersection=_GcRuleIntersectionPB( rules=[pb_rule1, pb_rule2])) max_num_versions2 = 1337 rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = data_pb2.GcRule(max_num_versions=max_num_versions2) + pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) rule5 = self._makeOne(rules=[rule3, rule4]) - pb_rule5 = data_pb2.GcRule( - intersection=data_pb2.GcRule.Intersection( + pb_rule5 = _GcRulePB( + intersection=_GcRuleIntersectionPB( rules=[pb_rule3, pb_rule4])) gc_rule_pb = rule5.to_pb() @@ -349,7 +341,8 @@ def test_constructor(self): column_family_id = u'column-family-id' table = object() gc_rule = object() - column_family = self._makeOne(column_family_id, table, gc_rule=gc_rule) + column_family = self._makeOne( + column_family_id, table, gc_rule=gc_rule) self.assertEqual(column_family.column_family_id, column_family_id) self.assertTrue(column_family._table is table) @@ -396,10 +389,8 @@ def test___ne__(self): self.assertNotEqual(column_family1, column_family2) def _create_test_helper(self, gc_rule=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -413,21 +404,23 @@ def _create_test_helper(self, gc_rule=None): client = _Client(timeout_seconds=timeout_seconds) table = _Table(table_name, client=client) - column_family = self._makeOne(column_family_id, table, gc_rule=gc_rule) + column_family = self._makeOne( + column_family_id, table, gc_rule=gc_rule) # Create request_pb if gc_rule is None: - column_family_pb = data_pb2.ColumnFamily() + column_family_pb = _ColumnFamilyPB() else: - column_family_pb = data_pb2.ColumnFamily(gc_rule=gc_rule.to_pb()) - request_pb = messages_pb2.CreateColumnFamilyRequest( - name=table_name, - column_family_id=column_family_id, - column_family=column_family_pb, + column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=table_name) + request_pb.modifications.add( + id=column_family_id, + create=column_family_pb, ) # Create response_pb - response_pb = data_pb2.ColumnFamily() + response_pb = _ColumnFamilyPB() # Patch the stub used by the API method. 
client._table_stub = stub = _FakeStub(response_pb) @@ -441,7 +434,7 @@ def _create_test_helper(self, gc_rule=None): self.assertEqual(stub.results, ()) self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( - 'CreateColumnFamily', + 'ModifyColumnFamilies', (request_pb, timeout_seconds), {}, )]) @@ -455,9 +448,9 @@ def test_create_with_gc_rule(self): self._create_test_helper(gc_rule=gc_rule) def _update_test_helper(self, gc_rule=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) from gcloud.bigtable._testing import _FakeStub + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) project_id = 'project-id' zone = 'zone' @@ -467,23 +460,26 @@ def _update_test_helper(self, gc_rule=None): timeout_seconds = 28 table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - column_family_name = table_name + '/columnFamilies/' + column_family_id client = _Client(timeout_seconds=timeout_seconds) table = _Table(table_name, client=client) - column_family = self._makeOne(column_family_id, table, gc_rule=gc_rule) + column_family = self._makeOne( + column_family_id, table, gc_rule=gc_rule) # Create request_pb if gc_rule is None: - request_pb = data_pb2.ColumnFamily(name=column_family_name) + column_family_pb = _ColumnFamilyPB() else: - request_pb = data_pb2.ColumnFamily( - name=column_family_name, - gc_rule=gc_rule.to_pb(), - ) + column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=table_name) + request_pb.modifications.add( + id=column_family_id, + update=column_family_pb, + ) # Create response_pb - response_pb = data_pb2.ColumnFamily() + response_pb = _ColumnFamilyPB() # Patch the stub used by the API method. 
client._table_stub = stub = _FakeStub(response_pb) @@ -497,7 +493,7 @@ def _update_test_helper(self, gc_rule=None): self.assertEqual(stub.results, ()) self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( - 'UpdateColumnFamily', + 'ModifyColumnFamilies', (request_pb, timeout_seconds), {}, )]) @@ -512,8 +508,8 @@ def test_update_with_gc_rule(self): def test_delete(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) from gcloud.bigtable._testing import _FakeStub project_id = 'project-id' @@ -524,15 +520,17 @@ def test_delete(self): timeout_seconds = 7 table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - column_family_name = table_name + '/columnFamilies/' + column_family_id client = _Client(timeout_seconds=timeout_seconds) table = _Table(table_name, client=client) column_family = self._makeOne(column_family_id, table) # Create request_pb - request_pb = messages_pb2.DeleteColumnFamilyRequest( - name=column_family_name) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=table_name) + request_pb.modifications.add( + id=column_family_id, + drop=True) # Create response_pb response_pb = empty_pb2.Empty() @@ -549,7 +547,7 @@ def test_delete(self): self.assertEqual(stub.results, ()) self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( - 'DeleteColumnFamily', + 'ModifyColumnFamilies', (request_pb, timeout_seconds), {}, )]) @@ -562,10 +560,8 @@ def _callFUT(self, *args, **kwargs): return _gc_rule_from_pb(*args, **kwargs) def test_empty(self): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - gc_rule_pb = data_pb2.GcRule() + gc_rule_pb = _GcRulePB() self.assertEqual(self._callFUT(gc_rule_pb), None) def test_max_num_versions(self): @@ -630,7 +626,31 @@ def WhichOneof(cls, name): self.assertEqual(MockProto.names, ['rule']) -class _Cluster(object): +def _GcRulePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule(*args, **kw) + + +def _GcRuleIntersectionPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule.Intersection(*args, **kw) + + +def _GcRuleUnionPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule.Union(*args, **kw) + + +def _ColumnFamilyPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.ColumnFamily(*args, **kw) + + +class _Instance(object): def __init__(self, client=None): self._client = client @@ -646,4 +666,4 @@ class _Table(object): def __init__(self, name, client=None): self.name = name - self._cluster = _Cluster(client) + self._instance = _Instance(client) diff --git a/gcloud/bigtable/test_instance.py b/gcloud/bigtable/test_instance.py new file mode 100644 index 000000000000..da8827685292 --- /dev/null +++ b/gcloud/bigtable/test_instance.py @@ -0,0 +1,866 @@ +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import datetime +import unittest2 + + +class TestOperation(unittest2.TestCase): + + OP_TYPE = 'fake-op' + OP_ID = 8915 + BEGIN = datetime.datetime(2015, 10, 22, 1, 1) + LOCATION_ID = 'loc-id' + + def _getTargetClass(self): + from gcloud.bigtable.instance import Operation + return Operation + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def _constructor_test_helper(self, instance=None): + operation = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + instance=instance) + + self.assertEqual(operation.op_type, self.OP_TYPE) + self.assertEqual(operation.op_id, self.OP_ID) + self.assertEqual(operation.begin, self.BEGIN) + self.assertEqual(operation.location_id, self.LOCATION_ID) + self.assertEqual(operation._instance, instance) + self.assertFalse(operation._complete) + + def test_constructor_defaults(self): + self._constructor_test_helper() + + def test_constructor_explicit_instance(self): + instance = object() + self._constructor_test_helper(instance=instance) + + def test___eq__(self): + instance = object() + operation1 = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + instance=instance) + operation2 = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + instance=instance) + self.assertEqual(operation1, operation2) + + def test___eq__type_differ(self): + operation1 = self._makeOne('foo', 123, None, self.LOCATION_ID) + operation2 = object() + self.assertNotEqual(operation1, operation2) + + def test___ne__same_value(self): + instance = object() + operation1 = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + instance=instance) + operation2 = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + instance=instance) + comparison_val = (operation1 != operation2) + self.assertFalse(comparison_val) + + def test___ne__(self): + operation1 = self._makeOne('foo', 123, None, self.LOCATION_ID) + operation2 = self._makeOne('bar', 456, None, self.LOCATION_ID) + self.assertNotEqual(operation1, operation2) + + def test_finished_without_operation(self): + operation = self._makeOne(None, None, None, None) + operation._complete = True + with self.assertRaises(ValueError): + operation.finished() + + def _finished_helper(self, done): + from google.longrunning import operations_pb2 + from gcloud.bigtable._testing import _FakeStub + from gcloud.bigtable.instance import Instance + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + TIMEOUT_SECONDS = 1 + + client = _Client(PROJECT, timeout_seconds=TIMEOUT_SECONDS) + instance = Instance(INSTANCE_ID, client, self.LOCATION_ID) + operation = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID, + instance=instance) + + # Create request_pb + op_name = ('operations/projects/' + PROJECT + + '/instances/' + INSTANCE_ID + + '/locations/' + self.LOCATION_ID + + '/operations/%d' % (self.OP_ID,)) + request_pb = operations_pb2.GetOperationRequest(name=op_name) + + # Create response_pb + response_pb = operations_pb2.Operation(done=done) + + # Patch the stub used by the API method. 
+ client._operations_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = done + + # Perform the method and check the result. + result = operation.finished() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'GetOperation', + (request_pb, TIMEOUT_SECONDS), + {}, + )]) + + if done: + self.assertTrue(operation._complete) + else: + self.assertFalse(operation._complete) + + def test_finished(self): + self._finished_helper(done=True) + + def test_finished_not_done(self): + self._finished_helper(done=False) + + +class TestInstance(unittest2.TestCase): + + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + INSTANCE_NAME = 'projects/' + PROJECT + '/instances/' + INSTANCE_ID + LOCATION_ID = 'locname' + LOCATION = 'projects/' + PROJECT + '/locations/' + LOCATION_ID + DISPLAY_NAME = 'display_name' + OP_ID = 8915 + OP_NAME = ('operations/projects/%s/instances/%soperations/%d' % + (PROJECT, INSTANCE_ID, OP_ID)) + TABLE_ID = 'table_id' + TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID + TIMEOUT_SECONDS = 1 + + def _getTargetClass(self): + from gcloud.bigtable.instance import Instance + return Instance + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor_defaults(self): + from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES + + client = object() + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance.display_name, self.INSTANCE_ID) + self.assertTrue(instance._client is client) + self.assertEqual(instance._cluster_location_id, self.LOCATION_ID) + self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES) + + def test_constructor_non_default(self): + display_name = 'display_name' + client = object() + + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + display_name=display_name) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance.display_name, display_name) + self.assertTrue(instance._client is client) + + def test_copy(self): + display_name = 'display_name' + + client = _Client(self.PROJECT) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + display_name=display_name) + new_instance = instance.copy() + + # Make sure the client copy succeeded. + self.assertFalse(new_instance._client is client) + self.assertEqual(new_instance._client, client) + # Make sure the client got copied to a new instance. 
+ self.assertFalse(instance is new_instance) + self.assertEqual(instance, new_instance) + + def test_table_factory(self): + from gcloud.bigtable.table import Table + + instance = self._makeOne(self.INSTANCE_ID, None, self.LOCATION_ID) + + table = instance.table(self.TABLE_ID) + self.assertTrue(isinstance(table, Table)) + self.assertEqual(table.table_id, self.TABLE_ID) + self.assertEqual(table._instance, instance) + + def test__update_from_pb_success(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + display_name = 'display_name' + instance_pb = data_v2_pb2.Instance( + display_name=display_name, + ) + + instance = self._makeOne(None, None, None, None) + self.assertEqual(instance.display_name, None) + instance._update_from_pb(instance_pb) + self.assertEqual(instance.display_name, display_name) + + def test__update_from_pb_no_display_name(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + instance_pb = data_v2_pb2.Instance() + instance = self._makeOne(None, None, None, None) + self.assertEqual(instance.display_name, None) + with self.assertRaises(ValueError): + instance._update_from_pb(instance_pb) + self.assertEqual(instance.display_name, None) + + def test_from_pb_success(self): + from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + client = _Client(project=self.PROJECT) + + instance_pb = data_v2_pb2.Instance( + name=self.INSTANCE_NAME, + display_name=self.INSTANCE_ID, + ) + + klass = self._getTargetClass() + instance = klass.from_pb(instance_pb, client) + self.assertTrue(isinstance(instance, klass)) + self.assertEqual(instance._client, client) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance._cluster_location_id, + _EXISTING_INSTANCE_LOCATION_ID) + + def test_from_pb_bad_instance_name(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + instance_name = 'INCORRECT_FORMAT' + instance_pb = data_v2_pb2.Instance(name=instance_name) + + klass = self._getTargetClass() + with self.assertRaises(ValueError): + klass.from_pb(instance_pb, None) + + def test_from_pb_project_mistmatch(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + ALT_PROJECT = 'ALT_PROJECT' + client = _Client(project=ALT_PROJECT) + + self.assertNotEqual(self.PROJECT, ALT_PROJECT) + + instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME) + + klass = self._getTargetClass() + with self.assertRaises(ValueError): + klass.from_pb(instance_pb, client) + + def test_name_property(self): + client = _Client(project=self.PROJECT) + + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + self.assertEqual(instance.name, self.INSTANCE_NAME) + + def test___eq__(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + self.assertEqual(instance1, instance2) + + def test___eq__type_differ(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance2 = object() + self.assertNotEqual(instance1, instance2) + + def test___ne__same_value(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + comparison_val = (instance1 != instance2) + self.assertFalse(comparison_val) + + def 
test___ne__(self): + instance1 = self._makeOne('instance_id1', 'client1', self.LOCATION_ID) + instance2 = self._makeOne('instance_id2', 'client2', self.LOCATION_ID) + self.assertNotEqual(instance1, instance2) + + def test_reload(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Create request_pb + request_pb = messages_v2_pb.GetInstanceRequest( + name=self.INSTANCE_NAME) + + # Create response_pb + DISPLAY_NAME = u'hey-hi-hello' + response_pb = data_v2_pb2.Instance( + display_name=DISPLAY_NAME, + ) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None  # reload() has no return value. + + # Check Instance optional config values before. + self.assertEqual(instance.display_name, self.INSTANCE_ID) + + # Perform the method and check the result. + result = instance.reload() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'GetInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + # Check Instance optional config values after. + self.assertEqual(instance.display_name, DISPLAY_NAME) + + def test_create(self): + from google.longrunning import operations_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable._testing import _FakeStub + from gcloud.bigtable import instance as MUT + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Create request_pb. Just a mock since we monkey patch + # _prepare_create_request + request_pb = object() + + # Create response_pb + OP_BEGIN = object() + response_pb = operations_pb2.Operation(name=self.OP_NAME) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = MUT.Operation('create', self.OP_ID, OP_BEGIN, + self.LOCATION_ID, instance=instance) + + # Create the mocks. + prep_create_called = [] + + def mock_prep_create_req(instance): + prep_create_called.append(instance) + return request_pb + + process_operation_called = [] + + def mock_process_operation(operation_pb): + process_operation_called.append(operation_pb) + return self.OP_ID, self.LOCATION_ID, OP_BEGIN + + # Perform the method and check the result. + with _Monkey(MUT, + _prepare_create_request=mock_prep_create_req, + _process_operation=mock_process_operation): + result = instance.create() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'CreateInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + self.assertEqual(prep_create_called, [instance]) + self.assertEqual(process_operation_called, [response_pb]) + + def test_create_w_explicit_serve_nodes(self): + from google.longrunning import operations_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable._testing import _FakeStub + from gcloud.bigtable import instance as MUT + + SERVE_NODES = 5 + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + serve_nodes=SERVE_NODES) + + # Create request_pb. 
Just a mock since we monkey patch + # _prepare_create_request + request_pb = object() + + # Create response_pb + OP_BEGIN = object() + response_pb = operations_pb2.Operation(name=self.OP_NAME) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = MUT.Operation('create', self.OP_ID, OP_BEGIN, + self.LOCATION_ID, instance=instance) + + # Create the mocks. + prep_create_called = [] + + def mock_prep_create_req(instance): + prep_create_called.append(instance) + return request_pb + + process_operation_called = [] + + def mock_process_operation(operation_pb): + process_operation_called.append(operation_pb) + return self.OP_ID, self.LOCATION_ID, OP_BEGIN + + # Perform the method and check the result. + with _Monkey(MUT, + _prepare_create_request=mock_prep_create_req, + _process_operation=mock_process_operation): + result = instance.create() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'CreateInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + self.assertEqual(prep_create_called, [instance]) + self.assertEqual(process_operation_called, [response_pb]) + + def test_update(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + display_name=self.DISPLAY_NAME) + + # Create request_pb + request_pb = data_v2_pb2.Instance( + name=self.INSTANCE_NAME, + display_name=self.DISPLAY_NAME, + ) + + # Create response_pb + response_pb = data_v2_pb2.Instance() + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None + + # Perform the method and check the result. + result = instance.update() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'UpdateInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def test_delete(self): + from google.protobuf import empty_pb2 + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Create request_pb + request_pb = messages_v2_pb.DeleteInstanceRequest( + name=self.INSTANCE_NAME) + + # Create response_pb + response_pb = empty_pb2.Empty() + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # delete() has no return value. + + # Perform the method and check the result. 
+ result = instance.delete() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'DeleteInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def test_list_clusters(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as instance_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from gcloud.bigtable._testing import _FakeStub + + FAILED_LOCATION = 'FAILED' + FAILED_LOCATIONS = [FAILED_LOCATION] + CLUSTER_ID1 = 'cluster-id1' + CLUSTER_ID2 = 'cluster-id2' + SERVE_NODES = 4 + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + + CLUSTER_NAME1 = (instance.name + '/clusters/' + CLUSTER_ID1) + CLUSTER_NAME2 = (instance.name + '/clusters/' + CLUSTER_ID2) + # Create request_pb + request_pb = messages_v2_pb2.ListClustersRequest( + parent=instance.name, + ) + + # Create response_pb + response_pb = messages_v2_pb2.ListClustersResponse( + failed_locations=[FAILED_LOCATION], + clusters=[ + instance_v2_pb2.Cluster( + name=CLUSTER_NAME1, + serve_nodes=SERVE_NODES, + ), + instance_v2_pb2.Cluster( + name=CLUSTER_NAME2, + serve_nodes=SERVE_NODES, + ), + ], + ) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + clusters = [ + instance.cluster(CLUSTER_ID1), + instance.cluster(CLUSTER_ID2), + ] + expected_result = (clusters, FAILED_LOCATIONS) + + # Perform the method and check the result. + result = instance.list_clusters() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ListClusters', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def _list_tables_helper(self, table_name=None): + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_messages_v1_pb2) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Create request_ + request_pb = table_messages_v1_pb2.ListTablesRequest( + parent=self.INSTANCE_NAME) + + # Create response_pb + if table_name is None: + table_name = self.TABLE_NAME + + response_pb = table_messages_v1_pb2.ListTablesResponse( + tables=[ + table_data_v2_pb2.Table(name=table_name), + ], + ) + + # Patch the stub used by the API method. + client._table_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_table = instance.table(self.TABLE_ID) + expected_result = [expected_table] + + # Perform the method and check the result. 
+ result = instance.list_tables() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ListTables', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def test_list_tables(self): + self._list_tables_helper() + + def test_list_tables_failure_bad_split(self): + with self.assertRaises(ValueError): + self._list_tables_helper(table_name='wrong-format') + + def test_list_tables_failure_name_bad_before(self): + BAD_TABLE_NAME = ('nonempty-section-before' + + 'projects/' + self.PROJECT + + '/instances/' + self.INSTANCE_ID + + '/tables/' + self.TABLE_ID) + with self.assertRaises(ValueError): + self._list_tables_helper(table_name=BAD_TABLE_NAME) + + +class Test__prepare_create_request(unittest2.TestCase): + PROJECT = 'PROJECT' + PARENT = 'projects/' + PROJECT + LOCATION_ID = 'locname' + LOCATION_NAME = 'projects/' + PROJECT + '/locations/' + LOCATION_ID + INSTANCE_ID = 'instance-id' + INSTANCE_NAME = PARENT + '/instances/' + INSTANCE_ID + CLUSTER_NAME = INSTANCE_NAME + '/clusters/' + INSTANCE_ID + + def _callFUT(self, instance, **kw): + from gcloud.bigtable.instance import _prepare_create_request + return _prepare_create_request(instance, **kw) + + def test_w_defaults(self): + from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable.instance import Instance + + client = _Client(self.PROJECT) + + instance = Instance(self.INSTANCE_ID, client, self.LOCATION_ID) + request_pb = self._callFUT(instance) + self.assertTrue(isinstance(request_pb, + messages_v2_pb.CreateInstanceRequest)) + self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) + self.assertEqual(request_pb.parent, self.PARENT) + self.assertTrue(isinstance(request_pb.instance, data_v2_pb2.Instance)) + self.assertEqual(request_pb.instance.name, u'') + self.assertEqual(request_pb.instance.display_name, self.INSTANCE_ID) + + # An instance must also define a same-named cluster + cluster = request_pb.clusters[self.INSTANCE_ID] + self.assertTrue(isinstance(cluster, data_v2_pb2.Cluster)) + self.assertEqual(cluster.name, self.CLUSTER_NAME) + self.assertEqual(cluster.location, self.LOCATION_NAME) + self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + + def test_w_explicit_serve_nodes(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable.instance import Instance + DISPLAY_NAME = u'DISPLAY_NAME' + SERVE_NODES = 5 + client = _Client(self.PROJECT) + instance = Instance(self.INSTANCE_ID, client, self.LOCATION_ID, + display_name=DISPLAY_NAME, + serve_nodes=SERVE_NODES) + + request_pb = self._callFUT(instance) + + self.assertTrue(isinstance(request_pb, + messages_v2_pb.CreateInstanceRequest)) + self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) + self.assertEqual(request_pb.parent, + 'projects/' + self.PROJECT) + self.assertTrue(isinstance(request_pb.instance, data_v2_pb2.Instance)) + self.assertEqual(request_pb.instance.display_name, DISPLAY_NAME) + # An instance must also define a same-named cluster + cluster = request_pb.clusters[self.INSTANCE_ID] + self.assertTrue(isinstance(cluster, data_v2_pb2.Cluster)) + self.assertEqual(cluster.location, self.LOCATION_NAME) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + + +class 
Test__parse_pb_any_to_native(unittest2.TestCase): + + def _callFUT(self, any_val, expected_type=None): + from gcloud.bigtable.instance import _parse_pb_any_to_native + return _parse_pb_any_to_native(any_val, expected_type=expected_type) + + def test_with_known_type_url(self): + from google.protobuf import any_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + from gcloud.bigtable import instance as MUT + + TYPE_URL = 'type.googleapis.com/' + data_v2_pb2._CELL.full_name + fake_type_url_map = {TYPE_URL: data_v2_pb2.Cell} + + cell = data_v2_pb2.Cell( + timestamp_micros=0, + value=b'foobar', + ) + any_val = any_pb2.Any( + type_url=TYPE_URL, + value=cell.SerializeToString(), + ) + with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map): + result = self._callFUT(any_val) + + self.assertEqual(result, cell) + + def test_with_create_instance_metadata(self): + from google.protobuf import any_pb2 + from google.protobuf.timestamp_pb2 import Timestamp + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + + TYPE_URL = ('type.googleapis.com/' + + messages_v2_pb._CREATEINSTANCEMETADATA.full_name) + metadata = messages_v2_pb.CreateInstanceMetadata( + request_time=Timestamp(seconds=1, nanos=1234), + finish_time=Timestamp(seconds=10, nanos=891011), + original_request=messages_v2_pb.CreateInstanceRequest( + parent='foo', + instance_id='bar', + instance=data_v2_pb2.Instance( + display_name='quux', + ), + ), + ) + + any_val = any_pb2.Any( + type_url=TYPE_URL, + value=metadata.SerializeToString(), + ) + result = self._callFUT(any_val) + self.assertEqual(result, metadata) + + def test_unknown_type_url(self): + from google.protobuf import any_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable import instance as MUT + + fake_type_url_map = {} + any_val = any_pb2.Any() + with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map): + with self.assertRaises(KeyError): + self._callFUT(any_val) + + def test_disagreeing_type_url(self): + from google.protobuf import any_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable import instance as MUT + + TYPE_URL1 = 'foo' + TYPE_URL2 = 'bar' + fake_type_url_map = {TYPE_URL1: None} + any_val = any_pb2.Any(type_url=TYPE_URL2) + with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map): + with self.assertRaises(ValueError): + self._callFUT(any_val, expected_type=TYPE_URL1) + + +class Test__process_operation(unittest2.TestCase): + + def _callFUT(self, operation_pb): + from gcloud.bigtable.instance import _process_operation + return _process_operation(operation_pb) + + def test_it(self): + from google.longrunning import operations_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable import instance as MUT + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + LOCATION_ID = 'location' + OP_ID = 234 + OPERATION_NAME = ( + 'operations/projects/%s/instances/%s/locations/%s/operations/%d' % + (PROJECT, INSTANCE_ID, LOCATION_ID, OP_ID)) + + current_op = operations_pb2.Operation(name=OPERATION_NAME) + + # Create mocks. 
+ request_metadata = messages_v2_pb.CreateInstanceMetadata() + parse_pb_any_called = [] + + def mock_parse_pb_any_to_native(any_val, expected_type=None): + parse_pb_any_called.append((any_val, expected_type)) + return request_metadata + + expected_operation_begin = object() + ts_to_dt_called = [] + + def mock_pb_timestamp_to_datetime(timestamp): + ts_to_dt_called.append(timestamp) + return expected_operation_begin + + # Execute method with mocks in place. + with _Monkey(MUT, _parse_pb_any_to_native=mock_parse_pb_any_to_native, + _pb_timestamp_to_datetime=mock_pb_timestamp_to_datetime): + op_id, loc_id, op_begin = self._callFUT(current_op) + + # Check outputs. + self.assertEqual(op_id, OP_ID) + self.assertTrue(op_begin is expected_operation_begin) + self.assertEqual(loc_id, LOCATION_ID) + + # Check mocks were used correctly. + self.assertEqual(parse_pb_any_called, [(current_op.metadata, None)]) + self.assertEqual(ts_to_dt_called, [request_metadata.request_time]) + + def test_op_name_parsing_failure(self): + from google.longrunning import operations_pb2 + + operation_pb = operations_pb2.Operation(name='invalid') + with self.assertRaises(ValueError): + self._callFUT(operation_pb) + + +class _Client(object): + + def __init__(self, project, timeout_seconds=None): + self.project = project + self.project_name = 'projects/' + self.project + self.timeout_seconds = timeout_seconds + + def copy(self): + from copy import deepcopy + return deepcopy(self) + + def __eq__(self, other): + return (other.project == self.project and + other.project_name == self.project_name and + other.timeout_seconds == self.timeout_seconds) diff --git a/gcloud/bigtable/test_row.py b/gcloud/bigtable/test_row.py index 9e6da708e6b6..ff18945acafb 100644 --- a/gcloud/bigtable/test_row.py +++ b/gcloud/bigtable/test_row.py @@ -75,8 +75,6 @@ def _set_cell_helper(self, column=None, column_bytes=None, timestamp_micros=-1): import six import struct - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - row_key = b'row_key' column_family_id = u'column_family_id' if column is None: @@ -89,8 +87,8 @@ def _set_cell_helper(self, column=None, column_bytes=None, if isinstance(value, six.integer_types): value = struct.pack('>q', value) - expected_pb = data_pb2.Mutation( - set_cell=data_pb2.Mutation.SetCell( + expected_pb = _MutationPB( + set_cell=_MutationSetCellPB( family_name=column_family_id, column_qualifier=column_bytes or column, timestamp_micros=timestamp_micros, @@ -134,15 +132,13 @@ def test_set_cell_with_non_null_timestamp(self): timestamp_micros=millis_granularity) def test_delete(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - row_key = b'row_key' row = self._makeOne(row_key, object()) self.assertEqual(row._pb_mutations, []) row.delete() - expected_pb = data_pb2.Mutation( - delete_from_row=data_pb2.Mutation.DeleteFromRow(), + expected_pb = _MutationPB( + delete_from_row=_MutationDeleteFromRowPB(), ) self.assertEqual(row._pb_mutations, [expected_pb]) @@ -193,8 +189,6 @@ def test_delete_cells_non_iterable(self): row.delete_cells(column_family_id, columns) def test_delete_cells_all_columns(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - row_key = b'row_key' column_family_id = u'column_family_id' table = object() @@ -204,8 +198,8 @@ def test_delete_cells_all_columns(self): self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, klass.ALL_COLUMNS) - expected_pb = data_pb2.Mutation( - 
delete_from_family=data_pb2.Mutation.DeleteFromFamily( + expected_pb = _MutationPB( + delete_from_family=_MutationDeleteFromFamilyPB( family_name=column_family_id, ), ) @@ -223,8 +217,6 @@ def test_delete_cells_no_columns(self): self.assertEqual(row._pb_mutations, []) def _delete_cells_helper(self, time_range=None): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - row_key = b'row_key' column = b'column' column_family_id = u'column_family_id' @@ -235,8 +227,8 @@ def _delete_cells_helper(self, time_range=None): self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, columns, time_range=time_range) - expected_pb = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( + expected_pb = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( family_name=column_family_id, column_qualifier=column, ), @@ -275,8 +267,6 @@ def test_delete_cells_with_bad_column(self): self.assertEqual(row._pb_mutations, []) def test_delete_cells_with_string_columns(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - row_key = b'row_key' column_family_id = u'column_family_id' column1 = u'column1' @@ -290,14 +280,14 @@ def test_delete_cells_with_string_columns(self): self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, columns) - expected_pb1 = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( + expected_pb1 = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( family_name=column_family_id, column_qualifier=column1_bytes, ), ) - expected_pb2 = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( + expected_pb2 = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( family_name=column_family_id, column_qualifier=column2_bytes, ), @@ -306,9 +296,6 @@ def test_delete_cells_with_string_columns(self): def test_commit(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) from gcloud.bigtable._testing import _FakeStub row_key = b'row_key' @@ -322,15 +309,15 @@ def test_commit(self): # Create request_pb value = b'bytes-value' - mutation = data_pb2.Mutation( - set_cell=data_pb2.Mutation.SetCell( + mutation = _MutationPB( + set_cell=_MutationSetCellPB( family_name=column_family_id, column_qualifier=column, timestamp_micros=-1, # Default value. value=value, ), ) - request_pb = messages_pb2.MutateRowRequest( + request_pb = _MutateRowRequestPB( table_name=table_name, row_key=row_key, mutations=[mutation], @@ -421,9 +408,6 @@ def test__get_mutations(self): self.assertTrue(false_mutations is row._get_mutations(None)) def test_commit(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable.row_filters import RowSampleFilter @@ -442,29 +426,29 @@ def test_commit(self): # Create request_pb value1 = b'bytes-value' - mutation1 = data_pb2.Mutation( - set_cell=data_pb2.Mutation.SetCell( + mutation1 = _MutationPB( + set_cell=_MutationSetCellPB( family_name=column_family_id1, column_qualifier=column1, timestamp_micros=-1, # Default value. 
value=value1, ), ) - mutation2 = data_pb2.Mutation( - delete_from_row=data_pb2.Mutation.DeleteFromRow(), + mutation2 = _MutationPB( + delete_from_row=_MutationDeleteFromRowPB(), ) - mutation3 = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( + mutation3 = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( family_name=column_family_id2, column_qualifier=column2, ), ) - mutation4 = data_pb2.Mutation( - delete_from_family=data_pb2.Mutation.DeleteFromFamily( + mutation4 = _MutationPB( + delete_from_family=_MutationDeleteFromFamilyPB( family_name=column_family_id3, ), ) - request_pb = messages_pb2.CheckAndMutateRowRequest( + request_pb = _CheckAndMutateRowRequestPB( table_name=table_name, row_key=row_key, predicate_filter=row_filter.to_pb(), @@ -474,7 +458,7 @@ def test_commit(self): # Create response_pb predicate_matched = True - response_pb = messages_pb2.CheckAndMutateRowResponse( + response_pb = _CheckAndMutateRowResponsePB( predicate_matched=predicate_matched) # Patch the stub used by the API method. @@ -560,8 +544,6 @@ def test_clear(self): self.assertEqual(row._rule_pb_list, []) def test_append_cell_value(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - table = object() row_key = b'row_key' row = self._makeOne(row_key, table) @@ -571,14 +553,12 @@ def test_append_cell_value(self): column_family_id = u'column_family_id' value = b'bytes-val' row.append_cell_value(column_family_id, column, value) - expected_pb = data_pb2.ReadModifyWriteRule( + expected_pb = _ReadModifyWriteRulePB( family_name=column_family_id, column_qualifier=column, append_value=value) self.assertEqual(row._rule_pb_list, [expected_pb]) def test_increment_cell_value(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - table = object() row_key = b'row_key' row = self._makeOne(row_key, table) @@ -588,16 +568,13 @@ def test_increment_cell_value(self): column_family_id = u'column_family_id' int_value = 281330 row.increment_cell_value(column_family_id, column, int_value) - expected_pb = data_pb2.ReadModifyWriteRule( + expected_pb = _ReadModifyWriteRulePB( family_name=column_family_id, column_qualifier=column, increment_amount=int_value) self.assertEqual(row._rule_pb_list, [expected_pb]) def test_commit(self): from gcloud._testing import _Monkey - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import row as MUT @@ -613,11 +590,11 @@ def test_commit(self): # Create request_pb value = b'bytes-value' # We will call row.append_cell_value(COLUMN_FAMILY_ID, COLUMN, value). 
- request_pb = messages_pb2.ReadModifyWriteRowRequest( + request_pb = _ReadModifyWriteRowRequestPB( table_name=table_name, row_key=row_key, rules=[ - data_pb2.ReadModifyWriteRule( + _ReadModifyWriteRulePB( family_name=column_family_id, column_qualifier=column, append_value=value, @@ -693,8 +670,6 @@ def _callFUT(self, row_response): def test_it(self): from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - col_fam1 = u'col-fam-id' col_fam2 = u'col-fam-id2' col_name1 = b'col-name1' @@ -723,28 +698,28 @@ def test_it(self): ], }, } - sample_input = data_pb2.Row( + response_row = _RowPB( families=[ - data_pb2.Family( + _FamilyPB( name=col_fam1, columns=[ - data_pb2.Column( + _ColumnPB( qualifier=col_name1, cells=[ - data_pb2.Cell( + _CellPB( value=cell_val1, timestamp_micros=microseconds, ), - data_pb2.Cell( + _CellPB( value=cell_val2, timestamp_micros=microseconds, ), ], ), - data_pb2.Column( + _ColumnPB( qualifier=col_name2, cells=[ - data_pb2.Cell( + _CellPB( value=cell_val3, timestamp_micros=microseconds, ), @@ -752,13 +727,13 @@ def test_it(self): ), ], ), - data_pb2.Family( + _FamilyPB( name=col_fam2, columns=[ - data_pb2.Column( + _ColumnPB( qualifier=col_name3, cells=[ - data_pb2.Cell( + _CellPB( value=cell_val4, timestamp_micros=microseconds, ), @@ -768,6 +743,7 @@ def test_it(self): ), ], ) + sample_input = _ReadModifyWriteRowResponsePB(row=response_row) self.assertEqual(expected_output, self._callFUT(sample_input)) @@ -779,8 +755,6 @@ def _callFUT(self, family_pb): def test_it(self): from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - col_fam1 = u'col-fam-id' col_name1 = b'col-name1' col_name2 = b'col-name2' @@ -800,26 +774,26 @@ def test_it(self): ], } expected_output = (col_fam1, expected_dict) - sample_input = data_pb2.Family( + sample_input = _FamilyPB( name=col_fam1, columns=[ - data_pb2.Column( + _ColumnPB( qualifier=col_name1, cells=[ - data_pb2.Cell( + _CellPB( value=cell_val1, timestamp_micros=microseconds, ), - data_pb2.Cell( + _CellPB( value=cell_val2, timestamp_micros=microseconds, ), ], ), - data_pb2.Column( + _ColumnPB( qualifier=col_name2, cells=[ - data_pb2.Cell( + _CellPB( value=cell_val3, timestamp_micros=microseconds, ), @@ -830,6 +804,96 @@ def test_it(self): self.assertEqual(expected_output, self._callFUT(sample_input)) +def _CheckAndMutateRowRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.CheckAndMutateRowRequest(*args, **kw) + + +def _CheckAndMutateRowResponsePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.CheckAndMutateRowResponse(*args, **kw) + + +def _MutateRowRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.MutateRowRequest(*args, **kw) + + +def _ReadModifyWriteRowRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadModifyWriteRowRequest(*args, **kw) + + +def _ReadModifyWriteRowResponsePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadModifyWriteRowResponse(*args, **kw) + + +def _CellPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Cell(*args, **kw) + + +def 
_ColumnPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Column(*args, **kw) + + +def _FamilyPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Family(*args, **kw) + + +def _MutationPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation(*args, **kw) + + +def _MutationSetCellPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.SetCell(*args, **kw) + + +def _MutationDeleteFromColumnPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.DeleteFromColumn(*args, **kw) + + +def _MutationDeleteFromFamilyPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.DeleteFromFamily(*args, **kw) + + +def _MutationDeleteFromRowPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.DeleteFromRow(*args, **kw) + + +def _RowPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Row(*args, **kw) + + +def _ReadModifyWriteRulePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.ReadModifyWriteRule(*args, **kw) + + class _Client(object): data_stub = None @@ -838,7 +902,7 @@ def __init__(self, timeout_seconds=None): self.timeout_seconds = timeout_seconds -class _Cluster(object): +class _Instance(object): def __init__(self, client=None): self._client = client @@ -848,4 +912,4 @@ class _Table(object): def __init__(self, name, client=None): self.name = name - self._cluster = _Cluster(client) + self._instance = _Instance(client) diff --git a/gcloud/bigtable/test_row_data.py b/gcloud/bigtable/test_row_data.py index 56b1c15f0655..2162212e7fdd 100644 --- a/gcloud/bigtable/test_row_data.py +++ b/gcloud/bigtable/test_row_data.py @@ -28,20 +28,20 @@ def _makeOne(self, *args, **kwargs): def _from_pb_test_helper(self, labels=None): import datetime from gcloud._helpers import _EPOCH - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) timestamp_micros = 18738724000 # Make sure millis granularity timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros) value = b'value-bytes' if labels is None: - cell_pb = data_pb2.Cell(value=value, - timestamp_micros=timestamp_micros) + cell_pb = data_v2_pb2.Cell( + value=value, timestamp_micros=timestamp_micros) cell_expected = self._makeOne(value, timestamp) else: - cell_pb = data_pb2.Cell(value=value, - timestamp_micros=timestamp_micros, - labels=labels) + cell_pb = data_v2_pb2.Cell( + value=value, timestamp_micros=timestamp_micros, labels=labels) cell_expected = self._makeOne(value, timestamp, labels=labels) klass = self._getTargetClass() @@ -105,8 +105,6 @@ def test_constructor(self): partial_row_data = self._makeOne(row_key) self.assertTrue(partial_row_data._row_key is row_key) self.assertEqual(partial_row_data._cells, {}) - self.assertFalse(partial_row_data._committed) - self.assertFalse(partial_row_data._chunks_encountered) def test___eq__(self): row_key = object() @@ -133,13 +131,6 @@ def test___ne__(self): partial_row_data2 = self._makeOne(row_key2) self.assertNotEqual(partial_row_data1, partial_row_data2) - def 
test___ne__committed(self): - row_key = object() - partial_row_data1 = self._makeOne(row_key) - partial_row_data1._committed = object() - partial_row_data2 = self._makeOne(row_key) - self.assertNotEqual(partial_row_data1, partial_row_data2) - def test___ne__cells(self): row_key = object() partial_row_data1 = self._makeOne(row_key) @@ -190,195 +181,6 @@ def test_row_key_getter(self): partial_row_data = self._makeOne(row_key) self.assertTrue(partial_row_data.row_key is row_key) - def test_committed_getter(self): - partial_row_data = self._makeOne(None) - partial_row_data._committed = value = object() - self.assertTrue(partial_row_data.committed is value) - - def test_clear(self): - partial_row_data = self._makeOne(None) - cells = {1: 2} - partial_row_data._cells = cells - self.assertEqual(partial_row_data.cells, cells) - partial_row_data._committed = True - partial_row_data._chunks_encountered = True - partial_row_data.clear() - self.assertFalse(partial_row_data.committed) - self.assertFalse(partial_row_data._chunks_encountered) - self.assertEqual(partial_row_data.cells, {}) - - def test__handle_commit_row(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) - - index = last_chunk_index = 1 - self.assertFalse(partial_row_data.committed) - partial_row_data._handle_commit_row(chunk, index, last_chunk_index) - self.assertTrue(partial_row_data.committed) - - def test__handle_commit_row_false(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=False) - - with self.assertRaises(ValueError): - partial_row_data._handle_commit_row(chunk, None, None) - - def test__handle_commit_row_not_last_chunk(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) - - with self.assertRaises(ValueError): - index = 0 - last_chunk_index = 1 - self.assertNotEqual(index, last_chunk_index) - partial_row_data._handle_commit_row(chunk, index, last_chunk_index) - - def test__handle_reset_row(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(reset_row=True) - - # Modify the PartialRowData object so we can check it's been cleared. 
- partial_row_data._cells = {1: 2} - partial_row_data._committed = True - partial_row_data._handle_reset_row(chunk) - self.assertEqual(partial_row_data.cells, {}) - self.assertFalse(partial_row_data.committed) - - def test__handle_reset_row_failure(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(reset_row=False) - - with self.assertRaises(ValueError): - partial_row_data._handle_reset_row(chunk) - - def test__handle_row_contents(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable.row_data import Cell - - partial_row_data = self._makeOne(None) - cell1_pb = data_pb2.Cell(timestamp_micros=1, value=b'val1') - cell2_pb = data_pb2.Cell(timestamp_micros=200, value=b'val2') - cell3_pb = data_pb2.Cell(timestamp_micros=300000, value=b'val3') - col1 = b'col1' - col2 = b'col2' - columns = [ - data_pb2.Column(qualifier=col1, cells=[cell1_pb, cell2_pb]), - data_pb2.Column(qualifier=col2, cells=[cell3_pb]), - ] - family_name = u'name' - row_contents = data_pb2.Family(name=family_name, columns=columns) - chunk = messages_pb2.ReadRowsResponse.Chunk(row_contents=row_contents) - - self.assertEqual(partial_row_data.cells, {}) - partial_row_data._handle_row_contents(chunk) - expected_cells = { - family_name: { - col1: [Cell.from_pb(cell1_pb), Cell.from_pb(cell2_pb)], - col2: [Cell.from_pb(cell3_pb)], - } - } - self.assertEqual(partial_row_data.cells, expected_cells) - - def test_update_from_read_rows(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - row_key = b'row-key' - partial_row_data = self._makeOne(row_key) - - # Set-up chunk1, some data that will be reset by chunk2. - ignored_family_name = u'ignore-name' - row_contents = data_pb2.Family(name=ignored_family_name) - chunk1 = messages_pb2.ReadRowsResponse.Chunk(row_contents=row_contents) - - # Set-up chunk2, a reset row. - chunk2 = messages_pb2.ReadRowsResponse.Chunk(reset_row=True) - - # Set-up chunk3, a column family with no columns. - family_name = u'name' - row_contents = data_pb2.Family(name=family_name) - chunk3 = messages_pb2.ReadRowsResponse.Chunk(row_contents=row_contents) - - # Set-up chunk4, a commit row. - chunk4 = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) - - # Prepare request and make sure PartialRowData is empty before. - read_rows_response_pb = messages_pb2.ReadRowsResponse( - row_key=row_key, chunks=[chunk1, chunk2, chunk3, chunk4]) - self.assertEqual(partial_row_data.cells, {}) - self.assertFalse(partial_row_data.committed) - self.assertFalse(partial_row_data._chunks_encountered) - - # Parse the response and make sure the cells took place. 
- partial_row_data.update_from_read_rows(read_rows_response_pb) - self.assertEqual(partial_row_data.cells, {family_name: {}}) - self.assertFalse(ignored_family_name in partial_row_data.cells) - self.assertTrue(partial_row_data.committed) - self.assertTrue(partial_row_data._chunks_encountered) - - def test_update_from_read_rows_while_committed(self): - partial_row_data = self._makeOne(None) - partial_row_data._committed = True - self.assertFalse(partial_row_data._chunks_encountered) - - with self.assertRaises(ValueError): - partial_row_data.update_from_read_rows(None) - - self.assertFalse(partial_row_data._chunks_encountered) - - def test_update_from_read_rows_row_key_disagree(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - row_key1 = b'row-key1' - row_key2 = b'row-key2' - partial_row_data = self._makeOne(row_key1) - self.assertFalse(partial_row_data._chunks_encountered) - - self.assertNotEqual(row_key1, row_key2) - read_rows_response_pb = messages_pb2.ReadRowsResponse(row_key=row_key2) - with self.assertRaises(ValueError): - partial_row_data.update_from_read_rows(read_rows_response_pb) - - self.assertFalse(partial_row_data._chunks_encountered) - - def test_update_from_read_rows_empty_chunk(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - row_key = b'row-key' - partial_row_data = self._makeOne(row_key) - self.assertFalse(partial_row_data._chunks_encountered) - - chunk = messages_pb2.ReadRowsResponse.Chunk() - read_rows_response_pb = messages_pb2.ReadRowsResponse( - row_key=row_key, chunks=[chunk]) - - # This makes it an "empty" chunk. - self.assertEqual(chunk.WhichOneof('chunk'), None) - with self.assertRaises(ValueError): - partial_row_data.update_from_read_rows(read_rows_response_pb) - - self.assertFalse(partial_row_data._chunks_encountered) - class TestPartialRowsData(unittest2.TestCase): @@ -437,6 +239,16 @@ def test___ne__(self): partial_rows_data2 = self._makeOne(response_iterator2) self.assertNotEqual(partial_rows_data1, partial_rows_data2) + def test_state_start(self): + prd = self._makeOne([]) + self.assertEqual(prd.state, prd.START) + + def test_state_new_row_w_row(self): + prd = self._makeOne([]) + prd._last_scanned_row_key = '' + prd._row = object() + self.assertEqual(prd.state, prd.NEW_ROW) + def test_rows_getter(self): partial_rows_data = self._makeOne(None) partial_rows_data._rows = value = object() @@ -449,43 +261,7 @@ def test_cancel(self): partial_rows_data.cancel() self.assertEqual(response_iterator.cancel_calls, 1) - def test_consume_next(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable.row_data import PartialRowData - - row_key = b'row-key' - value_pb = messages_pb2.ReadRowsResponse(row_key=row_key) - response_iterator = _MockCancellableIterator(value_pb) - partial_rows_data = self._makeOne(response_iterator) - self.assertEqual(partial_rows_data.rows, {}) - partial_rows_data.consume_next() - expected_rows = {row_key: PartialRowData(row_key)} - self.assertEqual(partial_rows_data.rows, expected_rows) - - def test_consume_next_row_exists(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable.row_data import PartialRowData - - row_key = b'row-key' - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) - value_pb = messages_pb2.ReadRowsResponse(row_key=row_key, - chunks=[chunk]) - response_iterator = 
_MockCancellableIterator(value_pb) - partial_rows_data = self._makeOne(response_iterator) - existing_values = PartialRowData(row_key) - partial_rows_data._rows[row_key] = existing_values - self.assertFalse(existing_values.committed) - partial_rows_data.consume_next() - self.assertTrue(existing_values.committed) - self.assertEqual(existing_values.cells, {}) - - def test_consume_next_empty_iter(self): - response_iterator = _MockCancellableIterator() - partial_rows_data = self._makeOne(response_iterator) - with self.assertRaises(StopIteration): - partial_rows_data.consume_next() + # 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests' def test_consume_all(self): klass = self._getDoNothingClass() @@ -495,7 +271,8 @@ def test_consume_all(self): partial_rows_data = klass(response_iterator) self.assertEqual(partial_rows_data._consumed, []) partial_rows_data.consume_all() - self.assertEqual(partial_rows_data._consumed, [value1, value2, value3]) + self.assertEqual( + partial_rows_data._consumed, [value1, value2, value3]) def test_consume_all_with_max_loops(self): klass = self._getDoNothingClass() @@ -507,7 +284,374 @@ def test_consume_all_with_max_loops(self): partial_rows_data.consume_all(max_loops=1) self.assertEqual(partial_rows_data._consumed, [value1]) # Make sure the iterator still has the remaining values. - self.assertEqual(list(response_iterator.iter_values), [value2, value3]) + self.assertEqual( + list(response_iterator.iter_values), [value2, value3]) + + def test__copy_from_current_unset(self): + prd = self._makeOne([]) + chunks = _generate_cell_chunks(['']) + chunk = chunks[0] + prd._copy_from_current(chunk) + self.assertEqual(chunk.row_key, b'') + self.assertEqual(chunk.family_name.value, u'') + self.assertEqual(chunk.qualifier.value, b'') + self.assertEqual(chunk.timestamp_micros, 0) + self.assertEqual(chunk.labels, []) + + def test__copy_from_current_blank(self): + ROW_KEY = b'RK' + FAMILY_NAME = u'A' + QUALIFIER = b'C' + TIMESTAMP_MICROS = 100 + LABELS = ['L1', 'L2'] + prd = self._makeOne([]) + prd._cell = _PartialCellData() + chunks = _generate_cell_chunks(['']) + chunk = chunks[0] + chunk.row_key = ROW_KEY + chunk.family_name.value = FAMILY_NAME + chunk.qualifier.value = QUALIFIER + chunk.timestamp_micros = TIMESTAMP_MICROS + chunk.labels.extend(LABELS) + prd._copy_from_current(chunk) + self.assertEqual(chunk.row_key, ROW_KEY) + self.assertEqual(chunk.family_name.value, FAMILY_NAME) + self.assertEqual(chunk.qualifier.value, QUALIFIER) + self.assertEqual(chunk.timestamp_micros, TIMESTAMP_MICROS) + self.assertEqual(chunk.labels, LABELS) + + def test__copy_from_previous_unset(self): + prd = self._makeOne([]) + cell = _PartialCellData() + prd._copy_from_previous(cell) + self.assertEqual(cell.row_key, '') + self.assertEqual(cell.family_name, u'') + self.assertEqual(cell.qualifier, b'') + self.assertEqual(cell.timestamp_micros, 0) + self.assertEqual(cell.labels, []) + + def test__copy_from_previous_blank(self): + ROW_KEY = 'RK' + FAMILY_NAME = u'A' + QUALIFIER = b'C' + TIMESTAMP_MICROS = 100 + LABELS = ['L1', 'L2'] + prd = self._makeOne([]) + cell = _PartialCellData( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + labels=LABELS, + ) + prd._previous_cell = _PartialCellData() + prd._copy_from_previous(cell) + self.assertEqual(cell.row_key, ROW_KEY) + self.assertEqual(cell.family_name, FAMILY_NAME) + self.assertEqual(cell.qualifier, QUALIFIER) + self.assertEqual(cell.timestamp_micros, TIMESTAMP_MICROS) + 
self.assertEqual(cell.labels, LABELS) + + def test__copy_from_previous_filled(self): + ROW_KEY = 'RK' + FAMILY_NAME = u'A' + QUALIFIER = b'C' + TIMESTAMP_MICROS = 100 + LABELS = ['L1', 'L2'] + prd = self._makeOne([]) + prd._previous_cell = _PartialCellData( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + labels=LABELS, + ) + cell = _PartialCellData() + prd._copy_from_previous(cell) + self.assertEqual(cell.row_key, ROW_KEY) + self.assertEqual(cell.family_name, FAMILY_NAME) + self.assertEqual(cell.qualifier, QUALIFIER) + self.assertEqual(cell.timestamp_micros, 0) + self.assertEqual(cell.labels, []) + + def test__save_row_no_cell(self): + ROW_KEY = 'RK' + prd = self._makeOne([]) + row = prd._row = _Dummy(row_key=ROW_KEY) + prd._cell = None + prd._save_current_row() + self.assertTrue(prd._rows[ROW_KEY] is row) + + def test_invalid_last_scanned_row_key_on_start(self): + from gcloud.bigtable.row_data import InvalidReadRowsResponse + response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key='ABC') + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(InvalidReadRowsResponse): + prd.consume_next() + + def test_valid_last_scanned_row_key_on_start(self): + response = _ReadRowsResponseV2( + chunks=(), last_scanned_row_key='AFTER') + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + prd._last_scanned_row_key = 'BEFORE' + prd.consume_next() + self.assertEqual(prd._last_scanned_row_key, 'AFTER') + + def test_invalid_empty_chunk(self): + from gcloud.bigtable.row_data import InvalidChunk + chunks = _generate_cell_chunks(['']) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(InvalidChunk): + prd.consume_next() + + def test_invalid_empty_second_chunk(self): + from gcloud.bigtable.row_data import InvalidChunk + chunks = _generate_cell_chunks(['', '']) + first = chunks[0] + first.row_key = b'RK' + first.family_name.value = 'A' + first.qualifier.value = b'C' + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(InvalidChunk): + prd.consume_next() + + +class TestPartialRowsData_JSON_acceptance_tests(unittest2.TestCase): + + _json_tests = None + + def _getTargetClass(self): + from gcloud.bigtable.row_data import PartialRowsData + return PartialRowsData + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def _load_json_test(self, test_name): + import os + if self.__class__._json_tests is None: + dirname = os.path.dirname(__file__) + filename = os.path.join(dirname, 'read-rows-acceptance-test.json') + raw = _parse_readrows_acceptance_tests(filename) + tests = self.__class__._json_tests = {} + for (name, chunks, results) in raw: + tests[name] = chunks, results + return self.__class__._json_tests[test_name] + + # JSON Error cases: invalid chunks + + def _fail_during_consume(self, testcase_name): + from gcloud.bigtable.row_data import InvalidChunk + chunks, results = self._load_json_test(testcase_name) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(InvalidChunk): + prd.consume_next() + expected_result = self._sort_flattend_cells( + [result for result in results if not result['error']]) + flattened = self._sort_flattend_cells(_flatten_cells(prd)) + 
self.assertEqual(flattened, expected_result) + + def test_invalid_no_cell_key_before_commit(self): + self._fail_during_consume('invalid - no cell key before commit') + + def test_invalid_no_cell_key_before_value(self): + self._fail_during_consume('invalid - no cell key before value') + + def test_invalid_new_col_family_wo_qualifier(self): + self._fail_during_consume( + 'invalid - new col family must specify qualifier') + + def test_invalid_no_commit_between_rows(self): + self._fail_during_consume('invalid - no commit between rows') + + def test_invalid_no_commit_after_first_row(self): + self._fail_during_consume('invalid - no commit after first row') + + def test_invalid_duplicate_row_key(self): + self._fail_during_consume('invalid - duplicate row key') + + def test_invalid_new_row_missing_row_key(self): + self._fail_during_consume('invalid - new row missing row key') + + def test_invalid_bare_reset(self): + self._fail_during_consume('invalid - bare reset') + + def test_invalid_bad_reset_no_commit(self): + self._fail_during_consume('invalid - bad reset, no commit') + + def test_invalid_missing_key_after_reset(self): + self._fail_during_consume('invalid - missing key after reset') + + def test_invalid_reset_with_chunk(self): + self._fail_during_consume('invalid - reset with chunk') + + def test_invalid_commit_with_chunk(self): + self._fail_during_consume('invalid - commit with chunk') + + # JSON Error cases: incomplete final row + + def _sort_flattend_cells(self, flattened): + import operator + key_func = operator.itemgetter('rk', 'fm', 'qual') + return sorted(flattened, key=key_func) + + def _incomplete_final_row(self, testcase_name): + chunks, results = self._load_json_test(testcase_name) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + prd.consume_next() + self.assertEqual(prd.state, prd.ROW_IN_PROGRESS) + expected_result = self._sort_flattend_cells( + [result for result in results if not result['error']]) + flattened = self._sort_flattend_cells(_flatten_cells(prd)) + self.assertEqual(flattened, expected_result) + + def test_invalid_no_commit(self): + self._incomplete_final_row('invalid - no commit') + + def test_invalid_last_row_missing_commit(self): + self._incomplete_final_row('invalid - last row missing commit') + + # Non-error cases + + _marker = object() + + def _match_results(self, testcase_name, expected_result=_marker): + chunks, results = self._load_json_test(testcase_name) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + prd.consume_next() + flattened = self._sort_flattend_cells(_flatten_cells(prd)) + if expected_result is self._marker: + expected_result = self._sort_flattend_cells(results) + self.assertEqual(flattened, expected_result) + + def test_bare_commit_implies_ts_zero(self): + self._match_results('bare commit implies ts=0') + + def test_simple_row_with_timestamp(self): + self._match_results('simple row with timestamp') + + def test_missing_timestamp_implies_ts_zero(self): + self._match_results('missing timestamp, implied ts=0') + + def test_empty_cell_value(self): + self._match_results('empty cell value') + + def test_two_unsplit_cells(self): + self._match_results('two unsplit cells') + + def test_two_qualifiers(self): + self._match_results('two qualifiers') + + def test_two_families(self): + self._match_results('two families') + + def test_with_labels(self): + self._match_results('with labels') + + def 
test_split_cell_bare_commit(self): + self._match_results('split cell, bare commit') + + def test_split_cell(self): + self._match_results('split cell') + + def test_split_four_ways(self): + self._match_results('split four ways') + + def test_two_split_cells(self): + self._match_results('two split cells') + + def test_multi_qualifier_splits(self): + self._match_results('multi-qualifier splits') + + def test_multi_qualifier_multi_split(self): + self._match_results('multi-qualifier multi-split') + + def test_multi_family_split(self): + self._match_results('multi-family split') + + def test_two_rows(self): + self._match_results('two rows') + + def test_two_rows_implicit_timestamp(self): + self._match_results('two rows implicit timestamp') + + def test_two_rows_empty_value(self): + self._match_results('two rows empty value') + + def test_two_rows_one_with_multiple_cells(self): + self._match_results('two rows, one with multiple cells') + + def test_two_rows_multiple_cells_multiple_families(self): + self._match_results('two rows, multiple cells, multiple families') + + def test_two_rows_multiple_cells(self): + self._match_results('two rows, multiple cells') + + def test_two_rows_four_cells_two_labels(self): + self._match_results('two rows, four cells, 2 labels') + + def test_two_rows_with_splits_same_timestamp(self): + self._match_results('two rows with splits, same timestamp') + + def test_no_data_after_reset(self): + # JSON testcase has `"results": null` + self._match_results('no data after reset', expected_result=[]) + + def test_simple_reset(self): + self._match_results('simple reset') + + def test_reset_to_new_val(self): + self._match_results('reset to new val') + + def test_reset_to_new_qual(self): + self._match_results('reset to new qual') + + def test_reset_with_splits(self): + self._match_results('reset with splits') + + def test_two_resets(self): + self._match_results('two resets') + + def test_reset_to_new_row(self): + self._match_results('reset to new row') + + def test_reset_in_between_chunks(self): + self._match_results('reset in between chunks') + + def test_empty_cell_chunk(self): + self._match_results('empty cell chunk') + + +def _flatten_cells(prd): + # Match results format from JSON testcases. + # Doesn't handle error cases. 
+ from gcloud._helpers import _bytes_to_unicode + from gcloud._helpers import _microseconds_from_datetime + for row_key, row in prd.rows.items(): + for family_name, family in row.cells.items(): + for qualifier, column in family.items(): + for cell in column: + yield { + u'rk': _bytes_to_unicode(row_key), + u'fm': family_name, + u'qual': _bytes_to_unicode(qualifier), + u'ts': _microseconds_from_datetime(cell.timestamp), + u'value': _bytes_to_unicode(cell.value), + u'label': u' '.join(cell.labels), + u'error': False, + } class _MockCancellableIterator(object): @@ -522,3 +666,62 @@ def cancel(self): def next(self): return next(self.iter_values) + + def __next__(self): # pragma: NO COVER Py3k + return self.next() + + +class _Dummy(object): + + def __init__(self, **kw): + self.__dict__.update(kw) + + +class _PartialCellData(object): + + row_key = '' + family_name = u'' + qualifier = b'' + timestamp_micros = 0 + + def __init__(self, **kw): + self.labels = kw.pop('labels', []) + self.__dict__.update(kw) + + +class _ReadRowsResponseV2(object): + + def __init__(self, chunks, last_scanned_row_key=''): + self.chunks = chunks + self.last_scanned_row_key = last_scanned_row_key + + +def _generate_cell_chunks(chunk_text_pbs): + from google.protobuf.text_format import Merge + from gcloud.bigtable._generated_v2.bigtable_pb2 import ReadRowsResponse + + chunks = [] + + for chunk_text_pb in chunk_text_pbs: + chunk = ReadRowsResponse.CellChunk() + chunks.append(Merge(chunk_text_pb, chunk)) + + return chunks + + +def _parse_readrows_acceptance_tests(filename): + """Parse acceptance tests from JSON + + See: + https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/master/bigtable-client-core/src/test/resources/com/google/cloud/bigtable/grpc/scanner/v2/read-rows-acceptance-test.json + """ + import json + + with open(filename) as json_file: + test_json = json.load(json_file) + + for test in test_json['tests']: + name = test['name'] + chunks = _generate_cell_chunks(test['chunks']) + results = test['results'] + yield name, chunks, results diff --git a/gcloud/bigtable/test_row_filters.py b/gcloud/bigtable/test_row_filters.py index aed90574683f..594a4fe47c2b 100644 --- a/gcloud/bigtable/test_row_filters.py +++ b/gcloud/bigtable/test_row_filters.py @@ -60,12 +60,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(sink=flag) + expected_pb = _RowFilterPB(sink=flag) self.assertEqual(pb_val, expected_pb) @@ -79,12 +77,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(pass_all_filter=flag) + expected_pb = _RowFilterPB(pass_all_filter=flag) self.assertEqual(pb_val, expected_pb) @@ -98,12 +94,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(block_all_filter=flag) + expected_pb = _RowFilterPB(block_all_filter=flag) self.assertEqual(pb_val, expected_pb) @@ -156,12 +150,10 @@ def _makeOne(self, 
*args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - regex = b'row-key-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(row_key_regex_filter=regex) + expected_pb = _RowFilterPB(row_key_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -192,12 +184,10 @@ def test___eq__same_value(self): self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - sample = 0.25 row_filter = self._makeOne(sample) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(row_sample_filter=sample) + expected_pb = _RowFilterPB(row_sample_filter=sample) self.assertEqual(pb_val, expected_pb) @@ -211,12 +201,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - regex = u'family-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(family_name_regex_filter=regex) + expected_pb = _RowFilterPB(family_name_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -230,12 +218,11 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - regex = b'column-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(column_qualifier_regex_filter=regex) + expected_pb = _RowFilterPB( + column_qualifier_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -280,8 +267,6 @@ def test___ne__same_value(self): def _to_pb_helper(self, start_micros=None, end_micros=None): import datetime from gcloud._helpers import _EPOCH - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - pb_kwargs = {} start = None @@ -294,7 +279,7 @@ def _to_pb_helper(self, start_micros=None, end_micros=None): pb_kwargs['end_timestamp_micros'] = end_micros time_range = self._makeOne(start=start, end=end) - expected_pb = data_pb2.TimestampRange(**pb_kwargs) + expected_pb = _TimestampRangePB(**pb_kwargs) self.assertEqual(time_range.to_pb(), expected_pb) def test_to_pb(self): @@ -342,14 +327,13 @@ def test___eq__same_value(self): self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable.row_filters import TimestampRange range_ = TimestampRange() row_filter = self._makeOne(range_) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter( - timestamp_range_filter=data_pb2.TimestampRange()) + expected_pb = _RowFilterPB( + timestamp_range_filter=_TimestampRangePB()) self.assertEqual(pb_val, expected_pb) @@ -377,10 +361,12 @@ def test_constructor_explicit(self): end_column = object() inclusive_start = object() inclusive_end = object() - row_filter = self._makeOne(column_family_id, start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) + row_filter = self._makeOne( + column_family_id, + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) self.assertTrue(row_filter.column_family_id is column_family_id) self.assertTrue(row_filter.start_column is start_column) self.assertTrue(row_filter.end_column is end_column) @@ -422,66 
+408,56 @@ def test___eq__type_differ(self): self.assertNotEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - column_family_id = u'column-family-id' row_filter = self._makeOne(column_family_id) - col_range_pb = data_pb2.ColumnRange(family_name=column_family_id) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) + col_range_pb = _ColumnRangePB(family_name=column_family_id) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_start(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - column_family_id = u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, start_column=column) - col_range_pb = data_pb2.ColumnRange( + col_range_pb = _ColumnRangePB( family_name=column_family_id, - start_qualifier_inclusive=column, + start_qualifier_closed=column, ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_start(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - column_family_id = u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, start_column=column, inclusive_start=False) - col_range_pb = data_pb2.ColumnRange( + col_range_pb = _ColumnRangePB( family_name=column_family_id, - start_qualifier_exclusive=column, + start_qualifier_open=column, ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - column_family_id = u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, end_column=column) - col_range_pb = data_pb2.ColumnRange( + col_range_pb = _ColumnRangePB( family_name=column_family_id, - end_qualifier_inclusive=column, + end_qualifier_closed=column, ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - column_family_id = u'column-family-id' column = b'column' row_filter = self._makeOne(column_family_id, end_column=column, inclusive_end=False) - col_range_pb = data_pb2.ColumnRange( + col_range_pb = _ColumnRangePB( family_name=column_family_id, - end_qualifier_exclusive=column, + end_qualifier_open=column, ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -495,12 +471,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - regex = b'value-regex' row_filter = self._makeOne(regex) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(value_regex_filter=regex) + expected_pb = _RowFilterPB(value_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -561,47 +535,37 @@ def test___eq__type_differ(self): self.assertNotEqual(row_filter1, row_filter2) def test_to_pb(self): - from 
gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - row_filter = self._makeOne() - expected_pb = data_pb2.RowFilter( - value_range_filter=data_pb2.ValueRange()) + expected_pb = _RowFilterPB( + value_range_filter=_ValueRangePB()) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_start(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - value = b'some-value' row_filter = self._makeOne(start_value=value) - val_range_pb = data_pb2.ValueRange(start_value_inclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = _ValueRangePB(start_value_closed=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_start(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - value = b'some-value' row_filter = self._makeOne(start_value=value, inclusive_start=False) - val_range_pb = data_pb2.ValueRange(start_value_exclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = _ValueRangePB(start_value_open=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - value = b'some-value' row_filter = self._makeOne(end_value=value) - val_range_pb = data_pb2.ValueRange(end_value_inclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = _ValueRangePB(end_value_closed=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - value = b'some-value' row_filter = self._makeOne(end_value=value, inclusive_end=False) - val_range_pb = data_pb2.ValueRange(end_value_exclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) + val_range_pb = _ValueRangePB(end_value_open=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -649,12 +613,11 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - num_cells = 76 row_filter = self._makeOne(num_cells) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(cells_per_row_offset_filter=num_cells) + expected_pb = _RowFilterPB( + cells_per_row_offset_filter=num_cells) self.assertEqual(pb_val, expected_pb) @@ -668,12 +631,11 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - num_cells = 189 row_filter = self._makeOne(num_cells) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(cells_per_row_limit_filter=num_cells) + expected_pb = _RowFilterPB( + cells_per_row_limit_filter=num_cells) self.assertEqual(pb_val, expected_pb) @@ -687,12 +649,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - num_cells = 10 row_filter = self._makeOne(num_cells) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter( + expected_pb = _RowFilterPB( 
cells_per_column_limit_filter=num_cells) self.assertEqual(pb_val, expected_pb) @@ -707,12 +667,10 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - flag = True row_filter = self._makeOne(flag) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(strip_value_transformer=flag) + expected_pb = _RowFilterPB(strip_value_transformer=flag) self.assertEqual(pb_val, expected_pb) @@ -743,12 +701,10 @@ def test___eq__same_value(self): self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - label = u'label' row_filter = self._makeOne(label) pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(apply_label_transformer=label) + expected_pb = _RowFilterPB(apply_label_transformer=label) self.assertEqual(pb_val, expected_pb) @@ -793,7 +749,6 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -806,15 +761,14 @@ def test_to_pb(self): row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) filter_pb = row_filter3.to_pb() - expected_pb = data_pb2.RowFilter( - chain=data_pb2.RowFilter.Chain( + expected_pb = _RowFilterPB( + chain=_RowFilterChainPB( filters=[row_filter1_pb, row_filter2_pb], ), ) self.assertEqual(filter_pb, expected_pb) def test_to_pb_nested(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable.row_filters import CellsRowLimitFilter from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -831,8 +785,8 @@ def test_to_pb_nested(self): row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) filter_pb = row_filter5.to_pb() - expected_pb = data_pb2.RowFilter( - chain=data_pb2.RowFilter.Chain( + expected_pb = _RowFilterPB( + chain=_RowFilterChainPB( filters=[row_filter3_pb, row_filter4_pb], ), ) @@ -849,7 +803,6 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -862,15 +815,14 @@ def test_to_pb(self): row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) filter_pb = row_filter3.to_pb() - expected_pb = data_pb2.RowFilter( - interleave=data_pb2.RowFilter.Interleave( + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB( filters=[row_filter1_pb, row_filter2_pb], ), ) self.assertEqual(filter_pb, expected_pb) def test_to_pb_nested(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable.row_filters import CellsRowLimitFilter from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -887,8 +839,8 @@ def test_to_pb_nested(self): row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) filter_pb = row_filter5.to_pb() - expected_pb = data_pb2.RowFilter( - interleave=data_pb2.RowFilter.Interleave( + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB( filters=[row_filter3_pb, row_filter4_pb], ), ) @@ 
-938,7 +890,6 @@ def test___eq__type_differ(self): self.assertNotEqual(cond_filter1, cond_filter2) def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable.row_filters import CellsRowOffsetFilter from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -956,8 +907,8 @@ def test_to_pb(self): false_filter=row_filter3) filter_pb = row_filter4.to_pb() - expected_pb = data_pb2.RowFilter( - condition=data_pb2.RowFilter.Condition( + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( predicate_filter=row_filter1_pb, true_filter=row_filter2_pb, false_filter=row_filter3_pb, @@ -966,7 +917,6 @@ def test_to_pb(self): self.assertEqual(filter_pb, expected_pb) def test_to_pb_true_only(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -979,8 +929,8 @@ def test_to_pb_true_only(self): row_filter3 = self._makeOne(row_filter1, true_filter=row_filter2) filter_pb = row_filter3.to_pb() - expected_pb = data_pb2.RowFilter( - condition=data_pb2.RowFilter.Condition( + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( predicate_filter=row_filter1_pb, true_filter=row_filter2_pb, ), @@ -988,7 +938,6 @@ def test_to_pb_true_only(self): self.assertEqual(filter_pb, expected_pb) def test_to_pb_false_only(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 from gcloud.bigtable.row_filters import RowSampleFilter from gcloud.bigtable.row_filters import StripValueTransformerFilter @@ -1001,10 +950,52 @@ def test_to_pb_false_only(self): row_filter3 = self._makeOne(row_filter1, false_filter=row_filter2) filter_pb = row_filter3.to_pb() - expected_pb = data_pb2.RowFilter( - condition=data_pb2.RowFilter.Condition( + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( predicate_filter=row_filter1_pb, false_filter=row_filter2_pb, ), ) self.assertEqual(filter_pb, expected_pb) + + +def _ColumnRangePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.ColumnRange(*args, **kw) + + +def _RowFilterPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter(*args, **kw) + + +def _RowFilterChainPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Chain(*args, **kw) + + +def _RowFilterConditionPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Condition(*args, **kw) + + +def _RowFilterInterleavePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Interleave(*args, **kw) + + +def _TimestampRangePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.TimestampRange(*args, **kw) + + +def _ValueRangePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.ValueRange(*args, **kw) diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index 9fcdf21593b0..1494b3917d91 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -18,6 +18,18 @@ class TestTable(unittest2.TestCase): + PROJECT_ID = 'project-id' + INSTANCE_ID = 'instance-id' + 
INSTANCE_NAME = ('projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID) + TABLE_ID = 'table-id' + TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID + TIMEOUT_SECONDS = 1333 + ROW_KEY = b'row-key' + FAMILY_NAME = u'family' + QUALIFIER = b'qualifier' + TIMESTAMP_MICROS = 100 + VALUE = b'value' + def _getTargetClass(self): from gcloud.bigtable.table import Table return Table @@ -27,19 +39,19 @@ def _makeOne(self, *args, **kwargs): def test_constructor(self): table_id = 'table-id' - cluster = object() + instance = object() - table = self._makeOne(table_id, cluster) + table = self._makeOne(table_id, instance) self.assertEqual(table.table_id, table_id) - self.assertTrue(table._cluster is cluster) + self.assertTrue(table._instance is instance) def test_name_property(self): table_id = 'table-id' - cluster_name = 'cluster_name' + instance_name = 'instance_name' - cluster = _Cluster(cluster_name) - table = self._makeOne(table_id, cluster) - expected_name = cluster_name + '/tables/' + table_id + instance = _Instance(instance_name) + table = self._makeOne(table_id, instance) + expected_name = instance_name + '/tables/' + table_id self.assertEqual(table.name, expected_name) def test_column_family_factory(self): @@ -94,64 +106,53 @@ def test_row_factory_append(self): self.assertEqual(row._table, table) def test_row_factory_failure(self): - table_id = 'table-id' - table = self._makeOne(table_id, None) + table = self._makeOne(self.TABLE_ID, None) with self.assertRaises(ValueError): table.row(b'row_key', filter_=object(), append=True) def test___eq__(self): - table_id = 'table_id' - cluster = object() - table1 = self._makeOne(table_id, cluster) - table2 = self._makeOne(table_id, cluster) + instance = object() + table1 = self._makeOne(self.TABLE_ID, instance) + table2 = self._makeOne(self.TABLE_ID, instance) self.assertEqual(table1, table2) def test___eq__type_differ(self): - table1 = self._makeOne('table_id', None) + table1 = self._makeOne(self.TABLE_ID, None) table2 = object() self.assertNotEqual(table1, table2) def test___ne__same_value(self): - table_id = 'table_id' - cluster = object() - table1 = self._makeOne(table_id, cluster) - table2 = self._makeOne(table_id, cluster) + instance = object() + table1 = self._makeOne(self.TABLE_ID, instance) + table2 = self._makeOne(self.TABLE_ID, instance) comparison_val = (table1 != table2) self.assertFalse(comparison_val) def test___ne__(self): - table1 = self._makeOne('table_id1', 'cluster1') - table2 = self._makeOne('table_id2', 'cluster2') + table1 = self._makeOne('table_id1', 'instance1') + table2 = self._makeOne('table_id2', 'instance2') self.assertNotEqual(table1, table2) def _create_test_helper(self, initial_split_keys): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) + from gcloud._helpers import _to_bytes from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 150 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb - request_pb = 
messages_pb2.CreateTableRequest( - initial_split_keys=initial_split_keys, - name=cluster_name, - table_id=table_id, + splits_pb = [ + _CreateTableRequestSplitPB(key=_to_bytes(key)) + for key in initial_split_keys or ()] + request_pb = _CreateTableRequestPB( + initial_splits=splits_pb, + parent=self.INSTANCE_NAME, + table_id=self.TABLE_ID, ) # Create response_pb - response_pb = data_pb2.Table() + response_pb = _TablePB() # Patch the stub used by the API method. client._table_stub = stub = _FakeStub(response_pb) @@ -164,7 +165,7 @@ def _create_test_helper(self, initial_split_keys): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'CreateTable', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) @@ -173,85 +174,24 @@ def test_create(self): self._create_test_helper(initial_split_keys) def test_create_with_split_keys(self): - initial_split_keys = ['s1', 's2'] + initial_split_keys = [b's1', b's2'] self._create_test_helper(initial_split_keys) - def test_rename(self): - from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - new_table_id = 'new_table_id' - timeout_seconds = 97 - self.assertNotEqual(new_table_id, table_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) - - # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.RenameTableRequest( - name=table_name, - new_id=new_table_id, - ) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # rename() has no return value. - - # Perform the method and check the result. 
- result = table.rename(new_table_id) - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'RenameTable', - (request_pb, timeout_seconds), - {}, - )]) - - def _list_column_families_helper(self, column_family_name=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) + def _list_column_families_helper(self): from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 502 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.GetTableRequest(name=table_name) + request_pb = _GetTableRequestPB(name=self.TABLE_NAME) # Create response_pb - column_family_id = 'foo' - if column_family_name is None: - column_family_name = (table_name + '/columnFamilies/' + - column_family_id) - column_family = data_pb2.ColumnFamily(name=column_family_name) - response_pb = data_pb2.Table( - column_families={column_family_id: column_family}, + COLUMN_FAMILY_ID = 'foo' + column_family = _ColumnFamilyPB() + response_pb = _TablePB( + column_families={COLUMN_FAMILY_ID: column_family}, ) # Patch the stub used by the API method. @@ -259,7 +199,7 @@ def _list_column_families_helper(self, column_family_name=None): # Create expected_result. expected_result = { - column_family_id: table.column_family(column_family_id), + COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID), } # Perform the method and check the result. 
@@ -267,40 +207,23 @@ def _list_column_families_helper(self, column_family_name=None): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'GetTable', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) def test_list_column_families(self): self._list_column_families_helper() - def test_list_column_families_failure(self): - column_family_name = 'not-the-right-format' - with self.assertRaises(ValueError): - self._list_column_families_helper( - column_family_name=column_family_name) - def test_delete(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 871 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.DeleteTableRequest(name=table_name) + request_pb = _DeleteTableRequestPB(name=self.TABLE_NAME) # Create response_pb response_pb = empty_pb2.Empty() @@ -316,28 +239,18 @@ def test_delete(self): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'DeleteTable', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) - def _read_row_helper(self, chunks): + def _read_row_helper(self, chunks, expected_result): from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable.row_data import PartialRowData from gcloud.bigtable import table as MUT - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 596 - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb request_pb = object() # Returned by our mock. @@ -348,56 +261,67 @@ def mock_create_row_request(table_name, row_key, filter_): return request_pb # Create response_iterator - row_key = b'row-key' - response_pb = messages_pb2.ReadRowsResponse(row_key=row_key, - chunks=chunks) - response_iterator = [response_pb] + if chunks is None: + response_iterator = iter(()) # no responses at all + else: + response_pb = _ReadRowsResponsePB(chunks=chunks) + response_iterator = iter([response_pb]) # Patch the stub used by the API method. client._data_stub = stub = _FakeStub(response_iterator) - # Create expected_result. - if chunks: - expected_result = PartialRowData(row_key) - expected_result._committed = True - expected_result._chunks_encountered = True - else: - expected_result = None - # Perform the method and check the result. 
filter_obj = object() with _Monkey(MUT, _create_row_request=mock_create_row_request): - result = table.read_row(row_key, filter_=filter_obj) + result = table.read_row(self.ROW_KEY, filter_=filter_obj) self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'ReadRows', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) - self.assertEqual(mock_created, [(table.name, row_key, filter_obj)]) + self.assertEqual(mock_created, + [(table.name, self.ROW_KEY, filter_obj)]) - def test_read_row(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + def test_read_row_miss_no__responses(self): + self._read_row_helper(None, None) - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) - chunks = [chunk] - self._read_row_helper(chunks) - - def test_read_empty_row(self): + def test_read_row_miss_no_chunks_in_response(self): chunks = [] - self._read_row_helper(chunks) + self._read_row_helper(chunks, None) - def test_read_row_still_partial(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) + def test_read_row_complete(self): + from gcloud.bigtable.row_data import Cell + from gcloud.bigtable.row_data import PartialRowData - # There is never a "commit row". - chunk = messages_pb2.ReadRowsResponse.Chunk(reset_row=True) + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + chunks = [chunk] + expected_result = PartialRowData(row_key=self.ROW_KEY) + family = expected_result._cells.setdefault(self.FAMILY_NAME, {}) + column = family.setdefault(self.QUALIFIER, []) + column.append(Cell.from_pb(chunk)) + self._read_row_helper(chunks, expected_result) + + def test_read_row_still_partial(self): + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + ) + # No "commit row". chunks = [chunk] with self.assertRaises(ValueError): - self._read_row_helper(chunks) + self._read_row_helper(chunks, None) def test_read_rows(self): from gcloud._testing import _Monkey @@ -405,16 +329,9 @@ def test_read_rows(self): from gcloud.bigtable.row_data import PartialRowsData from gcloud.bigtable import table as MUT - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 1111 - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb request_pb = object() # Returned by our mock. 
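
(Illustration only, not part of the patch.) The v2 ReadRowsResponse.CellChunk message carries family_name and qualifier as protobuf wrapper values, which is why the _ReadRowsResponseCellChunkPB helper defined near the bottom of this test module assigns them after construction instead of passing them to the constructor. A minimal sketch of that pattern, with placeholder values:

    from gcloud.bigtable._generated_v2 import bigtable_pb2

    chunk = bigtable_pb2.ReadRowsResponse.CellChunk(
        row_key=b'row-key',
        timestamp_micros=0,
        value=b'cell-value',
        commit_row=True,
    )
    # family_name / qualifier are wrapper messages, so assign their ``value``
    # fields after the chunk is built.
    chunk.family_name.value = u'col-fam'
    chunk.qualifier.value = b'qualifier'
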
@@ -437,48 +354,35 @@ def mock_create_row_request(table_name, **kwargs): start_key = b'start-key' end_key = b'end-key' filter_obj = object() - allow_row_interleaving = True limit = 22 with _Monkey(MUT, _create_row_request=mock_create_row_request): result = table.read_rows( start_key=start_key, end_key=end_key, filter_=filter_obj, - allow_row_interleaving=allow_row_interleaving, limit=limit) + limit=limit) self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'ReadRows', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) created_kwargs = { 'start_key': start_key, 'end_key': end_key, 'filter_': filter_obj, - 'allow_row_interleaving': allow_row_interleaving, 'limit': limit, } self.assertEqual(mock_created, [(table.name, created_kwargs)]) def test_sample_row_keys(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 1333 - - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.SampleRowKeysRequest(table_name=table_name) + request_pb = _SampleRowKeysRequestPB(table_name=self.TABLE_NAME) # Create response_iterator response_iterator = object() # Just passed to a mock. 
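
(Illustration only, not part of the patch.) With allow_row_interleaving gone in v2, the read_rows call exercised above narrows a scan with just keys or ranges, an optional filter, and a limit. A usage sketch, assuming a table object like the one these tests build via self._makeOne(self.TABLE_ID, instance):

    rows = table.read_rows(
        start_key=b'start-key',
        end_key=b'end-key',
        filter_=None,   # or e.g. RowSampleFilter(0.33)
        limit=22,
    )
    # ``rows`` is a PartialRowsData wrapping the streamed ReadRows responses.
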
@@ -494,7 +398,7 @@ def test_sample_row_keys(self): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'SampleRowKeys', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) @@ -502,20 +406,17 @@ def test_sample_row_keys(self): class Test__create_row_request(unittest2.TestCase): def _callFUT(self, table_name, row_key=None, start_key=None, end_key=None, - filter_=None, allow_row_interleaving=None, limit=None): + filter_=None, limit=None): from gcloud.bigtable.table import _create_row_request return _create_row_request( table_name, row_key=row_key, start_key=start_key, end_key=end_key, - filter_=filter_, allow_row_interleaving=allow_row_interleaving, - limit=limit) + filter_=filter_, limit=limit) def test_table_name_only(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - table_name = 'table_name' result = self._callFUT(table_name) - expected_result = messages_pb2.ReadRowsRequest(table_name=table_name) + expected_result = _ReadRowsRequestPB( + table_name=table_name) self.assertEqual(result, expected_result) def test_row_key_row_range_conflict(self): @@ -523,108 +424,133 @@ def test_row_key_row_range_conflict(self): self._callFUT(None, row_key=object(), end_key=object()) def test_row_key(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - table_name = 'table_name' row_key = b'row_key' result = self._callFUT(table_name, row_key=row_key) - expected_result = messages_pb2.ReadRowsRequest( + expected_result = _ReadRowsRequestPB( table_name=table_name, - row_key=row_key, ) + expected_result.rows.row_keys.append(row_key) self.assertEqual(result, expected_result) def test_row_range_start_key(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - table_name = 'table_name' start_key = b'start_key' result = self._callFUT(table_name, start_key=start_key) - expected_result = messages_pb2.ReadRowsRequest( - table_name=table_name, - row_range=data_pb2.RowRange(start_key=start_key), - ) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add(start_key_closed=start_key) self.assertEqual(result, expected_result) def test_row_range_end_key(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - table_name = 'table_name' end_key = b'end_key' result = self._callFUT(table_name, end_key=end_key) - expected_result = messages_pb2.ReadRowsRequest( - table_name=table_name, - row_range=data_pb2.RowRange(end_key=end_key), - ) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add(end_key_open=end_key) self.assertEqual(result, expected_result) def test_row_range_both_keys(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - table_name = 'table_name' start_key = b'start_key' end_key = b'end_key' result = self._callFUT(table_name, start_key=start_key, end_key=end_key) - expected_result = messages_pb2.ReadRowsRequest( - table_name=table_name, - row_range=data_pb2.RowRange(start_key=start_key, end_key=end_key), - ) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add( + start_key_closed=start_key, 
end_key_open=end_key) self.assertEqual(result, expected_result) def test_with_filter(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) from gcloud.bigtable.row_filters import RowSampleFilter - table_name = 'table_name' row_filter = RowSampleFilter(0.33) result = self._callFUT(table_name, filter_=row_filter) - expected_result = messages_pb2.ReadRowsRequest( + expected_result = _ReadRowsRequestPB( table_name=table_name, filter=row_filter.to_pb(), ) self.assertEqual(result, expected_result) - def test_with_allow_row_interleaving(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - table_name = 'table_name' - allow_row_interleaving = True - result = self._callFUT(table_name, - allow_row_interleaving=allow_row_interleaving) - expected_result = messages_pb2.ReadRowsRequest( - table_name=table_name, - allow_row_interleaving=allow_row_interleaving, - ) - self.assertEqual(result, expected_result) - def test_with_limit(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - table_name = 'table_name' limit = 1337 result = self._callFUT(table_name, limit=limit) - expected_result = messages_pb2.ReadRowsRequest( + expected_result = _ReadRowsRequestPB( table_name=table_name, - num_rows_limit=limit, + rows_limit=limit, ) self.assertEqual(result, expected_result) +def _CreateTableRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.CreateTableRequest(*args, **kw) + + +def _CreateTableRequestSplitPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.CreateTableRequest.Split(*args, **kw) + + +def _DeleteTableRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.DeleteTableRequest(*args, **kw) + + +def _GetTableRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.GetTableRequest(*args, **kw) + + +def _ReadRowsRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadRowsRequest(*args, **kw) + + +def _ReadRowsResponseCellChunkPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + family_name = kw.pop('family_name') + qualifier = kw.pop('qualifier') + message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) + message.family_name.value = family_name + message.qualifier.value = qualifier + return message + + +def _ReadRowsResponsePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadRowsResponse(*args, **kw) + + +def _SampleRowKeysRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.SampleRowKeysRequest(*args, **kw) + + +def _TablePB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.Table(*args, **kw) + + +def _ColumnFamilyPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.ColumnFamily(*args, **kw) + + class _Client(object): data_stub = None - cluster_stub = None + instance_stub = None operations_stub = None 
table_stub = None @@ -632,7 +558,7 @@ def __init__(self, timeout_seconds=None): self.timeout_seconds = timeout_seconds -class _Cluster(object): +class _Instance(object): def __init__(self, name, client=None): self.name = name diff --git a/scripts/make_datastore_grpc.py b/scripts/make_datastore_grpc.py index 1de717c4a08c..d1a50b009200 100644 --- a/scripts/make_datastore_grpc.py +++ b/scripts/make_datastore_grpc.py @@ -28,8 +28,7 @@ 'v1beta3', 'datastore.proto') GRPC_ONLY_FILE = os.path.join(ROOT_DIR, 'gcloud', 'datastore', '_generated', 'datastore_grpc_pb2.py') -PROTOC_CMD = 'protoc' -GRPC_PLUGIN = 'grpc_python_plugin' +GRPCIO_VIRTUALENV = os.environ.get('GRPCIO_VIRTUALENV', 'protoc') def get_pb2_contents_with_grpc(): @@ -43,14 +42,14 @@ def get_pb2_contents_with_grpc(): 'v1beta3', 'datastore_pb2.py') try: return_code = subprocess.call([ - PROTOC_CMD, + '%s/bin/python' % GRPCIO_VIRTUALENV, + '-m', + 'grpc.tools.protoc', '--proto_path', PROTOS_DIR, '--python_out', temp_dir, - '--plugin', - 'protoc-gen-grpc=' + GRPC_PLUGIN, - '--grpc_out', + '--grpc_python_out', temp_dir, PROTO_PATH, ]) @@ -73,7 +72,9 @@ def get_pb2_contents_without_grpc(): 'v1beta3', 'datastore_pb2.py') try: return_code = subprocess.call([ - PROTOC_CMD, + '%s/bin/python' % GRPCIO_VIRTUALENV, + '-m', + 'grpc.tools.protoc', '--proto_path', PROTOS_DIR, '--python_out', diff --git a/scripts/make_operations_grpc.py b/scripts/make_operations_grpc.py index 65b877250594..0e779964f3b0 100644 --- a/scripts/make_operations_grpc.py +++ b/scripts/make_operations_grpc.py @@ -23,14 +23,13 @@ ROOT_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), '..')) -PROTOS_DIR = os.path.join(ROOT_DIR, 'cloud-bigtable-client', - 'bigtable-protos', 'src', 'main', 'proto') +PROTOS_DIR = os.path.join(ROOT_DIR, 'googleapis-pb') PROTO_PATH = os.path.join(PROTOS_DIR, 'google', 'longrunning', 'operations.proto') +GENERATED_SUBDIR = os.environ.get('GENERATED_SUBDIR', '_generated') GRPC_ONLY_FILE = os.path.join(ROOT_DIR, 'gcloud', 'bigtable', - '_generated', 'operations_grpc_pb2.py') -PROTOC_CMD = 'protoc' -GRPC_PLUGIN = 'grpc_python_plugin' + GENERATED_SUBDIR, 'operations_grpc_pb2.py') +GRPCIO_VIRTUALENV = os.environ.get('GRPCIO_VIRTUALENV', 'protoc') def get_pb2_contents_with_grpc(): @@ -44,14 +43,14 @@ def get_pb2_contents_with_grpc(): 'operations_pb2.py') try: return_code = subprocess.call([ - PROTOC_CMD, + '%s/bin/python' % GRPCIO_VIRTUALENV, + '-m', + 'grpc.tools.protoc', '--proto_path', PROTOS_DIR, '--python_out', temp_dir, - '--plugin', - 'protoc-gen-grpc=' + GRPC_PLUGIN, - '--grpc_out', + '--grpc_python_out', temp_dir, PROTO_PATH, ]) @@ -74,7 +73,9 @@ def get_pb2_contents_without_grpc(): 'operations_pb2.py') try: return_code = subprocess.call([ - PROTOC_CMD, + '%s/bin/python' % GRPCIO_VIRTUALENV, + '-m', + 'grpc.tools.protoc', '--proto_path', PROTOS_DIR, '--python_out', diff --git a/scripts/rewrite_imports.py b/scripts/rewrite_imports.py index 7429ec14734c..d6523d4d5410 100644 --- a/scripts/rewrite_imports.py +++ b/scripts/rewrite_imports.py @@ -18,15 +18,20 @@ and the dependent modules (google/api and google/protobuf). 
""" -import glob +import sys IMPORT_TEMPLATE = 'import %s' IMPORT_FROM_TEMPLATE = 'from %s import ' REPLACEMENTS = { + # Bigtable v1 'google.bigtable.admin.cluster.v1': 'gcloud.bigtable._generated', 'google.bigtable.admin.table.v1': 'gcloud.bigtable._generated', 'google.bigtable.v1': 'gcloud.bigtable._generated', + # Bigtble v2 + 'google.bigtable.v2': 'gcloud.bigtable._generated_v2', + 'google.bigtable.admin.v2': 'gcloud.bigtable._generated_v2', + # Datastore v1beta3 'google.datastore.v1beta3': 'gcloud.datastore._generated', } @@ -107,6 +112,10 @@ def transform_line(line): :rtype: str :returns: The transformed line. """ + # Work around https://github.com/grpc/grpc/issues/7101 + if line == 'import ': + return '' + for old_module, new_module in REPLACEMENTS.iteritems(): result = transform_old_to_new(line, old_module, new_module) if result is not None: @@ -135,9 +144,7 @@ def rewrite_file(filename): def main(): """Rewrites all PB2 files.""" - pb2_files = (glob.glob('gcloud/bigtable/_generated/*pb2.py') + - glob.glob('gcloud/datastore/_generated/*pb2.py')) - for filename in pb2_files: + for filename in sys.argv[1:]: rewrite_file(filename) diff --git a/scripts/run_pylint.py b/scripts/run_pylint.py index d24902136e22..7fa7662efb07 100644 --- a/scripts/run_pylint.py +++ b/scripts/run_pylint.py @@ -32,6 +32,7 @@ IGNORED_DIRECTORIES = [ os.path.join('gcloud', 'bigtable', '_generated'), + os.path.join('gcloud', 'bigtable', '_generated_v2'), os.path.join('gcloud', 'datastore', '_generated'), ] IGNORED_FILES = [ diff --git a/system_tests/bigtable.py b/system_tests/bigtable.py index 4572766bccc3..6933bc60847c 100644 --- a/system_tests/bigtable.py +++ b/system_tests/bigtable.py @@ -35,9 +35,8 @@ from system_test_utils import unique_resource_id -CENTRAL_1C_ZONE = 'us-central1-c' -CLUSTER_ID = 'gcloud' + unique_resource_id('-') -CLUSTER_ID = CLUSTER_ID[:30] # Cluster IDs can't exceed 30 chars. +LOCATION_ID = 'us-central1-c' +INSTANCE_ID = 'gcloud' + unique_resource_id('-') TABLE_ID = 'gcloud-python-test-table' COLUMN_FAMILY_ID1 = u'col-fam-id1' COLUMN_FAMILY_ID2 = u'col-fam-id2' @@ -50,13 +49,7 @@ CELL_VAL4 = b'foo' ROW_KEY = b'row-key' ROW_KEY_ALT = b'row-key-alt' -EXISTING_CLUSTERS = [] -EXPECTED_ZONES = ( - 'asia-east1-b', - 'europe-west1-c', - 'us-central1-b', - CENTRAL_1C_ZONE, -) +EXISTING_INSTANCES = [] class Config(object): @@ -66,13 +59,13 @@ class Config(object): global state. """ CLIENT = None - CLUSTER = None + INSTANCE = None def _operation_wait(operation, max_attempts=5): """Wait until an operation has completed. - :type operation: :class:`gcloud.bigtable.cluster.Operation` + :type operation: :class:`gcloud.bigtable.instance.Operation` :param operation: Operation that has not finished. 
:type max_attempts: int @@ -89,114 +82,115 @@ def _operation_wait(operation, max_attempts=5): return True +def _retry_backoff(meth, *args, **kw): + from grpc.beta.interfaces import StatusCode + from grpc.framework.interfaces.face.face import AbortionError + backoff_intervals = [1, 2, 4, 8] + while True: + try: + return meth(*args, **kw) + except AbortionError as error: + if error.code != StatusCode.UNAVAILABLE: + raise + if backoff_intervals: + time.sleep(backoff_intervals.pop(0)) + else: + raise + + def setUpModule(): _helpers.PROJECT = TESTS_PROJECT Config.CLIENT = Client(admin=True) - Config.CLUSTER = Config.CLIENT.cluster(CENTRAL_1C_ZONE, CLUSTER_ID, - display_name=CLUSTER_ID) + Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID) Config.CLIENT.start() - clusters, failed_zones = Config.CLIENT.list_clusters() + instances, failed_locations = _retry_backoff( + Config.CLIENT.list_instances) - if len(failed_zones) != 0: - raise ValueError('List clusters failed in module set up.') + if len(failed_locations) != 0: + raise ValueError('List instances failed in module set up.') - EXISTING_CLUSTERS[:] = clusters + EXISTING_INSTANCES[:] = instances - # After listing, create the test cluster. - created_op = Config.CLUSTER.create() + # After listing, create the test instance. + created_op = Config.INSTANCE.create() if not _operation_wait(created_op): - raise RuntimeError('Cluster creation exceed 5 seconds.') + raise RuntimeError('Instance creation exceed 5 seconds.') def tearDownModule(): - Config.CLUSTER.delete() + Config.INSTANCE.delete() Config.CLIENT.stop() -class TestClusterAdminAPI(unittest2.TestCase): +class TestInstanceAdminAPI(unittest2.TestCase): def setUp(self): - self.clusters_to_delete = [] + self.instances_to_delete = [] def tearDown(self): - for cluster in self.clusters_to_delete: - cluster.delete() - - def test_list_zones(self): - zones = Config.CLIENT.list_zones() - self.assertEqual(sorted(zones), sorted(EXPECTED_ZONES)) - - def test_list_clusters(self): - clusters, failed_zones = Config.CLIENT.list_clusters() - self.assertEqual(failed_zones, []) - # We have added one new cluster in `setUpModule`. - self.assertEqual(len(clusters), len(EXISTING_CLUSTERS) + 1) - for cluster in clusters: - cluster_existence = (cluster in EXISTING_CLUSTERS or - cluster == Config.CLUSTER) - self.assertTrue(cluster_existence) + for instance in self.instances_to_delete: + instance.delete() + + def test_list_instances(self): + instances, failed_locations = Config.CLIENT.list_instances() + self.assertEqual(failed_locations, []) + # We have added one new instance in `setUpModule`. + self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1) + for instance in instances: + instance_existence = (instance in EXISTING_INSTANCES or + instance == Config.INSTANCE) + self.assertTrue(instance_existence) def test_reload(self): - # Use same arguments as Config.CLUSTER (created in `setUpModule`) + # Use same arguments as Config.INSTANCE (created in `setUpModule`) # so we can use reload() on a fresh instance. - cluster = Config.CLIENT.cluster(CENTRAL_1C_ZONE, CLUSTER_ID) + instance = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID) # Make sure metadata unset before reloading. 
-        cluster.display_name = None
-        cluster.serve_nodes = None
+        instance.display_name = None
 
-        cluster.reload()
-        self.assertEqual(cluster.display_name, Config.CLUSTER.display_name)
-        self.assertEqual(cluster.serve_nodes, Config.CLUSTER.serve_nodes)
+        instance.reload()
+        self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
 
-    def test_create_cluster(self):
-        cluster_id = 'new' + unique_resource_id('-')
-        cluster_id = cluster_id[:30]  # Cluster IDs can't exceed 30 chars.
-        cluster = Config.CLIENT.cluster(CENTRAL_1C_ZONE, cluster_id)
-        operation = cluster.create()
-        # Make sure this cluster gets deleted after the test case.
-        self.clusters_to_delete.append(cluster)
+    def test_create_instance(self):
+        ALT_INSTANCE_ID = 'new' + unique_resource_id('-')
+        instance = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID)
+        operation = instance.create()
+        # Make sure this instance gets deleted after the test case.
+        self.instances_to_delete.append(instance)
 
         # We want to make sure the operation completes.
         self.assertTrue(_operation_wait(operation))
 
-        # Create a new cluster instance and make sure it is the same.
-        cluster_alt = Config.CLIENT.cluster(CENTRAL_1C_ZONE, cluster_id)
-        cluster_alt.reload()
+        # Create a new instance and make sure it is the same.
+        instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID)
+        instance_alt.reload()
 
-        self.assertEqual(cluster, cluster_alt)
-        self.assertEqual(cluster.display_name, cluster_alt.display_name)
-        self.assertEqual(cluster.serve_nodes, cluster_alt.serve_nodes)
+        self.assertEqual(instance, instance_alt)
+        self.assertEqual(instance.display_name, instance_alt.display_name)
 
     def test_update(self):
-        curr_display_name = Config.CLUSTER.display_name
-        Config.CLUSTER.display_name = 'Foo Bar Baz'
-        operation = Config.CLUSTER.update()
-
-        # We want to make sure the operation completes.
-        self.assertTrue(_operation_wait(operation))
-
-        # Create a new cluster instance and make sure it is the same.
-        cluster_alt = Config.CLIENT.cluster(CENTRAL_1C_ZONE, CLUSTER_ID)
-        self.assertNotEqual(cluster_alt.display_name,
-                            Config.CLUSTER.display_name)
-        cluster_alt.reload()
-        self.assertEqual(cluster_alt.display_name,
-                         Config.CLUSTER.display_name)
-
-        # Make sure to put the cluster back the way it was for the
+        OLD_DISPLAY_NAME = Config.INSTANCE.display_name
+        NEW_DISPLAY_NAME = 'Foo Bar Baz'
+        Config.INSTANCE.display_name = NEW_DISPLAY_NAME
+        Config.INSTANCE.update()
+
+        # Create a new instance and reload it.
+        instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
+        self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
+        instance_alt.reload()
+        self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
+
+        # Make sure to put the instance back the way it was for the
         # other test cases.
-        Config.CLUSTER.display_name = curr_display_name
-        operation = Config.CLUSTER.update()
-
-        # We want to make sure the operation completes.
- self.assertTrue(_operation_wait(operation)) + Config.INSTANCE.display_name = OLD_DISPLAY_NAME + Config.INSTANCE.update() class TestTableAdminAPI(unittest2.TestCase): @classmethod def setUpClass(cls): - cls._table = Config.CLUSTER.table(TABLE_ID) + cls._table = Config.INSTANCE.table(TABLE_ID) cls._table.create() @classmethod @@ -211,14 +205,14 @@ def tearDown(self): table.delete() def test_list_tables(self): - # Since `Config.CLUSTER` is newly created in `setUpModule`, the table + # Since `Config.INSTANCE` is newly created in `setUpModule`, the table # created in `setUpClass` here will be the only one. - tables = Config.CLUSTER.list_tables() + tables = Config.INSTANCE.list_tables() self.assertEqual(tables, [self._table]) def test_create_table(self): temp_table_id = 'foo-bar-baz-table' - temp_table = Config.CLUSTER.table(temp_table_id) + temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -226,34 +220,15 @@ def test_create_table(self): name_attr = operator.attrgetter('name') expected_tables = sorted([temp_table, self._table], key=name_attr) - # Then query for the tables in the cluster and sort them by + # Then query for the tables in the instance and sort them by # name as well. - tables = Config.CLUSTER.list_tables() + tables = Config.INSTANCE.list_tables() sorted_tables = sorted(tables, key=name_attr) self.assertEqual(sorted_tables, expected_tables) - def test_rename_table(self): - from grpc.beta import interfaces - from grpc.framework.interfaces.face import face - - temp_table_id = 'foo-bar-baz-table' - temp_table = Config.CLUSTER.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - with self.assertRaises(face.LocalError) as exc_manager: - temp_table.rename(temp_table_id + '-alt') - exc_caught = exc_manager.exception - self.assertNotEqual(exc_caught, None) - self.assertEqual(exc_caught.code, - interfaces.StatusCode.UNIMPLEMENTED) - self.assertEqual( - exc_caught.details, - 'BigtableTableService.RenameTable is not yet implemented') - def test_create_column_family(self): temp_table_id = 'foo-bar-baz-table' - temp_table = Config.CLUSTER.table(temp_table_id) + temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -274,7 +249,7 @@ def test_create_column_family(self): def test_update_column_family(self): temp_table_id = 'foo-bar-baz-table' - temp_table = Config.CLUSTER.table(temp_table_id) + temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -297,7 +272,7 @@ def test_update_column_family(self): def test_delete_column_family(self): temp_table_id = 'foo-bar-baz-table' - temp_table = Config.CLUSTER.table(temp_table_id) + temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -318,7 +293,7 @@ class TestDataAPI(unittest2.TestCase): @classmethod def setUpClass(cls): - cls._table = table = Config.CLUSTER.table(TABLE_ID) + cls._table = table = Config.INSTANCE.table(TABLE_ID) table.create() table.column_family(COLUMN_FAMILY_ID1).create() table.column_family(COLUMN_FAMILY_ID2).create() @@ -376,7 +351,6 @@ def test_read_row(self): # Read back the contents of the row. partial_row_data = self._table.read_row(ROW_KEY) - self.assertTrue(partial_row_data.committed) self.assertEqual(partial_row_data.row_key, ROW_KEY) # Check the cells match. 
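
(Illustration only, not part of the patch.) What these data-API system tests assert, in miniature: read_row returns a PartialRowData whose cells are keyed first by column family and then by column qualifier, each holding a list of Cell objects. Assuming the table and constants configured above:

    partial_row_data = self._table.read_row(ROW_KEY)
    family_cells = partial_row_data.cells[COLUMN_FAMILY_ID1]  # {qualifier: [Cell, ...]}
    for qualifier, cells in family_cells.items():
        cell = cells[0]
        # cell.value / cell.timestamp hold what the test wrote earlier.
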
@@ -459,7 +433,6 @@ def test_read_with_label_applied(self): # Bring our two labeled columns together. row_filter = RowFilterUnion(filters=[chain1, chain2]) partial_row_data = self._table.read_row(ROW_KEY, filter_=row_filter) - self.assertTrue(partial_row_data.committed) self.assertEqual(partial_row_data.row_key, ROW_KEY) cells_returned = partial_row_data.cells diff --git a/system_tests/bigtable_happybase.py b/system_tests/bigtable_happybase.py index 231f1189d61c..26a3e870a160 100644 --- a/system_tests/bigtable_happybase.py +++ b/system_tests/bigtable_happybase.py @@ -30,9 +30,8 @@ _PACK_I64 = struct.Struct('>q').pack _FIRST_ELT = operator.itemgetter(0) _helpers.PROJECT = TESTS_PROJECT -ZONE = 'us-central1-c' -CLUSTER_ID = 'gcloud' + unique_resource_id('-') -CLUSTER_ID = CLUSTER_ID[:30] # Cluster IDs can't exceed 30 chars. +LOCATION_ID = 'us-central1-c' +INSTANCE_ID = 'gcloud' + unique_resource_id('-') TABLE_NAME = 'table-name' ALT_TABLE_NAME = 'other-table' TTL_FOR_TEST = 3 @@ -65,12 +64,12 @@ class Config(object): def set_connection(): client = client_mod.Client(admin=True) - cluster = client.cluster(ZONE, CLUSTER_ID) + instance = client.instance(INSTANCE_ID, LOCATION_ID) client.start() - operation = cluster.create() + operation = instance.create() if not _operation_wait(operation): - raise RuntimeError('Cluster creation exceed 5 seconds.') - Config.CONNECTION = Connection(cluster=cluster) + raise RuntimeError('Instance creation exceed 5 seconds.') + Config.CONNECTION = Connection(instance=instance) def setUpModule(): @@ -81,7 +80,7 @@ def setUpModule(): def tearDownModule(): Config.CONNECTION.delete_table(TABLE_NAME) - Config.CONNECTION._cluster.delete() + Config.CONNECTION._instance.delete() Config.CONNECTION.close() diff --git a/tox.ini b/tox.ini index 4d01e63b5d72..c960032c62b4 100644 --- a/tox.ini +++ b/tox.ini @@ -99,7 +99,7 @@ deps = {[testenv:docs]deps} passenv = {[testenv:docs]passenv} [pep8] -exclude = docs/conf.py,gcloud/bigtable/_generated/*,gcloud/datastore/_generated/* +exclude = docs/conf.py,gcloud/bigtable/_generated*/*,gcloud/datastore/_generated/* verbose = 1 [testenv:lint]
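
(Illustration only, not part of the patch.) Taken together, the reworked system tests drive the v2 instance-centric lifecycle sketched below. Identifiers are placeholders, the Client import path is assumed from these tests, and operation polling (see _operation_wait and _retry_backoff above) is elided:

    from gcloud.bigtable.client import Client

    client = Client(admin=True)
    client.start()

    instance = client.instance('my-instance-id', 'us-central1-c')
    operation = instance.create()
    # ... wait for the returned long-running operation to finish ...

    table = instance.table('my-table')
    table.create()
    table.column_family('my-col-fam').create()

    # The happybase layer now wraps an instance rather than a cluster:
    #     connection = Connection(instance=instance)

    client.stop()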