From 20eeb0a68d7b44d07a6d84bc7a7e040ad63bb96d Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Tue, 30 Jul 2024 17:40:21 -0600 Subject: [PATCH 01/12] fix: use single routing metadata header (#1005) --- .../services/bigtable/async_client.py | 116 ++++++++++-------- .../bigtable_v2/services/bigtable/client.py | 72 ++++++----- owlbot.py | 11 ++ tests/unit/data/_async/test_client.py | 57 +++++---- 4 files changed, 147 insertions(+), 109 deletions(-) diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index 12432dda7..1ed7a4740 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -340,11 +340,13 @@ def read_rows( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -441,11 +443,13 @@ def sample_row_keys( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -563,11 +567,13 @@ async def mutate_row( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -679,11 +685,13 @@ def mutate_rows( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -838,11 +846,13 @@ async def check_and_mutate_row( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. 
self._client._validate_universe_domain() @@ -936,9 +946,11 @@ async def ping_and_warm( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1062,11 +1074,13 @@ async def read_modify_write_row( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1172,11 +1186,13 @@ def generate_initial_change_stream_partitions( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1274,11 +1290,13 @@ def read_change_stream( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1377,11 +1395,13 @@ def execute_query( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("instance_name", request.instance_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("instance_name", request.instance_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index 0937c90fe..4a3f19ce6 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -817,9 +817,9 @@ def read_rows( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. 
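The hunks above (and the matching ones below for the synchronous client) all apply the same guard: the generated wrapper only appends a routing header when no "x-goog-request-params" entry is already present in the caller-supplied metadata, so the hand-written data client can attach its own routing metadata without the request carrying duplicates. A minimal standalone sketch of that guard (the helper name is illustrative, not part of the patch; `ROUTING_METADATA_KEY` and `to_grpc_metadata` are the real `google.api_core` symbols the patch uses):

    from google.api_core import gapic_v1


    def merge_routing_header(metadata, params):
        """Append an x-goog-request-params entry only if none is present yet."""
        metadata = tuple(metadata)
        # ROUTING_METADATA_KEY is "x-goog-request-params"; skip the append when
        # the caller already supplied a routing entry of its own.
        if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata):
            metadata += (gapic_v1.routing_header.to_grpc_metadata(params),)
        return metadata


    # A pre-existing routing entry wins; no duplicate is added.
    existing = [(gapic_v1.routing_header.ROUTING_METADATA_KEY, "table_name=t")]
    merged = merge_routing_header(existing, (("table_name", "other"),))
    assert len(merged) == 1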
self._validate_universe_domain() @@ -933,9 +933,9 @@ def sample_row_keys( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1070,9 +1070,9 @@ def mutate_row( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1201,9 +1201,9 @@ def mutate_rows( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1375,9 +1375,9 @@ def check_and_mutate_row( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1477,9 +1477,9 @@ def ping_and_warm( header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1620,9 +1620,9 @@ def read_modify_write_row( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1725,11 +1725,13 @@ def generate_initial_change_stream_partitions( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1824,11 +1826,13 @@ def read_change_stream( # Certain fields should be provided within the metadata header; # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1933,9 +1937,9 @@ def execute_query( header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() diff --git a/owlbot.py b/owlbot.py index 84aa3d61b..090f7ee93 100644 --- a/owlbot.py +++ b/owlbot.py @@ -143,6 +143,17 @@ def insert(file, before_line, insert_line, after_line, escape=None): escape='"' ) +# ---------------------------------------------------------------------------- +# Patch duplicate routing header: https://github.com/googleapis/gapic-generator-python/issues/2078 +# ---------------------------------------------------------------------------- +for file in ["client.py", "async_client.py"]: + s.replace( + f"google/cloud/bigtable_v2/services/bigtable/{file}", + "metadata \= tuple\(metadata\) \+ \(", + """metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (""" + ) # ---------------------------------------------------------------------------- # Samples templates diff --git a/tests/unit/data/_async/test_client.py b/tests/unit/data/_async/test_client.py index 9ebc403ce..6c49ca0da 100644 --- a/tests/unit/data/_async/test_client.py +++ b/tests/unit/data/_async/test_client.py @@ -1277,7 +1277,7 @@ async def test_customizable_retryable_errors( ("read_rows_sharded", ([ReadRowsQuery()],), "read_rows"), ("row_exists", (b"row_key",), "read_rows"), ("sample_row_keys", (), "sample_row_keys"), - ("mutate_row", (b"row_key", [mock.Mock()]), "mutate_row"), + ("mutate_row", (b"row_key", [mutations.DeleteAllFromRow()]), "mutate_row"), ( "bulk_mutate_rows", ([mutations.RowMutationEntry(b"key", [mutations.DeleteAllFromRow()])],), @@ -1286,7 +1286,7 @@ async def test_customizable_retryable_errors( ("check_and_mutate_row", (b"row_key", None), "check_and_mutate_row"), ( "read_modify_write_row", - (b"row_key", mock.Mock()), + (b"row_key", IncrementRule("f", "q")), "read_modify_write_row", ), ], @@ -1298,31 +1298,34 @@ async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_ from google.cloud.bigtable.data import TableAsync profile = "profile" if include_app_profile else None - with mock.patch( - f"google.cloud.bigtable_v2.BigtableAsyncClient.{gapic_fn}", mock.AsyncMock() - ) as gapic_mock: - gapic_mock.side_effect = RuntimeError("stop early") - async with _make_client() as client: - table = TableAsync(client, "instance-id", "table-id", profile) - try: - test_fn = table.__getattribute__(fn_name) - maybe_stream = await test_fn(*fn_args) - [i async for i in maybe_stream] - except Exception: - # we expect an exception from attempting to call the mock - pass - kwargs = gapic_mock.call_args_list[0].kwargs - metadata = kwargs["metadata"] - goog_metadata = None - for key, value in metadata: - if key == 
"x-goog-request-params": - goog_metadata = value - assert goog_metadata is not None, "x-goog-request-params not found" - assert "table_name=" + table.table_name in goog_metadata - if include_app_profile: - assert "app_profile_id=profile" in goog_metadata - else: - assert "app_profile_id=" not in goog_metadata + client = _make_client() + # create mock for rpc stub + transport_mock = mock.MagicMock() + rpc_mock = mock.AsyncMock() + transport_mock._wrapped_methods.__getitem__.return_value = rpc_mock + client._gapic_client._client._transport = transport_mock + client._gapic_client._client._is_universe_domain_valid = True + table = TableAsync(client, "instance-id", "table-id", profile) + try: + test_fn = table.__getattribute__(fn_name) + maybe_stream = await test_fn(*fn_args) + [i async for i in maybe_stream] + except Exception: + # we expect an exception from attempting to call the mock + pass + assert rpc_mock.call_count == 1 + kwargs = rpc_mock.call_args_list[0].kwargs + metadata = kwargs["metadata"] + # expect single metadata entry + assert len(metadata) == 1 + # expect x-goog-request-params tag + assert metadata[0][0] == "x-goog-request-params" + routing_str = metadata[0][1] + assert "table_name=" + table.table_name in routing_str + if include_app_profile: + assert "app_profile_id=profile" in routing_str + else: + assert "app_profile_id=" not in routing_str class TestReadRows: From f029a242e2b0e6020d0b87ef256a414194321fad Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 30 Jul 2024 16:43:34 -0700 Subject: [PATCH 02/12] chore: Update gapic-generator-python to v1.18.4 (#1002) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add min, max, hll aggregators and more types docs: Corrected various type documentation PiperOrigin-RevId: 654022916 Source-Link: https://github.com/googleapis/googleapis/commit/157e3bf69c47a280139758ffe59f19834679ec5e Source-Link: https://github.com/googleapis/googleapis-gen/commit/f781685ad52d58b198baf95fa120d87877b3e46e Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZjc4MTY4NWFkNTJkNThiMTk4YmFmOTVmYTEyMGQ4Nzg3N2IzZTQ2ZSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: add MergeToCell to Mutation APIs PiperOrigin-RevId: 654025780 Source-Link: https://github.com/googleapis/googleapis/commit/9effffdf94e20cafb0beeada3727abfff2a32346 Source-Link: https://github.com/googleapis/googleapis-gen/commit/28db5a5df7c4c24adb3b01086c3db2af976241b3 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMjhkYjVhNWRmN2M0YzI0YWRiM2IwMTA4NmMzZGIyYWY5NzYyNDFiMyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: 
Update gapic-generator-python to v1.18.3 PiperOrigin-RevId: 655567917 Source-Link: https://github.com/googleapis/googleapis/commit/43aa65e3897557c11d947f3133ddb76e5c4b2a6c Source-Link: https://github.com/googleapis/googleapis-gen/commit/0e38378753074c0f66ff63348d6864929e104d5c Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMGUzODM3ODc1MzA3NGMwZjY2ZmY2MzM0OGQ2ODY0OTI5ZTEwNGQ1YyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.18.3 PiperOrigin-RevId: 656040068 Source-Link: https://github.com/googleapis/googleapis/commit/3f4e29a88f2e1f412439e61c48c88f81dec0bbbf Source-Link: https://github.com/googleapis/googleapis-gen/commit/b8feb2109dde7b0938c22c993d002251ac6714dc Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjhmZWIyMTA5ZGRlN2IwOTM4YzIyYzk5M2QwMDIyNTFhYzY3MTRkYyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.18.4 PiperOrigin-RevId: 657207628 Source-Link: https://github.com/googleapis/googleapis/commit/33fe71e5a2061402283e0455636a98e5b78eaf7f Source-Link: https://github.com/googleapis/googleapis-gen/commit/e02739d122ed15bd5ef5771c57f12a83d47a1dda Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZTAyNzM5ZDEyMmVkMTViZDVlZjU3NzFjNTdmMTJhODNkNDdhMWRkYSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 4 + .../bigtable_instance_admin/client.py | 4 + .../bigtable_instance_admin/pagers.py | 69 +++- .../bigtable_table_admin/async_client.py | 8 + .../services/bigtable_table_admin/client.py | 8 + .../services/bigtable_table_admin/pagers.py | 125 +++++++- google/cloud/bigtable_admin_v2/types/types.py | 300 ++++++++++++++++-- google/cloud/bigtable_v2/types/data.py | 53 ++++ google/cloud/bigtable_v2/types/types.py | 18 ++ .../test_bigtable_instance_admin.py | 13 +- .../test_bigtable_table_admin.py | 25 +- tests/unit/gapic/bigtable_v2/test_bigtable.py | 1 + 12 files changed, 587 insertions(+), 41 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 171dd8298..abed851d5 100644 --- 
a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1698,6 +1698,8 @@ async def list_app_profiles( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -2277,6 +2279,8 @@ async def list_hot_tablets( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 550bcb1e7..5877342c4 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -2168,6 +2168,8 @@ def list_app_profiles( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -2741,6 +2743,8 @@ def list_hot_tablets( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index f76da7622..bb7ee001f 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import retry_async as retries_async from typing import ( Any, AsyncIterator, @@ -22,8 +25,18 @@ Tuple, Optional, Iterator, + Union, ) +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] + OptionalAsyncRetry = Union[ + retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None + ] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore + from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance @@ -52,6 +65,8 @@ def __init__( request: bigtable_instance_admin.ListAppProfilesRequest, response: bigtable_instance_admin.ListAppProfilesResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -63,12 +78,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = bigtable_instance_admin.ListAppProfilesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -79,7 +99,12 @@ def pages(self) -> Iterator[bigtable_instance_admin.ListAppProfilesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[instance.AppProfile]: @@ -116,6 +141,8 @@ def __init__( request: bigtable_instance_admin.ListAppProfilesRequest, response: bigtable_instance_admin.ListAppProfilesResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -127,12 +154,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_instance_admin.ListAppProfilesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -145,7 +177,12 @@ async def pages( yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[instance.AppProfile]: @@ -184,6 +221,8 @@ def __init__( request: bigtable_instance_admin.ListHotTabletsRequest, response: bigtable_instance_admin.ListHotTabletsResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -195,12 +234,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = bigtable_instance_admin.ListHotTabletsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -211,7 +255,12 @@ def pages(self) -> Iterator[bigtable_instance_admin.ListHotTabletsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[instance.HotTablet]: @@ -248,6 +297,8 @@ def __init__( request: bigtable_instance_admin.ListHotTabletsRequest, response: bigtable_instance_admin.ListHotTabletsResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -259,12 +310,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_instance_admin.ListHotTabletsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -277,7 +333,12 @@ async def pages( yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[instance.HotTablet]: diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 5e429f7e5..7454e08ac 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -618,6 +618,8 @@ async def list_tables( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -1197,6 +1199,8 @@ async def list_authorized_views( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -2191,6 +2195,8 @@ async def list_snapshots( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -2750,6 +2756,8 @@ async def list_backups( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index e9b06965c..4645d4f3b 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -1149,6 +1149,8 @@ def list_tables( method=rpc, request=request, 
response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -1710,6 +1712,8 @@ def list_authorized_views( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -2677,6 +2681,8 @@ def list_snapshots( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -3218,6 +3224,8 @@ def list_backups( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index d6277bce2..5e20fbc5f 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import retry_async as retries_async from typing import ( Any, AsyncIterator, @@ -22,8 +25,18 @@ Tuple, Optional, Iterator, + Union, ) +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] + OptionalAsyncRetry = Union[ + retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None + ] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore + from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table @@ -52,6 +65,8 @@ def __init__( request: bigtable_table_admin.ListTablesRequest, response: bigtable_table_admin.ListTablesResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -63,12 +78,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_table_admin.ListTablesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -79,7 +99,12 @@ def pages(self) -> Iterator[bigtable_table_admin.ListTablesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[table.Table]: @@ -114,6 +139,8 @@ def __init__( request: bigtable_table_admin.ListTablesRequest, response: bigtable_table_admin.ListTablesResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -125,12 +152,17 @@ def __init__( The initial request object. 
response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_table_admin.ListTablesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -141,7 +173,12 @@ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListTablesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[table.Table]: @@ -180,6 +217,8 @@ def __init__( request: bigtable_table_admin.ListAuthorizedViewsRequest, response: bigtable_table_admin.ListAuthorizedViewsResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -191,12 +230,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -207,7 +251,12 @@ def pages(self) -> Iterator[bigtable_table_admin.ListAuthorizedViewsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[table.AuthorizedView]: @@ -244,6 +293,8 @@ def __init__( request: bigtable_table_admin.ListAuthorizedViewsRequest, response: bigtable_table_admin.ListAuthorizedViewsResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -255,12 +306,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -273,7 +329,12 @@ async def pages( yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[table.AuthorizedView]: @@ -312,6 +373,8 @@ def __init__( request: bigtable_table_admin.ListSnapshotsRequest, response: bigtable_table_admin.ListSnapshotsResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -323,12 +386,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_table_admin.ListSnapshotsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -339,7 +407,12 @@ def pages(self) -> Iterator[bigtable_table_admin.ListSnapshotsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[table.Snapshot]: @@ -374,6 +447,8 @@ def __init__( request: bigtable_table_admin.ListSnapshotsRequest, response: bigtable_table_admin.ListSnapshotsResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -385,12 +460,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = bigtable_table_admin.ListSnapshotsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -401,7 +481,12 @@ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListSnapshotsRespons yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[table.Snapshot]: @@ -440,6 +525,8 @@ def __init__( request: bigtable_table_admin.ListBackupsRequest, response: bigtable_table_admin.ListBackupsResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -451,12 +538,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_table_admin.ListBackupsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -467,7 +559,12 @@ def pages(self) -> Iterator[bigtable_table_admin.ListBackupsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[table.Backup]: @@ -502,6 +599,8 @@ def __init__( request: bigtable_table_admin.ListBackupsRequest, response: bigtable_table_admin.ListBackupsResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -513,12 +612,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = bigtable_table_admin.ListBackupsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -529,7 +633,12 @@ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListBackupsResponse] yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[table.Backup]: diff --git a/google/cloud/bigtable_admin_v2/types/types.py b/google/cloud/bigtable_admin_v2/types/types.py index 362effbab..7d1d99034 100644 --- a/google/cloud/bigtable_admin_v2/types/types.py +++ b/google/cloud/bigtable_admin_v2/types/types.py @@ -36,26 +36,20 @@ class Type(proto.Message): For compatibility with Bigtable's existing untyped APIs, each ``Type`` includes an ``Encoding`` which describes how to convert - to/from the underlying data. This might involve composing a series - of steps into an "encoding chain," for example to convert from INT64 - -> STRING -> raw bytes. In most cases, a "link" in the encoding - chain will be based an on existing GoogleSQL conversion function - like ``CAST``. + to/from the underlying data. - Each link in the encoding chain also defines the following - properties: + Each encoding also defines the following properties: - - Natural sort: Does the encoded value sort consistently with the - original typed value? Note that Bigtable will always sort data - based on the raw encoded value, *not* the decoded type. + - Order-preserving: Does the encoded value sort consistently with + the original typed value? Note that Bigtable will always sort + data based on the raw encoded value, *not* the decoded type. - Example: BYTES values sort in the same order as their raw encodings. - - Counterexample: Encoding INT64 to a fixed-width STRING does - *not* preserve sort order when dealing with negative numbers. - INT64(1) > INT64(-1), but STRING("-00001") > STRING("00001). - - The overall encoding chain has this property if *every* link - does. + - Counterexample: Encoding INT64 as a fixed-width decimal string + does *not* preserve sort order when dealing with negative + numbers. ``INT64(1) > INT64(-1)``, but + ``STRING("-00001") > STRING("00001)``. - Self-delimiting: If we concatenate two encoded values, can we always tell where the first one ends and the second one begins? @@ -65,8 +59,6 @@ class Type(proto.Message): by a sign. - Counterexample: If we concatenate two UTF-8 encoded STRINGs, we have no way to tell where the first one ends. - - The overall encoding chain has this property if *any* link - does. - Compatibility: Which other systems have matching encoding schemes? For example, does this encoding have a GoogleSQL @@ -91,10 +83,42 @@ class Type(proto.Message): int64_type (google.cloud.bigtable_admin_v2.types.Type.Int64): Int64 + This field is a member of `oneof`_ ``kind``. + float32_type (google.cloud.bigtable_admin_v2.types.Type.Float32): + Float32 + + This field is a member of `oneof`_ ``kind``. + float64_type (google.cloud.bigtable_admin_v2.types.Type.Float64): + Float64 + + This field is a member of `oneof`_ ``kind``. + bool_type (google.cloud.bigtable_admin_v2.types.Type.Bool): + Bool + + This field is a member of `oneof`_ ``kind``. 
+ timestamp_type (google.cloud.bigtable_admin_v2.types.Type.Timestamp): + Timestamp + + This field is a member of `oneof`_ ``kind``. + date_type (google.cloud.bigtable_admin_v2.types.Type.Date): + Date + This field is a member of `oneof`_ ``kind``. aggregate_type (google.cloud.bigtable_admin_v2.types.Type.Aggregate): Aggregate + This field is a member of `oneof`_ ``kind``. + struct_type (google.cloud.bigtable_admin_v2.types.Type.Struct): + Struct + + This field is a member of `oneof`_ ``kind``. + array_type (google.cloud.bigtable_admin_v2.types.Type.Array): + Array + + This field is a member of `oneof`_ ``kind``. + map_type (google.cloud.bigtable_admin_v2.types.Type.Map): + Map + This field is a member of `oneof`_ ``kind``. """ @@ -122,7 +146,7 @@ class Encoding(proto.Message): class Raw(proto.Message): r"""Leaves the value "as-is" - - Natural sort? Yes + - Order-preserving? Yes - Self-delimiting? No - Compatibility? N/A @@ -154,19 +178,31 @@ class String(proto.Message): class Encoding(proto.Message): r"""Rules used to convert to/from lower level types. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: utf8_raw (google.cloud.bigtable_admin_v2.types.Type.String.Encoding.Utf8Raw): - Use ``Utf8Raw`` encoding. + Deprecated: if set, converts to an empty ``utf8_bytes``. + + This field is a member of `oneof`_ ``encoding``. + utf8_bytes (google.cloud.bigtable_admin_v2.types.Type.String.Encoding.Utf8Bytes): + Use ``Utf8Bytes`` encoding. This field is a member of `oneof`_ ``encoding``. """ class Utf8Raw(proto.Message): + r"""Deprecated: prefer the equivalent ``Utf8Bytes``.""" + + class Utf8Bytes(proto.Message): r"""UTF-8 encoding - - Natural sort? No (ASCII characters only) + - Order-preserving? Yes (code point order) - Self-delimiting? No - Compatibility? @@ -182,6 +218,12 @@ class Utf8Raw(proto.Message): oneof="encoding", message="Type.String.Encoding.Utf8Raw", ) + utf8_bytes: "Type.String.Encoding.Utf8Bytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.String.Encoding.Utf8Bytes", + ) encoding: "Type.String.Encoding" = proto.Field( proto.MESSAGE, @@ -214,7 +256,7 @@ class BigEndianBytes(proto.Message): r"""Encodes the value as an 8-byte big endian twos complement ``Bytes`` value. - - Natural sort? No (positive values only) + - Order-preserving? No (positive values only) - Self-delimiting? Yes - Compatibility? @@ -224,8 +266,7 @@ class BigEndianBytes(proto.Message): Attributes: bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes): - The underlying ``Bytes`` type, which may be able to encode - further. + Deprecated: ignored if set. """ bytes_type: "Type.Bytes" = proto.Field( @@ -247,6 +288,113 @@ class BigEndianBytes(proto.Message): message="Type.Int64.Encoding", ) + class Bool(proto.Message): + r"""bool Values of type ``Bool`` are stored in ``Value.bool_value``.""" + + class Float32(proto.Message): + r"""Float32 Values of type ``Float32`` are stored in + ``Value.float_value``. + + """ + + class Float64(proto.Message): + r"""Float64 Values of type ``Float64`` are stored in + ``Value.float_value``. + + """ + + class Timestamp(proto.Message): + r"""Timestamp Values of type ``Timestamp`` are stored in + ``Value.timestamp_value``. 
+ + """ + + class Date(proto.Message): + r"""Date Values of type ``Date`` are stored in ``Value.date_value``.""" + + class Struct(proto.Message): + r"""A structured data value, consisting of fields which map to + dynamically typed values. Values of type ``Struct`` are stored in + ``Value.array_value`` where entries are in the same order and number + as ``field_types``. + + Attributes: + fields (MutableSequence[google.cloud.bigtable_admin_v2.types.Type.Struct.Field]): + The names and types of the fields in this + struct. + """ + + class Field(proto.Message): + r"""A struct field and its type. + + Attributes: + field_name (str): + The field name (optional). Fields without a ``field_name`` + are considered anonymous and cannot be referenced by name. + type_ (google.cloud.bigtable_admin_v2.types.Type): + The type of values in this field. + """ + + field_name: str = proto.Field( + proto.STRING, + number=1, + ) + type_: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + + fields: MutableSequence["Type.Struct.Field"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Type.Struct.Field", + ) + + class Array(proto.Message): + r"""An ordered list of elements of a given type. Values of type + ``Array`` are stored in ``Value.array_value``. + + Attributes: + element_type (google.cloud.bigtable_admin_v2.types.Type): + The type of the elements in the array. This must not be + ``Array``. + """ + + element_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + + class Map(proto.Message): + r"""A mapping of keys to values of a given type. Values of type ``Map`` + are stored in a ``Value.array_value`` where each entry is another + ``Value.array_value`` with two elements (the key and the value, in + that order). Normally encoded Map values won't have repeated keys, + however, clients are expected to handle the case in which they do. + If the same key appears multiple times, the *last* value takes + precedence. + + Attributes: + key_type (google.cloud.bigtable_admin_v2.types.Type): + The type of a map key. Only ``Bytes``, ``String``, and + ``Int64`` are allowed as key types. + value_type (google.cloud.bigtable_admin_v2.types.Type): + The type of the values in a map. + """ + + key_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + value_type: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + class Aggregate(proto.Message): r"""A value that combines incremental updates into a summarized value. @@ -254,6 +402,10 @@ class Aggregate(proto.Message): Writes will provide either the ``input_type`` or ``state_type``, and reads will always return the ``state_type`` . + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -270,6 +422,18 @@ class Aggregate(proto.Message): sum (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Sum): Sum aggregator. + This field is a member of `oneof`_ ``aggregator``. + hllpp_unique_count (google.cloud.bigtable_admin_v2.types.Type.Aggregate.HyperLogLogPlusPlusUniqueCount): + HyperLogLogPlusPlusUniqueCount aggregator. + + This field is a member of `oneof`_ ``aggregator``. + max_ (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Max): + Max aggregator. 
+ + This field is a member of `oneof`_ ``aggregator``. + min_ (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Min): + Min aggregator. + This field is a member of `oneof`_ ``aggregator``. """ @@ -279,6 +443,28 @@ class Sum(proto.Message): """ + class Max(proto.Message): + r"""Computes the max of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class Min(proto.Message): + r"""Computes the min of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class HyperLogLogPlusPlusUniqueCount(proto.Message): + r"""Computes an approximate unique count over the input values. When + using raw data as input, be careful to use a consistent encoding. + Otherwise the same value encoded differently could count more than + once, or two distinct values could count as identical. Input: Any, + or omit for Raw State: TBD Special state conversions: ``Int64`` (the + unique count estimate) + + """ + input_type: "Type" = proto.Field( proto.MESSAGE, number=1, @@ -295,6 +481,26 @@ class Sum(proto.Message): oneof="aggregator", message="Type.Aggregate.Sum", ) + hllpp_unique_count: "Type.Aggregate.HyperLogLogPlusPlusUniqueCount" = ( + proto.Field( + proto.MESSAGE, + number=5, + oneof="aggregator", + message="Type.Aggregate.HyperLogLogPlusPlusUniqueCount", + ) + ) + max_: "Type.Aggregate.Max" = proto.Field( + proto.MESSAGE, + number=6, + oneof="aggregator", + message="Type.Aggregate.Max", + ) + min_: "Type.Aggregate.Min" = proto.Field( + proto.MESSAGE, + number=7, + oneof="aggregator", + message="Type.Aggregate.Min", + ) bytes_type: Bytes = proto.Field( proto.MESSAGE, @@ -314,12 +520,60 @@ class Sum(proto.Message): oneof="kind", message=Int64, ) + float32_type: Float32 = proto.Field( + proto.MESSAGE, + number=12, + oneof="kind", + message=Float32, + ) + float64_type: Float64 = proto.Field( + proto.MESSAGE, + number=9, + oneof="kind", + message=Float64, + ) + bool_type: Bool = proto.Field( + proto.MESSAGE, + number=8, + oneof="kind", + message=Bool, + ) + timestamp_type: Timestamp = proto.Field( + proto.MESSAGE, + number=10, + oneof="kind", + message=Timestamp, + ) + date_type: Date = proto.Field( + proto.MESSAGE, + number=11, + oneof="kind", + message=Date, + ) aggregate_type: Aggregate = proto.Field( proto.MESSAGE, number=6, oneof="kind", message=Aggregate, ) + struct_type: Struct = proto.Field( + proto.MESSAGE, + number=7, + oneof="kind", + message=Struct, + ) + array_type: Array = proto.Field( + proto.MESSAGE, + number=3, + oneof="kind", + message=Array, + ) + map_type: Map = proto.Field( + proto.MESSAGE, + number=4, + oneof="kind", + message=Map, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_v2/types/data.py b/google/cloud/bigtable_v2/types/data.py index ec32cac82..9d964a4f6 100644 --- a/google/cloud/bigtable_v2/types/data.py +++ b/google/cloud/bigtable_v2/types/data.py @@ -1032,6 +1032,10 @@ class Mutation(proto.Message): add_to_cell (google.cloud.bigtable_v2.types.Mutation.AddToCell): Incrementally updates an ``Aggregate`` cell. + This field is a member of `oneof`_ ``mutation``. + merge_to_cell (google.cloud.bigtable_v2.types.Mutation.MergeToCell): + Merges accumulated state to an ``Aggregate`` cell. + This field is a member of `oneof`_ ``mutation``. delete_from_column (google.cloud.bigtable_v2.types.Mutation.DeleteFromColumn): Deletes cells from a column. 
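On the data plane, ``Mutation`` gains a matching ``MergeToCell`` variant for folding pre-aggregated state into an ``Aggregate`` cell, next to the existing ``AddToCell``. A construction sketch with placeholder family, qualifier, and state values (the actual state encoding depends on the family's ``value_type.state_type``, which this sketch does not model):

    from google.cloud.bigtable_v2.types import Mutation, Value

    merge = Mutation(
        merge_to_cell=Mutation.MergeToCell(
            family_name="stats",                      # hypothetical Aggregate family
            column_qualifier=Value(raw_value=b"metric"),
            timestamp=Value(raw_timestamp_micros=0),  # must match table granularity
            input=Value(raw_value=b"..."),            # placeholder accumulated state
        )
    )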
@@ -1130,6 +1134,49 @@ class AddToCell(proto.Message): message="Value", ) + class MergeToCell(proto.Message): + r"""A Mutation which merges accumulated state into a cell in an + ``Aggregate`` family. + + Attributes: + family_name (str): + The name of the ``Aggregate`` family into which new data + should be added. This must be a family with a ``value_type`` + of ``Aggregate``. Format: ``[-_.a-zA-Z0-9]+`` + column_qualifier (google.cloud.bigtable_v2.types.Value): + The qualifier of the column into which new data should be + added. This must be a ``raw_value``. + timestamp (google.cloud.bigtable_v2.types.Value): + The timestamp of the cell to which new data should be added. + This must be a ``raw_timestamp_micros`` that matches the + table's ``granularity``. + input (google.cloud.bigtable_v2.types.Value): + The input value to be merged into the specified cell. This + must be compatible with the family's + ``value_type.state_type``. Merging ``NULL`` is allowed, but + has no effect. + """ + + family_name: str = proto.Field( + proto.STRING, + number=1, + ) + column_qualifier: "Value" = proto.Field( + proto.MESSAGE, + number=2, + message="Value", + ) + timestamp: "Value" = proto.Field( + proto.MESSAGE, + number=3, + message="Value", + ) + input: "Value" = proto.Field( + proto.MESSAGE, + number=4, + message="Value", + ) + class DeleteFromColumn(proto.Message): r"""A Mutation which deletes cells from the specified column, optionally restricting the deletions to a given timestamp range. @@ -1191,6 +1238,12 @@ class DeleteFromRow(proto.Message): oneof="mutation", message=AddToCell, ) + merge_to_cell: MergeToCell = proto.Field( + proto.MESSAGE, + number=6, + oneof="mutation", + message=MergeToCell, + ) delete_from_column: DeleteFromColumn = proto.Field( proto.MESSAGE, number=2, diff --git a/google/cloud/bigtable_v2/types/types.py b/google/cloud/bigtable_v2/types/types.py index 8eb307b3e..153420e45 100644 --- a/google/cloud/bigtable_v2/types/types.py +++ b/google/cloud/bigtable_v2/types/types.py @@ -178,15 +178,27 @@ class String(proto.Message): class Encoding(proto.Message): r"""Rules used to convert to/from lower level types. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: + utf8_raw (google.cloud.bigtable_v2.types.Type.String.Encoding.Utf8Raw): + Deprecated: if set, converts to an empty ``utf8_bytes``. + + This field is a member of `oneof`_ ``encoding``. utf8_bytes (google.cloud.bigtable_v2.types.Type.String.Encoding.Utf8Bytes): Use ``Utf8Bytes`` encoding. This field is a member of `oneof`_ ``encoding``. 
""" + class Utf8Raw(proto.Message): + r"""Deprecated: prefer the equivalent ``Utf8Bytes``.""" + class Utf8Bytes(proto.Message): r"""UTF-8 encoding @@ -200,6 +212,12 @@ class Utf8Bytes(proto.Message): """ + utf8_raw: "Type.String.Encoding.Utf8Raw" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.String.Encoding.Utf8Raw", + ) utf8_bytes: "Type.String.Encoding.Utf8Bytes" = proto.Field( proto.MESSAGE, number=2, diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 64fa98937..ea6737973 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -47,6 +47,7 @@ from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 from google.api_core import path_template +from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( @@ -6799,12 +6800,16 @@ def test_list_app_profiles_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_app_profiles(request={}) + pager = client.list_app_profiles(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 @@ -9309,12 +9314,16 @@ def test_list_hot_tablets_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_hot_tablets(request={}) + pager = client.list_hot_tablets(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 4c888da7c..2b84213bc 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -47,6 +47,7 @@ from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 from google.api_core import path_template +from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( @@ -2398,12 +2399,16 @@ def test_list_tables_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_tables(request={}) + pager = client.list_tables(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 @@ -4843,12 +4848,16 @@ def 
test_list_authorized_views_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_authorized_views(request={}) + pager = client.list_authorized_views(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 @@ -8826,12 +8835,16 @@ def test_list_snapshots_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_snapshots(request={}) + pager = client.list_snapshots(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 @@ -11237,12 +11250,16 @@ def test_list_backups_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_backups(request={}) + pager = client.list_backups(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py index 348338d18..60cc7fd6e 100644 --- a/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -43,6 +43,7 @@ from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import path_template +from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient From abe67c45b6874fbd188a7918d06a689f617526ac Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Mon, 5 Aug 2024 17:11:13 -0600 Subject: [PATCH 03/12] chore: revert sync client customizations (#1009) --- .../bigtable_v2/services/bigtable/client.py | 72 +++++++++---------- owlbot.py | 2 +- 2 files changed, 35 insertions(+), 39 deletions(-) diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index 4a3f19ce6..0937c90fe 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -817,9 +817,9 @@ def read_rows( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. 
self._validate_universe_domain() @@ -933,9 +933,9 @@ def sample_row_keys( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1070,9 +1070,9 @@ def mutate_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1201,9 +1201,9 @@ def mutate_rows( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1375,9 +1375,9 @@ def check_and_mutate_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1477,9 +1477,9 @@ def ping_and_warm( header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1620,9 +1620,9 @@ def read_modify_write_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1725,13 +1725,11 @@ def generate_initial_change_stream_partitions( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1826,13 +1824,11 @@ def read_change_stream( # Certain fields should be provided within the metadata header; # add these here. 
- metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1937,9 +1933,9 @@ def execute_query( header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() diff --git a/owlbot.py b/owlbot.py index 090f7ee93..0ec4cd61c 100644 --- a/owlbot.py +++ b/owlbot.py @@ -146,7 +146,7 @@ def insert(file, before_line, insert_line, after_line, escape=None): # ---------------------------------------------------------------------------- # Patch duplicate routing header: https://github.com/googleapis/gapic-generator-python/issues/2078 # ---------------------------------------------------------------------------- -for file in ["client.py", "async_client.py"]: +for file in ["async_client.py"]: s.replace( f"google/cloud/bigtable_v2/services/bigtable/{file}", "metadata \= tuple\(metadata\) \+ \(", From 45bc8c4a0fe567ce5e0126a1a70e7eb3dca93e92 Mon Sep 17 00:00:00 2001 From: Kajetan Boroszko Date: Thu, 8 Aug 2024 22:12:25 +0200 Subject: [PATCH 04/12] feat: async execute query client (#1011) Co-authored-by: Mateusz Walkiewicz Co-authored-by: Owl Bot --- google/cloud/bigtable/data/__init__.py | 2 + .../bigtable/data/_async/_mutate_rows.py | 4 +- .../cloud/bigtable/data/_async/_read_rows.py | 3 +- google/cloud/bigtable/data/_async/client.py | 234 ++++-- google/cloud/bigtable/data/_helpers.py | 38 +- google/cloud/bigtable/data/exceptions.py | 8 + .../bigtable/data/execute_query/__init__.py | 38 + .../data/execute_query/_async/__init__.py | 13 + .../_async/execute_query_iterator.py | 211 ++++++ .../data/execute_query/_byte_cursor.py | 144 ++++ .../execute_query/_parameters_formatting.py | 118 +++ .../_query_result_parsing_utils.py | 133 ++++ .../bigtable/data/execute_query/_reader.py | 149 ++++ .../bigtable/data/execute_query/metadata.py | 354 +++++++++ .../bigtable/data/execute_query/values.py | 116 +++ google/cloud/bigtable/helpers.py | 31 + google/cloud/bigtable/instance.py | 1 + .../data_client/data_client_snippets_async.py | 45 +- .../data_client_snippets_async_test.py | 5 + testing/constraints-3.8.txt | 1 + tests/_testing.py | 36 + tests/system/data/test_execute_query_async.py | 288 +++++++ tests/system/data/test_execute_query_utils.py | 272 +++++++ tests/unit/_testing.py | 16 + tests/unit/data/_async/__init__.py | 13 + tests/unit/data/_testing.py | 18 + tests/unit/data/execute_query/__init__.py | 13 + .../data/execute_query/_async/__init__.py | 13 + .../data/execute_query/_async/_testing.py | 36 + .../_async/test_query_iterator.py | 156 ++++ tests/unit/data/execute_query/_testing.py | 17 + .../data/execute_query/test_byte_cursor.py | 149 ++++ .../test_execute_query_parameters_parsing.py | 134 ++++ .../test_query_result_parsing_utils.py | 715 ++++++++++++++++++ .../test_query_result_row_reader.py | 310 ++++++++ tests/unit/data/test__helpers.py | 25 +- 
tests/unit/data/test_helpers.py | 45 ++ tests/unit/v2_client/_testing.py | 3 + tests/unit/v2_client/test_instance.py | 26 + 39 files changed, 3855 insertions(+), 78 deletions(-) create mode 100644 google/cloud/bigtable/data/execute_query/__init__.py create mode 100644 google/cloud/bigtable/data/execute_query/_async/__init__.py create mode 100644 google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py create mode 100644 google/cloud/bigtable/data/execute_query/_byte_cursor.py create mode 100644 google/cloud/bigtable/data/execute_query/_parameters_formatting.py create mode 100644 google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py create mode 100644 google/cloud/bigtable/data/execute_query/_reader.py create mode 100644 google/cloud/bigtable/data/execute_query/metadata.py create mode 100644 google/cloud/bigtable/data/execute_query/values.py create mode 100644 google/cloud/bigtable/helpers.py create mode 100644 tests/_testing.py create mode 100644 tests/system/data/test_execute_query_async.py create mode 100644 tests/system/data/test_execute_query_utils.py create mode 100644 tests/unit/_testing.py create mode 100644 tests/unit/data/_async/__init__.py create mode 100644 tests/unit/data/_testing.py create mode 100644 tests/unit/data/execute_query/__init__.py create mode 100644 tests/unit/data/execute_query/_async/__init__.py create mode 100644 tests/unit/data/execute_query/_async/_testing.py create mode 100644 tests/unit/data/execute_query/_async/test_query_iterator.py create mode 100644 tests/unit/data/execute_query/_testing.py create mode 100644 tests/unit/data/execute_query/test_byte_cursor.py create mode 100644 tests/unit/data/execute_query/test_execute_query_parameters_parsing.py create mode 100644 tests/unit/data/execute_query/test_query_result_parsing_utils.py create mode 100644 tests/unit/data/execute_query/test_query_result_row_reader.py create mode 100644 tests/unit/data/test_helpers.py diff --git a/google/cloud/bigtable/data/__init__.py b/google/cloud/bigtable/data/__init__.py index 5229f8021..68dc22891 100644 --- a/google/cloud/bigtable/data/__init__.py +++ b/google/cloud/bigtable/data/__init__.py @@ -39,6 +39,7 @@ from google.cloud.bigtable.data.exceptions import RetryExceptionGroup from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup +from google.cloud.bigtable.data.exceptions import ParameterTypeInferenceFailed from google.cloud.bigtable.data._helpers import TABLE_DEFAULT from google.cloud.bigtable.data._helpers import RowKeySamples @@ -68,6 +69,7 @@ "RetryExceptionGroup", "MutationsExceptionGroup", "ShardedReadRowsExceptionGroup", + "ParameterTypeInferenceFailed", "ShardedQuery", "TABLE_DEFAULT", ) diff --git a/google/cloud/bigtable/data/_async/_mutate_rows.py b/google/cloud/bigtable/data/_async/_mutate_rows.py index 99b9944cd..465378aa4 100644 --- a/google/cloud/bigtable/data/_async/_mutate_rows.py +++ b/google/cloud/bigtable/data/_async/_mutate_rows.py @@ -84,7 +84,9 @@ def __init__( f"all entries. Found {total_mutations}." 
) # create partial function to pass to trigger rpc call - metadata = _make_metadata(table.table_name, table.app_profile_id) + metadata = _make_metadata( + table.table_name, table.app_profile_id, instance_name=None + ) self._gapic_fn = functools.partial( gapic_client.mutate_rows, table_name=table.table_name, diff --git a/google/cloud/bigtable/data/_async/_read_rows.py b/google/cloud/bigtable/data/_async/_read_rows.py index 78cb7a991..6034ae6cf 100644 --- a/google/cloud/bigtable/data/_async/_read_rows.py +++ b/google/cloud/bigtable/data/_async/_read_rows.py @@ -102,8 +102,7 @@ def __init__( self.table = table self._predicate = retries.if_exception_type(*retryable_exceptions) self._metadata = _make_metadata( - table.table_name, - table.app_profile_id, + table.table_name, table.app_profile_id, instance_name=None ) self._last_yielded_row_key: bytes | None = None self._remaining_count: int | None = self.request.rows_limit or None diff --git a/google/cloud/bigtable/data/_async/client.py b/google/cloud/bigtable/data/_async/client.py index 34fdf847a..600937df8 100644 --- a/google/cloud/bigtable/data/_async/client.py +++ b/google/cloud/bigtable/data/_async/client.py @@ -15,74 +15,88 @@ from __future__ import annotations +import asyncio +from functools import partial +import os +import random +import sys +import time from typing import ( - cast, + TYPE_CHECKING, Any, AsyncIterable, + Dict, Optional, - Set, Sequence, - TYPE_CHECKING, + Set, + Union, + cast, ) - -import asyncio -import grpc -import time import warnings -import sys -import random -import os -from functools import partial +from google.api_core import client_options as client_options_lib +from google.api_core import retry as retries +from google.api_core.exceptions import Aborted, DeadlineExceeded, ServiceUnavailable +import google.auth._default +import google.auth.credentials +from google.cloud.client import ClientWithProject +from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore +import grpc +from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT +from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( + ExecuteQueryIteratorAsync, +) +from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync +from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync +from google.cloud.bigtable.data._async.mutations_batcher import ( + _MB_SIZE, + MutationsBatcherAsync, +) +from google.cloud.bigtable.data._helpers import ( + _CONCURRENCY_LIMIT, + TABLE_DEFAULT, + _attempt_timeout_generator, + _get_error_type, + _get_retryable_errors, + _get_timeouts, + _make_metadata, + _retry_exception_factory, + _validate_timeouts, + _WarmedInstanceKey, +) +from google.cloud.bigtable.data.exceptions import ( + FailedQueryShardError, + ShardedReadRowsExceptionGroup, +) +from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry +from google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.row import Row +from google.cloud.bigtable.data.row_filters import ( + CellsRowLimitFilter, + RowFilter, + RowFilterChain, + StripValueTransformerFilter, +) +from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType +from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable.data.execute_query._parameters_formatting import ( + _format_execute_query_params, +) +from 
google.cloud.bigtable_v2.services.bigtable.async_client import ( + DEFAULT_CLIENT_INFO, + BigtableAsyncClient, +) from google.cloud.bigtable_v2.services.bigtable.client import BigtableClientMeta -from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient -from google.cloud.bigtable_v2.services.bigtable.async_client import DEFAULT_CLIENT_INFO from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( PooledBigtableGrpcAsyncIOTransport, PooledChannel, ) from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest -from google.cloud.client import ClientWithProject -from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore -from google.api_core import retry as retries -from google.api_core.exceptions import DeadlineExceeded -from google.api_core.exceptions import ServiceUnavailable -from google.api_core.exceptions import Aborted -from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync - -import google.auth.credentials -import google.auth._default -from google.api_core import client_options as client_options_lib -from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT -from google.cloud.bigtable.data.row import Row -from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery -from google.cloud.bigtable.data.exceptions import FailedQueryShardError -from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup - -from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry -from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync -from google.cloud.bigtable.data._helpers import TABLE_DEFAULT -from google.cloud.bigtable.data._helpers import _WarmedInstanceKey -from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT -from google.cloud.bigtable.data._helpers import _make_metadata -from google.cloud.bigtable.data._helpers import _retry_exception_factory -from google.cloud.bigtable.data._helpers import _validate_timeouts -from google.cloud.bigtable.data._helpers import _get_retryable_errors -from google.cloud.bigtable.data._helpers import _get_timeouts -from google.cloud.bigtable.data._helpers import _attempt_timeout_generator -from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync -from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE -from google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule -from google.cloud.bigtable.data.row_filters import RowFilter -from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter -from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter -from google.cloud.bigtable.data.row_filters import RowFilterChain - if TYPE_CHECKING: - from google.cloud.bigtable.data._helpers import RowKeySamples - from google.cloud.bigtable.data._helpers import ShardedQuery + from google.cloud.bigtable.data._helpers import RowKeySamples, ShardedQuery class BigtableDataClientAsync(ClientWithProject): @@ -315,7 +329,9 @@ async def _manage_channel( next_refresh = random.uniform(refresh_interval_min, refresh_interval_max) next_sleep = next_refresh - (time.time() - start_timestamp) - async def _register_instance(self, instance_id: str, owner: TableAsync) -> None: + async def _register_instance( + self, instance_id: str, owner: Union[TableAsync, ExecuteQueryIteratorAsync] + ) -> None: """ Registers an instance with the client, and warms the channel pool for the instance @@ -346,7 +362,7 
@@ async def _register_instance(self, instance_id: str, owner: TableAsync) -> None: self._start_background_channel_refresh() async def _remove_instance_registration( - self, instance_id: str, owner: TableAsync + self, instance_id: str, owner: Union[TableAsync, ExecuteQueryIteratorAsync] ) -> bool: """ Removes an instance from the client's registered instances, to prevent @@ -416,6 +432,102 @@ def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> TableAs """ return TableAsync(self, instance_id, table_id, *args, **kwargs) + async def execute_query( + self, + query: str, + instance_id: str, + *, + parameters: Dict[str, ExecuteQueryValueType] | None = None, + parameter_types: Dict[str, SqlType.Type] | None = None, + app_profile_id: str | None = None, + operation_timeout: float = 600, + attempt_timeout: float | None = 20, + retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + Aborted, + ), + ) -> "ExecuteQueryIteratorAsync": + """ + Executes an SQL query on an instance. + Returns an iterator to asynchronously stream back columns from selected rows. + + Failed requests within operation_timeout will be retried based on the + retryable_errors list until operation_timeout is reached. + + Args: + - query: Query to be run on Bigtable instance. The query can use ``@param`` + placeholders to use parameter interpolation on the server. Values for all + parameters should be provided in ``parameters``. Types of parameters are + inferred but should be provided in ``parameter_types`` if the inference is + not possible (i.e. when value can be None, an empty list or an empty dict). + - instance_id: The Bigtable instance ID to perform the query on. + instance_id is combined with the client's project to fully + specify the instance. + - parameters: Dictionary with values for all parameters used in the ``query``. + - parameter_types: Dictionary with types of parameters used in the ``query``. + Required to contain entries only for parameters whose type cannot be + detected automatically (i.e. the value can be None, an empty list or + an empty dict). + - app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to 600 seconds. + - attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the 20 seconds. + If None, defaults to operation_timeout. + - retryable_errors: a list of errors that will be retried if encountered. 
+ Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + Returns: + - an asynchronous iterator that yields rows returned by the query + Raises: + - DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + - GoogleAPIError: raised if the request encounters an unrecoverable error + """ + warnings.warn( + "ExecuteQuery is in preview and may change in the future.", + category=RuntimeWarning, + ) + + retryable_excs = [_get_error_type(e) for e in retryable_errors] + + pb_params = _format_execute_query_params(parameters, parameter_types) + + instance_name = self._gapic_client.instance_path(self.project, instance_id) + + request_body = { + "instance_name": instance_name, + "app_profile_id": app_profile_id, + "query": query, + "params": pb_params, + "proto_format": {}, + } + + # app_profile_id should be set to an empty string for ExecuteQueryRequest only + app_profile_id_for_metadata = app_profile_id or "" + + req_metadata = _make_metadata( + table_name=None, + app_profile_id=app_profile_id_for_metadata, + instance_name=instance_name, + ) + + return ExecuteQueryIteratorAsync( + self, + instance_id, + app_profile_id, + request_body, + attempt_timeout, + operation_timeout, + req_metadata, + retryable_excs, + ) + async def __aenter__(self): self._start_background_channel_refresh() return self @@ -893,7 +1005,9 @@ async def sample_row_keys( sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) # prepare request - metadata = _make_metadata(self.table_name, self.app_profile_id) + metadata = _make_metadata( + self.table_name, self.app_profile_id, instance_name=None + ) async def execute_rpc(): results = await self.client._gapic_client.sample_row_keys( @@ -1029,7 +1143,9 @@ async def mutate_row( table_name=self.table_name, app_profile_id=self.app_profile_id, timeout=attempt_timeout, - metadata=_make_metadata(self.table_name, self.app_profile_id), + metadata=_make_metadata( + self.table_name, self.app_profile_id, instance_name=None + ), retry=None, ) return await retries.retry_target_async( @@ -1147,7 +1263,9 @@ async def check_and_mutate_row( ): false_case_mutations = [false_case_mutations] false_case_list = [m._to_pb() for m in false_case_mutations or []] - metadata = _make_metadata(self.table_name, self.app_profile_id) + metadata = _make_metadata( + self.table_name, self.app_profile_id, instance_name=None + ) result = await self.client._gapic_client.check_and_mutate_row( true_mutations=true_case_list, false_mutations=false_case_list, @@ -1198,7 +1316,9 @@ async def read_modify_write_row( rules = [rules] if not rules: raise ValueError("rules must contain at least one item") - metadata = _make_metadata(self.table_name, self.app_profile_id) + metadata = _make_metadata( + self.table_name, self.app_profile_id, instance_name=None + ) result = await self.client._gapic_client.read_modify_write_row( rules=[rule._to_pb() for rule in rules], row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, diff --git a/google/cloud/bigtable/data/_helpers.py b/google/cloud/bigtable/data/_helpers.py index a8fba9ef1..2d36c521f 100644 --- a/google/cloud/bigtable/data/_helpers.py +++ b/google/cloud/bigtable/data/_helpers.py @@ -16,7 +16,7 @@ """ from __future__ import annotations -from typing import Sequence, List, Tuple, TYPE_CHECKING +from typing import Sequence, List, Tuple, TYPE_CHECKING, Union import time import enum from collections import namedtuple @@ -60,15 
+60,26 @@ class TABLE_DEFAULT(enum.Enum): def _make_metadata( - table_name: str, app_profile_id: str | None + table_name: str | None, app_profile_id: str | None, instance_name: str | None ) -> list[tuple[str, str]]: """ Create properly formatted gRPC metadata for requests. """ params = [] - params.append(f"table_name={table_name}") + + if table_name is not None and instance_name is not None: + raise ValueError("metadata can't contain both instance_name and table_name") + + if table_name is not None: + params.append(f"table_name={table_name}") + if instance_name is not None: + params.append(f"name={instance_name}") if app_profile_id is not None: params.append(f"app_profile_id={app_profile_id}") + if len(params) == 0: + raise ValueError( + "At least one of table_name and app_profile_id should be not None." + ) params_str = "&".join(params) return [("x-goog-request-params", params_str)] @@ -203,6 +214,22 @@ def _validate_timeouts( raise ValueError("attempt_timeout must be greater than 0") +def _get_error_type( + call_code: Union["grpc.StatusCode", int, type[Exception]] +) -> type[Exception]: + """Helper function for ensuring the object is an exception type. + If it is not, the proper GoogleAPICallError type is infered from the status + code. + + Args: + - call_code: Exception type or gRPC status code. + """ + if isinstance(call_code, type): + return call_code + else: + return type(core_exceptions.from_grpc_status(call_code, "")) + + def _get_retryable_errors( call_codes: Sequence["grpc.StatusCode" | int | type[Exception]] | TABLE_DEFAULT, table: "TableAsync", @@ -225,7 +252,4 @@ def _get_retryable_errors( elif call_codes == TABLE_DEFAULT.MUTATE_ROWS: call_codes = table.default_mutate_rows_retryable_errors - return [ - e if isinstance(e, type) else type(core_exceptions.from_grpc_status(e, "")) - for e in call_codes - ] + return [_get_error_type(e) for e in call_codes] diff --git a/google/cloud/bigtable/data/exceptions.py b/google/cloud/bigtable/data/exceptions.py index 8d97640aa..95cd44f2c 100644 --- a/google/cloud/bigtable/data/exceptions.py +++ b/google/cloud/bigtable/data/exceptions.py @@ -311,3 +311,11 @@ def __init__( self.__cause__ = cause self.index = failed_index self.query = failed_query + + +class InvalidExecuteQueryResponse(core_exceptions.GoogleAPICallError): + """Exception raised to invalid query response data from back-end.""" + + +class ParameterTypeInferenceFailed(ValueError): + """Exception raised when query parameter types were not provided and cannot be inferred.""" diff --git a/google/cloud/bigtable/data/execute_query/__init__.py b/google/cloud/bigtable/data/execute_query/__init__.py new file mode 100644 index 000000000..94af7d1cd --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/__init__.py @@ -0,0 +1,38 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( + ExecuteQueryIteratorAsync, +) +from google.cloud.bigtable.data.execute_query.metadata import ( + Metadata, + ProtoMetadata, + SqlType, +) +from google.cloud.bigtable.data.execute_query.values import ( + ExecuteQueryValueType, + QueryResultRow, + Struct, +) + + +__all__ = [ + "ExecuteQueryValueType", + "SqlType", + "QueryResultRow", + "Struct", + "Metadata", + "ProtoMetadata", + "ExecuteQueryIteratorAsync", +] diff --git a/google/cloud/bigtable/data/execute_query/_async/__init__.py b/google/cloud/bigtable/data/execute_query/_async/__init__.py new file mode 100644 index 000000000..6d5e14bcf --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/_async/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py new file mode 100644 index 000000000..3660c0b0f --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -0,0 +1,211 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import asyncio +from typing import ( + Any, + AsyncIterator, + Dict, + List, + Optional, + Sequence, + Tuple, +) + +from google.api_core import retry as retries + +from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor +from google.cloud.bigtable.data._helpers import ( + _attempt_timeout_generator, + _retry_exception_factory, +) +from google.cloud.bigtable.data.exceptions import InvalidExecuteQueryResponse +from google.cloud.bigtable.data.execute_query.values import QueryResultRow +from google.cloud.bigtable.data.execute_query.metadata import Metadata, ProtoMetadata +from google.cloud.bigtable.data.execute_query._reader import ( + _QueryResultRowReader, + _Reader, +) +from google.cloud.bigtable_v2.types.bigtable import ( + ExecuteQueryRequest as ExecuteQueryRequestPB, +) + + +class ExecuteQueryIteratorAsync: + """ + ExecuteQueryIteratorAsync handles collecting streaming responses from the + ExecuteQuery RPC and parsing them to `QueryResultRow`s. + + ExecuteQueryIteratorAsync implements Asynchronous Iterator interface and can + be used with "async for" syntax. It is also a context manager. 
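+
+    A minimal usage sketch (illustrative only; the project, instance and query
+    strings below are placeholders, not values defined by this change):
+
+    .. code-block:: python
+
+        async with BigtableDataClientAsync(project="my-project") as client:
+            iterator = await client.execute_query(
+                "SELECT a, b FROM my_table", instance_id="my-instance"
+            )
+            async for row in iterator:
+                ...  # each row is a QueryResultRow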
+
+    It is **not thread-safe**. It should not be used by multiple asyncio Tasks.
+
+    Args:
+        client (google.cloud.bigtable.data._async.BigtableDataClientAsync): bigtable client
+        instance_id (str): id of the instance on which the query is executed
+        request_body (Dict[str, Any]): dict representing the body of the ExecuteQueryRequest
+        attempt_timeout (float | None): the time budget for an individual network request, in seconds.
+            If it takes longer than this time to complete, the request will be cancelled with
+            a DeadlineExceeded exception, and a retry will be attempted.
+            Defaults to 20 seconds. If None, defaults to operation_timeout.
+        operation_timeout (float): the time budget for the entire operation, in seconds.
+            Failed requests will be retried within the budget.
+            Defaults to 600 seconds.
+        req_metadata (Sequence[Tuple[str, str]]): metadata used while sending the gRPC request
+        retryable_excs (List[type[Exception]]): a list of errors that will be retried if encountered.
+    """
+
+    def __init__(
+        self,
+        client: Any,
+        instance_id: str,
+        app_profile_id: Optional[str],
+        request_body: Dict[str, Any],
+        attempt_timeout: float | None,
+        operation_timeout: float,
+        req_metadata: Sequence[Tuple[str, str]],
+        retryable_excs: List[type[Exception]],
+    ) -> None:
+        self._table_name = None
+        self._app_profile_id = app_profile_id
+        self._client = client
+        self._instance_id = instance_id
+        self._byte_cursor = _ByteCursor[ProtoMetadata]()
+        self._reader: _Reader[QueryResultRow] = _QueryResultRowReader(self._byte_cursor)
+        self._result_generator = self._next_impl()
+        self._register_instance_task = None
+        self._is_closed = False
+        self._request_body = request_body
+        self._attempt_timeout_gen = _attempt_timeout_generator(
+            attempt_timeout, operation_timeout
+        )
+        self._async_stream = retries.retry_target_stream_async(
+            self._make_request_with_resume_token,
+            retries.if_exception_type(*retryable_excs),
+            retries.exponential_sleep_generator(0.01, 60, multiplier=2),
+            operation_timeout,
+            exception_factory=_retry_exception_factory,
+        )
+        self._req_metadata = req_metadata
+
+        try:
+            self._register_instance_task = asyncio.create_task(
+                self._client._register_instance(instance_id, self)
+            )
+        except RuntimeError as e:
+            raise RuntimeError(
+                f"{self.__class__.__name__} must be created within an async event loop context."
+            ) from e
+
+    @property
+    def is_closed(self):
+        return self._is_closed
+
+    @property
+    def app_profile_id(self):
+        return self._app_profile_id
+
+    @property
+    def table_name(self):
+        return self._table_name
+
+    async def _make_request_with_resume_token(self):
+        """
+        Performs the rpc call using the correct resume token.
+        """
+        resume_token = self._byte_cursor.prepare_for_new_request()
+        request = ExecuteQueryRequestPB(
+            {
+                **self._request_body,
+                "resume_token": resume_token,
+            }
+        )
+        return await self._client._gapic_client.execute_query(
+            request,
+            timeout=next(self._attempt_timeout_gen),
+            metadata=self._req_metadata,
+            retry=None,
+        )
+
+    async def _await_metadata(self) -> None:
+        """
+        If called before the first response was received, the first response
+        is awaited as part of this call.
+        """
+        if self._byte_cursor.metadata is None:
+            metadata_msg = await self._async_stream.__anext__()
+            self._byte_cursor.consume_metadata(metadata_msg)
+
+    async def _next_impl(self) -> AsyncIterator[QueryResultRow]:
+        """
+        Generator wrapping the response stream which parses the stream results
+        and returns full `QueryResultRow`s.
+ """ + await self._await_metadata() + + async for response in self._async_stream: + try: + bytes_to_parse = self._byte_cursor.consume(response) + if bytes_to_parse is None: + continue + + results = self._reader.consume(bytes_to_parse) + if results is None: + continue + + except ValueError as e: + raise InvalidExecuteQueryResponse( + "Invalid ExecuteQuery response received" + ) from e + + for result in results: + yield result + await self.close() + + async def __anext__(self): + if self._is_closed: + raise StopAsyncIteration + return await self._result_generator.__anext__() + + def __aiter__(self): + return self + + async def metadata(self) -> Optional[Metadata]: + """ + Returns query metadata from the server or None if the iterator was + explicitly closed. + """ + if self._is_closed: + return None + # Metadata should be present in the first response in a stream. + if self._byte_cursor.metadata is None: + try: + await self._await_metadata() + except StopIteration: + return None + return self._byte_cursor.metadata + + async def close(self) -> None: + """ + Cancel all background tasks. Should be called all rows were processed. + """ + if self._is_closed: + return + self._is_closed = True + if self._register_instance_task is not None: + self._register_instance_task.cancel() + await self._client._remove_instance_registration(self._instance_id, self) diff --git a/google/cloud/bigtable/data/execute_query/_byte_cursor.py b/google/cloud/bigtable/data/execute_query/_byte_cursor.py new file mode 100644 index 000000000..60f23f541 --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/_byte_cursor.py @@ -0,0 +1,144 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Generic, Optional, TypeVar + +from google.cloud.bigtable_v2 import ExecuteQueryResponse +from google.cloud.bigtable.data.execute_query.metadata import ( + Metadata, + _pb_metadata_to_metadata_types, +) + +MT = TypeVar("MT", bound=Metadata) # metadata type + + +class _ByteCursor(Generic[MT]): + """ + Buffers bytes from `ExecuteQuery` responses until resume_token is received or end-of-stream + is reached. :class:`google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse` obtained from + the server should be passed to ``consume`` or ``consume_metadata`` methods and its non-None + results should be passed to appropriate + :class:`google.cloud.bigtable.execute_query_reader._Reader` for parsing gathered bytes. + + This class consumes data obtained externally to be usable in both sync and async clients. + + See :class:`google.cloud.bigtable.execute_query_reader._Reader` for more context. + """ + + def __init__(self): + self._metadata: Optional[MT] = None + self._buffer = bytearray() + self._resume_token = None + self._last_response_results_field = None + + @property + def metadata(self) -> Optional[MT]: + """ + Returns: + Metadata or None: Metadata read from the first response of the stream + or None if no response was consumed yet. 
+        """
+        return self._metadata
+
+    def prepare_for_new_request(self):
+        """
+        Prepares this ``_ByteCursor`` for retrying an ``ExecuteQuery`` request.
+
+        Clears internal buffers of this ``_ByteCursor`` and returns the last received
+        ``resume_token`` to be used in the retried request.
+
+        This is the only method that returns ``resume_token`` to the user.
+        Returning the token to the user is tightly coupled with clearing internal
+        buffers to prevent accidental retry without clearing the state, which would
+        cause invalid results. The ``resume_token`` is not needed in other cases,
+        so there is no separate getter for it.
+
+        Returns:
+            bytes: Last received resume_token.
+        """
+        self._buffer = bytearray()
+        # metadata is sent in the first response in a stream,
+        # if we've already received one, but it was not already committed
+        # by a subsequent resume_token, then we should clear it as well.
+        if not self._resume_token:
+            self._metadata = None
+
+        return self._resume_token
+
+    def consume_metadata(self, response: ExecuteQueryResponse) -> None:
+        """
+        Reads metadata from the first response of an ``ExecuteQuery`` response stream.
+        Should be called only once.
+
+        Args:
+            response (google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse): First response
+                from the stream.
+
+        Raises:
+            ValueError: If this method was already called or if metadata received from the server
+                cannot be parsed.
+        """
+        if self._metadata is not None:
+            raise ValueError("Invalid state - metadata already consumed")
+
+        if "metadata" in response:
+            metadata: Any = _pb_metadata_to_metadata_types(response.metadata)
+            self._metadata = metadata
+        else:
+            raise ValueError("Invalid parameter - response without metadata")
+
+        return None
+
+    def consume(self, response: ExecuteQueryResponse) -> Optional[bytes]:
+        """
+        Reads result bytes from an ``ExecuteQuery`` response and adds them to a buffer.
+
+        If the response contains a ``resume_token``:
+        - the ``resume_token`` is saved in this ``_ByteCursor``, and
+        - internal buffers are flushed and returned to the caller.
+
+        ``resume_token`` is not available directly, but can be retrieved by calling
+        :meth:`._ByteCursor.prepare_for_new_request` when preparing to retry a request.
+
+        Args:
+            response (google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse):
+                Response obtained from the stream.
+
+        Returns:
+            bytes or None: bytes if buffers were flushed or None otherwise.
+
+        Raises:
+            ValueError: If the provided ``ExecuteQueryResponse`` is not valid
+                or contains bytes representing a response of a different kind than previously
+                processed responses.
+        """
+        response_pb = response._pb  # proto-plus attribute retrieval is slow.
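+        # Each response carries either a batch of result bytes (buffered here until
+        # a resume_token allows them to be returned) or, at the start of the stream,
+        # the query metadata; the branches below handle these two cases.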
+
+        if response_pb.HasField("results"):
+            results = response_pb.results
+            if results.HasField("proto_rows_batch"):
+                self._buffer.extend(results.proto_rows_batch.batch_data)
+
+            if results.resume_token:
+                self._resume_token = results.resume_token
+
+                if self._buffer:
+                    return_value = memoryview(self._buffer)
+                    self._buffer = bytearray()
+                    return return_value
+        elif response_pb.HasField("metadata"):
+            self.consume_metadata(response)
+        else:
+            raise ValueError(f"Invalid ExecuteQueryResponse: {response}")
+        return None
diff --git a/google/cloud/bigtable/data/execute_query/_parameters_formatting.py b/google/cloud/bigtable/data/execute_query/_parameters_formatting.py
new file mode 100644
index 000000000..edb7a6380
--- /dev/null
+++ b/google/cloud/bigtable/data/execute_query/_parameters_formatting.py
@@ -0,0 +1,118 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict, Optional
+import datetime
+from google.api_core.datetime_helpers import DatetimeWithNanoseconds
+from google.cloud.bigtable.data.exceptions import ParameterTypeInferenceFailed
+from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType
+from google.cloud.bigtable.data.execute_query.metadata import SqlType
+
+
+def _format_execute_query_params(
+    params: Optional[Dict[str, ExecuteQueryValueType]],
+    parameter_types: Optional[Dict[str, SqlType.Type]],
+) -> Any:
+    """
+    Takes a dictionary of param_name -> param_value and optionally parameter types.
+    If the parameter types are not provided, this function tries to infer them.
+
+    Args:
+        params (Optional[Dict[str, ExecuteQueryValueType]]): mapping from parameter names
+            as they appear in the query (without @ at the beginning) to their values.
+            Only values of type ExecuteQueryValueType are permitted.
+        parameter_types (Optional[Dict[str, SqlType.Type]]): mapping of parameter names
+            to their types.
+
+    Raises:
+        ValueError: raised when parameter types cannot be inferred and were not
+            provided explicitly.
+
+    Returns:
+        a dictionary parsable to a protobuf representing parameters as defined
+        in ExecuteQueryRequest.params
+    """
+    if not params:
+        return {}
+    parameter_types = parameter_types or {}
+
+    result_values = {}
+
+    for key, value in params.items():
+        user_provided_type = parameter_types.get(key)
+        try:
+            if user_provided_type:
+                if not isinstance(user_provided_type, SqlType.Type):
+                    raise ValueError(
+                        f"Parameter type for {key} should be provided as an instance of SqlType.Type subclass."
+                    )
+                param_type = user_provided_type
+            else:
+                param_type = _detect_type(value)
+
+            value_pb_dict = _convert_value_to_pb_value_dict(value, param_type)
+        except ValueError as err:
+            raise ValueError(f"Error when parsing parameter {key}") from err
+        result_values[key] = value_pb_dict
+
+    return result_values
+
+
+def _convert_value_to_pb_value_dict(
+    value: ExecuteQueryValueType, param_type: SqlType.Type
+) -> Any:
+    """
+    Takes a value and converts it to a dictionary parsable to a protobuf.
+
+    Args:
+        value (ExecuteQueryValueType): value
+        param_type (SqlType.Type): object describing which ExecuteQuery type the value represents.
+
+    Returns:
+        dictionary parsable to a protobuf.
+    """
+    # type field will be set only in top-level Value.
+    value_dict = param_type._to_value_pb_dict(value)
+    value_dict["type_"] = param_type._to_type_pb_dict()
+    return value_dict
+
+
+_TYPES_TO_TYPE_DICTS = [
+    (bytes, SqlType.Bytes()),
+    (str, SqlType.String()),
+    (bool, SqlType.Bool()),
+    (int, SqlType.Int64()),
+    (DatetimeWithNanoseconds, SqlType.Timestamp()),
+    (datetime.datetime, SqlType.Timestamp()),
+    (datetime.date, SqlType.Date()),
+]
+
+
+def _detect_type(value: ExecuteQueryValueType) -> SqlType.Type:
+    """
+    Infers the ExecuteQuery type based on the value. Raises ParameterTypeInferenceFailed
+    if the type is ambiguous or cannot be inferred.
+    """
+    if value is None:
+        raise ParameterTypeInferenceFailed(
+            "Cannot infer type of None, please provide the type manually."
+        )
+
+    for field_type, type_dict in _TYPES_TO_TYPE_DICTS:
+        if isinstance(value, field_type):
+            return type_dict
+
+    raise ParameterTypeInferenceFailed(
+        f"Cannot infer type of {type(value).__name__}, please provide the type manually."
+    )
diff --git a/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py b/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py
new file mode 100644
index 000000000..b65dce27b
--- /dev/null
+++ b/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py
@@ -0,0 +1,133 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Callable, Dict, Type
+from google.cloud.bigtable.data.execute_query.values import Struct
+from google.cloud.bigtable.data.execute_query.metadata import SqlType
+from google.cloud.bigtable_v2 import Value as PBValue
+from google.api_core.datetime_helpers import DatetimeWithNanoseconds
+
+_REQUIRED_PROTO_FIELDS = {
+    SqlType.Bytes: "bytes_value",
+    SqlType.String: "string_value",
+    SqlType.Int64: "int_value",
+    SqlType.Float64: "float_value",
+    SqlType.Bool: "bool_value",
+    SqlType.Timestamp: "timestamp_value",
+    SqlType.Date: "date_value",
+    SqlType.Struct: "array_value",
+    SqlType.Array: "array_value",
+    SqlType.Map: "array_value",
+}
+
+
+def _parse_array_type(value: PBValue, metadata_type: SqlType.Array) -> Any:
+    """
+    Used for parsing an array represented as a protobuf to a python list.
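+
+    For example (an illustrative case, not an exhaustive one), an array of
+    ``Int64`` elements arrives as repeated ``int_value`` entries inside
+    ``array_value.values`` and is returned as a plain Python ``list`` of ``int``.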
+ """ + return list( + map( + lambda val: _parse_pb_value_to_python_value( + val, metadata_type.element_type + ), + value.array_value.values, + ) + ) + + +def _parse_map_type(value: PBValue, metadata_type: SqlType.Map) -> Any: + """ + used for parsing a map represented as a protobuf to a python dict. + + Values of type `Map` are stored in a `Value.array_value` where each entry + is another `Value.array_value` with two elements (the key and the value, + in that order). + Normally encoded Map values won't have repeated keys, however, the client + must handle the case in which they do. If the same key appears + multiple times, the _last_ value takes precedence. + """ + + try: + return dict( + map( + lambda map_entry: ( + _parse_pb_value_to_python_value( + map_entry.array_value.values[0], metadata_type.key_type + ), + _parse_pb_value_to_python_value( + map_entry.array_value.values[1], metadata_type.value_type + ), + ), + value.array_value.values, + ) + ) + except IndexError: + raise ValueError("Invalid map entry - less or more than two values.") + + +def _parse_struct_type(value: PBValue, metadata_type: SqlType.Struct) -> Struct: + """ + used for parsing a struct represented as a protobuf to a + google.cloud.bigtable.data.execute_query.Struct + """ + if len(value.array_value.values) != len(metadata_type.fields): + raise ValueError("Mismatched lengths of values and types.") + + struct = Struct() + for value, field in zip(value.array_value.values, metadata_type.fields): + field_name, field_type = field + struct.add_field(field_name, _parse_pb_value_to_python_value(value, field_type)) + + return struct + + +def _parse_timestamp_type( + value: PBValue, metadata_type: SqlType.Timestamp +) -> DatetimeWithNanoseconds: + """ + used for parsing a timestamp represented as a protobuf to DatetimeWithNanoseconds + """ + return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value) + + +_TYPE_PARSERS: Dict[Type[SqlType.Type], Callable[[PBValue, Any], Any]] = { + SqlType.Timestamp: _parse_timestamp_type, + SqlType.Struct: _parse_struct_type, + SqlType.Array: _parse_array_type, + SqlType.Map: _parse_map_type, +} + + +def _parse_pb_value_to_python_value(value: PBValue, metadata_type: SqlType.Type) -> Any: + """ + used for converting the value represented as a protobufs to a python object. + """ + value_kind = value.WhichOneof("kind") + if not value_kind: + return None + + kind = type(metadata_type) + if not value.HasField(_REQUIRED_PROTO_FIELDS[kind]): + raise ValueError( + f"{_REQUIRED_PROTO_FIELDS[kind]} field for {kind.__name__} type not found in a Value." + ) + + if kind in _TYPE_PARSERS: + parser = _TYPE_PARSERS[kind] + return parser(value, metadata_type) + elif kind in _REQUIRED_PROTO_FIELDS: + field_name = _REQUIRED_PROTO_FIELDS[kind] + return getattr(value, field_name) + else: + raise ValueError(f"Unknown kind {kind}") diff --git a/google/cloud/bigtable/data/execute_query/_reader.py b/google/cloud/bigtable/data/execute_query/_reader.py new file mode 100644 index 000000000..9c0259cde --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/_reader.py @@ -0,0 +1,149 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import ( + TypeVar, + Generic, + Iterable, + Optional, + List, + Sequence, + cast, +) +from abc import ABC, abstractmethod + +from google.cloud.bigtable_v2 import ProtoRows, Value as PBValue +from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor + +from google.cloud.bigtable.data.execute_query._query_result_parsing_utils import ( + _parse_pb_value_to_python_value, +) + +from google.cloud.bigtable.helpers import batched + +from google.cloud.bigtable.data.execute_query.values import QueryResultRow +from google.cloud.bigtable.data.execute_query.metadata import ProtoMetadata + + +T = TypeVar("T") + + +class _Reader(ABC, Generic[T]): + """ + An interface for classes that consume and parse bytes returned by ``_ByteCursor``. + Parsed bytes should be gathered into bundles (rows or columns) of expected size + and converted to an appropriate type ``T`` that will be returned as a semantically + meaningful result to the library user by + :meth:`google.cloud.bigtable.instance.Instance.execute_query` or + :meth:`google.cloud.bigtable.data._async.client.BigtableDataClientAsync.execute_query` + methods. + + This class consumes data obtained externally to be usable in both sync and async clients. + + See :class:`google.cloud.bigtable.byte_cursor._ByteCursor` for more context. + """ + + @abstractmethod + def consume(self, bytes_to_consume: bytes) -> Optional[Iterable[T]]: + """This method receives a parsable chunk of bytes and returns either a None if there is + not enough chunks to return to the user yet (e.g. we haven't received all columns in a + row yet), or a list of appropriate values gathered from one or more parsable chunks. + + Args: + bytes_to_consume (bytes): chunk of parsable bytes received from + :meth:`google.cloud.bigtable.byte_cursor._ByteCursor.consume` + method. + + Returns: + Iterable[T] or None: Iterable if gathered values can form one or more instances of T, + or None if there is not enough data to construct at least one instance of T with + appropriate number of entries. + """ + raise NotImplementedError + + +class _QueryResultRowReader(_Reader[QueryResultRow]): + """ + A :class:`._Reader` consuming bytes representing + :class:`google.cloud.bigtable_v2.types.Type` + and producing :class:`google.cloud.bigtable.execute_query.QueryResultRow`. + + Number of entries in each row is determined by number of columns in + :class:`google.cloud.bigtable.execute_query.Metadata` obtained from + :class:`google.cloud.bigtable.byte_cursor._ByteCursor` passed in the constructor. + """ + + def __init__(self, byte_cursor: _ByteCursor[ProtoMetadata]): + """ + Constructs new instance of ``_QueryResultRowReader``. + + Args: + byte_cursor (google.cloud.bigtable.byte_cursor._ByteCursor): + byte_cursor that will be used to gather bytes for this instance of ``_Reader``, + needed to obtain :class:`google.cloud.bigtable.execute_query.Metadata` about + processed stream. 
+ """ + self._values: List[PBValue] = [] + self._byte_cursor = byte_cursor + + @property + def _metadata(self) -> Optional[ProtoMetadata]: + return self._byte_cursor.metadata + + def _construct_query_result_row(self, values: Sequence[PBValue]) -> QueryResultRow: + result = QueryResultRow() + # The logic, not defined by mypy types, ensures that the value of + # "metadata" is never null at the time it is retrieved here + metadata = cast(ProtoMetadata, self._metadata) + columns = metadata.columns + + assert len(values) == len( + columns + ), "This function should be called only when count of values matches count of columns." + + for column, value in zip(columns, values): + parsed_value = _parse_pb_value_to_python_value(value, column.column_type) + result.add_field(column.column_name, parsed_value) + return result + + def _parse_proto_rows(self, bytes_to_parse: bytes) -> Iterable[PBValue]: + proto_rows = ProtoRows.pb().FromString(bytes_to_parse) + return proto_rows.values + + def consume(self, bytes_to_consume: bytes) -> Optional[Iterable[QueryResultRow]]: + if bytes_to_consume is None: + raise ValueError("bytes_to_consume shouldn't be None") + + self._values.extend(self._parse_proto_rows(bytes_to_consume)) + + # The logic, not defined by mypy types, ensures that the value of + # "metadata" is never null at the time it is retrieved here + num_columns = len(cast(ProtoMetadata, self._metadata).columns) + + if len(self._values) < num_columns: + return None + + rows = [] + for batch in batched(self._values, n=num_columns): + if len(batch) == num_columns: + rows.append(self._construct_query_result_row(batch)) + else: + raise ValueError( + "Server error, recieved bad number of values. " + f"Expected {num_columns} got {len(batch)}." + ) + + self._values = [] + + return rows diff --git a/google/cloud/bigtable/data/execute_query/metadata.py b/google/cloud/bigtable/data/execute_query/metadata.py new file mode 100644 index 000000000..98b94a644 --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/metadata.py @@ -0,0 +1,354 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module provides the SqlType class used for specifying types in +ExecuteQuery and some utilities. + +The SqlTypes are used in Metadata returned by the ExecuteQuery operation as well +as for specifying query parameter types explicitly. 
+""" + +from collections import defaultdict +from typing import ( + Optional, + List, + Dict, + Set, + Type, + Union, + Tuple, + Any, +) +from google.cloud.bigtable.data.execute_query.values import _NamedList +from google.cloud.bigtable_v2 import ResultSetMetadata +from google.cloud.bigtable_v2 import Type as PBType +from google.type import date_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.api_core.datetime_helpers import DatetimeWithNanoseconds +import datetime + + +class SqlType: + """ + Classes denoting types of values returned by Bigtable's ExecuteQuery operation. + + Used in :class:`.Metadata`. + """ + + class Type: + expected_type: Optional[type] = None + value_pb_dict_field_name: Optional[str] = None + type_field_name: Optional[str] = None + + @classmethod + def from_pb_type(cls, pb_type: Optional[PBType] = None): + return cls() + + def _to_type_pb_dict(self) -> Dict[str, Any]: + if not self.type_field_name: + raise NotImplementedError( + "Fill in expected_type and value_pb_dict_field_name" + ) + + return {self.type_field_name: {}} + + def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: + if self.expected_type is None or self.value_pb_dict_field_name is None: + raise NotImplementedError( + "Fill in expected_type and value_pb_dict_field_name" + ) + + if value is None: + return {} + + if not isinstance(value, self.expected_type): + raise ValueError( + f"Expected query parameter of type {self.expected_type.__name__}, got {type(value).__name__}" + ) + + return {self.value_pb_dict_field_name: value} + + def __eq__(self, other): + return isinstance(other, type(self)) + + def __str__(self) -> str: + return self.__class__.__name__ + + def __repr__(self) -> str: + return self.__str__() + + class Struct(_NamedList[Type], Type): + @classmethod + def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Struct": + if type_pb is None: + raise ValueError("missing required argument type_pb") + fields: List[Tuple[Optional[str], SqlType.Type]] = [] + for field in type_pb.struct_type.fields: + fields.append((field.field_name, _pb_type_to_metadata_type(field.type))) + return cls(fields) + + def _to_value_pb_dict(self, value: Any): + raise NotImplementedError("Struct is not supported as a query parameter") + + def _to_type_pb_dict(self) -> Dict[str, Any]: + raise NotImplementedError("Struct is not supported as a query parameter") + + def __eq__(self, other: object): + # Cannot use super() here - we'd either have to: + # - call super() in these base classes, which would in turn call Object.__eq__ + # to compare objects by identity and return a False, or + # - do not call super() in these base classes, which would result in calling only + # one of the __eq__ methods (a super() in the base class would be required to call the other one), or + # - call super() in only one of the base classes, but that would be error prone and changing + # the order of base classes would introduce unexpected behaviour. 
+ # we also have to disable mypy because it doesn't see that SqlType.Struct == _NamedList[Type] + return SqlType.Type.__eq__(self, other) and _NamedList.__eq__(self, other) # type: ignore + + def __str__(self): + return super(_NamedList, self).__str__() + + class Array(Type): + def __init__(self, element_type: "SqlType.Type"): + if isinstance(element_type, SqlType.Array): + raise ValueError("Arrays of arrays are not supported.") + self._element_type = element_type + + @property + def element_type(self): + return self._element_type + + @classmethod + def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Array": + if type_pb is None: + raise ValueError("missing required argument type_pb") + return cls(_pb_type_to_metadata_type(type_pb.array_type.element_type)) + + def _to_value_pb_dict(self, value: Any): + raise NotImplementedError("Array is not supported as a query parameter") + + def _to_type_pb_dict(self) -> Dict[str, Any]: + raise NotImplementedError("Array is not supported as a query parameter") + + def __eq__(self, other): + return super().__eq__(other) and self.element_type == other.element_type + + def __str__(self) -> str: + return f"{self.__class__.__name__}<{str(self.element_type)}>" + + class Map(Type): + def __init__(self, key_type: "SqlType.Type", value_type: "SqlType.Type"): + self._key_type = key_type + self._value_type = value_type + + @property + def key_type(self): + return self._key_type + + @property + def value_type(self): + return self._value_type + + @classmethod + def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Map": + if type_pb is None: + raise ValueError("missing required argument type_pb") + return cls( + _pb_type_to_metadata_type(type_pb.map_type.key_type), + _pb_type_to_metadata_type(type_pb.map_type.value_type), + ) + + def _to_type_pb_dict(self) -> Dict[str, Any]: + raise NotImplementedError("Map is not supported as a query parameter") + + def _to_value_pb_dict(self, value: Any): + raise NotImplementedError("Map is not supported as a query parameter") + + def __eq__(self, other): + return ( + super().__eq__(other) + and self.key_type == other.key_type + and self.value_type == other.value_type + ) + + def __str__(self) -> str: + return ( + f"{self.__class__.__name__}<" + f"{str(self._key_type)},{str(self._value_type)}>" + ) + + class Bytes(Type): + expected_type = bytes + value_pb_dict_field_name = "bytes_value" + type_field_name = "bytes_type" + + class String(Type): + expected_type = str + value_pb_dict_field_name = "string_value" + type_field_name = "string_type" + + class Int64(Type): + expected_type = int + value_pb_dict_field_name = "int_value" + type_field_name = "int64_type" + + class Float64(Type): + expected_type = float + value_pb_dict_field_name = "float_value" + type_field_name = "float64_type" + + class Bool(Type): + expected_type = bool + value_pb_dict_field_name = "bool_value" + type_field_name = "bool_type" + + class Timestamp(Type): + type_field_name = "timestamp_type" + expected_types = ( + datetime.datetime, + DatetimeWithNanoseconds, + ) + + def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: + if value is None: + return {} + + if not isinstance(value, self.expected_types): + raise ValueError( + f"Expected one of {', '.join((_type.__name__ for _type in self.expected_types))}" + ) + + if isinstance(value, DatetimeWithNanoseconds): + return {"timestamp_value": value.timestamp_pb()} + else: # value must be an instance of datetime.datetime + ts = timestamp_pb2.Timestamp() + ts.FromDatetime(value) + 
return {"timestamp_value": ts} + + class Date(Type): + type_field_name = "date_type" + expected_type = datetime.date + + def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: + if value is None: + return {} + + if not isinstance(value, self.expected_type): + raise ValueError( + f"Expected query parameter of type {self.expected_type.__name__}, got {type(value).__name__}" + ) + + return { + "date_value": date_pb2.Date( + year=value.year, + month=value.month, + day=value.day, + ) + } + + +class Metadata: + pass + + +class ProtoMetadata(Metadata): + class Column: + def __init__(self, column_name: Optional[str], column_type: SqlType.Type): + self._column_name = column_name + self._column_type = column_type + + @property + def column_name(self) -> Optional[str]: + return self._column_name + + @property + def column_type(self) -> SqlType.Type: + return self._column_type + + @property + def columns(self) -> List[Column]: + return self._columns + + def __init__( + self, columns: Optional[List[Tuple[Optional[str], SqlType.Type]]] = None + ): + self._columns: List[ProtoMetadata.Column] = [] + self._column_indexes: Dict[str, List[int]] = defaultdict(list) + self._duplicate_names: Set[str] = set() + + if columns: + for column_name, column_type in columns: + if column_name is not None: + if column_name in self._column_indexes: + self._duplicate_names.add(column_name) + self._column_indexes[column_name].append(len(self._columns)) + self._columns.append(ProtoMetadata.Column(column_name, column_type)) + + def __getitem__(self, index_or_name: Union[str, int]) -> Column: + if isinstance(index_or_name, str): + if index_or_name in self._duplicate_names: + raise KeyError( + f"Ambigious column name: '{index_or_name}', use index instead." + f" Field present on indexes {', '.join(map(str, self._column_indexes[index_or_name]))}." 
+ ) + if index_or_name not in self._column_indexes: + raise KeyError(f"No such column: {index_or_name}") + index = self._column_indexes[index_or_name][0] + else: + index = index_or_name + return self._columns[index] + + def __len__(self): + return len(self._columns) + + def __str__(self) -> str: + columns_str = ", ".join([str(column) for column in self._columns]) + return f"{self.__class__.__name__}([{columns_str}])" + + def __repr__(self) -> str: + return self.__str__() + + +def _pb_metadata_to_metadata_types( + metadata_pb: ResultSetMetadata, +) -> Metadata: + if "proto_schema" in metadata_pb: + fields: List[Tuple[Optional[str], SqlType.Type]] = [] + for column_metadata in metadata_pb.proto_schema.columns: + fields.append( + (column_metadata.name, _pb_type_to_metadata_type(column_metadata.type)) + ) + return ProtoMetadata(fields) + raise ValueError("Invalid ResultSetMetadata object received.") + + +_PROTO_TYPE_TO_METADATA_TYPE_FACTORY: Dict[str, Type[SqlType.Type]] = { + "bytes_type": SqlType.Bytes, + "string_type": SqlType.String, + "int64_type": SqlType.Int64, + "float64_type": SqlType.Float64, + "bool_type": SqlType.Bool, + "timestamp_type": SqlType.Timestamp, + "date_type": SqlType.Date, + "struct_type": SqlType.Struct, + "array_type": SqlType.Array, + "map_type": SqlType.Map, +} + + +def _pb_type_to_metadata_type(type_pb: PBType) -> SqlType.Type: + kind = PBType.pb(type_pb).WhichOneof("kind") + if kind in _PROTO_TYPE_TO_METADATA_TYPE_FACTORY: + return _PROTO_TYPE_TO_METADATA_TYPE_FACTORY[kind].from_pb_type(type_pb) + raise ValueError(f"Unrecognized response data type: {type_pb}") diff --git a/google/cloud/bigtable/data/execute_query/values.py b/google/cloud/bigtable/data/execute_query/values.py new file mode 100644 index 000000000..450f6f855 --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/values.py @@ -0,0 +1,116 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import defaultdict +from typing import ( + Optional, + List, + Dict, + Set, + Union, + TypeVar, + Generic, + Tuple, + Mapping, +) +from google.type import date_pb2 # type: ignore +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +T = TypeVar("T") + + +class _NamedList(Generic[T]): + """ + A class designed to store a list of elements, which can be accessed by + name or index. + This class is different from namedtuple, because namedtuple has some + restrictions on names of fields and we do not want to have them. 
+ """ + + _str_cls_name = "_NamedList" + + def __init__(self, fields: Optional[List[Tuple[Optional[str], T]]] = None): + self._fields: List[Tuple[Optional[str], T]] = [] + self._field_indexes: Dict[str, List[int]] = defaultdict(list) + self._duplicate_names: Set[str] = set() + + if fields: + for field_name, field_type in fields: + self.add_field(field_name, field_type) + + def add_field(self, name: Optional[str], value: T): + if name: + if name in self._field_indexes: + self._duplicate_names.add(name) + self._field_indexes[name].append(len(self._fields)) + self._fields.append((name, value)) + + @property + def fields(self): + return self._fields + + def __getitem__(self, index_or_name: Union[str, int]): + if isinstance(index_or_name, str): + if index_or_name in self._duplicate_names: + raise KeyError( + f"Ambigious field name: '{index_or_name}', use index instead." + f" Field present on indexes {', '.join(map(str, self._field_indexes[index_or_name]))}." + ) + if index_or_name not in self._field_indexes: + raise KeyError(f"No such field: {index_or_name}") + index = self._field_indexes[index_or_name][0] + else: + index = index_or_name + return self._fields[index][1] + + def __len__(self): + return len(self._fields) + + def __eq__(self, other): + if not isinstance(other, _NamedList): + return False + + return ( + self._fields == other._fields + and self._field_indexes == other._field_indexes + ) + + def __str__(self) -> str: + fields_str = ", ".join([str(field) for field in self._fields]) + return f"{self.__class__.__name__}([{fields_str}])" + + def __repr__(self) -> str: + return self.__str__() + + +ExecuteQueryValueType = Union[ + int, + float, + bool, + bytes, + str, + DatetimeWithNanoseconds, + date_pb2.Date, + "Struct", + List["ExecuteQueryValueType"], + Mapping[Union[str, int, bytes], "ExecuteQueryValueType"], +] + + +class QueryResultRow(_NamedList[ExecuteQueryValueType]): + pass + + +class Struct(_NamedList[ExecuteQueryValueType]): + pass diff --git a/google/cloud/bigtable/helpers.py b/google/cloud/bigtable/helpers.py new file mode 100644 index 000000000..78af43089 --- /dev/null +++ b/google/cloud/bigtable/helpers.py @@ -0,0 +1,31 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TypeVar, Iterable, Generator, Tuple + +from itertools import islice + +T = TypeVar("T") + + +# batched landed in standard library in Python 3.11. 
+def batched(iterable: Iterable[T], n) -> Generator[Tuple[T, ...], None, None]: + # batched('ABCDEFG', 3) → ABC DEF G + if n < 1: + raise ValueError("n must be at least one") + it = iter(iterable) + batch = tuple(islice(it, n)) + while batch: + yield batch + batch = tuple(islice(it, n)) diff --git a/google/cloud/bigtable/instance.py b/google/cloud/bigtable/instance.py index 6d092cefd..23fb1c95d 100644 --- a/google/cloud/bigtable/instance.py +++ b/google/cloud/bigtable/instance.py @@ -32,6 +32,7 @@ import warnings + _INSTANCE_NAME_RE = re.compile( r"^projects/(?P[^/]+)/" r"instances/(?P[a-z][-a-z0-9]*)$" ) diff --git a/samples/snippets/data_client/data_client_snippets_async.py b/samples/snippets/data_client/data_client_snippets_async.py index 742e7cb8e..dabbcb839 100644 --- a/samples/snippets/data_client/data_client_snippets_async.py +++ b/samples/snippets/data_client/data_client_snippets_async.py @@ -69,7 +69,10 @@ async def write_batch(project_id, instance_id, table_id): for sub_exception in e.exceptions: failed_entry: RowMutationEntry = sub_exception.entry cause: Exception = sub_exception.__cause__ - print(f"Failed mutation: {failed_entry.row_key} with error: {cause!r}") + print( + f"Failed mutation: {failed_entry.row_key} with error: {cause!r}" + ) + # [END bigtable_async_writes_batch] await write_batch(table.client.project, table.instance_id, table.table_id) @@ -94,6 +97,7 @@ async def write_increment(project_id, instance_id, table_id): # check result cell = result_row[0] print(f"{cell.row_key} value: {int(cell)}") + # [END bigtable_async_write_increment] await write_increment(table.client.project, table.instance_id, table.table_id) @@ -127,6 +131,7 @@ async def write_conditional(project_id, instance_id, table_id): ) if result is True: print("The row os_name was set to android") + # [END bigtable_async_writes_conditional] await write_conditional(table.client.project, table.instance_id, table.table_id) @@ -141,6 +146,7 @@ async def read_row(project_id, instance_id, table_id): row_key = "phone#4c410523#20190501" row = await table.read_row(row_key) print(row) + # [END bigtable_async_reads_row] await read_row(table.client.project, table.instance_id, table.table_id) @@ -158,6 +164,7 @@ async def read_row_partial(project_id, instance_id, table_id): row = await table.read_row(row_key, row_filter=col_filter) print(row) + # [END bigtable_async_reads_row_partial] await read_row_partial(table.client.project, table.instance_id, table.table_id) @@ -171,10 +178,9 @@ async def read_rows(project_id, instance_id, table_id): async with BigtableDataClientAsync(project=project_id) as client: async with client.get_table(instance_id, table_id) as table: - query = ReadRowsQuery(row_keys=[ - b"phone#4c410523#20190501", - b"phone#4c410523#20190502" - ]) + query = ReadRowsQuery( + row_keys=[b"phone#4c410523#20190501", b"phone#4c410523#20190502"] + ) async for row in await table.read_rows_stream(query): print(row) @@ -194,12 +200,13 @@ async def read_row_range(project_id, instance_id, table_id): row_range = RowRange( start_key=b"phone#4c410523#20190501", - end_key=b"phone#4c410523#201906201" + end_key=b"phone#4c410523#201906201", ) query = ReadRowsQuery(row_ranges=[row_range]) async for row in await table.read_rows_stream(query): print(row) + # [END bigtable_async_reads_row_range] await read_row_range(table.client.project, table.instance_id, table.table_id) @@ -221,6 +228,7 @@ async def read_prefix(project_id, instance_id, table_id): async for row in await table.read_rows_stream(query): print(row) + # [END 
bigtable_async_reads_prefix] await read_prefix(table.client.project, table.instance_id, table.table_id) @@ -240,5 +248,30 @@ async def read_with_filter(project_id, instance_id, table_id): async for row in await table.read_rows_stream(query): print(row) + # [END bigtable_async_reads_filter] await read_with_filter(table.client.project, table.instance_id, table.table_id) + + +async def execute_query(table): + # [START bigtable_async_execute_query] + from google.cloud.bigtable.data import BigtableDataClientAsync + + async def execute_query(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + query = ( + "SELECT _key, stats_summary['os_build'], " + "stats_summary['connected_cell'], " + "stats_summary['connected_wifi'] " + f"from `{table_id}` WHERE _key=@row_key" + ) + result = await client.execute_query( + query, + instance_id, + parameters={"row_key": b"phone#4c410523#20190501"}, + ) + results = [r async for r in result] + print(results) + + # [END bigtable_async_execute_query] + await execute_query(table.client.project, table.instance_id, table.table_id) diff --git a/samples/snippets/data_client/data_client_snippets_async_test.py b/samples/snippets/data_client/data_client_snippets_async_test.py index d9968e6dc..2e0fb9b81 100644 --- a/samples/snippets/data_client/data_client_snippets_async_test.py +++ b/samples/snippets/data_client/data_client_snippets_async_test.py @@ -101,3 +101,8 @@ async def test_read_with_prefix(table): @pytest.mark.asyncio async def test_read_with_filter(table): await data_snippets.read_with_filter(table) + + +@pytest.mark.asyncio +async def test_execute_query(table): + await data_snippets.execute_query(table) diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt index fa7c56db1..5ed0c2fb9 100644 --- a/testing/constraints-3.8.txt +++ b/testing/constraints-3.8.txt @@ -12,3 +12,4 @@ grpc-google-iam-v1==0.12.4 proto-plus==1.22.3 libcst==0.2.5 protobuf==3.20.2 +pytest-asyncio==0.21.2 diff --git a/tests/_testing.py b/tests/_testing.py new file mode 100644 index 000000000..81cce7b78 --- /dev/null +++ b/tests/_testing.py @@ -0,0 +1,36 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
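
For reference, the `batched` helper added in google/cloud/bigtable/helpers.py above backports `itertools.batched` from Python 3.11. A minimal usage sketch (not part of the patch itself, and assuming this version of the library is installed) is:

    from google.cloud.bigtable.helpers import batched

    values = list(range(7))
    assert list(batched(values, 3)) == [(0, 1, 2), (3, 4, 5), (6,)]  # last batch may be short
    assert list(batched(values, 10)) == [(0, 1, 2, 3, 4, 5, 6)]      # a single short batch
    assert list(batched([], 3)) == []                                # empty input yields nothing
    # batched(values, 0) raises ValueError("n must be at least one")

This is the helper `_QueryResultRowReader.consume` uses earlier in the patch to group a flat list of decoded values into rows of `len(metadata.columns)` entries.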
+ +from google.cloud.bigtable_v2.types.data import ProtoRows, Value as PBValue + + +TYPE_INT = { + "int64_type": { + "encoding": {"big_endian_bytes": {"bytes_type": {"encoding": {"raw": {}}}}} + } +} + + +def proto_rows_bytes(*args): + return ProtoRows.serialize(ProtoRows(values=[PBValue(**arg) for arg in args])) + + +def split_bytes_into_chunks(bytes_to_split, num_chunks): + from google.cloud.bigtable.helpers import batched + + assert num_chunks <= len(bytes_to_split) + bytes_per_part = (len(bytes_to_split) - 1) // num_chunks + 1 + result = list(map(bytes, batched(bytes_to_split, bytes_per_part))) + assert len(result) == num_chunks + return result diff --git a/tests/system/data/test_execute_query_async.py b/tests/system/data/test_execute_query_async.py new file mode 100644 index 000000000..a680d2de0 --- /dev/null +++ b/tests/system/data/test_execute_query_async.py @@ -0,0 +1,288 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +import os +from unittest import mock +from .test_execute_query_utils import ( + ChannelMockAsync, + response_with_metadata, + response_with_result, +) +from google.api_core import exceptions as core_exceptions +from google.cloud.bigtable.data import BigtableDataClientAsync +import google.cloud.bigtable.data._async.client + +TABLE_NAME = "TABLE_NAME" +INSTANCE_NAME = "INSTANCE_NAME" + + +class TestAsyncExecuteQuery: + @pytest.fixture() + def async_channel_mock(self): + with mock.patch.dict(os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"}): + yield ChannelMockAsync() + + @pytest.fixture() + def async_client(self, async_channel_mock): + with mock.patch.dict( + os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"} + ), mock.patch.object( + google.cloud.bigtable.data._async.client, + "PooledChannel", + return_value=async_channel_mock, + ): + yield BigtableDataClientAsync() + + @pytest.mark.asyncio + async def test_execute_query(self, async_client, async_channel_mock): + values = [ + response_with_metadata(), + response_with_result("test"), + response_with_result(8, resume_token=b"r1"), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + response_with_result("test3"), + response_with_result(None, resume_token=b"r3"), + ] + async_channel_mock.set_values(values) + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert len(async_channel_mock.execute_query_calls) == 1 + + @pytest.mark.asyncio + async def test_execute_query_with_params(self, async_client, async_channel_mock): + values = [ + response_with_metadata(), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + ] + async_channel_mock.set_values(values) + + result = await 
async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME} WHERE b=@b", + INSTANCE_NAME, + parameters={"b": 9}, + ) + results = [r async for r in result] + assert len(results) == 1 + assert results[0]["a"] == "test2" + assert results[0]["b"] == 9 + assert len(async_channel_mock.execute_query_calls) == 1 + + @pytest.mark.asyncio + async def test_execute_query_error_before_metadata( + self, async_client, async_channel_mock + ): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + DeadlineExceeded(""), + response_with_metadata(), + response_with_result("test"), + response_with_result(8, resume_token=b"r1"), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + response_with_result("test3"), + response_with_result(None, resume_token=b"r3"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 3 + assert len(async_channel_mock.execute_query_calls) == 2 + + @pytest.mark.asyncio + async def test_execute_query_error_after_metadata( + self, async_client, async_channel_mock + ): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + response_with_metadata(), + DeadlineExceeded(""), + response_with_metadata(), + response_with_result("test"), + response_with_result(8, resume_token=b"r1"), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + response_with_result("test3"), + response_with_result(None, resume_token=b"r3"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 3 + assert len(async_channel_mock.execute_query_calls) == 2 + assert async_channel_mock.resume_tokens == [] + + @pytest.mark.asyncio + async def test_execute_query_with_retries(self, async_client, async_channel_mock): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + response_with_metadata(), + response_with_result("test"), + response_with_result(8, resume_token=b"r1"), + DeadlineExceeded(""), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + response_with_result("test3"), + DeadlineExceeded(""), + response_with_result("test3"), + response_with_result(None, resume_token=b"r3"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert len(async_channel_mock.execute_query_calls) == 3 + assert async_channel_mock.resume_tokens == [b"r1", b"r2"] + + @pytest.mark.parametrize( + "exception", + [ + (core_exceptions.DeadlineExceeded("")), + (core_exceptions.Aborted("")), + (core_exceptions.ServiceUnavailable("")), + ], + ) + @pytest.mark.asyncio + async def test_execute_query_retryable_error( + self, async_client, async_channel_mock, exception + ): + values = [ + response_with_metadata(), + response_with_result("test", resume_token=b"t1"), + exception, + response_with_result(8, resume_token=b"t2"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = 
[r async for r in result] + assert len(results) == 1 + assert len(async_channel_mock.execute_query_calls) == 2 + assert async_channel_mock.resume_tokens == [b"t1"] + + @pytest.mark.asyncio + async def test_execute_query_retry_partial_row( + self, async_client, async_channel_mock + ): + values = [ + response_with_metadata(), + response_with_result("test", resume_token=b"t1"), + core_exceptions.DeadlineExceeded(""), + response_with_result(8, resume_token=b"t2"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert len(async_channel_mock.execute_query_calls) == 2 + assert async_channel_mock.resume_tokens == [b"t1"] + + @pytest.mark.parametrize( + "ExceptionType", + [ + (core_exceptions.InvalidArgument), + (core_exceptions.FailedPrecondition), + (core_exceptions.PermissionDenied), + (core_exceptions.MethodNotImplemented), + (core_exceptions.Cancelled), + (core_exceptions.AlreadyExists), + (core_exceptions.OutOfRange), + (core_exceptions.DataLoss), + (core_exceptions.Unauthenticated), + (core_exceptions.NotFound), + (core_exceptions.ResourceExhausted), + (core_exceptions.Unknown), + (core_exceptions.InternalServerError), + ], + ) + @pytest.mark.asyncio + async def test_execute_query_non_retryable( + self, async_client, async_channel_mock, ExceptionType + ): + values = [ + response_with_metadata(), + response_with_result("test"), + response_with_result(8, resume_token=b"r1"), + ExceptionType(""), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + response_with_result("test3"), + response_with_result(None, resume_token=b"r3"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + r = await result.__anext__() + assert r["a"] == "test" + assert r["b"] == 8 + + with pytest.raises(ExceptionType): + r = await result.__anext__() + + assert len(async_channel_mock.execute_query_calls) == 1 + assert async_channel_mock.resume_tokens == [] + + @pytest.mark.asyncio + async def test_execute_query_metadata_received_multiple_times_detected( + self, async_client, async_channel_mock + ): + values = [ + response_with_metadata(), + response_with_metadata(), + ] + async_channel_mock.set_values(values) + + with pytest.raises(Exception, match="Invalid ExecuteQuery response received"): + [ + r + async for r in await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + ] diff --git a/tests/system/data/test_execute_query_utils.py b/tests/system/data/test_execute_query_utils.py new file mode 100644 index 000000000..9e27b95f2 --- /dev/null +++ b/tests/system/data/test_execute_query_utils.py @@ -0,0 +1,272 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
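
The retry tests above all exercise the same resumption contract: after a retryable error the request is re-issued with the last received `resume_token`, and anything received since that token is discarded and re-sent by the server. A rough, generic sketch of that contract (an editor-supplied illustration, not the client's actual implementation) might look like:

    from google.api_core.exceptions import Aborted, DeadlineExceeded, ServiceUnavailable

    RETRYABLE = (Aborted, DeadlineExceeded, ServiceUnavailable)


    def stream_with_resume(start_stream):
        """start_stream(resume_token) returns an iterable of (payload, token_or_None)."""
        token, uncommitted = None, []
        while True:
            try:
                for payload, new_token in start_stream(token):
                    uncommitted.append(payload)
                    if new_token:
                        # A resume token commits everything buffered since the last one.
                        yield from uncommitted
                        uncommitted, token = [], new_token
                # A well-formed stream ends with a resume token, so nothing is left buffered.
                return
            except RETRYABLE:
                uncommitted = []  # data after the last token is re-sent after resumption

The `resume_tokens` assertions in the tests above check exactly this: only tokens that were actually received before the error are echoed back on the retried request.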
+ +from unittest import mock + +import google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio as pga +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse +from google.cloud.bigtable_v2.types.data import ProtoRows, Value as PBValue +import grpc.aio + + +try: + # async mock for python3.7-10 + from asyncio import coroutine + + def async_mock(return_value=None): + coro = mock.Mock(name="CoroutineResult") + corofunc = mock.Mock(name="CoroutineFunction", side_effect=coroutine(coro)) + corofunc.coro = coro + corofunc.coro.return_value = return_value + return corofunc + +except ImportError: + # async mock for python3.11 or later + from unittest.mock import AsyncMock + + def async_mock(return_value=None): + return AsyncMock(return_value=return_value) + + +# ExecuteQueryResponse( +# metadata={ +# "proto_schema": { +# "columns": [ +# {"name": "test1", "type_": TYPE_INT}, +# {"name": "test2", "type_": TYPE_INT}, +# ] +# } +# } +# ), +# ExecuteQueryResponse( +# results={"proto_rows_batch": {"batch_data": messages[0]}} +# ), + + +def response_with_metadata(): + schema = {"a": "string_type", "b": "int64_type"} + return ExecuteQueryResponse( + { + "metadata": { + "proto_schema": { + "columns": [ + {"name": name, "type_": {_type: {}}} + for name, _type in schema.items() + ] + } + } + } + ) + + +def response_with_result(*args, resume_token=None): + if resume_token is None: + resume_token_dict = {} + else: + resume_token_dict = {"resume_token": resume_token} + + values = [] + for column_value in args: + if column_value is None: + pb_value = PBValue({}) + else: + pb_value = PBValue( + { + "int_value" + if isinstance(column_value, int) + else "string_value": column_value + } + ) + values.append(pb_value) + rows = ProtoRows(values=values) + + return ExecuteQueryResponse( + { + "results": { + "proto_rows_batch": { + "batch_data": ProtoRows.serialize(rows), + }, + **resume_token_dict, + } + } + ) + + +class ExecuteQueryStreamMock: + def __init__(self, parent): + self.parent = parent + self.iter = iter(self.parent.values) + + def __call__(self, *args, **kwargs): + request = args[0] + + self.parent.execute_query_calls.append(request) + if request.resume_token: + self.parent.resume_tokens.append(request.resume_token) + + def stream(): + for value in self.iter: + if isinstance(value, Exception): + raise value + else: + yield value + + return stream() + + +class ChannelMock: + def __init__(self): + self.execute_query_calls = [] + self.values = [] + self.resume_tokens = [] + + def set_values(self, values): + self.values = values + + def unary_unary(self, *args, **kwargs): + return mock.MagicMock() + + def unary_stream(self, *args, **kwargs): + if args[0] == "/google.bigtable.v2.Bigtable/ExecuteQuery": + return ExecuteQueryStreamMock(self) + return mock.MagicMock() + + +class ChannelMockAsync(pga.PooledChannel, mock.MagicMock): + def __init__(self, *args, **kwargs): + mock.MagicMock.__init__(self, *args, **kwargs) + self.execute_query_calls = [] + self.values = [] + self.resume_tokens = [] + self._iter = [] + + def get_async_get(self, *args, **kwargs): + return self.async_gen + + def set_values(self, values): + self.values = values + self._iter = iter(self.values) + + def unary_unary(self, *args, **kwargs): + return async_mock() + + def unary_stream(self, *args, **kwargs): + if args[0] == "/google.bigtable.v2.Bigtable/ExecuteQuery": + + async def async_gen(*args, **kwargs): + for value in self._iter: + yield value + + iter = async_gen() + + class 
UnaryStreamCallMock(grpc.aio.UnaryStreamCall): + def __aiter__(self): + async def _impl(*args, **kwargs): + try: + while True: + yield await self.read() + except StopAsyncIteration: + pass + + return _impl() + + async def read(self): + value = await iter.__anext__() + if isinstance(value, Exception): + raise value + return value + + def add_done_callback(*args, **kwargs): + pass + + def cancel(*args, **kwargs): + pass + + def cancelled(*args, **kwargs): + pass + + def code(*args, **kwargs): + pass + + def details(*args, **kwargs): + pass + + def done(*args, **kwargs): + pass + + def initial_metadata(*args, **kwargs): + pass + + def time_remaining(*args, **kwargs): + pass + + def trailing_metadata(*args, **kwargs): + pass + + async def wait_for_connection(*args, **kwargs): + return async_mock() + + class UnaryStreamMultiCallableMock(grpc.aio.UnaryStreamMultiCallable): + def __init__(self, parent): + self.parent = parent + + def __call__( + self, + request, + *, + timeout=None, + metadata=None, + credentials=None, + wait_for_ready=None, + compression=None + ): + self.parent.execute_query_calls.append(request) + if request.resume_token: + self.parent.resume_tokens.append(request.resume_token) + return UnaryStreamCallMock() + + def add_done_callback(*args, **kwargs): + pass + + def cancel(*args, **kwargs): + pass + + def cancelled(*args, **kwargs): + pass + + def code(*args, **kwargs): + pass + + def details(*args, **kwargs): + pass + + def done(*args, **kwargs): + pass + + def initial_metadata(*args, **kwargs): + pass + + def time_remaining(*args, **kwargs): + pass + + def trailing_metadata(*args, **kwargs): + pass + + def wait_for_connection(*args, **kwargs): + pass + + # unary_stream should return https://grpc.github.io/grpc/python/grpc_asyncio.html#grpc.aio.UnaryStreamMultiCallable + # PTAL https://grpc.github.io/grpc/python/grpc_asyncio.html#grpc.aio.Channel.unary_stream + return UnaryStreamMultiCallableMock(self) + return async_mock() diff --git a/tests/unit/_testing.py b/tests/unit/_testing.py new file mode 100644 index 000000000..e0d8d2a22 --- /dev/null +++ b/tests/unit/_testing.py @@ -0,0 +1,16 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flake8: noqa +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes diff --git a/tests/unit/data/_async/__init__.py b/tests/unit/data/_async/__init__.py new file mode 100644 index 000000000..6d5e14bcf --- /dev/null +++ b/tests/unit/data/_async/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/unit/data/_testing.py b/tests/unit/data/_testing.py new file mode 100644 index 000000000..b5dd3f444 --- /dev/null +++ b/tests/unit/data/_testing.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flake8: noqa +from unittest.mock import Mock +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes diff --git a/tests/unit/data/execute_query/__init__.py b/tests/unit/data/execute_query/__init__.py new file mode 100644 index 000000000..6d5e14bcf --- /dev/null +++ b/tests/unit/data/execute_query/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/unit/data/execute_query/_async/__init__.py b/tests/unit/data/execute_query/_async/__init__.py new file mode 100644 index 000000000..6d5e14bcf --- /dev/null +++ b/tests/unit/data/execute_query/_async/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
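
For readers tracing the channel mocks above, `response_with_result("test", resume_token=b"t1")` in test_execute_query_utils.py builds roughly the following message. This hand-expanded equivalent is shown only for illustration and assumes the generated bigtable_v2 types are installed:

    from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse
    from google.cloud.bigtable_v2.types.data import ProtoRows, Value

    rows = ProtoRows(values=[Value(string_value="test")])
    resp = ExecuteQueryResponse(
        {
            "results": {
                "proto_rows_batch": {"batch_data": ProtoRows.serialize(rows)},
                "resume_token": b"t1",
            }
        }
    )
    assert resp.results.resume_token == b"t1"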
diff --git a/tests/unit/data/execute_query/_async/_testing.py b/tests/unit/data/execute_query/_async/_testing.py new file mode 100644 index 000000000..5a7acbdd9 --- /dev/null +++ b/tests/unit/data/execute_query/_async/_testing.py @@ -0,0 +1,36 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flake8: noqa +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes + + +try: + # async mock for python3.7-10 + from unittest.mock import Mock + from asyncio import coroutine + + def async_mock(return_value=None): + coro = Mock(name="CoroutineResult") + corofunc = Mock(name="CoroutineFunction", side_effect=coroutine(coro)) + corofunc.coro = coro + corofunc.coro.return_value = return_value + return corofunc + +except ImportError: + # async mock for python3.11 or later + from unittest.mock import AsyncMock + + def async_mock(return_value=None): + return AsyncMock(return_value=return_value) diff --git a/tests/unit/data/execute_query/_async/test_query_iterator.py b/tests/unit/data/execute_query/_async/test_query_iterator.py new file mode 100644 index 000000000..5c577ed74 --- /dev/null +++ b/tests/unit/data/execute_query/_async/test_query_iterator.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
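
The `async_mock` helper duplicated above exists only because `asyncio.coroutine` was removed in Python 3.11 while the oldest supported interpreters predate `unittest.mock.AsyncMock`. On Python 3.11+ the helper simply resolves to `AsyncMock`, so its behaviour can be sketched with the standard library alone (an illustration, not part of the patch):

    import asyncio
    from unittest.mock import AsyncMock  # what async_mock() resolves to on Python 3.11+


    async def demo():
        register = AsyncMock(return_value=None)
        await register("my-instance")                  # the mock is awaitable
        register.assert_awaited_once_with("my-instance")


    asyncio.run(demo())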
+ +import asyncio +from unittest.mock import Mock +from mock import patch +import pytest +from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( + ExecuteQueryIteratorAsync, +) +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse +from ._testing import TYPE_INT, proto_rows_bytes, split_bytes_into_chunks, async_mock + + +class MockIteratorAsync: + def __init__(self, values, delay=None): + self._values = values + self.idx = 0 + self._delay = delay + + def __aiter__(self): + return self + + async def __anext__(self): + if self.idx >= len(self._values): + raise StopAsyncIteration + if self._delay is not None: + await asyncio.sleep(self._delay) + value = self._values[self.idx] + self.idx += 1 + return value + + +@pytest.fixture +def proto_byte_stream(): + proto_rows = [ + proto_rows_bytes({"int_value": 1}, {"int_value": 2}), + proto_rows_bytes({"int_value": 3}, {"int_value": 4}), + proto_rows_bytes({"int_value": 5}, {"int_value": 6}), + ] + + messages = [ + *split_bytes_into_chunks(proto_rows[0], num_chunks=2), + *split_bytes_into_chunks(proto_rows[1], num_chunks=3), + proto_rows[2], + ] + + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": { + "columns": [ + {"name": "test1", "type_": TYPE_INT}, + {"name": "test2", "type_": TYPE_INT}, + ] + } + } + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": messages[0]}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[1]}, + "resume_token": b"token1", + } + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": messages[2]}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": messages[3]}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[4]}, + "resume_token": b"token2", + } + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[5]}, + "resume_token": b"token3", + } + ), + ] + return stream + + +@pytest.mark.asyncio +async def test_iterator(proto_byte_stream): + client_mock = Mock() + + client_mock._register_instance = async_mock() + client_mock._remove_instance_registration = async_mock() + mock_async_iterator = MockIteratorAsync(proto_byte_stream) + iterator = None + + with patch( + "google.api_core.retry.retry_target_stream_async", + return_value=mock_async_iterator, + ): + iterator = ExecuteQueryIteratorAsync( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + result = [] + async for value in iterator: + result.append(tuple(value)) + assert result == [(1, 2), (3, 4), (5, 6)] + + assert iterator.is_closed + client_mock._register_instance.assert_called_once() + client_mock._remove_instance_registration.assert_called_once() + + assert mock_async_iterator.idx == len(proto_byte_stream) + + +@pytest.mark.asyncio +async def test_iterator_awaits_metadata(proto_byte_stream): + client_mock = Mock() + + client_mock._register_instance = async_mock() + client_mock._remove_instance_registration = async_mock() + mock_async_iterator = MockIteratorAsync(proto_byte_stream) + iterator = None + with patch( + "google.api_core.retry.retry_target_stream_async", + return_value=mock_async_iterator, + ): + iterator = ExecuteQueryIteratorAsync( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + 
retryable_excs=[], + ) + + await iterator.metadata() + + assert mock_async_iterator.idx == 1 diff --git a/tests/unit/data/execute_query/_testing.py b/tests/unit/data/execute_query/_testing.py new file mode 100644 index 000000000..9d24eee34 --- /dev/null +++ b/tests/unit/data/execute_query/_testing.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flake8: noqa +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes diff --git a/tests/unit/data/execute_query/test_byte_cursor.py b/tests/unit/data/execute_query/test_byte_cursor.py new file mode 100644 index 000000000..e283e1ca2 --- /dev/null +++ b/tests/unit/data/execute_query/test_byte_cursor.py @@ -0,0 +1,149 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
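
The `split_bytes_into_chunks` fixture used above relies on the fact that a serialized `ProtoRows` message can be cut at arbitrary byte boundaries and simply concatenated again before parsing, which is what the byte cursor in the following tests does with `proto_rows_batch.batch_data` chunks. A minimal round-trip sketch (not part of the patch, assuming the generated bigtable_v2 types are installed):

    from google.cloud.bigtable_v2.types.data import ProtoRows, Value

    rows = ProtoRows(values=[Value(int_value=1), Value(int_value=2)])
    raw = ProtoRows.serialize(rows)

    chunks = [raw[:3], raw[3:]]          # an arbitrary split, as a server might stream it
    reassembled = b"".join(chunks)

    assert ProtoRows.deserialize(reassembled) == rows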
+ +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse +from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor + +from ._testing import TYPE_INT + + +def pass_values_to_byte_cursor(byte_cursor, iterable): + for value in iterable: + result = byte_cursor.consume(value) + if result is not None: + yield result + + +class TestByteCursor: + def test__proto_rows_batch__complete_data(self): + byte_cursor = _ByteCursor() + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} + } + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"123"}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"456"}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"789"}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"0"}, + "resume_token": b"token1", + } + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"abc"}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"def"}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"ghi"}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"j"}, + "resume_token": b"token2", + } + ), + ] + assert byte_cursor.metadata is None + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + value = next(byte_cursor_iter) + assert value == b"1234567890" + assert byte_cursor._resume_token == b"token1" + assert byte_cursor.metadata.columns[0].column_name == "test1" + + value = next(byte_cursor_iter) + assert value == b"abcdefghij" + assert byte_cursor._resume_token == b"token2" + + def test__proto_rows_batch__empty_proto_rows_batch(self): + byte_cursor = _ByteCursor() + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} + } + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {}, "resume_token": b"token1"} + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"123"}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"0"}, + "resume_token": b"token2", + } + ), + ] + + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + value = next(byte_cursor_iter) + assert value == b"1230" + assert byte_cursor._resume_token == b"token2" + + def test__proto_rows_batch__no_proto_rows_batch(self): + byte_cursor = _ByteCursor() + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} + } + ), + ExecuteQueryResponse(results={"resume_token": b"token1"}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"123"}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"0"}, + "resume_token": b"token2", + } + ), + ] + + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + value = next(byte_cursor_iter) + assert value == b"1230" + assert byte_cursor._resume_token == b"token2" + + def test__proto_rows_batch__no_resume_token_at_the_end_of_stream(self): + byte_cursor = _ByteCursor() + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} + } + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"0"}, + "resume_token": b"token1", + } + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"abc"}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"def"}}), + 
ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"ghi"}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"j"}, + } + ), + ] + assert byte_cursor.metadata is None + assert byte_cursor.consume(stream[0]) is None + value = byte_cursor.consume(stream[1]) + assert value == b"0" + assert byte_cursor._resume_token == b"token1" + assert byte_cursor.metadata.columns[0].column_name == "test1" + + assert byte_cursor.consume(stream[2]) is None + assert byte_cursor.consume(stream[3]) is None + assert byte_cursor.consume(stream[3]) is None + assert byte_cursor.consume(stream[4]) is None + assert byte_cursor.consume(stream[5]) is None diff --git a/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py b/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py new file mode 100644 index 000000000..914a0920a --- /dev/null +++ b/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py @@ -0,0 +1,134 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from google.cloud.bigtable.data.execute_query._parameters_formatting import ( + _format_execute_query_params, +) +from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable.data.execute_query.values import Struct +import datetime + +from google.type import date_pb2 +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + + +timestamp = int( + datetime.datetime(2024, 5, 12, 17, 44, 12, tzinfo=datetime.timezone.utc).timestamp() +) +dt_micros_non_zero = DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, 123, nanosecond=0, tzinfo=datetime.timezone.utc +).timestamp_pb() +dt_nanos_zero = DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, nanosecond=0, tzinfo=datetime.timezone.utc +).timestamp_pb() +dt_nanos_non_zero = DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, nanosecond=12, tzinfo=datetime.timezone.utc +).timestamp_pb() +pb_date = date_pb2.Date(year=2024, month=5, day=15) + + +@pytest.mark.parametrize( + "input_value,value_field,type_field,expected_value", + [ + (1, "int_value", "int64_type", 1), + ("2", "string_value", "string_type", "2"), + (b"3", "bytes_value", "bytes_type", b"3"), + (True, "bool_value", "bool_type", True), + ( + datetime.datetime.fromtimestamp(timestamp), + "timestamp_value", + "timestamp_type", + dt_nanos_zero, + ), + ( + datetime.datetime( + 2024, 5, 12, 17, 44, 12, 123, tzinfo=datetime.timezone.utc + ), + "timestamp_value", + "timestamp_type", + dt_micros_non_zero, + ), + (datetime.date(2024, 5, 15), "date_value", "date_type", pb_date), + ( + DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, nanosecond=12, tzinfo=datetime.timezone.utc + ), + "timestamp_value", + "timestamp_type", + dt_nanos_non_zero, + ), + ], +) +def test_instance_execute_query_parameters_simple_types_parsing( + input_value, value_field, type_field, expected_value +): + result = _format_execute_query_params( + { + "test": 
input_value, + }, + None, + ) + assert result["test"][value_field] == expected_value + assert type_field in result["test"]["type_"] + + +def test_instance_execute_query_parameters_not_supported_types(): + with pytest.raises(ValueError): + _format_execute_query_params({"test1": 1.1}, None) + + with pytest.raises(ValueError): + _format_execute_query_params({"test1": {"a": 1}}, None) + + with pytest.raises(ValueError): + _format_execute_query_params({"test1": [1]}, None) + + with pytest.raises(ValueError): + _format_execute_query_params({"test1": Struct([("field1", 1)])}, None) + + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": {"a": 1}}, + { + "test1": SqlType.Map(SqlType.String(), SqlType.Int64()), + }, + ) + + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": [1]}, + { + "test1": SqlType.Array(SqlType.Int64()), + }, + ) + + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": Struct([("field1", 1)])}, + {"test1": SqlType.Struct([("field1", SqlType.Int64())])}, + ) + + +def test_instance_execute_query_parameters_not_match(): + with pytest.raises(ValueError, match="test2"): + _format_execute_query_params( + { + "test1": 1, + "test2": 1, + }, + { + "test1": SqlType.Int64(), + "test2": SqlType.String(), + }, + ) diff --git a/tests/unit/data/execute_query/test_query_result_parsing_utils.py b/tests/unit/data/execute_query/test_query_result_parsing_utils.py new file mode 100644 index 000000000..ff7211654 --- /dev/null +++ b/tests/unit/data/execute_query/test_query_result_parsing_utils.py @@ -0,0 +1,715 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
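As context for the parameter-formatting tests above, here is a minimal sketch of the behavior they pin down (illustrative only; the parameter names are made up and the helper is private API): plain Python values with an unambiguous SQL type are converted to typed protobuf values automatically, while floats, dicts, lists, and Structs need an explicit SqlType hint, some of which are not supported yet.

from google.cloud.bigtable.data.execute_query._parameters_formatting import (
    _format_execute_query_params,
)

# Values whose SQL type can be inferred need no type hints (second argument None).
formatted = _format_execute_query_params({"user_id": 1, "user_name": "2"}, None)
assert formatted["user_id"]["int_value"] == 1
assert "int64_type" in formatted["user_id"]["type_"]
assert formatted["user_name"]["string_value"] == "2"
assert "string_type" in formatted["user_name"]["type_"]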
+ +import pytest +from google.cloud.bigtable.data.execute_query.values import Struct +from google.cloud.bigtable_v2 import Type as PBType, Value as PBValue +from google.cloud.bigtable.data.execute_query._query_result_parsing_utils import ( + _parse_pb_value_to_python_value, +) +from google.cloud.bigtable.data.execute_query.metadata import ( + _pb_type_to_metadata_type, + SqlType, +) + +from google.type import date_pb2 +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +import datetime + +from ._testing import TYPE_INT + +TYPE_BYTES = {"bytes_type": {}} +TYPE_TIMESTAMP = {"timestamp_type": {}} + + +class TestQueryResultParsingUtils: + @pytest.mark.parametrize( + "type_dict,value_dict,expected_metadata_type,expected_value", + [ + (TYPE_INT, {"int_value": 1}, SqlType.Int64, 1), + ( + {"string_type": {}}, + {"string_value": "test"}, + SqlType.String, + "test", + ), + ({"bool_type": {}}, {"bool_value": False}, SqlType.Bool, False), + ( + {"bytes_type": {}}, + {"bytes_value": b"test"}, + SqlType.Bytes, + b"test", + ), + ( + {"float64_type": {}}, + {"float_value": 17.21}, + SqlType.Float64, + 17.21, + ), + ( + {"timestamp_type": {}}, + {"timestamp_value": {"seconds": 1715864647, "nanos": 12}}, + SqlType.Timestamp, + DatetimeWithNanoseconds( + 2024, 5, 16, 13, 4, 7, nanosecond=12, tzinfo=datetime.timezone.utc + ), + ), + ( + {"date_type": {}}, + {"date_value": {"year": 1800, "month": 12, "day": 0}}, + SqlType.Date, + date_pb2.Date(year=1800, month=12, day=0), + ), + ], + ) + def test_basic_types( + self, type_dict, value_dict, expected_metadata_type, expected_value + ): + _type = PBType(type_dict) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is expected_metadata_type + value = PBValue(value_dict) + assert ( + _parse_pb_value_to_python_value(value._pb, metadata_type) == expected_value + ) + + # Larger test cases were extracted for readability + def test__array(self): + _type = PBType({"array_type": {"element_type": TYPE_INT}}) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Int64 + value = PBValue( + { + "array_value": { + "values": [ + {"int_value": 1}, + {"int_value": 2}, + {"int_value": 3}, + {"int_value": 4}, + ] + } + } + ) + assert _parse_pb_value_to_python_value(value._pb, metadata_type) == [1, 2, 3, 4] + + def test__struct(self): + _type = PBType( + { + "struct_type": { + "fields": [ + { + "field_name": "field1", + "type_": TYPE_INT, + }, + { + "field_name": None, + "type_": {"string_type": {}}, + }, + { + "field_name": "field3", + "type_": {"array_type": {"element_type": TYPE_INT}}, + }, + { + "field_name": "field3", + "type_": {"string_type": {}}, + }, + ] + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test2"}, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"int_value": 3}, + {"int_value": 4}, + {"int_value": 5}, + ] + } + }, + {"string_value": "test4"}, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Struct + assert type(metadata_type["field1"]) is SqlType.Int64 + assert type(metadata_type[1]) is SqlType.String + assert type(metadata_type[2]) is SqlType.Array + assert type(metadata_type[2].element_type) is SqlType.Int64 + assert type(metadata_type[3]) is SqlType.String + + # duplicate fields not accesible by name + with pytest.raises(KeyError, match="Ambigious field name"): + metadata_type["field3"] 
+ + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + assert isinstance(result, Struct) + assert result["field1"] == result[0] == 1 + assert result[1] == "test2" + + # duplicate fields not accesible by name + with pytest.raises(KeyError, match="Ambigious field name"): + result["field3"] + + # duplicate fields accessible by index + assert result[2] == [2, 3, 4, 5] + assert result[3] == "test4" + + def test__array_of_structs(self): + _type = PBType( + { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "field1", + "type_": TYPE_INT, + }, + { + "field_name": None, + "type_": {"string_type": {}}, + }, + { + "field_name": "field3", + "type_": {"bool_type": {}}, + }, + ] + } + } + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test1"}, + {"bool_value": True}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"string_value": "test2"}, + {"bool_value": False}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 3}, + {"string_value": "test3"}, + {"bool_value": True}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 4}, + {"string_value": "test4"}, + {"bool_value": False}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Struct + assert type(metadata_type.element_type["field1"]) is SqlType.Int64 + assert type(metadata_type.element_type[1]) is SqlType.String + assert type(metadata_type.element_type["field3"]) is SqlType.Bool + + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + assert isinstance(result, list) + assert len(result) == 4 + + assert isinstance(result[0], Struct) + assert result[0]["field1"] == 1 + assert result[0][1] == "test1" + assert result[0]["field3"] + + assert isinstance(result[1], Struct) + assert result[1]["field1"] == 2 + assert result[1][1] == "test2" + assert not result[1]["field3"] + + assert isinstance(result[2], Struct) + assert result[2]["field1"] == 3 + assert result[2][1] == "test3" + assert result[2]["field3"] + + assert isinstance(result[3], Struct) + assert result[3]["field1"] == 4 + assert result[3][1] == "test4" + assert not result[3]["field3"] + + def test__map(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_INT, + "value_type": {"string_type": {}}, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test1"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"string_value": "test2"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 3}, + {"string_value": "test3"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 4}, + {"string_value": "test4"}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.String + + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + assert isinstance(result, dict) + assert len(result) == 4 + + assert result == { + 1: "test1", + 2: "test2", + 3: "test3", + 4: "test4", + } + + def test__map_repeated_values(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_INT, + "value_type": {"string_type": {}}, + } + }, + ) + value 
= PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test1"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test2"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test3"}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + assert len(result) == 1 + + assert result == { + 1: "test3", + } + + def test__map_of_maps_of_structs(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_INT, + "value_type": { + "map_type": { + "key_type": {"string_type": {}}, + "value_type": { + "struct_type": { + "fields": [ + { + "field_name": "field1", + "type_": TYPE_INT, + }, + { + "field_name": "field2", + "type_": {"string_type": {}}, + }, + ] + } + }, + } + }, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ # list of (int, map) tuples + { + "array_value": { + "values": [ # (int, map) tuple + {"int_value": 1}, + { + "array_value": { + "values": [ # list of (str, struct) tuples + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "1_1"}, + { + "array_value": { + "values": [ + { + "int_value": 1 + }, + { + "string_value": "test1" + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "1_2"}, + { + "array_value": { + "values": [ + { + "int_value": 2 + }, + { + "string_value": "test2" + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (int, map) tuple + {"int_value": 2}, + { + "array_value": { + "values": [ # list of (str, struct) tuples + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "2_1"}, + { + "array_value": { + "values": [ + { + "int_value": 3 + }, + { + "string_value": "test3" + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "2_2"}, + { + "array_value": { + "values": [ + { + "int_value": 4 + }, + { + "string_value": "test4" + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.Map + assert type(metadata_type.value_type.key_type) is SqlType.String + assert type(metadata_type.value_type.value_type) is SqlType.Struct + assert type(metadata_type.value_type.value_type["field1"]) is SqlType.Int64 + assert type(metadata_type.value_type.value_type["field2"]) is SqlType.String + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + + assert result[1]["1_1"]["field1"] == 1 + assert result[1]["1_1"]["field2"] == "test1" + + assert result[1]["1_2"]["field1"] == 2 + assert result[1]["1_2"]["field2"] == "test2" + + assert result[2]["2_1"]["field1"] == 3 + assert result[2]["2_1"]["field2"] == "test3" + + assert result[2]["2_2"]["field1"] == 4 + assert result[2]["2_2"]["field2"] == "test4" + + def test__map_of_lists_of_structs(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_BYTES, + "value_type": { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "timestamp", + "type_": TYPE_TIMESTAMP, + }, + { + "field_name": "value", + "type_": TYPE_BYTES, + }, + ] + } + }, + } + }, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ # 
list of (byte, list) tuples + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key1"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 1111111111 + } + }, + { + "bytes_value": b"key1-value1" + }, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 2222222222 + } + }, + { + "bytes_value": b"key1-value2" + }, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key2"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 3333333333 + } + }, + { + "bytes_value": b"key2-value1" + }, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 4444444444 + } + }, + { + "bytes_value": b"key2-value2" + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Bytes + assert type(metadata_type.value_type) is SqlType.Array + assert type(metadata_type.value_type.element_type) is SqlType.Struct + assert ( + type(metadata_type.value_type.element_type["timestamp"]) + is SqlType.Timestamp + ) + assert type(metadata_type.value_type.element_type["value"]) is SqlType.Bytes + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + + timestamp1 = DatetimeWithNanoseconds( + 2005, 3, 18, 1, 58, 31, tzinfo=datetime.timezone.utc + ) + timestamp2 = DatetimeWithNanoseconds( + 2040, 6, 2, 3, 57, 2, tzinfo=datetime.timezone.utc + ) + timestamp3 = DatetimeWithNanoseconds( + 2075, 8, 18, 5, 55, 33, tzinfo=datetime.timezone.utc + ) + timestamp4 = DatetimeWithNanoseconds( + 2110, 11, 3, 7, 54, 4, tzinfo=datetime.timezone.utc + ) + + assert result[b"key1"][0]["timestamp"] == timestamp1 + assert result[b"key1"][0]["value"] == b"key1-value1" + assert result[b"key1"][1]["timestamp"] == timestamp2 + assert result[b"key1"][1]["value"] == b"key1-value2" + assert result[b"key2"][0]["timestamp"] == timestamp3 + assert result[b"key2"][0]["value"] == b"key2-value1" + assert result[b"key2"][1]["timestamp"] == timestamp4 + assert result[b"key2"][1]["value"] == b"key2-value2" + + def test__invalid_type_throws_exception(self): + _type = PBType({"string_type": {}}) + value = PBValue({"int_value": 1}) + metadata_type = _pb_type_to_metadata_type(_type) + + with pytest.raises( + ValueError, + match="string_value field for String type not found in a Value.", + ): + _parse_pb_value_to_python_value(value._pb, metadata_type) diff --git a/tests/unit/data/execute_query/test_query_result_row_reader.py b/tests/unit/data/execute_query/test_query_result_row_reader.py new file mode 100644 index 000000000..2bb1e4da0 --- /dev/null +++ b/tests/unit/data/execute_query/test_query_result_row_reader.py @@ -0,0 +1,310 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from unittest import mock +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse +from google.cloud.bigtable_v2.types.data import Value as PBValue +from google.cloud.bigtable.data.execute_query._reader import _QueryResultRowReader + +from google.cloud.bigtable.data.execute_query.metadata import ProtoMetadata, SqlType + +import google.cloud.bigtable.data.execute_query._reader +from ._testing import TYPE_INT, proto_rows_bytes + + +class TestQueryResultRowReader: + def test__single_values_received(self): + byte_cursor = mock.Mock( + metadata=ProtoMetadata( + [("test1", SqlType.Int64()), ("test2", SqlType.Int64())] + ) + ) + values = [ + proto_rows_bytes({"int_value": 1}), + proto_rows_bytes({"int_value": 2}), + proto_rows_bytes({"int_value": 3}), + ] + + reader = _QueryResultRowReader(byte_cursor) + + assert reader.consume(values[0]) is None + result = reader.consume(values[1]) + assert len(result) == 1 + assert len(result[0]) == 2 + assert reader.consume(values[2]) is None + + def test__multiple_rows_received(self): + values = [ + proto_rows_bytes( + {"int_value": 1}, + {"int_value": 2}, + {"int_value": 3}, + {"int_value": 4}, + ), + proto_rows_bytes({"int_value": 5}, {"int_value": 6}), + proto_rows_bytes({"int_value": 7}, {"int_value": 8}), + ] + + byte_cursor = mock.Mock( + metadata=ProtoMetadata( + [("test1", SqlType.Int64()), ("test2", SqlType.Int64())] + ) + ) + + reader = _QueryResultRowReader(byte_cursor) + + result = reader.consume(values[0]) + assert len(result) == 2 + assert len(result[0]) == 2 + assert result[0][0] == result[0]["test1"] == 1 + assert result[0][1] == result[0]["test2"] == 2 + + assert len(result[1]) == 2 + assert result[1][0] == result[1]["test1"] == 3 + assert result[1][1] == result[1]["test2"] == 4 + + result = reader.consume(values[1]) + assert len(result) == 1 + assert len(result[0]) == 2 + assert result[0][0] == result[0]["test1"] == 5 + assert result[0][1] == result[0]["test2"] == 6 + + result = reader.consume(values[2]) + assert len(result) == 1 + assert len(result[0]) == 2 + assert result[0][0] == result[0]["test1"] == 7 + assert result[0][1] == result[0]["test2"] == 8 + + def test__received_values_are_passed_to_parser_in_batches(self): + byte_cursor = mock.Mock( + metadata=ProtoMetadata( + [("test1", SqlType.Int64()), ("test2", SqlType.Int64())] + ) + ) + + assert SqlType.Struct([("a", SqlType.Int64())]) == SqlType.Struct( + [("a", SqlType.Int64())] + ) + assert SqlType.Struct([("a", SqlType.String())]) != SqlType.Struct( + [("a", SqlType.Int64())] + ) + assert SqlType.Struct([("a", SqlType.Int64())]) != SqlType.Struct( + [("b", SqlType.Int64())] + ) + + assert SqlType.Array(SqlType.Int64()) == SqlType.Array(SqlType.Int64()) + assert SqlType.Array(SqlType.Int64()) != SqlType.Array(SqlType.String()) + + assert SqlType.Map(SqlType.Int64(), SqlType.String()) == SqlType.Map( + SqlType.Int64(), SqlType.String() + ) + assert SqlType.Map(SqlType.Int64(), SqlType.String()) != SqlType.Map( + SqlType.String(), SqlType.String() + ) + + values = [ + {"int_value": 1}, + 
{"int_value": 2}, + ] + + reader = _QueryResultRowReader(byte_cursor) + with mock.patch.object( + google.cloud.bigtable.data.execute_query._reader, + "_parse_pb_value_to_python_value", + ) as parse_mock: + reader.consume(proto_rows_bytes(values[0])) + parse_mock.assert_not_called() + reader.consume(proto_rows_bytes(values[1])) + parse_mock.assert_has_calls( + [ + mock.call(PBValue(values[0]), SqlType.Int64()), + mock.call(PBValue(values[1]), SqlType.Int64()), + ] + ) + + def test__parser_errors_are_forwarded(self): + byte_cursor = mock.Mock(metadata=ProtoMetadata([("test1", SqlType.Int64())])) + + values = [ + {"string_value": "test"}, + ] + + reader = _QueryResultRowReader(byte_cursor) + with mock.patch.object( + google.cloud.bigtable.data.execute_query._reader, + "_parse_pb_value_to_python_value", + side_effect=ValueError("test"), + ) as parse_mock: + with pytest.raises(ValueError, match="test"): + reader.consume(proto_rows_bytes(values[0])) + + parse_mock.assert_has_calls( + [ + mock.call(PBValue(values[0]), SqlType.Int64()), + ] + ) + + def test__multiple_proto_rows_received_with_one_resume_token(self): + from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor + + def split_bytes_into_chunks(bytes_to_split, num_chunks): + from google.cloud.bigtable.helpers import batched + + assert num_chunks <= len(bytes_to_split) + bytes_per_part = (len(bytes_to_split) - 1) // num_chunks + 1 + result = list(map(bytes, batched(bytes_to_split, bytes_per_part))) + assert len(result) == num_chunks + return result + + def pass_values_to_byte_cursor(byte_cursor, iterable): + for value in iterable: + result = byte_cursor.consume(value) + if result is not None: + yield result + + proto_rows = [ + proto_rows_bytes({"int_value": 1}, {"int_value": 2}), + proto_rows_bytes({"int_value": 3}, {"int_value": 4}), + proto_rows_bytes({"int_value": 5}, {"int_value": 6}), + ] + + messages = [ + *split_bytes_into_chunks(proto_rows[0], num_chunks=2), + *split_bytes_into_chunks(proto_rows[1], num_chunks=3), + proto_rows[2], + ] + + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": { + "columns": [ + {"name": "test1", "type_": TYPE_INT}, + {"name": "test2", "type_": TYPE_INT}, + ] + } + } + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[0]}} + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[1]}} + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[2]}} + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[3]}} + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[4]}, + "resume_token": b"token1", + } + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[5]}, + "resume_token": b"token2", + } + ), + ] + + byte_cursor = _ByteCursor() + + reader = _QueryResultRowReader(byte_cursor) + + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + + returned_values = [] + + def intercept_return_values(func): + nonlocal intercept_return_values + + def wrapped(*args, **kwargs): + value = func(*args, **kwargs) + returned_values.append(value) + return value + + return wrapped + + with mock.patch.object( + reader, + "_parse_proto_rows", + wraps=intercept_return_values(reader._parse_proto_rows), + ): + result = reader.consume(next(byte_cursor_iter)) + + # Despite the fact that two ProtoRows were received, a single resume_token after the second ProtoRows object forces us to parse them together. 
+ # We will interpret them as one larger ProtoRows object. + assert len(returned_values) == 1 + assert len(returned_values[0]) == 4 + assert returned_values[0][0].int_value == 1 + assert returned_values[0][1].int_value == 2 + assert returned_values[0][2].int_value == 3 + assert returned_values[0][3].int_value == 4 + + assert len(result) == 2 + assert len(result[0]) == 2 + assert result[0][0] == 1 + assert result[0]["test1"] == 1 + assert result[0][1] == 2 + assert result[0]["test2"] == 2 + assert len(result[1]) == 2 + assert result[1][0] == 3 + assert result[1]["test1"] == 3 + assert result[1][1] == 4 + assert result[1]["test2"] == 4 + assert byte_cursor._resume_token == b"token1" + + returned_values = [] + with mock.patch.object( + reader, + "_parse_proto_rows", + wraps=intercept_return_values(reader._parse_proto_rows), + ): + result = reader.consume(next(byte_cursor_iter)) + + assert len(result) == 1 + assert len(result[0]) == 2 + assert result[0][0] == 5 + assert result[0]["test1"] == 5 + assert result[0][1] == 6 + assert result[0]["test2"] == 6 + assert byte_cursor._resume_token == b"token2" + + +class TestProtoMetadata: + def test__duplicate_column_names(self): + metadata = ProtoMetadata( + [ + ("test1", SqlType.Int64()), + ("test2", SqlType.Bytes()), + ("test2", SqlType.String()), + ] + ) + assert metadata[0].column_name == "test1" + assert metadata["test1"].column_type == SqlType.Int64() + + # duplicate columns not accesible by name + with pytest.raises(KeyError, match="Ambigious column name"): + metadata["test2"] + + # duplicate columns accessible by index + assert metadata[1].column_type == SqlType.Bytes() + assert metadata[1].column_name == "test2" + assert metadata[2].column_type == SqlType.String() + assert metadata[2].column_name == "test2" diff --git a/tests/unit/data/test__helpers.py b/tests/unit/data/test__helpers.py index 5a9c500ed..12ab3181e 100644 --- a/tests/unit/data/test__helpers.py +++ b/tests/unit/data/test__helpers.py @@ -23,16 +23,31 @@ class TestMakeMetadata: @pytest.mark.parametrize( - "table,profile,expected", + "table,profile,instance,expected", [ - ("table", "profile", "table_name=table&app_profile_id=profile"), - ("table", None, "table_name=table"), + ("table", "profile", None, "table_name=table&app_profile_id=profile"), + ("table", None, None, "table_name=table"), + (None, None, "instance", "name=instance"), + (None, "profile", None, "app_profile_id=profile"), + (None, "profile", "instance", "name=instance&app_profile_id=profile"), ], ) - def test__make_metadata(self, table, profile, expected): - metadata = _helpers._make_metadata(table, profile) + def test__make_metadata(self, table, profile, instance, expected): + metadata = _helpers._make_metadata(table, profile, instance) assert metadata == [("x-goog-request-params", expected)] + @pytest.mark.parametrize( + "table,profile,instance", + [ + ("table", None, "instance"), + ("table", "profile", "instance"), + (None, None, None), + ], + ) + def test__make_metadata_invalid_params(self, table, profile, instance): + with pytest.raises(ValueError): + _helpers._make_metadata(table, profile, instance) + class TestAttemptTimeoutGenerator: @pytest.mark.parametrize( diff --git a/tests/unit/data/test_helpers.py b/tests/unit/data/test_helpers.py new file mode 100644 index 000000000..5d1ad70f8 --- /dev/null +++ b/tests/unit/data/test_helpers.py @@ -0,0 +1,45 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import pytest +from google.cloud.bigtable.helpers import batched + + +class TestBatched: + @pytest.mark.parametrize( + "input_list,batch_size,expected", + [ + ([1, 2, 3, 4, 5], 3, [[1, 2, 3], [4, 5]]), + ([1, 2, 3, 4, 5, 6], 3, [[1, 2, 3], [4, 5, 6]]), + ([1, 2, 3, 4, 5], 2, [[1, 2], [3, 4], [5]]), + ([1, 2, 3, 4, 5], 1, [[1], [2], [3], [4], [5]]), + ([1, 2, 3, 4, 5], 5, [[1, 2, 3, 4, 5]]), + ([], 1, []), + ], + ) + def test_batched(self, input_list, batch_size, expected): + result = list(batched(input_list, batch_size)) + assert list(map(list, result)) == expected + + @pytest.mark.parametrize( + "input_list,batch_size", + [ + ([1], 0), + ([1], -1), + ], + ) + def test_batched_errs(self, input_list, batch_size): + with pytest.raises(ValueError): + list(batched(input_list, batch_size)) diff --git a/tests/unit/v2_client/_testing.py b/tests/unit/v2_client/_testing.py index 302d33ac1..855c0c10e 100644 --- a/tests/unit/v2_client/_testing.py +++ b/tests/unit/v2_client/_testing.py @@ -17,6 +17,9 @@ import mock +# flake8: noqa +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes + class _FakeStub(object): """Acts as a gPRC stub.""" diff --git a/tests/unit/v2_client/test_instance.py b/tests/unit/v2_client/test_instance.py index 797e4bd9c..de6844a16 100644 --- a/tests/unit/v2_client/test_instance.py +++ b/tests/unit/v2_client/test_instance.py @@ -19,6 +19,7 @@ from ._testing import _make_credentials from google.cloud.bigtable.cluster import Cluster + PROJECT = "project" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID @@ -943,3 +944,28 @@ def _next_page(self): assert isinstance(app_profile_2, AppProfile) assert app_profile_2.name == app_profile_name2 + + +@pytest.fixture() +def data_api(): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + data_api_mock = mock.create_autospec(BigtableClient) + data_api_mock.instance_path.return_value = ( + f"projects/{PROJECT}/instances/{INSTANCE_ID}" + ) + return data_api_mock + + +@pytest.fixture() +def client(data_api): + result = _make_client( + project="project-id", credentials=_make_credentials(), admin=True + ) + result._table_data_client = data_api + return result + + +@pytest.fixture() +def instance(client): + return client.instance(instance_id=INSTANCE_ID) From 6e801900bbe9385d3b579b8c3327c87c3617d92f Mon Sep 17 00:00:00 2001 From: Jack Dingilian Date: Thu, 8 Aug 2024 18:28:11 -0400 Subject: [PATCH 05/12] docs: add clarification around SQL timestamps (#1012) --- google/cloud/bigtable/data/execute_query/metadata.py | 6 ++++++ google/cloud/bigtable/data/execute_query/values.py | 3 +++ 2 files changed, 9 insertions(+) diff --git a/google/cloud/bigtable/data/execute_query/metadata.py b/google/cloud/bigtable/data/execute_query/metadata.py index 98b94a644..4c08cbad3 100644 --- a/google/cloud/bigtable/data/execute_query/metadata.py +++ b/google/cloud/bigtable/data/execute_query/metadata.py @@ -214,6 +214,12 @@ class Bool(Type): type_field_name = "bool_type" class Timestamp(Type): + """ + 
Timestamp supports :class:`DatetimeWithNanoseconds` but Bigtable SQL does + not currently support nanoseconds precision. We support this for potential + compatibility in the future. Nanoseconds are currently ignored. + """ + type_field_name = "timestamp_type" expected_types = ( datetime.datetime, diff --git a/google/cloud/bigtable/data/execute_query/values.py b/google/cloud/bigtable/data/execute_query/values.py index 450f6f855..394bef71e 100644 --- a/google/cloud/bigtable/data/execute_query/values.py +++ b/google/cloud/bigtable/data/execute_query/values.py @@ -100,6 +100,9 @@ def __repr__(self) -> str: bool, bytes, str, + # Note that Bigtable SQL does not currently support nanosecond precision, + # only microseconds. We use this for compatibility with potential future + # support DatetimeWithNanoseconds, date_pb2.Date, "Struct", From 6686abfab4cd641b37ea5241afbfeee7ff9a81f1 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 15:29:29 -0700 Subject: [PATCH 06/12] chore(python): fix docs build (#1008) Source-Link: https://github.com/googleapis/synthtool/commit/bef813d194de29ddf3576eda60148b6b3dcc93d9 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:94bb690db96e6242b2567a4860a94d48fa48696d092e51b0884a1a2c0a79a407 Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 3 +- .kokoro/docker/docs/Dockerfile | 26 ++++++++-------- .kokoro/docker/docs/requirements.txt | 40 +++++++++++++----------- .kokoro/publish-docs.sh | 20 ++++++------ .kokoro/requirements.txt | 46 ++++++++++++++-------------- 5 files changed, 71 insertions(+), 64 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 620159621..6d064ddb9 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,4 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:5651442a6336971a2fb2df40fb56b3337df67cafa14c0809cc89cb34ccee1b8e + digest: sha256:94bb690db96e6242b2567a4860a94d48fa48696d092e51b0884a1a2c0a79a407 +# created: 2024-07-31T14:52:44.926548819Z diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile index a26ce6193..e5410e296 100644 --- a/.kokoro/docker/docs/Dockerfile +++ b/.kokoro/docker/docs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ubuntu:22.04 +from ubuntu:24.04 ENV DEBIAN_FRONTEND noninteractive @@ -40,7 +40,6 @@ RUN apt-get update \ libssl-dev \ libsqlite3-dev \ portaudio19-dev \ - python3-distutils \ redis-server \ software-properties-common \ ssh \ @@ -60,28 +59,31 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* \ && rm -f /var/cache/apt/archives/*.deb -###################### Install python 3.9.13 -# Download python 3.9.13 -RUN wget https://www.python.org/ftp/python/3.9.13/Python-3.9.13.tgz +###################### Install python 3.10.14 for docs/docfx session + +# Download python 3.10.14 +RUN wget https://www.python.org/ftp/python/3.10.14/Python-3.10.14.tgz # Extract files -RUN tar -xvf Python-3.9.13.tgz +RUN tar -xvf Python-3.10.14.tgz -# Install python 3.9.13 -RUN ./Python-3.9.13/configure --enable-optimizations +# Install python 3.10.14 +RUN ./Python-3.10.14/configure --enable-optimizations RUN make altinstall +ENV PATH /usr/local/bin/python3.10:$PATH + ###################### Install pip RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ - && python3 /tmp/get-pip.py \ + && python3.10 /tmp/get-pip.py \ && rm /tmp/get-pip.py # Test pip -RUN python3 -m pip +RUN python3.10 -m pip # Install build requirements COPY requirements.txt /requirements.txt -RUN python3 -m pip install --require-hashes -r requirements.txt +RUN python3.10 -m pip install --require-hashes -r requirements.txt -CMD ["python3.8"] +CMD ["python3.10"] diff --git a/.kokoro/docker/docs/requirements.txt b/.kokoro/docker/docs/requirements.txt index 0e5d70f20..7129c7715 100644 --- a/.kokoro/docker/docs/requirements.txt +++ b/.kokoro/docker/docs/requirements.txt @@ -4,9 +4,9 @@ # # pip-compile --allow-unsafe --generate-hashes requirements.in # -argcomplete==3.2.3 \ - --hash=sha256:bf7900329262e481be5a15f56f19736b376df6f82ed27576fa893652c5de6c23 \ - --hash=sha256:c12355e0494c76a2a7b73e3a59b09024ca0ba1e279fb9ed6c1b82d5b74b6a70c +argcomplete==3.4.0 \ + --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \ + --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f # via nox colorlog==6.8.2 \ --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ @@ -16,23 +16,27 @@ distlib==0.3.8 \ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -filelock==3.13.1 \ - --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ - --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c +filelock==3.15.4 \ + --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \ + --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7 # via virtualenv -nox==2024.3.2 \ - --hash=sha256:e53514173ac0b98dd47585096a55572fe504fecede58ced708979184d05440be \ - --hash=sha256:f521ae08a15adbf5e11f16cb34e8d0e6ea521e0b92868f684e91677deb974553 +nox==2024.4.15 \ + --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ + --hash=sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f # via -r requirements.in -packaging==24.0 \ - --hash=sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5 \ - --hash=sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9 
+packaging==24.1 \ + --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ + --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 # via nox -platformdirs==4.2.0 \ - --hash=sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068 \ - --hash=sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768 +platformdirs==4.2.2 \ + --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ + --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 # via virtualenv -virtualenv==20.25.1 \ - --hash=sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a \ - --hash=sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197 +tomli==2.0.1 \ + --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ + --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f + # via nox +virtualenv==20.26.3 \ + --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \ + --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589 # via nox diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index 38f083f05..233205d58 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -21,18 +21,18 @@ export PYTHONUNBUFFERED=1 export PATH="${HOME}/.local/bin:${PATH}" # Install nox -python3 -m pip install --require-hashes -r .kokoro/requirements.txt -python3 -m nox --version +python3.10 -m pip install --require-hashes -r .kokoro/requirements.txt +python3.10 -m nox --version # build docs nox -s docs # create metadata -python3 -m docuploader create-metadata \ +python3.10 -m docuploader create-metadata \ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3 setup.py --version) \ + --version=$(python3.10 setup.py --version) \ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3 setup.py --name) \ + --distribution-name=$(python3.10 setup.py --name) \ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) @@ -40,18 +40,18 @@ python3 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" +python3.10 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" # docfx yaml files nox -s docfx # create metadata. 
-python3 -m docuploader create-metadata \ +python3.10 -m docuploader create-metadata \ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3 setup.py --version) \ + --version=$(python3.10 setup.py --version) \ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3 setup.py --name) \ + --distribution-name=$(python3.10 setup.py --name) \ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) @@ -59,4 +59,4 @@ python3 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" +python3.10 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 35ece0e4d..9622baf0b 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -20,9 +20,9 @@ cachetools==5.3.3 \ --hash=sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945 \ --hash=sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105 # via google-auth -certifi==2024.6.2 \ - --hash=sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516 \ - --hash=sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56 +certifi==2024.7.4 \ + --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ + --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 # via requests cffi==1.16.0 \ --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ @@ -371,23 +371,23 @@ more-itertools==10.3.0 \ # via # jaraco-classes # jaraco-functools -nh3==0.2.17 \ - --hash=sha256:0316c25b76289cf23be6b66c77d3608a4fdf537b35426280032f432f14291b9a \ - --hash=sha256:1a814dd7bba1cb0aba5bcb9bebcc88fd801b63e21e2450ae6c52d3b3336bc911 \ - --hash=sha256:1aa52a7def528297f256de0844e8dd680ee279e79583c76d6fa73a978186ddfb \ - --hash=sha256:22c26e20acbb253a5bdd33d432a326d18508a910e4dcf9a3316179860d53345a \ - --hash=sha256:40015514022af31975c0b3bca4014634fa13cb5dc4dbcbc00570acc781316dcc \ - --hash=sha256:40d0741a19c3d645e54efba71cb0d8c475b59135c1e3c580f879ad5514cbf028 \ - --hash=sha256:551672fd71d06cd828e282abdb810d1be24e1abb7ae2543a8fa36a71c1006fe9 \ - --hash=sha256:66f17d78826096291bd264f260213d2b3905e3c7fae6dfc5337d49429f1dc9f3 \ - --hash=sha256:85cdbcca8ef10733bd31f931956f7fbb85145a4d11ab9e6742bbf44d88b7e351 \ - --hash=sha256:a3f55fabe29164ba6026b5ad5c3151c314d136fd67415a17660b4aaddacf1b10 \ - --hash=sha256:b4427ef0d2dfdec10b641ed0bdaf17957eb625b2ec0ea9329b3d28806c153d71 \ - --hash=sha256:ba73a2f8d3a1b966e9cdba7b211779ad8a2561d2dba9674b8a19ed817923f65f \ - --hash=sha256:c21bac1a7245cbd88c0b0e4a420221b7bfa838a2814ee5bb924e9c2f10a1120b \ - --hash=sha256:c551eb2a3876e8ff2ac63dff1585236ed5dfec5ffd82216a7a174f7c5082a78a \ - --hash=sha256:c790769152308421283679a142dbdb3d1c46c79c823008ecea8e8141db1a2062 \ - --hash=sha256:d7a25fd8c86657f5d9d576268e3b3767c5cd4f42867c9383618be8517f0f022a +nh3==0.2.18 \ + --hash=sha256:0411beb0589eacb6734f28d5497ca2ed379eafab8ad8c84b31bb5c34072b7164 \ + --hash=sha256:14c5a72e9fe82aea5fe3072116ad4661af5cf8e8ff8fc5ad3450f123e4925e86 \ + 
--hash=sha256:19aaba96e0f795bd0a6c56291495ff59364f4300d4a39b29a0abc9cb3774a84b \ + --hash=sha256:34c03fa78e328c691f982b7c03d4423bdfd7da69cd707fe572f544cf74ac23ad \ + --hash=sha256:36c95d4b70530b320b365659bb5034341316e6a9b30f0b25fa9c9eff4c27a204 \ + --hash=sha256:3a157ab149e591bb638a55c8c6bcb8cdb559c8b12c13a8affaba6cedfe51713a \ + --hash=sha256:42c64511469005058cd17cc1537578eac40ae9f7200bedcfd1fc1a05f4f8c200 \ + --hash=sha256:5f36b271dae35c465ef5e9090e1fdaba4a60a56f0bb0ba03e0932a66f28b9189 \ + --hash=sha256:6955369e4d9f48f41e3f238a9e60f9410645db7e07435e62c6a9ea6135a4907f \ + --hash=sha256:7b7c2a3c9eb1a827d42539aa64091640bd275b81e097cd1d8d82ef91ffa2e811 \ + --hash=sha256:8ce0f819d2f1933953fca255db2471ad58184a60508f03e6285e5114b6254844 \ + --hash=sha256:94a166927e53972a9698af9542ace4e38b9de50c34352b962f4d9a7d4c927af4 \ + --hash=sha256:a7f1b5b2c15866f2db413a3649a8fe4fd7b428ae58be2c0f6bca5eefd53ca2be \ + --hash=sha256:c8b3a1cebcba9b3669ed1a84cc65bf005728d2f0bc1ed2a6594a992e817f3a50 \ + --hash=sha256:de3ceed6e661954871d6cd78b410213bdcb136f79aafe22aa7182e028b8c7307 \ + --hash=sha256:f0eca9ca8628dbb4e916ae2491d72957fdd35f7a5d326b7032a345f111ac07fe # via readme-renderer nox==2024.4.15 \ --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ @@ -460,9 +460,9 @@ python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 # via gcp-releasetool -readme-renderer==43.0 \ - --hash=sha256:1818dd28140813509eeed8d62687f7cd4f7bad90d4db586001c5dc09d4fde311 \ - --hash=sha256:19db308d86ecd60e5affa3b2a98f017af384678c63c88e5d4556a380e674f3f9 +readme-renderer==44.0 \ + --hash=sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151 \ + --hash=sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1 # via twine requests==2.32.3 \ --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ From b95801ffa8081e0072232247fbc5879105c109a6 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 15:30:33 -0700 Subject: [PATCH 07/12] feat: add fields and the BackupType proto for Hot Backups (#1010) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add fields and the BackupType proto for Hot Backups docs: clarify comments and fix typos PiperOrigin-RevId: 658791576 Source-Link: https://github.com/googleapis/googleapis/commit/c93b54fa3060c7185f6dc724f0f9ec0c12bc44fc Source-Link: https://github.com/googleapis/googleapis-gen/commit/e52ba38a95a82f7588d0dd3a2284c98850dab9e1 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZTUyYmEzOGE5NWE4MmY3NTg4ZDBkZDNhMjI4NGM5ODg1MGRhYjllMSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_table_admin/async_client.py | 6 +- .../services/bigtable_table_admin/client.py | 6 +- .../bigtable_table_admin/transports/grpc.py | 2 +- .../transports/grpc_asyncio.py | 2 +- .../types/bigtable_table_admin.py | 2 
+- google/cloud/bigtable_admin_v2/types/table.py | 66 +++++++++++++++++-- .../test_bigtable_table_admin.py | 18 +++++ 7 files changed, 86 insertions(+), 16 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 7454e08ac..a59302efb 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2777,7 +2777,7 @@ async def restore_table( operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. @@ -2862,8 +2862,8 @@ async def copy_backup( [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. parent (:class:`str`): Required. The name of the destination cluster that will - contain the backup copy. The cluster must already - exists. Values are of the form: + contain the backup copy. The cluster must already exist. + Values are of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}``. This corresponds to the ``parent`` field diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 4645d4f3b..b7be597ef 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -3245,7 +3245,7 @@ def restore_table( operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. @@ -3328,8 +3328,8 @@ def copy_backup( [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. parent (str): Required. The name of the destination cluster that will - contain the backup copy. The cluster must already - exists. Values are of the form: + contain the backup copy. The cluster must already exist. + Values are of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}``. This corresponds to the ``parent`` field diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 71f06947f..8b0eadbbc 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -1019,7 +1019,7 @@ def restore_table( operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. 
The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index bdd6e20c8..e8b31ed36 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -1048,7 +1048,7 @@ def restore_table( operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 0bc3b6b81..9d1bf3ef5 100644 --- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -1356,7 +1356,7 @@ class CopyBackupRequest(proto.Message): Attributes: parent (str): Required. The name of the destination cluster that will - contain the backup copy. The cluster must already exists. + contain the backup copy. The cluster must already exist. Values are of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}``. backup_id (str): diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py index ef162bee1..241d7853c 100644 --- a/google/cloud/bigtable_admin_v2/types/table.py +++ b/google/cloud/bigtable_admin_v2/types/table.py @@ -781,13 +781,18 @@ class Backup(proto.Message): this backup was copied. If a backup is not created by copying a backup, this field will be empty. Values are of the form: - projects//instances//backups/. + + projects//instances//clusters//backups/ expire_time (google.protobuf.timestamp_pb2.Timestamp): - Required. The expiration time of the backup, with - microseconds granularity that must be at least 6 hours and - at most 90 days from the time the request is received. Once - the ``expire_time`` has passed, Cloud Bigtable will delete - the backup and free the resources used by the backup. + Required. The expiration time of the backup. When creating a + backup or updating its ``expire_time``, the value must be + greater than the backup creation time by: + + - At least 6 hours + - At most 90 days + + Once the ``expire_time`` has passed, Cloud Bigtable will + delete the backup. start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. ``start_time`` is the time that the backup was started (i.e. approximately the time the @@ -805,6 +810,20 @@ class Backup(proto.Message): encryption_info (google.cloud.bigtable_admin_v2.types.EncryptionInfo): Output only. The encryption information for the backup. + backup_type (google.cloud.bigtable_admin_v2.types.Backup.BackupType): + Indicates the backup type of the backup. + hot_to_standard_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the hot backup will be converted to a + standard backup. Once the ``hot_to_standard_time`` has + passed, Cloud Bigtable will convert the hot backup to a + standard backup. 
This value must be greater than the backup + creation time by: + + - At least 24 hours + + This field only applies for hot backups. When creating or + updating a standard backup, attempting to set this field + will fail the request. """ class State(proto.Enum): @@ -823,6 +842,28 @@ class State(proto.Enum): CREATING = 1 READY = 2 + class BackupType(proto.Enum): + r"""The type of the backup. + + Values: + BACKUP_TYPE_UNSPECIFIED (0): + Not specified. + STANDARD (1): + The default type for Cloud Bigtable managed + backups. Supported for backups created in both + HDD and SSD instances. Requires optimization + when restored to a table in an SSD instance. + HOT (2): + A backup type with faster restore to SSD + performance. Only supported for backups created + in SSD instances. A new SSD table restored from + a hot backup reaches production performance more + quickly than a standard backup. + """ + BACKUP_TYPE_UNSPECIFIED = 0 + STANDARD = 1 + HOT = 2 + name: str = proto.Field( proto.STRING, number=1, @@ -864,6 +905,16 @@ class State(proto.Enum): number=9, message="EncryptionInfo", ) + backup_type: BackupType = proto.Field( + proto.ENUM, + number=11, + enum=BackupType, + ) + hot_to_standard_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) class BackupInfo(proto.Message): @@ -888,7 +939,8 @@ class BackupInfo(proto.Message): this backup was copied. If a backup is not created by copying a backup, this field will be empty. Values are of the form: - projects//instances//backups/. + + projects//instances//clusters//backups/ """ backup: str = proto.Field( diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 2b84213bc..580189044 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -9753,6 +9753,7 @@ def test_get_backup(request_type, transport: str = "grpc"): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) response = client.get_backup(request) @@ -9769,6 +9770,7 @@ def test_get_backup(request_type, transport: str = "grpc"): assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD def test_get_backup_empty_call(): @@ -9872,6 +9874,7 @@ async def test_get_backup_empty_call_async(): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) ) response = await client.get_backup() @@ -9942,6 +9945,7 @@ async def test_get_backup_async( source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) ) response = await client.get_backup(request) @@ -9959,6 +9963,7 @@ async def test_get_backup_async( assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD @pytest.mark.asyncio @@ -10131,6 +10136,7 @@ def test_update_backup(request_type, transport: str = "grpc"): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) response = 
client.update_backup(request) @@ -10147,6 +10153,7 @@ def test_update_backup(request_type, transport: str = "grpc"): assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD def test_update_backup_empty_call(): @@ -10246,6 +10253,7 @@ async def test_update_backup_empty_call_async(): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) ) response = await client.update_backup() @@ -10319,6 +10327,7 @@ async def test_update_backup_async( source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) ) response = await client.update_backup(request) @@ -10336,6 +10345,7 @@ async def test_update_backup_async( assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD @pytest.mark.asyncio @@ -19957,6 +19967,8 @@ def test_create_backup_rest(request_type): }, "kms_key_version": "kms_key_version_value", }, + "backup_type": 1, + "hot_to_standard_time": {}, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -20368,6 +20380,7 @@ def test_get_backup_rest(request_type): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) # Wrap the value into a proper Response obj @@ -20388,6 +20401,7 @@ def test_get_backup_rest(request_type): assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD def test_get_backup_rest_use_cached_wrapped_rpc(): @@ -20697,6 +20711,8 @@ def test_update_backup_rest(request_type): }, "kms_key_version": "kms_key_version_value", }, + "backup_type": 1, + "hot_to_standard_time": {}, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -20776,6 +20792,7 @@ def get_message_fields(field): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) # Wrap the value into a proper Response obj @@ -20796,6 +20813,7 @@ def get_message_fields(field): assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD def test_update_backup_rest_use_cached_wrapped_rpc(): From 7af1dc1750e653cb3fc8fa3f282d4846b6de272d Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 9 Aug 2024 15:03:56 -0600 Subject: [PATCH 08/12] Revert "chore(python): fix docs build (#1008)" (#1013) This reverts commit 6686abfab4cd641b37ea5241afbfeee7ff9a81f1. 
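Note: the .kokoro requirements files restored by this revert are hash-pinned lockfiles generated with pip-compile; the exact command is recorded in the header of .kokoro/docker/docs/requirements.txt below. As a rough sketch (not part of this patch), regenerating such a file, assuming a requirements.in sits beside it listing the top-level dependency (here just nox), could look like:

    # install pip-tools, which provides the pip-compile command
    python3 -m pip install pip-tools
    # re-resolve requirements.in and emit a hash-pinned requirements.txt
    pip-compile --allow-unsafe --generate-hashes requirements.in

Keeping the hashes pinned is what lets the docs image install with "pip install --require-hashes" further down in the Dockerfile.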
--- .github/.OwlBot.lock.yaml | 3 +- .kokoro/docker/docs/Dockerfile | 26 ++++++++-------- .kokoro/docker/docs/requirements.txt | 40 +++++++++++------------- .kokoro/publish-docs.sh | 20 ++++++------ .kokoro/requirements.txt | 46 ++++++++++++++-------------- 5 files changed, 64 insertions(+), 71 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 6d064ddb9..620159621 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,4 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:94bb690db96e6242b2567a4860a94d48fa48696d092e51b0884a1a2c0a79a407 -# created: 2024-07-31T14:52:44.926548819Z + digest: sha256:5651442a6336971a2fb2df40fb56b3337df67cafa14c0809cc89cb34ccee1b8e diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile index e5410e296..a26ce6193 100644 --- a/.kokoro/docker/docs/Dockerfile +++ b/.kokoro/docker/docs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ubuntu:24.04 +from ubuntu:22.04 ENV DEBIAN_FRONTEND noninteractive @@ -40,6 +40,7 @@ RUN apt-get update \ libssl-dev \ libsqlite3-dev \ portaudio19-dev \ + python3-distutils \ redis-server \ software-properties-common \ ssh \ @@ -59,31 +60,28 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* \ && rm -f /var/cache/apt/archives/*.deb +###################### Install python 3.9.13 -###################### Install python 3.10.14 for docs/docfx session - -# Download python 3.10.14 -RUN wget https://www.python.org/ftp/python/3.10.14/Python-3.10.14.tgz +# Download python 3.9.13 +RUN wget https://www.python.org/ftp/python/3.9.13/Python-3.9.13.tgz # Extract files -RUN tar -xvf Python-3.10.14.tgz +RUN tar -xvf Python-3.9.13.tgz -# Install python 3.10.14 -RUN ./Python-3.10.14/configure --enable-optimizations +# Install python 3.9.13 +RUN ./Python-3.9.13/configure --enable-optimizations RUN make altinstall -ENV PATH /usr/local/bin/python3.10:$PATH - ###################### Install pip RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ - && python3.10 /tmp/get-pip.py \ + && python3 /tmp/get-pip.py \ && rm /tmp/get-pip.py # Test pip -RUN python3.10 -m pip +RUN python3 -m pip # Install build requirements COPY requirements.txt /requirements.txt -RUN python3.10 -m pip install --require-hashes -r requirements.txt +RUN python3 -m pip install --require-hashes -r requirements.txt -CMD ["python3.10"] +CMD ["python3.8"] diff --git a/.kokoro/docker/docs/requirements.txt b/.kokoro/docker/docs/requirements.txt index 7129c7715..0e5d70f20 100644 --- a/.kokoro/docker/docs/requirements.txt +++ b/.kokoro/docker/docs/requirements.txt @@ -4,9 +4,9 @@ # # pip-compile --allow-unsafe --generate-hashes requirements.in # -argcomplete==3.4.0 \ - --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \ - --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f +argcomplete==3.2.3 \ + --hash=sha256:bf7900329262e481be5a15f56f19736b376df6f82ed27576fa893652c5de6c23 \ + --hash=sha256:c12355e0494c76a2a7b73e3a59b09024ca0ba1e279fb9ed6c1b82d5b74b6a70c # via nox colorlog==6.8.2 \ --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ @@ -16,27 +16,23 @@ distlib==0.3.8 \ 
--hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -filelock==3.15.4 \ - --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \ - --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7 +filelock==3.13.1 \ + --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ + --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c # via virtualenv -nox==2024.4.15 \ - --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ - --hash=sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f +nox==2024.3.2 \ + --hash=sha256:e53514173ac0b98dd47585096a55572fe504fecede58ced708979184d05440be \ + --hash=sha256:f521ae08a15adbf5e11f16cb34e8d0e6ea521e0b92868f684e91677deb974553 # via -r requirements.in -packaging==24.1 \ - --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ - --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 +packaging==24.0 \ + --hash=sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5 \ + --hash=sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9 # via nox -platformdirs==4.2.2 \ - --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ - --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 +platformdirs==4.2.0 \ + --hash=sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068 \ + --hash=sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768 # via virtualenv -tomli==2.0.1 \ - --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ - --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f - # via nox -virtualenv==20.26.3 \ - --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \ - --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589 +virtualenv==20.25.1 \ + --hash=sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a \ + --hash=sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197 # via nox diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index 233205d58..38f083f05 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -21,18 +21,18 @@ export PYTHONUNBUFFERED=1 export PATH="${HOME}/.local/bin:${PATH}" # Install nox -python3.10 -m pip install --require-hashes -r .kokoro/requirements.txt -python3.10 -m nox --version +python3 -m pip install --require-hashes -r .kokoro/requirements.txt +python3 -m nox --version # build docs nox -s docs # create metadata -python3.10 -m docuploader create-metadata \ +python3 -m docuploader create-metadata \ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3.10 setup.py --version) \ + --version=$(python3 setup.py --version) \ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3.10 setup.py --name) \ + --distribution-name=$(python3 setup.py --name) \ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) @@ -40,18 +40,18 @@ python3.10 -m docuploader create-metadata \ cat docs.metadata # upload 
docs -python3.10 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" +python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" # docfx yaml files nox -s docfx # create metadata. -python3.10 -m docuploader create-metadata \ +python3 -m docuploader create-metadata \ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3.10 setup.py --version) \ + --version=$(python3 setup.py --version) \ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3.10 setup.py --name) \ + --distribution-name=$(python3 setup.py --name) \ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) @@ -59,4 +59,4 @@ python3.10 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3.10 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" +python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 9622baf0b..35ece0e4d 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -20,9 +20,9 @@ cachetools==5.3.3 \ --hash=sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945 \ --hash=sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105 # via google-auth -certifi==2024.7.4 \ - --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ - --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 +certifi==2024.6.2 \ + --hash=sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516 \ + --hash=sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56 # via requests cffi==1.16.0 \ --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ @@ -371,23 +371,23 @@ more-itertools==10.3.0 \ # via # jaraco-classes # jaraco-functools -nh3==0.2.18 \ - --hash=sha256:0411beb0589eacb6734f28d5497ca2ed379eafab8ad8c84b31bb5c34072b7164 \ - --hash=sha256:14c5a72e9fe82aea5fe3072116ad4661af5cf8e8ff8fc5ad3450f123e4925e86 \ - --hash=sha256:19aaba96e0f795bd0a6c56291495ff59364f4300d4a39b29a0abc9cb3774a84b \ - --hash=sha256:34c03fa78e328c691f982b7c03d4423bdfd7da69cd707fe572f544cf74ac23ad \ - --hash=sha256:36c95d4b70530b320b365659bb5034341316e6a9b30f0b25fa9c9eff4c27a204 \ - --hash=sha256:3a157ab149e591bb638a55c8c6bcb8cdb559c8b12c13a8affaba6cedfe51713a \ - --hash=sha256:42c64511469005058cd17cc1537578eac40ae9f7200bedcfd1fc1a05f4f8c200 \ - --hash=sha256:5f36b271dae35c465ef5e9090e1fdaba4a60a56f0bb0ba03e0932a66f28b9189 \ - --hash=sha256:6955369e4d9f48f41e3f238a9e60f9410645db7e07435e62c6a9ea6135a4907f \ - --hash=sha256:7b7c2a3c9eb1a827d42539aa64091640bd275b81e097cd1d8d82ef91ffa2e811 \ - --hash=sha256:8ce0f819d2f1933953fca255db2471ad58184a60508f03e6285e5114b6254844 \ - --hash=sha256:94a166927e53972a9698af9542ace4e38b9de50c34352b962f4d9a7d4c927af4 \ - --hash=sha256:a7f1b5b2c15866f2db413a3649a8fe4fd7b428ae58be2c0f6bca5eefd53ca2be \ - --hash=sha256:c8b3a1cebcba9b3669ed1a84cc65bf005728d2f0bc1ed2a6594a992e817f3a50 \ - 
--hash=sha256:de3ceed6e661954871d6cd78b410213bdcb136f79aafe22aa7182e028b8c7307 \ - --hash=sha256:f0eca9ca8628dbb4e916ae2491d72957fdd35f7a5d326b7032a345f111ac07fe +nh3==0.2.17 \ + --hash=sha256:0316c25b76289cf23be6b66c77d3608a4fdf537b35426280032f432f14291b9a \ + --hash=sha256:1a814dd7bba1cb0aba5bcb9bebcc88fd801b63e21e2450ae6c52d3b3336bc911 \ + --hash=sha256:1aa52a7def528297f256de0844e8dd680ee279e79583c76d6fa73a978186ddfb \ + --hash=sha256:22c26e20acbb253a5bdd33d432a326d18508a910e4dcf9a3316179860d53345a \ + --hash=sha256:40015514022af31975c0b3bca4014634fa13cb5dc4dbcbc00570acc781316dcc \ + --hash=sha256:40d0741a19c3d645e54efba71cb0d8c475b59135c1e3c580f879ad5514cbf028 \ + --hash=sha256:551672fd71d06cd828e282abdb810d1be24e1abb7ae2543a8fa36a71c1006fe9 \ + --hash=sha256:66f17d78826096291bd264f260213d2b3905e3c7fae6dfc5337d49429f1dc9f3 \ + --hash=sha256:85cdbcca8ef10733bd31f931956f7fbb85145a4d11ab9e6742bbf44d88b7e351 \ + --hash=sha256:a3f55fabe29164ba6026b5ad5c3151c314d136fd67415a17660b4aaddacf1b10 \ + --hash=sha256:b4427ef0d2dfdec10b641ed0bdaf17957eb625b2ec0ea9329b3d28806c153d71 \ + --hash=sha256:ba73a2f8d3a1b966e9cdba7b211779ad8a2561d2dba9674b8a19ed817923f65f \ + --hash=sha256:c21bac1a7245cbd88c0b0e4a420221b7bfa838a2814ee5bb924e9c2f10a1120b \ + --hash=sha256:c551eb2a3876e8ff2ac63dff1585236ed5dfec5ffd82216a7a174f7c5082a78a \ + --hash=sha256:c790769152308421283679a142dbdb3d1c46c79c823008ecea8e8141db1a2062 \ + --hash=sha256:d7a25fd8c86657f5d9d576268e3b3767c5cd4f42867c9383618be8517f0f022a # via readme-renderer nox==2024.4.15 \ --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ @@ -460,9 +460,9 @@ python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 # via gcp-releasetool -readme-renderer==44.0 \ - --hash=sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151 \ - --hash=sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1 +readme-renderer==43.0 \ + --hash=sha256:1818dd28140813509eeed8d62687f7cd4f7bad90d4db586001c5dc09d4fde311 \ + --hash=sha256:19db308d86ecd60e5affa3b2a98f017af384678c63c88e5d4556a380e674f3f9 # via twine requests==2.32.3 \ --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ From 4cf88dc16728c0d8a47ac505c5076a25e9226dd4 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 9 Aug 2024 14:04:45 -0700 Subject: [PATCH 09/12] chore: Update gapic-generator-python to v1.18.5 (#1015) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.18.5 PiperOrigin-RevId: 661268868 Source-Link: https://github.com/googleapis/googleapis/commit/f7d214cb08cd7d9b018d44564a8b184263f64177 Source-Link: https://github.com/googleapis/googleapis-gen/commit/79a8411bbdb25a983fa3aae8c0e14327df129f94 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNzlhODQxMWJiZGIyNWE5ODNmYTNhYWU4YzBlMTQzMjdkZjEyOWY5NCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See 
https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 6 +- .../bigtable_instance_admin/client.py | 2 +- .../bigtable_table_admin/async_client.py | 6 +- .../services/bigtable_table_admin/client.py | 2 +- .../services/bigtable/async_client.py | 5 +- .../bigtable_v2/services/bigtable/client.py | 2 +- .../test_bigtable_instance_admin.py | 249 ++++++------ .../test_bigtable_table_admin.py | 360 ++++++++++-------- tests/unit/gapic/bigtable_v2/test_bigtable.py | 90 +++-- 9 files changed, 401 insertions(+), 321 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index abed851d5..b6e77aaea 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -14,7 +14,6 @@ # limitations under the License. # from collections import OrderedDict -import functools import re from typing import ( Dict, @@ -218,10 +217,7 @@ def universe_domain(self) -> str: """ return self._client._universe_domain - get_transport_class = functools.partial( - type(BigtableInstanceAdminClient).get_transport_class, - type(BigtableInstanceAdminClient), - ) + get_transport_class = BigtableInstanceAdminClient.get_transport_class def __init__( self, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 5877342c4..b8173bf4b 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -790,7 +790,7 @@ def __init__( Type[BigtableInstanceAdminTransport], Callable[..., BigtableInstanceAdminTransport], ] = ( - type(self).get_transport_class(transport) + BigtableInstanceAdminClient.get_transport_class(transport) if isinstance(transport, str) or transport is None else cast(Callable[..., BigtableInstanceAdminTransport], transport) ) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index a59302efb..2e9eb13eb 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -14,7 +14,6 @@ # limitations under the License. 
# from collections import OrderedDict -import functools import re from typing import ( Dict, @@ -218,10 +217,7 @@ def universe_domain(self) -> str: """ return self._client._universe_domain - get_transport_class = functools.partial( - type(BigtableTableAdminClient).get_transport_class, - type(BigtableTableAdminClient), - ) + get_transport_class = BigtableTableAdminClient.get_transport_class def __init__( self, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index b7be597ef..55d50ee81 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -814,7 +814,7 @@ def __init__( Type[BigtableTableAdminTransport], Callable[..., BigtableTableAdminTransport], ] = ( - type(self).get_transport_class(transport) + BigtableTableAdminClient.get_transport_class(transport) if isinstance(transport, str) or transport is None else cast(Callable[..., BigtableTableAdminTransport], transport) ) diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index 1ed7a4740..54b7f2c63 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -14,7 +14,6 @@ # limitations under the License. # from collections import OrderedDict -import functools import re from typing import ( Dict, @@ -188,9 +187,7 @@ def universe_domain(self) -> str: """ return self._client._universe_domain - get_transport_class = functools.partial( - type(BigtableClient).get_transport_class, type(BigtableClient) - ) + get_transport_class = BigtableClient.get_transport_class def __init__( self, diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index 0937c90fe..86fa6b3a5 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -701,7 +701,7 @@ def __init__( transport_init: Union[ Type[BigtableTransport], Callable[..., BigtableTransport] ] = ( - type(self).get_transport_class(transport) + BigtableClient.get_transport_class(transport) if isinstance(transport, str) or transport is None else cast(Callable[..., BigtableTransport], transport) ) diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index ea6737973..26a7989a1 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -1326,8 +1326,9 @@ def test_create_instance_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_instance(request) @@ -1381,26 +1382,28 @@ async def test_create_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_instance - ] = mock_object + ] = mock_rpc request = {} await client.create_instance(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1789,22 +1792,23 @@ async def test_get_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_instance - ] = mock_object + ] = mock_rpc request = {} await client.get_instance(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2164,22 +2168,23 @@ async def test_list_instances_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_instances - ] = mock_object + ] = mock_rpc request = {} await client.list_instances(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_instances(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2545,22 +2550,23 @@ async def test_update_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_instance - ] = mock_object + ] = mock_rpc request = {} await client.update_instance(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.update_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2784,8 +2790,9 @@ def test_partial_update_instance_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.partial_update_instance(request) @@ -2841,26 +2848,28 @@ async def test_partial_update_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.partial_update_instance - ] = mock_object + ] = mock_rpc request = {} await client.partial_update_instance(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.partial_update_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3220,22 +3229,23 @@ async def test_delete_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_instance - ] = mock_object + ] = mock_rpc request = {} await client.delete_instance(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3522,8 +3532,9 @@ def test_create_cluster_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_cluster(request) @@ -3577,26 +3588,28 @@ async def test_create_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_cluster - ] = mock_object + ] = mock_rpc request = {} await client.create_cluster(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3975,22 +3988,23 @@ async def test_get_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_cluster - ] = mock_object + ] = mock_rpc request = {} await client.get_cluster(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4350,22 +4364,23 @@ async def test_list_clusters_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_clusters - ] = mock_object + ] = mock_rpc request = {} await client.list_clusters(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_clusters(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4663,8 +4678,9 @@ def test_update_cluster_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_cluster(request) @@ -4718,26 +4734,28 @@ async def test_update_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_cluster - ] = mock_object + ] = mock_rpc request = {} await client.update_cluster(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4952,8 +4970,9 @@ def test_partial_update_cluster_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.partial_update_cluster(request) @@ -5009,26 +5028,28 @@ async def test_partial_update_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.partial_update_cluster - ] = mock_object + ] = mock_rpc request = {} await client.partial_update_cluster(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.partial_update_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5388,22 +5409,23 @@ async def test_delete_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_cluster - ] = mock_object + ] = mock_rpc request = {} await client.delete_cluster(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5765,22 +5787,23 @@ async def test_create_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_app_profile - ] = mock_object + ] = mock_rpc request = {} await client.create_app_profile(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.create_app_profile(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6167,22 +6190,23 @@ async def test_get_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_app_profile - ] = mock_object + ] = mock_rpc request = {} await client.get_app_profile(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_app_profile(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6547,22 +6571,23 @@ async def test_list_app_profiles_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_app_profiles - ] = mock_object + ] = mock_rpc request = {} await client.list_app_profiles(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_app_profiles(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7076,8 +7101,9 @@ def test_update_app_profile_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_app_profile(request) @@ -7133,26 +7159,28 @@ async def test_update_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_app_profile - ] = mock_object + ] = mock_rpc request = {} await client.update_app_profile(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_app_profile(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7524,22 +7552,23 @@ async def test_delete_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_app_profile - ] = mock_object + ] = mock_rpc request = {} await client.delete_app_profile(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_app_profile(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7893,22 +7922,23 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8275,22 +8305,23 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.set_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.set_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8667,22 +8698,23 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.test_iam_permissions - ] = mock_object + ] = mock_rpc request = {} await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.test_iam_permissions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9075,22 +9107,23 @@ async def test_list_hot_tablets_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_hot_tablets - ] = mock_object + ] = mock_rpc request = {} await client.list_hot_tablets(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_hot_tablets(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 580189044..c9455cd5f 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1362,22 +1362,23 @@ async def test_create_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_table - ] = mock_object + ] = mock_rpc request = {} await client.create_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.create_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1706,8 +1707,9 @@ def test_create_table_from_snapshot_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_table_from_snapshot(request) @@ -1763,26 +1765,28 @@ async def test_create_table_from_snapshot_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_table_from_snapshot - ] = mock_object + ] = mock_rpc request = {} await client.create_table_from_snapshot(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_table_from_snapshot(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2161,22 +2165,23 @@ async def test_list_tables_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_tables - ] = mock_object + ] = mock_rpc request = {} await client.list_tables(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_tables(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2723,22 +2728,23 @@ async def test_get_table_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_table - ] = mock_object + ] = mock_rpc request = {} await client.get_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3027,8 +3033,9 @@ def test_update_table_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_table(request) @@ -3082,26 +3089,28 @@ async def test_update_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_table - ] = mock_object + ] = mock_rpc request = {} await client.update_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3451,22 +3460,23 @@ async def test_delete_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_table - ] = mock_object + ] = mock_rpc request = {} await client.delete_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3751,8 +3761,9 @@ def test_undelete_table_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.undelete_table(request) @@ -3806,26 +3817,28 @@ async def test_undelete_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.undelete_table - ] = mock_object + ] = mock_rpc request = {} await client.undelete_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.undelete_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4129,8 +4142,9 @@ def test_create_authorized_view_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_authorized_view(request) @@ -4186,26 +4200,28 @@ async def test_create_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_authorized_view - ] = mock_object + ] = mock_rpc request = {} await client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4597,22 +4613,23 @@ async def test_list_authorized_views_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_authorized_views - ] = mock_object + ] = mock_rpc request = {} await client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_authorized_views(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5192,22 +5209,23 @@ async def test_get_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_authorized_view - ] = mock_object + ] = mock_rpc request = {} await client.get_authorized_view(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5522,8 +5540,9 @@ def test_update_authorized_view_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_authorized_view(request) @@ -5579,26 +5598,28 @@ async def test_update_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_authorized_view - ] = mock_object + ] = mock_rpc request = {} await client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5973,22 +5994,23 @@ async def test_delete_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_authorized_view - ] = mock_object + ] = mock_rpc request = {} await client.delete_authorized_view(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6358,22 +6380,23 @@ async def test_modify_column_families_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.modify_column_families - ] = mock_object + ] = mock_rpc request = {} await client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.modify_column_families(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6756,22 +6779,23 @@ async def test_drop_row_range_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.drop_row_range - ] = mock_object + ] = mock_rpc request = {} await client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.drop_row_range(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7045,22 +7069,23 @@ async def test_generate_consistency_token_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.generate_consistency_token - ] = mock_object + ] = mock_rpc request = {} await client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.generate_consistency_token(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7432,22 +7457,23 @@ async def test_check_consistency_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.check_consistency - ] = mock_object + ] = mock_rpc request = {} await client.check_consistency(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.check_consistency(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7767,8 +7793,9 @@ def test_snapshot_table_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.snapshot_table(request) @@ -7822,26 +7849,28 @@ async def test_snapshot_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.snapshot_table - ] = mock_object + ] = mock_rpc request = {} await client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.snapshot_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8227,22 +8256,23 @@ async def test_get_snapshot_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_snapshot - ] = mock_object + ] = mock_rpc request = {} await client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_snapshot(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8596,22 +8626,23 @@ async def test_list_snapshots_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_snapshots - ] = mock_object + ] = mock_rpc request = {} await client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_snapshots(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9148,22 +9179,23 @@ async def test_delete_snapshot_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_snapshot - ] = mock_object + ] = mock_rpc request = {} await client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_snapshot(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9450,8 +9482,9 @@ def test_create_backup_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_backup(request) @@ -9505,26 +9538,28 @@ async def test_create_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_backup - ] = mock_object + ] = mock_rpc request = {} await client.create_backup(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9904,22 +9939,23 @@ async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_as ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_backup - ] = mock_object + ] = mock_rpc request = {} await client.get_backup(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -10285,22 +10321,23 @@ async def test_update_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_backup - ] = mock_object + ] = mock_rpc request = {} await client.update_backup(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.update_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -10659,22 +10696,23 @@ async def test_delete_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_backup - ] = mock_object + ] = mock_rpc request = {} await client.delete_backup(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -11021,22 +11059,23 @@ async def test_list_backups_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_backups - ] = mock_object + ] = mock_rpc request = {} await client.list_backups(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_backups(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -11528,8 +11567,9 @@ def test_restore_table_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.restore_table(request) @@ -11583,26 +11623,28 @@ async def test_restore_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.restore_table - ] = mock_object + ] = mock_rpc request = {} await client.restore_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.restore_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -11815,8 +11857,9 @@ def test_copy_backup_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.copy_backup(request) @@ -11870,26 +11913,28 @@ async def test_copy_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.copy_backup - ] = mock_object + ] = mock_rpc request = {} await client.copy_backup(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.copy_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -12268,22 +12313,23 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -12650,22 +12696,23 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.set_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.set_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -13042,22 +13089,23 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.test_iam_permissions - ] = mock_object + ] = mock_rpc request = {} await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.test_iam_permissions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py index 60cc7fd6e..2be864732 100644 --- a/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -1239,22 +1239,23 @@ async def test_read_rows_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.read_rows - ] = mock_object + ] = mock_rpc request = {} await client.read_rows(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.read_rows(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1614,22 +1615,23 @@ async def test_sample_row_keys_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.sample_row_keys - ] = mock_object + ] = mock_rpc request = {} await client.sample_row_keys(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.sample_row_keys(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1985,22 +1987,23 @@ async def test_mutate_row_async_use_cached_wrapped_rpc(transport: str = "grpc_as ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.mutate_row - ] = mock_object + ] = mock_rpc request = {} await client.mutate_row(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.mutate_row(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2404,22 +2407,23 @@ async def test_mutate_rows_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.mutate_rows - ] = mock_object + ] = mock_rpc request = {} await client.mutate_rows(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.mutate_rows(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2804,22 +2808,23 @@ async def test_check_and_mutate_row_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.check_and_mutate_row - ] = mock_object + ] = mock_rpc request = {} await client.check_and_mutate_row(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.check_and_mutate_row(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3334,22 +3339,23 @@ async def test_ping_and_warm_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.ping_and_warm - ] = mock_object + ] = mock_rpc request = {} await client.ping_and_warm(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.ping_and_warm(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3699,22 +3705,23 @@ async def test_read_modify_write_row_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.read_modify_write_row - ] = mock_object + ] = mock_rpc request = {} await client.read_modify_write_row(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.read_modify_write_row(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4123,22 +4130,23 @@ async def test_generate_initial_change_stream_partitions_async_use_cached_wrappe ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.generate_initial_change_stream_partitions - ] = mock_object + ] = mock_rpc request = {} await client.generate_initial_change_stream_partitions(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.generate_initial_change_stream_partitions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4523,22 +4531,23 @@ async def test_read_change_stream_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.read_change_stream - ] = mock_object + ] = mock_rpc request = {} await client.read_change_stream(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.read_change_stream(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4906,22 +4915,23 @@ async def test_execute_query_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.execute_query - ] = mock_object + ] = mock_rpc request = {} await client.execute_query(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.execute_query(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio From 678a06c8853bfc6bbc7d14419f1f0a6cbb371e0a Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 9 Aug 2024 16:36:41 -0600 Subject: [PATCH 10/12] chore(docs): add execute query docs pages (#1014) --- .../async_data_execute_query_iterator.rst | 6 +++ .../async_data_execute_query_metadata.rst | 6 +++ .../async_data_execute_query_values.rst | 6 +++ docs/async_data_client/async_data_usage.rst | 3 ++ google/cloud/bigtable/data/_async/client.py | 22 ++++----- .../_async/execute_query_iterator.py | 45 +++++++++++-------- .../bigtable/data/execute_query/metadata.py | 33 ++++++++++++++ .../bigtable/data/execute_query/values.py | 8 +++- 8 files changed, 98 insertions(+), 31 deletions(-) create mode 100644 docs/async_data_client/async_data_execute_query_iterator.rst create mode 100644 docs/async_data_client/async_data_execute_query_metadata.rst create mode 100644 docs/async_data_client/async_data_execute_query_values.rst diff --git a/docs/async_data_client/async_data_execute_query_iterator.rst b/docs/async_data_client/async_data_execute_query_iterator.rst new file mode 100644 index 000000000..b911fab7f --- /dev/null +++ b/docs/async_data_client/async_data_execute_query_iterator.rst @@ -0,0 +1,6 @@ +Execute Query Iterator Async +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
autoclass:: google.cloud.bigtable.data.execute_query.ExecuteQueryIteratorAsync + :members: + :show-inheritance: diff --git a/docs/async_data_client/async_data_execute_query_metadata.rst b/docs/async_data_client/async_data_execute_query_metadata.rst new file mode 100644 index 000000000..69add630d --- /dev/null +++ b/docs/async_data_client/async_data_execute_query_metadata.rst @@ -0,0 +1,6 @@ +Execute Query Metadata +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.execute_query.metadata + :members: + :show-inheritance: diff --git a/docs/async_data_client/async_data_execute_query_values.rst b/docs/async_data_client/async_data_execute_query_values.rst new file mode 100644 index 000000000..6c4fb71c1 --- /dev/null +++ b/docs/async_data_client/async_data_execute_query_values.rst @@ -0,0 +1,6 @@ +Execute Query Values +~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.execute_query.values + :members: + :show-inheritance: diff --git a/docs/async_data_client/async_data_usage.rst b/docs/async_data_client/async_data_usage.rst index 8843b506b..61d5837fd 100644 --- a/docs/async_data_client/async_data_usage.rst +++ b/docs/async_data_client/async_data_usage.rst @@ -13,3 +13,6 @@ Async Data Client async_data_mutations async_data_read_modify_write_rules async_data_exceptions + async_data_execute_query_iterator + async_data_execute_query_values + async_data_execute_query_metadata diff --git a/google/cloud/bigtable/data/_async/client.py b/google/cloud/bigtable/data/_async/client.py index 600937df8..82a874918 100644 --- a/google/cloud/bigtable/data/_async/client.py +++ b/google/cloud/bigtable/data/_async/client.py @@ -456,38 +456,38 @@ async def execute_query( retryable_errors list until operation_timeout is reached. Args: - - query: Query to be run on Bigtable instance. The query can use ``@param`` + query: Query to be run on Bigtable instance. The query can use ``@param`` placeholders to use parameter interpolation on the server. Values for all parameters should be provided in ``parameters``. Types of parameters are inferred but should be provided in ``parameter_types`` if the inference is not possible (i.e. when value can be None, an empty list or an empty dict). - - instance_id: The Bigtable instance ID to perform the query on. + instance_id: The Bigtable instance ID to perform the query on. instance_id is combined with the client's project to fully specify the instance. - - parameters: Dictionary with values for all parameters used in the ``query``. - - parameter_types: Dictionary with types of parameters used in the ``query``. + parameters: Dictionary with values for all parameters used in the ``query``. + parameter_types: Dictionary with types of parameters used in the ``query``. Required to contain entries only for parameters whose type cannot be detected automatically (i.e. the value can be None, an empty list or an empty dict). - - app_profile_id: The app profile to associate with requests. + app_profile_id: The app profile to associate with requests. https://cloud.google.com/bigtable/docs/app-profiles - - operation_timeout: the time budget for the entire operation, in seconds. + operation_timeout: the time budget for the entire operation, in seconds. Failed requests will be retried within the budget. Defaults to 600 seconds. - - attempt_timeout: the time budget for an individual network request, in seconds. + attempt_timeout: the time budget for an individual network request, in seconds. 
If it takes longer than this time to complete, the request will be cancelled with a DeadlineExceeded exception, and a retry will be attempted. Defaults to the 20 seconds. If None, defaults to operation_timeout. - - retryable_errors: a list of errors that will be retried if encountered. + retryable_errors: a list of errors that will be retried if encountered. Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) Returns: - - an asynchronous iterator that yields rows returned by the query + ExecuteQueryIteratorAsync: an asynchronous iterator that yields rows returned by the query Raises: - - DeadlineExceeded: raised after operation timeout + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions from any retries that failed - - GoogleAPIError: raised if the request encounters an unrecoverable error + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error """ warnings.warn( "ExecuteQuery is in preview and may change in the future.", diff --git a/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py index 3660c0b0f..32081939b 100644 --- a/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py +++ b/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -23,6 +23,7 @@ Optional, Sequence, Tuple, + TYPE_CHECKING, ) from google.api_core import retry as retries @@ -43,11 +44,14 @@ ExecuteQueryRequest as ExecuteQueryRequestPB, ) +if TYPE_CHECKING: + from google.cloud.bigtable.data import BigtableDataClientAsync + class ExecuteQueryIteratorAsync: """ ExecuteQueryIteratorAsync handles collecting streaming responses from the - ExecuteQuery RPC and parsing them to `QueryResultRow`s. + ExecuteQuery RPC and parsing them to QueryResultRows. ExecuteQueryIteratorAsync implements Asynchronous Iterator interface and can be used with "async for" syntax. It is also a context manager. @@ -55,23 +59,25 @@ class ExecuteQueryIteratorAsync: It is **not thread-safe**. It should not be used by multiple asyncio Tasks. Args: - client (google.cloud.bigtable.data._async.BigtableDataClientAsync): bigtable client - instance_id (str): id of the instance on which the query is executed - request_body (Dict[str, Any]): dict representing the body of the ExecuteQueryRequest - attempt_timeout (float | None): the time budget for the entire operation, in seconds. - Failed requests will be retried within the budget. - Defaults to 600 seconds. - operation_timeout (float): the time budget for an individual network request, in seconds. - If it takes longer than this time to complete, the request will be cancelled with - a DeadlineExceeded exception, and a retry will be attempted. - Defaults to the 20 seconds. If None, defaults to operation_timeout. - req_metadata (Sequence[Tuple[str, str]]): metadata used while sending the gRPC request - retryable_excs (List[type[Exception]]): a list of errors that will be retried if encountered. + client: bigtable client + instance_id: id of the instance on which the query is executed + request_body: dict representing the body of the ExecuteQueryRequest + attempt_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to 600 seconds. + operation_timeout: the time budget for an individual network request, in seconds. 
+ If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the 20 seconds. If None, defaults to operation_timeout. + req_metadata: metadata used while sending the gRPC request + retryable_excs: a list of errors that will be retried if encountered. + Raises: + RuntimeError: if the instance is not created within an async event loop context. """ def __init__( self, - client: Any, + client: BigtableDataClientAsync, instance_id: str, app_profile_id: Optional[str], request_body: Dict[str, Any], @@ -112,15 +118,18 @@ def __init__( ) from e @property - def is_closed(self): + def is_closed(self) -> bool: + """Returns True if the iterator is closed, False otherwise.""" return self._is_closed @property - def app_profile_id(self): + def app_profile_id(self) -> Optional[str]: + """Returns the app_profile_id of the iterator.""" return self._app_profile_id @property - def table_name(self): + def table_name(self) -> Optional[str]: + """Returns the table_name of the iterator.""" return self._table_name async def _make_request_with_resume_token(self): @@ -176,7 +185,7 @@ async def _next_impl(self) -> AsyncIterator[QueryResultRow]: yield result await self.close() - async def __anext__(self): + async def __anext__(self) -> QueryResultRow: if self._is_closed: raise StopAsyncIteration return await self._result_generator.__anext__() diff --git a/google/cloud/bigtable/data/execute_query/metadata.py b/google/cloud/bigtable/data/execute_query/metadata.py index 4c08cbad3..0c9cf9697 100644 --- a/google/cloud/bigtable/data/execute_query/metadata.py +++ b/google/cloud/bigtable/data/execute_query/metadata.py @@ -90,6 +90,8 @@ def __repr__(self) -> str: return self.__str__() class Struct(_NamedList[Type], Type): + """Struct SQL type.""" + @classmethod def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Struct": if type_pb is None: @@ -120,6 +122,8 @@ def __str__(self): return super(_NamedList, self).__str__() class Array(Type): + """Array SQL type.""" + def __init__(self, element_type: "SqlType.Type"): if isinstance(element_type, SqlType.Array): raise ValueError("Arrays of arrays are not supported.") @@ -148,6 +152,8 @@ def __str__(self) -> str: return f"{self.__class__.__name__}<{str(self.element_type)}>" class Map(Type): + """Map SQL type.""" + def __init__(self, key_type: "SqlType.Type", value_type: "SqlType.Type"): self._key_type = key_type self._value_type = value_type @@ -189,32 +195,44 @@ def __str__(self) -> str: ) class Bytes(Type): + """Bytes SQL type.""" + expected_type = bytes value_pb_dict_field_name = "bytes_value" type_field_name = "bytes_type" class String(Type): + """String SQL type.""" + expected_type = str value_pb_dict_field_name = "string_value" type_field_name = "string_type" class Int64(Type): + """Int64 SQL type.""" + expected_type = int value_pb_dict_field_name = "int_value" type_field_name = "int64_type" class Float64(Type): + """Float64 SQL type.""" + expected_type = float value_pb_dict_field_name = "float_value" type_field_name = "float64_type" class Bool(Type): + """Bool SQL type.""" + expected_type = bool value_pb_dict_field_name = "bool_value" type_field_name = "bool_type" class Timestamp(Type): """ + Timestamp SQL type. + Timestamp supports :class:`DatetimeWithNanoseconds` but Bigtable SQL does not currently support nanoseconds precision. We support this for potential compatibility in the future. Nanoseconds are currently ignored. 
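To make the execute_query documentation above concrete, here is a minimal usage sketch. It is illustrative only and not part of any commit in this series: the project, instance, table, and column names are placeholders, and the call shape is inferred from the docstrings and SqlType definitions shown in this patch.

    import asyncio

    from google.cloud.bigtable.data import BigtableDataClientAsync
    from google.cloud.bigtable.data.execute_query.metadata import SqlType


    async def main():
        client = BigtableDataClientAsync(project="my-project")
        try:
            # "@row_key" is interpolated server-side from the parameters dict;
            # the explicit SqlType.Bytes() entry is only needed when the type
            # cannot be inferred from the value (e.g. the value may be None).
            result = await client.execute_query(
                "SELECT * FROM my_table WHERE _key = @row_key",
                instance_id="my-instance",
                parameters={"row_key": b"user#123"},
                parameter_types={"row_key": SqlType.Bytes()},
            )
            async for row in result:  # result is an ExecuteQueryIteratorAsync
                print(row)
        finally:
            await client.close()


    asyncio.run(main())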
@@ -243,6 +261,8 @@ def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: return {"timestamp_value": ts} class Date(Type): + """Date SQL type.""" + type_field_name = "date_type" expected_type = datetime.date @@ -265,10 +285,23 @@ def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: class Metadata: + """ + Base class for metadata returned by the ExecuteQuery operation. + """ + pass class ProtoMetadata(Metadata): + """ + Metadata class for the ExecuteQuery operation. + + Args: + columns (List[Tuple[Optional[str], SqlType.Type]]): List of column + metadata tuples. Each tuple contains the column name and the column + type. + """ + class Column: def __init__(self, column_name: Optional[str], column_type: SqlType.Type): self._column_name = column_name diff --git a/google/cloud/bigtable/data/execute_query/values.py b/google/cloud/bigtable/data/execute_query/values.py index 394bef71e..80a0bff6f 100644 --- a/google/cloud/bigtable/data/execute_query/values.py +++ b/google/cloud/bigtable/data/execute_query/values.py @@ -112,8 +112,12 @@ def __repr__(self) -> str: class QueryResultRow(_NamedList[ExecuteQueryValueType]): - pass + """ + Represents a single row of the result + """ class Struct(_NamedList[ExecuteQueryValueType]): - pass + """ + Represents a struct value in the result + """ From a91fcb56e5f9c246c2f4a25a4670725847a0362c Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Mon, 12 Aug 2024 14:57:19 -0600 Subject: [PATCH 11/12] chore(docs): add async note to docs (#984) --- README.rst | 8 +++++++ docs/async_data_client/async_data_client.rst | 6 +++++ docs/async_data_client/async_data_table.rst | 5 ++++ docs/scripts/patch_devsite_toc.py | 24 ++++++++++++++++---- 4 files changed, 39 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index 63c50591c..2ecbd0185 100644 --- a/README.rst +++ b/README.rst @@ -34,8 +34,16 @@ remaining exclusively in the existing synchronous client. Feedback and bug reports are welcome at cbt-python-client-v3-feedback@google.com, or through the Github `issue tracker`_. + + .. note:: + + It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's + performance benefits, the codebase should be designed to be async from the ground up. + + .. _issue tracker: https://github.com/googleapis/python-bigtable/issues + Quick Start ----------- diff --git a/docs/async_data_client/async_data_client.rst b/docs/async_data_client/async_data_client.rst index c5cc70740..0e1d9e23e 100644 --- a/docs/async_data_client/async_data_client.rst +++ b/docs/async_data_client/async_data_client.rst @@ -1,6 +1,12 @@ Bigtable Data Client Async ~~~~~~~~~~~~~~~~~~~~~~~~~~ + .. note:: + + It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's + performance benefits, the codebase should be designed to be async from the ground up. + + .. autoclass:: google.cloud.bigtable.data._async.client.BigtableDataClientAsync :members: :show-inheritance: diff --git a/docs/async_data_client/async_data_table.rst b/docs/async_data_client/async_data_table.rst index a977beb6a..3b7973e8e 100644 --- a/docs/async_data_client/async_data_table.rst +++ b/docs/async_data_client/async_data_table.rst @@ -1,6 +1,11 @@ Table Async ~~~~~~~~~~~ + .. note:: + + It is generally not recommended to use the async client in an otherwise synchronous codebase. 
To make use of asyncio's + performance benefits, the codebase should be designed to be async from the ground up. + .. autoclass:: google.cloud.bigtable.data._async.client.TableAsync :members: :show-inheritance: diff --git a/docs/scripts/patch_devsite_toc.py b/docs/scripts/patch_devsite_toc.py index 6338128dd..456d0af7b 100644 --- a/docs/scripts/patch_devsite_toc.py +++ b/docs/scripts/patch_devsite_toc.py @@ -88,19 +88,31 @@ def __init__(self, dir_name, index_file_name): index_file_path = os.path.join(dir_name, index_file_name) # find set of files referenced by the index file with open(index_file_path, "r") as f: - self.title = f.readline().strip() + self.title = None in_toc = False self.items = [] for line in f: # ignore empty lines if not line.strip(): continue + # add files explictly included in the toc + if line.startswith(".. include::"): + file_base = os.path.splitext(line.split("::")[1].strip())[0] + self.items.append( + self.extract_toc_entry( + file_base, file_title=file_base.capitalize() + ) + ) + continue if line.startswith(".. toctree::"): in_toc = True continue # ignore directives if ":" in line: continue + # set tile as first line with no directive + if self.title is None: + self.title = line.strip() if not in_toc: continue # bail when toc indented block is done @@ -109,14 +121,16 @@ def __init__(self, dir_name, index_file_name): # extract entries self.items.append(self.extract_toc_entry(line.strip())) - def extract_toc_entry(self, file_name): + def extract_toc_entry(self, file_name, file_title=None): """ Given the name of a file, extract the title and href for the toc entry, and return as a dictionary """ # load the file to get the title with open(f"{self.dir_name}/{file_name}.rst", "r") as f2: - file_title = f2.readline().strip() + if file_title is None: + # use first line as title if not provided + file_title = f2.readline().strip() return {"name": file_title, "href": f"{file_name}.md"} def to_dict(self): @@ -143,7 +157,9 @@ def validate_toc(toc_file_path, expected_section_list, added_sections): current_toc = yaml.safe_load(open(toc_file_path, "r")) # make sure the set of sections matches what we expect found_sections = [d["name"] for d in current_toc[0]["items"]] - assert found_sections == expected_section_list + assert ( + found_sections == expected_section_list + ), f"Expected {expected_section_list}, found {found_sections}" # make sure each customs ection is in the toc for section in added_sections: assert section.title in found_sections From f7905007d6e2a2a23b941f42d03ceee00715dcbb Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:32:50 -0700 Subject: [PATCH 12/12] chore(main): release 2.26.0 (#1006) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .release-please-manifest.json | 2 +- CHANGELOG.md | 21 +++++++++++++++++++ google/cloud/bigtable/gapic_version.py | 2 +- google/cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 26 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d6c7e9d68..d6de1e7f8 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.25.0" + ".": "2.26.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 92b498748..09bffa32d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ 
-4,6 +4,27 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.26.0](https://github.com/googleapis/python-bigtable/compare/v2.25.0...v2.26.0) (2024-08-12) + + +### Features + +* Add fields and the BackupType proto for Hot Backups ([#1010](https://github.com/googleapis/python-bigtable/issues/1010)) ([b95801f](https://github.com/googleapis/python-bigtable/commit/b95801ffa8081e0072232247fbc5879105c109a6)) +* Add MergeToCell to Mutation APIs ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad)) +* Add min, max, hll aggregators and more types ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad)) +* Async execute query client ([#1011](https://github.com/googleapis/python-bigtable/issues/1011)) ([45bc8c4](https://github.com/googleapis/python-bigtable/commit/45bc8c4a0fe567ce5e0126a1a70e7eb3dca93e92)) + + +### Bug Fixes + +* Use single routing metadata header ([#1005](https://github.com/googleapis/python-bigtable/issues/1005)) ([20eeb0a](https://github.com/googleapis/python-bigtable/commit/20eeb0a68d7b44d07a6d84bc7a7e040ad63bb96d)) + + +### Documentation + +* Add clarification around SQL timestamps ([#1012](https://github.com/googleapis/python-bigtable/issues/1012)) ([6e80190](https://github.com/googleapis/python-bigtable/commit/6e801900bbe9385d3b579b8c3327c87c3617d92f)) +* Corrected various type documentation ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad)) + ## [2.25.0](https://github.com/googleapis/python-bigtable/compare/v2.24.0...v2.25.0) (2024-07-18) diff --git a/google/cloud/bigtable/gapic_version.py b/google/cloud/bigtable/gapic_version.py index e5fa8f60b..d56eed5c5 100644 --- a/google/cloud/bigtable/gapic_version.py +++ b/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.25.0" # {x-release-please-version} +__version__ = "2.26.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin/gapic_version.py b/google/cloud/bigtable_admin/gapic_version.py index e5fa8f60b..d56eed5c5 100644 --- a/google/cloud/bigtable_admin/gapic_version.py +++ b/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.25.0" # {x-release-please-version} +__version__ = "2.26.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin_v2/gapic_version.py b/google/cloud/bigtable_admin_v2/gapic_version.py index e5fa8f60b..d56eed5c5 100644 --- a/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.25.0" # {x-release-please-version} +__version__ = "2.26.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_v2/gapic_version.py b/google/cloud/bigtable_v2/gapic_version.py index e5fa8f60b..d56eed5c5 100644 --- a/google/cloud/bigtable_v2/gapic_version.py +++ b/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.25.0" # {x-release-please-version} +__version__ = "2.26.0" # {x-release-please-version}
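
The async-client note added to the README and docs in patch 11 recommends keeping the call path async end to end. A minimal, self-contained sketch of that structure with the data client follows; it is illustrative only and not part of any commit in this series, and the project, instance, table, and row-key values are placeholders.

    import asyncio

    from google.cloud.bigtable.data import BigtableDataClientAsync


    async def main():
        # All I/O stays inside the event loop: the client is created, used,
        # and closed without blocking a synchronous caller.
        client = BigtableDataClientAsync(project="my-project")
        try:
            table = client.get_table("my-instance", "my-table")
            row = await table.read_row(b"user#123")
            print(row)
        finally:
            await client.close()


    asyncio.run(main())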