- As of January 1, 2020 this library no longer supports Python 2 on the latest released version.
+
+ As of January 1, 2020 this library no longer supports Python 2 on the latest released version.
Library versions released prior to that date will continue to be available. For more information please
visit
Python 2 support on Google Cloud.
diff --git a/docs/api-reference.rst b/docs/api-reference.rst
deleted file mode 100644
index 41046f78bf..0000000000
--- a/docs/api-reference.rst
+++ /dev/null
@@ -1,34 +0,0 @@
-API Reference
-=============
-
-The following classes and methods constitute the Spanner client.
-Most likely, you will be interacting almost exclusively with these:
-
-.. toctree::
- :maxdepth: 1
-
- client-api
- instance-api
- database-api
- table-api
- session-api
- keyset-api
- snapshot-api
- batch-api
- transaction-api
- streamed-api
-
-
-The classes and methods above depend on the following, lower-level
-classes and methods. Documentation for these is provided for completion,
-and some advanced use cases may wish to interact with these directly:
-
-.. toctree::
- :maxdepth: 1
-
- spanner_v1/services
- spanner_v1/types
- spanner_admin_database_v1/services
- spanner_admin_database_v1/types
- spanner_admin_instance_v1/services
- spanner_admin_instance_v1/types
diff --git a/docs/client-usage.rst b/docs/client-usage.rst
index ce13bf4aa0..7ba3390e59 100644
--- a/docs/client-usage.rst
+++ b/docs/client-usage.rst
@@ -1,5 +1,5 @@
-Spanner Client
-==============
+Spanner Client Usage
+====================
.. _spanner-client:
diff --git a/docs/conf.py b/docs/conf.py
index 7d53976561..010a6b6cda 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,4 +1,18 @@
# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
#
# google-cloud-spanner documentation build configuration file
#
@@ -29,7 +43,7 @@
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = "1.5.5"
+needs_sphinx = "4.5.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -63,12 +77,12 @@
# The encoding of source files.
# source_encoding = 'utf-8-sig'
-# The master toctree document.
-master_doc = "index"
+# The root toctree document.
+root_doc = "index"
# General information about the project.
project = u"google-cloud-spanner"
-copyright = u"2019, Google"
+copyright = u"2025, Google, LLC"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
@@ -97,6 +111,7 @@
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
+ "**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
@@ -142,7 +157,7 @@
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-spanner",
"github_user": "googleapis",
- "github_repo": "python-spanner",
+ "github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
@@ -252,13 +267,13 @@
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
- #'papersize': 'letterpaper',
+ # 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
- #'pointsize': '10pt',
+ # 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
- #'preamble': '',
+ # 'preamble': '',
# Latex figure (float) alignment
- #'figure_align': 'htbp',
+ # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
@@ -266,7 +281,7 @@
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
- master_doc,
+ root_doc,
"google-cloud-spanner.tex",
u"google-cloud-spanner Documentation",
author,
@@ -301,9 +316,9 @@
# (source start file, name, description, authors, manual section).
man_pages = [
(
- master_doc,
+ root_doc,
"google-cloud-spanner",
- u"google-cloud-spanner Documentation",
+ "google-cloud-spanner Documentation",
[author],
1,
)
@@ -320,9 +335,9 @@
# dir menu entry, description, category)
texinfo_documents = [
(
- master_doc,
+ root_doc,
"google-cloud-spanner",
- u"google-cloud-spanner Documentation",
+ "google-cloud-spanner Documentation",
author,
"google-cloud-spanner",
"google-cloud-spanner Library",
@@ -345,10 +360,15 @@
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
- "python": ("http://python.readthedocs.org/en/latest/", None),
- "google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
- "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
- "grpc": ("https://grpc.io/grpc/python/", None),
+ "python": ("https://python.readthedocs.org/en/latest/", None),
+ "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
+ "google.api_core": (
+ "https://googleapis.dev/python/google-api-core/latest/",
+ None,
+ ),
+ "grpc": ("https://grpc.github.io/grpc/python/", None),
+ "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
+ "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
diff --git a/docs/database-usage.rst b/docs/database-usage.rst
index 629f1ab28a..afcfa06cb2 100644
--- a/docs/database-usage.rst
+++ b/docs/database-usage.rst
@@ -1,5 +1,5 @@
-Database Admin
-==============
+Database Admin Usage
+====================
After creating an :class:`~google.cloud.spanner_v1.instance.Instance`, you can
interact with individual databases for that instance.
diff --git a/docs/index.rst b/docs/index.rst
index a4ab1b27d7..0de0483409 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -5,27 +5,48 @@
Usage Documentation
-------------------
.. toctree::
- :maxdepth: 1
- :titlesonly:
+ :maxdepth: 2
client-usage
- instance-usage
- database-usage
table-usage
batch-usage
snapshot-usage
transaction-usage
+ database-usage
+ instance-usage
+
API Documentation
-----------------
.. toctree::
:maxdepth: 1
:titlesonly:
- api-reference
advanced-session-pool-topics
opentelemetry-tracing
+ spanner_v1/client
+ spanner_v1/instance
+ spanner_v1/database
+ spanner_v1/table
+ spanner_v1/session
+ spanner_v1/keyset
+ spanner_v1/snapshot
+ spanner_v1/batch
+ spanner_v1/transaction
+ spanner_v1/streamed
+
+ spanner_v1/services_
+ spanner_v1/types_
+ spanner_admin_database_v1/services_
+ spanner_admin_database_v1/types_
+ spanner_admin_database_v1/database_admin
+ spanner_admin_instance_v1/services_
+ spanner_admin_instance_v1/types_
+ spanner_admin_instance_v1/instance_admin
+
+
+
Changelog
---------
@@ -35,3 +56,8 @@ For a list of all ``google-cloud-spanner`` releases:
:maxdepth: 2
changelog
+
+.. toctree::
+ :hidden:
+
+ summary_overview.md
diff --git a/docs/instance-usage.rst b/docs/instance-usage.rst
index 55042c2df3..b45b69acc6 100644
--- a/docs/instance-usage.rst
+++ b/docs/instance-usage.rst
@@ -1,5 +1,5 @@
-Instance Admin
-==============
+Instance Admin Usage
+====================
After creating a :class:`~google.cloud.spanner_v1.client.Client`, you can
interact with individual instances for a project.
diff --git a/docs/multiprocessing.rst b/docs/multiprocessing.rst
index 1cb29d4ca9..536d17b2ea 100644
--- a/docs/multiprocessing.rst
+++ b/docs/multiprocessing.rst
@@ -1,7 +1,7 @@
.. note::
- Because this client uses :mod:`grpcio` library, it is safe to
+ Because this client uses :mod:`grpc` library, it is safe to
share instances across threads. In multiprocessing scenarios, the best
practice is to create client instances *after* the invocation of
- :func:`os.fork` by :class:`multiprocessing.Pool` or
+ :func:`os.fork` by :class:`multiprocessing.pool.Pool` or
:class:`multiprocessing.Process`.
diff --git a/docs/opentelemetry-tracing.rst b/docs/opentelemetry-tracing.rst
index 9b3dea276f..c581d2cb87 100644
--- a/docs/opentelemetry-tracing.rst
+++ b/docs/opentelemetry-tracing.rst
@@ -8,10 +8,8 @@ To take advantage of these traces, we first need to install OpenTelemetry:
.. code-block:: sh
- pip install opentelemetry-api opentelemetry-sdk opentelemetry-instrumentation
-
- # [Optional] Installs the cloud monitoring exporter, however you can use any exporter of your choice
- pip install opentelemetry-exporter-google-cloud
+ pip install opentelemetry-api opentelemetry-sdk
+ pip install opentelemetry-exporter-gcp-trace
We also need to tell OpenTelemetry which exporter to use. To export Spanner traces to `Cloud Tracing
`_, add the following lines to your application:
@@ -19,22 +17,80 @@ We also need to tell OpenTelemetry which exporter to use. To export Spanner trac
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
- from opentelemetry.trace.sampling import ProbabilitySampler
+ from opentelemetry.sdk.trace.sampling import TraceIdRatioBased
from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
- # BatchExportSpanProcessor exports spans to Cloud Trace
+ # BatchSpanProcessor exports spans to Cloud Trace
# in a seperate thread to not block on the main thread
- from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
# Create and export one trace every 1000 requests
- sampler = ProbabilitySampler(1/1000)
- # Use the default tracer provider
- trace.set_tracer_provider(TracerProvider(sampler=sampler))
- trace.get_tracer_provider().add_span_processor(
+ sampler = TraceIdRatioBased(1/1000)
+ tracer_provider = TracerProvider(sampler=sampler)
+ tracer_provider.add_span_processor(
# Initialize the cloud tracing exporter
- BatchExportSpanProcessor(CloudTraceSpanExporter())
+ BatchSpanProcessor(CloudTraceSpanExporter())
+ )
+ observability_options = dict(
+ tracer_provider=tracer_provider,
+
+        # By default extended_tracing is set to True for
+        # legacy reasons, to avoid breaking changes. You
+        # can also control it with the environment variable
+        # SPANNER_ENABLE_EXTENDED_TRACING=false.
+ enable_extended_tracing=False,
+
+ # By default end to end tracing is set to False. Set to True
+ # for getting spans for Spanner server.
+ enable_end_to_end_tracing=True,
)
+    spanner_client = spanner.Client(project_id, observability_options=observability_options)
+
+
+To get more fine-grained traces from gRPC, you can enable the gRPC instrumentation by the following
+
+.. code-block:: sh
+
+ pip install opentelemetry-instrumentation opentelemetry-instrumentation-grpc
+
+and then in your Python code, please add the following lines:
+
+.. code:: python
+
+ from opentelemetry.instrumentation.grpc import GrpcInstrumentorClient
+ grpc_client_instrumentor = GrpcInstrumentorClient()
+ grpc_client_instrumentor.instrument()
+
Generated spanner traces should now be available on `Cloud Trace `_.
Tracing is most effective when many libraries are instrumented to provide insight over the entire lifespan of a request.
For a list of libraries that can be instrumented, see the `OpenTelemetry Integrations` section of the `OpenTelemetry Python docs `_
+
+Annotating spans with SQL
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default, your spans will be annotated with SQL statements where appropriate, but this can leak
+PII (Personally Identifiable Information). Due to legacy behavior, we cannot simply turn this
+behavior off by default. However, you can control it by setting
+
+    SPANNER_ENABLE_EXTENDED_TRACING=false
+
+to turn it off globally, or by setting ``observability_options.enable_extended_tracing=False``
+when creating each Spanner client.
+
+End-to-end tracing
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In addition to client-side tracing, you can opt in to end-to-end tracing. End-to-end tracing helps you understand and debug latency issues that are specific to Spanner. Refer to the `tracing overview <https://cloud.google.com/spanner/docs/tracing-overview>`_ for more information.
+
+To configure end-to-end tracing:
+
+1. Opt in for end-to-end tracing. You can opt-in by either:
+* Setting the environment variable `SPANNER_ENABLE_END_TO_END_TRACING=true` before your application is started
+* In code, by setting `observability_options.enable_end_to_end_tracing=true` when creating each SpannerClient.
+
+2. Set the trace context propagation in OpenTelemetry.
+
+.. code:: python
+
+ from opentelemetry.propagate import set_global_textmap
+ from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+ set_global_textmap(TraceContextTextMapPropagator())
\ No newline at end of file
diff --git a/docs/snapshot-usage.rst b/docs/snapshot-usage.rst
index e088cd0ceb..0f00686a54 100644
--- a/docs/snapshot-usage.rst
+++ b/docs/snapshot-usage.rst
@@ -24,8 +24,7 @@ reads as of a given timestamp:
.. code:: python
import datetime
- from pytz import UTC
- TIMESTAMP = datetime.datetime.utcnow().replace(tzinfo=UTC)
+ TIMESTAMP = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
with database.snapshot(read_timestamp=TIMESTAMP) as snapshot:
...
@@ -65,16 +64,16 @@ Read Table Data
To read data for selected rows from a table in the database, call
:meth:`~google.cloud.spanner_v1.snapshot.Snapshot.read` which will return
-all rows specified in ``key_set``, or fail if the result set is too large,
+all rows specified in ``keyset``, or fail if the result set is too large,
.. code:: python
with database.snapshot() as snapshot:
result = snapshot.read(
table='table-name', columns=['first_name', 'last_name', 'age'],
- key_set=['phred@example.com', 'bharney@example.com'])
+ keyset=spanner.KeySet([['phred@example.com'], ['bharney@example.com']]))
- for row in result.rows:
+ for row in result:
print(row)
.. note::
@@ -100,7 +99,7 @@ result set is too large,
'WHERE p.employee_id == e.employee_id')
result = snapshot.execute_sql(QUERY)
- for row in list(result):
+ for row in result:
print(row)
.. note::
diff --git a/docs/spanner_admin_database_v1/database_admin.rst b/docs/spanner_admin_database_v1/database_admin.rst
index 5618b72cd6..bd6aab00e4 100644
--- a/docs/spanner_admin_database_v1/database_admin.rst
+++ b/docs/spanner_admin_database_v1/database_admin.rst
@@ -5,7 +5,6 @@ DatabaseAdmin
:members:
:inherited-members:
-
.. automodule:: google.cloud.spanner_admin_database_v1.services.database_admin.pagers
:members:
:inherited-members:
diff --git a/docs/spanner_admin_database_v1/services.rst b/docs/spanner_admin_database_v1/services_.rst
similarity index 100%
rename from docs/spanner_admin_database_v1/services.rst
rename to docs/spanner_admin_database_v1/services_.rst
diff --git a/docs/spanner_admin_database_v1/types.rst b/docs/spanner_admin_database_v1/types_.rst
similarity index 100%
rename from docs/spanner_admin_database_v1/types.rst
rename to docs/spanner_admin_database_v1/types_.rst
diff --git a/docs/spanner_admin_instance_v1/instance_admin.rst b/docs/spanner_admin_instance_v1/instance_admin.rst
index f18b5ca893..fe820b3fad 100644
--- a/docs/spanner_admin_instance_v1/instance_admin.rst
+++ b/docs/spanner_admin_instance_v1/instance_admin.rst
@@ -5,7 +5,6 @@ InstanceAdmin
:members:
:inherited-members:
-
.. automodule:: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers
:members:
:inherited-members:
diff --git a/docs/spanner_admin_instance_v1/services.rst b/docs/spanner_admin_instance_v1/services_.rst
similarity index 100%
rename from docs/spanner_admin_instance_v1/services.rst
rename to docs/spanner_admin_instance_v1/services_.rst
diff --git a/docs/spanner_admin_instance_v1/types.rst b/docs/spanner_admin_instance_v1/types_.rst
similarity index 100%
rename from docs/spanner_admin_instance_v1/types.rst
rename to docs/spanner_admin_instance_v1/types_.rst
diff --git a/docs/batch-api.rst b/docs/spanner_v1/batch.rst
similarity index 100%
rename from docs/batch-api.rst
rename to docs/spanner_v1/batch.rst
diff --git a/docs/client-api.rst b/docs/spanner_v1/client.rst
similarity index 100%
rename from docs/client-api.rst
rename to docs/spanner_v1/client.rst
diff --git a/docs/database-api.rst b/docs/spanner_v1/database.rst
similarity index 100%
rename from docs/database-api.rst
rename to docs/spanner_v1/database.rst
diff --git a/docs/instance-api.rst b/docs/spanner_v1/instance.rst
similarity index 100%
rename from docs/instance-api.rst
rename to docs/spanner_v1/instance.rst
diff --git a/docs/keyset-api.rst b/docs/spanner_v1/keyset.rst
similarity index 100%
rename from docs/keyset-api.rst
rename to docs/spanner_v1/keyset.rst
diff --git a/docs/spanner_v1/services.rst b/docs/spanner_v1/services_.rst
similarity index 100%
rename from docs/spanner_v1/services.rst
rename to docs/spanner_v1/services_.rst
diff --git a/docs/session-api.rst b/docs/spanner_v1/session.rst
similarity index 100%
rename from docs/session-api.rst
rename to docs/spanner_v1/session.rst
diff --git a/docs/snapshot-api.rst b/docs/spanner_v1/snapshot.rst
similarity index 100%
rename from docs/snapshot-api.rst
rename to docs/spanner_v1/snapshot.rst
diff --git a/docs/spanner_v1/spanner.rst b/docs/spanner_v1/spanner.rst
index f7803df4ae..b51f4447e4 100644
--- a/docs/spanner_v1/spanner.rst
+++ b/docs/spanner_v1/spanner.rst
@@ -5,7 +5,6 @@ Spanner
:members:
:inherited-members:
-
.. automodule:: google.cloud.spanner_v1.services.spanner.pagers
:members:
:inherited-members:
diff --git a/docs/streamed-api.rst b/docs/spanner_v1/streamed.rst
similarity index 100%
rename from docs/streamed-api.rst
rename to docs/spanner_v1/streamed.rst
diff --git a/docs/table-api.rst b/docs/spanner_v1/table.rst
similarity index 100%
rename from docs/table-api.rst
rename to docs/spanner_v1/table.rst
diff --git a/docs/transaction-api.rst b/docs/spanner_v1/transaction.rst
similarity index 100%
rename from docs/transaction-api.rst
rename to docs/spanner_v1/transaction.rst
diff --git a/docs/spanner_v1/types.rst b/docs/spanner_v1/types_.rst
similarity index 100%
rename from docs/spanner_v1/types.rst
rename to docs/spanner_v1/types_.rst
diff --git a/docs/summary_overview.md b/docs/summary_overview.md
new file mode 100644
index 0000000000..ffaf71df07
--- /dev/null
+++ b/docs/summary_overview.md
@@ -0,0 +1,22 @@
+[
+This is a templated file. Adding content to this file may result in it being
+reverted. Instead, if you want to place additional content, create an
+"overview_content.md" file in `docs/` directory. The Sphinx tool will
+pick up on the content and merge the content.
+]: #
+
+# Cloud Spanner API
+
+Overview of the APIs available for Cloud Spanner API.
+
+## All entries
+
+Classes, methods and properties & attributes for
+Cloud Spanner API.
+
+[classes](https://cloud.google.com/python/docs/reference/spanner/latest/summary_class.html)
+
+[methods](https://cloud.google.com/python/docs/reference/spanner/latest/summary_method.html)
+
+[properties and
+attributes](https://cloud.google.com/python/docs/reference/spanner/latest/summary_property.html)
diff --git a/docs/table-usage.rst b/docs/table-usage.rst
index 9d28da1ebb..01459b5f8e 100644
--- a/docs/table-usage.rst
+++ b/docs/table-usage.rst
@@ -1,5 +1,5 @@
-Table Admin
-===========
+Table Admin Usage
+=================
After creating an :class:`~google.cloud.spanner_v1.database.Database`, you can
interact with individual tables for that instance.
diff --git a/docs/transaction-usage.rst b/docs/transaction-usage.rst
index 4781cfa148..78026bf5a4 100644
--- a/docs/transaction-usage.rst
+++ b/docs/transaction-usage.rst
@@ -5,7 +5,8 @@ A :class:`~google.cloud.spanner_v1.transaction.Transaction` represents a
transaction: when the transaction commits, it will send any accumulated
mutations to the server.
-To understand more about how transactions work, visit [Transaction](https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction).
+To understand more about how transactions work, visit
+`Transaction `_.
To learn more about how to use them in the Python client, continue reading.
@@ -90,8 +91,8 @@ any of the records already exists.
Update records using a Transaction
----------------------------------
-:meth:`Transaction.update` updates one or more existing records in a table. Fails
-if any of the records does not already exist.
+:meth:`Transaction.update` updates one or more existing records in a table.
+Fails if any of the records does not already exist.
.. code:: python
@@ -178,9 +179,9 @@ Using :meth:`~Database.run_in_transaction`
Rather than calling :meth:`~Transaction.commit` or :meth:`~Transaction.rollback`
manually, you should use :meth:`~Database.run_in_transaction` to run the
-function that you need. The transaction's :meth:`~Transaction.commit` method
+function that you need. The transaction's :meth:`~Transaction.commit` method
will be called automatically if the ``with`` block exits without raising an
-exception. The function will automatically be retried for
+exception. The function will automatically be retried for
:class:`~google.api_core.exceptions.Aborted` errors, but will raise on
:class:`~google.api_core.exceptions.GoogleAPICallError` and
:meth:`~Transaction.rollback` will be called on all others.
@@ -188,25 +189,30 @@ exception. The function will automatically be retried for
.. code:: python
def _unit_of_work(transaction):
-
transaction.insert(
- 'citizens', columns=['email', 'first_name', 'last_name', 'age'],
+ 'citizens',
+ columns=['email', 'first_name', 'last_name', 'age'],
values=[
['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
['bharney@example.com', 'Bharney', 'Rhubble', 31],
- ])
+ ]
+ )
transaction.update(
- 'citizens', columns=['email', 'age'],
+ 'citizens',
+ columns=['email', 'age'],
values=[
['phred@exammple.com', 33],
['bharney@example.com', 32],
- ])
+ ]
+ )
...
- transaction.delete('citizens',
- keyset['bharney@example.com', 'nonesuch@example.com'])
+ transaction.delete(
+ 'citizens',
+ keyset=['bharney@example.com', 'nonesuch@example.com']
+ )
db.run_in_transaction(_unit_of_work)
@@ -242,7 +248,7 @@ If an exception is raised inside the ``with`` block, the transaction's
...
transaction.delete('citizens',
- keyset['bharney@example.com', 'nonesuch@example.com'])
+ keyset=['bharney@example.com', 'nonesuch@example.com'])
Begin a Transaction
diff --git a/examples/grpc_instrumentation_enabled.py b/examples/grpc_instrumentation_enabled.py
new file mode 100644
index 0000000000..c8bccd0a9d
--- /dev/null
+++ b/examples/grpc_instrumentation_enabled.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+import os
+import time
+
+import google.cloud.spanner as spanner
+from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.sdk.trace.sampling import ALWAYS_ON
+from opentelemetry import trace
+
+# Enable the gRPC instrumentation if you'd like more introspection.
+from opentelemetry.instrumentation.grpc import GrpcInstrumentorClient
+
+grpc_client_instrumentor = GrpcInstrumentorClient()
+grpc_client_instrumentor.instrument()
+
+
+def main():
+ # Setup common variables that'll be used between Spanner and traces.
+ project_id = os.environ.get('SPANNER_PROJECT_ID', 'test-project')
+
+ # Setup OpenTelemetry, trace and Cloud Trace exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ trace_exporter = CloudTraceSpanExporter(project_id=project_id)
+ tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
+ trace.set_tracer_provider(tracer_provider)
+ # Retrieve a tracer from the global tracer provider.
+ tracer = tracer_provider.get_tracer('MyApp')
+
+ # Setup the Cloud Spanner Client.
+ spanner_client = spanner.Client(project_id)
+
+ instance = spanner_client.instance('test-instance')
+ database = instance.database('test-db')
+
+ # Now run our queries
+ with tracer.start_as_current_span('QueryInformationSchema'):
+ with database.snapshot() as snapshot:
+ with tracer.start_as_current_span('InformationSchema'):
+ info_schema = snapshot.execute_sql(
+ 'SELECT * FROM INFORMATION_SCHEMA.TABLES')
+ for row in info_schema:
+ print(row)
+
+ with tracer.start_as_current_span('ServerTimeQuery'):
+ with database.snapshot() as snapshot:
+ # Purposefully issue a bad SQL statement to examine exceptions
+ # that get recorded and a ERROR span status.
+ try:
+ data = snapshot.execute_sql('SELECT CURRENT_TIMESTAMPx()')
+ for row in data:
+ print(row)
+ except Exception as e:
+ pass
+
+
+if __name__ == '__main__':
+ main()
diff --git a/examples/trace.py b/examples/trace.py
new file mode 100644
index 0000000000..5b826ca5ad
--- /dev/null
+++ b/examples/trace.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+import os
+import time
+
+import google.cloud.spanner as spanner
+from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
+from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.sdk.trace.sampling import ALWAYS_ON
+from opentelemetry import trace
+from opentelemetry.propagate import set_global_textmap
+from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+
+# Setup common variables that'll be used between Spanner and traces.
+project_id = os.environ.get('SPANNER_PROJECT_ID', 'test-project')
+
+def spanner_with_cloud_trace():
+ # [START spanner_opentelemetry_traces_cloudtrace_usage]
+ # Setup OpenTelemetry, trace and Cloud Trace exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ trace_exporter = CloudTraceSpanExporter(project_id=project_id)
+ tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
+
+ # Setup the Cloud Spanner Client.
+ spanner_client = spanner.Client(
+ project_id,
+ observability_options=dict(tracer_provider=tracer_provider, enable_extended_tracing=True, enable_end_to_end_tracing=True),
+ )
+
+ # [END spanner_opentelemetry_traces_cloudtrace_usage]
+ return spanner_client
+
+def spanner_with_otlp():
+ # [START spanner_opentelemetry_traces_otlp_usage]
+ # Setup OpenTelemetry, trace and OTLP exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ otlp_exporter = OTLPSpanExporter(endpoint="http://localhost:4317")
+ tracer_provider.add_span_processor(BatchSpanProcessor(otlp_exporter))
+
+ # Setup the Cloud Spanner Client.
+ spanner_client = spanner.Client(
+ project_id,
+ observability_options=dict(tracer_provider=tracer_provider, enable_extended_tracing=True, enable_end_to_end_tracing=True),
+ )
+ # [END spanner_opentelemetry_traces_otlp_usage]
+ return spanner_client
+
+
+def main():
+ # Setup OpenTelemetry, trace and Cloud Trace exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ trace_exporter = CloudTraceSpanExporter(project_id=project_id)
+ tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
+
+ # Setup the Cloud Spanner Client.
+ # Change to "spanner_client = spanner_with_otlp" to use OTLP exporter
+ spanner_client = spanner_with_cloud_trace()
+ instance = spanner_client.instance('test-instance')
+ database = instance.database('test-db')
+
+ # Set W3C Trace Context as the global propagator for end to end tracing.
+ set_global_textmap(TraceContextTextMapPropagator())
+
+ # Retrieve a tracer from our custom tracer provider.
+ tracer = tracer_provider.get_tracer('MyApp')
+
+ # Now run our queries
+ with tracer.start_as_current_span('QueryInformationSchema'):
+ with database.snapshot() as snapshot:
+ with tracer.start_as_current_span('InformationSchema'):
+ info_schema = snapshot.execute_sql(
+ 'SELECT * FROM INFORMATION_SCHEMA.TABLES')
+ for row in info_schema:
+ print(row)
+
+ with tracer.start_as_current_span('ServerTimeQuery'):
+ with database.snapshot() as snapshot:
+ # Purposefully issue a bad SQL statement to examine exceptions
+ # that get recorded and a ERROR span status.
+ try:
+ data = snapshot.execute_sql('SELECT CURRENT_TIMESTAMPx()')
+ for row in data:
+ print(row)
+ except Exception as e:
+ print(e)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/google/__init__.py b/google/__init__.py
deleted file mode 100644
index 2f4b4738ae..0000000000
--- a/google/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-try:
- import pkg_resources
-
- pkg_resources.declare_namespace(__name__)
-except ImportError:
- import pkgutil
-
- __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/google/cloud/__init__.py b/google/cloud/__init__.py
deleted file mode 100644
index 2f4b4738ae..0000000000
--- a/google/cloud/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-try:
- import pkg_resources
-
- pkg_resources.declare_namespace(__name__)
-except ImportError:
- import pkgutil
-
- __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/google/cloud/spanner_admin_database_v1/__init__.py b/google/cloud/spanner_admin_database_v1/__init__.py
index dded570012..42b15fe254 100644
--- a/google/cloud/spanner_admin_database_v1/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/__init__.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,32 +13,72 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
+
+import google.api_core as api_core
+import sys
+
+__version__ = package_version.__version__
+
+if sys.version_info >= (3, 8): # pragma: NO COVER
+ from importlib import metadata
+else: # pragma: NO COVER
+ # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove
+ # this code path once we drop support for Python 3.7
+ import importlib_metadata as metadata
+
from .services.database_admin import DatabaseAdminClient
+from .services.database_admin import DatabaseAdminAsyncClient
+
from .types.backup import Backup
from .types.backup import BackupInfo
+from .types.backup import BackupInstancePartition
+from .types.backup import CopyBackupEncryptionConfig
+from .types.backup import CopyBackupMetadata
+from .types.backup import CopyBackupRequest
from .types.backup import CreateBackupEncryptionConfig
from .types.backup import CreateBackupMetadata
from .types.backup import CreateBackupRequest
from .types.backup import DeleteBackupRequest
+from .types.backup import FullBackupSpec
from .types.backup import GetBackupRequest
+from .types.backup import IncrementalBackupSpec
from .types.backup import ListBackupOperationsRequest
from .types.backup import ListBackupOperationsResponse
from .types.backup import ListBackupsRequest
from .types.backup import ListBackupsResponse
from .types.backup import UpdateBackupRequest
+from .types.backup_schedule import BackupSchedule
+from .types.backup_schedule import BackupScheduleSpec
+from .types.backup_schedule import CreateBackupScheduleRequest
+from .types.backup_schedule import CrontabSpec
+from .types.backup_schedule import DeleteBackupScheduleRequest
+from .types.backup_schedule import GetBackupScheduleRequest
+from .types.backup_schedule import ListBackupSchedulesRequest
+from .types.backup_schedule import ListBackupSchedulesResponse
+from .types.backup_schedule import UpdateBackupScheduleRequest
from .types.common import EncryptionConfig
from .types.common import EncryptionInfo
from .types.common import OperationProgress
+from .types.common import DatabaseDialect
+from .types.spanner_database_admin import AddSplitPointsRequest
+from .types.spanner_database_admin import AddSplitPointsResponse
from .types.spanner_database_admin import CreateDatabaseMetadata
from .types.spanner_database_admin import CreateDatabaseRequest
from .types.spanner_database_admin import Database
+from .types.spanner_database_admin import DatabaseRole
+from .types.spanner_database_admin import DdlStatementActionInfo
from .types.spanner_database_admin import DropDatabaseRequest
from .types.spanner_database_admin import GetDatabaseDdlRequest
from .types.spanner_database_admin import GetDatabaseDdlResponse
from .types.spanner_database_admin import GetDatabaseRequest
+from .types.spanner_database_admin import InternalUpdateGraphOperationRequest
+from .types.spanner_database_admin import InternalUpdateGraphOperationResponse
from .types.spanner_database_admin import ListDatabaseOperationsRequest
from .types.spanner_database_admin import ListDatabaseOperationsResponse
+from .types.spanner_database_admin import ListDatabaseRolesRequest
+from .types.spanner_database_admin import ListDatabaseRolesResponse
from .types.spanner_database_admin import ListDatabasesRequest
from .types.spanner_database_admin import ListDatabasesResponse
from .types.spanner_database_admin import OptimizeRestoredDatabaseMetadata
@@ -47,34 +86,155 @@
from .types.spanner_database_admin import RestoreDatabaseMetadata
from .types.spanner_database_admin import RestoreDatabaseRequest
from .types.spanner_database_admin import RestoreInfo
-from .types.spanner_database_admin import RestoreSourceType
+from .types.spanner_database_admin import SplitPoints
from .types.spanner_database_admin import UpdateDatabaseDdlMetadata
from .types.spanner_database_admin import UpdateDatabaseDdlRequest
+from .types.spanner_database_admin import UpdateDatabaseMetadata
+from .types.spanner_database_admin import UpdateDatabaseRequest
+from .types.spanner_database_admin import RestoreSourceType
+
+if hasattr(api_core, "check_python_version") and hasattr(
+ api_core, "check_dependency_versions"
+): # pragma: NO COVER
+ api_core.check_python_version("google.cloud.spanner_admin_database_v1") # type: ignore
+ api_core.check_dependency_versions("google.cloud.spanner_admin_database_v1") # type: ignore
+else: # pragma: NO COVER
+ # An older version of api_core is installed which does not define the
+ # functions above. We do equivalent checks manually.
+ try:
+ import warnings
+ import sys
+
+ _py_version_str = sys.version.split()[0]
+ _package_label = "google.cloud.spanner_admin_database_v1"
+ if sys.version_info < (3, 9):
+ warnings.warn(
+ "You are using a non-supported Python version "
+ + f"({_py_version_str}). Google will not post any further "
+ + f"updates to {_package_label} supporting this Python version. "
+ + "Please upgrade to the latest Python version, or at "
+ + f"least to Python 3.9, and then update {_package_label}.",
+ FutureWarning,
+ )
+ if sys.version_info[:2] == (3, 9):
+ warnings.warn(
+ f"You are using a Python version ({_py_version_str}) "
+ + f"which Google will stop supporting in {_package_label} in "
+ + "January 2026. Please "
+ + "upgrade to the latest Python version, or at "
+ + "least to Python 3.10, before then, and "
+ + f"then update {_package_label}.",
+ FutureWarning,
+ )
+
+ def parse_version_to_tuple(version_string: str):
+ """Safely converts a semantic version string to a comparable tuple of integers.
+ Example: "4.25.8" -> (4, 25, 8)
+ Ignores non-numeric parts and handles common version formats.
+ Args:
+        version_string: Version string such as "x.y.z" (trailing non-numeric parts are ignored)
+ Returns:
+ Tuple of integers for the parsed version string.
+ """
+ parts = []
+ for part in version_string.split("."):
+ try:
+ parts.append(int(part))
+ except ValueError:
+ # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here.
+ # This is a simplification compared to 'packaging.parse_version', but sufficient
+ # for comparing strictly numeric semantic versions.
+ break
+ return tuple(parts)
+ def _get_version(dependency_name):
+ try:
+ version_string: str = metadata.version(dependency_name)
+ parsed_version = parse_version_to_tuple(version_string)
+ return (parsed_version, version_string)
+ except Exception:
+ # Catch exceptions from metadata.version() (e.g., PackageNotFoundError)
+ # or errors during parse_version_to_tuple
+ return (None, "--")
+
+ _dependency_package = "google.protobuf"
+ _next_supported_version = "4.25.8"
+ _next_supported_version_tuple = (4, 25, 8)
+ _recommendation = " (we recommend 6.x)"
+ (_version_used, _version_used_string) = _get_version(_dependency_package)
+ if _version_used and _version_used < _next_supported_version_tuple:
+ warnings.warn(
+ f"Package {_package_label} depends on "
+ + f"{_dependency_package}, currently installed at version "
+ + f"{_version_used_string}. Future updates to "
+ + f"{_package_label} will require {_dependency_package} at "
+ + f"version {_next_supported_version} or higher{_recommendation}."
+ + " Please ensure "
+ + "that either (a) your Python environment doesn't pin the "
+ + f"version of {_dependency_package}, so that updates to "
+ + f"{_package_label} can require the higher version, or "
+ + "(b) you manually update your Python environment to use at "
+ + f"least version {_next_supported_version} of "
+ + f"{_dependency_package}.",
+ FutureWarning,
+ )
+ except Exception:
+ warnings.warn(
+ "Could not determine the version of Python "
+ + "currently being used. To continue receiving "
+            + f"updates for {_package_label}, ensure you are "
+ + "using a supported version of Python; see "
+ + "https://devguide.python.org/versions/"
+ )
__all__ = (
+ "DatabaseAdminAsyncClient",
+ "AddSplitPointsRequest",
+ "AddSplitPointsResponse",
"Backup",
"BackupInfo",
+ "BackupInstancePartition",
+ "BackupSchedule",
+ "BackupScheduleSpec",
+ "CopyBackupEncryptionConfig",
+ "CopyBackupMetadata",
+ "CopyBackupRequest",
"CreateBackupEncryptionConfig",
"CreateBackupMetadata",
"CreateBackupRequest",
+ "CreateBackupScheduleRequest",
"CreateDatabaseMetadata",
"CreateDatabaseRequest",
+ "CrontabSpec",
"Database",
+ "DatabaseAdminClient",
+ "DatabaseDialect",
+ "DatabaseRole",
+ "DdlStatementActionInfo",
"DeleteBackupRequest",
+ "DeleteBackupScheduleRequest",
"DropDatabaseRequest",
"EncryptionConfig",
"EncryptionInfo",
+ "FullBackupSpec",
"GetBackupRequest",
+ "GetBackupScheduleRequest",
"GetDatabaseDdlRequest",
"GetDatabaseDdlResponse",
"GetDatabaseRequest",
+ "IncrementalBackupSpec",
+ "InternalUpdateGraphOperationRequest",
+ "InternalUpdateGraphOperationResponse",
"ListBackupOperationsRequest",
"ListBackupOperationsResponse",
+ "ListBackupSchedulesRequest",
+ "ListBackupSchedulesResponse",
"ListBackupsRequest",
"ListBackupsResponse",
"ListDatabaseOperationsRequest",
"ListDatabaseOperationsResponse",
+ "ListDatabaseRolesRequest",
+ "ListDatabaseRolesResponse",
"ListDatabasesRequest",
"ListDatabasesResponse",
"OperationProgress",
@@ -84,8 +244,11 @@
"RestoreDatabaseRequest",
"RestoreInfo",
"RestoreSourceType",
+ "SplitPoints",
"UpdateBackupRequest",
+ "UpdateBackupScheduleRequest",
"UpdateDatabaseDdlMetadata",
"UpdateDatabaseDdlRequest",
- "DatabaseAdminClient",
+ "UpdateDatabaseMetadata",
+ "UpdateDatabaseRequest",
)
diff --git a/google/cloud/spanner_admin_database_v1/gapic_metadata.json b/google/cloud/spanner_admin_database_v1/gapic_metadata.json
new file mode 100644
index 0000000000..027a4f612b
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/gapic_metadata.json
@@ -0,0 +1,433 @@
+ {
+ "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
+ "language": "python",
+ "libraryPackage": "google.cloud.spanner_admin_database_v1",
+ "protoPackage": "google.spanner.admin.database.v1",
+ "schema": "1.0",
+ "services": {
+ "DatabaseAdmin": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "DatabaseAdminClient",
+ "rpcs": {
+ "AddSplitPoints": {
+ "methods": [
+ "add_split_points"
+ ]
+ },
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
+ "CreateBackup": {
+ "methods": [
+ "create_backup"
+ ]
+ },
+ "CreateBackupSchedule": {
+ "methods": [
+ "create_backup_schedule"
+ ]
+ },
+ "CreateDatabase": {
+ "methods": [
+ "create_database"
+ ]
+ },
+ "DeleteBackup": {
+ "methods": [
+ "delete_backup"
+ ]
+ },
+ "DeleteBackupSchedule": {
+ "methods": [
+ "delete_backup_schedule"
+ ]
+ },
+ "DropDatabase": {
+ "methods": [
+ "drop_database"
+ ]
+ },
+ "GetBackup": {
+ "methods": [
+ "get_backup"
+ ]
+ },
+ "GetBackupSchedule": {
+ "methods": [
+ "get_backup_schedule"
+ ]
+ },
+ "GetDatabase": {
+ "methods": [
+ "get_database"
+ ]
+ },
+ "GetDatabaseDdl": {
+ "methods": [
+ "get_database_ddl"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "InternalUpdateGraphOperation": {
+ "methods": [
+ "internal_update_graph_operation"
+ ]
+ },
+ "ListBackupOperations": {
+ "methods": [
+ "list_backup_operations"
+ ]
+ },
+ "ListBackupSchedules": {
+ "methods": [
+ "list_backup_schedules"
+ ]
+ },
+ "ListBackups": {
+ "methods": [
+ "list_backups"
+ ]
+ },
+ "ListDatabaseOperations": {
+ "methods": [
+ "list_database_operations"
+ ]
+ },
+ "ListDatabaseRoles": {
+ "methods": [
+ "list_database_roles"
+ ]
+ },
+ "ListDatabases": {
+ "methods": [
+ "list_databases"
+ ]
+ },
+ "RestoreDatabase": {
+ "methods": [
+ "restore_database"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UpdateBackup": {
+ "methods": [
+ "update_backup"
+ ]
+ },
+ "UpdateBackupSchedule": {
+ "methods": [
+ "update_backup_schedule"
+ ]
+ },
+ "UpdateDatabase": {
+ "methods": [
+ "update_database"
+ ]
+ },
+ "UpdateDatabaseDdl": {
+ "methods": [
+ "update_database_ddl"
+ ]
+ }
+ }
+ },
+ "grpc-async": {
+ "libraryClient": "DatabaseAdminAsyncClient",
+ "rpcs": {
+ "AddSplitPoints": {
+ "methods": [
+ "add_split_points"
+ ]
+ },
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
+ "CreateBackup": {
+ "methods": [
+ "create_backup"
+ ]
+ },
+ "CreateBackupSchedule": {
+ "methods": [
+ "create_backup_schedule"
+ ]
+ },
+ "CreateDatabase": {
+ "methods": [
+ "create_database"
+ ]
+ },
+ "DeleteBackup": {
+ "methods": [
+ "delete_backup"
+ ]
+ },
+ "DeleteBackupSchedule": {
+ "methods": [
+ "delete_backup_schedule"
+ ]
+ },
+ "DropDatabase": {
+ "methods": [
+ "drop_database"
+ ]
+ },
+ "GetBackup": {
+ "methods": [
+ "get_backup"
+ ]
+ },
+ "GetBackupSchedule": {
+ "methods": [
+ "get_backup_schedule"
+ ]
+ },
+ "GetDatabase": {
+ "methods": [
+ "get_database"
+ ]
+ },
+ "GetDatabaseDdl": {
+ "methods": [
+ "get_database_ddl"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "InternalUpdateGraphOperation": {
+ "methods": [
+ "internal_update_graph_operation"
+ ]
+ },
+ "ListBackupOperations": {
+ "methods": [
+ "list_backup_operations"
+ ]
+ },
+ "ListBackupSchedules": {
+ "methods": [
+ "list_backup_schedules"
+ ]
+ },
+ "ListBackups": {
+ "methods": [
+ "list_backups"
+ ]
+ },
+ "ListDatabaseOperations": {
+ "methods": [
+ "list_database_operations"
+ ]
+ },
+ "ListDatabaseRoles": {
+ "methods": [
+ "list_database_roles"
+ ]
+ },
+ "ListDatabases": {
+ "methods": [
+ "list_databases"
+ ]
+ },
+ "RestoreDatabase": {
+ "methods": [
+ "restore_database"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UpdateBackup": {
+ "methods": [
+ "update_backup"
+ ]
+ },
+ "UpdateBackupSchedule": {
+ "methods": [
+ "update_backup_schedule"
+ ]
+ },
+ "UpdateDatabase": {
+ "methods": [
+ "update_database"
+ ]
+ },
+ "UpdateDatabaseDdl": {
+ "methods": [
+ "update_database_ddl"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "DatabaseAdminClient",
+ "rpcs": {
+ "AddSplitPoints": {
+ "methods": [
+ "add_split_points"
+ ]
+ },
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
+ "CreateBackup": {
+ "methods": [
+ "create_backup"
+ ]
+ },
+ "CreateBackupSchedule": {
+ "methods": [
+ "create_backup_schedule"
+ ]
+ },
+ "CreateDatabase": {
+ "methods": [
+ "create_database"
+ ]
+ },
+ "DeleteBackup": {
+ "methods": [
+ "delete_backup"
+ ]
+ },
+ "DeleteBackupSchedule": {
+ "methods": [
+ "delete_backup_schedule"
+ ]
+ },
+ "DropDatabase": {
+ "methods": [
+ "drop_database"
+ ]
+ },
+ "GetBackup": {
+ "methods": [
+ "get_backup"
+ ]
+ },
+ "GetBackupSchedule": {
+ "methods": [
+ "get_backup_schedule"
+ ]
+ },
+ "GetDatabase": {
+ "methods": [
+ "get_database"
+ ]
+ },
+ "GetDatabaseDdl": {
+ "methods": [
+ "get_database_ddl"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "InternalUpdateGraphOperation": {
+ "methods": [
+ "internal_update_graph_operation"
+ ]
+ },
+ "ListBackupOperations": {
+ "methods": [
+ "list_backup_operations"
+ ]
+ },
+ "ListBackupSchedules": {
+ "methods": [
+ "list_backup_schedules"
+ ]
+ },
+ "ListBackups": {
+ "methods": [
+ "list_backups"
+ ]
+ },
+ "ListDatabaseOperations": {
+ "methods": [
+ "list_database_operations"
+ ]
+ },
+ "ListDatabaseRoles": {
+ "methods": [
+ "list_database_roles"
+ ]
+ },
+ "ListDatabases": {
+ "methods": [
+ "list_databases"
+ ]
+ },
+ "RestoreDatabase": {
+ "methods": [
+ "restore_database"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UpdateBackup": {
+ "methods": [
+ "update_backup"
+ ]
+ },
+ "UpdateBackupSchedule": {
+ "methods": [
+ "update_backup_schedule"
+ ]
+ },
+ "UpdateDatabase": {
+ "methods": [
+ "update_database"
+ ]
+ },
+ "UpdateDatabaseDdl": {
+ "methods": [
+ "update_database_ddl"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/setup.cfg b/google/cloud/spanner_admin_database_v1/gapic_version.py
similarity index 78%
rename from setup.cfg
rename to google/cloud/spanner_admin_database_v1/gapic_version.py
index c3a2b39f65..89cb359ff2 100644
--- a/setup.cfg
+++ b/google/cloud/spanner_admin_database_v1/gapic_version.py
@@ -1,19 +1,16 @@
# -*- coding: utf-8 -*-
-#
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# https://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-# Generated by synthtool. DO NOT EDIT!
-[bdist_wheel]
-universal = 1
+#
+__version__ = "3.61.0" # {x-release-please-version}
diff --git a/google/cloud/spanner_admin_database_v1/proto/backup.proto b/google/cloud/spanner_admin_database_v1/proto/backup.proto
deleted file mode 100644
index 31fdb5326c..0000000000
--- a/google/cloud/spanner_admin_database_v1/proto/backup.proto
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.spanner.admin.database.v1;
-
-import "google/api/field_behavior.proto";
-import "google/api/resource.proto";
-import "google/longrunning/operations.proto";
-import "google/protobuf/field_mask.proto";
-import "google/protobuf/timestamp.proto";
-import "google/spanner/admin/database/v1/common.proto";
-
-option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1";
-option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database";
-option java_multiple_files = true;
-option java_outer_classname = "BackupProto";
-option java_package = "com.google.spanner.admin.database.v1";
-option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1";
-option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1";
-
-// A backup of a Cloud Spanner database.
-message Backup {
- option (google.api.resource) = {
- type: "spanner.googleapis.com/Backup"
- pattern: "projects/{project}/instances/{instance}/backups/{backup}"
- };
-
- // Indicates the current state of the backup.
- enum State {
- // Not specified.
- STATE_UNSPECIFIED = 0;
-
- // The pending backup is still being created. Operations on the
- // backup may fail with `FAILED_PRECONDITION` in this state.
- CREATING = 1;
-
- // The backup is complete and ready for use.
- READY = 2;
- }
-
- // Required for the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // operation. Name of the database from which this backup was created. This
- // needs to be in the same instance as the backup. Values are of the form
- // `projects//instances//databases/`.
- string database = 2 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
-
- // The backup will contain an externally consistent copy of the database at
- // the timestamp specified by `version_time`. If `version_time` is not
- // specified, the system will set `version_time` to the `create_time` of the
- // backup.
- google.protobuf.Timestamp version_time = 9;
-
- // Required for the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // operation. The expiration time of the backup, with microseconds
- // granularity that must be at least 6 hours and at most 366 days
- // from the time the CreateBackup request is processed. Once the `expire_time`
- // has passed, the backup is eligible to be automatically deleted by Cloud
- // Spanner to free the resources used by the backup.
- google.protobuf.Timestamp expire_time = 3;
-
- // Output only for the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // operation. Required for the
- // [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
- // operation.
- //
- // A globally unique identifier for the backup which cannot be
- // changed. Values are of the form
- // `projects//instances//backups/[a-z][a-z0-9_\-]*[a-z0-9]`
- // The final segment of the name must be between 2 and 60 characters
- // in length.
- //
- // The backup is stored in the location(s) specified in the instance
- // configuration of the instance containing the backup, identified
- // by the prefix of the backup name of the form
- // `projects//instances/`.
- string name = 1;
-
- // Output only. The time the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // request is received. If the request does not specify `version_time`, the
- // `version_time` of the backup will be equivalent to the `create_time`.
- google.protobuf.Timestamp create_time = 4
- [(google.api.field_behavior) = OUTPUT_ONLY];
-
- // Output only. Size of the backup in bytes.
- int64 size_bytes = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
-
- // Output only. The current state of the backup.
- State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
-
- // Output only. The names of the restored databases that reference the backup.
- // The database names are of
- // the form `projects//instances//databases/`.
- // Referencing databases may exist in different instances. The existence of
- // any referencing database prevents the backup from being deleted. When a
- // restored database from the backup enters the `READY` state, the reference
- // to the backup is removed.
- repeated string referencing_databases = 7 [
- (google.api.field_behavior) = OUTPUT_ONLY,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }
- ];
-
- // Output only. The encryption information for the backup.
- EncryptionInfo encryption_info = 8
- [(google.api.field_behavior) = OUTPUT_ONLY];
-}
-
-// The request for
-// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
-message CreateBackupRequest {
- // Required. The name of the instance in which the backup will be
- // created. This must be the same instance that contains the database the
- // backup will be created from. The backup will be stored in the
- // location(s) specified in the instance configuration of this
- // instance. Values are of the form
- // `projects//instances/`.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Instance"
- }
- ];
-
- // Required. The id of the backup to be created. The `backup_id` appended to
- // `parent` forms the full backup name of the form
- // `projects//instances//backups/`.
- string backup_id = 2 [(google.api.field_behavior) = REQUIRED];
-
- // Required. The backup to create.
- Backup backup = 3 [(google.api.field_behavior) = REQUIRED];
-
- // Optional. The encryption configuration used to encrypt the backup. If this
- // field is not specified, the backup will use the same encryption
- // configuration as the database by default, namely
- // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
- // = `USE_DATABASE_ENCRYPTION`.
- CreateBackupEncryptionConfig encryption_config = 4
- [(google.api.field_behavior) = OPTIONAL];
-}
-
-// Metadata type for the operation returned by
-// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
-message CreateBackupMetadata {
- // The name of the backup being created.
- string name = 1 [
- (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
- ];
-
- // The name of the database the backup is created from.
- string database = 2 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
-
- // The progress of the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // operation.
- OperationProgress progress = 3;
-
- // The time at which cancellation of this operation was received.
- // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
- // starts asynchronous cancellation on a long-running operation. The server
- // makes a best effort to cancel the operation, but success is not guaranteed.
- // Clients can use
- // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
- // other methods to check whether the cancellation succeeded or whether the
- // operation completed despite cancellation. On successful cancellation,
- // the operation is not deleted; instead, it becomes an operation with
- // an [Operation.error][google.longrunning.Operation.error] value with a
- // [google.rpc.Status.code][google.rpc.Status.code] of 1,
- // corresponding to `Code.CANCELLED`.
- google.protobuf.Timestamp cancel_time = 4;
-}
-
-// The request for
-// [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
-message UpdateBackupRequest {
- // Required. The backup to update. `backup.name`, and the fields to be updated
- // as specified by `update_mask` are required. Other fields are ignored.
- // Update is only supported for the following fields:
- // * `backup.expire_time`.
- Backup backup = 1 [(google.api.field_behavior) = REQUIRED];
-
- // Required. A mask specifying which fields (e.g. `expire_time`) in the
- // Backup resource should be updated. This mask is relative to the Backup
- // resource, not to the request message. The field mask must always be
- // specified; this prevents any future fields from being erased accidentally
- // by clients that do not know about them.
- google.protobuf.FieldMask update_mask = 2
- [(google.api.field_behavior) = REQUIRED];
-}
-
-// The request for
-// [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
-message GetBackupRequest {
- // Required. Name of the backup.
- // Values are of the form
- // `projects//instances//backups/`.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
- ];
-}
-
-// The request for
-// [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
-message DeleteBackupRequest {
- // Required. Name of the backup to delete.
- // Values are of the form
- // `projects//instances//backups/`.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
- ];
-}
-
-// The request for
-// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
-message ListBackupsRequest {
- // Required. The instance to list backups from. Values are of the
- // form `projects//instances/`.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Instance"
- }
- ];
-
- // An expression that filters the list of returned backups.
- //
- // A filter expression consists of a field name, a comparison operator, and a
- // value for filtering.
- // The value must be a string, a number, or a boolean. The comparison operator
- // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
- // Colon `:` is the contains operator. Filter rules are not case sensitive.
- //
- // The following fields in the
- // [Backup][google.spanner.admin.database.v1.Backup] are eligible for
- // filtering:
- //
- // * `name`
- // * `database`
- // * `state`
- // * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
- // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
- // * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
- // * `size_bytes`
- //
- // You can combine multiple expressions by enclosing each expression in
- // parentheses. By default, expressions are combined with AND logic, but
- // you can specify AND, OR, and NOT logic explicitly.
- //
- // Here are a few examples:
- //
- // * `name:Howl` - The backup's name contains the string "howl".
- // * `database:prod`
- // - The database's name contains the string "prod".
- // * `state:CREATING` - The backup is pending creation.
- // * `state:READY` - The backup is fully created and ready for use.
- // * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
- // - The backup name contains the string "howl" and `create_time`
- // of the backup is before 2018-03-28T14:50:00Z.
- // * `expire_time < \"2018-03-28T14:50:00Z\"`
- // - The backup `expire_time` is before 2018-03-28T14:50:00Z.
- // * `size_bytes > 10000000000` - The backup's size is greater than 10GB
- string filter = 2;
-
- // Number of backups to be returned in the response. If 0 or
- // less, defaults to the server's maximum allowed page size.
- int32 page_size = 3;
-
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
- // from a previous
- // [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
- // to the same `parent` and with the same `filter`.
- string page_token = 4;
-}
-
-// The response for
-// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
-message ListBackupsResponse {
- // The list of matching backups. Backups returned are ordered by `create_time`
- // in descending order, starting from the most recent `create_time`.
- repeated Backup backups = 1;
-
- // `next_page_token` can be sent in a subsequent
- // [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
- // call to fetch more of the matching backups.
- string next_page_token = 2;
-}
-
-// The request for
-// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
-message ListBackupOperationsRequest {
- // Required. The instance of the backup operations. Values are of
- // the form `projects//instances/`.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Instance"
- }
- ];
-
- // An expression that filters the list of returned backup operations.
- //
- // A filter expression consists of a field name, a
- // comparison operator, and a value for filtering.
- // The value must be a string, a number, or a boolean. The comparison operator
- // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
- // Colon `:` is the contains operator. Filter rules are not case sensitive.
- //
- // The following fields in the [operation][google.longrunning.Operation]
- // are eligible for filtering:
- //
- // * `name` - The name of the long-running operation
- // * `done` - False if the operation is in progress, else true.
- // * `metadata.@type` - the type of metadata. For example, the type string
- // for
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
- // is
- // `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
- // * `metadata.` - any field in metadata.value.
- // * `error` - Error associated with the long-running operation.
- // * `response.@type` - the type of response.
- // * `response.` - any field in response.value.
- //
- // You can combine multiple expressions by enclosing each expression in
- // parentheses. By default, expressions are combined with AND logic, but
- // you can specify AND, OR, and NOT logic explicitly.
- //
- // Here are a few examples:
- //
- // * `done:true` - The operation is complete.
- // * `metadata.database:prod` - The database the backup was taken from has
- // a name containing the string "prod".
- // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
- // `(metadata.name:howl) AND` \
- // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
- // `(error:*)` - Returns operations where:
- // * The operation's metadata type is
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
- // * The backup name contains the string "howl".
- // * The operation started before 2018-03-28T14:50:00Z.
- // * The operation resulted in an error.
- string filter = 2;
-
- // Number of operations to be returned in the response. If 0 or
- // less, defaults to the server's maximum allowed page size.
- int32 page_size = 3;
-
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
- // from a previous
- // [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
- // to the same `parent` and with the same `filter`.
- string page_token = 4;
-}
-
-// The response for
-// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
-message ListBackupOperationsResponse {
- // The list of matching backup [long-running
- // operations][google.longrunning.Operation]. Each operation's name will be
- // prefixed by the backup's name and the operation's
- // [metadata][google.longrunning.Operation.metadata] will be of type
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
- // Operations returned include those that are pending or have
- // completed/failed/canceled within the last 7 days. Operations returned are
- // ordered by `operation.metadata.value.progress.start_time` in descending
- // order starting from the most recently started operation.
- repeated google.longrunning.Operation operations = 1;
-
- // `next_page_token` can be sent in a subsequent
- // [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
- // call to fetch more of the matching metadata.
- string next_page_token = 2;
-}
-
-// Information about a backup.
-message BackupInfo {
- // Name of the backup.
- string backup = 1 [
- (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
- ];
-
- // The backup contains an externally consistent copy of `source_database` at
- // the timestamp specified by `version_time`. If the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // request did not specify `version_time`, the `version_time` of the backup is
- // equivalent to the `create_time`.
- google.protobuf.Timestamp version_time = 4;
-
- // The time the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // request was received.
- google.protobuf.Timestamp create_time = 2;
-
- // Name of the database the backup was created from.
- string source_database = 3 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
-}
-
-// Encryption configuration for the backup to create.
-message CreateBackupEncryptionConfig {
- // Encryption types for the backup.
- enum EncryptionType {
- // Unspecified. Do not use.
- ENCRYPTION_TYPE_UNSPECIFIED = 0;
-
- // Use the same encryption configuration as the database. This is the
- // default option when
- // [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig]
- // is empty. For example, if the database is using
- // `Customer_Managed_Encryption`, the backup will be using the same Cloud
- // KMS key as the database.
- USE_DATABASE_ENCRYPTION = 1;
-
- // Use Google default encryption.
- GOOGLE_DEFAULT_ENCRYPTION = 2;
-
- // Use customer managed encryption. If specified, `kms_key_name`
- // must contain a valid Cloud KMS key.
- CUSTOMER_MANAGED_ENCRYPTION = 3;
- }
-
- // Required. The encryption type of the backup.
- EncryptionType encryption_type = 1 [(google.api.field_behavior) = REQUIRED];
-
- // Optional. The Cloud KMS key that will be used to protect the backup.
- // This field should be set only when
- // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
- // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
- // `projects//locations//keyRings//cryptoKeys/`.
- string kms_key_name = 2 [
- (google.api.field_behavior) = OPTIONAL,
- (google.api.resource_reference) = {
- type: "cloudkms.googleapis.com/CryptoKey"
- }
- ];
-}
diff --git a/google/cloud/spanner_admin_database_v1/proto/common.proto b/google/cloud/spanner_admin_database_v1/proto/common.proto
deleted file mode 100644
index 24d7c2d080..0000000000
--- a/google/cloud/spanner_admin_database_v1/proto/common.proto
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.spanner.admin.database.v1;
-
-import "google/api/field_behavior.proto";
-import "google/api/resource.proto";
-import "google/protobuf/timestamp.proto";
-import "google/rpc/status.proto";
-
-option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1";
-option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database";
-option java_multiple_files = true;
-option java_outer_classname = "CommonProto";
-option java_package = "com.google.spanner.admin.database.v1";
-option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1";
-option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1";
-option (google.api.resource_definition) = {
- type: "cloudkms.googleapis.com/CryptoKey"
- pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}"
-};
-option (google.api.resource_definition) = {
- type: "cloudkms.googleapis.com/CryptoKeyVersion"
- pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}"
-};
-
-// Encapsulates progress related information for a Cloud Spanner long
-// running operation.
-message OperationProgress {
- // Percent completion of the operation.
- // Values are between 0 and 100 inclusive.
- int32 progress_percent = 1;
-
- // Time the request was received.
- google.protobuf.Timestamp start_time = 2;
-
- // If set, the time at which this operation failed or was completed
- // successfully.
- google.protobuf.Timestamp end_time = 3;
-}
-
-// Encryption configuration for a Cloud Spanner database.
-message EncryptionConfig {
- // The Cloud KMS key to be used for encrypting and decrypting
- // the database. Values are of the form
- // `projects//locations//keyRings//cryptoKeys/`.
- string kms_key_name = 2 [(google.api.resource_reference) = {
- type: "cloudkms.googleapis.com/CryptoKey"
- }];
-}
-
-// Encryption information for a Cloud Spanner database or backup.
-message EncryptionInfo {
- // Possible encryption types.
- enum Type {
- // Encryption type was not specified, though data at rest remains encrypted.
- TYPE_UNSPECIFIED = 0;
-
- // The data is encrypted at rest with a key that is
- // fully managed by Google. No key version or status will be populated.
- // This is the default state.
- GOOGLE_DEFAULT_ENCRYPTION = 1;
-
- // The data is encrypted at rest with a key that is
- // managed by the customer. The active version of the key. `kms_key_version`
- // will be populated, and `encryption_status` may be populated.
- CUSTOMER_MANAGED_ENCRYPTION = 2;
- }
-
- // Output only. The type of encryption.
- Type encryption_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
-
- // Output only. If present, the status of a recent encrypt/decrypt call on
- // underlying data for this database or backup. Regardless of status, data is
- // always encrypted at rest.
- google.rpc.Status encryption_status = 4
- [(google.api.field_behavior) = OUTPUT_ONLY];
-
- // Output only. A Cloud KMS key version that is being used to protect the
- // database or backup.
- string kms_key_version = 2 [
- (google.api.field_behavior) = OUTPUT_ONLY,
- (google.api.resource_reference) = {
- type: "cloudkms.googleapis.com/CryptoKeyVersion"
- }
- ];
-}
diff --git a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto
deleted file mode 100644
index ac771bc061..0000000000
--- a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto
+++ /dev/null
@@ -1,853 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.spanner.admin.database.v1;
-
-import "google/api/annotations.proto";
-import "google/api/client.proto";
-import "google/api/field_behavior.proto";
-import "google/api/resource.proto";
-import "google/iam/v1/iam_policy.proto";
-import "google/iam/v1/policy.proto";
-import "google/longrunning/operations.proto";
-import "google/protobuf/empty.proto";
-import "google/protobuf/timestamp.proto";
-import "google/spanner/admin/database/v1/backup.proto";
-import "google/spanner/admin/database/v1/common.proto";
-
-option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1";
-option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database";
-option java_multiple_files = true;
-option java_outer_classname = "SpannerDatabaseAdminProto";
-option java_package = "com.google.spanner.admin.database.v1";
-option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1";
-option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1";
-option (google.api.resource_definition) = {
- type: "spanner.googleapis.com/Instance"
- pattern: "projects/{project}/instances/{instance}"
-};
-
-// Cloud Spanner Database Admin API
-//
-// The Cloud Spanner Database Admin API can be used to create, drop, and
-// list databases. It also enables updating the schema of pre-existing
-// databases. It can be also used to create, delete and list backups for a
-// database and to restore from an existing backup.
-service DatabaseAdmin {
- option (google.api.default_host) = "spanner.googleapis.com";
- option (google.api.oauth_scopes) =
- "https://www.googleapis.com/auth/cloud-platform,"
- "https://www.googleapis.com/auth/spanner.admin";
-
- // Lists Cloud Spanner databases.
- rpc ListDatabases(ListDatabasesRequest) returns (ListDatabasesResponse) {
- option (google.api.http) = {
- get: "/v1/{parent=projects/*/instances/*}/databases"
- };
- option (google.api.method_signature) = "parent";
- }
-
- // Creates a new Cloud Spanner database and starts to prepare it for serving.
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format `/operations/` and
- // can be used to track preparation of the database. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Database][google.spanner.admin.database.v1.Database], if successful.
- rpc CreateDatabase(CreateDatabaseRequest)
- returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1/{parent=projects/*/instances/*}/databases"
- body: "*"
- };
- option (google.api.method_signature) = "parent,create_statement";
- option (google.longrunning.operation_info) = {
- response_type: "google.spanner.admin.database.v1.Database"
- metadata_type: "google.spanner.admin.database.v1.CreateDatabaseMetadata"
- };
- }
-
- // Gets the state of a Cloud Spanner database.
- rpc GetDatabase(GetDatabaseRequest) returns (Database) {
- option (google.api.http) = {
- get: "/v1/{name=projects/*/instances/*/databases/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Updates the schema of a Cloud Spanner database by
- // creating/altering/dropping tables, columns, indexes, etc. The returned
- // [long-running operation][google.longrunning.Operation] will have a name of
- // the format `/operations/` and can be used to
- // track execution of the schema change(s). The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
- // The operation has no response.
- rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest)
- returns (google.longrunning.Operation) {
- option (google.api.http) = {
- patch: "/v1/{database=projects/*/instances/*/databases/*}/ddl"
- body: "*"
- };
- option (google.api.method_signature) = "database,statements";
- option (google.longrunning.operation_info) = {
- response_type: "google.protobuf.Empty"
- metadata_type: "google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata"
- };
- }
-
- // Drops (aka deletes) a Cloud Spanner database.
- // Completed backups for the database will be retained according to their
- // `expire_time`.
- rpc DropDatabase(DropDatabaseRequest) returns (google.protobuf.Empty) {
- option (google.api.http) = {
- delete: "/v1/{database=projects/*/instances/*/databases/*}"
- };
- option (google.api.method_signature) = "database";
- }
-
- // Returns the schema of a Cloud Spanner database as a list of formatted
- // DDL statements. This method does not show pending schema updates, those may
- // be queried using the [Operations][google.longrunning.Operations] API.
- rpc GetDatabaseDdl(GetDatabaseDdlRequest) returns (GetDatabaseDdlResponse) {
- option (google.api.http) = {
- get: "/v1/{database=projects/*/instances/*/databases/*}/ddl"
- };
- option (google.api.method_signature) = "database";
- }
-
- // Sets the access control policy on a database or backup resource.
- // Replaces any existing policy.
- //
- // Authorization requires `spanner.databases.setIamPolicy`
- // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
- // For backups, authorization requires `spanner.backups.setIamPolicy`
- // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
- rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest)
- returns (google.iam.v1.Policy) {
- option (google.api.http) = {
- post: "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy"
- body: "*"
- additional_bindings {
- post: "/v1/{resource=projects/*/instances/*/backups/*}:setIamPolicy"
- body: "*"
- }
- };
- option (google.api.method_signature) = "resource,policy";
- }
-
- // Gets the access control policy for a database or backup resource.
- // Returns an empty policy if a database or backup exists but does not have a
- // policy set.
- //
- // Authorization requires `spanner.databases.getIamPolicy` permission on
- // [resource][google.iam.v1.GetIamPolicyRequest.resource].
- // For backups, authorization requires `spanner.backups.getIamPolicy`
- // permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
- rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest)
- returns (google.iam.v1.Policy) {
- option (google.api.http) = {
- post: "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy"
- body: "*"
- additional_bindings {
- post: "/v1/{resource=projects/*/instances/*/backups/*}:getIamPolicy"
- body: "*"
- }
- };
- option (google.api.method_signature) = "resource";
- }
-
- // Returns permissions that the caller has on the specified database or backup
- // resource.
- //
- // Attempting this RPC on a non-existent Cloud Spanner database will
- // result in a NOT_FOUND error if the user has
- // `spanner.databases.list` permission on the containing Cloud
- // Spanner instance. Otherwise returns an empty set of permissions.
- // Calling this method on a backup that does not exist will
- // result in a NOT_FOUND error if the user has
- // `spanner.backups.list` permission on the containing instance.
- rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest)
- returns (google.iam.v1.TestIamPermissionsResponse) {
- option (google.api.http) = {
- post: "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions"
- body: "*"
- additional_bindings {
- post: "/v1/{resource=projects/*/instances/*/backups/*}:testIamPermissions"
- body: "*"
- }
- };
- option (google.api.method_signature) = "resource,permissions";
- }
-
- // Starts creating a new Cloud Spanner Backup.
- // The returned backup [long-running operation][google.longrunning.Operation]
- // will have a name of the format
- // `projects//instances//backups//operations/`
- // and can be used to track creation of the backup. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Backup][google.spanner.admin.database.v1.Backup], if successful.
- // Cancelling the returned operation will stop the creation and delete the
- // backup. There can be only one pending backup creation per database. Backup
- // creation of different databases can run concurrently.
- rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1/{parent=projects/*/instances/*}/backups"
- body: "backup"
- };
- option (google.api.method_signature) = "parent,backup,backup_id";
- option (google.longrunning.operation_info) = {
- response_type: "google.spanner.admin.database.v1.Backup"
- metadata_type: "google.spanner.admin.database.v1.CreateBackupMetadata"
- };
- }
-
- // Gets metadata on a pending or completed
- // [Backup][google.spanner.admin.database.v1.Backup].
- rpc GetBackup(GetBackupRequest) returns (Backup) {
- option (google.api.http) = {
- get: "/v1/{name=projects/*/instances/*/backups/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Updates a pending or completed
- // [Backup][google.spanner.admin.database.v1.Backup].
- rpc UpdateBackup(UpdateBackupRequest) returns (Backup) {
- option (google.api.http) = {
- patch: "/v1/{backup.name=projects/*/instances/*/backups/*}"
- body: "backup"
- };
- option (google.api.method_signature) = "backup,update_mask";
- }
-
- // Deletes a pending or completed
- // [Backup][google.spanner.admin.database.v1.Backup].
- rpc DeleteBackup(DeleteBackupRequest) returns (google.protobuf.Empty) {
- option (google.api.http) = {
- delete: "/v1/{name=projects/*/instances/*/backups/*}"
- };
- option (google.api.method_signature) = "name";
- }
-
- // Lists completed and pending backups.
- // Backups returned are ordered by `create_time` in descending order,
- // starting from the most recent `create_time`.
- rpc ListBackups(ListBackupsRequest) returns (ListBackupsResponse) {
- option (google.api.http) = {
- get: "/v1/{parent=projects/*/instances/*}/backups"
- };
- option (google.api.method_signature) = "parent";
- }
-
- // Create a new database by restoring from a completed backup. The new
- // database must be in the same project and in an instance with the same
- // instance configuration as the instance containing
- // the backup. The returned database [long-running
- // operation][google.longrunning.Operation] has a name of the format
- // `projects//instances//databases//operations/`,
- // and can be used to track the progress of the operation, and to cancel it.
- // The [metadata][google.longrunning.Operation.metadata] field type is
- // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
- // The [response][google.longrunning.Operation.response] type
- // is [Database][google.spanner.admin.database.v1.Database], if
- // successful. Cancelling the returned operation will stop the restore and
- // delete the database.
- // There can be only one database being restored into an instance at a time.
- // Once the restore operation completes, a new restore operation can be
- // initiated, without waiting for the optimize operation associated with the
- // first restore to complete.
- rpc RestoreDatabase(RestoreDatabaseRequest)
- returns (google.longrunning.Operation) {
- option (google.api.http) = {
- post: "/v1/{parent=projects/*/instances/*}/databases:restore"
- body: "*"
- };
- option (google.api.method_signature) = "parent,database_id,backup";
- option (google.longrunning.operation_info) = {
- response_type: "google.spanner.admin.database.v1.Database"
- metadata_type: "google.spanner.admin.database.v1.RestoreDatabaseMetadata"
- };
- }
-
- // Lists database [longrunning-operations][google.longrunning.Operation].
- // A database operation has a name of the form
- // `projects//instances//databases//operations/`.
- // The long-running operation
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata. Operations returned
- // include those that have completed/failed/canceled within the last 7 days,
- // and pending operations.
- rpc ListDatabaseOperations(ListDatabaseOperationsRequest)
- returns (ListDatabaseOperationsResponse) {
- option (google.api.http) = {
- get: "/v1/{parent=projects/*/instances/*}/databaseOperations"
- };
- option (google.api.method_signature) = "parent";
- }
-
- // Lists the backup [long-running operations][google.longrunning.Operation] in
- // the given instance. A backup operation has a name of the form
- // `projects//instances//backups//operations/`.
- // The long-running operation
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata. Operations returned
- // include those that have completed/failed/canceled within the last 7 days,
- // and pending operations. Operations returned are ordered by
- // `operation.metadata.value.progress.start_time` in descending order starting
- // from the most recently started operation.
- rpc ListBackupOperations(ListBackupOperationsRequest)
- returns (ListBackupOperationsResponse) {
- option (google.api.http) = {
- get: "/v1/{parent=projects/*/instances/*}/backupOperations"
- };
- option (google.api.method_signature) = "parent";
- }
-}
-
-// Information about the database restore.
-message RestoreInfo {
- // The type of the restore source.
- RestoreSourceType source_type = 1;
-
- // Information about the source used to restore the database.
- oneof source_info {
- // Information about the backup used to restore the database. The backup
- // may no longer exist.
- BackupInfo backup_info = 2;
- }
-}
-
-// A Cloud Spanner database.
-message Database {
- option (google.api.resource) = {
- type: "spanner.googleapis.com/Database"
- pattern: "projects/{project}/instances/{instance}/databases/{database}"
- };
-
- // Indicates the current state of the database.
- enum State {
- // Not specified.
- STATE_UNSPECIFIED = 0;
-
- // The database is still being created. Operations on the database may fail
- // with `FAILED_PRECONDITION` in this state.
- CREATING = 1;
-
- // The database is fully created and ready for use.
- READY = 2;
-
- // The database is fully created and ready for use, but is still
- // being optimized for performance and cannot handle full load.
- //
- // In this state, the database still references the backup
- // it was restore from, preventing the backup
- // from being deleted. When optimizations are complete, the full performance
- // of the database will be restored, and the database will transition to
- // `READY` state.
- READY_OPTIMIZING = 3;
- }
-
- // Required. The name of the database. Values are of the form
- // `projects//instances//databases/`,
- // where `` is as specified in the `CREATE DATABASE`
- // statement. This name can be passed to other API methods to
- // identify the database.
- string name = 1 [(google.api.field_behavior) = REQUIRED];
-
- // Output only. The current database state.
- State state = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
-
- // Output only. If exists, the time at which the database creation started.
- google.protobuf.Timestamp create_time = 3
- [(google.api.field_behavior) = OUTPUT_ONLY];
-
- // Output only. Applicable only for restored databases. Contains information
- // about the restore source.
- RestoreInfo restore_info = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
-
- // Output only. For databases that are using customer managed encryption, this
- // field contains the encryption configuration for the database.
- // For databases that are using Google default or other types of encryption,
- // this field is empty.
- EncryptionConfig encryption_config = 5
- [(google.api.field_behavior) = OUTPUT_ONLY];
-
- // Output only. For databases that are using customer managed encryption, this
- // field contains the encryption information for the database, such as
- // encryption state and the Cloud KMS key versions that are in use.
- //
- // For databases that are using Google default or other types of encryption,
- // this field is empty.
- //
- // This field is propagated lazily from the backend. There might be a delay
- // from when a key version is being used and when it appears in this field.
- repeated EncryptionInfo encryption_info = 8
- [(google.api.field_behavior) = OUTPUT_ONLY];
-
- // Output only. The period in which Cloud Spanner retains all versions of data
- // for the database. This is the same as the value of version_retention_period
- // database option set using
- // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
- // Defaults to 1 hour, if not set.
- string version_retention_period = 6
- [(google.api.field_behavior) = OUTPUT_ONLY];
-
- // Output only. Earliest timestamp at which older versions of the data can be
- // read. This value is continuously updated by Cloud Spanner and becomes stale
- // the moment it is queried. If you are using this value to recover data, make
- // sure to account for the time from the moment when the value is queried to
- // the moment when you initiate the recovery.
- google.protobuf.Timestamp earliest_version_time = 7
- [(google.api.field_behavior) = OUTPUT_ONLY];
-}
-
-// The request for
-// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
-message ListDatabasesRequest {
- // Required. The instance whose databases should be listed.
- // Values are of the form `projects//instances/`.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Instance"
- }
- ];
-
- // Number of databases to be returned in the response. If 0 or less,
- // defaults to the server's maximum allowed page size.
- int32 page_size = 3;
-
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
- // from a previous
- // [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
- string page_token = 4;
-}
-
-// The response for
-// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
-message ListDatabasesResponse {
- // Databases that matched the request.
- repeated Database databases = 1;
-
- // `next_page_token` can be sent in a subsequent
- // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
- // call to fetch more of the matching databases.
- string next_page_token = 2;
-}
-
-// The request for
-// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
-message CreateDatabaseRequest {
- // Required. The name of the instance that will serve the new database.
- // Values are of the form `projects//instances/`.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Instance"
- }
- ];
-
- // Required. A `CREATE DATABASE` statement, which specifies the ID of the
- // new database. The database ID must conform to the regular expression
- // `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
- // If the database ID is a reserved word or if it contains a hyphen, the
- // database ID must be enclosed in backticks (`` ` ``).
- string create_statement = 2 [(google.api.field_behavior) = REQUIRED];
-
- // Optional. A list of DDL statements to run inside the newly created
- // database. Statements can create tables, indexes, etc. These
- // statements execute atomically with the creation of the database:
- // if there is an error in any statement, the database is not created.
- repeated string extra_statements = 3 [(google.api.field_behavior) = OPTIONAL];
-
- // Optional. The encryption configuration for the database. If this field is
- // not specified, Cloud Spanner will encrypt/decrypt all data at rest using
- // Google default encryption.
- EncryptionConfig encryption_config = 4
- [(google.api.field_behavior) = OPTIONAL];
-}
-
-// Metadata type for the operation returned by
-// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
-message CreateDatabaseMetadata {
- // The database being created.
- string database = 1 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
-}
-
-// The request for
-// [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
-message GetDatabaseRequest {
- // Required. The name of the requested database. Values are of the form
- // `projects//instances//databases/`.
- string name = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }
- ];
-}
-
-// Enqueues the given DDL statements to be applied, in order but not
-// necessarily all at once, to the database schema at some point (or
-// points) in the future. The server checks that the statements
-// are executable (syntactically valid, name tables that exist, etc.)
-// before enqueueing them, but they may still fail upon
-// later execution (e.g., if a statement from another batch of
-// statements is applied first and it conflicts in some way, or if
-// there is some data-related problem like a `NULL` value in a column to
-// which `NOT NULL` would be added). If a statement fails, all
-// subsequent statements in the batch are automatically cancelled.
-//
-// Each batch of statements is assigned a name which can be used with
-// the [Operations][google.longrunning.Operations] API to monitor
-// progress. See the
-// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
-// field for more details.
-message UpdateDatabaseDdlRequest {
- // Required. The database to update.
- string database = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }
- ];
-
- // Required. DDL statements to be applied to the database.
- repeated string statements = 2 [(google.api.field_behavior) = REQUIRED];
-
- // If empty, the new update request is assigned an
- // automatically-generated operation ID. Otherwise, `operation_id`
- // is used to construct the name of the resulting
- // [Operation][google.longrunning.Operation].
- //
- // Specifying an explicit operation ID simplifies determining
- // whether the statements were executed in the event that the
- // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
- // call is replayed, or the return value is otherwise lost: the
- // [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
- // and `operation_id` fields can be combined to form the
- // [name][google.longrunning.Operation.name] of the resulting
- // [longrunning.Operation][google.longrunning.Operation]:
- // `/operations/`.
- //
- // `operation_id` should be unique within the database, and must be
- // a valid identifier: `[a-z][a-z0-9_]*`. Note that
- // automatically-generated operation IDs always begin with an
- // underscore. If the named operation already exists,
- // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
- // returns `ALREADY_EXISTS`.
- string operation_id = 3;
-}
-
-// Metadata type for the operation returned by
-// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
-message UpdateDatabaseDdlMetadata {
- // The database being modified.
- string database = 1 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
-
- // For an update this list contains all the statements. For an
- // individual statement, this list contains only that statement.
- repeated string statements = 2;
-
- // Reports the commit timestamps of all statements that have
- // succeeded so far, where `commit_timestamps[i]` is the commit
- // timestamp for the statement `statements[i]`.
- repeated google.protobuf.Timestamp commit_timestamps = 3;
-
- // Output only. When true, indicates that the operation is throttled e.g
- // due to resource constraints. When resources become available the operation
- // will resume and this field will be false again.
- bool throttled = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
-}
-
-// The request for
-// [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
-message DropDatabaseRequest {
- // Required. The database to be dropped.
- string database = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }
- ];
-}
-
-// The request for
-// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
-message GetDatabaseDdlRequest {
- // Required. The database whose schema we wish to get.
- // Values are of the form
- // `projects//instances//databases/`
- string database = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }
- ];
-}
-
-// The response for
-// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
-message GetDatabaseDdlResponse {
- // A list of formatted DDL statements defining the schema of the database
- // specified in the request.
- repeated string statements = 1;
-}
-
-// The request for
-// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
-message ListDatabaseOperationsRequest {
- // Required. The instance of the database operations.
- // Values are of the form `projects//instances/`.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Instance"
- }
- ];
-
- // An expression that filters the list of returned operations.
- //
- // A filter expression consists of a field name, a
- // comparison operator, and a value for filtering.
- // The value must be a string, a number, or a boolean. The comparison operator
- // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
- // Colon `:` is the contains operator. Filter rules are not case sensitive.
- //
- // The following fields in the [Operation][google.longrunning.Operation]
- // are eligible for filtering:
- //
- // * `name` - The name of the long-running operation
- // * `done` - False if the operation is in progress, else true.
- // * `metadata.@type` - the type of metadata. For example, the type string
- // for
- // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
- // is
- // `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
- // * `metadata.` - any field in metadata.value.
- // * `error` - Error associated with the long-running operation.
- // * `response.@type` - the type of response.
- // * `response.` - any field in response.value.
- //
- // You can combine multiple expressions by enclosing each expression in
- // parentheses. By default, expressions are combined with AND logic. However,
- // you can specify AND, OR, and NOT logic explicitly.
- //
- // Here are a few examples:
- //
- // * `done:true` - The operation is complete.
- // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
- // `(metadata.source_type:BACKUP) AND` \
- // `(metadata.backup_info.backup:backup_howl) AND` \
- // `(metadata.name:restored_howl) AND` \
- // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
- // `(error:*)` - Return operations where:
- // * The operation's metadata type is
- // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
- // * The database is restored from a backup.
- // * The backup name contains "backup_howl".
- // * The restored database's name contains "restored_howl".
- // * The operation started before 2018-03-28T14:50:00Z.
- // * The operation resulted in an error.
- string filter = 2;
-
- // Number of operations to be returned in the response. If 0 or
- // less, defaults to the server's maximum allowed page size.
- int32 page_size = 3;
-
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
- // from a previous
- // [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
- // to the same `parent` and with the same `filter`.
- string page_token = 4;
-}
-
-// The response for
-// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
-message ListDatabaseOperationsResponse {
- // The list of matching database [long-running
- // operations][google.longrunning.Operation]. Each operation's name will be
- // prefixed by the database's name. The operation's
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata.
- repeated google.longrunning.Operation operations = 1;
-
- // `next_page_token` can be sent in a subsequent
- // [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
- // call to fetch more of the matching metadata.
- string next_page_token = 2;
-}
-
-// The request for
-// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
-message RestoreDatabaseRequest {
- // Required. The name of the instance in which to create the
- // restored database. This instance must be in the same project and
- // have the same instance configuration as the instance containing
- // the source backup. Values are of the form
- // `projects//instances/`.
- string parent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Instance"
- }
- ];
-
- // Required. The id of the database to create and restore to. This
- // database must not already exist. The `database_id` appended to
- // `parent` forms the full database name of the form
- // `projects//instances//databases/`.
- string database_id = 2 [(google.api.field_behavior) = REQUIRED];
-
- // Required. The source from which to restore.
- oneof source {
- // Name of the backup from which to restore. Values are of the form
- // `projects//instances//backups/`.
- string backup = 3 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Backup"
- }];
- }
-
- // Optional. An encryption configuration describing the encryption type and
- // key resources in Cloud KMS used to encrypt/decrypt the database to restore
- // to. If this field is not specified, the restored database will use the same
- // encryption configuration as the backup by default, namely
- // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
- // = `USE_CONFIG_DEFAULT_OR_DATABASE_ENCRYPTION`.
- RestoreDatabaseEncryptionConfig encryption_config = 4
- [(google.api.field_behavior) = OPTIONAL];
-}
-
-// Encryption configuration for the restored database.
-message RestoreDatabaseEncryptionConfig {
- // Encryption types for the database to be restored.
- enum EncryptionType {
- // Unspecified. Do not use.
- ENCRYPTION_TYPE_UNSPECIFIED = 0;
-
- // This is the default option when
- // [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
- // is not specified.
- USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1;
-
- // Use Google default encryption.
- GOOGLE_DEFAULT_ENCRYPTION = 2;
-
- // Use customer managed encryption. If specified, `kms_key_name` must
- // must contain a valid Cloud KMS key.
- CUSTOMER_MANAGED_ENCRYPTION = 3;
- }
-
- // Required. The encryption type of the restored database.
- EncryptionType encryption_type = 1 [(google.api.field_behavior) = REQUIRED];
-
- // Optional. The Cloud KMS key that will be used to encrypt/decrypt the
- // restored database. This field should be set only when
- // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
- // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
- // `projects//locations//keyRings//cryptoKeys/`.
- string kms_key_name = 2 [
- (google.api.field_behavior) = OPTIONAL,
- (google.api.resource_reference) = {
- type: "cloudkms.googleapis.com/CryptoKey"
- }
- ];
-}
-
-// Metadata type for the long-running operation returned by
-// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
-message RestoreDatabaseMetadata {
- // Name of the database being created and restored to.
- string name = 1 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
-
- // The type of the restore source.
- RestoreSourceType source_type = 2;
-
- // Information about the source used to restore the database, as specified by
- // `source` in
- // [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest].
- oneof source_info {
- // Information about the backup used to restore the database.
- BackupInfo backup_info = 3;
- }
-
- // The progress of the
- // [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
- // operation.
- OperationProgress progress = 4;
-
- // The time at which cancellation of this operation was received.
- // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
- // starts asynchronous cancellation on a long-running operation. The server
- // makes a best effort to cancel the operation, but success is not guaranteed.
- // Clients can use
- // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
- // other methods to check whether the cancellation succeeded or whether the
- // operation completed despite cancellation. On successful cancellation,
- // the operation is not deleted; instead, it becomes an operation with
- // an [Operation.error][google.longrunning.Operation.error] value with a
- // [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
- // `Code.CANCELLED`.
- google.protobuf.Timestamp cancel_time = 5;
-
- // If exists, the name of the long-running operation that will be used to
- // track the post-restore optimization process to optimize the performance of
- // the restored database, and remove the dependency on the restore source.
- // The name is of the form
- // `projects//instances//databases//operations/`
- // where the is the name of database being created and restored to.
- // The metadata type of the long-running operation is
- // [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
- // This long-running operation will be automatically created by the system
- // after the RestoreDatabase long-running operation completes successfully.
- // This operation will not be created if the restore was not successful.
- string optimize_database_operation_name = 6;
-}
-
-// Metadata type for the long-running operation used to track the progress
-// of optimizations performed on a newly restored database. This long-running
-// operation is automatically created by the system after the successful
-// completion of a database restore, and cannot be cancelled.
-message OptimizeRestoredDatabaseMetadata {
- // Name of the restored database being optimized.
- string name = 1 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
-
- // The progress of the post-restore optimizations.
- OperationProgress progress = 2;
-}
-
-// Indicates the type of the restore source.
-enum RestoreSourceType {
- // No restore associated.
- TYPE_UNSPECIFIED = 0;
-
- // A backup was used as the source of the restore.
- BACKUP = 1;
-}
diff --git a/google/cloud/spanner_admin_database_v1/services/__init__.py b/google/cloud/spanner_admin_database_v1/services/__init__.py
index 42ffdf2bc4..cbf94b283c 100644
--- a/google/cloud/spanner_admin_database_v1/services/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/services/__init__.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py b/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py
index 1fd198c176..580a7ed2a2 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from .client import DatabaseAdminClient
from .async_client import DatabaseAdminAsyncClient
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
index 31b97af061..0e08065a7d 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,55 +13,98 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
+import logging as std_logging
from collections import OrderedDict
-import functools
import re
-from typing import Dict, Sequence, Tuple, Type, Union
-import pkg_resources
-
-import google.api_core.client_options as ClientOptions # type: ignore
-from google.api_core import exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.auth import credentials # type: ignore
+from typing import (
+ Dict,
+ Callable,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+import uuid
+
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
+
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry_async as retries
+from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
+import google.protobuf
+
+
+try:
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.spanner_admin_database_v1.services.database_admin import pagers
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import common
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
-from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
-from google.iam.v1 import policy_pb2 as policy # type: ignore
-from google.longrunning import operations_pb2 as operations # type: ignore
-from google.protobuf import empty_pb2 as empty # type: ignore
-from google.protobuf import field_mask_pb2 as field_mask # type: ignore
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport
from .client import DatabaseAdminClient
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
class DatabaseAdminAsyncClient:
"""Cloud Spanner Database Admin API
- The Cloud Spanner Database Admin API can be used to create,
- drop, and list databases. It also enables updating the schema of
- pre-existing databases. It can be also used to create, delete
- and list backups for a database and to restore from an existing
- backup.
+
+ The Cloud Spanner Database Admin API can be used to:
+
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
"""
_client: DatabaseAdminClient
+ # Copy defaults from the synchronous client for use here.
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
DEFAULT_ENDPOINT = DatabaseAdminClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = DatabaseAdminClient.DEFAULT_MTLS_ENDPOINT
+ _DEFAULT_ENDPOINT_TEMPLATE = DatabaseAdminClient._DEFAULT_ENDPOINT_TEMPLATE
+ _DEFAULT_UNIVERSE = DatabaseAdminClient._DEFAULT_UNIVERSE
backup_path = staticmethod(DatabaseAdminClient.backup_path)
parse_backup_path = staticmethod(DatabaseAdminClient.parse_backup_path)
+ backup_schedule_path = staticmethod(DatabaseAdminClient.backup_schedule_path)
+ parse_backup_schedule_path = staticmethod(
+ DatabaseAdminClient.parse_backup_schedule_path
+ )
crypto_key_path = staticmethod(DatabaseAdminClient.crypto_key_path)
parse_crypto_key_path = staticmethod(DatabaseAdminClient.parse_crypto_key_path)
crypto_key_version_path = staticmethod(DatabaseAdminClient.crypto_key_version_path)
@@ -71,64 +113,151 @@ class DatabaseAdminAsyncClient:
)
database_path = staticmethod(DatabaseAdminClient.database_path)
parse_database_path = staticmethod(DatabaseAdminClient.parse_database_path)
+ database_role_path = staticmethod(DatabaseAdminClient.database_role_path)
+ parse_database_role_path = staticmethod(
+ DatabaseAdminClient.parse_database_role_path
+ )
instance_path = staticmethod(DatabaseAdminClient.instance_path)
parse_instance_path = staticmethod(DatabaseAdminClient.parse_instance_path)
-
+ instance_partition_path = staticmethod(DatabaseAdminClient.instance_partition_path)
+ parse_instance_partition_path = staticmethod(
+ DatabaseAdminClient.parse_instance_partition_path
+ )
common_billing_account_path = staticmethod(
DatabaseAdminClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
DatabaseAdminClient.parse_common_billing_account_path
)
-
common_folder_path = staticmethod(DatabaseAdminClient.common_folder_path)
parse_common_folder_path = staticmethod(
DatabaseAdminClient.parse_common_folder_path
)
-
common_organization_path = staticmethod(
DatabaseAdminClient.common_organization_path
)
parse_common_organization_path = staticmethod(
DatabaseAdminClient.parse_common_organization_path
)
-
common_project_path = staticmethod(DatabaseAdminClient.common_project_path)
parse_common_project_path = staticmethod(
DatabaseAdminClient.parse_common_project_path
)
-
common_location_path = staticmethod(DatabaseAdminClient.common_location_path)
parse_common_location_path = staticmethod(
DatabaseAdminClient.parse_common_location_path
)
- from_service_account_info = DatabaseAdminClient.from_service_account_info
- from_service_account_file = DatabaseAdminClient.from_service_account_file
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ DatabaseAdminAsyncClient: The constructed client.
+ """
+ return DatabaseAdminClient.from_service_account_info.__func__(DatabaseAdminAsyncClient, info, *args, **kwargs) # type: ignore
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ DatabaseAdminAsyncClient: The constructed client.
+ """
+ return DatabaseAdminClient.from_service_account_file.__func__(DatabaseAdminAsyncClient, filename, *args, **kwargs) # type: ignore
+
from_service_account_json = from_service_account_file
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[ClientOptions] = None
+ ):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+                (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ return DatabaseAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
+
@property
def transport(self) -> DatabaseAdminTransport:
- """Return the transport used by the client instance.
+ """Returns the transport used by the client instance.
Returns:
DatabaseAdminTransport: The transport used by the client instance.
"""
return self._client.transport
- get_transport_class = functools.partial(
- type(DatabaseAdminClient).get_transport_class, type(DatabaseAdminClient)
- )
+ @property
+ def api_endpoint(self):
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance.
+ """
+ return self._client._api_endpoint
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used
+ by the client instance.
+ """
+ return self._client._universe_domain
+
+ get_transport_class = DatabaseAdminClient.get_transport_class
def __init__(
self,
*,
- credentials: credentials.Credentials = None,
- transport: Union[str, DatabaseAdminTransport] = "grpc_asyncio",
- client_options: ClientOptions = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[
+ Union[str, DatabaseAdminTransport, Callable[..., DatabaseAdminTransport]]
+ ] = "grpc_asyncio",
+ client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiate the database admin client.
+ """Instantiates the database admin async client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -136,31 +265,47 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- transport (Union[str, ~.DatabaseAdminTransport]): The
- transport to use. If set to None, a transport is chosen
- automatically.
- client_options (ClientOptions): Custom options for the client. It
- won't take effect if a ``transport`` instance is provided.
- (1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
- environment variable can also be used to override the endpoint:
+ transport (Optional[Union[str,DatabaseAdminTransport,Callable[..., DatabaseAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport to use.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the DatabaseAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+                variable, which can have one of the following values:
"always" (always use the default mTLS endpoint), "never" (always
- use the default regular endpoint) and "auto" (auto switch to the
- default mTLS endpoint if client certificate is present, this is
- the default value). However, the ``api_endpoint`` property takes
- precedence if provided.
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
- to provide client certificate for mutual TLS transport. If
+ to provide a client certificate for mTLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
-
self._client = DatabaseAdminClient(
credentials=credentials,
transport=transport,
@@ -168,19 +313,70 @@ def __init__(
client_info=client_info,
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ ): # pragma: NO COVER
+ _LOGGER.debug(
+ "Created client `google.spanner.admin.database_v1.DatabaseAdminAsyncClient`.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "universeDomain": getattr(
+ self._client._transport._credentials, "universe_domain", ""
+ ),
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
+ "credentialsInfo": getattr(
+ self.transport._credentials, "get_cred_info", lambda: None
+ )(),
+ }
+ if hasattr(self._client._transport, "_credentials")
+ else {
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "credentialsType": None,
+ },
+ )
+
async def list_databases(
self,
- request: spanner_database_admin.ListDatabasesRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabasesRequest, dict]
+ ] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabasesAsyncPager:
r"""Lists Cloud Spanner databases.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_databases():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabasesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_databases(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest, dict]]):
The request object. The request for
[ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
parent (:class:`str`):
@@ -191,12 +387,13 @@ async def list_databases(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesAsyncPager:
@@ -208,38 +405,33 @@ async def list_databases(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.ListDatabasesRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.ListDatabasesRequest):
+ request = spanner_database_admin.ListDatabasesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_databases,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_databases
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -247,13 +439,26 @@ async def list_databases(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListDatabasesAsyncPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -261,13 +466,15 @@ async def list_databases(
async def create_database(
self,
- request: spanner_database_admin.CreateDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.CreateDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- create_statement: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ create_statement: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new Cloud Spanner database and starts to prepare it
for serving. The returned [long-running
@@ -280,8 +487,39 @@ async def create_database(
is [Database][google.spanner.admin.database.v1.Database], if
successful.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_create_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateDatabaseRequest(
+ parent="parent_value",
+ create_statement="create_statement_value",
+ )
+
+ # Make the request
+ operation = client.create_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.CreateDatabaseRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CreateDatabaseRequest, dict]]):
The request object. The request for
[CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
parent (:class:`str`):
@@ -304,12 +542,13 @@ async def create_database(
This corresponds to the ``create_statement`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -321,20 +560,25 @@ async def create_database(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, create_statement])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, create_statement]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.CreateDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.CreateDatabaseRequest):
+ request = spanner_database_admin.CreateDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if create_statement is not None:
@@ -342,11 +586,9 @@ async def create_database(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.create_database,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -354,8 +596,16 @@ async def create_database(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation_async.from_gapic(
@@ -370,17 +620,45 @@ async def create_database(
async def get_database(
self,
- request: spanner_database_admin.GetDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseRequest, dict]
+ ] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.Database:
r"""Gets the state of a Cloud Spanner database.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_get_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetDatabaseRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_database(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.GetDatabaseRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseRequest, dict]]):
The request object. The request for
[GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
name (:class:`str`):
@@ -391,50 +669,46 @@ async def get_database(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Database:
A Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.GetDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.GetDatabaseRequest):
+ request = spanner_database_admin.GetDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_database,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -442,21 +716,212 @@ async def get_database(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_database(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseRequest, dict]
+ ] = None,
+ *,
+ database: Optional[spanner_database_admin.Database] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+        ``projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>``
+ and can be used to track the database modification. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_update_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ database = spanner_admin_database_v1.Database()
+ database.name = "name_value"
+
+ request = spanner_admin_database_v1.UpdateDatabaseRequest(
+ database=database,
+ )
+
+ # Make the request
+ operation = client.update_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseRequest, dict]]):
+ The request object. The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+ database (:class:`google.cloud.spanner_admin_database_v1.types.Database`):
+ Required. The database to update. The ``name`` field of
+ the database is of the form
+                ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. The list of fields to update. Currently, only
+ ``enable_drop_protection`` field can be updated.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_database_v1.types.Database`
+ A Cloud Spanner database.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.UpdateDatabaseRequest):
+ request = spanner_database_admin.UpdateDatabaseRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_database
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("database.name", request.database.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ spanner_database_admin.Database,
+ metadata_type=spanner_database_admin.UpdateDatabaseMetadata,
+ )
# Done; return the response.
return response
async def update_database_ddl(
self,
- request: spanner_database_admin.UpdateDatabaseDdlRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseDdlRequest, dict]
+ ] = None,
*,
- database: str = None,
- statements: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[str] = None,
+ statements: Optional[MutableSequence[str]] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Updates the schema of a Cloud Spanner database by
creating/altering/dropping tables, columns, indexes, etc. The
@@ -468,21 +933,52 @@ async def update_database_ddl(
[UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
The operation has no response.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_update_database_ddl():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateDatabaseDdlRequest(
+ database="database_value",
+ statements=['statements_value1', 'statements_value2'],
+ )
+
+ # Make the request
+ operation = client.update_database_ddl(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest`):
- The request object. Enqueues the given DDL statements to
- be applied, in order but not necessarily all at once, to
- the database schema at some point (or points) in the
- future. The server checks that the statements are
- executable (syntactically valid, name tables that exist,
- etc.) before enqueueing them, but they may still fail
- upon
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest, dict]]):
+ The request object. Enqueues the given DDL statements to be applied, in
+ order but not necessarily all at once, to the database
+ schema at some point (or points) in the future. The
+ server checks that the statements are executable
+ (syntactically valid, name tables that exist, etc.)
+ before enqueueing them, but they may still fail upon
later execution (e.g., if a statement from another batch
of statements is applied first and it conflicts in some
way, or if there is some data-related problem like a
- `NULL` value in a column to which `NOT NULL` would be
- added). If a statement fails, all subsequent statements
- in the batch are automatically cancelled.
+ ``NULL`` value in a column to which ``NOT NULL`` would
+ be added). If a statement fails, all subsequent
+ statements in the batch are automatically cancelled.
+
Each batch of statements is assigned a name which can be
used with the
[Operations][google.longrunning.Operations] API to
@@ -494,19 +990,20 @@ async def update_database_ddl(
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- statements (:class:`Sequence[str]`):
+ statements (:class:`MutableSequence[str]`):
Required. DDL statements to be
applied to the database.
This corresponds to the ``statements`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -523,46 +1020,37 @@ async def update_database_ddl(
}
- The JSON representation for Empty is empty JSON
- object {}.
-
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database, statements])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, statements]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.UpdateDatabaseDdlRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.UpdateDatabaseDdlRequest):
+ request = spanner_database_admin.UpdateDatabaseDdlRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if database is not None:
request.database = database
-
if statements:
request.statements.extend(statements)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.update_database_ddl,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_database_ddl
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -570,14 +1058,22 @@ async def update_database_ddl(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=spanner_database_admin.UpdateDatabaseDdlMetadata,
)
@@ -586,19 +1082,45 @@ async def update_database_ddl(
async def drop_database(
self,
- request: spanner_database_admin.DropDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.DropDatabaseRequest, dict]
+ ] = None,
*,
- database: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Drops (aka deletes) a Cloud Spanner database. Completed backups
for the database will be retained according to their
- ``expire_time``.
+ ``expire_time``. Note: Cloud Spanner might continue to accept
+ requests for a few seconds after the database has been deleted.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_drop_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DropDatabaseRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ await client.drop_database(request=request)
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest, dict]]):
The request object. The request for
[DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
database (:class:`str`):
@@ -606,46 +1128,42 @@ async def drop_database(
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.DropDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.DropDatabaseRequest):
+ request = spanner_database_admin.DropDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if database is not None:
request.database = database
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.drop_database,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.drop_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -653,27 +1171,61 @@ async def drop_database(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
await rpc(
- request, retry=retry, timeout=timeout, metadata=metadata,
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
async def get_database_ddl(
self,
- request: spanner_database_admin.GetDatabaseDdlRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseDdlRequest, dict]
+ ] = None,
*,
- database: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.GetDatabaseDdlResponse:
r"""Returns the schema of a Cloud Spanner database as a list of
formatted DDL statements. This method does not show pending
schema updates, those may be queried using the
[Operations][google.longrunning.Operations] API.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_get_database_ddl():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetDatabaseDdlRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ response = await client.get_database_ddl(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlRequest, dict]]):
The request object. The request for
[GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
database (:class:`str`):
@@ -684,12 +1236,13 @@ async def get_database_ddl(
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse:
@@ -698,38 +1251,33 @@ async def get_database_ddl(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.GetDatabaseDdlRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.GetDatabaseDdlRequest):
+ request = spanner_database_admin.GetDatabaseDdlRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if database is not None:
request.database = database
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_database_ddl,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_database_ddl
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -737,21 +1285,29 @@ async def get_database_ddl(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def set_iam_policy(
self,
- request: iam_policy.SetIamPolicyRequest = None,
+ request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> policy.Policy:
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
r"""Sets the access control policy on a database or backup resource.
Replaces any existing policy.
@@ -762,10 +1318,36 @@ async def set_iam_policy(
permission on
[resource][google.iam.v1.SetIamPolicyRequest.resource].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_set_iam_policy():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`):
- The request object. Request message for `SetIamPolicy`
- method.
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]):
+ The request object. Request message for ``SetIamPolicy`` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being specified. See the
@@ -775,97 +1357,73 @@ async def set_iam_policy(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
- Defines an Identity and Access Management (IAM) policy. It is used to
- specify access control policies for Cloud Platform
- resources.
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
- one or more members to a single role. Members can be
- user accounts, service accounts, Google groups, and
- domains (such as G Suite). A role is a named list of
- permissions (defined by IAM or configured by users).
- A binding can optionally specify a condition, which
- is a logic expression that further constrains the
- role binding based on attributes about the request
- and/or target resource.
-
- **JSON Example**
-
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": ["user:eve@example.com"],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ]
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
- }
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
- **YAML Example**
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z')
+ **YAML example:**
+
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
- [IAM developer's
- guide](\ https://cloud.google.com/iam/docs).
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
if isinstance(request, dict):
- request = iam_policy.SetIamPolicyRequest(**request)
-
+ request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
- request = iam_policy.SetIamPolicyRequest(resource=resource,)
+ request = iam_policy_pb2.SetIamPolicyRequest(resource=resource)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.set_iam_policy,
- default_timeout=30.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.set_iam_policy
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -873,21 +1431,29 @@ async def set_iam_policy(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def get_iam_policy(
self,
- request: iam_policy.GetIamPolicyRequest = None,
+ request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> policy.Policy:
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
r"""Gets the access control policy for a database or backup
resource. Returns an empty policy if a database or backup exists
but does not have a policy set.
@@ -899,10 +1465,36 @@ async def get_iam_policy(
permission on
[resource][google.iam.v1.GetIamPolicyRequest.resource].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_get_iam_policy():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`):
- The request object. Request message for `GetIamPolicy`
- method.
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]):
+ The request object. Request message for ``GetIamPolicy`` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being requested. See the
@@ -912,105 +1504,73 @@ async def get_iam_policy(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
- Defines an Identity and Access Management (IAM) policy. It is used to
- specify access control policies for Cloud Platform
- resources.
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
- one or more members to a single role. Members can be
- user accounts, service accounts, Google groups, and
- domains (such as G Suite). A role is a named list of
- permissions (defined by IAM or configured by users).
- A binding can optionally specify a condition, which
- is a logic expression that further constrains the
- role binding based on attributes about the request
- and/or target resource.
-
- **JSON Example**
-
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": ["user:eve@example.com"],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ]
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
- }
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
+
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
- **YAML Example**
+ **YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z')
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
- [IAM developer's
- guide](\ https://cloud.google.com/iam/docs).
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
if isinstance(request, dict):
- request = iam_policy.GetIamPolicyRequest(**request)
-
+ request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
- request = iam_policy.GetIamPolicyRequest(resource=resource,)
+ request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_iam_policy,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=30.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_iam_policy
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1018,22 +1578,30 @@ async def get_iam_policy(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def test_iam_permissions(
self,
- request: iam_policy.TestIamPermissionsRequest = None,
+ request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
*,
- resource: str = None,
- permissions: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> iam_policy.TestIamPermissionsResponse:
+ resource: Optional[str] = None,
+ permissions: Optional[MutableSequence[str]] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that the caller has on the specified
database or backup resource.
@@ -1045,10 +1613,37 @@ async def test_iam_permissions(
in a NOT_FOUND error if the user has ``spanner.backups.list``
permission on the containing instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_test_iam_permissions():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = await client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`):
- The request object. Request message for
- `TestIamPermissions` method.
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]):
+ The request object. Request message for ``TestIamPermissions`` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy detail is being requested. See
@@ -1058,7 +1653,7 @@ async def test_iam_permissions(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- permissions (:class:`Sequence[str]`):
+ permissions (:class:`MutableSequence[str]`):
The set of permissions to check for the ``resource``.
Permissions with wildcards (such as '*' or 'storage.*')
are not allowed. For more information see `IAM
@@ -1067,44 +1662,45 @@ async def test_iam_permissions(
This corresponds to the ``permissions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource, permissions])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource, permissions]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
if isinstance(request, dict):
- request = iam_policy.TestIamPermissionsRequest(**request)
-
+ request = iam_policy_pb2.TestIamPermissionsRequest(**request)
elif not request:
- request = iam_policy.TestIamPermissionsRequest(
- resource=resource, permissions=permissions,
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource=resource, permissions=permissions
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.test_iam_permissions,
- default_timeout=30.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.test_iam_permissions
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1112,22 +1708,30 @@ async def test_iam_permissions(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def create_backup(
self,
- request: gsad_backup.CreateBackupRequest = None,
+ request: Optional[Union[gsad_backup.CreateBackupRequest, dict]] = None,
*,
- parent: str = None,
- backup: gsad_backup.Backup = None,
- backup_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ backup: Optional[gsad_backup.Backup] = None,
+ backup_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Starts creating a new Cloud Spanner Backup. The returned backup
[long-running operation][google.longrunning.Operation] will have
@@ -1143,8 +1747,39 @@ async def create_backup(
backup creation per database. Backup creation of different
databases can run concurrently.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_create_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ )
+
+ # Make the request
+ operation = client.create_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.CreateBackupRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CreateBackupRequest, dict]]):
The request object. The request for
[CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
parent (:class:`str`):
@@ -1173,12 +1808,13 @@ async def create_backup(
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -1190,20 +1826,25 @@ async def create_backup(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, backup, backup_id])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup, backup_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = gsad_backup.CreateBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup.CreateBackupRequest):
+ request = gsad_backup.CreateBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if backup is not None:
@@ -1213,11 +1854,9 @@ async def create_backup(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.create_backup,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1225,8 +1864,16 @@ async def create_backup(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation_async.from_gapic(
@@ -1239,20 +1886,228 @@ async def create_backup(
# Done; return the response.
return response
+ async def copy_backup(
+ self,
+ request: Optional[Union[backup.CopyBackupRequest, dict]] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_id: Optional[str] = None,
+ source_backup: Optional[str] = None,
+ expire_time: Optional[timestamp_pb2.Timestamp] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Starts copying a Cloud Spanner Backup. The returned backup
+ [long-running operation][google.longrunning.Operation] will have
+ a name of the format
+ ``projects//instances//backups//operations/``
+ and can be used to track copying of the backup. The operation is
+ associated with the destination backup. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Backup][google.spanner.admin.database.v1.Backup], if
+ successful. Cancelling the returned operation will stop the
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_copy_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CopyBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ )
+
+ # Make the request
+ operation = client.copy_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CopyBackupRequest, dict]]):
+ The request object. The request for
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+ parent (:class:`str`):
+ Required. The name of the destination instance that will
+ contain the backup copy. Values are of the form:
+ ``projects//instances/``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_id (:class:`str`):
+ Required. The id of the backup copy. The ``backup_id``
+ appended to ``parent`` forms the full backup_uri of the
+ form
+ ``projects//instances//backups/``.
+
+ This corresponds to the ``backup_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ source_backup (:class:`str`):
+ Required. The source backup to be copied. The source
+ backup needs to be in READY state for it to be copied.
+ Once CopyBackup is in progress, the source backup cannot
+ be deleted or cleaned up on expiration until CopyBackup
+ is finished. Values are of the form:
+ ``projects//instances//backups/``.
+
+ This corresponds to the ``source_backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`):
+ Required. The expiration time of the backup in
+ microsecond granularity. The expiration time must be at
+ least 6 hours and at most 366 days from the
+ ``create_time`` of the source backup. Once the
+ ``expire_time`` has passed, the backup is eligible to be
+ automatically deleted by Cloud Spanner to free the
+ resources used by the backup.
+
+ This corresponds to the ``expire_time`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_database_v1.types.Backup`
+ A backup of a Cloud Spanner database.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_id, source_backup, expire_time]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.CopyBackupRequest):
+ request = backup.CopyBackupRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if backup_id is not None:
+ request.backup_id = backup_id
+ if source_backup is not None:
+ request.source_backup = source_backup
+ if expire_time is not None:
+ request.expire_time = expire_time
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.copy_backup
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ backup.Backup,
+ metadata_type=backup.CopyBackupMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
async def get_backup(
self,
- request: backup.GetBackupRequest = None,
+ request: Optional[Union[backup.GetBackupRequest, dict]] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> backup.Backup:
r"""Gets metadata on a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_get_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_backup(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.GetBackupRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetBackupRequest, dict]]):
The request object. The request for
[GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
name (:class:`str`):
@@ -1262,50 +2117,46 @@ async def get_backup(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Backup:
A backup of a Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.GetBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.GetBackupRequest):
+ request = backup.GetBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_backup,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1313,27 +2164,60 @@ async def get_backup(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def update_backup(
self,
- request: gsad_backup.UpdateBackupRequest = None,
+ request: Optional[Union[gsad_backup.UpdateBackupRequest, dict]] = None,
*,
- backup: gsad_backup.Backup = None,
- update_mask: field_mask.FieldMask = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ backup: Optional[gsad_backup.Backup] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> gsad_backup.Backup:
r"""Updates a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_update_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateBackupRequest(
+ )
+
+ # Make the request
+ response = await client.update_backup(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.UpdateBackupRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupRequest, dict]]):
The request object. The request for
[UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
backup (:class:`google.cloud.spanner_admin_database_v1.types.Backup`):
@@ -1342,8 +2226,7 @@ async def update_backup(
required. Other fields are ignored. Update is only
supported for the following fields:
- - ``backup.expire_time``.
-
+ - ``backup.expire_time``.
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -1360,32 +2243,38 @@ async def update_backup(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Backup:
A backup of a Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([backup, update_mask])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = gsad_backup.UpdateBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup.UpdateBackupRequest):
+ request = gsad_backup.UpdateBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if backup is not None:
request.backup = backup
if update_mask is not None:
@@ -1393,19 +2282,9 @@ async def update_backup(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.update_backup,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1415,26 +2294,57 @@ async def update_backup(
),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def delete_backup(
self,
- request: backup.DeleteBackupRequest = None,
+ request: Optional[Union[backup.DeleteBackupRequest, dict]] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Deletes a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_delete_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DeleteBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_backup(request=request)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.DeleteBackupRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupRequest, dict]]):
The request object. The request for
[DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
name (:class:`str`):
@@ -1445,46 +2355,42 @@ async def delete_backup(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.DeleteBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.DeleteBackupRequest):
+ request = backup.DeleteBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.delete_backup,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1492,26 +2398,59 @@ async def delete_backup(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
await rpc(
- request, retry=retry, timeout=timeout, metadata=metadata,
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
async def list_backups(
self,
- request: backup.ListBackupsRequest = None,
+ request: Optional[Union[backup.ListBackupsRequest, dict]] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupsAsyncPager:
r"""Lists completed and pending backups. Backups returned are
ordered by ``create_time`` in descending order, starting from
the most recent ``create_time``.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_backups():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backups(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.ListBackupsRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListBackupsRequest, dict]]):
The request object. The request for
[ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
parent (:class:`str`):
@@ -1521,12 +2460,13 @@ async def list_backups(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsAsyncPager:
@@ -1538,38 +2478,33 @@ async def list_backups(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.ListBackupsRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.ListBackupsRequest):
+ request = backup.ListBackupsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_backups,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_backups
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1577,13 +2512,26 @@ async def list_backups(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListBackupsAsyncPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -1591,14 +2539,16 @@ async def list_backups(
async def restore_database(
self,
- request: spanner_database_admin.RestoreDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.RestoreDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- database_id: str = None,
- backup: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ database_id: Optional[str] = None,
+ backup: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Create a new database by restoring from a completed backup. The
new database must be in the same project and in an instance with
@@ -1620,8 +2570,40 @@ async def restore_database(
without waiting for the optimize operation associated with the
first restore to complete.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_restore_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.RestoreDatabaseRequest(
+ backup="backup_value",
+ parent="parent_value",
+ database_id="database_id_value",
+ )
+
+ # Make the request
+ operation = client.restore_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.RestoreDatabaseRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.RestoreDatabaseRequest, dict]]):
The request object. The request for
[RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
parent (:class:`str`):
@@ -1652,12 +2634,13 @@ async def restore_database(
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -1669,20 +2652,25 @@ async def restore_database(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, database_id, backup])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, database_id, backup]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.RestoreDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.RestoreDatabaseRequest):
+ request = spanner_database_admin.RestoreDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if database_id is not None:
@@ -1692,11 +2680,9 @@ async def restore_database(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.restore_database,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.restore_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1704,8 +2690,16 @@ async def restore_database(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation_async.from_gapic(
@@ -1720,12 +2714,14 @@ async def restore_database(
async def list_database_operations(
self,
- request: spanner_database_admin.ListDatabaseOperationsRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseOperationsRequest, dict]
+ ] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabaseOperationsAsyncPager:
r"""Lists database
[longrunning-operations][google.longrunning.Operation]. A
@@ -1738,8 +2734,35 @@ async def list_database_operations(
completed/failed/canceled within the last 7 days, and pending
operations.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_database_operations():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabaseOperationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_database_operations(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest, dict]]):
The request object. The request for
[ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
parent (:class:`str`):
@@ -1750,12 +2773,13 @@ async def list_database_operations(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseOperationsAsyncPager:
@@ -1767,38 +2791,35 @@ async def list_database_operations(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.ListDatabaseOperationsRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_database_admin.ListDatabaseOperationsRequest
+ ):
+ request = spanner_database_admin.ListDatabaseOperationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_database_operations,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_database_operations
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1806,13 +2827,26 @@ async def list_database_operations(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListDatabaseOperationsAsyncPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -1820,12 +2854,12 @@ async def list_database_operations(
async def list_backup_operations(
self,
- request: backup.ListBackupOperationsRequest = None,
+ request: Optional[Union[backup.ListBackupOperationsRequest, dict]] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupOperationsAsyncPager:
r"""Lists the backup [long-running
operations][google.longrunning.Operation] in the given instance.
@@ -1840,8 +2874,35 @@ async def list_backup_operations(
``operation.metadata.value.progress.start_time`` in descending
order starting from the most recently started operation.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_backup_operations():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupOperationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backup_operations(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest, dict]]):
The request object. The request for
[ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
parent (:class:`str`):
@@ -1852,12 +2913,13 @@ async def list_backup_operations(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsAsyncPager:
@@ -1869,38 +2931,33 @@ async def list_backup_operations(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.ListBackupOperationsRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.ListBackupOperationsRequest):
+ request = backup.ListBackupOperationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_backup_operations,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
- ),
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_backup_operations
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1908,27 +2965,1254 @@ async def list_backup_operations(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListBackupOperationsAsyncPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
return response
+ async def list_database_roles(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseRolesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListDatabaseRolesAsyncPager:
+ r"""Lists Cloud Spanner database roles.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_database_roles():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabaseRolesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_database_roles(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
-try:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution(
- "google-cloud-spanner-admin-database",
- ).version,
- )
-except pkg_resources.DistributionNotFound:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesRequest, dict]]):
+ The request object. The request for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+ parent (:class:`str`):
+ Required. The database whose roles should be listed.
+ Values are of the form
+                ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseRolesAsyncPager:
+ The response for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.ListDatabaseRolesRequest):
+ request = spanner_database_admin.ListDatabaseRolesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_database_roles
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListDatabaseRolesAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def add_split_points(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.AddSplitPointsRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ split_points: Optional[
+ MutableSequence[spanner_database_admin.SplitPoints]
+ ] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.AddSplitPointsResponse:
+ r"""Adds split points to specified tables, indexes of a
+ database.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_add_split_points():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.AddSplitPointsRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ response = await client.add_split_points(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.AddSplitPointsRequest, dict]]):
+ The request object. The request for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+ database (:class:`str`):
+ Required. The database on whose tables/indexes split
+ points are to be added. Values are of the form
+                ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ split_points (:class:`MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]`):
+ Required. The split points to add.
+ This corresponds to the ``split_points`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.AddSplitPointsResponse:
+ The response for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, split_points]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.AddSplitPointsRequest):
+ request = spanner_database_admin.AddSplitPointsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if split_points:
+ request.split_points.extend(split_points)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.add_split_points
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def create_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.CreateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ backup_schedule_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Creates a new backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_create_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateBackupScheduleRequest(
+ parent="parent_value",
+ backup_schedule_id="backup_schedule_id_value",
+ )
+
+ # Make the request
+ response = await client.create_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CreateBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
+ parent (:class:`str`):
+ Required. The name of the database
+ that this backup schedule applies to.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule (:class:`google.cloud.spanner_admin_database_v1.types.BackupSchedule`):
+ Required. The backup schedule to
+ create.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule_id (:class:`str`):
+ Required. The Id to use for the backup schedule. The
+ ``backup_schedule_id`` appended to ``parent`` forms the
+ full backup schedule name of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``backup_schedule_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_schedule, backup_schedule_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.CreateBackupScheduleRequest):
+ request = gsad_backup_schedule.CreateBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if backup_schedule_id is not None:
+ request.backup_schedule_id = backup_schedule_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_backup_schedule(
+ self,
+ request: Optional[Union[backup_schedule.GetBackupScheduleRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup_schedule.BackupSchedule:
+ r"""Gets backup schedule for the input schedule name.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_get_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+ name (:class:`str`):
+ Required. The name of the schedule to retrieve. Values
+ are of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.GetBackupScheduleRequest):
+ request = backup_schedule.GetBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.UpdateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Updates a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_update_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateBackupScheduleRequest(
+ )
+
+ # Make the request
+ response = await client.update_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+ backup_schedule (:class:`google.cloud.spanner_admin_database_v1.types.BackupSchedule`):
+ Required. The backup schedule to update.
+ ``backup_schedule.name``, and the fields to be updated
+ as specified by ``update_mask`` are required. Other
+ fields are ignored.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. A mask specifying which
+ fields in the BackupSchedule resource
+ should be updated. This mask is relative
+ to the BackupSchedule resource, not to
+ the request message. The field mask must
+ always be specified; this prevents any
+ future fields from being erased
+ accidentally.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+                BackupSchedule expresses the
+                automated backup creation specification
+                for a Spanner database.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup_schedule, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.UpdateBackupScheduleRequest):
+ request = gsad_backup_schedule.UpdateBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("backup_schedule.name", request.backup_schedule.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_backup_schedule(
+ self,
+ request: Optional[
+ Union[backup_schedule.DeleteBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_delete_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DeleteBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_backup_schedule(request=request)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
+ name (:class:`str`):
+                Required. The name of the schedule to delete. Values are
+                of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<schedule>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.DeleteBackupScheduleRequest):
+ request = backup_schedule.DeleteBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def list_backup_schedules(
+ self,
+ request: Optional[
+ Union[backup_schedule.ListBackupSchedulesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListBackupSchedulesAsyncPager:
+ r"""Lists all the backup schedules for the database.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_backup_schedules():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupSchedulesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+            page_result = await client.list_backup_schedules(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest, dict]]):
+ The request object. The request for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+ parent (:class:`str`):
+                Required. Database is the parent
+                resource whose backup schedules should
+                be listed. Values are of the form
+                projects/<project>/instances/<instance>/databases/<database>
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupSchedulesAsyncPager:
+ The response for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.ListBackupSchedulesRequest):
+ request = backup_schedule.ListBackupSchedulesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_backup_schedules
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListBackupSchedulesAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def internal_update_graph_operation(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.InternalUpdateGraphOperationRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ operation_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.InternalUpdateGraphOperationResponse:
+ r"""This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_internal_update_graph_operation():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.InternalUpdateGraphOperationRequest(
+ database="database_value",
+ operation_id="operation_id_value",
+ vm_identity_token="vm_identity_token_value",
+ )
+
+ # Make the request
+ response = await client.internal_update_graph_operation(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationRequest, dict]]):
+ The request object. Internal request proto, do not use
+ directly.
+ database (:class:`str`):
+ Internal field, do not use directly.
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ operation_id (:class:`str`):
+ Internal field, do not use directly.
+ This corresponds to the ``operation_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationResponse:
+ Internal response proto, do not use
+ directly.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, operation_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_database_admin.InternalUpdateGraphOperationRequest
+ ):
+ request = spanner_database_admin.InternalUpdateGraphOperationRequest(
+ request
+ )
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if operation_id is not None:
+ request.operation_id = operation_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.internal_update_graph_operation
+ ]
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_operations(
+ self,
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.ListOperationsResponse:
+ r"""Lists operations that match the specified filter in the request.
+
+ Args:
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
+ The request object. Request message for
+ `ListOperations` method.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ ~.operations_pb2.ListOperationsResponse:
+ Response message for ``ListOperations`` method.
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.ListOperationsRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self.transport._wrapped_methods[self._client._transport.list_operations]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_operation(
+ self,
+ request: Optional[operations_pb2.GetOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Gets the latest state of a long-running operation.
+
+ Args:
+ request (:class:`~.operations_pb2.GetOperationRequest`):
+ The request object. Request message for
+ `GetOperation` method.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ ~.operations_pb2.Operation:
+ An ``Operation`` object.
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.GetOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self.transport._wrapped_methods[self._client._transport.get_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_operation(
+ self,
+ request: Optional[operations_pb2.DeleteOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a long-running operation.
+
+ This method indicates that the client is no longer interested
+ in the operation result. It does not cancel the operation.
+ If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`.
+
+ Args:
+ request (:class:`~.operations_pb2.DeleteOperationRequest`):
+ The request object. Request message for
+ `DeleteOperation` method.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ None
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.DeleteOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self.transport._wrapped_methods[self._client._transport.delete_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def cancel_operation(
+ self,
+ request: Optional[operations_pb2.CancelOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Starts asynchronous cancellation on a long-running operation.
+
+ The server makes a best effort to cancel the operation, but success
+ is not guaranteed. If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`.
+
+ Args:
+ request (:class:`~.operations_pb2.CancelOperationRequest`):
+ The request object. Request message for
+ `CancelOperation` method.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ None
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.CancelOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self.transport._wrapped_methods[self._client._transport.cancel_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def __aenter__(self) -> "DatabaseAdminAsyncClient":
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
+
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
+
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
__all__ = ("DatabaseAdminAsyncClient",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
index 4dfb39e47b..057aa677f8 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,41 +13,78 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
-from distutils import util
+from http import HTTPStatus
+import json
+import logging as std_logging
import os
import re
-from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
-import pkg_resources
-
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.auth import credentials # type: ignore
+from typing import (
+ Dict,
+ Callable,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+import uuid
+import warnings
+
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
+
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
+import google.protobuf
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.spanner_admin_database_v1.services.database_admin import pagers
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import common
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
-from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
-from google.iam.v1 import policy_pb2 as policy # type: ignore
-from google.longrunning import operations_pb2 as operations # type: ignore
-from google.protobuf import empty_pb2 as empty # type: ignore
-from google.protobuf import field_mask_pb2 as field_mask # type: ignore
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import DatabaseAdminGrpcTransport
from .transports.grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport
+from .transports.rest import DatabaseAdminRestTransport
class DatabaseAdminClientMeta(type):
@@ -62,9 +98,13 @@ class DatabaseAdminClientMeta(type):
_transport_registry = OrderedDict() # type: Dict[str, Type[DatabaseAdminTransport]]
_transport_registry["grpc"] = DatabaseAdminGrpcTransport
_transport_registry["grpc_asyncio"] = DatabaseAdminGrpcAsyncIOTransport
+ _transport_registry["rest"] = DatabaseAdminRestTransport
- def get_transport_class(cls, label: str = None,) -> Type[DatabaseAdminTransport]:
- """Return an appropriate transport class.
+ def get_transport_class(
+ cls,
+ label: Optional[str] = None,
+ ) -> Type[DatabaseAdminTransport]:
+ """Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
@@ -84,16 +124,19 @@ def get_transport_class(cls, label: str = None,) -> Type[DatabaseAdminTransport]
class DatabaseAdminClient(metaclass=DatabaseAdminClientMeta):
"""Cloud Spanner Database Admin API
- The Cloud Spanner Database Admin API can be used to create,
- drop, and list databases. It also enables updating the schema of
- pre-existing databases. It can be also used to create, delete
- and list backups for a database and to restore from an existing
- backup.
+
+ The Cloud Spanner Database Admin API can be used to:
+
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
- """Convert api endpoint to mTLS endpoint.
+ """Converts api endpoint to mTLS endpoint.
+
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
@@ -120,14 +163,47 @@ def _get_default_mtls_endpoint(api_endpoint):
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
DEFAULT_ENDPOINT = "spanner.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
+ _DEFAULT_ENDPOINT_TEMPLATE = "spanner.{UNIVERSE_DOMAIN}"
+ _DEFAULT_UNIVERSE = "googleapis.com"
+
+ @staticmethod
+ def _use_client_cert_effective():
+ """Returns whether client certificate should be used for mTLS if the
+ google-auth version supports should_use_client_cert automatic mTLS enablement.
+
+ Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var.
+
+ Returns:
+ bool: whether client certificate should be used for mTLS
+ Raises:
+ ValueError: (If using a version of google-auth without should_use_client_cert and
+ GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.)
+ """
+ # check if google-auth version supports should_use_client_cert for automatic mTLS enablement
+ if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER
+ return mtls.should_use_client_cert()
+ else: # pragma: NO COVER
+ # if unsupported, fallback to reading from env var
+ use_client_cert_str = os.getenv(
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
+ ).lower()
+ if use_client_cert_str not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
+ " either `true` or `false`"
+ )
+ return use_client_cert_str == "true"
+
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
- """Creates an instance of this client using the provided credentials info.
+ """Creates an instance of this client using the provided credentials
+ info.
Args:
info (dict): The service account private key info.
@@ -144,7 +220,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs):
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
- file.
+ file.
Args:
filename (str): The path to the service account private key json
@@ -163,34 +239,68 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
@property
def transport(self) -> DatabaseAdminTransport:
- """Return the transport used by the client instance.
+ """Returns the transport used by the client instance.
Returns:
- DatabaseAdminTransport: The transport used by the client instance.
+ DatabaseAdminTransport: The transport used by the client
+ instance.
"""
return self._transport
@staticmethod
- def backup_path(project: str, instance: str, backup: str,) -> str:
- """Return a fully-qualified backup string."""
+ def backup_path(
+ project: str,
+ instance: str,
+ backup: str,
+ ) -> str:
+ """Returns a fully-qualified backup string."""
return "projects/{project}/instances/{instance}/backups/{backup}".format(
- project=project, instance=instance, backup=backup,
+ project=project,
+ instance=instance,
+ backup=backup,
)
@staticmethod
def parse_backup_path(path: str) -> Dict[str, str]:
- """Parse a backup path into its component segments."""
+ """Parses a backup path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/backups/(?P<backup>.+?)$",
path,
)
return m.groupdict() if m else {}
+ @staticmethod
+ def backup_schedule_path(
+ project: str,
+ instance: str,
+ database: str,
+ schedule: str,
+ ) -> str:
+ """Returns a fully-qualified backup_schedule string."""
+ return "projects/{project}/instances/{instance}/databases/{database}/backupSchedules/{schedule}".format(
+ project=project,
+ instance=instance,
+ database=database,
+ schedule=schedule,
+ )
+
+ @staticmethod
+ def parse_backup_schedule_path(path: str) -> Dict[str, str]:
+ """Parses a backup_schedule path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/databases/(?P<database>.+?)/backupSchedules/(?P<schedule>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
@staticmethod
def crypto_key_path(
- project: str, location: str, key_ring: str, crypto_key: str,
+ project: str,
+ location: str,
+ key_ring: str,
+ crypto_key: str,
) -> str:
- """Return a fully-qualified crypto_key string."""
+ """Returns a fully-qualified crypto_key string."""
return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(
project=project,
location=location,
@@ -200,7 +310,7 @@ def crypto_key_path(
@staticmethod
def parse_crypto_key_path(path: str) -> Dict[str, str]:
- """Parse a crypto_key path into its component segments."""
+ """Parses a crypto_key path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/keyRings/(?P<key_ring>.+?)/cryptoKeys/(?P<crypto_key>.+?)$",
path,
@@ -215,7 +325,7 @@ def crypto_key_version_path(
crypto_key: str,
crypto_key_version: str,
) -> str:
- """Return a fully-qualified crypto_key_version string."""
+ """Returns a fully-qualified crypto_key_version string."""
return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format(
project=project,
location=location,
@@ -226,7 +336,7 @@ def crypto_key_version_path(
@staticmethod
def parse_crypto_key_version_path(path: str) -> Dict[str, str]:
- """Parse a crypto_key_version path into its component segments."""
+ """Parses a crypto_key_version path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/keyRings/(?P<key_ring>.+?)/cryptoKeys/(?P<crypto_key>.+?)/cryptoKeyVersions/(?P<crypto_key_version>.+?)$",
path,
@@ -234,15 +344,21 @@ def parse_crypto_key_version_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def database_path(project: str, instance: str, database: str,) -> str:
- """Return a fully-qualified database string."""
+ def database_path(
+ project: str,
+ instance: str,
+ database: str,
+ ) -> str:
+ """Returns a fully-qualified database string."""
return "projects/{project}/instances/{instance}/databases/{database}".format(
- project=project, instance=instance, database=database,
+ project=project,
+ instance=instance,
+ database=database,
)
@staticmethod
def parse_database_path(path: str) -> Dict[str, str]:
- """Parse a database path into its component segments."""
+ """Parses a database path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/databases/(?P<database>.+?)$",
path,
@@ -250,21 +366,73 @@ def parse_database_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def instance_path(project: str, instance: str,) -> str:
- """Return a fully-qualified instance string."""
+ def database_role_path(
+ project: str,
+ instance: str,
+ database: str,
+ role: str,
+ ) -> str:
+ """Returns a fully-qualified database_role string."""
+ return "projects/{project}/instances/{instance}/databases/{database}/databaseRoles/{role}".format(
+ project=project,
+ instance=instance,
+ database=database,
+ role=role,
+ )
+
+ @staticmethod
+ def parse_database_role_path(path: str) -> Dict[str, str]:
+ """Parses a database_role path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/databases/(?P<database>.+?)/databaseRoles/(?P<role>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def instance_path(
+ project: str,
+ instance: str,
+ ) -> str:
+ """Returns a fully-qualified instance string."""
return "projects/{project}/instances/{instance}".format(
- project=project, instance=instance,
+ project=project,
+ instance=instance,
)
@staticmethod
def parse_instance_path(path: str) -> Dict[str, str]:
- """Parse a instance path into its component segments."""
"""Parses an instance path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
- def common_billing_account_path(billing_account: str,) -> str:
- """Return a fully-qualified billing_account string."""
+ def instance_partition_path(
+ project: str,
+ instance: str,
+ instance_partition: str,
+ ) -> str:
+ """Returns a fully-qualified instance_partition string."""
+ return "projects/{project}/instances/{instance}/instancePartitions/{instance_partition}".format(
+ project=project,
+ instance=instance,
+ instance_partition=instance_partition,
+ )
+
+ @staticmethod
+ def parse_instance_partition_path(path: str) -> Dict[str, str]:
+ """Parses an instance_partition path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/instancePartitions/(?P<instance_partition>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(
+ billing_account: str,
+ ) -> str:
+ """Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@@ -276,9 +444,13 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def common_folder_path(folder: str,) -> str:
- """Return a fully-qualified folder string."""
- return "folders/{folder}".format(folder=folder,)
+ def common_folder_path(
+ folder: str,
+ ) -> str:
+ """Returns a fully-qualified folder string."""
+ return "folders/{folder}".format(
+ folder=folder,
+ )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
@@ -287,9 +459,13 @@ def parse_common_folder_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def common_organization_path(organization: str,) -> str:
- """Return a fully-qualified organization string."""
- return "organizations/{organization}".format(organization=organization,)
+ def common_organization_path(
+ organization: str,
+ ) -> str:
+ """Returns a fully-qualified organization string."""
+ return "organizations/{organization}".format(
+ organization=organization,
+ )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
@@ -298,9 +474,13 @@ def parse_common_organization_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def common_project_path(project: str,) -> str:
- """Return a fully-qualified project string."""
- return "projects/{project}".format(project=project,)
+ def common_project_path(
+ project: str,
+ ) -> str:
+ """Returns a fully-qualified project string."""
+ return "projects/{project}".format(
+ project=project,
+ )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
@@ -309,10 +489,14 @@ def parse_common_project_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def common_location_path(project: str, location: str,) -> str:
- """Return a fully-qualified location string."""
+ def common_location_path(
+ project: str,
+ location: str,
+ ) -> str:
+ """Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
- project=project, location=location,
+ project=project,
+ location=location,
)
@staticmethod
@@ -321,15 +505,244 @@ def parse_common_location_path(path: str) -> Dict[str, str]:
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
+ ):
+ """Deprecated. Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+
+ warnings.warn(
+ "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
+ DeprecationWarning,
+ )
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+ use_client_cert = DatabaseAdminClient._use_client_cert_effective()
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError(
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Figure out the client cert source to use.
+ client_cert_source = None
+ if use_client_cert:
+ if client_options.client_cert_source:
+ client_cert_source = client_options.client_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ elif use_mtls_endpoint == "always" or (
+ use_mtls_endpoint == "auto" and client_cert_source
+ ):
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = cls.DEFAULT_ENDPOINT
+
+ return api_endpoint, client_cert_source
+
+ @staticmethod
+ def _read_environment_variables():
+ """Returns the environment variables used by the client.
+
+ Returns:
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
+
+ Raises:
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
+ any of ["true", "false"].
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
+ is not any of ["auto", "never", "always"].
+ """
+ use_client_cert = DatabaseAdminClient._use_client_cert_effective()
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
+ universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError(
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+ return use_client_cert, use_mtls_endpoint, universe_domain_env
+
+ @staticmethod
+ def _get_client_cert_source(provided_cert_source, use_cert_flag):
+ """Return the client cert source to be used by the client.
+
+ Args:
+ provided_cert_source (bytes): The client certificate source provided.
+ use_cert_flag (bool): A flag indicating whether to use the client certificate.
+
+ Returns:
+ bytes or None: The client cert source to be used by the client.
+ """
+ client_cert_source = None
+ if use_cert_flag:
+ if provided_cert_source:
+ client_cert_source = provided_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+ return client_cert_source
+
+ @staticmethod
+ def _get_api_endpoint(
+ api_override, client_cert_source, universe_domain, use_mtls_endpoint
+ ):
+ """Return the API endpoint used by the client.
+
+ Args:
+ api_override (str): The API endpoint override. If specified, this is always
+ the return value of this function and the other arguments are not used.
+ client_cert_source (bytes): The client certificate source used by the client.
+ universe_domain (str): The universe domain used by the client.
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters.
+ Possible values are "always", "auto", or "never".
+
+ Returns:
+ str: The API endpoint to be used by the client.
+ """
+ if api_override is not None:
+ api_endpoint = api_override
+ elif use_mtls_endpoint == "always" or (
+ use_mtls_endpoint == "auto" and client_cert_source
+ ):
+ _default_universe = DatabaseAdminClient._DEFAULT_UNIVERSE
+ if universe_domain != _default_universe:
+ raise MutualTLSChannelError(
+ f"mTLS is not supported in any universe other than {_default_universe}."
+ )
+ api_endpoint = DatabaseAdminClient.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = DatabaseAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=universe_domain
+ )
+ return api_endpoint
+
+ @staticmethod
+ def _get_universe_domain(
+ client_universe_domain: Optional[str], universe_domain_env: Optional[str]
+ ) -> str:
+ """Return the universe domain used by the client.
+
+ Args:
+ client_universe_domain (Optional[str]): The universe domain configured via the client options.
+ universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable.
+
+ Returns:
+ str: The universe domain to be used by the client.
+
+ Raises:
+ ValueError: If the universe domain is an empty string.
+ """
+ universe_domain = DatabaseAdminClient._DEFAULT_UNIVERSE
+ if client_universe_domain is not None:
+ universe_domain = client_universe_domain
+ elif universe_domain_env is not None:
+ universe_domain = universe_domain_env
+ if len(universe_domain.strip()) == 0:
+ raise ValueError("Universe Domain cannot be an empty string.")
+ return universe_domain
+
+ def _validate_universe_domain(self):
+ """Validates client's and credentials' universe domains are consistent.
+
+ Returns:
+ bool: True iff the configured universe domain is valid.
+
+ Raises:
+ ValueError: If the configured universe domain is not valid.
+ """
+
+ # NOTE (b/349488459): universe validation is disabled until further notice.
+ return True
+
+ def _add_cred_info_for_auth_errors(
+ self, error: core_exceptions.GoogleAPICallError
+ ) -> None:
+ """Adds credential info string to error details for 401/403/404 errors.
+
+ Args:
+ error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info.
+ """
+ if error.code not in [
+ HTTPStatus.UNAUTHORIZED,
+ HTTPStatus.FORBIDDEN,
+ HTTPStatus.NOT_FOUND,
+ ]:
+ return
+
+ cred = self._transport._credentials
+
+ # get_cred_info is only available in google-auth>=2.35.0
+ if not hasattr(cred, "get_cred_info"):
+ return
+
+ # ignore the type check since pypy test fails when get_cred_info
+ # is not available
+ cred_info = cred.get_cred_info() # type: ignore
+ if cred_info and hasattr(error._details, "append"):
+ error._details.append(json.dumps(cred_info))
+
+ @property
+ def api_endpoint(self):
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance.
+ """
+ return self._api_endpoint
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used by the client instance.
+ """
+ return self._universe_domain
+
def __init__(
self,
*,
- credentials: Optional[credentials.Credentials] = None,
- transport: Union[str, DatabaseAdminTransport, None] = None,
- client_options: Optional[client_options_lib.ClientOptions] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[
+ Union[str, DatabaseAdminTransport, Callable[..., DatabaseAdminTransport]]
+ ] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiate the database admin client.
+ """Instantiates the database admin client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -337,25 +750,37 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- transport (Union[str, DatabaseAdminTransport]): The
- transport to use. If set to None, a transport is chosen
- automatically.
- client_options (google.api_core.client_options.ClientOptions): Custom options for the
- client. It won't take effect if a ``transport`` instance is provided.
- (1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
- environment variable can also be used to override the endpoint:
+ transport (Optional[Union[str,DatabaseAdminTransport,Callable[..., DatabaseAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the DatabaseAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+ variable, which have one of the following values:
"always" (always use the default mTLS endpoint), "never" (always
- use the default regular endpoint) and "auto" (auto switch to the
- default mTLS endpoint if client certificate is present, this is
- the default value). However, the ``api_endpoint`` property takes
- precedence if provided.
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
- to provide client certificate for mutual TLS transport. If
+ to provide a client certificate for mTLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
+
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that the ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
@@ -366,87 +791,167 @@ def __init__(
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
- if isinstance(client_options, dict):
- client_options = client_options_lib.from_dict(client_options)
- if client_options is None:
- client_options = client_options_lib.ClientOptions()
+ self._client_options = client_options
+ if isinstance(self._client_options, dict):
+ self._client_options = client_options_lib.from_dict(self._client_options)
+ if self._client_options is None:
+ self._client_options = client_options_lib.ClientOptions()
+ self._client_options = cast(
+ client_options_lib.ClientOptions, self._client_options
+ )
+
+ universe_domain_opt = getattr(self._client_options, "universe_domain", None)
- # Create SSL credentials for mutual TLS if needed.
- use_client_cert = bool(
- util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ (
+ self._use_client_cert,
+ self._use_mtls_endpoint,
+ self._universe_domain_env,
+ ) = DatabaseAdminClient._read_environment_variables()
+ self._client_cert_source = DatabaseAdminClient._get_client_cert_source(
+ self._client_options.client_cert_source, self._use_client_cert
)
+ self._universe_domain = DatabaseAdminClient._get_universe_domain(
+ universe_domain_opt, self._universe_domain_env
+ )
+ self._api_endpoint = None # updated below, depending on `transport`
- client_cert_source_func = None
- is_mtls = False
- if use_client_cert:
- if client_options.client_cert_source:
- is_mtls = True
- client_cert_source_func = client_options.client_cert_source
- else:
- is_mtls = mtls.has_default_client_cert_source()
- client_cert_source_func = (
- mtls.default_client_cert_source() if is_mtls else None
- )
+ # Initialize the universe domain validation.
+ self._is_universe_domain_valid = False
- # Figure out which api endpoint to use.
- if client_options.api_endpoint is not None:
- api_endpoint = client_options.api_endpoint
- else:
- use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_mtls_env == "never":
- api_endpoint = self.DEFAULT_ENDPOINT
- elif use_mtls_env == "always":
- api_endpoint = self.DEFAULT_MTLS_ENDPOINT
- elif use_mtls_env == "auto":
- api_endpoint = (
- self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
- )
- else:
- raise MutualTLSChannelError(
- "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
- )
+ if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER
+ # Setup logging.
+ client_logging.initialize_logging()
+
+ api_key_value = getattr(self._client_options, "api_key", None)
+ if api_key_value and credentials:
+ raise ValueError(
+ "client_options.api_key and credentials are mutually exclusive"
+ )
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
- if isinstance(transport, DatabaseAdminTransport):
+ transport_provided = isinstance(transport, DatabaseAdminTransport)
+ if transport_provided:
# transport is a DatabaseAdminTransport instance.
- if credentials or client_options.credentials_file:
+ if credentials or self._client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
- if client_options.scopes:
+ if self._client_options.scopes:
raise ValueError(
- "When providing a transport instance, "
- "provide its scopes directly."
+ "When providing a transport instance, provide its scopes "
+ "directly."
)
- self._transport = transport
- else:
- Transport = type(self).get_transport_class(transport)
- self._transport = Transport(
+ self._transport = cast(DatabaseAdminTransport, transport)
+ self._api_endpoint = self._transport.host
+
+ self._api_endpoint = (
+ self._api_endpoint
+ or DatabaseAdminClient._get_api_endpoint(
+ self._client_options.api_endpoint,
+ self._client_cert_source,
+ self._universe_domain,
+ self._use_mtls_endpoint,
+ )
+ )
+
+ if not transport_provided:
+ import google.auth._default # type: ignore
+
+ if api_key_value and hasattr(
+ google.auth._default, "get_api_key_credentials"
+ ):
+ credentials = google.auth._default.get_api_key_credentials(
+ api_key_value
+ )
+
+ transport_init: Union[
+ Type[DatabaseAdminTransport], Callable[..., DatabaseAdminTransport]
+ ] = (
+ DatabaseAdminClient.get_transport_class(transport)
+ if isinstance(transport, str) or transport is None
+ else cast(Callable[..., DatabaseAdminTransport], transport)
+ )
+ # initialize with the provided callable or the passed in class
+ self._transport = transport_init(
credentials=credentials,
- credentials_file=client_options.credentials_file,
- host=api_endpoint,
- scopes=client_options.scopes,
- client_cert_source_for_mtls=client_cert_source_func,
- quota_project_id=client_options.quota_project_id,
+ credentials_file=self._client_options.credentials_file,
+ host=self._api_endpoint,
+ scopes=self._client_options.scopes,
+ client_cert_source_for_mtls=self._client_cert_source,
+ quota_project_id=self._client_options.quota_project_id,
client_info=client_info,
+ always_use_jwt_access=True,
+ api_audience=self._client_options.api_audience,
)
+ if "async" not in str(self._transport):
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ ): # pragma: NO COVER
+ _LOGGER.debug(
+ "Created client `google.spanner.admin.database_v1.DatabaseAdminClient`.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "universeDomain": getattr(
+ self._transport._credentials, "universe_domain", ""
+ ),
+ "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}",
+ "credentialsInfo": getattr(
+ self.transport._credentials, "get_cred_info", lambda: None
+ )(),
+ }
+ if hasattr(self._transport, "_credentials")
+ else {
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "credentialsType": None,
+ },
+ )
+
def list_databases(
self,
- request: spanner_database_admin.ListDatabasesRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabasesRequest, dict]
+ ] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabasesPager:
r"""Lists Cloud Spanner databases.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_databases():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabasesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_databases(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest, dict]):
The request object. The request for
[ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
parent (str):
@@ -457,12 +962,13 @@ def list_databases(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesPager:
@@ -474,25 +980,24 @@ def list_databases(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.ListDatabasesRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.ListDatabasesRequest):
request = spanner_database_admin.ListDatabasesRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -506,13 +1011,26 @@ def list_databases(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDatabasesPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -520,13 +1038,15 @@ def list_databases(
def create_database(
self,
- request: spanner_database_admin.CreateDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.CreateDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- create_statement: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ create_statement: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
r"""Creates a new Cloud Spanner database and starts to prepare it
for serving. The returned [long-running
@@ -539,8 +1059,39 @@ def create_database(
is [Database][google.spanner.admin.database.v1.Database], if
successful.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_create_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateDatabaseRequest(
+ parent="parent_value",
+ create_statement="create_statement_value",
+ )
+
+ # Make the request
+ operation = client.create_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.CreateDatabaseRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.CreateDatabaseRequest, dict]):
The request object. The request for
[CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
parent (str):
@@ -563,12 +1114,13 @@ def create_database(
This corresponds to the ``create_statement`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -580,25 +1132,24 @@ def create_database(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, create_statement])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, create_statement]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.CreateDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.CreateDatabaseRequest):
request = spanner_database_admin.CreateDatabaseRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if create_statement is not None:
@@ -614,8 +1165,16 @@ def create_database(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -630,17 +1189,45 @@ def create_database(
def get_database(
self,
- request: spanner_database_admin.GetDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseRequest, dict]
+ ] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.Database:
r"""Gets the state of a Cloud Spanner database.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_get_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetDatabaseRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_database(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.GetDatabaseRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseRequest, dict]):
The request object. The request for
[GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
name (str):
@@ -651,37 +1238,37 @@ def get_database(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Database:
A Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.GetDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.GetDatabaseRequest):
request = spanner_database_admin.GetDatabaseRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -695,47 +1282,266 @@ def get_database(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
- def update_database_ddl(
+ def update_database(
self,
- request: spanner_database_admin.UpdateDatabaseDdlRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseRequest, dict]
+ ] = None,
*,
- database: str = None,
- statements: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[spanner_database_admin.Database] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
- r"""Updates the schema of a Cloud Spanner database by
- creating/altering/dropping tables, columns, indexes, etc. The
- returned [long-running operation][google.longrunning.Operation]
- will have a name of the format
- ``/operations/`` and can be used to
- track execution of the schema change(s). The
+ r"""Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+ ``projects//instances//databases//operations/``
+ and can be used to track the database modification. The
[metadata][google.longrunning.Operation.metadata] field type is
- [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
- The operation has no response.
-
- Args:
- request (google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest):
- The request object. Enqueues the given DDL statements to
- be applied, in order but not necessarily all at once, to
- the database schema at some point (or points) in the
- future. The server checks that the statements are
- executable (syntactically valid, name tables that exist,
- etc.) before enqueueing them, but they may still fail
- upon
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_update_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ database = spanner_admin_database_v1.Database()
+ database.name = "name_value"
+
+ request = spanner_admin_database_v1.UpdateDatabaseRequest(
+ database=database,
+ )
+
+ # Make the request
+ operation = client.update_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseRequest, dict]):
+ The request object. The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+ database (google.cloud.spanner_admin_database_v1.types.Database):
+ Required. The database to update. The ``name`` field of
+ the database is of the form
+ ``projects//instances//databases/``.
+
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The list of fields to update. Currently, only
+ ``enable_drop_protection`` field can be updated.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_database_v1.types.Database`
+ A Cloud Spanner database.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.UpdateDatabaseRequest):
+ request = spanner_database_admin.UpdateDatabaseRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_database]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("database.name", request.database.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ spanner_database_admin.Database,
+ metadata_type=spanner_database_admin.UpdateDatabaseMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_database_ddl(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseDdlRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ statements: Optional[MutableSequence[str]] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation.Operation:
+ r"""Updates the schema of a Cloud Spanner database by
+ creating/altering/dropping tables, columns, indexes, etc. The
+ returned [long-running operation][google.longrunning.Operation]
+ will have a name of the format
+ ``/operations/`` and can be used to
+ track execution of the schema change(s). The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
+ The operation has no response.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_update_database_ddl():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateDatabaseDdlRequest(
+ database="database_value",
+ statements=['statements_value1', 'statements_value2'],
+ )
+
+ # Make the request
+ operation = client.update_database_ddl(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest, dict]):
+ The request object. Enqueues the given DDL statements to be applied, in
+ order but not necessarily all at once, to the database
+ schema at some point (or points) in the future. The
+ server checks that the statements are executable
+ (syntactically valid, name tables that exist, etc.)
+ before enqueueing them, but they may still fail upon
later execution (e.g., if a statement from another batch
of statements is applied first and it conflicts in some
way, or if there is some data-related problem like a
- `NULL` value in a column to which `NOT NULL` would be
- added). If a statement fails, all subsequent statements
- in the batch are automatically cancelled.
+ ``NULL`` value in a column to which ``NOT NULL`` would
+ be added). If a statement fails, all subsequent
+ statements in the batch are automatically cancelled.
+
Each batch of statements is assigned a name which can be
used with the
[Operations][google.longrunning.Operations] API to
@@ -747,19 +1553,20 @@ def update_database_ddl(
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- statements (Sequence[str]):
+ statements (MutableSequence[str]):
Required. DDL statements to be
applied to the database.
This corresponds to the ``statements`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -776,30 +1583,26 @@ def update_database_ddl(
}
- The JSON representation for Empty is empty JSON
- object {}.
-
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database, statements])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, statements]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.UpdateDatabaseDdlRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.UpdateDatabaseDdlRequest):
request = spanner_database_admin.UpdateDatabaseDdlRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if database is not None:
request.database = database
if statements is not None:
@@ -815,14 +1618,22 @@ def update_database_ddl(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty.Empty,
+ empty_pb2.Empty,
metadata_type=spanner_database_admin.UpdateDatabaseDdlMetadata,
)
@@ -831,19 +1642,45 @@ def update_database_ddl(
def drop_database(
self,
- request: spanner_database_admin.DropDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.DropDatabaseRequest, dict]
+ ] = None,
*,
- database: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Drops (aka deletes) a Cloud Spanner database. Completed backups
for the database will be retained according to their
- ``expire_time``.
+ ``expire_time``. Note: Cloud Spanner might continue to accept
+ requests for a few seconds after the database has been deleted.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_drop_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DropDatabaseRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ client.drop_database(request=request)
Args:
- request (google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest, dict]):
The request object. The request for
[DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
database (str):
@@ -851,33 +1688,33 @@ def drop_database(
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.DropDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.DropDatabaseRequest):
request = spanner_database_admin.DropDatabaseRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if database is not None:
request.database = database
@@ -891,27 +1728,61 @@ def drop_database(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
rpc(
- request, retry=retry, timeout=timeout, metadata=metadata,
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
def get_database_ddl(
self,
- request: spanner_database_admin.GetDatabaseDdlRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseDdlRequest, dict]
+ ] = None,
*,
- database: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.GetDatabaseDdlResponse:
r"""Returns the schema of a Cloud Spanner database as a list of
formatted DDL statements. This method does not show pending
schema updates, those may be queried using the
[Operations][google.longrunning.Operations] API.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_get_database_ddl():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetDatabaseDdlRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ response = client.get_database_ddl(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlRequest, dict]):
The request object. The request for
[GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
database (str):
@@ -922,12 +1793,13 @@ def get_database_ddl(
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse:
@@ -936,25 +1808,24 @@ def get_database_ddl(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.GetDatabaseDdlRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.GetDatabaseDdlRequest):
request = spanner_database_admin.GetDatabaseDdlRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if database is not None:
request.database = database
@@ -968,21 +1839,29 @@ def get_database_ddl(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
def set_iam_policy(
self,
- request: iam_policy.SetIamPolicyRequest = None,
+ request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> policy.Policy:
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
r"""Sets the access control policy on a database or backup resource.
Replaces any existing policy.
@@ -993,10 +1872,36 @@ def set_iam_policy(
permission on
[resource][google.iam.v1.SetIamPolicyRequest.resource].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_set_iam_policy():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.iam.v1.iam_policy_pb2.SetIamPolicyRequest):
- The request object. Request message for `SetIamPolicy`
- method.
+ request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
+ The request object. Request message for ``SetIamPolicy`` method.
resource (str):
REQUIRED: The resource for which the
policy is being specified. See the
@@ -1006,76 +1911,55 @@ def set_iam_policy(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
- Defines an Identity and Access Management (IAM) policy. It is used to
- specify access control policies for Cloud Platform
- resources.
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
- one or more members to a single role. Members can be
- user accounts, service accounts, Google groups, and
- domains (such as G Suite). A role is a named list of
- permissions (defined by IAM or configured by users).
- A binding can optionally specify a condition, which
- is a logic expression that further constrains the
- role binding based on attributes about the request
- and/or target resource.
-
- **JSON Example**
-
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": ["user:eve@example.com"],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ]
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
- }
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
- **YAML Example**
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z')
+ **YAML example:**
+
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
- [IAM developer's
- guide](\ https://cloud.google.com/iam/docs).
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
@@ -1083,13 +1967,12 @@ def set_iam_policy(
)
if isinstance(request, dict):
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
- request = iam_policy.SetIamPolicyRequest(**request)
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
# Null request, just make one.
- request = iam_policy.SetIamPolicyRequest()
-
+ request = iam_policy_pb2.SetIamPolicyRequest()
if resource is not None:
request.resource = resource
@@ -1103,21 +1986,29 @@ def set_iam_policy(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
def get_iam_policy(
self,
- request: iam_policy.GetIamPolicyRequest = None,
+ request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> policy.Policy:
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
r"""Gets the access control policy for a database or backup
resource. Returns an empty policy if a database or backup exists
but does not have a policy set.
@@ -1129,10 +2020,36 @@ def get_iam_policy(
permission on
[resource][google.iam.v1.GetIamPolicyRequest.resource].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_get_iam_policy():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.iam.v1.iam_policy_pb2.GetIamPolicyRequest):
- The request object. Request message for `GetIamPolicy`
- method.
+ request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
+ The request object. Request message for ``GetIamPolicy`` method.
resource (str):
REQUIRED: The resource for which the
policy is being requested. See the
@@ -1142,76 +2059,55 @@ def get_iam_policy(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
- Defines an Identity and Access Management (IAM) policy. It is used to
- specify access control policies for Cloud Platform
- resources.
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
- one or more members to a single role. Members can be
- user accounts, service accounts, Google groups, and
- domains (such as G Suite). A role is a named list of
- permissions (defined by IAM or configured by users).
- A binding can optionally specify a condition, which
- is a logic expression that further constrains the
- role binding based on attributes about the request
- and/or target resource.
-
- **JSON Example**
-
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": ["user:eve@example.com"],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ]
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
- }
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
- **YAML Example**
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z')
+ **YAML example:**
+
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
- [IAM developer's
- guide](\ https://cloud.google.com/iam/docs).
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
@@ -1219,13 +2115,12 @@ def get_iam_policy(
)
if isinstance(request, dict):
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
- request = iam_policy.GetIamPolicyRequest(**request)
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
# Null request, just make one.
- request = iam_policy.GetIamPolicyRequest()
-
+ request = iam_policy_pb2.GetIamPolicyRequest()
if resource is not None:
request.resource = resource
@@ -1239,22 +2134,30 @@ def get_iam_policy(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
def test_iam_permissions(
self,
- request: iam_policy.TestIamPermissionsRequest = None,
+ request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
*,
- resource: str = None,
- permissions: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> iam_policy.TestIamPermissionsResponse:
+ resource: Optional[str] = None,
+ permissions: Optional[MutableSequence[str]] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that the caller has on the specified
database or backup resource.
@@ -1266,10 +2169,37 @@ def test_iam_permissions(
in a NOT_FOUND error if the user has ``spanner.backups.list``
permission on the containing instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_test_iam_permissions():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest):
- The request object. Request message for
- `TestIamPermissions` method.
+ request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
+ The request object. Request message for ``TestIamPermissions`` method.
resource (str):
REQUIRED: The resource for which the
policy detail is being requested. See
@@ -1279,7 +2209,7 @@ def test_iam_permissions(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- permissions (Sequence[str]):
+ permissions (MutableSequence[str]):
The set of permissions to check for the ``resource``.
Permissions with wildcards (such as '*' or 'storage.*')
are not allowed. For more information see `IAM
@@ -1288,21 +2218,25 @@ def test_iam_permissions(
This corresponds to the ``permissions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource, permissions])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource, permissions]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
@@ -1310,16 +2244,14 @@ def test_iam_permissions(
)
if isinstance(request, dict):
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
- request = iam_policy.TestIamPermissionsRequest(**request)
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ request = iam_policy_pb2.TestIamPermissionsRequest(**request)
elif not request:
# Null request, just make one.
- request = iam_policy.TestIamPermissionsRequest()
-
+ request = iam_policy_pb2.TestIamPermissionsRequest()
if resource is not None:
request.resource = resource
-
if permissions:
request.permissions.extend(permissions)
@@ -1333,22 +2265,30 @@ def test_iam_permissions(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
def create_backup(
self,
- request: gsad_backup.CreateBackupRequest = None,
+ request: Optional[Union[gsad_backup.CreateBackupRequest, dict]] = None,
*,
- parent: str = None,
- backup: gsad_backup.Backup = None,
- backup_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ backup: Optional[gsad_backup.Backup] = None,
+ backup_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
r"""Starts creating a new Cloud Spanner Backup. The returned backup
[long-running operation][google.longrunning.Operation] will have
@@ -1364,8 +2304,39 @@ def create_backup(
backup creation per database. Backup creation of different
databases can run concurrently.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_create_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ )
+
+ # Make the request
+ operation = client.create_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.CreateBackupRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.CreateBackupRequest, dict]):
The request object. The request for
[CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
parent (str):
@@ -1394,12 +2365,13 @@ def create_backup(
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -1411,25 +2383,24 @@ def create_backup(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, backup, backup_id])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup, backup_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a gsad_backup.CreateBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, gsad_backup.CreateBackupRequest):
request = gsad_backup.CreateBackupRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if backup is not None:
@@ -1447,8 +2418,16 @@ def create_backup(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -1461,110 +2440,347 @@ def create_backup(
# Done; return the response.
return response
- def get_backup(
+ def copy_backup(
self,
- request: backup.GetBackupRequest = None,
+ request: Optional[Union[backup.CopyBackupRequest, dict]] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> backup.Backup:
- r"""Gets metadata on a pending or completed
- [Backup][google.spanner.admin.database.v1.Backup].
+ parent: Optional[str] = None,
+ backup_id: Optional[str] = None,
+ source_backup: Optional[str] = None,
+ expire_time: Optional[timestamp_pb2.Timestamp] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation.Operation:
+ r"""Starts copying a Cloud Spanner Backup. The returned backup
+ [long-running operation][google.longrunning.Operation] will have
+ a name of the format
+ ``projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>``
+ and can be used to track copying of the backup. The operation is
+ associated with the destination backup. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Backup][google.spanner.admin.database.v1.Backup], if
+ successful. Cancelling the returned operation will stop the
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_copy_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CopyBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ )
+
+ # Make the request
+ operation = client.copy_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
Args:
- request (google.cloud.spanner_admin_database_v1.types.GetBackupRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.CopyBackupRequest, dict]):
The request object. The request for
- [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
- name (str):
- Required. Name of the backup. Values are of the form
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+ parent (str):
+ Required. The name of the destination instance that will
+ contain the backup copy. Values are of the form:
+ ``projects/<project>/instances/<instance>``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_id (str):
+ Required. The id of the backup copy. The ``backup_id``
+ appended to ``parent`` forms the full backup_uri of the
+ form
``projects//instances//backups/``.
- This corresponds to the ``name`` field
+ This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
+ source_backup (str):
+ Required. The source backup to be copied. The source
+ backup needs to be in READY state for it to be copied.
+ Once CopyBackup is in progress, the source backup cannot
+ be deleted or cleaned up on expiration until CopyBackup
+ is finished. Values are of the form:
+ ``projects/<project>/instances/<instance>/backups/<backup>``.
+ This corresponds to the ``source_backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ expire_time (google.protobuf.timestamp_pb2.Timestamp):
+ Required. The expiration time of the backup in
+ microsecond granularity. The expiration time must be at
+ least 6 hours and at most 366 days from the
+ ``create_time`` of the source backup. Once the
+ ``expire_time`` has passed, the backup is eligible to be
+ automatically deleted by Cloud Spanner to free the
+ resources used by the backup.
+
+ This corresponds to the ``expire_time`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
- google.cloud.spanner_admin_database_v1.types.Backup:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_database_v1.types.Backup`
A backup of a Cloud Spanner database.
+
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_id, source_backup, expire_time]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.GetBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
- if not isinstance(request, backup.GetBackupRequest):
- request = backup.GetBackupRequest(request)
-
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.CopyBackupRequest):
+ request = backup.CopyBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
- if name is not None:
- request.name = name
+ if parent is not None:
+ request.parent = parent
+ if backup_id is not None:
+ request.backup_id = backup_id
+ if source_backup is not None:
+ request.source_backup = source_backup
+ if expire_time is not None:
+ request.expire_time = expire_time
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.get_backup]
+ rpc = self._transport._wrapped_methods[self._transport.copy_backup]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ backup.Backup,
+ metadata_type=backup.CopyBackupMetadata,
+ )
# Done; return the response.
return response
- def update_backup(
+ def get_backup(
self,
- request: gsad_backup.UpdateBackupRequest = None,
+ request: Optional[Union[backup.GetBackupRequest, dict]] = None,
*,
- backup: gsad_backup.Backup = None,
- update_mask: field_mask.FieldMask = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> gsad_backup.Backup:
- r"""Updates a pending or completed
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup.Backup:
+ r"""Gets metadata on a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
- Args:
- request (google.cloud.spanner_admin_database_v1.types.UpdateBackupRequest):
- The request object. The request for
- [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
- backup (google.cloud.spanner_admin_database_v1.types.Backup):
- Required. The backup to update. ``backup.name``, and the
- fields to be updated as specified by ``update_mask`` are
- required. Other fields are ignored. Update is only
- supported for the following fields:
+ .. code-block:: python
- - ``backup.expire_time``.
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ def sample_get_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
- This corresponds to the ``backup`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- update_mask (google.protobuf.field_mask_pb2.FieldMask):
- Required. A mask specifying which fields (e.g.
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_backup(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.GetBackupRequest, dict]):
+ The request object. The request for
+ [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
+ name (str):
+ Required. Name of the backup. Values are of the form
+ ``projects/<project>/instances/<instance>/backups/<backup>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.Backup:
+ A backup of a Cloud Spanner database.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.GetBackupRequest):
+ request = backup.GetBackupRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_backup]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_backup(
+ self,
+ request: Optional[Union[gsad_backup.UpdateBackupRequest, dict]] = None,
+ *,
+ backup: Optional[gsad_backup.Backup] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup.Backup:
+ r"""Updates a pending or completed
+ [Backup][google.spanner.admin.database.v1.Backup].
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_update_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateBackupRequest(
+ )
+
+ # Make the request
+ response = client.update_backup(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupRequest, dict]):
+ The request object. The request for
+ [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
+ backup (google.cloud.spanner_admin_database_v1.types.Backup):
+ Required. The backup to update. ``backup.name``, and the
+ fields to be updated as specified by ``update_mask`` are
+ required. Other fields are ignored. Update is only
+ supported for the following fields:
+
+ - ``backup.expire_time``.
+
+ This corresponds to the ``backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. A mask specifying which fields (e.g.
``expire_time``) in the Backup resource should be
updated. This mask is relative to the Backup resource,
not to the request message. The field mask must always
@@ -1575,37 +2791,37 @@ def update_backup(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Backup:
A backup of a Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([backup, update_mask])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a gsad_backup.UpdateBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, gsad_backup.UpdateBackupRequest):
request = gsad_backup.UpdateBackupRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if backup is not None:
request.backup = backup
if update_mask is not None:
@@ -1623,26 +2839,57 @@ def update_backup(
),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
def delete_backup(
self,
- request: backup.DeleteBackupRequest = None,
+ request: Optional[Union[backup.DeleteBackupRequest, dict]] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Deletes a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_delete_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DeleteBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_backup(request=request)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.DeleteBackupRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupRequest, dict]):
The request object. The request for
[DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
name (str):
@@ -1653,33 +2900,33 @@ def delete_backup(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.DeleteBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, backup.DeleteBackupRequest):
request = backup.DeleteBackupRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if name is not None:
request.name = name
@@ -1693,26 +2940,59 @@ def delete_backup(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
rpc(
- request, retry=retry, timeout=timeout, metadata=metadata,
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
def list_backups(
self,
- request: backup.ListBackupsRequest = None,
+ request: Optional[Union[backup.ListBackupsRequest, dict]] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupsPager:
r"""Lists completed and pending backups. Backups returned are
ordered by ``create_time`` in descending order, starting from
the most recent ``create_time``.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_backups():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backups(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.ListBackupsRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListBackupsRequest, dict]):
The request object. The request for
[ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
parent (str):
@@ -1722,12 +3002,13 @@ def list_backups(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsPager:
@@ -1739,25 +3020,24 @@ def list_backups(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.ListBackupsRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, backup.ListBackupsRequest):
request = backup.ListBackupsRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -1771,13 +3051,26 @@ def list_backups(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListBackupsPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -1785,14 +3078,16 @@ def list_backups(
def restore_database(
self,
- request: spanner_database_admin.RestoreDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.RestoreDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- database_id: str = None,
- backup: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ database_id: Optional[str] = None,
+ backup: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
r"""Create a new database by restoring from a completed backup. The
new database must be in the same project and in an instance with
@@ -1814,8 +3109,40 @@ def restore_database(
without waiting for the optimize operation associated with the
first restore to complete.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_restore_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.RestoreDatabaseRequest(
+ backup="backup_value",
+ parent="parent_value",
+ database_id="database_id_value",
+ )
+
+ # Make the request
+ operation = client.restore_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.RestoreDatabaseRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.RestoreDatabaseRequest, dict]):
The request object. The request for
[RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
parent (str):
@@ -1846,12 +3173,13 @@ def restore_database(
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -1863,25 +3191,24 @@ def restore_database(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, database_id, backup])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, database_id, backup]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.RestoreDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.RestoreDatabaseRequest):
request = spanner_database_admin.RestoreDatabaseRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
if database_id is not None:
@@ -1899,8 +3226,16 @@ def restore_database(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -1915,12 +3250,14 @@ def restore_database(
def list_database_operations(
self,
- request: spanner_database_admin.ListDatabaseOperationsRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseOperationsRequest, dict]
+ ] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabaseOperationsPager:
r"""Lists database
[longrunning-operations][google.longrunning.Operation]. A
@@ -1933,8 +3270,35 @@ def list_database_operations(
completed/failed/canceled within the last 7 days, and pending
operations.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_database_operations():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabaseOperationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_database_operations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest, dict]):
The request object. The request for
[ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
parent (str):
@@ -1945,12 +3309,13 @@ def list_database_operations(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseOperationsPager:
@@ -1962,27 +3327,26 @@ def list_database_operations(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.ListDatabaseOperationsRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(
request, spanner_database_admin.ListDatabaseOperationsRequest
):
request = spanner_database_admin.ListDatabaseOperationsRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -1996,13 +3360,26 @@ def list_database_operations(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDatabaseOperationsPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -2010,12 +3387,12 @@ def list_database_operations(
def list_backup_operations(
self,
- request: backup.ListBackupOperationsRequest = None,
+ request: Optional[Union[backup.ListBackupOperationsRequest, dict]] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupOperationsPager:
r"""Lists the backup [long-running
operations][google.longrunning.Operation] in the given instance.
@@ -2030,8 +3407,35 @@ def list_backup_operations(
``operation.metadata.value.progress.start_time`` in descending
order starting from the most recently started operation.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_backup_operations():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupOperationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backup_operations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest, dict]):
The request object. The request for
[ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
parent (str):
@@ -2042,12 +3446,13 @@ def list_backup_operations(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
-
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsPager:
@@ -2059,25 +3464,24 @@ def list_backup_operations(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.ListBackupOperationsRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, backup.ListBackupOperationsRequest):
request = backup.ListBackupOperationsRequest(request)
-
# If we have keyword arguments corresponding to fields on the
# request, apply these.
-
if parent is not None:
request.parent = parent
@@ -2091,27 +3495,1246 @@ def list_backup_operations(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListBackupOperationsPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
return response
+ def list_database_roles(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseRolesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListDatabaseRolesPager:
+ r"""Lists Cloud Spanner database roles.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_database_roles():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabaseRolesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_database_roles(request=request)
-try:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution(
- "google-cloud-spanner-admin-database",
- ).version,
- )
-except pkg_resources.DistributionNotFound:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesRequest, dict]):
+ The request object. The request for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+ parent (str):
+ Required. The database whose roles should be listed.
+ Values are of the form
+ ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseRolesPager:
+ The response for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.ListDatabaseRolesRequest):
+ request = spanner_database_admin.ListDatabaseRolesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_database_roles]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListDatabaseRolesPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def add_split_points(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.AddSplitPointsRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ split_points: Optional[
+ MutableSequence[spanner_database_admin.SplitPoints]
+ ] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.AddSplitPointsResponse:
+ r"""Adds split points to specified tables, indexes of a
+ database.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_add_split_points():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.AddSplitPointsRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ response = client.add_split_points(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.AddSplitPointsRequest, dict]):
+ The request object. The request for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+ database (str):
+ Required. The database on whose tables/indexes split
+ points are to be added. Values are of the form
+ ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ split_points (MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]):
+ Required. The split points to add.
+ This corresponds to the ``split_points`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.AddSplitPointsResponse:
+ The response for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, split_points]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.AddSplitPointsRequest):
+ request = spanner_database_admin.AddSplitPointsRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if split_points is not None:
+ request.split_points = split_points
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.add_split_points]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def create_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.CreateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ backup_schedule_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Creates a new backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_create_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateBackupScheduleRequest(
+ parent="parent_value",
+ backup_schedule_id="backup_schedule_id_value",
+ )
+
+ # Make the request
+ response = client.create_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.CreateBackupScheduleRequest, dict]):
+ The request object. The request for
+ [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
+ parent (str):
+ Required. The name of the database
+ that this backup schedule applies to.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule (google.cloud.spanner_admin_database_v1.types.BackupSchedule):
+ Required. The backup schedule to
+ create.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule_id (str):
+ Required. The Id to use for the backup schedule. The
+ ``backup_schedule_id`` appended to ``parent`` forms the
+ full backup schedule name of the form
+ ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``backup_schedule_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_schedule, backup_schedule_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.CreateBackupScheduleRequest):
+ request = gsad_backup_schedule.CreateBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if backup_schedule_id is not None:
+ request.backup_schedule_id = backup_schedule_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_backup_schedule]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_backup_schedule(
+ self,
+ request: Optional[Union[backup_schedule.GetBackupScheduleRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup_schedule.BackupSchedule:
+ r"""Gets backup schedule for the input schedule name.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_get_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.GetBackupScheduleRequest, dict]):
+ The request object. The request for
+ [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+ name (str):
+ Required. The name of the schedule to retrieve. Values
+ are of the form
+ ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.GetBackupScheduleRequest):
+ request = backup_schedule.GetBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_backup_schedule]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.UpdateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Updates a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_update_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateBackupScheduleRequest(
+ )
+
+ # Make the request
+ response = client.update_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupScheduleRequest, dict]):
+ The request object. The request for
+ [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+ backup_schedule (google.cloud.spanner_admin_database_v1.types.BackupSchedule):
+ Required. The backup schedule to update.
+ ``backup_schedule.name``, and the fields to be updated
+ as specified by ``update_mask`` are required. Other
+ fields are ignored.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. A mask specifying which
+ fields in the BackupSchedule resource
+ should be updated. This mask is relative
+ to the BackupSchedule resource, not to
+ the request message. The field mask must
+ always be specified; this prevents any
+ future fields from being erased
+ accidentally.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup_schedule, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.UpdateBackupScheduleRequest):
+ request = gsad_backup_schedule.UpdateBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_backup_schedule]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("backup_schedule.name", request.backup_schedule.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_backup_schedule(
+ self,
+ request: Optional[
+ Union[backup_schedule.DeleteBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_delete_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DeleteBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_backup_schedule(request=request)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupScheduleRequest, dict]):
+ The request object. The request for
+ [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
+ name (str):
+ Required. The name of the schedule to delete. Values are
+ of the form
+ ``projects//instances//databases//backupSchedules/``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.DeleteBackupScheduleRequest):
+ request = backup_schedule.DeleteBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_backup_schedule]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ def list_backup_schedules(
+ self,
+ request: Optional[
+ Union[backup_schedule.ListBackupSchedulesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListBackupSchedulesPager:
+ r"""Lists all the backup schedules for the database.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_backup_schedules():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupSchedulesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backup_schedules(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest, dict]):
+ The request object. The request for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+ parent (str):
+ Required. Database is the parent
+ resource whose backup schedules should
+ be listed. Values are of the form
+ projects//instances//databases/
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupSchedulesPager:
+ The response for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.ListBackupSchedulesRequest):
+ request = backup_schedule.ListBackupSchedulesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_backup_schedules]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListBackupSchedulesPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def internal_update_graph_operation(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.InternalUpdateGraphOperationRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ operation_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.InternalUpdateGraphOperationResponse:
+ r"""This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_internal_update_graph_operation():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.InternalUpdateGraphOperationRequest(
+ database="database_value",
+ operation_id="operation_id_value",
+ vm_identity_token="vm_identity_token_value",
+ )
+
+ # Make the request
+ response = client.internal_update_graph_operation(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationRequest, dict]):
+ The request object. Internal request proto, do not use
+ directly.
+ database (str):
+ Internal field, do not use directly.
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ operation_id (str):
+ Internal field, do not use directly.
+ This corresponds to the ``operation_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationResponse:
+ Internal response proto, do not use
+ directly.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, operation_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_database_admin.InternalUpdateGraphOperationRequest
+ ):
+ request = spanner_database_admin.InternalUpdateGraphOperationRequest(
+ request
+ )
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if operation_id is not None:
+ request.operation_id = operation_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.internal_update_graph_operation
+ ]
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def __enter__(self) -> "DatabaseAdminClient":
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
+ """
+ self.transport.close()
+
+ def list_operations(
+ self,
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.ListOperationsResponse:
+ r"""Lists operations that match the specified filter in the request.
+
+ Args:
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
+ The request object. Request message for
+ `ListOperations` method.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ ~.operations_pb2.ListOperationsResponse:
+ Response message for ``ListOperations`` method.
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.ListOperationsRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_operations]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ try:
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+ except core_exceptions.GoogleAPICallError as e:
+ self._add_cred_info_for_auth_errors(e)
+ raise e
+
+ def get_operation(
+ self,
+ request: Optional[operations_pb2.GetOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Gets the latest state of a long-running operation.
+
+ Args:
+ request (:class:`~.operations_pb2.GetOperationRequest`):
+ The request object. Request message for
+ `GetOperation` method.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ ~.operations_pb2.Operation:
+ An ``Operation`` object.
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.GetOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ try:
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+ except core_exceptions.GoogleAPICallError as e:
+ self._add_cred_info_for_auth_errors(e)
+ raise e
+
+ def delete_operation(
+ self,
+ request: Optional[operations_pb2.DeleteOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a long-running operation.
+
+ This method indicates that the client is no longer interested
+ in the operation result. It does not cancel the operation.
+ If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`.
+
+ Args:
+ request (:class:`~.operations_pb2.DeleteOperationRequest`):
+ The request object. Request message for
+ `DeleteOperation` method.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ None
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.DeleteOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ def cancel_operation(
+ self,
+ request: Optional[operations_pb2.CancelOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Starts asynchronous cancellation on a long-running operation.
+
+ The server makes a best effort to cancel the operation, but success
+ is not guaranteed. If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`.
+
+ Args:
+ request (:class:`~.operations_pb2.CancelOperationRequest`):
+ The request object. Request message for
+ `CancelOperation` method.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ None
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.CancelOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.cancel_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
__all__ = ("DatabaseAdminClient",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
index 933ca91c5a..c9e2e14d52 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,21 +13,34 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import retry_async as retries_async
from typing import (
Any,
- AsyncIterable,
+ AsyncIterator,
Awaitable,
Callable,
- Iterable,
Sequence,
Tuple,
Optional,
+ Iterator,
+ Union,
)
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+ OptionalAsyncRetry = Union[
+ retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None
+ ]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+ OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore
+
from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
-from google.longrunning import operations_pb2 as operations # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
class ListDatabasesPager:
@@ -55,7 +67,9 @@ def __init__(
request: spanner_database_admin.ListDatabasesRequest,
response: spanner_database_admin.ListDatabasesResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -66,26 +80,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabasesRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[spanner_database_admin.ListDatabasesResponse]:
+ def pages(self) -> Iterator[spanner_database_admin.ListDatabasesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __iter__(self) -> Iterable[spanner_database_admin.Database]:
+ def __iter__(self) -> Iterator[spanner_database_admin.Database]:
for page in self.pages:
yield from page.databases
@@ -117,9 +143,11 @@ def __init__(
request: spanner_database_admin.ListDatabasesRequest,
response: spanner_database_admin.ListDatabasesResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
@@ -128,12 +156,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabasesRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -142,14 +177,19 @@ def __getattr__(self, name: str) -> Any:
@property
async def pages(
self,
- ) -> AsyncIterable[spanner_database_admin.ListDatabasesResponse]:
+ ) -> AsyncIterator[spanner_database_admin.ListDatabasesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __aiter__(self) -> AsyncIterable[spanner_database_admin.Database]:
+ def __aiter__(self) -> AsyncIterator[spanner_database_admin.Database]:
async def async_generator():
async for page in self.pages:
for response in page.databases:
@@ -185,7 +225,9 @@ def __init__(
request: backup.ListBackupsRequest,
response: backup.ListBackupsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -196,26 +238,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[backup.ListBackupsResponse]:
+ def pages(self) -> Iterator[backup.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __iter__(self) -> Iterable[backup.Backup]:
+ def __iter__(self) -> Iterator[backup.Backup]:
for page in self.pages:
yield from page.backups
@@ -247,9 +301,11 @@ def __init__(
request: backup.ListBackupsRequest,
response: backup.ListBackupsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
@@ -258,26 +314,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- async def pages(self) -> AsyncIterable[backup.ListBackupsResponse]:
+ async def pages(self) -> AsyncIterator[backup.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __aiter__(self) -> AsyncIterable[backup.Backup]:
+ def __aiter__(self) -> AsyncIterator[backup.Backup]:
async def async_generator():
async for page in self.pages:
for response in page.backups:
@@ -313,7 +381,9 @@ def __init__(
request: spanner_database_admin.ListDatabaseOperationsRequest,
response: spanner_database_admin.ListDatabaseOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -324,26 +394,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabaseOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[spanner_database_admin.ListDatabaseOperationsResponse]:
+ def pages(self) -> Iterator[spanner_database_admin.ListDatabaseOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __iter__(self) -> Iterable[operations.Operation]:
+ def __iter__(self) -> Iterator[operations_pb2.Operation]:
for page in self.pages:
yield from page.operations
@@ -377,9 +459,11 @@ def __init__(
request: spanner_database_admin.ListDatabaseOperationsRequest,
response: spanner_database_admin.ListDatabaseOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
@@ -388,12 +472,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabaseOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -402,14 +493,19 @@ def __getattr__(self, name: str) -> Any:
@property
async def pages(
self,
- ) -> AsyncIterable[spanner_database_admin.ListDatabaseOperationsResponse]:
+ ) -> AsyncIterator[spanner_database_admin.ListDatabaseOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __aiter__(self) -> AsyncIterable[operations.Operation]:
+ def __aiter__(self) -> AsyncIterator[operations_pb2.Operation]:
async def async_generator():
async for page in self.pages:
for response in page.operations:
@@ -445,7 +541,9 @@ def __init__(
request: backup.ListBackupOperationsRequest,
response: backup.ListBackupOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -456,26 +554,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[backup.ListBackupOperationsResponse]:
+ def pages(self) -> Iterator[backup.ListBackupOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __iter__(self) -> Iterable[operations.Operation]:
+ def __iter__(self) -> Iterator[operations_pb2.Operation]:
for page in self.pages:
yield from page.operations
@@ -507,9 +617,11 @@ def __init__(
request: backup.ListBackupOperationsRequest,
response: backup.ListBackupOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
- """Instantiate the pager.
+ """Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
@@ -518,26 +630,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- async def pages(self) -> AsyncIterable[backup.ListBackupOperationsResponse]:
+ async def pages(self) -> AsyncIterator[backup.ListBackupOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __aiter__(self) -> AsyncIterable[operations.Operation]:
+ def __aiter__(self) -> AsyncIterator[operations_pb2.Operation]:
async def async_generator():
async for page in self.pages:
for response in page.operations:
@@ -547,3 +671,319 @@ async def async_generator():
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListDatabaseRolesPager:
+ """A pager for iterating through ``list_database_roles`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``database_roles`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListDatabaseRoles`` requests and continue to iterate
+ through the ``database_roles`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., spanner_database_admin.ListDatabaseRolesResponse],
+ request: spanner_database_admin.ListDatabaseRolesRequest,
+ response: spanner_database_admin.ListDatabaseRolesResponse,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesRequest):
+ The initial request object.
+ response (google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse):
+ The initial response object.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = spanner_database_admin.ListDatabaseRolesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterator[spanner_database_admin.ListDatabaseRolesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __iter__(self) -> Iterator[spanner_database_admin.DatabaseRole]:
+ for page in self.pages:
+ yield from page.database_roles
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListDatabaseRolesAsyncPager:
+ """A pager for iterating through ``list_database_roles`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``database_roles`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListDatabaseRoles`` requests and continue to iterate
+ through the ``database_roles`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[
+ ..., Awaitable[spanner_database_admin.ListDatabaseRolesResponse]
+ ],
+ request: spanner_database_admin.ListDatabaseRolesRequest,
+ response: spanner_database_admin.ListDatabaseRolesResponse,
+ *,
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiates the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesRequest):
+ The initial request object.
+ response (google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse):
+ The initial response object.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = spanner_database_admin.ListDatabaseRolesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(
+ self,
+ ) -> AsyncIterator[spanner_database_admin.ListDatabaseRolesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterator[spanner_database_admin.DatabaseRole]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.database_roles:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListBackupSchedulesPager:
+ """A pager for iterating through ``list_backup_schedules`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``backup_schedules`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListBackupSchedules`` requests and continue to iterate
+ through the ``backup_schedules`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., backup_schedule.ListBackupSchedulesResponse],
+ request: backup_schedule.ListBackupSchedulesRequest,
+ response: backup_schedule.ListBackupSchedulesResponse,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest):
+ The initial request object.
+ response (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse):
+ The initial response object.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = backup_schedule.ListBackupSchedulesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterator[backup_schedule.ListBackupSchedulesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __iter__(self) -> Iterator[backup_schedule.BackupSchedule]:
+ for page in self.pages:
+ yield from page.backup_schedules
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListBackupSchedulesAsyncPager:
+ """A pager for iterating through ``list_backup_schedules`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``backup_schedules`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListBackupSchedules`` requests and continue to iterate
+ through the ``backup_schedules`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[backup_schedule.ListBackupSchedulesResponse]],
+ request: backup_schedule.ListBackupSchedulesRequest,
+ response: backup_schedule.ListBackupSchedulesResponse,
+ *,
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiates the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest):
+ The initial request object.
+ response (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse):
+ The initial response object.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = backup_schedule.ListBackupSchedulesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterator[backup_schedule.ListBackupSchedulesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterator[backup_schedule.BackupSchedule]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.backup_schedules:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/README.rst b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/README.rst
new file mode 100644
index 0000000000..f70c023a98
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/README.rst
@@ -0,0 +1,9 @@
+
+transport inheritance structure
+_______________________________
+
+`DatabaseAdminTransport` is the ABC for all transports.
+- public child `DatabaseAdminGrpcTransport` for sync gRPC transport (defined in `grpc.py`).
+- public child `DatabaseAdminGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`).
+- private child `_BaseDatabaseAdminRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`).
+- public child `DatabaseAdminRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`).
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py
index 00a3ab8549..23ba04ea21 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,22 +13,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
from collections import OrderedDict
from typing import Dict, Type
from .base import DatabaseAdminTransport
from .grpc import DatabaseAdminGrpcTransport
from .grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport
+from .rest import DatabaseAdminRestTransport
+from .rest import DatabaseAdminRestInterceptor
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[DatabaseAdminTransport]]
_transport_registry["grpc"] = DatabaseAdminGrpcTransport
_transport_registry["grpc_asyncio"] = DatabaseAdminGrpcAsyncIOTransport
+_transport_registry["rest"] = DatabaseAdminRestTransport
__all__ = (
"DatabaseAdminTransport",
"DatabaseAdminGrpcTransport",
"DatabaseAdminGrpcAsyncIOTransport",
+ "DatabaseAdminRestTransport",
+ "DatabaseAdminRestInterceptor",
)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py
index 779f02e840..16a075d983 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,35 +13,39 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
import abc
-import typing
-import pkg_resources
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
-from google import auth # type: ignore
-from google.api_core import exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.auth import credentials # type: ignore
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
+
+import google.auth # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import operations_v1
+from google.auth import credentials as ga_credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+import google.protobuf
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
-from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
-from google.iam.v1 import policy_pb2 as policy # type: ignore
-from google.longrunning import operations_pb2 as operations # type: ignore
-from google.protobuf import empty_pb2 as empty # type: ignore
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
-try:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution(
- "google-cloud-spanner-admin-database",
- ).version,
- )
-except pkg_resources.DistributionNotFound:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
class DatabaseAdminTransport(abc.ABC):
@@ -53,65 +56,94 @@ class DatabaseAdminTransport(abc.ABC):
"https://www.googleapis.com/auth/spanner.admin",
)
+ DEFAULT_HOST: str = "spanner.googleapis.com"
+
def __init__(
self,
*,
- host: str = "spanner.googleapis.com",
- credentials: credentials.Credentials = None,
- credentials_file: typing.Optional[str] = None,
- scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
- quota_project_id: typing.Optional[str] = None,
+ host: str = DEFAULT_HOST,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ api_audience: Optional[str] = None,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to (default: 'spanner.googleapis.com').
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
- scope (Optional[Sequence[str]]): A list of scopes.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
+ scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
"""
- # Save the hostname. Default to port 443 (HTTPS) if none is specified.
- if ":" not in host:
- host += ":443"
- self._host = host
+
+ scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
+
+ # Save the scopes.
+ self._scopes = scopes
+ if not hasattr(self, "_ignore_credentials"):
+ self._ignore_credentials: bool = False
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
- raise exceptions.DuplicateCredentialArgs(
+ raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
- credentials, _ = auth.load_credentials_from_file(
- credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ credentials, _ = google.auth.load_credentials_from_file(
+ credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
-
- elif credentials is None:
- credentials, _ = auth.default(
- scopes=scopes, quota_project_id=quota_project_id
+ elif credentials is None and not self._ignore_credentials:
+ credentials, _ = google.auth.default(
+ **scopes_kwargs, quota_project_id=quota_project_id
)
+ # Don't apply audience if the credentials file passed from user.
+ if hasattr(credentials, "with_gdch_audience"):
+ credentials = credentials.with_gdch_audience(
+ api_audience if api_audience else host
+ )
+
+ # If the credentials are service account credentials, then always try to use self signed JWT.
+ if (
+ always_use_jwt_access
+ and isinstance(credentials, service_account.Credentials)
+ and hasattr(service_account.Credentials, "with_always_use_jwt_access")
+ ):
+ credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
- # Lifted into its own function so it can be stubbed out during tests.
- self._prep_wrapped_messages(client_info)
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
+
+ @property
+ def host(self):
+ return self._host
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
@@ -123,14 +155,18 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
),
self.create_database: gapic_v1.method.wrap_method(
- self.create_database, default_timeout=3600.0, client_info=client_info,
+ self.create_database,
+ default_timeout=3600.0,
+ client_info=client_info,
),
self.get_database: gapic_v1.method.wrap_method(
self.get_database,
@@ -139,8 +175,25 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_database: gapic_v1.method.wrap_method(
+ self.update_database,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
@@ -152,8 +205,10 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
@@ -165,8 +220,10 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
@@ -178,14 +235,18 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
),
self.set_iam_policy: gapic_v1.method.wrap_method(
- self.set_iam_policy, default_timeout=30.0, client_info=client_info,
+ self.set_iam_policy,
+ default_timeout=30.0,
+ client_info=client_info,
),
self.get_iam_policy: gapic_v1.method.wrap_method(
self.get_iam_policy,
@@ -194,8 +255,10 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
@@ -206,7 +269,14 @@ def _prep_wrapped_messages(self, client_info):
client_info=client_info,
),
self.create_backup: gapic_v1.method.wrap_method(
- self.create_backup, default_timeout=3600.0, client_info=client_info,
+ self.create_backup,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.copy_backup: gapic_v1.method.wrap_method(
+ self.copy_backup,
+ default_timeout=3600.0,
+ client_info=client_info,
),
self.get_backup: gapic_v1.method.wrap_method(
self.get_backup,
@@ -215,8 +285,10 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
@@ -228,8 +300,10 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
@@ -241,8 +315,10 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
@@ -254,14 +330,18 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
),
self.restore_database: gapic_v1.method.wrap_method(
- self.restore_database, default_timeout=3600.0, client_info=client_info,
+ self.restore_database,
+ default_timeout=3600.0,
+ client_info=client_info,
),
self.list_database_operations: gapic_v1.method.wrap_method(
self.list_database_operations,
@@ -270,8 +350,10 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
@@ -283,27 +365,168 @@ def _prep_wrapped_messages(self, client_info):
maximum=32.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_database_roles: gapic_v1.method.wrap_method(
+ self.list_database_roles,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.add_split_points: gapic_v1.method.wrap_method(
+ self.add_split_points,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.create_backup_schedule: gapic_v1.method.wrap_method(
+ self.create_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
),
+ deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
),
+ self.get_backup_schedule: gapic_v1.method.wrap_method(
+ self.get_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_backup_schedule: gapic_v1.method.wrap_method(
+ self.update_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.delete_backup_schedule: gapic_v1.method.wrap_method(
+ self.delete_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backup_schedules: gapic_v1.method.wrap_method(
+ self.list_backup_schedules,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.internal_update_graph_operation: gapic_v1.method.wrap_method(
+ self.internal_update_graph_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.cancel_operation: gapic_v1.method.wrap_method(
+ self.cancel_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_operation: gapic_v1.method.wrap_method(
+ self.delete_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_operation: gapic_v1.method.wrap_method(
+ self.get_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_operations: gapic_v1.method.wrap_method(
+ self.list_operations,
+ default_timeout=None,
+ client_info=client_info,
+ ),
}
+ def close(self):
+ """Closes resources associated with the transport.
+
+ .. warning::
+ Only call this method if the transport is NOT shared
+ with other clients - this may cause errors in other clients!
+ """
+ raise NotImplementedError()
+
@property
- def operations_client(self) -> operations_v1.OperationsClient:
+ def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def list_databases(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[spanner_database_admin.ListDatabasesRequest],
- typing.Union[
+ Union[
spanner_database_admin.ListDatabasesResponse,
- typing.Awaitable[spanner_database_admin.ListDatabasesResponse],
+ Awaitable[spanner_database_admin.ListDatabasesResponse],
],
]:
raise NotImplementedError()
@@ -311,50 +534,58 @@ def list_databases(
@property
def create_database(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[spanner_database_admin.CreateDatabaseRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_database(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[spanner_database_admin.GetDatabaseRequest],
- typing.Union[
- spanner_database_admin.Database,
- typing.Awaitable[spanner_database_admin.Database],
+ Union[
+ spanner_database_admin.Database, Awaitable[spanner_database_admin.Database]
],
]:
raise NotImplementedError()
+ @property
+ def update_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.UpdateDatabaseRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
@property
def update_database_ddl(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[spanner_database_admin.UpdateDatabaseDdlRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def drop_database(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[spanner_database_admin.DropDatabaseRequest],
- typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
+ Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def get_database_ddl(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[spanner_database_admin.GetDatabaseDdlRequest],
- typing.Union[
+ Union[
spanner_database_admin.GetDatabaseDdlResponse,
- typing.Awaitable[spanner_database_admin.GetDatabaseDdlResponse],
+ Awaitable[spanner_database_admin.GetDatabaseDdlResponse],
],
]:
raise NotImplementedError()
@@ -362,29 +593,29 @@ def get_database_ddl(
@property
def set_iam_policy(
self,
- ) -> typing.Callable[
- [iam_policy.SetIamPolicyRequest],
- typing.Union[policy.Policy, typing.Awaitable[policy.Policy]],
+ ) -> Callable[
+ [iam_policy_pb2.SetIamPolicyRequest],
+ Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
]:
raise NotImplementedError()
@property
def get_iam_policy(
self,
- ) -> typing.Callable[
- [iam_policy.GetIamPolicyRequest],
- typing.Union[policy.Policy, typing.Awaitable[policy.Policy]],
+ ) -> Callable[
+ [iam_policy_pb2.GetIamPolicyRequest],
+ Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
]:
raise NotImplementedError()
@property
def test_iam_permissions(
self,
- ) -> typing.Callable[
- [iam_policy.TestIamPermissionsRequest],
- typing.Union[
- iam_policy.TestIamPermissionsResponse,
- typing.Awaitable[iam_policy.TestIamPermissionsResponse],
+ ) -> Callable[
+ [iam_policy_pb2.TestIamPermissionsRequest],
+ Union[
+ iam_policy_pb2.TestIamPermissionsResponse,
+ Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
],
]:
raise NotImplementedError()
@@ -392,67 +623,72 @@ def test_iam_permissions(
@property
def create_backup(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[gsad_backup.CreateBackupRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def copy_backup(
+ self,
+ ) -> Callable[
+ [backup.CopyBackupRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_backup(
self,
- ) -> typing.Callable[
- [backup.GetBackupRequest],
- typing.Union[backup.Backup, typing.Awaitable[backup.Backup]],
+ ) -> Callable[
+ [backup.GetBackupRequest], Union[backup.Backup, Awaitable[backup.Backup]]
]:
raise NotImplementedError()
@property
def update_backup(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[gsad_backup.UpdateBackupRequest],
- typing.Union[gsad_backup.Backup, typing.Awaitable[gsad_backup.Backup]],
+ Union[gsad_backup.Backup, Awaitable[gsad_backup.Backup]],
]:
raise NotImplementedError()
@property
def delete_backup(
self,
- ) -> typing.Callable[
- [backup.DeleteBackupRequest],
- typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
+ ) -> Callable[
+ [backup.DeleteBackupRequest], Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]]
]:
raise NotImplementedError()
@property
def list_backups(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[backup.ListBackupsRequest],
- typing.Union[
- backup.ListBackupsResponse, typing.Awaitable[backup.ListBackupsResponse]
- ],
+ Union[backup.ListBackupsResponse, Awaitable[backup.ListBackupsResponse]],
]:
raise NotImplementedError()
@property
def restore_database(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[spanner_database_admin.RestoreDatabaseRequest],
- typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def list_database_operations(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[spanner_database_admin.ListDatabaseOperationsRequest],
- typing.Union[
+ Union[
spanner_database_admin.ListDatabaseOperationsResponse,
- typing.Awaitable[spanner_database_admin.ListDatabaseOperationsResponse],
+ Awaitable[spanner_database_admin.ListDatabaseOperationsResponse],
],
]:
raise NotImplementedError()
@@ -460,14 +696,143 @@ def list_database_operations(
@property
def list_backup_operations(
self,
- ) -> typing.Callable[
+ ) -> Callable[
[backup.ListBackupOperationsRequest],
- typing.Union[
+ Union[
backup.ListBackupOperationsResponse,
- typing.Awaitable[backup.ListBackupOperationsResponse],
+ Awaitable[backup.ListBackupOperationsResponse],
],
]:
raise NotImplementedError()
+ @property
+ def list_database_roles(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.ListDatabaseRolesRequest],
+ Union[
+ spanner_database_admin.ListDatabaseRolesResponse,
+ Awaitable[spanner_database_admin.ListDatabaseRolesResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def add_split_points(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.AddSplitPointsRequest],
+ Union[
+ spanner_database_admin.AddSplitPointsResponse,
+ Awaitable[spanner_database_admin.AddSplitPointsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def create_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.CreateBackupScheduleRequest],
+ Union[
+ gsad_backup_schedule.BackupSchedule,
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.GetBackupScheduleRequest],
+ Union[
+ backup_schedule.BackupSchedule, Awaitable[backup_schedule.BackupSchedule]
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def update_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.UpdateBackupScheduleRequest],
+ Union[
+ gsad_backup_schedule.BackupSchedule,
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.DeleteBackupScheduleRequest],
+ Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_backup_schedules(
+ self,
+ ) -> Callable[
+ [backup_schedule.ListBackupSchedulesRequest],
+ Union[
+ backup_schedule.ListBackupSchedulesResponse,
+ Awaitable[backup_schedule.ListBackupSchedulesResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def internal_update_graph_operation(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.InternalUpdateGraphOperationRequest],
+ Union[
+ spanner_database_admin.InternalUpdateGraphOperationResponse,
+ Awaitable[spanner_database_admin.InternalUpdateGraphOperationResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_operations(
+ self,
+ ) -> Callable[
+ [operations_pb2.ListOperationsRequest],
+ Union[
+ operations_pb2.ListOperationsResponse,
+ Awaitable[operations_pb2.ListOperationsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_operation(
+ self,
+ ) -> Callable[
+ [operations_pb2.GetOperationRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def cancel_operation(
+ self,
+ ) -> Callable[[operations_pb2.CancelOperationRequest], None,]:
+ raise NotImplementedError()
+
+ @property
+ def delete_operation(
+ self,
+ ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]:
+ raise NotImplementedError()
+
+ @property
+ def kind(self) -> str:
+ raise NotImplementedError()
+
__all__ = ("DatabaseAdminTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py
index 665ed4fc15..0888d9af16 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,39 +13,123 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
+import json
+import logging as std_logging
+import pickle
import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import grpc_helpers # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google import auth # type: ignore
-from google.auth import credentials # type: ignore
+from google.api_core import grpc_helpers
+from google.api_core import operations_v1
+from google.api_core import gapic_v1
+import google.auth # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.protobuf.json_format import MessageToJson
+import google.protobuf.message
import grpc # type: ignore
+import proto # type: ignore
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
-from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
-from google.iam.v1 import policy_pb2 as policy # type: ignore
-from google.longrunning import operations_pb2 as operations # type: ignore
-from google.protobuf import empty_pb2 as empty # type: ignore
-
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
+
+class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER
+ def intercept_unary_unary(self, continuation, client_call_details, request):
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ )
+ if logging_enabled: # pragma: NO COVER
+ request_metadata = client_call_details.metadata
+ if isinstance(request, proto.Message):
+ request_payload = type(request).to_json(request)
+ elif isinstance(request, google.protobuf.message.Message):
+ request_payload = MessageToJson(request)
+ else:
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
+
+ request_metadata = {
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
+ for key, value in request_metadata
+ }
+ grpc_request = {
+ "payload": request_payload,
+ "requestMethod": "grpc",
+ "metadata": dict(request_metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for {client_call_details.method}",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": str(client_call_details.method),
+ "request": grpc_request,
+ "metadata": grpc_request["metadata"],
+ },
+ )
+ response = continuation(client_call_details, request)
+ if logging_enabled: # pragma: NO COVER
+ response_metadata = response.trailing_metadata()
+            # Convert gRPC metadata ``<grpc.aio.Metadata>`` to list of tuples
+ metadata = (
+ dict([(k, str(v)) for k, v in response_metadata])
+ if response_metadata
+ else None
+ )
+ result = response.result()
+ if isinstance(result, proto.Message):
+ response_payload = type(result).to_json(result)
+ elif isinstance(result, google.protobuf.message.Message):
+ response_payload = MessageToJson(result)
+ else:
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
+ grpc_response = {
+ "payload": response_payload,
+ "metadata": metadata,
+ "status": "OK",
+ }
+ _LOGGER.debug(
+ f"Received response for {client_call_details.method}.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": client_call_details.method,
+ "response": grpc_response,
+ "metadata": grpc_response["metadata"],
+ },
+ )
+ return response
+
class DatabaseAdminGrpcTransport(DatabaseAdminTransport):
"""gRPC backend transport for DatabaseAdmin.
Cloud Spanner Database Admin API
- The Cloud Spanner Database Admin API can be used to create,
- drop, and list databases. It also enables updating the schema of
- pre-existing databases. It can be also used to create, delete
- and list backups for a database and to restore from an existing
- backup.
+
+ The Cloud Spanner Database Admin API can be used to:
+
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
@@ -62,48 +145,55 @@ def __init__(
self,
*,
host: str = "spanner.googleapis.com",
- credentials: credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Sequence[str] = None,
- channel: grpc.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to (default: 'spanner.googleapis.com').
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- This argument is ignored if ``channel`` is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ This argument is ignored if a ``channel`` instance is provided.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
- ignored if ``channel`` is provided.
- channel (Optional[grpc.Channel]): A ``Channel`` instance through
- which to make calls.
+ ignored if a ``channel`` instance is provided.
+ channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
+ A ``Channel`` instance through which to make calls, or a Callable
+ that constructs and returns one. If set to None, ``self.create_channel``
+ is used to create the channel. If a Callable is given, it will be called
+ with the same arguments as used in ``self.create_channel``.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
- ``client_cert_source`` or applicatin default SSL credentials.
+ ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
- for grpc channel. It is ignored if ``channel`` is provided.
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
- both in PEM format. It is used to configure mutual TLS channel. It is
- ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ both in PEM format. It is used to configure a mutual TLS channel. It is
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
@@ -111,6 +201,8 @@ def __init__(
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
@@ -118,119 +210,107 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
+ self._stubs: Dict[str, Callable] = {}
+ self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
- if channel:
- # Sanity check: Ensure that channel and credentials are not both
- # provided.
- credentials = False
-
+ if isinstance(channel, grpc.Channel):
+ # Ignore credentials if a channel was passed.
+ credentials = None
+ self._ignore_credentials = True
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
- elif api_mtls_endpoint:
- host = (
- api_mtls_endpoint
- if ":" in api_mtls_endpoint
- else api_mtls_endpoint + ":443"
- )
-
- if credentials is None:
- credentials, _ = auth.default(
- scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
- )
-
- # Create SSL credentials with client_cert_source or application
- # default SSL credentials.
- if client_cert_source:
- cert, key = client_cert_source()
- ssl_credentials = grpc.ssl_channel_credentials(
- certificate_chain=cert, private_key=key
- )
- else:
- ssl_credentials = SslCredentials().ssl_credentials
-
- # create a new channel. The provided one is ignored.
- self._grpc_channel = type(self).create_channel(
- host,
- credentials=credentials,
- credentials_file=credentials_file,
- ssl_credentials=ssl_credentials,
- scopes=scopes or self.AUTH_SCOPES,
- quota_project_id=quota_project_id,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
- self._ssl_channel_credentials = ssl_credentials
+
else:
- host = host if ":" in host else host + ":443"
-
- if credentials is None:
- credentials, _ = auth.default(
- scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
- )
-
- if client_cert_source_for_mtls and not ssl_channel_credentials:
- cert, key = client_cert_source_for_mtls()
- self._ssl_channel_credentials = grpc.ssl_channel_credentials(
- certificate_chain=cert, private_key=key
- )
-
- # create a new channel. The provided one is ignored.
- self._grpc_channel = type(self).create_channel(
- host,
- credentials=credentials,
- credentials_file=credentials_file,
- ssl_credentials=self._ssl_channel_credentials,
- scopes=scopes or self.AUTH_SCOPES,
- quota_project_id=quota_project_id,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
- self._stubs = {} # type: Dict[str, Callable]
- self._operations_client = None
+ else:
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
- # Run the base constructor.
+ # The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes or self.AUTH_SCOPES,
+ scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ api_audience=api_audience,
)
+ if not self._grpc_channel:
+ # initialize with the provided callable or the default channel
+ channel_init = channel or type(self).create_channel
+ self._grpc_channel = channel_init(
+ self._host,
+ # use the credentials which are saved
+ credentials=self._credentials,
+ # Set ``credentials_file`` to ``None`` here as
+ # the credentials that we saved earlier should be used.
+ credentials_file=None,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ("grpc.keepalive_time_ms", 120000),
+ ],
+ )
+
+ self._interceptor = _LoggingClientInterceptor()
+ self._logged_channel = grpc.intercept_channel(
+ self._grpc_channel, self._interceptor
+ )
+
+ # Wrap messages. This must be done after self._logged_channel exists
+ self._prep_wrapped_messages(client_info)
+
@classmethod
def create_channel(
cls,
host: str = "spanner.googleapis.com",
- credentials: credentials.Credentials = None,
- credentials_file: str = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
- address (Optional[str]): The host for the channel to use.
+ host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -245,20 +325,21 @@ def create_channel(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
- scopes = scopes or cls.AUTH_SCOPES
+
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes,
quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
- """Return the channel designed to connect to this service.
- """
+ """Return the channel designed to connect to this service."""
return self._grpc_channel
@property
@@ -268,9 +349,11 @@ def operations_client(self) -> operations_v1.OperationsClient:
This property caches on the instance; repeated calls return the same
client.
"""
- # Sanity check: Only create a new client if we do not already have one.
+ # Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
- self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+ self._operations_client = operations_v1.OperationsClient(
+ self._logged_channel
+ )
# Return the client from cache.
return self._operations_client
@@ -297,7 +380,7 @@ def list_databases(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_databases" not in self._stubs:
- self._stubs["list_databases"] = self.grpc_channel.unary_unary(
+ self._stubs["list_databases"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
request_serializer=spanner_database_admin.ListDatabasesRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabasesResponse.deserialize,
@@ -307,7 +390,9 @@ def list_databases(
@property
def create_database(
self,
- ) -> Callable[[spanner_database_admin.CreateDatabaseRequest], operations.Operation]:
+ ) -> Callable[
+ [spanner_database_admin.CreateDatabaseRequest], operations_pb2.Operation
+ ]:
r"""Return a callable for the create database method over gRPC.
Creates a new Cloud Spanner database and starts to prepare it
@@ -332,10 +417,10 @@ def create_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_database" not in self._stubs:
- self._stubs["create_database"] = self.grpc_channel.unary_unary(
+ self._stubs["create_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
request_serializer=spanner_database_admin.CreateDatabaseRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_database"]
@@ -360,18 +445,83 @@ def get_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database" not in self._stubs:
- self._stubs["get_database"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
request_serializer=spanner_database_admin.GetDatabaseRequest.serialize,
response_deserializer=spanner_database_admin.Database.deserialize,
)
return self._stubs["get_database"]
+ @property
+ def update_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.UpdateDatabaseRequest], operations_pb2.Operation
+ ]:
+ r"""Return a callable for the update database method over gRPC.
+
+ Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+ ``projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>``
+ and can be used to track the database modification. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
+
+ Returns:
+ Callable[[~.UpdateDatabaseRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_database" not in self._stubs:
+ self._stubs["update_database"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase",
+ request_serializer=spanner_database_admin.UpdateDatabaseRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["update_database"]
+
@property
def update_database_ddl(
self,
) -> Callable[
- [spanner_database_admin.UpdateDatabaseDdlRequest], operations.Operation
+ [spanner_database_admin.UpdateDatabaseDdlRequest], operations_pb2.Operation
]:
r"""Return a callable for the update database ddl method over gRPC.
@@ -396,22 +546,23 @@ def update_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_database_ddl" not in self._stubs:
- self._stubs["update_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["update_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
request_serializer=spanner_database_admin.UpdateDatabaseDdlRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_database_ddl"]
@property
def drop_database(
self,
- ) -> Callable[[spanner_database_admin.DropDatabaseRequest], empty.Empty]:
+ ) -> Callable[[spanner_database_admin.DropDatabaseRequest], empty_pb2.Empty]:
r"""Return a callable for the drop database method over gRPC.
Drops (aka deletes) a Cloud Spanner database. Completed backups
for the database will be retained according to their
- ``expire_time``.
+ ``expire_time``. Note: Cloud Spanner might continue to accept
+ requests for a few seconds after the database has been deleted.
Returns:
Callable[[~.DropDatabaseRequest],
@@ -424,10 +575,10 @@ def drop_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "drop_database" not in self._stubs:
- self._stubs["drop_database"] = self.grpc_channel.unary_unary(
+ self._stubs["drop_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
request_serializer=spanner_database_admin.DropDatabaseRequest.serialize,
- response_deserializer=empty.Empty.FromString,
+ response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["drop_database"]
@@ -456,7 +607,7 @@ def get_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database_ddl" not in self._stubs:
- self._stubs["get_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
request_serializer=spanner_database_admin.GetDatabaseDdlRequest.serialize,
response_deserializer=spanner_database_admin.GetDatabaseDdlResponse.deserialize,
@@ -466,7 +617,7 @@ def get_database_ddl(
@property
def set_iam_policy(
self,
- ) -> Callable[[iam_policy.SetIamPolicyRequest], policy.Policy]:
+ ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy on a database or backup resource.
@@ -490,17 +641,17 @@ def set_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
- self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["set_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
- request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString,
- response_deserializer=policy.Policy.FromString,
+ request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
+ response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def get_iam_policy(
self,
- ) -> Callable[[iam_policy.GetIamPolicyRequest], policy.Policy]:
+ ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy for a database or backup
@@ -525,10 +676,10 @@ def get_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
- self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["get_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
- request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString,
- response_deserializer=policy.Policy.FromString,
+ request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
+ response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@@ -536,7 +687,8 @@ def get_iam_policy(
def test_iam_permissions(
self,
) -> Callable[
- [iam_policy.TestIamPermissionsRequest], iam_policy.TestIamPermissionsResponse
+ [iam_policy_pb2.TestIamPermissionsRequest],
+ iam_policy_pb2.TestIamPermissionsResponse,
]:
r"""Return a callable for the test iam permissions method over gRPC.
@@ -562,17 +714,17 @@ def test_iam_permissions(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
- self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
+ self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
- request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString,
- response_deserializer=iam_policy.TestIamPermissionsResponse.FromString,
+ request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
+ response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
@property
def create_backup(
self,
- ) -> Callable[[gsad_backup.CreateBackupRequest], operations.Operation]:
+ ) -> Callable[[gsad_backup.CreateBackupRequest], operations_pb2.Operation]:
r"""Return a callable for the create backup method over gRPC.
Starts creating a new Cloud Spanner Backup. The returned backup
@@ -600,13 +752,51 @@ def create_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_backup" not in self._stubs:
- self._stubs["create_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["create_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup",
request_serializer=gsad_backup.CreateBackupRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_backup"]
+ @property
+ def copy_backup(
+ self,
+ ) -> Callable[[backup.CopyBackupRequest], operations_pb2.Operation]:
+ r"""Return a callable for the copy backup method over gRPC.
+
+ Starts copying a Cloud Spanner Backup. The returned backup
+ [long-running operation][google.longrunning.Operation] will have
+ a name of the format
+ ``projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>``
+ and can be used to track copying of the backup. The operation is
+ associated with the destination backup. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Backup][google.spanner.admin.database.v1.Backup], if
+ successful. Cancelling the returned operation will stop the
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
+
+ Returns:
+ Callable[[~.CopyBackupRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "copy_backup" not in self._stubs:
+ self._stubs["copy_backup"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup",
+ request_serializer=backup.CopyBackupRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["copy_backup"]
+
@property
def get_backup(self) -> Callable[[backup.GetBackupRequest], backup.Backup]:
r"""Return a callable for the get backup method over gRPC.
@@ -625,7 +815,7 @@ def get_backup(self) -> Callable[[backup.GetBackupRequest], backup.Backup]:
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_backup" not in self._stubs:
- self._stubs["get_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["get_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup",
request_serializer=backup.GetBackupRequest.serialize,
response_deserializer=backup.Backup.deserialize,
@@ -652,7 +842,7 @@ def update_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_backup" not in self._stubs:
- self._stubs["update_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["update_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup",
request_serializer=gsad_backup.UpdateBackupRequest.serialize,
response_deserializer=gsad_backup.Backup.deserialize,
@@ -660,7 +850,7 @@ def update_backup(
return self._stubs["update_backup"]
@property
- def delete_backup(self) -> Callable[[backup.DeleteBackupRequest], empty.Empty]:
+ def delete_backup(self) -> Callable[[backup.DeleteBackupRequest], empty_pb2.Empty]:
r"""Return a callable for the delete backup method over gRPC.
Deletes a pending or completed
@@ -677,10 +867,10 @@ def delete_backup(self) -> Callable[[backup.DeleteBackupRequest], empty.Empty]:
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_backup" not in self._stubs:
- self._stubs["delete_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["delete_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup",
request_serializer=backup.DeleteBackupRequest.serialize,
- response_deserializer=empty.Empty.FromString,
+ response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_backup"]
@@ -705,7 +895,7 @@ def list_backups(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backups" not in self._stubs:
- self._stubs["list_backups"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backups"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups",
request_serializer=backup.ListBackupsRequest.serialize,
response_deserializer=backup.ListBackupsResponse.deserialize,
@@ -716,7 +906,7 @@ def list_backups(
def restore_database(
self,
) -> Callable[
- [spanner_database_admin.RestoreDatabaseRequest], operations.Operation
+ [spanner_database_admin.RestoreDatabaseRequest], operations_pb2.Operation
]:
r"""Return a callable for the restore database method over gRPC.
@@ -751,10 +941,10 @@ def restore_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_database" not in self._stubs:
- self._stubs["restore_database"] = self.grpc_channel.unary_unary(
+ self._stubs["restore_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase",
request_serializer=spanner_database_admin.RestoreDatabaseRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["restore_database"]
@@ -789,7 +979,7 @@ def list_database_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_database_operations" not in self._stubs:
- self._stubs["list_database_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_database_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations",
request_serializer=spanner_database_admin.ListDatabaseOperationsRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabaseOperationsResponse.deserialize,
@@ -828,12 +1018,322 @@ def list_backup_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backup_operations" not in self._stubs:
- self._stubs["list_backup_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backup_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations",
request_serializer=backup.ListBackupOperationsRequest.serialize,
response_deserializer=backup.ListBackupOperationsResponse.deserialize,
)
return self._stubs["list_backup_operations"]
+ @property
+ def list_database_roles(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.ListDatabaseRolesRequest],
+ spanner_database_admin.ListDatabaseRolesResponse,
+ ]:
+ r"""Return a callable for the list database roles method over gRPC.
+
+ Lists Cloud Spanner database roles.
+
+ Returns:
+ Callable[[~.ListDatabaseRolesRequest],
+ ~.ListDatabaseRolesResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_database_roles" not in self._stubs:
+ self._stubs["list_database_roles"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles",
+ request_serializer=spanner_database_admin.ListDatabaseRolesRequest.serialize,
+ response_deserializer=spanner_database_admin.ListDatabaseRolesResponse.deserialize,
+ )
+ return self._stubs["list_database_roles"]
+
+ @property
+ def add_split_points(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.AddSplitPointsRequest],
+ spanner_database_admin.AddSplitPointsResponse,
+ ]:
+ r"""Return a callable for the add split points method over gRPC.
+
+ Adds split points to specified tables, indexes of a
+ database.
+
+ Returns:
+ Callable[[~.AddSplitPointsRequest],
+ ~.AddSplitPointsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "add_split_points" not in self._stubs:
+ self._stubs["add_split_points"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/AddSplitPoints",
+ request_serializer=spanner_database_admin.AddSplitPointsRequest.serialize,
+ response_deserializer=spanner_database_admin.AddSplitPointsResponse.deserialize,
+ )
+ return self._stubs["add_split_points"]
+
+ @property
+ def create_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.CreateBackupScheduleRequest],
+ gsad_backup_schedule.BackupSchedule,
+ ]:
+ r"""Return a callable for the create backup schedule method over gRPC.
+
+ Creates a new backup schedule.
+
+ Returns:
+ Callable[[~.CreateBackupScheduleRequest],
+ ~.BackupSchedule]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_backup_schedule" not in self._stubs:
+ self._stubs["create_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule",
+ request_serializer=gsad_backup_schedule.CreateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["create_backup_schedule"]
+
+ @property
+ def get_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.GetBackupScheduleRequest], backup_schedule.BackupSchedule
+ ]:
+ r"""Return a callable for the get backup schedule method over gRPC.
+
+ Gets backup schedule for the input schedule name.
+
+ Returns:
+ Callable[[~.GetBackupScheduleRequest],
+ ~.BackupSchedule]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_backup_schedule" not in self._stubs:
+ self._stubs["get_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule",
+ request_serializer=backup_schedule.GetBackupScheduleRequest.serialize,
+ response_deserializer=backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["get_backup_schedule"]
+
+ @property
+ def update_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.UpdateBackupScheduleRequest],
+ gsad_backup_schedule.BackupSchedule,
+ ]:
+ r"""Return a callable for the update backup schedule method over gRPC.
+
+ Updates a backup schedule.
+
+ Returns:
+ Callable[[~.UpdateBackupScheduleRequest],
+ ~.BackupSchedule]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_backup_schedule" not in self._stubs:
+ self._stubs["update_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule",
+ request_serializer=gsad_backup_schedule.UpdateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["update_backup_schedule"]
+
+ @property
+ def delete_backup_schedule(
+ self,
+ ) -> Callable[[backup_schedule.DeleteBackupScheduleRequest], empty_pb2.Empty]:
+ r"""Return a callable for the delete backup schedule method over gRPC.
+
+ Deletes a backup schedule.
+
+ Returns:
+ Callable[[~.DeleteBackupScheduleRequest],
+ ~.Empty]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_backup_schedule" not in self._stubs:
+ self._stubs["delete_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule",
+ request_serializer=backup_schedule.DeleteBackupScheduleRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs["delete_backup_schedule"]
+
+ @property
+ def list_backup_schedules(
+ self,
+ ) -> Callable[
+ [backup_schedule.ListBackupSchedulesRequest],
+ backup_schedule.ListBackupSchedulesResponse,
+ ]:
+ r"""Return a callable for the list backup schedules method over gRPC.
+
+ Lists all the backup schedules for the database.
+
+ Returns:
+ Callable[[~.ListBackupSchedulesRequest],
+ ~.ListBackupSchedulesResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_backup_schedules" not in self._stubs:
+ self._stubs["list_backup_schedules"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules",
+ request_serializer=backup_schedule.ListBackupSchedulesRequest.serialize,
+ response_deserializer=backup_schedule.ListBackupSchedulesResponse.deserialize,
+ )
+ return self._stubs["list_backup_schedules"]
+
+ @property
+ def internal_update_graph_operation(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.InternalUpdateGraphOperationRequest],
+ spanner_database_admin.InternalUpdateGraphOperationResponse,
+ ]:
+ r"""Return a callable for the internal update graph
+ operation method over gRPC.
+
+ This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ Returns:
+ Callable[[~.InternalUpdateGraphOperationRequest],
+ ~.InternalUpdateGraphOperationResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "internal_update_graph_operation" not in self._stubs:
+ self._stubs[
+ "internal_update_graph_operation"
+ ] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/InternalUpdateGraphOperation",
+ request_serializer=spanner_database_admin.InternalUpdateGraphOperationRequest.serialize,
+ response_deserializer=spanner_database_admin.InternalUpdateGraphOperationResponse.deserialize,
+ )
+ return self._stubs["internal_update_graph_operation"]
+
+ def close(self):
+ self._logged_channel.close()
+
+ @property
+ def delete_operation(
+ self,
+ ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
+ r"""Return a callable for the delete_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_operation" not in self._stubs:
+ self._stubs["delete_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/DeleteOperation",
+ request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["delete_operation"]
+
+ @property
+ def cancel_operation(
+ self,
+ ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+ r"""Return a callable for the cancel_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_operation" not in self._stubs:
+ self._stubs["cancel_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/CancelOperation",
+ request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["cancel_operation"]
+
+ @property
+ def get_operation(
+ self,
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+ r"""Return a callable for the get_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_operation" not in self._stubs:
+ self._stubs["get_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/GetOperation",
+ request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["get_operation"]
+
+ @property
+ def list_operations(
+ self,
+ ) -> Callable[
+ [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
+ ]:
+ r"""Return a callable for the list_operations method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_operations" not in self._stubs:
+ self._stubs["list_operations"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/ListOperations",
+ request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+ response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+ )
+ return self._stubs["list_operations"]
+
+ @property
+ def kind(self) -> str:
+ return "grpc"
+
__all__ = ("DatabaseAdminGrpcTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
index 25229d58cd..145c6ebf03 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,41 +13,129 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
+import inspect
+import json
+import pickle
+import logging as std_logging
import warnings
-from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
-
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import grpc_helpers_async # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google import auth # type: ignore
-from google.auth import credentials # type: ignore
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import exceptions as core_exceptions
+from google.api_core import retry_async as retries
+from google.api_core import operations_v1
+from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.protobuf.json_format import MessageToJson
+import google.protobuf.message
import grpc # type: ignore
+import proto # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
-from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
-from google.iam.v1 import policy_pb2 as policy # type: ignore
-from google.longrunning import operations_pb2 as operations # type: ignore
-from google.protobuf import empty_pb2 as empty # type: ignore
-
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
from .grpc import DatabaseAdminGrpcTransport
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
+
+class _LoggingClientAIOInterceptor(
+ grpc.aio.UnaryUnaryClientInterceptor
+): # pragma: NO COVER
+ async def intercept_unary_unary(self, continuation, client_call_details, request):
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ )
+ if logging_enabled: # pragma: NO COVER
+ request_metadata = client_call_details.metadata
+ if isinstance(request, proto.Message):
+ request_payload = type(request).to_json(request)
+ elif isinstance(request, google.protobuf.message.Message):
+ request_payload = MessageToJson(request)
+ else:
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
+
+ request_metadata = {
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
+ for key, value in request_metadata
+ }
+ grpc_request = {
+ "payload": request_payload,
+ "requestMethod": "grpc",
+ "metadata": dict(request_metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for {client_call_details.method}",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": str(client_call_details.method),
+ "request": grpc_request,
+ "metadata": grpc_request["metadata"],
+ },
+ )
+ response = await continuation(client_call_details, request)
+ if logging_enabled: # pragma: NO COVER
+ response_metadata = await response.trailing_metadata()
+            # Convert the gRPC trailing metadata to a list of (key, value) tuples
+ metadata = (
+ dict([(k, str(v)) for k, v in response_metadata])
+ if response_metadata
+ else None
+ )
+ result = await response
+ if isinstance(result, proto.Message):
+ response_payload = type(result).to_json(result)
+ elif isinstance(result, google.protobuf.message.Message):
+ response_payload = MessageToJson(result)
+ else:
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
+ grpc_response = {
+ "payload": response_payload,
+ "metadata": metadata,
+ "status": "OK",
+ }
+ _LOGGER.debug(
+ f"Received response to rpc {client_call_details.method}.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": str(client_call_details.method),
+ "response": grpc_response,
+ "metadata": grpc_response["metadata"],
+ },
+ )
+ return response
+
class DatabaseAdminGrpcAsyncIOTransport(DatabaseAdminTransport):
"""gRPC AsyncIO backend transport for DatabaseAdmin.
Cloud Spanner Database Admin API
- The Cloud Spanner Database Admin API can be used to create,
- drop, and list databases. It also enables updating the schema of
- pre-existing databases. It can be also used to create, delete
- and list backups for a database and to restore from an existing
- backup.
+
+ The Cloud Spanner Database Admin API can be used to:
+
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
@@ -65,7 +152,7 @@ class DatabaseAdminGrpcAsyncIOTransport(DatabaseAdminTransport):
def create_channel(
cls,
host: str = "spanner.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -73,15 +160,15 @@ def create_channel(
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
- address (Optional[str]): The host for the channel to use.
+ host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -92,13 +179,15 @@ def create_channel(
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
- scopes = scopes or cls.AUTH_SCOPES
+
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes,
quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
**kwargs,
)
@@ -106,56 +195,65 @@ def __init__(
self,
*,
host: str = "spanner.googleapis.com",
- credentials: credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
- channel: aio.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id=None,
+ channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
- host (Optional[str]): The hostname to connect to.
+ host (Optional[str]):
+ The hostname to connect to (default: 'spanner.googleapis.com').
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- This argument is ignored if ``channel`` is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ This argument is ignored if a ``channel`` instance is provided.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
- channel (Optional[aio.Channel]): A ``Channel`` instance through
- which to make calls.
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
+ A ``Channel`` instance through which to make calls, or a Callable
+ that constructs and returns one. If set to None, ``self.create_channel``
+ is used to create the channel. If a Callable is given, it will be called
+ with the same arguments as used in ``self.create_channel``.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
- ``client_cert_source`` or applicatin default SSL credentials.
+ ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
- for grpc channel. It is ignored if ``channel`` is provided.
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
- both in PEM format. It is used to configure mutual TLS channel. It is
- ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ both in PEM format. It is used to configure a mutual TLS channel. It is
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
@@ -163,97 +261,84 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
+ self._stubs: Dict[str, Callable] = {}
+ self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
- if channel:
- # Sanity check: Ensure that channel and credentials are not both
- # provided.
- credentials = False
-
+ if isinstance(channel, aio.Channel):
+ # Ignore credentials if a channel was passed.
+ credentials = None
+ self._ignore_credentials = True
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
- elif api_mtls_endpoint:
- host = (
- api_mtls_endpoint
- if ":" in api_mtls_endpoint
- else api_mtls_endpoint + ":443"
- )
-
- if credentials is None:
- credentials, _ = auth.default(
- scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
- )
-
- # Create SSL credentials with client_cert_source or application
- # default SSL credentials.
- if client_cert_source:
- cert, key = client_cert_source()
- ssl_credentials = grpc.ssl_channel_credentials(
- certificate_chain=cert, private_key=key
- )
- else:
- ssl_credentials = SslCredentials().ssl_credentials
-
- # create a new channel. The provided one is ignored.
- self._grpc_channel = type(self).create_channel(
- host,
- credentials=credentials,
- credentials_file=credentials_file,
- ssl_credentials=ssl_credentials,
- scopes=scopes or self.AUTH_SCOPES,
- quota_project_id=quota_project_id,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
- self._ssl_channel_credentials = ssl_credentials
else:
- host = host if ":" in host else host + ":443"
-
- if credentials is None:
- credentials, _ = auth.default(
- scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
- )
-
- if client_cert_source_for_mtls and not ssl_channel_credentials:
- cert, key = client_cert_source_for_mtls()
- self._ssl_channel_credentials = grpc.ssl_channel_credentials(
- certificate_chain=cert, private_key=key
- )
-
- # create a new channel. The provided one is ignored.
- self._grpc_channel = type(self).create_channel(
- host,
- credentials=credentials,
- credentials_file=credentials_file,
- ssl_credentials=self._ssl_channel_credentials,
- scopes=scopes or self.AUTH_SCOPES,
- quota_project_id=quota_project_id,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
- # Run the base constructor.
+ else:
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
- scopes=scopes or self.AUTH_SCOPES,
+ scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ api_audience=api_audience,
)
- self._stubs = {}
- self._operations_client = None
+ if not self._grpc_channel:
+ # initialize with the provided callable or the default channel
+ channel_init = channel or type(self).create_channel
+ self._grpc_channel = channel_init(
+ self._host,
+ # use the credentials which are saved
+ credentials=self._credentials,
+ # Set ``credentials_file`` to ``None`` here as
+ # the credentials that we saved earlier should be used.
+ credentials_file=None,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ("grpc.keepalive_time_ms", 120000),
+ ],
+ )
+
+ self._interceptor = _LoggingClientAIOInterceptor()
+ self._grpc_channel._unary_unary_interceptors.append(self._interceptor)
+ self._logged_channel = self._grpc_channel
+ self._wrap_with_kind = (
+ "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters
+ )
+ # Wrap messages. This must be done after self._logged_channel exists
+ self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
@@ -272,10 +357,10 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient:
This property caches on the instance; repeated calls return the same
client.
"""
- # Sanity check: Only create a new client if we do not already have one.
+ # Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
- self.grpc_channel
+ self._logged_channel
)
# Return the client from cache.
@@ -303,7 +388,7 @@ def list_databases(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_databases" not in self._stubs:
- self._stubs["list_databases"] = self.grpc_channel.unary_unary(
+ self._stubs["list_databases"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
request_serializer=spanner_database_admin.ListDatabasesRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabasesResponse.deserialize,
@@ -314,7 +399,8 @@ def list_databases(
def create_database(
self,
) -> Callable[
- [spanner_database_admin.CreateDatabaseRequest], Awaitable[operations.Operation]
+ [spanner_database_admin.CreateDatabaseRequest],
+ Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create database method over gRPC.
@@ -340,10 +426,10 @@ def create_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_database" not in self._stubs:
- self._stubs["create_database"] = self.grpc_channel.unary_unary(
+ self._stubs["create_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
request_serializer=spanner_database_admin.CreateDatabaseRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_database"]
@@ -369,19 +455,85 @@ def get_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database" not in self._stubs:
- self._stubs["get_database"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
request_serializer=spanner_database_admin.GetDatabaseRequest.serialize,
response_deserializer=spanner_database_admin.Database.deserialize,
)
return self._stubs["get_database"]
+ @property
+ def update_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.UpdateDatabaseRequest],
+ Awaitable[operations_pb2.Operation],
+ ]:
+ r"""Return a callable for the update database method over gRPC.
+
+ Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+        ``projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>``
+ and can be used to track the database modification. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
+
+ Returns:
+ Callable[[~.UpdateDatabaseRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_database" not in self._stubs:
+ self._stubs["update_database"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase",
+ request_serializer=spanner_database_admin.UpdateDatabaseRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["update_database"]
+
@property
def update_database_ddl(
self,
) -> Callable[
[spanner_database_admin.UpdateDatabaseDdlRequest],
- Awaitable[operations.Operation],
+ Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the update database ddl method over gRPC.
@@ -406,22 +558,25 @@ def update_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_database_ddl" not in self._stubs:
- self._stubs["update_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["update_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
request_serializer=spanner_database_admin.UpdateDatabaseDdlRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_database_ddl"]
@property
def drop_database(
self,
- ) -> Callable[[spanner_database_admin.DropDatabaseRequest], Awaitable[empty.Empty]]:
+ ) -> Callable[
+ [spanner_database_admin.DropDatabaseRequest], Awaitable[empty_pb2.Empty]
+ ]:
r"""Return a callable for the drop database method over gRPC.
Drops (aka deletes) a Cloud Spanner database. Completed backups
for the database will be retained according to their
- ``expire_time``.
+ ``expire_time``. Note: Cloud Spanner might continue to accept
+ requests for a few seconds after the database has been deleted.
Returns:
Callable[[~.DropDatabaseRequest],
@@ -434,10 +589,10 @@ def drop_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "drop_database" not in self._stubs:
- self._stubs["drop_database"] = self.grpc_channel.unary_unary(
+ self._stubs["drop_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
request_serializer=spanner_database_admin.DropDatabaseRequest.serialize,
- response_deserializer=empty.Empty.FromString,
+ response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["drop_database"]
@@ -466,7 +621,7 @@ def get_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database_ddl" not in self._stubs:
- self._stubs["get_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
request_serializer=spanner_database_admin.GetDatabaseDdlRequest.serialize,
response_deserializer=spanner_database_admin.GetDatabaseDdlResponse.deserialize,
@@ -476,7 +631,7 @@ def get_database_ddl(
@property
def set_iam_policy(
self,
- ) -> Callable[[iam_policy.SetIamPolicyRequest], Awaitable[policy.Policy]]:
+ ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy on a database or backup resource.
@@ -500,17 +655,17 @@ def set_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
- self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["set_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
- request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString,
- response_deserializer=policy.Policy.FromString,
+ request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
+ response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def get_iam_policy(
self,
- ) -> Callable[[iam_policy.GetIamPolicyRequest], Awaitable[policy.Policy]]:
+ ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy for a database or backup
@@ -535,10 +690,10 @@ def get_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
- self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["get_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
- request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString,
- response_deserializer=policy.Policy.FromString,
+ request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
+ response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@@ -546,8 +701,8 @@ def get_iam_policy(
def test_iam_permissions(
self,
) -> Callable[
- [iam_policy.TestIamPermissionsRequest],
- Awaitable[iam_policy.TestIamPermissionsResponse],
+ [iam_policy_pb2.TestIamPermissionsRequest],
+ Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
]:
r"""Return a callable for the test iam permissions method over gRPC.
@@ -573,17 +728,19 @@ def test_iam_permissions(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
- self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
+ self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
- request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString,
- response_deserializer=iam_policy.TestIamPermissionsResponse.FromString,
+ request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
+ response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
@property
def create_backup(
self,
- ) -> Callable[[gsad_backup.CreateBackupRequest], Awaitable[operations.Operation]]:
+ ) -> Callable[
+ [gsad_backup.CreateBackupRequest], Awaitable[operations_pb2.Operation]
+ ]:
r"""Return a callable for the create backup method over gRPC.
Starts creating a new Cloud Spanner Backup. The returned backup
@@ -611,13 +768,51 @@ def create_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_backup" not in self._stubs:
- self._stubs["create_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["create_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup",
request_serializer=gsad_backup.CreateBackupRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_backup"]
+ @property
+ def copy_backup(
+ self,
+ ) -> Callable[[backup.CopyBackupRequest], Awaitable[operations_pb2.Operation]]:
+ r"""Return a callable for the copy backup method over gRPC.
+
+ Starts copying a Cloud Spanner Backup. The returned backup
+ [long-running operation][google.longrunning.Operation] will have
+ a name of the format
+        ``projects/<project>/instances/<instance>/backups/<backup_id>/operations/<operation_id>``
+ and can be used to track copying of the backup. The operation is
+ associated with the destination backup. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Backup][google.spanner.admin.database.v1.Backup], if
+ successful. Cancelling the returned operation will stop the
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
+
+ Returns:
+ Callable[[~.CopyBackupRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "copy_backup" not in self._stubs:
+ self._stubs["copy_backup"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup",
+ request_serializer=backup.CopyBackupRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["copy_backup"]
+
@property
def get_backup(
self,
@@ -638,7 +833,7 @@ def get_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_backup" not in self._stubs:
- self._stubs["get_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["get_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup",
request_serializer=backup.GetBackupRequest.serialize,
response_deserializer=backup.Backup.deserialize,
@@ -665,7 +860,7 @@ def update_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_backup" not in self._stubs:
- self._stubs["update_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["update_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup",
request_serializer=gsad_backup.UpdateBackupRequest.serialize,
response_deserializer=gsad_backup.Backup.deserialize,
@@ -675,7 +870,7 @@ def update_backup(
@property
def delete_backup(
self,
- ) -> Callable[[backup.DeleteBackupRequest], Awaitable[empty.Empty]]:
+ ) -> Callable[[backup.DeleteBackupRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete backup method over gRPC.
Deletes a pending or completed
@@ -692,10 +887,10 @@ def delete_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_backup" not in self._stubs:
- self._stubs["delete_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["delete_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup",
request_serializer=backup.DeleteBackupRequest.serialize,
- response_deserializer=empty.Empty.FromString,
+ response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_backup"]
@@ -720,7 +915,7 @@ def list_backups(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backups" not in self._stubs:
- self._stubs["list_backups"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backups"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups",
request_serializer=backup.ListBackupsRequest.serialize,
response_deserializer=backup.ListBackupsResponse.deserialize,
@@ -731,7 +926,8 @@ def list_backups(
def restore_database(
self,
) -> Callable[
- [spanner_database_admin.RestoreDatabaseRequest], Awaitable[operations.Operation]
+ [spanner_database_admin.RestoreDatabaseRequest],
+ Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the restore database method over gRPC.
@@ -766,10 +962,10 @@ def restore_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_database" not in self._stubs:
- self._stubs["restore_database"] = self.grpc_channel.unary_unary(
+ self._stubs["restore_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase",
request_serializer=spanner_database_admin.RestoreDatabaseRequest.serialize,
- response_deserializer=operations.Operation.FromString,
+ response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["restore_database"]
@@ -804,7 +1000,7 @@ def list_database_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_database_operations" not in self._stubs:
- self._stubs["list_database_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_database_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations",
request_serializer=spanner_database_admin.ListDatabaseOperationsRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabaseOperationsResponse.deserialize,
@@ -844,12 +1040,690 @@ def list_backup_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backup_operations" not in self._stubs:
- self._stubs["list_backup_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backup_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations",
request_serializer=backup.ListBackupOperationsRequest.serialize,
response_deserializer=backup.ListBackupOperationsResponse.deserialize,
)
return self._stubs["list_backup_operations"]
+ @property
+ def list_database_roles(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.ListDatabaseRolesRequest],
+ Awaitable[spanner_database_admin.ListDatabaseRolesResponse],
+ ]:
+ r"""Return a callable for the list database roles method over gRPC.
+
+ Lists Cloud Spanner database roles.
+
+ Returns:
+ Callable[[~.ListDatabaseRolesRequest],
+ Awaitable[~.ListDatabaseRolesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_database_roles" not in self._stubs:
+ self._stubs["list_database_roles"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles",
+ request_serializer=spanner_database_admin.ListDatabaseRolesRequest.serialize,
+ response_deserializer=spanner_database_admin.ListDatabaseRolesResponse.deserialize,
+ )
+ return self._stubs["list_database_roles"]
+
+ @property
+ def add_split_points(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.AddSplitPointsRequest],
+ Awaitable[spanner_database_admin.AddSplitPointsResponse],
+ ]:
+ r"""Return a callable for the add split points method over gRPC.
+
+ Adds split points to specified tables, indexes of a
+ database.
+
+ Returns:
+ Callable[[~.AddSplitPointsRequest],
+ Awaitable[~.AddSplitPointsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "add_split_points" not in self._stubs:
+ self._stubs["add_split_points"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/AddSplitPoints",
+ request_serializer=spanner_database_admin.AddSplitPointsRequest.serialize,
+ response_deserializer=spanner_database_admin.AddSplitPointsResponse.deserialize,
+ )
+ return self._stubs["add_split_points"]
+
+ @property
+ def create_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.CreateBackupScheduleRequest],
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ]:
+ r"""Return a callable for the create backup schedule method over gRPC.
+
+ Creates a new backup schedule.
+
+ Returns:
+ Callable[[~.CreateBackupScheduleRequest],
+ Awaitable[~.BackupSchedule]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_backup_schedule" not in self._stubs:
+ self._stubs["create_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule",
+ request_serializer=gsad_backup_schedule.CreateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["create_backup_schedule"]
+
+ @property
+ def get_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.GetBackupScheduleRequest],
+ Awaitable[backup_schedule.BackupSchedule],
+ ]:
+ r"""Return a callable for the get backup schedule method over gRPC.
+
+ Gets backup schedule for the input schedule name.
+
+ Returns:
+ Callable[[~.GetBackupScheduleRequest],
+ Awaitable[~.BackupSchedule]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_backup_schedule" not in self._stubs:
+ self._stubs["get_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule",
+ request_serializer=backup_schedule.GetBackupScheduleRequest.serialize,
+ response_deserializer=backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["get_backup_schedule"]
+
+ @property
+ def update_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.UpdateBackupScheduleRequest],
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ]:
+ r"""Return a callable for the update backup schedule method over gRPC.
+
+ Updates a backup schedule.
+
+ Returns:
+ Callable[[~.UpdateBackupScheduleRequest],
+ Awaitable[~.BackupSchedule]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_backup_schedule" not in self._stubs:
+ self._stubs["update_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule",
+ request_serializer=gsad_backup_schedule.UpdateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["update_backup_schedule"]
+
+ @property
+ def delete_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.DeleteBackupScheduleRequest], Awaitable[empty_pb2.Empty]
+ ]:
+ r"""Return a callable for the delete backup schedule method over gRPC.
+
+ Deletes a backup schedule.
+
+ Returns:
+ Callable[[~.DeleteBackupScheduleRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_backup_schedule" not in self._stubs:
+ self._stubs["delete_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule",
+ request_serializer=backup_schedule.DeleteBackupScheduleRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs["delete_backup_schedule"]
+
+ @property
+ def list_backup_schedules(
+ self,
+ ) -> Callable[
+ [backup_schedule.ListBackupSchedulesRequest],
+ Awaitable[backup_schedule.ListBackupSchedulesResponse],
+ ]:
+ r"""Return a callable for the list backup schedules method over gRPC.
+
+ Lists all the backup schedules for the database.
+
+ Returns:
+ Callable[[~.ListBackupSchedulesRequest],
+ Awaitable[~.ListBackupSchedulesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_backup_schedules" not in self._stubs:
+ self._stubs["list_backup_schedules"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules",
+ request_serializer=backup_schedule.ListBackupSchedulesRequest.serialize,
+ response_deserializer=backup_schedule.ListBackupSchedulesResponse.deserialize,
+ )
+ return self._stubs["list_backup_schedules"]
+
+ @property
+ def internal_update_graph_operation(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.InternalUpdateGraphOperationRequest],
+ Awaitable[spanner_database_admin.InternalUpdateGraphOperationResponse],
+ ]:
+ r"""Return a callable for the internal update graph
+ operation method over gRPC.
+
+ This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ Returns:
+ Callable[[~.InternalUpdateGraphOperationRequest],
+ Awaitable[~.InternalUpdateGraphOperationResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "internal_update_graph_operation" not in self._stubs:
+ self._stubs[
+ "internal_update_graph_operation"
+ ] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/InternalUpdateGraphOperation",
+ request_serializer=spanner_database_admin.InternalUpdateGraphOperationRequest.serialize,
+ response_deserializer=spanner_database_admin.InternalUpdateGraphOperationResponse.deserialize,
+ )
+ return self._stubs["internal_update_graph_operation"]
+
+ def _prep_wrapped_messages(self, client_info):
+ """Precompute the wrapped methods, overriding the base class method to use async wrappers."""
+ self._wrapped_methods = {
+ self.list_databases: self._wrap_method(
+ self.list_databases,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.create_database: self._wrap_method(
+ self.create_database,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_database: self._wrap_method(
+ self.get_database,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_database: self._wrap_method(
+ self.update_database,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_database_ddl: self._wrap_method(
+ self.update_database_ddl,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.drop_database: self._wrap_method(
+ self.drop_database,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_database_ddl: self._wrap_method(
+ self.get_database_ddl,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.set_iam_policy: self._wrap_method(
+ self.set_iam_policy,
+ default_timeout=30.0,
+ client_info=client_info,
+ ),
+ self.get_iam_policy: self._wrap_method(
+ self.get_iam_policy,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=30.0,
+ ),
+ default_timeout=30.0,
+ client_info=client_info,
+ ),
+ self.test_iam_permissions: self._wrap_method(
+ self.test_iam_permissions,
+ default_timeout=30.0,
+ client_info=client_info,
+ ),
+ self.create_backup: self._wrap_method(
+ self.create_backup,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.copy_backup: self._wrap_method(
+ self.copy_backup,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_backup: self._wrap_method(
+ self.get_backup,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_backup: self._wrap_method(
+ self.update_backup,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.delete_backup: self._wrap_method(
+ self.delete_backup,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backups: self._wrap_method(
+ self.list_backups,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.restore_database: self._wrap_method(
+ self.restore_database,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_database_operations: self._wrap_method(
+ self.list_database_operations,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backup_operations: self._wrap_method(
+ self.list_backup_operations,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_database_roles: self._wrap_method(
+ self.list_database_roles,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.add_split_points: self._wrap_method(
+ self.add_split_points,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.create_backup_schedule: self._wrap_method(
+ self.create_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_backup_schedule: self._wrap_method(
+ self.get_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_backup_schedule: self._wrap_method(
+ self.update_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.delete_backup_schedule: self._wrap_method(
+ self.delete_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backup_schedules: self._wrap_method(
+ self.list_backup_schedules,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.internal_update_graph_operation: self._wrap_method(
+ self.internal_update_graph_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.cancel_operation: self._wrap_method(
+ self.cancel_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_operation: self._wrap_method(
+ self.delete_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_operation: self._wrap_method(
+ self.get_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_operations: self._wrap_method(
+ self.list_operations,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ }
+
+ def _wrap_method(self, func, *args, **kwargs):
+ if self._wrap_with_kind: # pragma: NO COVER
+ kwargs["kind"] = self.kind
+ return gapic_v1.method_async.wrap_method(func, *args, **kwargs)
+
+ def close(self):
+ return self._logged_channel.close()
+
+ @property
+ def kind(self) -> str:
+ return "grpc_asyncio"
+
+ @property
+ def delete_operation(
+ self,
+ ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
+ r"""Return a callable for the delete_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_operation" not in self._stubs:
+ self._stubs["delete_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/DeleteOperation",
+ request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["delete_operation"]
+
+ @property
+ def cancel_operation(
+ self,
+ ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+ r"""Return a callable for the cancel_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_operation" not in self._stubs:
+ self._stubs["cancel_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/CancelOperation",
+ request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["cancel_operation"]
+
+ @property
+ def get_operation(
+ self,
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+ r"""Return a callable for the get_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_operation" not in self._stubs:
+ self._stubs["get_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/GetOperation",
+ request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["get_operation"]
+
+ @property
+ def list_operations(
+ self,
+ ) -> Callable[
+ [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
+ ]:
+ r"""Return a callable for the list_operations method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_operations" not in self._stubs:
+ self._stubs["list_operations"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/ListOperations",
+ request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+ response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+ )
+ return self._stubs["list_operations"]
+
__all__ = ("DatabaseAdminGrpcAsyncIOTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py
new file mode 100644
index 0000000000..dfec442041
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py
@@ -0,0 +1,6551 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+import json # type: ignore
+
+from google.auth.transport.requests import AuthorizedSession # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.api_core import exceptions as core_exceptions
+from google.api_core import retry as retries
+from google.api_core import rest_helpers
+from google.api_core import rest_streaming
+from google.api_core import gapic_v1
+import google.protobuf
+
+from google.protobuf import json_format
+from google.api_core import operations_v1
+
+from requests import __version__ as requests_version
+import dataclasses
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+import warnings
+
+
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
+from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+
+
+from .rest_base import _BaseDatabaseAdminRestTransport
+from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = logging.getLogger(__name__)
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
+ grpc_version=None,
+ rest_version=f"requests@{requests_version}",
+)
+
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
+
+
+class DatabaseAdminRestInterceptor:
+ """Interceptor for DatabaseAdmin.
+
+ Interceptors are used to manipulate requests, request metadata, and responses
+ in arbitrary ways.
+ Example use cases include:
+ * Logging
+ * Verifying requests according to service or custom semantics
+ * Stripping extraneous information from responses
+
+ These use cases and more can be enabled by injecting an
+ instance of a custom subclass when constructing the DatabaseAdminRestTransport.
+
+ .. code-block:: python
+ class MyCustomDatabaseAdminInterceptor(DatabaseAdminRestInterceptor):
+ def pre_add_split_points(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_add_split_points(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_copy_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_copy_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_create_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_create_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_create_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_create_backup_schedule(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_create_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_create_database(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_delete_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def pre_delete_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def pre_drop_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def pre_get_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_backup_schedule(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_database(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_database_ddl(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_database_ddl(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_iam_policy(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_iam_policy(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_internal_update_graph_operation(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_internal_update_graph_operation(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_backup_operations(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_backup_operations(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_backups(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_backups(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_backup_schedules(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_backup_schedules(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_database_operations(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_database_operations(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_database_roles(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_database_roles(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_databases(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_databases(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_restore_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_restore_database(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_set_iam_policy(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_set_iam_policy(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_test_iam_permissions(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_test_iam_permissions(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_update_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_update_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_update_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_update_backup_schedule(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_update_database(self, request, metadata):
+        logging.log(logging.DEBUG, f"Received request: {request}")
+ return request, metadata
+
+ def post_update_database(self, response):
+        logging.log(logging.DEBUG, f"Received response: {response}")
+ return response
+
+ def pre_update_database_ddl(self, request, metadata):
+        logging.log(logging.DEBUG, f"Received request: {request}")
+ return request, metadata
+
+ def post_update_database_ddl(self, response):
+        logging.log(logging.DEBUG, f"Received response: {response}")
+ return response
+
+ transport = DatabaseAdminRestTransport(interceptor=MyCustomDatabaseAdminInterceptor())
+ client = DatabaseAdminClient(transport=transport)
+
+
+ """
+
+ def pre_add_split_points(
+ self,
+ request: spanner_database_admin.AddSplitPointsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.AddSplitPointsRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for add_split_points
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_add_split_points(
+ self, response: spanner_database_admin.AddSplitPointsResponse
+ ) -> spanner_database_admin.AddSplitPointsResponse:
+ """Post-rpc interceptor for add_split_points
+
+ DEPRECATED. Please use the `post_add_split_points_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_add_split_points` interceptor runs
+ before the `post_add_split_points_with_metadata` interceptor.
+ """
+ return response
+
+ def post_add_split_points_with_metadata(
+ self,
+ response: spanner_database_admin.AddSplitPointsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.AddSplitPointsResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for add_split_points
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_add_split_points_with_metadata`
+ interceptor in new development instead of the `post_add_split_points` interceptor.
+ When both interceptors are used, this `post_add_split_points_with_metadata` interceptor runs after the
+ `post_add_split_points` interceptor. The (possibly modified) response returned by
+ `post_add_split_points` will be passed to
+ `post_add_split_points_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_copy_backup(
+ self,
+ request: backup.CopyBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.CopyBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for copy_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_copy_backup(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for copy_backup
+
+ DEPRECATED. Please use the `post_copy_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_copy_backup` interceptor runs
+ before the `post_copy_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_copy_backup_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for copy_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_copy_backup_with_metadata`
+ interceptor in new development instead of the `post_copy_backup` interceptor.
+ When both interceptors are used, this `post_copy_backup_with_metadata` interceptor runs after the
+ `post_copy_backup` interceptor. The (possibly modified) response returned by
+ `post_copy_backup` will be passed to
+ `post_copy_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_create_backup(
+ self,
+ request: gsad_backup.CreateBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup.CreateBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for create_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_create_backup(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for create_backup
+
+ DEPRECATED. Please use the `post_create_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_create_backup` interceptor runs
+ before the `post_create_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_create_backup_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for create_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_create_backup_with_metadata`
+ interceptor in new development instead of the `post_create_backup` interceptor.
+ When both interceptors are used, this `post_create_backup_with_metadata` interceptor runs after the
+ `post_create_backup` interceptor. The (possibly modified) response returned by
+ `post_create_backup` will be passed to
+ `post_create_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_create_backup_schedule(
+ self,
+ request: gsad_backup_schedule.CreateBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.CreateBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for create_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_create_backup_schedule(
+ self, response: gsad_backup_schedule.BackupSchedule
+ ) -> gsad_backup_schedule.BackupSchedule:
+ """Post-rpc interceptor for create_backup_schedule
+
+ DEPRECATED. Please use the `post_create_backup_schedule_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_create_backup_schedule` interceptor runs
+ before the `post_create_backup_schedule_with_metadata` interceptor.
+ """
+ return response
+
+ def post_create_backup_schedule_with_metadata(
+ self,
+ response: gsad_backup_schedule.BackupSchedule,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for create_backup_schedule
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_create_backup_schedule_with_metadata`
+ interceptor in new development instead of the `post_create_backup_schedule` interceptor.
+ When both interceptors are used, this `post_create_backup_schedule_with_metadata` interceptor runs after the
+ `post_create_backup_schedule` interceptor. The (possibly modified) response returned by
+ `post_create_backup_schedule` will be passed to
+ `post_create_backup_schedule_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_create_database(
+ self,
+ request: spanner_database_admin.CreateDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.CreateDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for create_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_create_database(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for create_database
+
+ DEPRECATED. Please use the `post_create_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_create_database` interceptor runs
+ before the `post_create_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_create_database_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for create_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_create_database_with_metadata`
+ interceptor in new development instead of the `post_create_database` interceptor.
+ When both interceptors are used, this `post_create_database_with_metadata` interceptor runs after the
+ `post_create_database` interceptor. The (possibly modified) response returned by
+ `post_create_database` will be passed to
+ `post_create_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_delete_backup(
+ self,
+ request: backup.DeleteBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.DeleteBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for delete_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def pre_delete_backup_schedule(
+ self,
+ request: backup_schedule.DeleteBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.DeleteBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for delete_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def pre_drop_database(
+ self,
+ request: spanner_database_admin.DropDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.DropDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for drop_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def pre_get_backup(
+ self,
+ request: backup.GetBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.GetBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for get_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_backup(self, response: backup.Backup) -> backup.Backup:
+ """Post-rpc interceptor for get_backup
+
+ DEPRECATED. Please use the `post_get_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_backup` interceptor runs
+ before the `post_get_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_backup_with_metadata(
+ self, response: backup.Backup, metadata: Sequence[Tuple[str, Union[str, bytes]]]
+ ) -> Tuple[backup.Backup, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for get_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_backup_with_metadata`
+ interceptor in new development instead of the `post_get_backup` interceptor.
+ When both interceptors are used, this `post_get_backup_with_metadata` interceptor runs after the
+ `post_get_backup` interceptor. The (possibly modified) response returned by
+ `post_get_backup` will be passed to
+ `post_get_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_backup_schedule(
+ self,
+ request: backup_schedule.GetBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.GetBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for get_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_backup_schedule(
+ self, response: backup_schedule.BackupSchedule
+ ) -> backup_schedule.BackupSchedule:
+ """Post-rpc interceptor for get_backup_schedule
+
+ DEPRECATED. Please use the `post_get_backup_schedule_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_backup_schedule` interceptor runs
+ before the `post_get_backup_schedule_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_backup_schedule_with_metadata(
+ self,
+ response: backup_schedule.BackupSchedule,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for get_backup_schedule
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_backup_schedule_with_metadata`
+ interceptor in new development instead of the `post_get_backup_schedule` interceptor.
+ When both interceptors are used, this `post_get_backup_schedule_with_metadata` interceptor runs after the
+ `post_get_backup_schedule` interceptor. The (possibly modified) response returned by
+ `post_get_backup_schedule` will be passed to
+ `post_get_backup_schedule_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_database(
+ self,
+ request: spanner_database_admin.GetDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.GetDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for get_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_database(
+ self, response: spanner_database_admin.Database
+ ) -> spanner_database_admin.Database:
+ """Post-rpc interceptor for get_database
+
+ DEPRECATED. Please use the `post_get_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_database` interceptor runs
+ before the `post_get_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_database_with_metadata(
+ self,
+ response: spanner_database_admin.Database,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.Database, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for get_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_database_with_metadata`
+ interceptor in new development instead of the `post_get_database` interceptor.
+ When both interceptors are used, this `post_get_database_with_metadata` interceptor runs after the
+ `post_get_database` interceptor. The (possibly modified) response returned by
+ `post_get_database` will be passed to
+ `post_get_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_database_ddl(
+ self,
+ request: spanner_database_admin.GetDatabaseDdlRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.GetDatabaseDdlRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for get_database_ddl
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_database_ddl(
+ self, response: spanner_database_admin.GetDatabaseDdlResponse
+ ) -> spanner_database_admin.GetDatabaseDdlResponse:
+ """Post-rpc interceptor for get_database_ddl
+
+ DEPRECATED. Please use the `post_get_database_ddl_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_database_ddl` interceptor runs
+ before the `post_get_database_ddl_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_database_ddl_with_metadata(
+ self,
+ response: spanner_database_admin.GetDatabaseDdlResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.GetDatabaseDdlResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for get_database_ddl
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_database_ddl_with_metadata`
+ interceptor in new development instead of the `post_get_database_ddl` interceptor.
+ When both interceptors are used, this `post_get_database_ddl_with_metadata` interceptor runs after the
+ `post_get_database_ddl` interceptor. The (possibly modified) response returned by
+ `post_get_database_ddl` will be passed to
+ `post_get_database_ddl_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_iam_policy(
+ self,
+ request: iam_policy_pb2.GetIamPolicyRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for get_iam_policy
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:
+ """Post-rpc interceptor for get_iam_policy
+
+ DEPRECATED. Please use the `post_get_iam_policy_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_iam_policy` interceptor runs
+ before the `post_get_iam_policy_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_iam_policy_with_metadata(
+ self,
+ response: policy_pb2.Policy,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for get_iam_policy
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_iam_policy_with_metadata`
+ interceptor in new development instead of the `post_get_iam_policy` interceptor.
+ When both interceptors are used, this `post_get_iam_policy_with_metadata` interceptor runs after the
+ `post_get_iam_policy` interceptor. The (possibly modified) response returned by
+ `post_get_iam_policy` will be passed to
+ `post_get_iam_policy_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_backup_operations(
+ self,
+ request: backup.ListBackupOperationsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup.ListBackupOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for list_backup_operations
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_backup_operations(
+ self, response: backup.ListBackupOperationsResponse
+ ) -> backup.ListBackupOperationsResponse:
+ """Post-rpc interceptor for list_backup_operations
+
+ DEPRECATED. Please use the `post_list_backup_operations_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_backup_operations` interceptor runs
+ before the `post_list_backup_operations_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_backup_operations_with_metadata(
+ self,
+ response: backup.ListBackupOperationsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup.ListBackupOperationsResponse, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for list_backup_operations
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_backup_operations_with_metadata`
+ interceptor in new development instead of the `post_list_backup_operations` interceptor.
+ When both interceptors are used, this `post_list_backup_operations_with_metadata` interceptor runs after the
+ `post_list_backup_operations` interceptor. The (possibly modified) response returned by
+ `post_list_backup_operations` will be passed to
+ `post_list_backup_operations_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_backups(
+ self,
+ request: backup.ListBackupsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.ListBackupsRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for list_backups
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_backups(
+ self, response: backup.ListBackupsResponse
+ ) -> backup.ListBackupsResponse:
+ """Post-rpc interceptor for list_backups
+
+ DEPRECATED. Please use the `post_list_backups_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_backups` interceptor runs
+ before the `post_list_backups_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_backups_with_metadata(
+ self,
+ response: backup.ListBackupsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.ListBackupsResponse, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for list_backups
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_backups_with_metadata`
+ interceptor in new development instead of the `post_list_backups` interceptor.
+ When both interceptors are used, this `post_list_backups_with_metadata` interceptor runs after the
+ `post_list_backups` interceptor. The (possibly modified) response returned by
+ `post_list_backups` will be passed to
+ `post_list_backups_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_backup_schedules(
+ self,
+ request: backup_schedule.ListBackupSchedulesRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.ListBackupSchedulesRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_backup_schedules
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_backup_schedules(
+ self, response: backup_schedule.ListBackupSchedulesResponse
+ ) -> backup_schedule.ListBackupSchedulesResponse:
+ """Post-rpc interceptor for list_backup_schedules
+
+ DEPRECATED. Please use the `post_list_backup_schedules_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_backup_schedules` interceptor runs
+ before the `post_list_backup_schedules_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_backup_schedules_with_metadata(
+ self,
+ response: backup_schedule.ListBackupSchedulesResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.ListBackupSchedulesResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_backup_schedules
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_backup_schedules_with_metadata`
+ interceptor in new development instead of the `post_list_backup_schedules` interceptor.
+ When both interceptors are used, this `post_list_backup_schedules_with_metadata` interceptor runs after the
+ `post_list_backup_schedules` interceptor. The (possibly modified) response returned by
+ `post_list_backup_schedules` will be passed to
+ `post_list_backup_schedules_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_database_operations(
+ self,
+ request: spanner_database_admin.ListDatabaseOperationsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseOperationsRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_database_operations
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_database_operations(
+ self, response: spanner_database_admin.ListDatabaseOperationsResponse
+ ) -> spanner_database_admin.ListDatabaseOperationsResponse:
+ """Post-rpc interceptor for list_database_operations
+
+ DEPRECATED. Please use the `post_list_database_operations_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_database_operations` interceptor runs
+ before the `post_list_database_operations_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_database_operations_with_metadata(
+ self,
+ response: spanner_database_admin.ListDatabaseOperationsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseOperationsResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_database_operations
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_database_operations_with_metadata`
+ interceptor in new development instead of the `post_list_database_operations` interceptor.
+ When both interceptors are used, this `post_list_database_operations_with_metadata` interceptor runs after the
+ `post_list_database_operations` interceptor. The (possibly modified) response returned by
+ `post_list_database_operations` will be passed to
+ `post_list_database_operations_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_database_roles(
+ self,
+ request: spanner_database_admin.ListDatabaseRolesRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseRolesRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_database_roles
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_database_roles(
+ self, response: spanner_database_admin.ListDatabaseRolesResponse
+ ) -> spanner_database_admin.ListDatabaseRolesResponse:
+ """Post-rpc interceptor for list_database_roles
+
+ DEPRECATED. Please use the `post_list_database_roles_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_database_roles` interceptor runs
+ before the `post_list_database_roles_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_database_roles_with_metadata(
+ self,
+ response: spanner_database_admin.ListDatabaseRolesResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseRolesResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_database_roles
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_database_roles_with_metadata`
+ interceptor in new development instead of the `post_list_database_roles` interceptor.
+ When both interceptors are used, this `post_list_database_roles_with_metadata` interceptor runs after the
+ `post_list_database_roles` interceptor. The (possibly modified) response returned by
+ `post_list_database_roles` will be passed to
+ `post_list_database_roles_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_databases(
+ self,
+ request: spanner_database_admin.ListDatabasesRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabasesRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_databases
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_databases(
+ self, response: spanner_database_admin.ListDatabasesResponse
+ ) -> spanner_database_admin.ListDatabasesResponse:
+ """Post-rpc interceptor for list_databases
+
+ DEPRECATED. Please use the `post_list_databases_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_databases` interceptor runs
+ before the `post_list_databases_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_databases_with_metadata(
+ self,
+ response: spanner_database_admin.ListDatabasesResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabasesResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_databases
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_databases_with_metadata`
+ interceptor in new development instead of the `post_list_databases` interceptor.
+ When both interceptors are used, this `post_list_databases_with_metadata` interceptor runs after the
+ `post_list_databases` interceptor. The (possibly modified) response returned by
+ `post_list_databases` will be passed to
+ `post_list_databases_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_restore_database(
+ self,
+ request: spanner_database_admin.RestoreDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.RestoreDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for restore_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_restore_database(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for restore_database
+
+ DEPRECATED. Please use the `post_restore_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_restore_database` interceptor runs
+ before the `post_restore_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_restore_database_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for restore_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_restore_database_with_metadata`
+ interceptor in new development instead of the `post_restore_database` interceptor.
+ When both interceptors are used, this `post_restore_database_with_metadata` interceptor runs after the
+ `post_restore_database` interceptor. The (possibly modified) response returned by
+ `post_restore_database` will be passed to
+ `post_restore_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_set_iam_policy(
+ self,
+ request: iam_policy_pb2.SetIamPolicyRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for set_iam_policy
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:
+ """Post-rpc interceptor for set_iam_policy
+
+ DEPRECATED. Please use the `post_set_iam_policy_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_set_iam_policy` interceptor runs
+ before the `post_set_iam_policy_with_metadata` interceptor.
+ """
+ return response
+
+ def post_set_iam_policy_with_metadata(
+ self,
+ response: policy_pb2.Policy,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for set_iam_policy
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_set_iam_policy_with_metadata`
+ interceptor in new development instead of the `post_set_iam_policy` interceptor.
+ When both interceptors are used, this `post_set_iam_policy_with_metadata` interceptor runs after the
+ `post_set_iam_policy` interceptor. The (possibly modified) response returned by
+ `post_set_iam_policy` will be passed to
+ `post_set_iam_policy_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_test_iam_permissions(
+ self,
+ request: iam_policy_pb2.TestIamPermissionsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.TestIamPermissionsRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for test_iam_permissions
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_test_iam_permissions(
+ self, response: iam_policy_pb2.TestIamPermissionsResponse
+ ) -> iam_policy_pb2.TestIamPermissionsResponse:
+ """Post-rpc interceptor for test_iam_permissions
+
+ DEPRECATED. Please use the `post_test_iam_permissions_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_test_iam_permissions` interceptor runs
+ before the `post_test_iam_permissions_with_metadata` interceptor.
+ """
+ return response
+
+ def post_test_iam_permissions_with_metadata(
+ self,
+ response: iam_policy_pb2.TestIamPermissionsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.TestIamPermissionsResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for test_iam_permissions
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_test_iam_permissions_with_metadata`
+ interceptor in new development instead of the `post_test_iam_permissions` interceptor.
+ When both interceptors are used, this `post_test_iam_permissions_with_metadata` interceptor runs after the
+ `post_test_iam_permissions` interceptor. The (possibly modified) response returned by
+ `post_test_iam_permissions` will be passed to
+ `post_test_iam_permissions_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_backup(
+ self,
+ request: gsad_backup.UpdateBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup.UpdateBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for update_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_backup(self, response: gsad_backup.Backup) -> gsad_backup.Backup:
+ """Post-rpc interceptor for update_backup
+
+ DEPRECATED. Please use the `post_update_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_backup` interceptor runs
+ before the `post_update_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_backup_with_metadata(
+ self,
+ response: gsad_backup.Backup,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[gsad_backup.Backup, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for update_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_backup_with_metadata`
+ interceptor in new development instead of the `post_update_backup` interceptor.
+ When both interceptors are used, this `post_update_backup_with_metadata` interceptor runs after the
+ `post_update_backup` interceptor. The (possibly modified) response returned by
+ `post_update_backup` will be passed to
+ `post_update_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_backup_schedule(
+ self,
+ request: gsad_backup_schedule.UpdateBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.UpdateBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for update_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_backup_schedule(
+ self, response: gsad_backup_schedule.BackupSchedule
+ ) -> gsad_backup_schedule.BackupSchedule:
+ """Post-rpc interceptor for update_backup_schedule
+
+ DEPRECATED. Please use the `post_update_backup_schedule_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_backup_schedule` interceptor runs
+ before the `post_update_backup_schedule_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_backup_schedule_with_metadata(
+ self,
+ response: gsad_backup_schedule.BackupSchedule,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for update_backup_schedule
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_backup_schedule_with_metadata`
+ interceptor in new development instead of the `post_update_backup_schedule` interceptor.
+ When both interceptors are used, this `post_update_backup_schedule_with_metadata` interceptor runs after the
+ `post_update_backup_schedule` interceptor. The (possibly modified) response returned by
+ `post_update_backup_schedule` will be passed to
+ `post_update_backup_schedule_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_database(
+ self,
+ request: spanner_database_admin.UpdateDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.UpdateDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for update_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_database(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for update_database
+
+ DEPRECATED. Please use the `post_update_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_database` interceptor runs
+ before the `post_update_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_database_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for update_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_database_with_metadata`
+ interceptor in new development instead of the `post_update_database` interceptor.
+ When both interceptors are used, this `post_update_database_with_metadata` interceptor runs after the
+ `post_update_database` interceptor. The (possibly modified) response returned by
+ `post_update_database` will be passed to
+ `post_update_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_database_ddl(
+ self,
+ request: spanner_database_admin.UpdateDatabaseDdlRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.UpdateDatabaseDdlRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for update_database_ddl
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_database_ddl(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for update_database_ddl
+
+ DEPRECATED. Please use the `post_update_database_ddl_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_database_ddl` interceptor runs
+ before the `post_update_database_ddl_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_database_ddl_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for update_database_ddl
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_database_ddl_with_metadata`
+ interceptor in new development instead of the `post_update_database_ddl` interceptor.
+ When both interceptors are used, this `post_update_database_ddl_with_metadata` interceptor runs after the
+ `post_update_database_ddl` interceptor. The (possibly modified) response returned by
+ `post_update_database_ddl` will be passed to
+ `post_update_database_ddl_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_cancel_operation(
+ self,
+ request: operations_pb2.CancelOperationRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for cancel_operation
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_cancel_operation(self, response: None) -> None:
+ """Post-rpc interceptor for cancel_operation
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+ def pre_delete_operation(
+ self,
+ request: operations_pb2.DeleteOperationRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for delete_operation
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_delete_operation(self, response: None) -> None:
+ """Post-rpc interceptor for delete_operation
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+ def pre_get_operation(
+ self,
+ request: operations_pb2.GetOperationRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for get_operation
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_operation(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for get_operation
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+ def pre_list_operations(
+ self,
+ request: operations_pb2.ListOperationsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for list_operations
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_operations(
+ self, response: operations_pb2.ListOperationsResponse
+ ) -> operations_pb2.ListOperationsResponse:
+ """Post-rpc interceptor for list_operations
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+
@dataclasses.dataclass
class DatabaseAdminRestStub:
    """Per-RPC state shared by the nested REST method classes of the transport.

    Each ``_<MethodName>`` class mixes this in so that ``__call__`` can reach
    the authorized HTTP session, the target host, and the interceptor whose
    pre-/post- hooks wrap the call.
    """

    # Authorized HTTP session used to send the request.
    _session: AuthorizedSession
    # Scheme + hostname the request URI is built against.
    _host: str
    # Interceptor providing the pre_*/post_* hooks around each RPC.
    _interceptor: DatabaseAdminRestInterceptor
+
+
+class DatabaseAdminRestTransport(_BaseDatabaseAdminRestTransport):
+ """REST backend synchronous transport for DatabaseAdmin.
+
+ Cloud Spanner Database Admin API
+
+ The Cloud Spanner Database Admin API can be used to:
+
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends JSON representations of protocol buffers over HTTP/1.1
+ """
+
    def __init__(
        self,
        *,
        host: str = "spanner.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        url_scheme: str = "https",
        interceptor: Optional[DatabaseAdminRestInterceptor] = None,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'spanner.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.

            credentials_file (Optional[str]): Deprecated. A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided. This argument will be
                removed in the next major version of this library.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you are developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers,
                "http" can be specified.
            interceptor (Optional[DatabaseAdminRestInterceptor]): Interceptor
                whose pre-/post- hooks wrap every RPC issued through this
                transport. Defaults to a no-op interceptor.
            api_audience (Optional[str]): Audience forwarded to the base
                transport, used when minting self-signed JWT credentials.

        NOTE(review): ``credentials_file``, ``scopes`` and ``quota_project_id``
        are accepted but not forwarded to the base constructor in this code
        (see TODO below) — confirm the base class resolves them if required.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            url_scheme=url_scheme,
            api_audience=api_audience,
        )
        # Session carries the resolved credentials for every HTTP call.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST
        )
        # Created lazily by the `operations_client` property below.
        self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        # Fall back to a no-op interceptor so call sites need no None checks.
        self._interceptor = interceptor or DatabaseAdminRestInterceptor()
        self._prep_wrapped_messages(client_info)
+
    @property
    def operations_client(self) -> operations_v1.AbstractOperationsClient:
        """Create the client designed to process long-running operations.

        This property caches on the instance; repeated calls return the same
        client.
        """
        # Only create a new client if we do not already have one.
        if self._operations_client is None:
            # Static routing table for the four google.longrunning verbs.
            # Spanner exposes operations under four parents — databases,
            # instances, backups, and instance configs — hence four URI
            # patterns per verb.
            http_options: Dict[str, List[Dict[str, str]]] = {
                "google.longrunning.Operations.CancelOperation": [
                    {
                        "method": "post",
                        "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}:cancel",
                    },
                    {
                        "method": "post",
                        "uri": "/v1/{name=projects/*/instances/*/operations/*}:cancel",
                    },
                    {
                        "method": "post",
                        "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}:cancel",
                    },
                    {
                        "method": "post",
                        "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}:cancel",
                    },
                ],
                "google.longrunning.Operations.DeleteOperation": [
                    {
                        "method": "delete",
                        "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
                    },
                    {
                        "method": "delete",
                        "uri": "/v1/{name=projects/*/instances/*/operations/*}",
                    },
                    {
                        "method": "delete",
                        "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
                    },
                    {
                        "method": "delete",
                        "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
                    },
                ],
                "google.longrunning.Operations.GetOperation": [
                    {
                        "method": "get",
                        "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
                    },
                    {
                        "method": "get",
                        "uri": "/v1/{name=projects/*/instances/*/operations/*}",
                    },
                    {
                        "method": "get",
                        "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
                    },
                    {
                        "method": "get",
                        "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
                    },
                ],
                "google.longrunning.Operations.ListOperations": [
                    {
                        "method": "get",
                        "uri": "/v1/{name=projects/*/instances/*/databases/*/operations}",
                    },
                    {
                        "method": "get",
                        "uri": "/v1/{name=projects/*/instances/*/operations}",
                    },
                    {
                        "method": "get",
                        "uri": "/v1/{name=projects/*/instances/*/backups/*/operations}",
                    },
                    {
                        "method": "get",
                        "uri": "/v1/{name=projects/*/instanceConfigs/*/operations}",
                    },
                ],
            }

            # Reuse this transport's host and credentials so the operations
            # client talks to the same endpoint with the same identity.
            rest_transport = operations_v1.OperationsRestTransport(
                host=self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                scopes=self._scopes,
                http_options=http_options,
                path_prefix="v1",
            )

            self._operations_client = operations_v1.AbstractOperationsClient(
                transport=rest_transport
            )

        # Return the client from cache.
        return self._operations_client
+
+ class _AddSplitPoints(
+ _BaseDatabaseAdminRestTransport._BaseAddSplitPoints, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.AddSplitPoints")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.AddSplitPointsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.AddSplitPointsResponse:
+ r"""Call the add split points method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.AddSplitPointsRequest):
+ The request object. The request for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.AddSplitPointsResponse:
+ The response for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_add_split_points(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.AddSplitPoints",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "AddSplitPoints",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._AddSplitPoints._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.AddSplitPointsResponse()
+ pb_resp = spanner_database_admin.AddSplitPointsResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_add_split_points(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_add_split_points_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ spanner_database_admin.AddSplitPointsResponse.to_json(response)
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.add_split_points",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "AddSplitPoints",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _CopyBackup(
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.CopyBackup")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup.CopyBackupRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the copy backup method over HTTP.
+
+ Args:
+ request (~.backup.CopyBackupRequest):
+ The request object. The request for
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_copy_backup(request, metadata)
+ transcoded_request = (
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_transcoded_request(
+ http_options, request
+ )
+ )
+
+ body = (
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_request_body_json(
+ transcoded_request
+ )
+ )
+
+ # Jsonify the query params
+ query_params = (
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_query_params_json(
+ transcoded_request
+ )
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CopyBackup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CopyBackup",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._CopyBackup._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_copy_backup(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_copy_backup_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.copy_backup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CopyBackup",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
class _CreateBackup(
    _BaseDatabaseAdminRestTransport._BaseCreateBackup, DatabaseAdminRestStub
):
    """REST stub for the DatabaseAdmin.CreateBackup RPC (returns a long-running Operation)."""

    def __hash__(self):
        # Stable per-RPC identity so stubs can be cached/deduplicated.
        return hash("DatabaseAdminRestTransport.CreateBackup")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP request through the authorized session.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: gsad_backup.CreateBackupRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> operations_pb2.Operation:
        r"""Call the create backup method over HTTP.

        Args:
            request (~.gsad_backup.CreateBackupRequest):
                The request object. The request for
                [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.operations_pb2.Operation:
                This resource represents a
                long-running operation that is the
                result of a network API call.

        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_http_options()
        )

        request, metadata = self._interceptor.pre_create_backup(request, metadata)
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_transcoded_request(
            http_options, request
        )

        body = _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_request_body_json(
            transcoded_request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Proto-plus request: serialize via its own to_json.
                # (json_format.MessageToJson raises on proto-plus wrappers,
                # which silently dropped the payload before.)
                request_payload = type(request).to_json(request)
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateBackup",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateBackup",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._CreateBackup._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        resp = operations_pb2.Operation()
        json_format.Parse(response.content, resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_create_backup(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_create_backup_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # resp is a plain protobuf Operation, so MessageToJson is correct here.
                response_payload = json_format.MessageToJson(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_backup",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateBackup",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
+
class _CreateBackupSchedule(
    _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule, DatabaseAdminRestStub
):
    """REST stub for the DatabaseAdmin.CreateBackupSchedule RPC."""

    def __hash__(self):
        # Stable per-RPC identity so stubs can be cached/deduplicated.
        return hash("DatabaseAdminRestTransport.CreateBackupSchedule")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP request through the authorized session.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: gsad_backup_schedule.CreateBackupScheduleRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> gsad_backup_schedule.BackupSchedule:
        r"""Call the create backup schedule method over HTTP.

        Args:
            request (~.gsad_backup_schedule.CreateBackupScheduleRequest):
                The request object. The request for
                [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.gsad_backup_schedule.BackupSchedule:
                BackupSchedule expresses the
                automated backup creation specification
                for a Spanner database. Next ID: 10

        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_http_options()
        )

        request, metadata = self._interceptor.pre_create_backup_schedule(
            request, metadata
        )
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_transcoded_request(
            http_options, request
        )

        body = _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_request_body_json(
            transcoded_request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateBackupSchedule",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateBackupSchedule",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._CreateBackupSchedule._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        resp = gsad_backup_schedule.BackupSchedule()
        pb_resp = gsad_backup_schedule.BackupSchedule.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_create_backup_schedule(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_create_backup_schedule_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Serialize the parsed proto (`resp`), not the raw HTTP
                # `response` object — the latter always raised and the
                # bare except silently logged a null payload.
                response_payload = gsad_backup_schedule.BackupSchedule.to_json(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_backup_schedule",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateBackupSchedule",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
+
class _CreateDatabase(
    _BaseDatabaseAdminRestTransport._BaseCreateDatabase, DatabaseAdminRestStub
):
    """REST stub for the DatabaseAdmin.CreateDatabase RPC (returns a long-running Operation)."""

    def __hash__(self):
        # Stable per-RPC identity so stubs can be cached/deduplicated.
        return hash("DatabaseAdminRestTransport.CreateDatabase")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP request through the authorized session.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: spanner_database_admin.CreateDatabaseRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> operations_pb2.Operation:
        r"""Call the create database method over HTTP.

        Args:
            request (~.spanner_database_admin.CreateDatabaseRequest):
                The request object. The request for
                [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.operations_pb2.Operation:
                This resource represents a
                long-running operation that is the
                result of a network API call.

        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_http_options()
        )

        request, metadata = self._interceptor.pre_create_database(request, metadata)
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_transcoded_request(
            http_options, request
        )

        body = _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_request_body_json(
            transcoded_request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Proto-plus request: serialize via its own to_json.
                # (json_format.MessageToJson raises on proto-plus wrappers,
                # which silently dropped the payload before.)
                request_payload = type(request).to_json(request)
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateDatabase",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateDatabase",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._CreateDatabase._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        resp = operations_pb2.Operation()
        json_format.Parse(response.content, resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_create_database(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_create_database_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # resp is a plain protobuf Operation, so MessageToJson is correct here.
                response_payload = json_format.MessageToJson(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_database",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateDatabase",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
+
class _DeleteBackup(
    _BaseDatabaseAdminRestTransport._BaseDeleteBackup, DatabaseAdminRestStub
):
    """REST stub for the DatabaseAdmin.DeleteBackup RPC (empty response)."""

    def __hash__(self):
        # Stable per-RPC identity so stubs can be cached/deduplicated.
        return hash("DatabaseAdminRestTransport.DeleteBackup")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP request through the authorized session.
        # No request body is sent for this RPC.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: backup.DeleteBackupRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ):
        r"""Call the delete backup method over HTTP.

        Args:
            request (~.backup.DeleteBackupRequest):
                The request object. The request for
                [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_http_options()
        )

        request, metadata = self._interceptor.pre_delete_backup(request, metadata)
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Proto-plus request: serialize via its own to_json.
                # (json_format.MessageToJson raises on proto-plus wrappers,
                # which silently dropped the payload before.)
                request_payload = type(request).to_json(request)
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteBackup",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "DeleteBackup",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._DeleteBackup._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)
+
class _DeleteBackupSchedule(
    _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule, DatabaseAdminRestStub
):
    """REST stub for the DatabaseAdmin.DeleteBackupSchedule RPC (empty response)."""

    def __hash__(self):
        # Stable per-RPC identity so stubs can be cached/deduplicated.
        return hash("DatabaseAdminRestTransport.DeleteBackupSchedule")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP request through the authorized session.
        # No request body is sent for this RPC.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: backup_schedule.DeleteBackupScheduleRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ):
        r"""Call the delete backup schedule method over HTTP.

        Args:
            request (~.backup_schedule.DeleteBackupScheduleRequest):
                The request object. The request for
                [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_http_options()
        )

        request, metadata = self._interceptor.pre_delete_backup_schedule(
            request, metadata
        )
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Proto-plus request: serialize via its own to_json.
                # (json_format.MessageToJson raises on proto-plus wrappers,
                # which silently dropped the payload before.)
                request_payload = type(request).to_json(request)
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteBackupSchedule",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "DeleteBackupSchedule",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._DeleteBackupSchedule._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)
+
class _DropDatabase(
    _BaseDatabaseAdminRestTransport._BaseDropDatabase, DatabaseAdminRestStub
):
    """REST stub for the DatabaseAdmin.DropDatabase RPC (empty response)."""

    def __hash__(self):
        # Stable per-RPC identity so stubs can be cached/deduplicated.
        return hash("DatabaseAdminRestTransport.DropDatabase")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP request through the authorized session.
        # No request body is sent for this RPC.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: spanner_database_admin.DropDatabaseRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ):
        r"""Call the drop database method over HTTP.

        Args:
            request (~.spanner_database_admin.DropDatabaseRequest):
                The request object. The request for
                [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_http_options()
        )

        request, metadata = self._interceptor.pre_drop_database(request, metadata)
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Proto-plus request: serialize via its own to_json.
                # (json_format.MessageToJson raises on proto-plus wrappers,
                # which silently dropped the payload before.)
                request_payload = type(request).to_json(request)
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DropDatabase",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "DropDatabase",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._DropDatabase._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)
+
class _GetBackup(
    _BaseDatabaseAdminRestTransport._BaseGetBackup, DatabaseAdminRestStub
):
    """REST stub for the DatabaseAdmin.GetBackup RPC."""

    def __hash__(self):
        # Stable per-RPC identity so stubs can be cached/deduplicated.
        return hash("DatabaseAdminRestTransport.GetBackup")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP request through the authorized session.
        # No request body is sent for this RPC.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: backup.GetBackupRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> backup.Backup:
        r"""Call the get backup method over HTTP.

        Args:
            request (~.backup.GetBackupRequest):
                The request object. The request for
                [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.backup.Backup:
                A backup of a Cloud Spanner database.
        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseGetBackup._get_http_options()
        )

        request, metadata = self._interceptor.pre_get_backup(request, metadata)
        transcoded_request = (
            _BaseDatabaseAdminRestTransport._BaseGetBackup._get_transcoded_request(
                http_options, request
            )
        )

        # Jsonify the query params
        query_params = (
            _BaseDatabaseAdminRestTransport._BaseGetBackup._get_query_params_json(
                transcoded_request
            )
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetBackup",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "GetBackup",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._GetBackup._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        resp = backup.Backup()
        pb_resp = backup.Backup.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_get_backup(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_get_backup_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Serialize the parsed proto (`resp`), not the raw HTTP
                # `response` object — the latter always raised and the
                # bare except silently logged a null payload.
                response_payload = backup.Backup.to_json(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_backup",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "GetBackup",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
+
class _GetBackupSchedule(
    _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule, DatabaseAdminRestStub
):
    """REST stub for the DatabaseAdmin.GetBackupSchedule RPC."""

    def __hash__(self):
        # Stable per-RPC identity so stubs can be cached/deduplicated.
        return hash("DatabaseAdminRestTransport.GetBackupSchedule")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP request through the authorized session.
        # No request body is sent for this RPC.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: backup_schedule.GetBackupScheduleRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> backup_schedule.BackupSchedule:
        r"""Call the get backup schedule method over HTTP.

        Args:
            request (~.backup_schedule.GetBackupScheduleRequest):
                The request object. The request for
                [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.backup_schedule.BackupSchedule:
                BackupSchedule expresses the
                automated backup creation specification
                for a Spanner database. Next ID: 10

        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_http_options()
        )

        request, metadata = self._interceptor.pre_get_backup_schedule(
            request, metadata
        )
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetBackupSchedule",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "GetBackupSchedule",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._GetBackupSchedule._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        resp = backup_schedule.BackupSchedule()
        pb_resp = backup_schedule.BackupSchedule.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_get_backup_schedule(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_get_backup_schedule_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Serialize the parsed proto (`resp`), not the raw HTTP
                # `response` object — the latter always raised and the
                # bare except silently logged a null payload.
                response_payload = backup_schedule.BackupSchedule.to_json(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_backup_schedule",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "GetBackupSchedule",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
+
+ class _GetDatabase(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetDatabase")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.GetDatabaseRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.Database:
+ r"""Call the get database method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.GetDatabaseRequest):
+ The request object. The request for
+ [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.Database:
+ A Cloud Spanner database.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_database(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = (
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_query_params_json(
+ transcoded_request
+ )
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetDatabase",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabase",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetDatabase._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.Database()
+ pb_resp = spanner_database_admin.Database.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_database(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_database_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = spanner_database_admin.Database.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_database",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabase",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _GetDatabaseDdl(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetDatabaseDdl")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.GetDatabaseDdlRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.GetDatabaseDdlResponse:
+ r"""Call the get database ddl method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.GetDatabaseDdlRequest):
+ The request object. The request for
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.GetDatabaseDdlResponse:
+ The response for
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_database_ddl(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetDatabaseDdl",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabaseDdl",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetDatabaseDdl._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.GetDatabaseDdlResponse()
+ pb_resp = spanner_database_admin.GetDatabaseDdlResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_database_ddl(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_database_ddl_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ spanner_database_admin.GetDatabaseDdlResponse.to_json(response)
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_database_ddl",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabaseDdl",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _GetIamPolicy(
+ _BaseDatabaseAdminRestTransport._BaseGetIamPolicy, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetIamPolicy")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: iam_policy_pb2.GetIamPolicyRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Call the get iam policy method over HTTP.
+
+ Args:
+ request (~.iam_policy_pb2.GetIamPolicyRequest):
+ The request object. Request message for ``GetIamPolicy`` method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.policy_pb2.Policy:
+ An Identity and Access Management (IAM) policy, which
+ specifies access controls for Google Cloud resources.
+
+ A ``Policy`` is a collection of ``bindings``. A
+ ``binding`` binds one or more ``members``, or
+ principals, to a single ``role``. Principals can be user
+ accounts, service accounts, Google groups, and domains
+ (such as G Suite). A ``role`` is a named list of
+ permissions; each ``role`` can be an IAM predefined role
+ or a user-created custom role.
+
+ For some types of Google Cloud resources, a ``binding``
+ can also specify a ``condition``, which is a logical
+ expression that allows access to a resource only if the
+ expression evaluates to ``true``. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the `IAM
+ documentation `__.
+
+ **JSON example:**
+
+ ::
+
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
+
+ **YAML example:**
+
+ ::
+
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
+
+ For a description of IAM and its features, see the `IAM
+ documentation `__.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_iam_policy(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetIamPolicy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetIamPolicy",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetIamPolicy._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = policy_pb2.Policy()
+ pb_resp = resp
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_iam_policy(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_iam_policy_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_iam_policy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetIamPolicy",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _InternalUpdateGraphOperation(
+ _BaseDatabaseAdminRestTransport._BaseInternalUpdateGraphOperation,
+ DatabaseAdminRestStub,
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.InternalUpdateGraphOperation")
+
+ def __call__(
+ self,
+ request: spanner_database_admin.InternalUpdateGraphOperationRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.InternalUpdateGraphOperationResponse:
+ raise NotImplementedError(
+ "Method InternalUpdateGraphOperation is not available over REST transport"
+ )
+
+ class _ListBackupOperations(
+ _BaseDatabaseAdminRestTransport._BaseListBackupOperations, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListBackupOperations")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup.ListBackupOperationsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup.ListBackupOperationsResponse:
+ r"""Call the list backup operations method over HTTP.
+
+ Args:
+ request (~.backup.ListBackupOperationsRequest):
+ The request object. The request for
+ [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.backup.ListBackupOperationsResponse:
+ The response for
+ [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_backup_operations(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackupOperations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackupOperations",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListBackupOperations._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = backup.ListBackupOperationsResponse()
+ pb_resp = backup.ListBackupOperationsResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_backup_operations(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_backup_operations_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = backup.ListBackupOperationsResponse.to_json(
+ response
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backup_operations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackupOperations",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _ListBackups(
+ _BaseDatabaseAdminRestTransport._BaseListBackups, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListBackups")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup.ListBackupsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup.ListBackupsResponse:
+ r"""Call the list backups method over HTTP.
+
+ Args:
+ request (~.backup.ListBackupsRequest):
+ The request object. The request for
+ [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.backup.ListBackupsResponse:
+ The response for
+ [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListBackups._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_backups(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListBackups._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = (
+ _BaseDatabaseAdminRestTransport._BaseListBackups._get_query_params_json(
+ transcoded_request
+ )
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackups",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackups",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListBackups._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = backup.ListBackupsResponse()
+ pb_resp = backup.ListBackupsResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_backups(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_backups_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = backup.ListBackupsResponse.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backups",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackups",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _ListBackupSchedules(
+ _BaseDatabaseAdminRestTransport._BaseListBackupSchedules, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListBackupSchedules")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup_schedule.ListBackupSchedulesRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup_schedule.ListBackupSchedulesResponse:
+ r"""Call the list backup schedules method over HTTP.
+
+ Args:
+ request (~.backup_schedule.ListBackupSchedulesRequest):
+ The request object. The request for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.backup_schedule.ListBackupSchedulesResponse:
+ The response for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_backup_schedules(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackupSchedules",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackupSchedules",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListBackupSchedules._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = backup_schedule.ListBackupSchedulesResponse()
+ pb_resp = backup_schedule.ListBackupSchedulesResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_backup_schedules(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_backup_schedules_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ backup_schedule.ListBackupSchedulesResponse.to_json(response)
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backup_schedules",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackupSchedules",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _ListDatabaseOperations(
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations,
+ DatabaseAdminRestStub,
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListDatabaseOperations")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.ListDatabaseOperationsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.ListDatabaseOperationsResponse:
+ r"""Call the list database operations method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.ListDatabaseOperationsRequest):
+ The request object. The request for
+ [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.ListDatabaseOperationsResponse:
+ The response for
+ [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_database_operations(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabaseOperations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabaseOperations",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListDatabaseOperations._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.ListDatabaseOperationsResponse()
+ pb_resp = spanner_database_admin.ListDatabaseOperationsResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_database_operations(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_database_operations_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ spanner_database_admin.ListDatabaseOperationsResponse.to_json(
+ response
+ )
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_database_operations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabaseOperations",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _ListDatabaseRoles(
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListDatabaseRoles")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.ListDatabaseRolesRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.ListDatabaseRolesResponse:
+ r"""Call the list database roles method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.ListDatabaseRolesRequest):
+ The request object. The request for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.ListDatabaseRolesResponse:
+ The response for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_database_roles(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabaseRoles",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabaseRoles",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListDatabaseRoles._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.ListDatabaseRolesResponse()
+ pb_resp = spanner_database_admin.ListDatabaseRolesResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_database_roles(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_database_roles_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ spanner_database_admin.ListDatabaseRolesResponse.to_json(
+ response
+ )
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_database_roles",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabaseRoles",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _ListDatabases(
+ _BaseDatabaseAdminRestTransport._BaseListDatabases, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListDatabases")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.ListDatabasesRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.ListDatabasesResponse:
+ r"""Call the list databases method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.ListDatabasesRequest):
+ The request object. The request for
+ [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.ListDatabasesResponse:
+ The response for
+ [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListDatabases._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_databases(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListDatabases._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListDatabases._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabases",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabases",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListDatabases._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.ListDatabasesResponse()
+ pb_resp = spanner_database_admin.ListDatabasesResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_databases(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_databases_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ spanner_database_admin.ListDatabasesResponse.to_json(response)
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_databases",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabases",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _RestoreDatabase(
+ _BaseDatabaseAdminRestTransport._BaseRestoreDatabase, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.RestoreDatabase")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.RestoreDatabaseRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the restore database method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.RestoreDatabaseRequest):
+ The request object. The request for
+ [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_restore_database(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.RestoreDatabase",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "RestoreDatabase",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._RestoreDatabase._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_restore_database(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_restore_database_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.restore_database",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "RestoreDatabase",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _SetIamPolicy(
+ _BaseDatabaseAdminRestTransport._BaseSetIamPolicy, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.SetIamPolicy")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: iam_policy_pb2.SetIamPolicyRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Call the set iam policy method over HTTP.
+
+ Args:
+ request (~.iam_policy_pb2.SetIamPolicyRequest):
+ The request object. Request message for ``SetIamPolicy`` method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.policy_pb2.Policy:
+ An Identity and Access Management (IAM) policy, which
+ specifies access controls for Google Cloud resources.
+
+ A ``Policy`` is a collection of ``bindings``. A
+ ``binding`` binds one or more ``members``, or
+ principals, to a single ``role``. Principals can be user
+ accounts, service accounts, Google groups, and domains
+ (such as G Suite). A ``role`` is a named list of
+ permissions; each ``role`` can be an IAM predefined role
+ or a user-created custom role.
+
+ For some types of Google Cloud resources, a ``binding``
+ can also specify a ``condition``, which is a logical
+ expression that allows access to a resource only if the
+ expression evaluates to ``true``. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the `IAM
+ documentation `__.
+
+ **JSON example:**
+
+ ::
+
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
+
+ **YAML example:**
+
+ ::
+
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
+
+ For a description of IAM and its features, see the `IAM
+ documentation `__.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_set_iam_policy(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.SetIamPolicy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "SetIamPolicy",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._SetIamPolicy._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = policy_pb2.Policy()
+ pb_resp = resp
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_set_iam_policy(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_set_iam_policy_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.set_iam_policy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "SetIamPolicy",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _TestIamPermissions(
+ _BaseDatabaseAdminRestTransport._BaseTestIamPermissions, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.TestIamPermissions")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: iam_policy_pb2.TestIamPermissionsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> iam_policy_pb2.TestIamPermissionsResponse:
+ r"""Call the test iam permissions method over HTTP.
+
+ Args:
+ request (~.iam_policy_pb2.TestIamPermissionsRequest):
+ The request object. Request message for ``TestIamPermissions`` method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.iam_policy_pb2.TestIamPermissionsResponse:
+ Response message for ``TestIamPermissions`` method.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_test_iam_permissions(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.TestIamPermissions",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "TestIamPermissions",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._TestIamPermissions._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = iam_policy_pb2.TestIamPermissionsResponse()
+ pb_resp = resp
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_test_iam_permissions(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_test_iam_permissions_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.test_iam_permissions",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "TestIamPermissions",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _UpdateBackup(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackup, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.UpdateBackup")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: gsad_backup.UpdateBackupRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup.Backup:
+ r"""Call the update backup method over HTTP.
+
+ Args:
+ request (~.gsad_backup.UpdateBackupRequest):
+ The request object. The request for
+ [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.gsad_backup.Backup:
+ A backup of a Cloud Spanner database.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_update_backup(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateBackup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackup",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._UpdateBackup._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = gsad_backup.Backup()
+ pb_resp = gsad_backup.Backup.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_update_backup(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_update_backup_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = gsad_backup.Backup.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_backup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackup",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _UpdateBackupSchedule(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule, DatabaseAdminRestStub
+ ):
        def __hash__(self):
            # All instances are interchangeable; hash on the fully-qualified
            # RPC name so stubs can be deduplicated in sets/dicts.
            return hash("DatabaseAdminRestTransport.UpdateBackupSchedule")
+
        @staticmethod
        def _get_response(
            host,
            metadata,
            query_params,
            session,
            timeout,
            transcoded_request,
            body=None,
        ):
            # Send the transcoded HTTP request through the authorized session:
            # dispatch on the transcoded HTTP verb (session.get/patch/post/...),
            # forward metadata as headers, flatten the query params, and attach
            # the JSON-encoded request body.
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(session, method)(
                "{host}{uri}".format(host=host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params, strict=True),
                data=body,
            )
            return response
+
+ def __call__(
+ self,
+ request: gsad_backup_schedule.UpdateBackupScheduleRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Call the update backup schedule method over HTTP.
+
+ Args:
+ request (~.gsad_backup_schedule.UpdateBackupScheduleRequest):
+ The request object. The request for
+ [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.gsad_backup_schedule.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_update_backup_schedule(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateBackupSchedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackupSchedule",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._UpdateBackupSchedule._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = gsad_backup_schedule.BackupSchedule()
+ pb_resp = gsad_backup_schedule.BackupSchedule.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_update_backup_schedule(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_update_backup_schedule_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = gsad_backup_schedule.BackupSchedule.to_json(
+ response
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_backup_schedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackupSchedule",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+    class _UpdateDatabase(
+        _BaseDatabaseAdminRestTransport._BaseUpdateDatabase, DatabaseAdminRestStub
+    ):
+        """REST stub that invokes the long-running UpdateDatabase RPC over HTTP/JSON."""
+
+        def __hash__(self):
+            return hash("DatabaseAdminRestTransport.UpdateDatabase")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            # Issue the HTTP call described by the transcoded request; the
+            # session attribute named after the HTTP verb performs the I/O.
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+                data=body,
+            )
+            return response
+
+        def __call__(
+            self,
+            request: spanner_database_admin.UpdateDatabaseRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> operations_pb2.Operation:
+            r"""Call the update database method over HTTP.
+
+            Args:
+                request (~.spanner_database_admin.UpdateDatabaseRequest):
+                    The request object. The request for
+                    [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+
+            Returns:
+                ~.operations_pb2.Operation:
+                    This resource represents a
+                    long-running operation that is the
+                    result of a network API call.
+
+            """
+
+            http_options = (
+                _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_http_options()
+            )
+
+            request, metadata = self._interceptor.pre_update_database(request, metadata)
+            transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_transcoded_request(
+                http_options, request
+            )
+
+            body = _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_request_body_json(
+                transcoded_request
+            )
+
+            # Jsonify the query params
+            query_params = _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_query_params_json(
+                transcoded_request
+            )
+
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ): # pragma: NO COVER
+                request_url = "{host}{uri}".format(
+                    host=self._host, uri=transcoded_request["uri"]
+                )
+                method = transcoded_request["method"]
+                # Best-effort serialization: never let debug logging break the RPC.
+                try:
+                    request_payload = json_format.MessageToJson(request)
+                except:
+                    request_payload = None
+                http_request = {
+                    "payload": request_payload,
+                    "requestMethod": method,
+                    "requestUrl": request_url,
+                    "headers": dict(metadata),
+                }
+                _LOGGER.debug(
+                    f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateDatabase",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "UpdateDatabase",
+                        "httpRequest": http_request,
+                        "metadata": http_request["headers"],
+                    },
+                )
+
+            # Send the request
+            response = DatabaseAdminRestTransport._UpdateDatabase._get_response(
+                self._host,
+                metadata,
+                query_params,
+                self._session,
+                timeout,
+                transcoded_request,
+                body,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            # Populate the Operation message in place from the JSON payload.
+            resp = operations_pb2.Operation()
+            json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+            resp = self._interceptor.post_update_database(resp)
+            response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+            resp, _ = self._interceptor.post_update_database_with_metadata(
+                resp, response_metadata
+            )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ): # pragma: NO COVER
+                # Best-effort serialization of the parsed response for logging.
+                try:
+                    response_payload = json_format.MessageToJson(resp)
+                except:
+                    response_payload = None
+                http_response = {
+                    "payload": response_payload,
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_database",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "UpdateDatabase",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
+
+    class _UpdateDatabaseDdl(
+        _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl, DatabaseAdminRestStub
+    ):
+        """REST stub that invokes the long-running UpdateDatabaseDdl RPC over HTTP/JSON."""
+
+        def __hash__(self):
+            return hash("DatabaseAdminRestTransport.UpdateDatabaseDdl")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            # Issue the HTTP call described by the transcoded request; the
+            # session attribute named after the HTTP verb performs the I/O.
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+                data=body,
+            )
+            return response
+
+        def __call__(
+            self,
+            request: spanner_database_admin.UpdateDatabaseDdlRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> operations_pb2.Operation:
+            r"""Call the update database ddl method over HTTP.
+
+            Args:
+                request (~.spanner_database_admin.UpdateDatabaseDdlRequest):
+                    The request object. Enqueues the given DDL statements to be applied, in
+                    order but not necessarily all at once, to the database
+                    schema at some point (or points) in the future. The
+                    server checks that the statements are executable
+                    (syntactically valid, name tables that exist, etc.)
+                    before enqueueing them, but they may still fail upon
+                    later execution (e.g., if a statement from another batch
+                    of statements is applied first and it conflicts in some
+                    way, or if there is some data-related problem like a
+                    ``NULL`` value in a column to which ``NOT NULL`` would
+                    be added). If a statement fails, all subsequent
+                    statements in the batch are automatically cancelled.
+
+                    Each batch of statements is assigned a name which can be
+                    used with the
+                    [Operations][google.longrunning.Operations] API to
+                    monitor progress. See the
+                    [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
+                    field for more details.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+
+            Returns:
+                ~.operations_pb2.Operation:
+                    This resource represents a
+                    long-running operation that is the
+                    result of a network API call.
+
+            """
+
+            http_options = (
+                _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_http_options()
+            )
+
+            request, metadata = self._interceptor.pre_update_database_ddl(
+                request, metadata
+            )
+            transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_transcoded_request(
+                http_options, request
+            )
+
+            body = _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_request_body_json(
+                transcoded_request
+            )
+
+            # Jsonify the query params
+            query_params = _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_query_params_json(
+                transcoded_request
+            )
+
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ): # pragma: NO COVER
+                request_url = "{host}{uri}".format(
+                    host=self._host, uri=transcoded_request["uri"]
+                )
+                method = transcoded_request["method"]
+                # Best-effort serialization: never let debug logging break the RPC.
+                try:
+                    request_payload = json_format.MessageToJson(request)
+                except:
+                    request_payload = None
+                http_request = {
+                    "payload": request_payload,
+                    "requestMethod": method,
+                    "requestUrl": request_url,
+                    "headers": dict(metadata),
+                }
+                _LOGGER.debug(
+                    f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateDatabaseDdl",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "UpdateDatabaseDdl",
+                        "httpRequest": http_request,
+                        "metadata": http_request["headers"],
+                    },
+                )
+
+            # Send the request
+            response = DatabaseAdminRestTransport._UpdateDatabaseDdl._get_response(
+                self._host,
+                metadata,
+                query_params,
+                self._session,
+                timeout,
+                transcoded_request,
+                body,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            # Populate the Operation message in place from the JSON payload.
+            resp = operations_pb2.Operation()
+            json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+            resp = self._interceptor.post_update_database_ddl(resp)
+            response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+            resp, _ = self._interceptor.post_update_database_ddl_with_metadata(
+                resp, response_metadata
+            )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ): # pragma: NO COVER
+                # Best-effort serialization of the parsed response for logging.
+                try:
+                    response_payload = json_format.MessageToJson(resp)
+                except:
+                    response_payload = None
+                http_response = {
+                    "payload": response_payload,
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_database_ddl",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "UpdateDatabaseDdl",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
+
+    # ---- Public RPC accessors ----------------------------------------------
+    # Each property builds a fresh per-RPC stub bound to this transport's
+    # session, host, and interceptor; callers invoke the returned stub like a
+    # plain callable.
+    @property
+    def add_split_points(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.AddSplitPointsRequest],
+        spanner_database_admin.AddSplitPointsResponse,
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._AddSplitPoints(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def copy_backup(
+        self,
+    ) -> Callable[[backup.CopyBackupRequest], operations_pb2.Operation]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._CopyBackup(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def create_backup(
+        self,
+    ) -> Callable[[gsad_backup.CreateBackupRequest], operations_pb2.Operation]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._CreateBackup(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def create_backup_schedule(
+        self,
+    ) -> Callable[
+        [gsad_backup_schedule.CreateBackupScheduleRequest],
+        gsad_backup_schedule.BackupSchedule,
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._CreateBackupSchedule(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def create_database(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.CreateDatabaseRequest], operations_pb2.Operation
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._CreateDatabase(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def delete_backup(self) -> Callable[[backup.DeleteBackupRequest], empty_pb2.Empty]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._DeleteBackup(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def delete_backup_schedule(
+        self,
+    ) -> Callable[[backup_schedule.DeleteBackupScheduleRequest], empty_pb2.Empty]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._DeleteBackupSchedule(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def drop_database(
+        self,
+    ) -> Callable[[spanner_database_admin.DropDatabaseRequest], empty_pb2.Empty]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._DropDatabase(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def get_backup(self) -> Callable[[backup.GetBackupRequest], backup.Backup]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._GetBackup(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def get_backup_schedule(
+        self,
+    ) -> Callable[
+        [backup_schedule.GetBackupScheduleRequest], backup_schedule.BackupSchedule
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._GetBackupSchedule(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def get_database(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.GetDatabaseRequest], spanner_database_admin.Database
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._GetDatabase(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def get_database_ddl(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.GetDatabaseDdlRequest],
+        spanner_database_admin.GetDatabaseDdlResponse,
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._GetDatabaseDdl(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def get_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def internal_update_graph_operation(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.InternalUpdateGraphOperationRequest],
+        spanner_database_admin.InternalUpdateGraphOperationResponse,
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._InternalUpdateGraphOperation(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def list_backup_operations(
+        self,
+    ) -> Callable[
+        [backup.ListBackupOperationsRequest], backup.ListBackupOperationsResponse
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._ListBackupOperations(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def list_backups(
+        self,
+    ) -> Callable[[backup.ListBackupsRequest], backup.ListBackupsResponse]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._ListBackups(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def list_backup_schedules(
+        self,
+    ) -> Callable[
+        [backup_schedule.ListBackupSchedulesRequest],
+        backup_schedule.ListBackupSchedulesResponse,
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._ListBackupSchedules(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def list_database_operations(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.ListDatabaseOperationsRequest],
+        spanner_database_admin.ListDatabaseOperationsResponse,
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._ListDatabaseOperations(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def list_database_roles(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.ListDatabaseRolesRequest],
+        spanner_database_admin.ListDatabaseRolesResponse,
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._ListDatabaseRoles(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def list_databases(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.ListDatabasesRequest],
+        spanner_database_admin.ListDatabasesResponse,
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._ListDatabases(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def restore_database(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.RestoreDatabaseRequest], operations_pb2.Operation
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._RestoreDatabase(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def test_iam_permissions(
+        self,
+    ) -> Callable[
+        [iam_policy_pb2.TestIamPermissionsRequest],
+        iam_policy_pb2.TestIamPermissionsResponse,
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def update_backup(
+        self,
+    ) -> Callable[[gsad_backup.UpdateBackupRequest], gsad_backup.Backup]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._UpdateBackup(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def update_backup_schedule(
+        self,
+    ) -> Callable[
+        [gsad_backup_schedule.UpdateBackupScheduleRequest],
+        gsad_backup_schedule.BackupSchedule,
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._UpdateBackupSchedule(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def update_database(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.UpdateDatabaseRequest], operations_pb2.Operation
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._UpdateDatabase(self._session, self._host, self._interceptor) # type: ignore
+
+    @property
+    def update_database_ddl(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.UpdateDatabaseDdlRequest], operations_pb2.Operation
+    ]:
+        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+        # In C++ this would require a dynamic_cast
+        return self._UpdateDatabaseDdl(self._session, self._host, self._interceptor) # type: ignore
+
+    # ---- Operations-surface accessors --------------------------------------
+    @property
+    def cancel_operation(self):
+        # Accessor for the operations-surface CancelOperation stub.
+        return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore
+
+    class _CancelOperation(
+        _BaseDatabaseAdminRestTransport._BaseCancelOperation, DatabaseAdminRestStub
+    ):
+        """REST stub that invokes the CancelOperation RPC over HTTP/JSON."""
+
+        def __hash__(self):
+            return hash("DatabaseAdminRestTransport.CancelOperation")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            # Issue the HTTP call described by the transcoded request; the
+            # session attribute named after the HTTP verb performs the I/O.
+            # Note: no request body is sent for this RPC.
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+            )
+            return response
+
+        def __call__(
+            self,
+            request: operations_pb2.CancelOperationRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> None:
+            r"""Call the cancel operation method over HTTP.
+
+            Args:
+                request (operations_pb2.CancelOperationRequest):
+                    The request object for CancelOperation method.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+            """
+
+            http_options = (
+                _BaseDatabaseAdminRestTransport._BaseCancelOperation._get_http_options()
+            )
+
+            request, metadata = self._interceptor.pre_cancel_operation(
+                request, metadata
+            )
+            transcoded_request = _BaseDatabaseAdminRestTransport._BaseCancelOperation._get_transcoded_request(
+                http_options, request
+            )
+
+            # Jsonify the query params
+            query_params = _BaseDatabaseAdminRestTransport._BaseCancelOperation._get_query_params_json(
+                transcoded_request
+            )
+
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ): # pragma: NO COVER
+                request_url = "{host}{uri}".format(
+                    host=self._host, uri=transcoded_request["uri"]
+                )
+                method = transcoded_request["method"]
+                # Best-effort serialization: never let debug logging break the RPC.
+                try:
+                    request_payload = json_format.MessageToJson(request)
+                except:
+                    request_payload = None
+                http_request = {
+                    "payload": request_payload,
+                    "requestMethod": method,
+                    "requestUrl": request_url,
+                    "headers": dict(metadata),
+                }
+                _LOGGER.debug(
+                    f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CancelOperation",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "CancelOperation",
+                        "httpRequest": http_request,
+                        "metadata": http_request["headers"],
+                    },
+                )
+
+            # Send the request
+            response = DatabaseAdminRestTransport._CancelOperation._get_response(
+                self._host,
+                metadata,
+                query_params,
+                self._session,
+                timeout,
+                transcoded_request,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # No response body: hand the interceptor a ``None`` result.
+            return self._interceptor.post_cancel_operation(None)
+
+    @property
+    def delete_operation(self):
+        # Accessor for the operations-surface DeleteOperation stub.
+        return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore
+
+    class _DeleteOperation(
+        _BaseDatabaseAdminRestTransport._BaseDeleteOperation, DatabaseAdminRestStub
+    ):
+        """REST stub that invokes the DeleteOperation RPC over HTTP/JSON."""
+
+        def __hash__(self):
+            return hash("DatabaseAdminRestTransport.DeleteOperation")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            # Issue the HTTP call described by the transcoded request; the
+            # session attribute named after the HTTP verb performs the I/O.
+            # Note: no request body is sent for this RPC.
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+            )
+            return response
+
+        def __call__(
+            self,
+            request: operations_pb2.DeleteOperationRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> None:
+            r"""Call the delete operation method over HTTP.
+
+            Args:
+                request (operations_pb2.DeleteOperationRequest):
+                    The request object for DeleteOperation method.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+            """
+
+            http_options = (
+                _BaseDatabaseAdminRestTransport._BaseDeleteOperation._get_http_options()
+            )
+
+            request, metadata = self._interceptor.pre_delete_operation(
+                request, metadata
+            )
+            transcoded_request = _BaseDatabaseAdminRestTransport._BaseDeleteOperation._get_transcoded_request(
+                http_options, request
+            )
+
+            # Jsonify the query params
+            query_params = _BaseDatabaseAdminRestTransport._BaseDeleteOperation._get_query_params_json(
+                transcoded_request
+            )
+
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ): # pragma: NO COVER
+                request_url = "{host}{uri}".format(
+                    host=self._host, uri=transcoded_request["uri"]
+                )
+                method = transcoded_request["method"]
+                # Best-effort serialization: never let debug logging break the RPC.
+                try:
+                    request_payload = json_format.MessageToJson(request)
+                except:
+                    request_payload = None
+                http_request = {
+                    "payload": request_payload,
+                    "requestMethod": method,
+                    "requestUrl": request_url,
+                    "headers": dict(metadata),
+                }
+                _LOGGER.debug(
+                    f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteOperation",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "DeleteOperation",
+                        "httpRequest": http_request,
+                        "metadata": http_request["headers"],
+                    },
+                )
+
+            # Send the request
+            response = DatabaseAdminRestTransport._DeleteOperation._get_response(
+                self._host,
+                metadata,
+                query_params,
+                self._session,
+                timeout,
+                transcoded_request,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # No response body: hand the interceptor a ``None`` result.
+            return self._interceptor.post_delete_operation(None)
+
+    @property
+    def get_operation(self):
+        # Accessor for the operations-surface GetOperation stub.
+        return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore
+
+ class _GetOperation(
+ _BaseDatabaseAdminRestTransport._BaseGetOperation, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetOperation")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: operations_pb2.GetOperationRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the get operation method over HTTP.
+
+ Args:
+ request (operations_pb2.GetOperationRequest):
+ The request object for GetOperation method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ operations_pb2.Operation: Response from GetOperation method.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetOperation._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_operation(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetOperation._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseGetOperation._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetOperation",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetOperation",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetOperation._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ content = response.content.decode("utf-8")
+ resp = operations_pb2.Operation()
+ resp = json_format.Parse(content, resp)
+ resp = self._interceptor.post_get_operation(resp)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminAsyncClient.GetOperation",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetOperation",
+ "httpResponse": http_response,
+ "metadata": http_response["headers"],
+ },
+ )
+ return resp
+
+ @property
+ def list_operations(self):
+ return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore
+
+ class _ListOperations(
+ _BaseDatabaseAdminRestTransport._BaseListOperations, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListOperations")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: operations_pb2.ListOperationsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.ListOperationsResponse:
+ r"""Call the list operations method over HTTP.
+
+ Args:
+ request (operations_pb2.ListOperationsRequest):
+ The request object for ListOperations method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ operations_pb2.ListOperationsResponse: Response from ListOperations method.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListOperations._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_operations(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListOperations._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListOperations._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListOperations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListOperations",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListOperations._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ content = response.content.decode("utf-8")
+ resp = operations_pb2.ListOperationsResponse()
+ resp = json_format.Parse(content, resp)
+ resp = self._interceptor.post_list_operations(resp)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminAsyncClient.ListOperations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListOperations",
+ "httpResponse": http_response,
+ "metadata": http_response["headers"],
+ },
+ )
+ return resp
+
+ @property
+ def kind(self) -> str:
+ return "rest"
+
+ def close(self):
+ self._session.close()
+
+
+__all__ = ("DatabaseAdminRestTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py
new file mode 100644
index 0000000000..d0ee0a2cbb
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py
@@ -0,0 +1,1654 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import json # type: ignore
+from google.api_core import path_template
+from google.api_core import gapic_v1
+
+from google.protobuf import json_format
+from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
+
+import re
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+
+
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
+from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+
+
+class _BaseDatabaseAdminRestTransport(DatabaseAdminTransport):
+ """Base REST backend transport for DatabaseAdmin.
+
+ Note: This class is not meant to be used directly. Use its sync and
+ async sub-classes instead.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends JSON representations of protocol buffers over HTTP/1.1
+ """
+
+ def __init__(
+ self,
+ *,
+ host: str = "spanner.googleapis.com",
+ credentials: Optional[Any] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ url_scheme: str = "https",
+ api_audience: Optional[str] = None,
+ ) -> None:
+ """Instantiate the transport.
+ Args:
+ host (Optional[str]):
+ The hostname to connect to (default: 'spanner.googleapis.com').
+ credentials (Optional[Any]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you are developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+ url_scheme: the protocol scheme for the API endpoint. Normally
+ "https", but for testing or local servers,
+ "http" can be specified.
+ """
+ # Run the base constructor
+ maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host)
+ if maybe_url_match is None:
+ raise ValueError(
+ f"Unexpected hostname structure: {host}"
+ ) # pragma: NO COVER
+
+ url_match_items = maybe_url_match.groupdict()
+
+ host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ api_audience=api_audience,
+ )
+
+ class _BaseAddSplitPoints:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.AddSplitPointsRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCopyBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/backups:copy",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.CopyBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCreateBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "backupId": "",
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/backups",
+ "body": "backup",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup.CreateBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCreateBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "backupScheduleId": "",
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules",
+ "body": "backup_schedule",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup_schedule.CreateBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCreateDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/databases",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.CreateDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseDeleteBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.DeleteBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseDeleteBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup_schedule.DeleteBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseDropDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.DropDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.GetBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup_schedule.GetBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.GetDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetDatabaseDdl:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}/ddl",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.GetDatabaseDdlRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetIamPolicy:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/backups/*}:getIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:getIamPolicy",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = request
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseInternalUpdateGraphOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ class _BaseListBackupOperations:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/backupOperations",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.ListBackupOperationsRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListBackups:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/backups",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.ListBackupsRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListBackups._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListBackupSchedules:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup_schedule.ListBackupSchedulesRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListDatabaseOperations:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/databaseOperations",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.ListDatabaseOperationsRequest.pb(
+ request
+ )
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListDatabaseRoles:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*/databases/*}/databaseRoles",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.ListDatabaseRolesRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListDatabases:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/databases",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.ListDatabasesRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListDatabases._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseRestoreDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/databases:restore",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.RestoreDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseSetIamPolicy:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/backups/*}:setIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:setIamPolicy",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = request
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseTestIamPermissions:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/backups/*}:testIamPermissions",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:testIamPermissions",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/databaseRoles/*}:testIamPermissions",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = request
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "updateMask": {},
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{backup.name=projects/*/instances/*/backups/*}",
+ "body": "backup",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup.UpdateBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "updateMask": {},
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{backup_schedule.name=projects/*/instances/*/databases/*/backupSchedules/*}",
+ "body": "backup_schedule",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup_schedule.UpdateBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "updateMask": {},
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{database.name=projects/*/instances/*/databases/*}",
+ "body": "database",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.UpdateDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateDatabaseDdl:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}/ddl",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.UpdateDatabaseDdlRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCancelOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}:cancel",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+ class _BaseDeleteOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+ class _BaseGetOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+ class _BaseListOperations:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+
+__all__ = ("_BaseDatabaseAdminRestTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/types/__init__.py b/google/cloud/spanner_admin_database_v1/types/__init__.py
index 9749add377..ca79ddec90 100644
--- a/google/cloud/spanner_admin_database_v1/types/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/types/__init__.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,81 +13,136 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
-from .common import (
- OperationProgress,
- EncryptionConfig,
- EncryptionInfo,
-)
from .backup import (
Backup,
- CreateBackupRequest,
+ BackupInfo,
+ BackupInstancePartition,
+ CopyBackupEncryptionConfig,
+ CopyBackupMetadata,
+ CopyBackupRequest,
+ CreateBackupEncryptionConfig,
CreateBackupMetadata,
- UpdateBackupRequest,
- GetBackupRequest,
+ CreateBackupRequest,
DeleteBackupRequest,
- ListBackupsRequest,
- ListBackupsResponse,
+ FullBackupSpec,
+ GetBackupRequest,
+ IncrementalBackupSpec,
ListBackupOperationsRequest,
ListBackupOperationsResponse,
- BackupInfo,
- CreateBackupEncryptionConfig,
+ ListBackupsRequest,
+ ListBackupsResponse,
+ UpdateBackupRequest,
+)
+from .backup_schedule import (
+ BackupSchedule,
+ BackupScheduleSpec,
+ CreateBackupScheduleRequest,
+ CrontabSpec,
+ DeleteBackupScheduleRequest,
+ GetBackupScheduleRequest,
+ ListBackupSchedulesRequest,
+ ListBackupSchedulesResponse,
+ UpdateBackupScheduleRequest,
+)
+from .common import (
+ EncryptionConfig,
+ EncryptionInfo,
+ OperationProgress,
+ DatabaseDialect,
)
from .spanner_database_admin import (
- RestoreInfo,
- Database,
- ListDatabasesRequest,
- ListDatabasesResponse,
- CreateDatabaseRequest,
+ AddSplitPointsRequest,
+ AddSplitPointsResponse,
CreateDatabaseMetadata,
- GetDatabaseRequest,
- UpdateDatabaseDdlRequest,
- UpdateDatabaseDdlMetadata,
+ CreateDatabaseRequest,
+ Database,
+ DatabaseRole,
+ DdlStatementActionInfo,
DropDatabaseRequest,
GetDatabaseDdlRequest,
GetDatabaseDdlResponse,
+ GetDatabaseRequest,
+ InternalUpdateGraphOperationRequest,
+ InternalUpdateGraphOperationResponse,
ListDatabaseOperationsRequest,
ListDatabaseOperationsResponse,
- RestoreDatabaseRequest,
+ ListDatabaseRolesRequest,
+ ListDatabaseRolesResponse,
+ ListDatabasesRequest,
+ ListDatabasesResponse,
+ OptimizeRestoredDatabaseMetadata,
RestoreDatabaseEncryptionConfig,
RestoreDatabaseMetadata,
- OptimizeRestoredDatabaseMetadata,
+ RestoreDatabaseRequest,
+ RestoreInfo,
+ SplitPoints,
+ UpdateDatabaseDdlMetadata,
+ UpdateDatabaseDdlRequest,
+ UpdateDatabaseMetadata,
+ UpdateDatabaseRequest,
RestoreSourceType,
)
__all__ = (
- "OperationProgress",
- "EncryptionConfig",
- "EncryptionInfo",
"Backup",
- "CreateBackupRequest",
+ "BackupInfo",
+ "BackupInstancePartition",
+ "CopyBackupEncryptionConfig",
+ "CopyBackupMetadata",
+ "CopyBackupRequest",
+ "CreateBackupEncryptionConfig",
"CreateBackupMetadata",
- "UpdateBackupRequest",
- "GetBackupRequest",
+ "CreateBackupRequest",
"DeleteBackupRequest",
- "ListBackupsRequest",
- "ListBackupsResponse",
+ "FullBackupSpec",
+ "GetBackupRequest",
+ "IncrementalBackupSpec",
"ListBackupOperationsRequest",
"ListBackupOperationsResponse",
- "BackupInfo",
- "CreateBackupEncryptionConfig",
- "RestoreInfo",
- "Database",
- "ListDatabasesRequest",
- "ListDatabasesResponse",
- "CreateDatabaseRequest",
+ "ListBackupsRequest",
+ "ListBackupsResponse",
+ "UpdateBackupRequest",
+ "BackupSchedule",
+ "BackupScheduleSpec",
+ "CreateBackupScheduleRequest",
+ "CrontabSpec",
+ "DeleteBackupScheduleRequest",
+ "GetBackupScheduleRequest",
+ "ListBackupSchedulesRequest",
+ "ListBackupSchedulesResponse",
+ "UpdateBackupScheduleRequest",
+ "EncryptionConfig",
+ "EncryptionInfo",
+ "OperationProgress",
+ "DatabaseDialect",
+ "AddSplitPointsRequest",
+ "AddSplitPointsResponse",
"CreateDatabaseMetadata",
- "GetDatabaseRequest",
- "UpdateDatabaseDdlRequest",
- "UpdateDatabaseDdlMetadata",
+ "CreateDatabaseRequest",
+ "Database",
+ "DatabaseRole",
+ "DdlStatementActionInfo",
"DropDatabaseRequest",
"GetDatabaseDdlRequest",
"GetDatabaseDdlResponse",
+ "GetDatabaseRequest",
+ "InternalUpdateGraphOperationRequest",
+ "InternalUpdateGraphOperationResponse",
"ListDatabaseOperationsRequest",
"ListDatabaseOperationsResponse",
- "RestoreDatabaseRequest",
+ "ListDatabaseRolesRequest",
+ "ListDatabaseRolesResponse",
+ "ListDatabasesRequest",
+ "ListDatabasesResponse",
+ "OptimizeRestoredDatabaseMetadata",
"RestoreDatabaseEncryptionConfig",
"RestoreDatabaseMetadata",
- "OptimizeRestoredDatabaseMetadata",
+ "RestoreDatabaseRequest",
+ "RestoreInfo",
+ "SplitPoints",
+ "UpdateDatabaseDdlMetadata",
+ "UpdateDatabaseDdlRequest",
+ "UpdateDatabaseMetadata",
+ "UpdateDatabaseRequest",
"RestoreSourceType",
)
diff --git a/google/cloud/spanner_admin_database_v1/types/backup.py b/google/cloud/spanner_admin_database_v1/types/backup.py
index 7d95a007f4..da236fb4ff 100644
--- a/google/cloud/spanner_admin_database_v1/types/backup.py
+++ b/google/cloud/spanner_admin_database_v1/types/backup.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,14 +13,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from __future__ import annotations
-import proto # type: ignore
+from typing import MutableMapping, MutableSequence
+import proto # type: ignore
from google.cloud.spanner_admin_database_v1.types import common
-from google.longrunning import operations_pb2 as gl_operations # type: ignore
-from google.protobuf import field_mask_pb2 as field_mask # type: ignore
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -30,6 +31,8 @@
"Backup",
"CreateBackupRequest",
"CreateBackupMetadata",
+ "CopyBackupRequest",
+ "CopyBackupMetadata",
"UpdateBackupRequest",
"GetBackupRequest",
"DeleteBackupRequest",
@@ -39,6 +42,10 @@
"ListBackupOperationsResponse",
"BackupInfo",
"CreateBackupEncryptionConfig",
+ "CopyBackupEncryptionConfig",
+ "FullBackupSpec",
+ "IncrementalBackupSpec",
+ "BackupInstancePartition",
},
)
@@ -93,9 +100,33 @@ class Backup(proto.Message):
equivalent to the ``create_time``.
size_bytes (int):
Output only. Size of the backup in bytes.
+ freeable_size_bytes (int):
+ Output only. The number of bytes that will be
+ freed by deleting this backup. This value will
+ be zero if, for example, this backup is part of
+ an incremental backup chain and younger backups
+ in the chain require that we keep its data. For
+ backups not in an incremental backup chain, this
+ is always the size of the backup. This value may
+ change if backups on the same chain get created,
+ deleted or expired.
+ exclusive_size_bytes (int):
+ Output only. For a backup in an incremental
+ backup chain, this is the storage space needed
+ to keep the data that has changed since the
+ previous backup. For all other backups, this is
+ always the size of the backup. This value may
+ change if backups on the same chain get deleted
+ or expired.
+
+ This field can be used to calculate the total
+ storage space used by a set of backups. For
+ example, the total space used by all backups of
+ a database can be computed by summing up this
+ field.
state (google.cloud.spanner_admin_database_v1.types.Backup.State):
Output only. The current state of the backup.
- referencing_databases (Sequence[str]):
+ referencing_databases (MutableSequence[str]):
Output only. The names of the restored databases that
reference the backup. The database names are of the form
``projects//instances//databases/``.
@@ -107,32 +138,181 @@ class Backup(proto.Message):
encryption_info (google.cloud.spanner_admin_database_v1.types.EncryptionInfo):
Output only. The encryption information for
the backup.
+ encryption_information (MutableSequence[google.cloud.spanner_admin_database_v1.types.EncryptionInfo]):
+ Output only. The encryption information for the backup,
+ whether it is protected by one or more KMS keys. The
+ information includes all Cloud KMS key versions used to
+            encrypt the backup. The
+            ``encryption_status`` field inside of each ``EncryptionInfo``
+            is not populated. At least one of the key versions must be
+ available for the backup to be restored. If a key version is
+ revoked in the middle of a restore, the restore behavior is
+ undefined.
+ database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect):
+ Output only. The database dialect information
+ for the backup.
+ referencing_backups (MutableSequence[str]):
+ Output only. The names of the destination backups being
+ created by copying this source backup. The backup names are
+ of the form
+ ``projects//instances//backups/``.
+ Referencing backups may exist in different instances. The
+ existence of any referencing backup prevents the backup from
+ being deleted. When the copy operation is done (either
+ successfully completed or cancelled or the destination
+ backup is deleted), the reference to the backup is removed.
+ max_expire_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The max allowed expiration time of the backup,
+ with microseconds granularity. A backup's expiration time
+ can be configured in multiple APIs: CreateBackup,
+ UpdateBackup, CopyBackup. When updating or copying an
+ existing backup, the expiration time specified must be less
+ than ``Backup.max_expire_time``.
+ backup_schedules (MutableSequence[str]):
+ Output only. List of backup schedule URIs
+ that are associated with creating this backup.
+ This is only applicable for scheduled backups,
+ and is empty for on-demand backups.
+
+ To optimize for storage, whenever possible,
+ multiple schedules are collapsed together to
+ create one backup. In such cases, this field
+ captures the list of all backup schedule URIs
+ that are associated with creating this backup.
+ If collapsing is not done, then this field
+ captures the single backup schedule URI
+ associated with creating this backup.
+ incremental_backup_chain_id (str):
+ Output only. Populated only for backups in an incremental
+ backup chain. Backups share the same chain id if and only if
+ they belong to the same incremental backup chain. Use this
+ field to determine which backups are part of the same
+ incremental backup chain. The ordering of backups in the
+ chain can be determined by ordering the backup
+ ``version_time``.
+ oldest_version_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Data deleted at a time older
+ than this is guaranteed not to be retained in
+ order to support this backup. For a backup in an
+ incremental backup chain, this is the version
+ time of the oldest backup that exists or ever
+ existed in the chain. For all other backups,
+ this is the version time of the backup. This
+ field can be used to understand what data is
+ being retained by the backup system.
+ instance_partitions (MutableSequence[google.cloud.spanner_admin_database_v1.types.BackupInstancePartition]):
+ Output only. The instance partition(s) storing the backup.
+
+ This is the same as the list of the instance partition(s)
+ that the database had footprint in at the backup's
+ ``version_time``.
"""
class State(proto.Enum):
- r"""Indicates the current state of the backup."""
+ r"""Indicates the current state of the backup.
+
+ Values:
+ STATE_UNSPECIFIED (0):
+ Not specified.
+ CREATING (1):
+ The pending backup is still being created. Operations on the
+ backup may fail with ``FAILED_PRECONDITION`` in this state.
+ READY (2):
+ The backup is complete and ready for use.
+ """
STATE_UNSPECIFIED = 0
CREATING = 1
READY = 2
- database = proto.Field(proto.STRING, number=2)
-
- version_time = proto.Field(proto.MESSAGE, number=9, message=timestamp.Timestamp,)
-
- expire_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
-
- name = proto.Field(proto.STRING, number=1)
-
- create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,)
-
- size_bytes = proto.Field(proto.INT64, number=5)
-
- state = proto.Field(proto.ENUM, number=6, enum=State,)
-
- referencing_databases = proto.RepeatedField(proto.STRING, number=7)
-
- encryption_info = proto.Field(
- proto.MESSAGE, number=8, message=common.EncryptionInfo,
+ database: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ version_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=9,
+ message=timestamp_pb2.Timestamp,
+ )
+ expire_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ create_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+ size_bytes: int = proto.Field(
+ proto.INT64,
+ number=5,
+ )
+ freeable_size_bytes: int = proto.Field(
+ proto.INT64,
+ number=15,
+ )
+ exclusive_size_bytes: int = proto.Field(
+ proto.INT64,
+ number=16,
+ )
+ state: State = proto.Field(
+ proto.ENUM,
+ number=6,
+ enum=State,
+ )
+ referencing_databases: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=7,
+ )
+ encryption_info: common.EncryptionInfo = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=common.EncryptionInfo,
+ )
+ encryption_information: MutableSequence[
+ common.EncryptionInfo
+ ] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=13,
+ message=common.EncryptionInfo,
+ )
+ database_dialect: common.DatabaseDialect = proto.Field(
+ proto.ENUM,
+ number=10,
+ enum=common.DatabaseDialect,
+ )
+ referencing_backups: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=11,
+ )
+ max_expire_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=12,
+ message=timestamp_pb2.Timestamp,
+ )
+ backup_schedules: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=14,
+ )
+ incremental_backup_chain_id: str = proto.Field(
+ proto.STRING,
+ number=17,
+ )
+ oldest_version_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=18,
+ message=timestamp_pb2.Timestamp,
+ )
+ instance_partitions: MutableSequence[
+ "BackupInstancePartition"
+ ] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=19,
+ message="BackupInstancePartition",
)
@@ -164,14 +344,23 @@ class CreateBackupRequest(proto.Message):
= ``USE_DATABASE_ENCRYPTION``.
"""
- parent = proto.Field(proto.STRING, number=1)
-
- backup_id = proto.Field(proto.STRING, number=2)
-
- backup = proto.Field(proto.MESSAGE, number=3, message="Backup",)
-
- encryption_config = proto.Field(
- proto.MESSAGE, number=4, message="CreateBackupEncryptionConfig",
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ backup_id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ backup: "Backup" = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message="Backup",
+ )
+ encryption_config: "CreateBackupEncryptionConfig" = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message="CreateBackupEncryptionConfig",
)
@@ -206,13 +395,138 @@ class CreateBackupMetadata(proto.Message):
1, corresponding to ``Code.CANCELLED``.
"""
- name = proto.Field(proto.STRING, number=1)
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ database: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ progress: common.OperationProgress = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=common.OperationProgress,
+ )
+ cancel_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
- database = proto.Field(proto.STRING, number=2)
- progress = proto.Field(proto.MESSAGE, number=3, message=common.OperationProgress,)
+class CopyBackupRequest(proto.Message):
+ r"""The request for
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
- cancel_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,)
+ Attributes:
+    parent (str):
+        Required. The name of the destination instance that will
+        contain the backup copy. Values are of the form:
+        ``projects/<project>/instances/<instance>``.
+ backup_id (str):
+ Required. The id of the backup copy. The ``backup_id``
+ appended to ``parent`` forms the full backup_uri of the form
+ ``projects//instances//backups/