From b6bb39b9ac862f0e3e85b41d9bf0c518c623ac68 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Mon, 2 Dec 2024 02:20:04 -0800 Subject: [PATCH 01/16] feat(x-goog-spanner-request-id): implement request_id generation and propagation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Generates a request_id that is then injected inside metadata that's sent over to the Cloud Spanner backend. Officially inject the first set of x-goog-spanner-request-id values into header metadata Add request-id interceptor to use in asserting tests Wrap Snapshot methods with x-goog-request-id metadata injector Setup scaffolding for XGoogRequestIdHeader checks Wire up XGoogSpannerRequestIdInterceptor for TestDatabase checks Inject header in more Session using spots plus more tests Base for tests with retries on abort More plumbing for Transaction and Database Update unit tests for Transaction Wrap more in Transaction + update tests Update tests Plumb in more tests Update TestDatabase Fixes #1261 --- google/cloud/spanner_v1/client.py | 9 + google/cloud/spanner_v1/database.py | 66 ++- google/cloud/spanner_v1/instance.py | 1 + google/cloud/spanner_v1/pool.py | 6 +- google/cloud/spanner_v1/session.py | 21 +- google/cloud/spanner_v1/snapshot.py | 137 +++++-- .../cloud/spanner_v1/testing/database_test.py | 5 + .../cloud/spanner_v1/testing/interceptors.py | 74 ++++ google/cloud/spanner_v1/transaction.py | 120 ++++-- .../mockserver_tests/mock_server_test_base.py | 4 +- tests/unit/test_atomic_counter.py | 1 + tests/unit/test_database.py | 175 ++++++-- tests/unit/test_request_id_header.py | 387 ++++++++++++++++++ tests/unit/test_snapshot.py | 78 +++- tests/unit/test_spanner.py | 149 +++++++ tests/unit/test_transaction.py | 59 +++ 16 files changed, 1165 insertions(+), 127 deletions(-) create mode 100644 tests/unit/test_request_id_header.py diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py index afe6264717..772abe8261 100644 --- a/google/cloud/spanner_v1/client.py +++ b/google/cloud/spanner_v1/client.py @@ -48,6 +48,7 @@ from google.cloud.spanner_v1._helpers import _merge_query_options from google.cloud.spanner_v1._helpers import _metadata_with_prefix from google.cloud.spanner_v1.instance import Instance +from google.cloud.spanner_v1._helpers import AtomicCounter _CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) EMULATOR_ENV_VAR = "SPANNER_EMULATOR_HOST" @@ -147,6 +148,8 @@ class Client(ClientWithProject): SCOPE = (SPANNER_ADMIN_SCOPE,) """The scopes required for Google Cloud Spanner.""" + NTH_CLIENT = AtomicCounter() + def __init__( self, project=None, @@ -199,6 +202,12 @@ def __init__( self._route_to_leader_enabled = route_to_leader_enabled self._directed_read_options = directed_read_options self._observability_options = observability_options + self._nth_client_id = Client.NTH_CLIENT.increment() + self._nth_request = AtomicCounter() + + @property + def _next_nth_request(self): + return self._nth_request.increment() @property def credentials(self): diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 963debdab8..987ed97b96 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -50,8 +50,10 @@ from google.cloud.spanner_v1 import SpannerClient from google.cloud.spanner_v1._helpers import _merge_query_options from google.cloud.spanner_v1._helpers import ( + AtomicCounter, _metadata_with_prefix, _metadata_with_leader_aware_routing, + 
_metadata_with_request_id, ) from google.cloud.spanner_v1.batch import Batch from google.cloud.spanner_v1.batch import MutationGroups @@ -149,6 +151,9 @@ class Database(object): _spanner_api: SpannerClient = None + __transport_lock = threading.Lock() + __transports_to_channel_id = dict() + def __init__( self, database_id, @@ -443,6 +448,31 @@ def spanner_api(self): ) return self._spanner_api + @property + def _channel_id(self): + """ + Helper to retrieve the associated channelID for the spanner_api. + This property is paramount to x-goog-spanner-request-id. + """ + with self.__transport_lock: + api = self.spanner_api + channel_id = self.__transports_to_channel_id.get(api._transport, None) + if channel_id is None: + channel_id = len(self.__transports_to_channel_id) + 1 + self.__transports_to_channel_id[api._transport] = channel_id + + return channel_id + + def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): + client_id = self._nth_client_id + return _metadata_with_request_id( + self._nth_client_id, + self._channel_id, + nth_request, + nth_attempt, + prior_metadata, + ) + def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented @@ -698,6 +728,12 @@ def execute_partitioned_dml( _metadata_with_leader_aware_routing(self._route_to_leader_enabled) ) + # Attempt will be incremented inside _restart_on_unavailable. + begin_txn_nth_request = self._next_nth_request + begin_txn_attempt = AtomicCounter(1) + partial_nth_request = self._next_nth_request + partial_attempt = AtomicCounter(0) + def execute_pdml(): with trace_call( "CloudSpanner.Database.execute_partitioned_pdml", @@ -706,7 +742,10 @@ def execute_pdml(): with SessionCheckout(self._pool) as session: add_span_event(span, "Starting BeginTransaction") txn = api.begin_transaction( - session=session.name, options=txn_options, metadata=metadata + session=session.name, options=txn_options, + metadata=self.metadata_with_request_id( + begin_txn_nth_request, begin_txn_attempt.value, metadata + ), ) txn_selector = TransactionSelector(id=txn.id) @@ -719,17 +758,24 @@ def execute_pdml(): query_options=query_options, request_options=request_options, ) - method = functools.partial( - api.execute_streaming_sql, - metadata=metadata, - ) + + def wrapped_method(*args, **kwargs): + partial_attempt.increment() + method = functools.partial( + api.execute_streaming_sql, + metadata=self.metadata_with_request_id( + partial_nth_request, partial_attempt.value, metadata + ), + ) + return method(*args, **kwargs) iterator = _restart_on_unavailable( - method=method, + method=wrapped_method, trace_name="CloudSpanner.ExecuteStreamingSql", request=request, transaction_selector=txn_selector, observability_options=self.observability_options, + attempt=begin_txn_attempt, ) result_set = StreamedResultSet(iterator) @@ -739,6 +785,14 @@ def execute_pdml(): return _retry_on_aborted(execute_pdml, DEFAULT_RETRY_BACKOFF)() + @property + def _next_nth_request(self): + return self._instance._client._next_nth_request + + @property + def _nth_client_id(self): + return self._instance._client._nth_client_id + def session(self, labels=None, database_role=None): """Factory to create a session for this database. 
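# Not part of this patch: _metadata_with_request_id lives in _helpers. A
# minimal sketch of the shape its callers above assume, with the header laid
# out as version.rand_process_id.client_id.channel_id.nth_request.attempt
# (the leading "1" is the format version asserted by the tests below):
from google.cloud.spanner_v1.request_id_header import REQ_RAND_PROCESS_ID


def _metadata_with_request_id_sketch(
    client_id, channel_id, nth_request, attempt, prior_metadata=()
):
    req_id = f"1.{REQ_RAND_PROCESS_ID}.{client_id}.{channel_id}.{nth_request}.{attempt}"
    # Append rather than mutate, so callers can pass shared metadata lists.
    return list(prior_metadata) + [("x-goog-spanner-request-id", req_id)]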
diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py
index 596f76a1f1..c47c6ecfe3 100644
--- a/google/cloud/spanner_v1/pool.py
+++ b/google/cloud/spanner_v1/pool.py
@@ -243,6 +243,7 @@ def bind(self, database):
             "CloudSpanner.FixedPool.BatchCreateSessions",
             observability_options=observability_options,
         ) as span:
+            attempt = 1
             returned_session_count = 0
             while not self._sessions.full():
                 request.session_count = requested_session_count - self._sessions.qsize()
@@ -251,9 +252,12 @@ def bind(self, database):
                     f"Creating {request.session_count} sessions",
                     span_event_attributes,
                 )
+                all_metadata = database.metadata_with_request_id(
+                    database._next_nth_request, attempt, metadata
+                )
                 resp = api.batch_create_sessions(
                     request=request,
-                    metadata=metadata,
+                    metadata=all_metadata,
                 )

                 add_span_event(
diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py
index ccc0c4ebdc..163d5fe61e 100644
--- a/google/cloud/spanner_v1/session.py
+++ b/google/cloud/spanner_v1/session.py
@@ -193,7 +193,8 @@ def exists(self):
             current_span, "Checking if Session exists", {"session.id": self._session_id}
         )

-        api = self._database.spanner_api
+        database = self._database
+        api = database.spanner_api
         metadata = _metadata_with_prefix(self._database.name)
         if self._database._route_to_leader_enabled:
             metadata.append(
@@ -202,12 +203,16 @@ def exists(self):
                 )
             )

+        all_metadata = database.metadata_with_request_id(
+            database._next_nth_request, 1, metadata
+        )
+
         observability_options = getattr(self._database, "observability_options", None)
         with trace_call(
             "CloudSpanner.GetSession", self, observability_options=observability_options
         ) as span:
             try:
-                api.get_session(name=self.name, metadata=metadata)
+                api.get_session(name=self.name, metadata=all_metadata)
                 if span:
                     span.set_attribute("session_found", True)
             except NotFound:
@@ -237,8 +242,11 @@ def delete(self):
             current_span, "Deleting Session", {"session.id": self._session_id}
         )

-        api = self._database.spanner_api
-        metadata = _metadata_with_prefix(self._database.name)
+        database = self._database
+        api = database.spanner_api
+        metadata = database.metadata_with_request_id(
+            database._next_nth_request, 1, _metadata_with_prefix(database.name)
+        )
         observability_options = getattr(self._database, "observability_options", None)
         with trace_call(
             "CloudSpanner.DeleteSession",
@@ -259,7 +267,10 @@ def ping(self):
         if self._session_id is None:
             raise ValueError("Session ID not set by back-end")
         api = self._database.spanner_api
-        metadata = _metadata_with_prefix(self._database.name)
+        database = self._database
+        metadata = database.metadata_with_request_id(
+            database._next_nth_request, 1, _metadata_with_prefix(database.name)
+        )
         request = ExecuteSqlRequest(session=self.name, sql="SELECT 1")
         api.execute_sql(request=request, metadata=metadata)
         self._last_use_time = datetime.now()
diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py
index f9edbe96fa..b450f2e88c 100644
--- a/google/cloud/spanner_v1/snapshot.py
+++ b/google/cloud/spanner_v1/snapshot.py
@@ -35,9 +35,11 @@
     _merge_query_options,
     _metadata_with_prefix,
     _metadata_with_leader_aware_routing,
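    # Note for the wrapped call sites below: they read the counter via
    # getattr(database, "_next_nth_request", 0) so that bare mock databases
    # in the unit tests, which lack the property, still work.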
_metadata_with_request_id, _retry, _check_rst_stream_error, _SessionWrapper, + AtomicCounter, ) from google.cloud.spanner_v1._opentelemetry_tracing import trace_call from google.cloud.spanner_v1.streamed import StreamedResultSet @@ -58,6 +60,7 @@ def _restart_on_unavailable( transaction=None, transaction_selector=None, observability_options=None, + attempt=0, ): """Restart iteration after :exc:`.ServiceUnavailable`. @@ -89,6 +92,7 @@ def _restart_on_unavailable( iterator = None while True: + attempt += 1 try: if iterator is None: with trace_call( @@ -323,13 +327,24 @@ def read( data_boost_enabled=data_boost_enabled, directed_read_options=directed_read_options, ) - restart = functools.partial( - api.streaming_read, - request=request, - metadata=metadata, - retry=retry, - timeout=timeout, - ) + + nth_request = getattr(database, "_next_nth_request", 0) + attempt = AtomicCounter(0) + + def wrapped_restart(*args, **kwargs): + attempt.increment() + all_metadata = database.metadata_with_request_id( + nth_request, attempt.value, metadata + ) + + restart = functools.partial( + api.streaming_read, + request=request, + metadata=all_metadata, + retry=retry, + timeout=timeout, + ) + return restart(*args, **kwargs) trace_attributes = {"table_id": table, "columns": columns} observability_options = getattr(database, "observability_options", None) @@ -338,7 +353,7 @@ def read( # lock is added to handle the inline begin for first rpc with self._lock: iterator = _restart_on_unavailable( - restart, + wrapped_restart, request, f"CloudSpanner.{type(self).__name__}.read", self._session, @@ -360,7 +375,7 @@ def read( ) else: iterator = _restart_on_unavailable( - restart, + wrapped_restart, request, f"CloudSpanner.{type(self).__name__}.read", self._session, @@ -539,13 +554,23 @@ def execute_sql( data_boost_enabled=data_boost_enabled, directed_read_options=directed_read_options, ) - restart = functools.partial( - api.execute_streaming_sql, - request=request, - metadata=metadata, - retry=retry, - timeout=timeout, - ) + + nth_request = getattr(database, "_next_nth_request", 0) + attempt = AtomicCounter(0) + + def wrapped_restart(*args, **kwargs): + attempt.increment() + restart = functools.partial( + api.execute_streaming_sql, + request=request, + metadata=database.metadata_with_request_id( + nth_request, attempt.value, metadata + ), + retry=retry, + timeout=timeout, + ) + + return restart(*args, **kwargs) trace_attributes = {"db.statement": sql} observability_options = getattr(database, "observability_options", None) @@ -554,7 +579,7 @@ def execute_sql( # lock is added to handle the inline begin for first rpc with self._lock: return self._get_streamed_result_set( - restart, + wrapped_restart, request, trace_attributes, column_info, @@ -563,7 +588,7 @@ def execute_sql( ) else: return self._get_streamed_result_set( - restart, + wrapped_restart, request, trace_attributes, column_info, @@ -690,15 +715,25 @@ def partition_read( extra_attributes=trace_attributes, observability_options=getattr(database, "observability_options", None), ): - method = functools.partial( - api.partition_read, - request=request, - metadata=metadata, - retry=retry, - timeout=timeout, - ) + nth_request = getattr(database, "_next_nth_request", 0) + attempt = AtomicCounter(0) + + def wrapped_method(*args, **kwargs): + attempt.increment() + all_metadata = database.metadata_with_request_id( + nth_request, attempt.value, metadata + ) + method = functools.partial( + api.partition_read, + request=request, + metadata=all_metadata, + retry=retry, + 
timeout=timeout, + ) + return method(*args, **kwargs) + response = _retry( - method, + wrapped_method, allowed_exceptions={InternalServerError: _check_rst_stream_error}, ) @@ -793,15 +828,25 @@ def partition_query( trace_attributes, observability_options=getattr(database, "observability_options", None), ): - method = functools.partial( - api.partition_query, - request=request, - metadata=metadata, - retry=retry, - timeout=timeout, - ) + nth_request = getattr(database, "_next_nth_request", 0) + attempt = AtomicCounter(0) + + def wrapped_method(*args, **kwargs): + attempt.increment() + all_metadata = database.metadata_with_request_id( + nth_request, attempt.value, metadata + ) + method = functools.partial( + api.partition_query, + request=request, + metadata=all_metadata, + retry=retry, + timeout=timeout, + ) + return method(*args, **kwargs) + response = _retry( - method, + wrapped_method, allowed_exceptions={InternalServerError: _check_rst_stream_error}, ) @@ -939,14 +984,24 @@ def begin(self): self._session, observability_options=getattr(database, "observability_options", None), ): - method = functools.partial( - api.begin_transaction, - session=self._session.name, - options=txn_selector.begin, - metadata=metadata, - ) + nth_request = getattr(database, "_next_nth_request", 0) + attempt = AtomicCounter(0) + + def wrapped_method(*args, **kwargs): + attempt.increment() + all_metadata = database.metadata_with_request_id( + nth_request, attempt.value, metadata + ) + method = functools.partial( + api.begin_transaction, + session=self._session.name, + options=txn_selector.begin, + metadata=all_metadata, + ) + return method(*args, **kwargs) + response = _retry( - method, + wrapped_method, allowed_exceptions={InternalServerError: _check_rst_stream_error}, ) self._transaction_id = response.id diff --git a/google/cloud/spanner_v1/testing/database_test.py b/google/cloud/spanner_v1/testing/database_test.py index 54afda11e0..80f040d7e0 100644 --- a/google/cloud/spanner_v1/testing/database_test.py +++ b/google/cloud/spanner_v1/testing/database_test.py @@ -25,6 +25,7 @@ from google.cloud.spanner_v1.testing.interceptors import ( MethodCountInterceptor, MethodAbortInterceptor, + XGoogRequestIDHeaderInterceptor, ) @@ -34,6 +35,8 @@ class TestDatabase(Database): currently, and we don't want to make changes in the Database class for testing purpose as this is a hack to use interceptors in tests.""" + _interceptors = [] + def __init__( self, database_id, @@ -74,6 +77,8 @@ def spanner_api(self): client_options = client._client_options if self._instance.emulator_host is not None: channel = grpc.insecure_channel(self._instance.emulator_host) + self._x_goog_request_id_interceptor = XGoogRequestIDHeaderInterceptor() + self._interceptors.append(self._x_goog_request_id_interceptor) channel = grpc.intercept_channel(channel, *self._interceptors) transport = SpannerGrpcTransport(channel=channel) self._spanner_api = SpannerClient( diff --git a/google/cloud/spanner_v1/testing/interceptors.py b/google/cloud/spanner_v1/testing/interceptors.py index a8b015a87d..918c7d76b9 100644 --- a/google/cloud/spanner_v1/testing/interceptors.py +++ b/google/cloud/spanner_v1/testing/interceptors.py @@ -13,6 +13,8 @@ # limitations under the License. 
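# Not shown in this patch: the AtomicCounter imported from _helpers throughout.
# A minimal sketch of the behavior its callers rely on (increment() returns
# the new value, .value reads it), assuming a lock-based implementation:
import threading


class AtomicCounterSketch:
    def __init__(self, start_value=0):  # parameter name is an assumption
        self._lock = threading.Lock()
        self._value = start_value

    @property
    def value(self):
        with self._lock:
            return self._value

    def increment(self, step=1):
        # Returns the post-increment value, matching call sites such as
        # Client.NTH_CLIENT.increment() above.
        with self._lock:
            self._value += step
            return self._value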
 from collections import defaultdict
+import threading
+
 from grpc_interceptor import ClientInterceptor
 from google.api_core.exceptions import Aborted

@@ -63,3 +65,75 @@ def reset(self):
         self._method_to_abort = None
         self._count = 0
         self._connection = None
+
+
+X_GOOG_REQUEST_ID = "x-goog-spanner-request-id"
+
+
+class XGoogRequestIDHeaderInterceptor(ClientInterceptor):
+    def __init__(self):
+        self._unary_req_segments = []
+        self._stream_req_segments = []
+        self.__lock = threading.Lock()
+
+    def intercept(self, method, request_or_iterator, call_details):
+        metadata = call_details.metadata
+        x_goog_request_id = None
+        for key, value in metadata:
+            if key == X_GOOG_REQUEST_ID:
+                x_goog_request_id = value
+                break
+
+        if not x_goog_request_id:
+            raise Exception(
+                f"Missing {X_GOOG_REQUEST_ID} header in {call_details.method}"
+            )
+
+        response_or_iterator = method(request_or_iterator, call_details)
+        streaming = getattr(response_or_iterator, "__iter__", None) is not None
+        with self.__lock:
+            if streaming:
+                self._stream_req_segments.append(
+                    (call_details.method, parse_request_id(x_goog_request_id))
+                )
+            else:
+                self._unary_req_segments.append(
+                    (call_details.method, parse_request_id(x_goog_request_id))
+                )
+
+        return response_or_iterator
+
+    @property
+    def unary_request_ids(self):
+        return self._unary_req_segments
+
+    @property
+    def stream_request_ids(self):
+        return self._stream_req_segments
+
+    def reset(self):
+        self._stream_req_segments.clear()
+        self._unary_req_segments.clear()
+
+
+def parse_request_id(request_id_str):
+    version, rand_process_id, client_id, channel_id, nth_request, nth_attempt = map(
+        int, request_id_str.split(".")
+    )
+    return (
+        version,
+        rand_process_id,
+        client_id,
+        channel_id,
+        nth_request,
+        nth_attempt,
+    )
diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py
index cc59789248..7f15c1ff00 100644
--- a/google/cloud/spanner_v1/transaction.py
+++ b/google/cloud/spanner_v1/transaction.py
@@ -30,6 +30,7 @@
 from google.cloud.spanner_v1 import ExecuteSqlRequest
 from google.cloud.spanner_v1 import TransactionSelector
 from google.cloud.spanner_v1 import TransactionOptions
+from google.cloud.spanner_v1._helpers import AtomicCounter
 from google.cloud.spanner_v1.snapshot import _SnapshotBase
 from google.cloud.spanner_v1.batch import _BatchBase
 from google.cloud.spanner_v1._opentelemetry_tracing import add_span_event, trace_call
@@ -161,12 +162,20 @@ def begin(self):
             self._session,
             observability_options=observability_options,
         ) as span:
-            method = functools.partial(
-                api.begin_transaction,
-                session=self._session.name,
-                options=txn_options,
-                metadata=metadata,
-            )
+            attempt = AtomicCounter(0)
+            nth_request = database._next_nth_request
+
+            def wrapped_method(*args, **kwargs):
+                attempt.increment()
+                method = functools.partial(
+                    api.begin_transaction,
+                    session=self._session.name,
+                    options=txn_options,
+                    metadata=database.metadata_with_request_id(
+                        nth_request, attempt.value, metadata
+                    ),
+                )
+                return method(*args, **kwargs)

             def beforeNextRetry(nthRetry, delayInSeconds):
                 add_span_event(
@@ -176,7 +185,7 @@ def beforeNextRetry(nthRetry, delayInSeconds):
                 )

             response = _retry(
-                method,
+                wrapped_method,
                 allowed_exceptions={InternalServerError: _check_rst_stream_error},
                 beforeNextRetry=beforeNextRetry,
             )
@@ -197,22 +206,33 @@ def rollback(self):
                         database._route_to_leader_enabled
                     )
                 )
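# A note on the wrapped_method pattern used throughout this file: nth_request
# is reserved once per logical operation, while the AtomicCounter "attempt" is
# bumped inside the wrapper, so each retry issued by _retry() re-derives its
# metadata and carries a fresh attempt number in the same request id.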
+ observability_options = getattr(database, "observability_options", None) with trace_call( f"CloudSpanner.{type(self).__name__}.rollback", self._session, observability_options=observability_options, ): - method = functools.partial( - api.rollback, - session=self._session.name, - transaction_id=self._transaction_id, - metadata=metadata, - ) + attempt = AtomicCounter(0) + nth_request = database._next_nth_request + + def wrapped_method(*args, **kwargs): + attempt.increment() + method = functools.partial( + api.rollback, + session=self._session.name, + transaction_id=self._transaction_id, + metadata=database.metadata_with_request_id( + nth_request, attempt.value, metadata + ), + ) + return method(*args, **kwargs) + _retry( - method, + wrapped_method, allowed_exceptions={InternalServerError: _check_rst_stream_error}, ) + self.rolled_back = True del self._session._transaction @@ -287,11 +307,19 @@ def commit( add_span_event(span, "Starting Commit") - method = functools.partial( - api.commit, - request=request, - metadata=metadata, - ) + attempt = AtomicCounter(0) + nth_request = database._next_nth_request + + def wrapped_method(*args, **kwargs): + attempt.increment() + method = functools.partial( + api.commit, + request=request, + metadata=database.metadata_with_request_id( + nth_request, attempt.value, metadata + ), + ) + return method(*args, **kwargs) def beforeNextRetry(nthRetry, delayInSeconds): add_span_event( @@ -301,7 +329,7 @@ def beforeNextRetry(nthRetry, delayInSeconds): ) response = _retry( - method, + wrapped_method, allowed_exceptions={InternalServerError: _check_rst_stream_error}, beforeNextRetry=beforeNextRetry, ) @@ -435,19 +463,27 @@ def execute_update( request_options=request_options, ) - method = functools.partial( - api.execute_sql, - request=request, - metadata=metadata, - retry=retry, - timeout=timeout, - ) + nth_request = database._next_nth_request + attempt = AtomicCounter(0) + + def wrapped_method(*args, **kwargs): + attempt.increment() + method = functools.partial( + api.execute_sql, + request=request, + metadata=database.metadata_with_request_id( + nth_request, attempt.value, metadata + ), + retry=retry, + timeout=timeout, + ) + return method(*args, **kwargs) if self._transaction_id is None: # lock is added to handle the inline begin for first rpc with self._lock: response = self._execute_request( - method, + wrapped_method, request, f"CloudSpanner.{type(self).__name__}.execute_update", self._session, @@ -464,7 +500,7 @@ def execute_update( self._transaction_id = response.metadata.transaction.id else: response = self._execute_request( - method, + wrapped_method, request, f"CloudSpanner.{type(self).__name__}.execute_update", self._session, @@ -560,19 +596,27 @@ def batch_update( request_options=request_options, ) - method = functools.partial( - api.execute_batch_dml, - request=request, - metadata=metadata, - retry=retry, - timeout=timeout, - ) + nth_request = database._next_nth_request + attempt = AtomicCounter(0) + + def wrapped_method(*args, **kwargs): + attempt.increment() + method = functools.partial( + api.execute_batch_dml, + request=request, + metadata=database.metadata_with_request_id( + nth_request, attempt.value, metadata + ), + retry=retry, + timeout=timeout, + ) + return method(*args, **kwargs) if self._transaction_id is None: # lock is added to handle the inline begin for first rpc with self._lock: response = self._execute_request( - method, + wrapped_method, request, "CloudSpanner.DMLTransaction", self._session, @@ -590,7 +634,7 @@ def batch_update( break 
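# (The break above exits the scan of response.result_sets as soon as the
# inline-begin transaction id has been captured; the else branch below handles
# the case where a transaction id already existed.)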
else: response = self._execute_request( - method, + wrapped_method, request, "CloudSpanner.DMLTransaction", self._session, diff --git a/tests/mockserver_tests/mock_server_test_base.py b/tests/mockserver_tests/mock_server_test_base.py index b332c88d7c..3a826720c1 100644 --- a/tests/mockserver_tests/mock_server_test_base.py +++ b/tests/mockserver_tests/mock_server_test_base.py @@ -186,6 +186,8 @@ def instance(self) -> Instance: def database(self) -> Database: if self._database is None: self._database = self.instance.database( - "test-database", pool=FixedSizePool(size=10) + "test-database", + pool=FixedSizePool(size=10), + enable_interceptors_in_tests=True, ) return self._database diff --git a/tests/unit/test_atomic_counter.py b/tests/unit/test_atomic_counter.py index 92d10cac79..d1b22cf841 100644 --- a/tests/unit/test_atomic_counter.py +++ b/tests/unit/test_atomic_counter.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import time import random import threading import unittest diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 13a37f66fe..9ec8b5ddfb 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -26,6 +26,11 @@ from google.protobuf.field_mask_pb2 import FieldMask from google.cloud.spanner_v1 import RequestOptions, DirectedReadOptions +from google.cloud.spanner_v1._helpers import ( + AtomicCounter, + _metadata_with_request_id, +) +from google.cloud.spanner_v1.request_id_header import REQ_RAND_PROCESS_ID DML_WO_PARAM = """ DELETE FROM citizens @@ -111,7 +116,9 @@ def _make_database_admin_api(): def _make_spanner_api(): from google.cloud.spanner_v1 import SpannerClient - return mock.create_autospec(SpannerClient, instance=True) + api = mock.create_autospec(SpannerClient, instance=True) + api._transport = "transport" + return api def test_ctor_defaults(self): from google.cloud.spanner_v1.pool import BurstyPool @@ -545,7 +552,9 @@ def test_create_grpc_error(self): api.create_database.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_create_already_exists(self): @@ -572,7 +581,9 @@ def test_create_already_exists(self): api.create_database.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_create_instance_not_found(self): @@ -598,7 +609,9 @@ def test_create_instance_not_found(self): api.create_database.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_create_success(self): @@ -634,7 +647,9 @@ def test_create_success(self): api.create_database.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_create_success_w_encryption_config_dict(self): @@ -671,7 +686,9 @@ def test_create_success_w_encryption_config_dict(self): api.create_database.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_create_success_w_proto_descriptors(self): @@ -706,7 +723,9 @@ def 
test_create_success_w_proto_descriptors(self): api.create_database.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_exists_grpc_error(self): @@ -724,7 +743,9 @@ def test_exists_grpc_error(self): api.get_database_ddl.assert_called_once_with( database=self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_exists_not_found(self): @@ -741,7 +762,9 @@ def test_exists_not_found(self): api.get_database_ddl.assert_called_once_with( database=self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_exists_success(self): @@ -760,7 +783,9 @@ def test_exists_success(self): api.get_database_ddl.assert_called_once_with( database=self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_reload_grpc_error(self): @@ -778,7 +803,9 @@ def test_reload_grpc_error(self): api.get_database_ddl.assert_called_once_with( database=self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_reload_not_found(self): @@ -796,7 +823,9 @@ def test_reload_not_found(self): api.get_database_ddl.assert_called_once_with( database=self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_reload_success(self): @@ -855,11 +884,15 @@ def test_reload_success(self): api.get_database_ddl.assert_called_once_with( database=self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) api.get_database.assert_called_once_with( name=self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_update_ddl_grpc_error(self): @@ -885,7 +918,9 @@ def test_update_ddl_grpc_error(self): api.update_database_ddl.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_update_ddl_not_found(self): @@ -911,7 +946,9 @@ def test_update_ddl_not_found(self): api.update_database_ddl.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_update_ddl(self): @@ -938,7 +975,9 @@ def test_update_ddl(self): api.update_database_ddl.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_update_ddl_w_operation_id(self): @@ -965,7 +1004,9 @@ def test_update_ddl_w_operation_id(self): api.update_database_ddl.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_update_success(self): @@ -991,7 +1032,9 @@ def test_update_success(self): api.update_database.assert_called_once_with( 
database=expected_database, update_mask=field_mask, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_update_ddl_w_proto_descriptors(self): @@ -1019,7 +1062,9 @@ def test_update_ddl_w_proto_descriptors(self): api.update_database_ddl.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_drop_grpc_error(self): @@ -1037,7 +1082,9 @@ def test_drop_grpc_error(self): api.drop_database.assert_called_once_with( database=self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_drop_not_found(self): @@ -1055,7 +1102,9 @@ def test_drop_not_found(self): api.drop_database.assert_called_once_with( database=self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_drop_success(self): @@ -1072,7 +1121,9 @@ def test_drop_success(self): api.drop_database.assert_called_once_with( database=self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def _execute_partitioned_dml_helper( @@ -1151,6 +1202,10 @@ def _execute_partitioned_dml_helper( metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) if retried: @@ -1192,6 +1247,10 @@ def _execute_partitioned_dml_helper( metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) if retried: @@ -1486,7 +1545,9 @@ def test_restore_grpc_error(self): api.restore_database.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_restore_not_found(self): @@ -1512,7 +1573,9 @@ def test_restore_not_found(self): api.restore_database.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_restore_success(self): @@ -1549,7 +1612,9 @@ def test_restore_success(self): api.restore_database.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_restore_success_w_encryption_config_dict(self): @@ -1590,7 +1655,9 @@ def test_restore_success_w_encryption_config_dict(self): api.restore_database.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_restore_w_invalid_encryption_config_dict(self): @@ -1737,7 +1804,9 @@ def test_list_database_roles_grpc_error(self): api.list_database_roles.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) def test_list_database_roles_defaults(self): @@ 
-1758,7 +1827,9 @@ def test_list_database_roles_defaults(self): api.list_database_roles.assert_called_once_with( request=expected_request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ], ) self.assertIsNotNone(resp) @@ -1845,6 +1916,10 @@ def test_context_mgr_success(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -1892,6 +1967,10 @@ def test_context_mgr_w_commit_stats_success(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -1939,6 +2018,10 @@ def test_context_mgr_w_aborted_commit_status(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -3013,6 +3096,10 @@ def test_context_mgr_success(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -3111,6 +3198,8 @@ def _make_database_admin_api(): class _Client(object): + NTH_CLIENT = AtomicCounter() + def __init__( self, project=TestDatabase.PROJECT_ID, @@ -3129,6 +3218,12 @@ def __init__( self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") self.route_to_leader_enabled = route_to_leader_enabled self.directed_read_options = directed_read_options + self._nth_client_id = _Client.NTH_CLIENT.increment() + self._nth_request = AtomicCounter() + + @property + def _next_nth_request(self): + return self._nth_request.increment() class _Instance(object): @@ -3157,6 +3252,28 @@ def __init__(self, name, instance=None): self.logger = mock.create_autospec(Logger, instance=True) self._directed_read_options = None + @property + def _next_nth_request(self): + return self._instance._client._next_nth_request + + @property + def _nth_client_id(self): + return self._instance._client._nth_client_id + + def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): + client_id = self._nth_client_id + return _metadata_with_request_id( + self._nth_client_id, + self._channel_id, + nth_request, + nth_attempt, + prior_metadata, + ) + + @property + def _channel_id(self): + return 1 + class _Pool(object): _bound = None diff --git a/tests/unit/test_request_id_header.py b/tests/unit/test_request_id_header.py new file mode 100644 index 0000000000..35c7de7410 --- /dev/null +++ b/tests/unit/test_request_id_header.py @@ -0,0 +1,387 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
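+# "time" is used by the polling loop in test_snapshot_read_concurrent below.
+import time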
+ +import random +import threading + +from tests.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_select1_result, +) +from google.cloud.spanner_v1.testing.interceptors import XGoogRequestIDHeaderInterceptor +from google.cloud.spanner_v1 import ( + BatchCreateSessionsRequest, + BeginTransactionRequest, + ExecuteSqlRequest, +) +from google.api_core.exceptions import Aborted +from google.rpc import code_pb2 +from google.cloud.spanner_v1.request_id_header import REQ_RAND_PROCESS_ID + + +class TestRequestIDHeader(MockServerTestBase): + def tearDown(self): + self.database._x_goog_request_id_interceptor.reset() + + def test_snapshot_read(self): + add_select1_result() + if not getattr(self.database, "_interceptors", None): + self.database._interceptors = MockServerTestBase._interceptors + with self.database.snapshot() as snapshot: + results = snapshot.execute_sql("select 1") + result_list = [] + for row in results: + result_list.append(row) + self.assertEqual(1, row[0]) + self.assertEqual(1, len(result_list)) + + requests = self.spanner_service.requests + self.assertEqual(2, len(requests), msg=requests) + self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest)) + self.assertTrue(isinstance(requests[1], ExecuteSqlRequest)) + + # Now ensure monotonicity of the received request-id segments. + got_stream_segments, got_unary_segments = self.canonicalize_request_id_headers() + want_unary_segments = [ + ( + "/google.spanner.v1.Spanner/BatchCreateSessions", + (1, REQ_RAND_PROCESS_ID, 1, 1, 1, 1), + ) + ] + want_stream_segments = [ + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, 1, 1, 2, 1), + ) + ] + + assert got_unary_segments == want_unary_segments + assert got_stream_segments == want_stream_segments + + def test_snapshot_read_concurrent(self): + def select1(): + with self.database.snapshot() as snapshot: + rows = snapshot.execute_sql("select 1") + res_list = [] + for row in rows: + self.assertEqual(1, row[0]) + res_list.append(row) + self.assertEqual(1, len(res_list)) + + n = 10 + threads = [] + for i in range(n): + th = threading.Thread(target=select1, name=f"snapshot-select1-{i}") + th.run() + threads.append(th) + + random.shuffle(threads) + + while True: + n_finished = 0 + for thread in threads: + if thread.is_alive(): + thread.join() + else: + n_finished += 1 + + if n_finished == len(threads): + break + + time.sleep(1) + + requests = self.spanner_service.requests + self.assertEqual(n * 2, len(requests), msg=requests) + + client_id = self.database._nth_client_id + channel_id = self.database._channel_id + got_stream_segments, got_unary_segments = self.canonicalize_request_id_headers() + + want_unary_segments = [ + ( + "/google.spanner.v1.Spanner/BatchCreateSessions", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 1, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 3, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 5, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 7, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 9, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 11, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 13, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, 
REQ_RAND_PROCESS_ID, client_id, channel_id, 15, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 17, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 19, 1), + ), + ] + assert got_unary_segments == want_unary_segments + + want_stream_segments = [ + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 2, 1), + ), + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 4, 1), + ), + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 6, 1), + ), + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 8, 1), + ), + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 10, 1), + ), + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 12, 1), + ), + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 14, 1), + ), + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 16, 1), + ), + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 18, 1), + ), + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 20, 1), + ), + ] + assert got_stream_segments == want_stream_segments + + def test_database_run_in_transaction_retries_on_abort(self): + counters = dict(aborted=0) + want_failed_attempts = 2 + + def select_in_txn(txn): + results = txn.execute_sql("select 1") + for row in results: + _ = row + + if counters["aborted"] < want_failed_attempts: + counters["aborted"] += 1 + raise Aborted( + "Thrown from ClientInterceptor for testing", + errors=[FauxCall(code_pb2.ABORTED)], + ) + + add_select1_result() + if not getattr(self.database, "_interceptors", None): + self.database._interceptors = MockServerTestBase._interceptors + + self.database.run_in_transaction(select_in_txn) + + def test_database_execute_partitioned_dml_request_id(self): + add_select1_result() + if not getattr(self.database, "_interceptors", None): + self.database._interceptors = MockServerTestBase._interceptors + _ = self.database.execute_partitioned_dml("select 1") + + requests = self.spanner_service.requests + self.assertEqual(3, len(requests), msg=requests) + self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest)) + self.assertTrue(isinstance(requests[1], BeginTransactionRequest)) + self.assertTrue(isinstance(requests[2], ExecuteSqlRequest)) + + # Now ensure monotonicity of the received request-id segments. 
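+        # (Expected here: BatchCreateSessions reserves nth_request=1,
+        # BeginTransaction 2, and ExecuteStreamingSql 3, each on attempt 1.)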
+        got_stream_segments, got_unary_segments = self.canonicalize_request_id_headers()
+        want_unary_segments = [
+            (
+                "/google.spanner.v1.Spanner/BatchCreateSessions",
+                (1, REQ_RAND_PROCESS_ID, 1, 1, 1, 1),
+            ),
+            (
+                "/google.spanner.v1.Spanner/BeginTransaction",
+                (1, REQ_RAND_PROCESS_ID, 1, 1, 2, 1),
+            ),
+        ]
+        want_stream_segments = [
+            (
+                "/google.spanner.v1.Spanner/ExecuteStreamingSql",
+                (1, REQ_RAND_PROCESS_ID, 1, 1, 3, 1),
+            )
+        ]
+
+        assert got_unary_segments == want_unary_segments
+        assert got_stream_segments == want_stream_segments
+
+    def canonicalize_request_id_headers(self):
+        src = self.database._x_goog_request_id_interceptor
+        return src._stream_req_segments, src._unary_req_segments
+
+
+class FauxCall:
+    def __init__(self, code, details="FauxCall"):
+        self._code = code
+        self._details = details
+
+    def initial_metadata(self):
+        return {}
+
+    def trailing_metadata(self):
+        return {}
+
+    def code(self):
+        return self._code
+
+    def details(self):
+        return self._details
diff --git a/tests/unit/test_snapshot.py b/tests/unit/test_snapshot.py
index 02cc35e017..68e7ff1739 100644
--- a/tests/unit/test_snapshot.py
+++ b/tests/unit/test_snapshot.py
@@ -25,6 +25,11 @@
 )
 from google.cloud.spanner_v1.param_types import INT64
 from google.api_core.retry import Retry
+from google.cloud.spanner_v1._helpers import (
+    AtomicCounter,
+    _metadata_with_request_id,
+)
+from google.cloud.spanner_v1.request_id_header import REQ_RAND_PROCESS_ID

 TABLE_NAME = "citizens"
 COLUMNS = ["email", "first_name", "last_name", "age"]
@@ -278,7 +283,7 @@ def test_iteration_w_raw_raising_retryable_internal_error(self):
             fail_after=True,
             error=InternalServerError(
                 "Received unexpected EOS on DATA frame from server"
-            )
+            ),
         )
         after = _MockIterator(*LAST)
         request = mock.Mock(test="test", spec=["test", "resume_token"])
@@ -450,7 +455,7 @@ def test_iteration_w_raw_raising_retryable_internal_error_after_token(self):
             fail_after=True,
             error=InternalServerError(
                 "Received unexpected EOS on DATA frame from server"
-            )
+            ),
         )
         after = _MockIterator(*SECOND)
         request = mock.Mock(test="test", spec=["test", "resume_token"])
@@ -767,7 +772,13 @@ def _read_helper(
         )
         api.streaming_read.assert_called_once_with(
             request=expected_request,
-            metadata=[("google-cloud-resource-prefix", database.name)],
+            metadata=[
+                ("google-cloud-resource-prefix", database.name),
+                (
+                    "x-goog-spanner-request-id",
+                    f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1",
+                ),
+            ],
             retry=retry,
             timeout=timeout,
         )
@@ -1016,7 +1027,13 @@ def _execute_sql_helper(
         )
         api.execute_streaming_sql.assert_called_once_with(
             request=expected_request,
-            metadata=[("google-cloud-resource-prefix", database.name)],
+            metadata=[
+                ("google-cloud-resource-prefix", database.name),
+                (
+                    "x-goog-spanner-request-id",
+                    f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1",
+                ),
+            ],
             timeout=timeout,
             retry=retry,
         )
@@ -1189,6 +1206,10 @@ def _partition_read_helper(
             metadata=[
                 ("google-cloud-resource-prefix", database.name),
                 ("x-goog-spanner-route-to-leader", "true"),
+                (
+                    "x-goog-spanner-request-id",
+                    f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1",
+                ),
             ],
             retry=retry,
             timeout=timeout,
@@ -1368,6 +1389,10 @@ def _partition_query_helper(
             metadata=[
                 ("google-cloud-resource-prefix", database.name),
                 ("x-goog-spanner-route-to-leader", "true"),
+                (
+                    "x-goog-spanner-request-id",
+                    f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1",
+                ),
             ],
             retry=retry,
             timeout=timeout,
@@ -1764,7 +1789,13 @@ def test_begin_ok_exact_staleness(self):
         api.begin_transaction.assert_called_once_with(
             session=session.name,
             options=expected_txn_options,
-            metadata=[("google-cloud-resource-prefix", database.name)],
+            metadata=[
+                ("google-cloud-resource-prefix",
database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), + ], ) self.assertSpanAttributes( @@ -1800,7 +1831,13 @@ def test_begin_ok_exact_strong(self): api.begin_transaction.assert_called_once_with( session=session.name, options=expected_txn_options, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), + ], ) self.assertSpanAttributes( @@ -1811,10 +1848,18 @@ def test_begin_ok_exact_strong(self): class _Client(object): + NTH_CLIENT = AtomicCounter() + def __init__(self): from google.cloud.spanner_v1 import ExecuteSqlRequest self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") + self._nth_client_id = _Client.NTH_CLIENT.increment() + self._nth_request = AtomicCounter() + + @property + def _next_nth_request(self): + return self._nth_request.increment() class _Instance(object): @@ -1833,6 +1878,27 @@ def __init__(self, directed_read_options=None): def observability_options(self): return dict(db_name=self.name) + def _next_nth_request(self): + return self._instance._client._next_nth_request + + @property + def _nth_client_id(self): + return self._instance._client._nth_client_id + + def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): + client_id = self._nth_client_id + return _metadata_with_request_id( + self._nth_client_id, + self._channel_id, + nth_request, + nth_attempt, + prior_metadata, + ) + + @property + def _channel_id(self): + return 1 + class _Session(object): def __init__(self, database=None, name=TestSnapshot.SESSION_NAME): diff --git a/tests/unit/test_spanner.py b/tests/unit/test_spanner.py index ff34a109af..fdf477279b 100644 --- a/tests/unit/test_spanner.py +++ b/tests/unit/test_spanner.py @@ -37,9 +37,12 @@ from google.cloud.spanner_v1.keyset import KeySet from google.cloud.spanner_v1._helpers import ( + AtomicCounter, _make_value_pb, _merge_query_options, + _metadata_with_request_id, ) +from google.cloud.spanner_v1.request_id_header import REQ_RAND_PROCESS_ID import mock @@ -517,6 +520,10 @@ def test_transaction_should_include_begin_with_first_update(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -532,6 +539,10 @@ def test_transaction_should_include_begin_with_first_query(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], timeout=TIMEOUT, retry=RETRY, @@ -549,6 +560,10 @@ def test_transaction_should_include_begin_with_first_read(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -565,6 +580,10 @@ def test_transaction_should_include_begin_with_first_batch_update(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -590,6 +609,10 @@ def 
test_transaction_should_include_begin_w_exclude_txn_from_change_streams_with metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -608,6 +631,10 @@ def test_transaction_should_use_transaction_id_if_error_with_first_batch_update( metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -622,6 +649,10 @@ def test_transaction_should_use_transaction_id_if_error_with_first_batch_update( metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -638,6 +669,10 @@ def test_transaction_should_use_transaction_id_returned_by_first_query(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -651,6 +686,10 @@ def test_transaction_should_use_transaction_id_returned_by_first_query(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -667,6 +706,10 @@ def test_transaction_should_use_transaction_id_returned_by_first_update(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -680,6 +723,10 @@ def test_transaction_should_use_transaction_id_returned_by_first_update(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -701,6 +748,10 @@ def test_transaction_execute_sql_w_directed_read_options(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT, @@ -724,6 +775,10 @@ def test_transaction_streaming_read_w_directed_read_options(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -740,6 +795,10 @@ def test_transaction_should_use_transaction_id_returned_by_first_read(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -751,6 +810,10 @@ def test_transaction_should_use_transaction_id_returned_by_first_read(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, 
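# The f"1.{REQ_RAND_PROCESS_ID}...1.1.1" literal recurs in every assertion in
# this file. A hypothetical shared helper (not in this patch), assuming
# channel id, nth request, and attempt all start at 1 in these unit tests:
def expected_request_id(client_id, channel_id=1, nth_request=1, attempt=1):
    return f"1.{REQ_RAND_PROCESS_ID}.{client_id}.{channel_id}.{nth_request}.{attempt}"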
@@ -767,6 +830,10 @@ def test_transaction_should_use_transaction_id_returned_by_first_batch_update(se metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -779,6 +846,10 @@ def test_transaction_should_use_transaction_id_returned_by_first_batch_update(se metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -819,6 +890,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -829,6 +904,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -837,6 +916,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -880,6 +963,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -888,6 +975,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -898,6 +989,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -946,6 +1041,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -954,6 +1053,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -964,6 +1067,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + 
f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -1012,6 +1119,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) req = self._execute_sql_expected_request(database) @@ -1020,6 +1131,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -1030,6 +1145,10 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=RETRY, timeout=TIMEOUT, @@ -1055,11 +1174,19 @@ def test_transaction_should_execute_sql_with_route_to_leader_disabled(self): class _Client(object): + NTH_CLIENT = AtomicCounter() + def __init__(self): from google.cloud.spanner_v1 import ExecuteSqlRequest self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") self.directed_read_options = None + self._nth_client_id = _Client.NTH_CLIENT.increment() + self._nth_request = AtomicCounter() + + @property + def _next_nth_request(self): + return self._nth_request.increment() class _Instance(object): @@ -1074,6 +1201,28 @@ def __init__(self): self._route_to_leader_enabled = True self._directed_read_options = None + @property + def _next_nth_request(self): + return self._instance._client._next_nth_request + + @property + def _nth_client_id(self): + return self._instance._client._nth_client_id + + def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): + client_id = self._nth_client_id + return _metadata_with_request_id( + self._nth_client_id, + self._channel_id, + nth_request, + nth_attempt, + prior_metadata, + ) + + @property + def _channel_id(self): + return 1 + class _Session(object): _transaction = None diff --git a/tests/unit/test_transaction.py b/tests/unit/test_transaction.py index 9707632421..08ae928d4c 100644 --- a/tests/unit/test_transaction.py +++ b/tests/unit/test_transaction.py @@ -20,6 +20,11 @@ from google.cloud.spanner_v1 import TypeCode from google.api_core.retry import Retry from google.api_core import gapic_v1 +from google.cloud.spanner_v1._helpers import ( + AtomicCounter, + _metadata_with_request_id, +) +from google.cloud.spanner_v1.request_id_header import REQ_RAND_PROCESS_ID from tests._helpers import ( HAS_OPENTELEMETRY_INSTALLED, @@ -192,6 +197,10 @@ def test_begin_ok(self): [ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -296,6 +305,10 @@ def test_rollback_ok(self): [ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -481,6 +494,10 @@ def _commit_helper( [ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + 
"x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) self.assertEqual(actual_request_options, expected_request_options) @@ -655,6 +672,10 @@ def _execute_update_helper( metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -848,6 +869,10 @@ def _batch_update_helper( metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ), ], retry=retry, timeout=timeout, @@ -963,6 +988,10 @@ def test_context_mgr_success(self): [ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.2.1", + ), ], ) @@ -993,11 +1022,19 @@ def test_context_mgr_failure(self): class _Client(object): + NTH_CLIENT = AtomicCounter() + def __init__(self): from google.cloud.spanner_v1 import ExecuteSqlRequest self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") self.directed_read_options = None + self._nth_client_id = _Client.NTH_CLIENT.increment() + self._nth_request = AtomicCounter() + + @property + def _next_nth_request(self): + return self._nth_request.increment() class _Instance(object): @@ -1012,6 +1049,28 @@ def __init__(self): self._route_to_leader_enabled = True self._directed_read_options = None + @property + def _next_nth_request(self): + return self._instance._client._next_nth_request + + @property + def _nth_client_id(self): + return self._instance._client._nth_client_id + + def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): + client_id = self._nth_client_id + return _metadata_with_request_id( + self._nth_client_id, + self._channel_id, + nth_request, + nth_attempt, + prior_metadata, + ) + + @property + def _channel_id(self): + return 1 + class _Session(object): _transaction = None From 7a8ce09f38b3da6c81640d370dd3fdefa0c27cc0 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Fri, 20 Dec 2024 00:03:18 -0800 Subject: [PATCH 02/16] More plumbing for Database DDL methods --- google/cloud/spanner_v1/database.py | 53 +++++++++++++++++----- google/cloud/spanner_v1/pool.py | 4 +- google/cloud/spanner_v1/session.py | 26 ++++++++--- tests/unit/test_pool.py | 30 +++++++++++++ tests/unit/test_session.py | 70 +++++++++++++++++++++++++++-- 5 files changed, 161 insertions(+), 22 deletions(-) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 987ed97b96..84f0e2bafa 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -515,7 +515,10 @@ def create(self): database_dialect=self._database_dialect, proto_descriptors=self._proto_descriptors, ) - future = api.create_database(request=request, metadata=metadata) + future = api.create_database( + request=request, + metadata=self.metadata_with_request_id(self._next_nth_request, 1, metadata), + ) return future def exists(self): @@ -531,7 +534,12 @@ def exists(self): metadata = _metadata_with_prefix(self.name) try: - api.get_database_ddl(database=self.name, metadata=metadata) + api.get_database_ddl( + database=self.name, + metadata=self.metadata_with_request_id( + self._next_nth_request, 1, metadata + ), + ) except NotFound: return False return True @@ -548,10 +556,16 @@ def 
reload(self): """ api = self._instance._client.database_admin_api metadata = _metadata_with_prefix(self.name) - response = api.get_database_ddl(database=self.name, metadata=metadata) + response = api.get_database_ddl( + database=self.name, + metadata=self.metadata_with_request_id(self._next_nth_request, 1, metadata), + ) self._ddl_statements = tuple(response.statements) self._proto_descriptors = response.proto_descriptors - response = api.get_database(name=self.name, metadata=metadata) + response = api.get_database( + name=self.name, + metadata=self.metadata_with_request_id(self._next_nth_request, 1, metadata), + ) self._state = DatabasePB.State(response.state) self._create_time = response.create_time self._restore_info = response.restore_info @@ -596,7 +610,10 @@ def update_ddl(self, ddl_statements, operation_id="", proto_descriptors=None): proto_descriptors=proto_descriptors, ) - future = api.update_database_ddl(request=request, metadata=metadata) + future = api.update_database_ddl( + request=request, + metadata=self.metadata_with_request_id(self._next_nth_request, 1, metadata), + ) return future def update(self, fields): @@ -634,7 +651,9 @@ def update(self, fields): metadata = _metadata_with_prefix(self.name) future = api.update_database( - database=database_pb, update_mask=field_mask, metadata=metadata + database=database_pb, + update_mask=field_mask, + metadata=self.metadata_with_request_id(self._next_nth_request, 1, metadata), ) return future @@ -647,7 +666,10 @@ def drop(self): """ api = self._instance._client.database_admin_api metadata = _metadata_with_prefix(self.name) - api.drop_database(database=self.name, metadata=metadata) + api.drop_database( + database=self.name, + metadata=self.metadata_with_request_id(self._next_nth_request, 1, metadata), + ) def execute_partitioned_dml( self, @@ -1004,7 +1026,7 @@ def restore(self, source): ) future = api.restore_database( request=request, - metadata=metadata, + metadata=self.metadata_with_request_id(self._next_nth_request, 1, metadata), ) return future @@ -1073,7 +1095,10 @@ def list_database_roles(self, page_size=None): parent=self.name, page_size=page_size, ) - return api.list_database_roles(request=request, metadata=metadata) + return api.list_database_roles( + request=request, + metadata=self.metadata_with_request_id(self._next_nth_request, 1, metadata), + ) def table(self, table_id): """Factory to create a table object within this database. 
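For readers tracing literals like "1.{REQ_RAND_PROCESS_ID}.{client}.1.1.1" through the hunks above and the tests later in this series: metadata_with_request_id forwards to with_request_id in request_id_header.py, which is not reproduced in these patches. A rough sketch of the layout those call sites imply follows; the helper body is an assumption inferred from the test literals, not the shipped implementation.

    # Sketch only -- inferred from the test assertions, not request_id_header.py.
    # Layout: <version>.<rand_process_id>.<client_id>.<channel_id>.<nth_request>.<attempt>
    from google.cloud.spanner_v1.request_id_header import REQ_RAND_PROCESS_ID

    def with_request_id(client_id, channel_id, nth_request, attempt, metadata=None):
        req_id = f"1.{REQ_RAND_PROCESS_ID}.{client_id}.{channel_id}.{nth_request}.{attempt}"
        all_metadata = list(metadata or [])
        all_metadata.append(("x-goog-spanner-request-id", req_id))
        return all_metadata

Under that reading, a trailing "...1.1.1" decodes as channel 1, first request on the client, first attempt, and each DDL helper above consumes one fresh nth_request.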
@@ -1157,7 +1182,10 @@ def get_iam_policy(self, policy_version=None): requested_policy_version=policy_version ), ) - response = api.get_iam_policy(request=request, metadata=metadata) + response = api.get_iam_policy( + request=request, + metadata=self.metadata_with_request_id(self._next_nth_request, 1, metadata), + ) return response def set_iam_policy(self, policy): @@ -1179,7 +1207,10 @@ def set_iam_policy(self, policy): resource=self.name, policy=policy, ) - response = api.set_iam_policy(request=request, metadata=metadata) + response = api.set_iam_policy( + request=request, + metadata=self.metadata_with_request_id(self._next_nth_request, 1, metadata), + ) return response @property diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py index c47c6ecfe3..981ba8d75a 100644 --- a/google/cloud/spanner_v1/pool.py +++ b/google/cloud/spanner_v1/pool.py @@ -561,7 +561,9 @@ def bind(self, database): while returned_session_count < self.size: resp = api.batch_create_sessions( request=request, - metadata=metadata, + metadata=database.metadata_with_request_id( + database._next_nth_request, 1, metadata + ), ) add_span_event( diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py index 163d5fe61e..9f187fc46d 100644 --- a/google/cloud/spanner_v1/session.py +++ b/google/cloud/spanner_v1/session.py @@ -168,7 +168,9 @@ def create(self): ): session_pb = api.create_session( request=request, - metadata=metadata, + metadata=self._database.metadata_with_request_id( + self._database._next_nth_request, 1, metadata + ), ) self._session_id = session_pb.name.split("/")[-1] @@ -257,7 +259,12 @@ def delete(self): }, observability_options=observability_options, ): - api.delete_session(name=self.name, metadata=metadata) + api.delete_session( + name=self.name, + metadata=database.metadata_with_request_id( + database._next_nth_request, 1, metadata + ), + ) def ping(self): """Ping the session to keep it alive by executing "SELECT 1". 
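Every ordinal fed into these headers comes from AtomicCounter in _helpers.py. The class itself only surfaces later in the series (patch 07 adds reset(), exposing the __lock/__value internals), so here is a sketch of the behavior the call sites rely on; increment() returning the post-increment value is an inference from it being passed directly as the attempt ordinal in later patches.

    import threading

    class AtomicCounter:
        # Sketch of the helper these diffs assume; the real one lives in
        # google/cloud/spanner_v1/_helpers.py.
        def __init__(self, start_value=0):
            self.__lock = threading.Lock()
            self.__value = start_value

        @property
        def value(self):
            with self.__lock:
                return self.__value

        def increment(self, n=1):
            # Returns the post-increment value so callers can use it
            # directly as the nth_request or attempt component.
            with self.__lock:
                self.__value += n
                return self.__value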
@@ -266,13 +273,17 @@ def ping(self):
         """
         if self._session_id is None:
             raise ValueError("Session ID not set by back-end")
-        api = self._database.spanner_api
         database = self._database
-        metadata = database.metadata_with_request_id(
-            database._next_nth_request, 1, _metadata_with_prefix(database.name)
-        )
+        api = database.spanner_api
         request = ExecuteSqlRequest(session=self.name, sql="SELECT 1")
-        api.execute_sql(request=request, metadata=metadata)
+        api.execute_sql(
+            request=request,
+            metadata=database.metadata_with_request_id(
+                database._next_nth_request,
+                1,
+                _metadata_with_prefix(database.name),
+            ),
+        )
         self._last_use_time = datetime.now()
 
     def snapshot(self, **kw):
diff --git a/tests/unit/test_pool.py b/tests/unit/test_pool.py
index 9b5d2c9885..a9845942c3 100644
--- a/tests/unit/test_pool.py
+++ b/tests/unit/test_pool.py
@@ -19,6 +19,11 @@
 from datetime import datetime, timedelta
 
 import mock
+from google.cloud.spanner_v1._helpers import (
+    _metadata_with_request_id,
+    AtomicCounter,
+)
+
 from google.cloud.spanner_v1._opentelemetry_tracing import trace_call
 from tests._helpers import (
     OpenTelemetryBase,
@@ -1183,6 +1188,9 @@ def session_id(self):
 
 
 class _Database(object):
+    NTH_REQUEST = AtomicCounter()
+    NTH_CLIENT_ID = AtomicCounter()
+
     def __init__(self, name):
         self.name = name
         self._sessions = []
@@ -1237,6 +1245,28 @@ def session(self, **kwargs):
     def observability_options(self):
         return dict(db_name=self.name)
 
+    @property
+    def _next_nth_request(self):
+        return self.NTH_REQUEST.increment()
+
+    @property
+    def _nth_client_id(self):
+        return self.NTH_CLIENT_ID.increment()
+
+    def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]):
+        client_id = self._nth_client_id
+        return _metadata_with_request_id(
+            self._nth_client_id,
+            self._channel_id,
+            nth_request,
+            nth_attempt,
+            prior_metadata,
+        )
+
+    @property
+    def _channel_id(self):
+        return 1
+
 
 class _Queue(object):
     _size = 1
diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py
index 55c91435f8..75ec0ba13a 100644
--- a/tests/unit/test_session.py
+++ b/tests/unit/test_session.py
@@ -22,6 +22,11 @@
     StatusCode,
     enrich_with_otel_scope,
 )
+from google.cloud.spanner_v1.request_id_header import REQ_RAND_PROCESS_ID
+from google.cloud.spanner_v1._helpers import (
+    _metadata_with_request_id,
+    AtomicCounter,
+)
 
 
 def _make_rpc_error(error_cls, trailing_metadata=None):
@@ -66,6 +71,7 @@ def _make_database(name=DATABASE_NAME, database_role=None):
     database.log_commit_stats = False
     database.database_role = database_role
     database._route_to_leader_enabled = True
+    database.NTH_CLIENT = AtomicCounter()
     return database
 
     @staticmethod
@@ -168,6 +174,10 @@ def test_create_w_database_role(self):
             metadata=[
                 ("google-cloud-resource-prefix", database.name),
                 ("x-goog-spanner-route-to-leader", "true"),
+                (
+                    "x-goog-spanner-request-id",
+                    f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1",
+                ),
             ],
         )
 
@@ -206,6 +216,10 @@ def test_create_session_span_annotations(self):
             metadata=[
                 ("google-cloud-resource-prefix", database.name),
                 ("x-goog-spanner-route-to-leader", "true"),
+                (
+                    "x-goog-spanner-request-id",
+                    f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1",
+                ),
             ],
         )
 
@@ -235,6 +249,10 @@ def test_create_wo_database_role(self):
             metadata=[
                 ("google-cloud-resource-prefix", database.name),
                 ("x-goog-spanner-route-to-leader", "true"),
+                (
+                    "x-goog-spanner-request-id",
+                    f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1",
+                ),
            ],
         )
 
@@ -265,6 +283,10 @@ def 
test_create_ok(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -298,6 +320,10 @@ def test_create_w_labels(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -483,7 +509,13 @@ def test_ping_hit(self): gax_api.execute_sql.assert_called_once_with( request=request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), + ], ) def test_ping_miss(self): @@ -507,7 +539,13 @@ def test_ping_miss(self): gax_api.execute_sql.assert_called_once_with( request=request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), + ], ) def test_ping_error(self): @@ -531,7 +569,13 @@ def test_ping_error(self): gax_api.execute_sql.assert_called_once_with( request=request, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), + ], ) def test_delete_wo_session_id(self): @@ -1722,6 +1766,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -1785,6 +1833,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) request = CommitRequest( @@ -1798,6 +1850,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -1885,6 +1941,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) ] @@ -1904,6 +1964,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) ] From 26d0b5d9781779b60c14b610a8530c6aa8c9f525 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Fri, 20 Dec 2024 05:15:47 -0800 Subject: [PATCH 03/16] Update test_spannner.test_transaction tests --- tests/unit/test_spanner.py | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/tests/unit/test_spanner.py b/tests/unit/test_spanner.py index fdf477279b..e2095bd671 100644 --- a/tests/unit/test_spanner.py +++ b/tests/unit/test_spanner.py @@ -651,7 +651,7 @@ def 
test_transaction_should_use_transaction_id_if_error_with_first_batch_update( ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.2.1", ), ], ) @@ -688,7 +688,7 @@ def test_transaction_should_use_transaction_id_returned_by_first_query(self): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.2.1", ), ], ) @@ -725,7 +725,7 @@ def test_transaction_should_use_transaction_id_returned_by_first_update(self): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.2.1", ), ], ) @@ -812,7 +812,7 @@ def test_transaction_should_use_transaction_id_returned_by_first_read(self): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.2.1", ), ], retry=RETRY, @@ -848,7 +848,7 @@ def test_transaction_should_use_transaction_id_returned_by_first_batch_update(se ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.2.1", ), ], retry=RETRY, @@ -906,7 +906,7 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.2.1", ), ], ) @@ -918,7 +918,7 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.3.1", ), ], retry=RETRY, @@ -965,7 +965,7 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.3.1", ), ], ) @@ -991,7 +991,7 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.2.1", ), ], retry=RETRY, @@ -1043,7 +1043,7 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.3.1", ), ], ) @@ -1069,7 +1069,7 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.2.1", ), ], retry=RETRY, @@ -1121,7 +1121,7 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ 
("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.3.1", ), ], ) @@ -1147,7 +1147,7 @@ def test_transaction_for_concurrent_statement_should_begin_one_transaction_with_ ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.2.1", ), ], retry=RETRY, @@ -1167,7 +1167,13 @@ def test_transaction_should_execute_sql_with_route_to_leader_disabled(self): api.execute_streaming_sql.assert_called_once_with( request=self._execute_sql_expected_request(database=database), - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + ) + ], timeout=TIMEOUT, retry=RETRY, ) From d74fa97d0bd0ee7c6d89c7fb73bff921eb7269a3 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Fri, 20 Dec 2024 06:23:50 -0800 Subject: [PATCH 04/16] Update test_session tests --- google/cloud/spanner_v1/database.py | 1 - google/cloud/spanner_v1/transaction.py | 1 + tests/unit/test_session.py | 94 +++++++++++++++++++++++--- tests/unit/test_spanner.py | 2 +- 4 files changed, 86 insertions(+), 12 deletions(-) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 84f0e2bafa..fd1d329f6f 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -464,7 +464,6 @@ def _channel_id(self): return channel_id def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): - client_id = self._nth_client_id return _metadata_with_request_id( self._nth_client_id, self._channel_id, diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py index 7f15c1ff00..ec6f88e44a 100644 --- a/google/cloud/spanner_v1/transaction.py +++ b/google/cloud/spanner_v1/transaction.py @@ -22,6 +22,7 @@ _merge_query_options, _metadata_with_prefix, _metadata_with_leader_aware_routing, + _metadata_with_request_id, _retry, _check_rst_stream_error, ) diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 75ec0ba13a..8dfacb9363 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -71,7 +71,20 @@ def _make_database(name=DATABASE_NAME, database_role=None): database.log_commit_stats = False database.database_role = database_role database._route_to_leader_enabled = True - database.NTH_CLIENT = AtomicCounter() + nth_client_id = AtomicCounter(1) + database.NTH_CLIENT = nth_client_id + next_nth_request = AtomicCounter(1) + + def metadata_with_request_id(nth_request, nth_attempt, prior_metadata=[]): + return _metadata_with_request_id( + nth_client_id.value, + 1, + next_nth_request.increment(), + nth_attempt, + prior_metadata, + ) + + database.metadata_with_request_id = metadata_with_request_id return database @staticmethod @@ -1095,6 +1108,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + ), ], ) @@ -1157,10 +1174,25 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + 
"x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + ), ], - ) - ] - * 2, + ), + mock.call( + session=self.SESSION_NAME, + options=expected_options, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.2", + ), + ], + ), + ], ) request = CommitRequest( session=self.SESSION_NAME, @@ -1176,10 +1208,24 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + ), ], - ) - ] - * 2, + ), + mock.call( + request=request, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.2", + ), + ], + ), + ], ) def test_run_in_transaction_w_abort_w_retry_metadata(self): @@ -1524,6 +1570,10 @@ def _time(_results=[1, 2, 4, 8]): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + ), ], ) ] @@ -1543,6 +1593,10 @@ def _time(_results=[1, 2, 4, 8]): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + ), ], ) ] @@ -1606,6 +1660,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + ), ], ) request = CommitRequest( @@ -1620,6 +1678,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + ), ], ) database.logger.info.assert_called_once_with( @@ -1676,6 +1738,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + ), ], ) request = CommitRequest( @@ -1690,6 +1756,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + ), ], ) database.logger.info.assert_not_called() @@ -1753,6 +1823,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + ), ], ) request = CommitRequest( @@ -1768,7 +1842,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", ), ], ) @@ -1835,7 +1909,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( 
"x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", ), ], ) @@ -1852,7 +1926,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", ), ], ) diff --git a/tests/unit/test_spanner.py b/tests/unit/test_spanner.py index e2095bd671..e59c6fd564 100644 --- a/tests/unit/test_spanner.py +++ b/tests/unit/test_spanner.py @@ -1172,7 +1172,7 @@ def test_transaction_should_execute_sql_with_route_to_leader_disabled(self): ( "x-goog-spanner-request-id", f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", - ) + ), ], timeout=TIMEOUT, retry=RETRY, From b531bdc787cd6c5e1134ffad9b61bbed00fae1eb Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Tue, 24 Dec 2024 17:55:56 -0800 Subject: [PATCH 05/16] Update tests --- google/cloud/spanner_v1/database.py | 17 +++++++++++++++-- google/cloud/spanner_v1/instance.py | 1 - google/cloud/spanner_v1/pool.py | 3 +-- google/cloud/spanner_v1/snapshot.py | 3 --- google/cloud/spanner_v1/testing/interceptors.py | 7 ------- google/cloud/spanner_v1/testing/mock_spanner.py | 6 +----- google/cloud/spanner_v1/transaction.py | 1 - tests/unit/test_atomic_counter.py | 1 - tests/unit/test_database.py | 1 - tests/unit/test_pool.py | 1 - tests/unit/test_request_id_header.py | 9 +++------ tests/unit/test_session.py | 16 ++++++++++++++++ tests/unit/test_snapshot.py | 1 - tests/unit/test_spanner.py | 1 - tests/unit/test_transaction.py | 1 - 15 files changed, 36 insertions(+), 33 deletions(-) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index fd1d329f6f..48691bccdc 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -749,10 +749,10 @@ def execute_partitioned_dml( _metadata_with_leader_aware_routing(self._route_to_leader_enabled) ) - # Attempt will be incremented inside _restart_on_unavailable. begin_txn_nth_request = self._next_nth_request - begin_txn_attempt = AtomicCounter(1) + begin_txn_attempt = AtomicCounter(0) partial_nth_request = self._next_nth_request + # partial_attempt will be incremented inside _restart_on_unavailable. partial_attempt = AtomicCounter(0) def execute_pdml(): @@ -762,6 +762,7 @@ def execute_pdml(): ) as span: with SessionCheckout(self._pool) as session: add_span_event(span, "Starting BeginTransaction") + begin_txn_attempt.increment() txn = api.begin_transaction( session=session.name, options=txn_options, metadata=self.metadata_with_request_id( @@ -798,6 +799,18 @@ def wrapped_method(*args, **kwargs): observability_options=self.observability_options, attempt=begin_txn_attempt, ) +<<<<<<< HEAD +======= + return method(*args, **kwargs) + + iterator = _restart_on_unavailable( + method=wrapped_method, + trace_name="CloudSpanner.ExecuteStreamingSql", + request=request, + transaction_selector=txn_selector, + observability_options=self.observability_options, + ) +>>>>>>> 54df502... 
Update tests result_set = StreamedResultSet(iterator) list(result_set) # consume all partials diff --git a/google/cloud/spanner_v1/instance.py b/google/cloud/spanner_v1/instance.py index 06ac1c4593..a67e0e630b 100644 --- a/google/cloud/spanner_v1/instance.py +++ b/google/cloud/spanner_v1/instance.py @@ -501,7 +501,6 @@ def database( proto_descriptors=proto_descriptors, ) else: - print("enabled interceptors") return TestDatabase( database_id, self, diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py index 981ba8d75a..95d4fb2c46 100644 --- a/google/cloud/spanner_v1/pool.py +++ b/google/cloud/spanner_v1/pool.py @@ -243,7 +243,6 @@ def bind(self, database): "CloudSpanner.FixedPool.BatchCreateSessions", observability_options=observability_options, ) as span: - attempt = 1 returned_session_count = 0 while not self._sessions.full(): request.session_count = requested_session_count - self._sessions.qsize() @@ -253,7 +252,7 @@ def bind(self, database): span_event_attributes, ) all_metadata = database.metadata_with_request_id( - database._next_nth_request, attempt, metadata + database._next_nth_request, 1, metadata ) resp = api.batch_create_sessions( request=request, diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py index b450f2e88c..63556c07b9 100644 --- a/google/cloud/spanner_v1/snapshot.py +++ b/google/cloud/spanner_v1/snapshot.py @@ -35,7 +35,6 @@ _merge_query_options, _metadata_with_prefix, _metadata_with_leader_aware_routing, - _metadata_with_request_id, _retry, _check_rst_stream_error, _SessionWrapper, @@ -60,7 +59,6 @@ def _restart_on_unavailable( transaction=None, transaction_selector=None, observability_options=None, - attempt=0, ): """Restart iteration after :exc:`.ServiceUnavailable`. 
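With the attempt parameter gone from _restart_on_unavailable (the next hunk also drops the in-loop attempt += 1), retry counting now lives entirely in the caller-supplied method. A condensed sketch of the pattern the series converges on, with api, metadata, and database assumed bound as in execute_partitioned_dml above:

    # Condensed illustration; api, metadata, and database are assumed bindings.
    nth_request = database._next_nth_request  # fixed once per logical request
    attempt = AtomicCounter(0)

    def wrapped_method(*args, **kwargs):
        # Each physical RPC attempt re-derives the metadata, so retries share
        # nth_request but carry a fresh trailing attempt ordinal.
        all_metadata = database.metadata_with_request_id(
            nth_request, attempt.increment(), metadata
        )
        return api.execute_streaming_sql(*args, metadata=all_metadata, **kwargs)

This keeps the header unique per wire-level attempt while still letting the backend correlate all attempts of one logical request.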
@@ -92,7 +90,6 @@ def _restart_on_unavailable( iterator = None while True: - attempt += 1 try: if iterator is None: with trace_call( diff --git a/google/cloud/spanner_v1/testing/interceptors.py b/google/cloud/spanner_v1/testing/interceptors.py index 918c7d76b9..4cd3abd306 100644 --- a/google/cloud/spanner_v1/testing/interceptors.py +++ b/google/cloud/spanner_v1/testing/interceptors.py @@ -91,13 +91,6 @@ def intercept(self, method, request_or_iterator, call_details): response_or_iterator = method(request_or_iterator, call_details) streaming = getattr(response_or_iterator, "__iter__", None) is not None - print( - "intercept got", - x_goog_request_id, - call_details.method, - "streaming", - streaming, - ) with self.__lock: if streaming: self._stream_req_segments.append( diff --git a/google/cloud/spanner_v1/testing/mock_spanner.py b/google/cloud/spanner_v1/testing/mock_spanner.py index f60dbbe72a..13ead85b13 100644 --- a/google/cloud/spanner_v1/testing/mock_spanner.py +++ b/google/cloud/spanner_v1/testing/mock_spanner.py @@ -22,8 +22,6 @@ from google.cloud.spanner_v1 import ( TransactionOptions, ResultSetMetadata, - ExecuteSqlRequest, - ExecuteBatchDmlRequest, ) from google.cloud.spanner_v1.testing.mock_database_admin import DatabaseAdminServicer import google.cloud.spanner_v1.testing.spanner_database_admin_pb2_grpc as database_admin_grpc @@ -186,9 +184,7 @@ def BeginTransaction(self, request, context): self._requests.append(request) return self.__create_transaction(request.session, request.options) - def __maybe_create_transaction( - self, request: ExecuteSqlRequest | ExecuteBatchDmlRequest - ): + def __maybe_create_transaction(self, request): started_transaction = None if not request.transaction.begin == TransactionOptions(): started_transaction = self.__create_transaction( diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py index ec6f88e44a..7f15c1ff00 100644 --- a/google/cloud/spanner_v1/transaction.py +++ b/google/cloud/spanner_v1/transaction.py @@ -22,7 +22,6 @@ _merge_query_options, _metadata_with_prefix, _metadata_with_leader_aware_routing, - _metadata_with_request_id, _retry, _check_rst_stream_error, ) diff --git a/tests/unit/test_atomic_counter.py b/tests/unit/test_atomic_counter.py index d1b22cf841..92d10cac79 100644 --- a/tests/unit/test_atomic_counter.py +++ b/tests/unit/test_atomic_counter.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import time import random import threading import unittest diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 9ec8b5ddfb..3be792265f 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -3261,7 +3261,6 @@ def _nth_client_id(self): return self._instance._client._nth_client_id def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): - client_id = self._nth_client_id return _metadata_with_request_id( self._nth_client_id, self._channel_id, diff --git a/tests/unit/test_pool.py b/tests/unit/test_pool.py index a9845942c3..8e2baf976b 100644 --- a/tests/unit/test_pool.py +++ b/tests/unit/test_pool.py @@ -1254,7 +1254,6 @@ def _nth_client_id(self): return self.NTH_CLIENT_ID.increment() def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): - client_id = self._nth_client_id return _metadata_with_request_id( self._nth_client_id, self._channel_id, diff --git a/tests/unit/test_request_id_header.py b/tests/unit/test_request_id_header.py index 35c7de7410..e399593d9c 100644 --- a/tests/unit/test_request_id_header.py +++ b/tests/unit/test_request_id_header.py @@ -19,7 +19,6 @@ MockServerTestBase, add_select1_result, ) -from google.cloud.spanner_v1.testing.interceptors import XGoogRequestIDHeaderInterceptor from google.cloud.spanner_v1 import ( BatchCreateSessionsRequest, BeginTransactionRequest, @@ -99,8 +98,6 @@ def select1(): if n_finished == len(threads): break - time.sleep(1) - requests = self.spanner_service.requests self.assertEqual(n * 2, len(requests), msg=requests) @@ -252,7 +249,7 @@ def test_database_execute_partitioned_dml_request_id(self): assert got_unary_segments == want_unary_segments assert got_stream_segments == want_stream_segments - def test_snapshot_read(self): + def test_snapshot_read_with_request_ids(self): add_select1_result() if not getattr(self.database, "_interceptors", None): self.database._interceptors = MockServerTestBase._interceptors @@ -269,8 +266,8 @@ def test_snapshot_read(self): self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest)) self.assertTrue(isinstance(requests[1], ExecuteSqlRequest)) - requests = self.spanner_service.requests - self.assertEqual(n * 2, len(requests), msg=requests) + # requests = self.spanner_service.requests + # self.assertEqual(n * 2, len(requests), msg=requests) client_id = self.database._nth_client_id channel_id = self.database._channel_id diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 8dfacb9363..cb1f6b74e1 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -1040,6 +1040,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + ), ], ) request = CommitRequest( @@ -1053,6 +1057,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + ), ], ) @@ -1400,6 +1408,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + ), ], ) request = CommitRequest( @@ -1413,6 +1425,10 @@ def unit_of_work(txn, *args, **kw): metadata=[ 
("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + ), ], ) diff --git a/tests/unit/test_snapshot.py b/tests/unit/test_snapshot.py index 68e7ff1739..e07c77d09f 100644 --- a/tests/unit/test_snapshot.py +++ b/tests/unit/test_snapshot.py @@ -1886,7 +1886,6 @@ def _nth_client_id(self): return self._instance._client._nth_client_id def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): - client_id = self._nth_client_id return _metadata_with_request_id( self._nth_client_id, self._channel_id, diff --git a/tests/unit/test_spanner.py b/tests/unit/test_spanner.py index e59c6fd564..327b7ef49b 100644 --- a/tests/unit/test_spanner.py +++ b/tests/unit/test_spanner.py @@ -1216,7 +1216,6 @@ def _nth_client_id(self): return self._instance._client._nth_client_id def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): - client_id = self._nth_client_id return _metadata_with_request_id( self._nth_client_id, self._channel_id, diff --git a/tests/unit/test_transaction.py b/tests/unit/test_transaction.py index 08ae928d4c..8169a873f6 100644 --- a/tests/unit/test_transaction.py +++ b/tests/unit/test_transaction.py @@ -1058,7 +1058,6 @@ def _nth_client_id(self): return self._instance._client._nth_client_id def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): - client_id = self._nth_client_id return _metadata_with_request_id( self._nth_client_id, self._channel_id, From a79d32c549738f69f722a8dae9da5edbf4aed8dd Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Tue, 24 Dec 2024 19:33:56 -0800 Subject: [PATCH 06/16] Update propagation of changes --- google/cloud/spanner_v1/client.py | 4 +- google/cloud/spanner_v1/session.py | 7 +- tests/unit/test_session.py | 225 +++++++++++++++++++++++------ 3 files changed, 187 insertions(+), 49 deletions(-) diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py index 772abe8261..31556a66e7 100644 --- a/google/cloud/spanner_v1/client.py +++ b/google/cloud/spanner_v1/client.py @@ -25,6 +25,7 @@ """ import grpc import os +import sys import warnings from google.api_core.gapic_v1 import client_info @@ -203,10 +204,11 @@ def __init__( self._directed_read_options = directed_read_options self._observability_options = observability_options self._nth_client_id = Client.NTH_CLIENT.increment() - self._nth_request = AtomicCounter() + self._nth_request = AtomicCounter(0) @property def _next_nth_request(self): + print("next_nth_request called by", sys._getframe().f_back.f_code.co_name) return self._nth_request.increment() @property diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py index 9f187fc46d..20eb060855 100644 --- a/google/cloud/spanner_v1/session.py +++ b/google/cloud/spanner_v1/session.py @@ -259,12 +259,7 @@ def delete(self): }, observability_options=observability_options, ): - api.delete_session( - name=self.name, - metadata=database.metadata_with_request_id( - database._next_nth_request, 1, metadata - ), - ) + api.delete_session(name=self.name, metadata=metadata) def ping(self): """Ping the session to keep it alive by executing "SELECT 1". 
diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index cb1f6b74e1..d6b7b369db 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -73,7 +73,7 @@ def _make_database(name=DATABASE_NAME, database_role=None): database._route_to_leader_enabled = True nth_client_id = AtomicCounter(1) database.NTH_CLIENT = nth_client_id - next_nth_request = AtomicCounter(1) + next_nth_request = AtomicCounter(0) def metadata_with_request_id(nth_request, nth_attempt, prior_metadata=[]): return _metadata_with_request_id( @@ -386,6 +386,10 @@ def test_exists_hit(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -414,6 +418,10 @@ def test_exists_hit_wo_span(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -436,6 +444,10 @@ def test_exists_miss(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -465,6 +477,10 @@ def test_exists_miss_wo_span(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -488,6 +504,10 @@ def test_exists_error(self): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) @@ -612,7 +632,13 @@ def test_delete_hit(self): gax_api.delete_session.assert_called_once_with( name=self.SESSION_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), + ], ) attrs = {"session.id": session._session_id, "session.name": session.name} @@ -637,7 +663,13 @@ def test_delete_miss(self): gax_api.delete_session.assert_called_once_with( name=self.SESSION_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), + ], ) attrs = {"session.id": session._session_id, "session.name": session.name} @@ -664,7 +696,13 @@ def test_delete_error(self): gax_api.delete_session.assert_called_once_with( name=self.SESSION_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], + metadata=[ + ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), + ], ) attrs = {"session.id": session._session_id, "session.name": session.name} @@ -1042,7 +1080,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", ), ], ) @@ -1059,7 +1097,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( 
"x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", ), ], ) @@ -1118,7 +1156,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", ), ], ) @@ -1184,7 +1222,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", ), ], ), @@ -1196,7 +1234,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.2", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", ), ], ), @@ -1218,7 +1256,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", ), ], ), @@ -1229,7 +1267,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.2", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.4.1", ), ], ), @@ -1310,10 +1348,25 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), + ], + ), + mock.call( + session=self.SESSION_NAME, + options=expected_options, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + ), ], - ) - ] - * 2, + ), + ], ) request = CommitRequest( session=self.SESSION_NAME, @@ -1329,10 +1382,24 @@ def unit_of_work(txn, *args, **kw): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + ), + ], + ), + mock.call( + request=request, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.4.1", + ), ], - ) - ] - * 2, + ), + ], ) def test_run_in_transaction_w_callback_raises_abort_wo_metadata(self): @@ -1410,7 +1477,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", ), ], ) @@ -1427,7 +1494,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", ), ], ) @@ -1506,6 +1573,10 @@ def _time(_results=[1, 1.5]): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + 
f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), ], ) request = CommitRequest( @@ -1519,6 +1590,10 @@ def _time(_results=[1, 1.5]): metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + ), ], ) @@ -1577,6 +1652,7 @@ def _time(_results=[1, 2, 4, 8]): self.assertEqual(kw, {}) expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) + print("gax_api", gax_api.begin_transaction.call_args_list[2]) self.assertEqual( gax_api.begin_transaction.call_args_list, [ @@ -1588,12 +1664,35 @@ def _time(_results=[1, 2, 4, 8]): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + ), + ], + ), + mock.call( + session=self.SESSION_NAME, + options=expected_options, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", ), ], - ) - ] - * 3, + ), + mock.call( + session=self.SESSION_NAME, + options=expected_options, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.5.1", + ), + ], + ), + ], ) request = CommitRequest( session=self.SESSION_NAME, @@ -1611,12 +1710,33 @@ def _time(_results=[1, 2, 4, 8]): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", ), ], - ) - ] - * 3, + ), + mock.call( + request=request, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.4.1", + ), + ], + ), + mock.call( + request=request, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.6.1", + ), + ], + ), + ], ) def test_run_in_transaction_w_commit_stats_success(self): @@ -1678,7 +1798,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", ), ], ) @@ -1696,7 +1816,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", ), ], ) @@ -1756,7 +1876,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", ), ], ) @@ -1774,7 +1894,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", ), ], ) @@ -1841,7 +1961,7 @@ def 
unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", ), ], ) @@ -1858,7 +1978,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", ), ], ) @@ -1925,7 +2045,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", ), ], ) @@ -1942,7 +2062,7 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", ), ], ) @@ -2036,9 +2156,20 @@ def unit_of_work(txn, *args, **kw): f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", ), ], - ) - ] - * 2, + ), + mock.call( + session=self.SESSION_NAME, + options=expected_options, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.3.1", + ), + ], + ), + ], ) request = CommitRequest( session=self.SESSION_NAME, @@ -2056,12 +2187,22 @@ def unit_of_work(txn, *args, **kw): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.2.1", ), ], - ) - ] - * 2, + ), + mock.call( + request=request, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database.NTH_CLIENT.value}.1.4.1", + ), + ], + ), + ], ) def test_delay_helper_w_no_delay(self): From 37d548e341a5b233f4e5dd2ec8d9fec21c1308d2 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Wed, 25 Dec 2024 04:15:03 -0800 Subject: [PATCH 07/16] Plumb test_context_manager --- google/cloud/spanner_v1/_helpers.py | 4 ++ google/cloud/spanner_v1/batch.py | 43 ++++++++++++++----- google/cloud/spanner_v1/client.py | 2 - google/cloud/spanner_v1/database.py | 4 +- .../mockserver_tests/mock_server_test_base.py | 2 + tests/unit/test_batch.py | 34 +++++++++++++++ tests/unit/test_database.py | 8 +++- 7 files changed, 82 insertions(+), 15 deletions(-) diff --git a/google/cloud/spanner_v1/_helpers.py b/google/cloud/spanner_v1/_helpers.py index 27e53200ed..756ab13ab1 100644 --- a/google/cloud/spanner_v1/_helpers.py +++ b/google/cloud/spanner_v1/_helpers.py @@ -641,6 +641,10 @@ def __radd__(self, n): """ return self.__add__(n) + def reset(self): + with self.__lock: + self.__value = 0 + def _metadata_with_request_id(*args, **kwargs): return with_request_id(*args, **kwargs) diff --git a/google/cloud/spanner_v1/batch.py b/google/cloud/spanner_v1/batch.py index 6a9f1f48f5..8ec84b75ab 100644 --- a/google/cloud/spanner_v1/batch.py +++ b/google/cloud/spanner_v1/batch.py @@ -25,6 +25,7 @@ from google.cloud.spanner_v1._helpers import ( _metadata_with_prefix, _metadata_with_leader_aware_routing, + AtomicCounter, ) from google.cloud.spanner_v1._opentelemetry_tracing import trace_call from 
google.cloud.spanner_v1 import RequestOptions @@ -227,11 +228,22 @@ def commit( trace_attributes, observability_options=observability_options, ): - method = functools.partial( - api.commit, - request=request, - metadata=metadata, - ) + attempt = AtomicCounter(0) + next_nth_request = database._next_nth_request + + def wrapped_method(*args, **kwargs): + all_metadata = database.metadata_with_request_id( + next_nth_request, + attempt.increment(), + metadata, + ) + method = functools.partial( + api.commit, + request=request, + metadata=all_metadata, + ) + return method(*args, **kwargs) + deadline = time.time() + kwargs.get( "timeout_secs", DEFAULT_RETRY_TIMEOUT_SECS ) @@ -349,11 +361,22 @@ def batch_write(self, request_options=None, exclude_txn_from_change_streams=Fals trace_attributes, observability_options=observability_options, ): - method = functools.partial( - api.batch_write, - request=request, - metadata=metadata, - ) + attempt = AtomicCounter(0) + next_nth_request = database._next_nth_request + + def wrapped_method(*args, **kwargs): + all_metadata = database.metadata_with_request_id( + next_nth_request, + attempt.increment(), + metadata, + ) + method = functools.partial( + api.batch_write, + request=request, + metadata=all_metadata, + ) + return method(*args, **kwargs) + response = _retry( method, allowed_exceptions={ diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py index 31556a66e7..58c2e1f552 100644 --- a/google/cloud/spanner_v1/client.py +++ b/google/cloud/spanner_v1/client.py @@ -25,7 +25,6 @@ """ import grpc import os -import sys import warnings from google.api_core.gapic_v1 import client_info @@ -208,7 +207,6 @@ def __init__( @property def _next_nth_request(self): - print("next_nth_request called by", sys._getframe().f_back.f_code.co_name) return self._nth_request.increment() @property diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 48691bccdc..2cd1b9b6ed 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -821,7 +821,9 @@ def wrapped_method(*args, **kwargs): @property def _next_nth_request(self): - return self._instance._client._next_nth_request + if self._instance and self._instance._client: + return self._instance._client._next_nth_request + return 1 @property def _nth_client_id(self): diff --git a/tests/mockserver_tests/mock_server_test_base.py b/tests/mockserver_tests/mock_server_test_base.py index 3a826720c1..474d078052 100644 --- a/tests/mockserver_tests/mock_server_test_base.py +++ b/tests/mockserver_tests/mock_server_test_base.py @@ -20,6 +20,7 @@ start_mock_server, SpannerServicer, ) +from google.cloud.spanner_v1.client import Client import google.cloud.spanner_v1.types.type as spanner_type import google.cloud.spanner_v1.types.result_set as result_set from google.api_core.client_options import ClientOptions @@ -154,6 +155,7 @@ def teardown_class(cls): if MockServerTestBase.server is not None: MockServerTestBase.server.stop(grace=None) MockServerTestBase.server = None + Client.NTH_CLIENT.reset() def setup_method(self, *args, **kwargs): self._client = None diff --git a/tests/unit/test_batch.py b/tests/unit/test_batch.py index eb5069b497..2589650d28 100644 --- a/tests/unit/test_batch.py +++ b/tests/unit/test_batch.py @@ -21,6 +21,10 @@ enrich_with_otel_scope, ) from google.cloud.spanner_v1 import RequestOptions +from google.cloud.spanner_v1._helpers import ( + _metadata_with_request_id, +) +from google.cloud.spanner_v1.request_id_header import 
REQ_RAND_PROCESS_ID TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] @@ -467,6 +471,10 @@ def test_context_mgr_success(self): [ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{_Database.NTH_CLIENT}.1.1.1", + ), ], ) self.assertEqual(request_options, RequestOptions()) @@ -654,6 +662,32 @@ def session_id(self): class _Database(object): name = "testing" _route_to_leader_enabled = True + NTH_CLIENT = 1 + + def __init__(self): + self._nth_request = 0 + + @property + def _next_nth_request(self): + self._nth_request += 1 + return self._nth_request + + @property + def _nth_client_id(self): + return 1 + + def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): + return _metadata_with_request_id( + self._nth_client_id, + self._channel_id, + nth_request, + nth_attempt, + prior_metadata, + ) + + @property + def _channel_id(self): + return 1 class _FauxSpannerAPI: diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 3be792265f..7c82750b11 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -3254,11 +3254,15 @@ def __init__(self, name, instance=None): @property def _next_nth_request(self): - return self._instance._client._next_nth_request + if self._instance and self._instance._client: + return self._instance._client._next_nth_request + return 1 @property def _nth_client_id(self): - return self._instance._client._nth_client_id + if self._instance and self._instance._client: + return self._instance._client._nth_client_id + return 1 def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): return _metadata_with_request_id( From 39c048f0ad79ea3e69b61b6dae043595a30c8a64 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Wed, 25 Dec 2024 05:31:08 -0800 Subject: [PATCH 08/16] Fix more tests --- tests/mockserver_tests/mock_server_test_base.py | 3 +++ tests/unit/test_database.py | 8 ++++---- tests/unit/test_request_id_header.py | 14 +++++++++----- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/tests/mockserver_tests/mock_server_test_base.py b/tests/mockserver_tests/mock_server_test_base.py index 474d078052..c07907f476 100644 --- a/tests/mockserver_tests/mock_server_test_base.py +++ b/tests/mockserver_tests/mock_server_test_base.py @@ -155,6 +155,9 @@ def teardown_class(cls): if MockServerTestBase.server is not None: MockServerTestBase.server.stop(grace=None) MockServerTestBase.server = None + self.reset() + + def reset(self): Client.NTH_CLIENT.reset() def setup_method(self, *args, **kwargs): diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 7c82750b11..535cd4b156 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -1918,7 +1918,7 @@ def test_context_mgr_success(self): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.1.1.1", ), ], ) @@ -1969,7 +1969,7 @@ def test_context_mgr_w_commit_stats_success(self): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.1.1.1", ), ], ) @@ -2020,7 +2020,7 @@ def test_context_mgr_w_aborted_commit_status(self): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - 
f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.1.1.1", ), ], ) @@ -3098,7 +3098,7 @@ def test_context_mgr_success(self): ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.1.1.1", ), ], ) diff --git a/tests/unit/test_request_id_header.py b/tests/unit/test_request_id_header.py index e399593d9c..280b8b24cf 100644 --- a/tests/unit/test_request_id_header.py +++ b/tests/unit/test_request_id_header.py @@ -50,18 +50,20 @@ def test_snapshot_read(self): self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest)) self.assertTrue(isinstance(requests[1], ExecuteSqlRequest)) + NTH_CLIENT = self.database._nth_client_id + CHANNEL_ID = self.database._channel_id # Now ensure monotonicity of the received request-id segments. got_stream_segments, got_unary_segments = self.canonicalize_request_id_headers() want_unary_segments = [ ( "/google.spanner.v1.Spanner/BatchCreateSessions", - (1, REQ_RAND_PROCESS_ID, 1, 1, 1, 1), + (1, REQ_RAND_PROCESS_ID, NTH_CLIENT, CHANNEL_ID, 1, 1), ) ] want_stream_segments = [ ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, 1, 1, 2, 1), + (1, REQ_RAND_PROCESS_ID, NTH_CLIENT, CHANNEL_ID, 2, 1), ) ] @@ -229,20 +231,22 @@ def test_database_execute_partitioned_dml_request_id(self): # Now ensure monotonicity of the received request-id segments. got_stream_segments, got_unary_segments = self.canonicalize_request_id_headers() + NTH_CLIENT = self.database._nth_client_id + CHANNEL_ID = self.database._channel_id want_unary_segments = [ ( "/google.spanner.v1.Spanner/BatchCreateSessions", - (1, REQ_RAND_PROCESS_ID, 1, 1, 1, 1), + (1, REQ_RAND_PROCESS_ID, NTH_CLIENT, CHANNEL_ID, 1, 1), ), ( "/google.spanner.v1.Spanner/BeginTransaction", - (1, REQ_RAND_PROCESS_ID, 1, 1, 2, 1), + (1, REQ_RAND_PROCESS_ID, NTH_CLIENT, CHANNEL_ID, 2, 1), ), ] want_stream_segments = [ ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, 1, 1, 3, 1), + (1, REQ_RAND_PROCESS_ID, NTH_CLIENT, CHANNEL_ID, 3, 1), ) ] From 05b6721cd6cb5e2e61bf49e9aeb676b5fdb37032 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Wed, 25 Dec 2024 05:58:47 -0800 Subject: [PATCH 09/16] More test plumbing --- tests/unit/test_atomic_counter.py | 1 + tests/unit/test_batch.py | 8 ++ tests/unit/test_database.py | 3 +- tests/unit/test_request_id_header.py | 127 ++------------------------- 4 files changed, 19 insertions(+), 120 deletions(-) diff --git a/tests/unit/test_atomic_counter.py b/tests/unit/test_atomic_counter.py index 92d10cac79..e8d8b6b7ce 100644 --- a/tests/unit/test_atomic_counter.py +++ b/tests/unit/test_atomic_counter.py @@ -15,6 +15,7 @@ import random import threading import unittest + from google.cloud.spanner_v1._helpers import AtomicCounter diff --git a/tests/unit/test_batch.py b/tests/unit/test_batch.py index 2589650d28..a1f7b63087 100644 --- a/tests/unit/test_batch.py +++ b/tests/unit/test_batch.py @@ -260,6 +260,10 @@ def test_commit_ok(self): [ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.1.1.1", + ), ], ) self.assertEqual(request_options, RequestOptions()) @@ -609,6 +613,10 @@ def _test_batch_write_with_request_options( [ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + 
"x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.1.1.1", + ), ], ) if request_options is None: diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 535cd4b156..dac4783ce0 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -1204,7 +1204,7 @@ def _execute_partitioned_dml_helper( ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", ), ], ) @@ -3220,6 +3220,7 @@ def __init__( self.directed_read_options = directed_read_options self._nth_client_id = _Client.NTH_CLIENT.increment() self._nth_request = AtomicCounter() + self.credentials = {} @property def _next_nth_request(self): diff --git a/tests/unit/test_request_id_header.py b/tests/unit/test_request_id_header.py index 280b8b24cf..a49d0521ce 100644 --- a/tests/unit/test_request_id_header.py +++ b/tests/unit/test_request_id_header.py @@ -15,25 +15,26 @@ import random import threading -from tests.mockserver_tests.mock_server_test_base import ( - MockServerTestBase, - add_select1_result, -) +from google.api_core.exceptions import Aborted +from google.rpc import code_pb2 + from google.cloud.spanner_v1 import ( BatchCreateSessionsRequest, BeginTransactionRequest, ExecuteSqlRequest, ) -from google.api_core.exceptions import Aborted -from google.rpc import code_pb2 from google.cloud.spanner_v1.request_id_header import REQ_RAND_PROCESS_ID +from tests.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_select1_result, +) class TestRequestIDHeader(MockServerTestBase): def tearDown(self): self.database._x_goog_request_id_interceptor.reset() - def test_snapshot_read(self): + def test_snapshot_execute_sql(self): add_select1_result() if not getattr(self.database, "_interceptors", None): self.database._interceptors = MockServerTestBase._interceptors @@ -253,118 +254,6 @@ def test_database_execute_partitioned_dml_request_id(self): assert got_unary_segments == want_unary_segments assert got_stream_segments == want_stream_segments - def test_snapshot_read_with_request_ids(self): - add_select1_result() - if not getattr(self.database, "_interceptors", None): - self.database._interceptors = MockServerTestBase._interceptors - with self.database.snapshot() as snapshot: - results = snapshot.read("select 1") - result_list = [] - for row in results: - result_list.append(row) - self.assertEqual(1, row[0]) - self.assertEqual(1, len(result_list)) - - requests = self.spanner_service.requests - self.assertEqual(2, len(requests), msg=requests) - self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest)) - self.assertTrue(isinstance(requests[1], ExecuteSqlRequest)) - - # requests = self.spanner_service.requests - # self.assertEqual(n * 2, len(requests), msg=requests) - - client_id = self.database._nth_client_id - channel_id = self.database._channel_id - got_stream_segments, got_unary_segments = self.canonicalize_request_id_headers() - - want_unary_segments = [ - ( - "/google.spanner.v1.Spanner/BatchCreateSessions", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 1, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 3, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 5, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 7, 1), - 
), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 9, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 11, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 13, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 15, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 17, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 19, 1), - ), - ] - assert got_unary_segments == want_unary_segments - - want_stream_segments = [ - ( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 2, 1), - ), - ( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 4, 1), - ), - ( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 6, 1), - ), - ( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 8, 1), - ), - ( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 10, 1), - ), - ( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 12, 1), - ), - ( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 14, 1), - ), - ( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 16, 1), - ), - ( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 18, 1), - ), - ( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 20, 1), - ), - ] - assert got_stream_segments == want_stream_segments - def canonicalize_request_id_headers(self): src = self.database._x_goog_request_id_interceptor return src._stream_req_segments, src._unary_req_segments From 9fe7d401b491d21946cb4c795238edf5862d6321 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Fri, 27 Dec 2024 06:56:35 -0800 Subject: [PATCH 10/16] Infer database._channel_id only once along with spanner_api --- google/cloud/spanner_v1/database.py | 25 ++- .../mockserver_tests/mock_server_test_base.py | 5 +- tests/unit/test_database.py | 154 ++++++++++++++++-- 3 files changed, 152 insertions(+), 32 deletions(-) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 2cd1b9b6ed..e40c0d54a3 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -188,6 +188,7 @@ def __init__( self._reconciling = False self._directed_read_options = self._instance._client.directed_read_options self._proto_descriptors = proto_descriptors + self._channel_id = 0 # It'll be created when _spanner_api is created. if pool is None: pool = BurstyPool(database_role=database_role) @@ -446,22 +447,16 @@ def spanner_api(self): client_info=client_info, client_options=client_options, ) - return self._spanner_api - @property - def _channel_id(self): - """ - Helper to retrieve the associated channelID for the spanner_api. - This property is paramount to x-goog-spanner-request-id. 
- """ - with self.__transport_lock: - api = self.spanner_api - channel_id = self.__transports_to_channel_id.get(api._transport, None) - if channel_id is None: - channel_id = len(self.__transports_to_channel_id) + 1 - self.__transports_to_channel_id[api._transport] = channel_id - - return channel_id + with self.__transport_lock: + transport = self._spanner_api._transport + channel_id = self.__transports_to_channel_id.get(transport, None) + if channel_id is None: + channel_id = len(self.__transports_to_channel_id) + 1 + self.__transports_to_channel_id[transport] = channel_id + self._channel_id = channel_id + + return self._spanner_api def metadata_with_request_id(self, nth_request, nth_attempt, prior_metadata=[]): return _metadata_with_request_id( diff --git a/tests/mockserver_tests/mock_server_test_base.py b/tests/mockserver_tests/mock_server_test_base.py index c07907f476..fec056b523 100644 --- a/tests/mockserver_tests/mock_server_test_base.py +++ b/tests/mockserver_tests/mock_server_test_base.py @@ -154,11 +154,8 @@ def setup_class(cls): def teardown_class(cls): if MockServerTestBase.server is not None: MockServerTestBase.server.stop(grace=None) + Client.NTH_CLIENT.reset() MockServerTestBase.server = None - self.reset() - - def reset(self): - Client.NTH_CLIENT.reset() def setup_method(self, *args, **kwargs): self._client = None diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index dac4783ce0..e715decdb8 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -554,6 +554,10 @@ def test_create_grpc_error(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -583,6 +587,10 @@ def test_create_already_exists(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -611,6 +619,10 @@ def test_create_instance_not_found(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -649,6 +661,10 @@ def test_create_success(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -688,6 +704,10 @@ def test_create_success_w_encryption_config_dict(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -725,6 +745,10 @@ def test_create_success_w_proto_descriptors(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -745,6 +769,10 @@ def test_exists_grpc_error(self): database=self.DATABASE_NAME, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -764,6 +792,10 @@ def test_exists_not_found(self): database=self.DATABASE_NAME, metadata=[ 
("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -785,6 +817,10 @@ def test_exists_success(self): database=self.DATABASE_NAME, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -805,6 +841,10 @@ def test_reload_grpc_error(self): database=self.DATABASE_NAME, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -825,6 +865,10 @@ def test_reload_not_found(self): database=self.DATABASE_NAME, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -886,12 +930,20 @@ def test_reload_success(self): database=self.DATABASE_NAME, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) api.get_database.assert_called_once_with( name=self.DATABASE_NAME, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.2.1", + ), ], ) @@ -920,6 +972,10 @@ def test_update_ddl_grpc_error(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -948,6 +1004,10 @@ def test_update_ddl_not_found(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -977,6 +1037,10 @@ def test_update_ddl(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1006,6 +1070,10 @@ def test_update_ddl_w_operation_id(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1034,6 +1102,10 @@ def test_update_success(self): update_mask=field_mask, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1064,6 +1136,10 @@ def test_update_ddl_w_proto_descriptors(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1084,6 +1160,10 @@ def test_drop_grpc_error(self): database=self.DATABASE_NAME, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1104,6 +1184,10 @@ def test_drop_not_found(self): database=self.DATABASE_NAME, metadata=[ ("google-cloud-resource-prefix", 
database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1123,6 +1207,10 @@ def test_drop_success(self): database=self.DATABASE_NAME, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1196,22 +1284,34 @@ def _execute_partitioned_dml_helper( exclude_txn_from_change_streams=exclude_txn_from_change_streams, ) - api.begin_transaction.assert_called_with( - session=session.name, - options=txn_options, - metadata=[ - ("google-cloud-resource-prefix", database.name), - ("x-goog-spanner-route-to-leader", "true"), - ( - "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", - ), - ], - ) if retried: self.assertEqual(api.begin_transaction.call_count, 2) + api.begin_transaction.assert_called_with( + session=session.name, + options=txn_options, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.2", + ), + ], + ) else: self.assertEqual(api.begin_transaction.call_count, 1) + api.begin_transaction.assert_called_with( + session=session.name, + options=txn_options, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), + ], + ) if params: expected_params = Struct( @@ -1249,7 +1349,7 @@ def _execute_partitioned_dml_helper( ("x-goog-spanner-route-to-leader", "true"), ( "x-goog-spanner-request-id", - f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.1.1.1", + f"1.{REQ_RAND_PROCESS_ID}.{_Client.NTH_CLIENT.value}.{database._channel_id}.2.1", ), ], ) @@ -1271,6 +1371,10 @@ def _execute_partitioned_dml_helper( metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.2.2", + ), ], ) self.assertEqual(api.execute_streaming_sql.call_count, 2) @@ -1547,6 +1651,10 @@ def test_restore_grpc_error(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1575,6 +1683,10 @@ def test_restore_not_found(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1614,6 +1726,10 @@ def test_restore_success(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1657,6 +1773,10 @@ def test_restore_success_w_encryption_config_dict(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1806,6 +1926,10 @@ def test_list_database_roles_grpc_error(self): request=expected_request, metadata=[ 
("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) @@ -1829,6 +1953,10 @@ def test_list_database_roles_defaults(self): request=expected_request, metadata=[ ("google-cloud-resource-prefix", database.name), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) self.assertIsNotNone(resp) From 2344fa3b22dc57719c5f04195b5bbea8d9b047e7 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Fri, 27 Dec 2024 07:11:46 -0800 Subject: [PATCH 11/16] Update batch tests --- tests/unit/test_batch.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/unit/test_batch.py b/tests/unit/test_batch.py index a1f7b63087..26a8210918 100644 --- a/tests/unit/test_batch.py +++ b/tests/unit/test_batch.py @@ -363,6 +363,10 @@ def _test_commit_with_options( [ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), + ( + "x-goog-spanner-request-id", + f"1.{REQ_RAND_PROCESS_ID}.{database._nth_client_id}.{database._channel_id}.1.1", + ), ], ) self.assertEqual(actual_request_options, expected_request_options) From a22d2779818ad9be5a10d7a5348c8d99d12a0700 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?= Date: Fri, 27 Dec 2024 20:25:48 +0100 Subject: [PATCH 12/16] test: add tests for retries and move mock server test to correct directory This commit contains a couple of changes: 1. Moves the request-ID tests using the mock server to the mockserver tests directory. 2. Adds tests for retries of RPCs. These currently fail, as the request ID is not updated correctly when a retry is executed for a unary RPC. Also, the retry test for streaming RPCs fails, but this is due to an existing bug in the client lib. --- .../cloud/spanner_v1/testing/mock_spanner.py | 1 + noxfile.py | 2 +- .../mockserver_tests/mock_server_test_base.py | 20 ++ .../test_request_id_header.py | 171 +++++++++++------- 4 files changed, 124 insertions(+), 70 deletions(-) rename tests/{unit => mockserver_tests}/test_request_id_header.py (66%) diff --git a/google/cloud/spanner_v1/testing/mock_spanner.py b/google/cloud/spanner_v1/testing/mock_spanner.py index 13ead85b13..f8971a6098 100644 --- a/google/cloud/spanner_v1/testing/mock_spanner.py +++ b/google/cloud/spanner_v1/testing/mock_spanner.py @@ -105,6 +105,7 @@ def CreateSession(self, request, context): def BatchCreateSessions(self, request, context): self._requests.append(request) + self.mock_spanner.pop_error(context) sessions = [] for i in range(request.session_count): sessions.append( diff --git a/noxfile.py b/noxfile.py index f32c24f1e3..c7c3074bc7 100644 --- a/noxfile.py +++ b/noxfile.py @@ -32,7 +32,7 @@ ISORT_VERSION = "isort==5.11.0" LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] -DEFAULT_PYTHON_VERSION = "3.8" +DEFAULT_PYTHON_VERSION = "3.12" DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION = "3.12" UNIT_TEST_PYTHON_VERSIONS: List[str] = [ diff --git a/tests/mockserver_tests/mock_server_test_base.py b/tests/mockserver_tests/mock_server_test_base.py index fec056b523..24bbac0861 100644 --- a/tests/mockserver_tests/mock_server_test_base.py +++ b/tests/mockserver_tests/mock_server_test_base.py @@ -57,6 +57,26 @@ def aborted_status() -> _Status: ) return status +# Creates an UNAVAILABLE status with the smallest possible retry delay. 
+def unavailable_status() -> _Status: + error = status_pb2.Status( + code=code_pb2.UNAVAILABLE, + message="Service unavailable.", + ) + retry_info = RetryInfo(retry_delay=Duration(seconds=0, nanos=1)) + status = _Status( + code=code_to_grpc_status_code(error.code), + details=error.message, + trailing_metadata=( + ("grpc-status-details-bin", error.SerializeToString()), + ( + "google.rpc.retryinfo-bin", + retry_info.SerializeToString(), + ), + ), + ) + return status + # Creates an UNAVAILABLE status with the smallest possible retry delay. def unavailable_status() -> _Status: diff --git a/tests/unit/test_request_id_header.py b/tests/mockserver_tests/test_request_id_header.py similarity index 66% rename from tests/unit/test_request_id_header.py rename to tests/mockserver_tests/test_request_id_header.py index a49d0521ce..494a4f3879 100644 --- a/tests/unit/test_request_id_header.py +++ b/tests/mockserver_tests/test_request_id_header.py @@ -15,18 +15,16 @@ import random import threading -from google.api_core.exceptions import Aborted -from google.rpc import code_pb2 - from google.cloud.spanner_v1 import ( BatchCreateSessionsRequest, BeginTransactionRequest, ExecuteSqlRequest, ) from google.cloud.spanner_v1.request_id_header import REQ_RAND_PROCESS_ID +from google.cloud.spanner_v1.testing.mock_spanner import SpannerServicer from tests.mockserver_tests.mock_server_test_base import ( MockServerTestBase, - add_select1_result, + add_select1_result, aborted_status, add_error, unavailable_status, ) @@ -102,7 +100,7 @@ def select1(): break requests = self.spanner_service.requests - self.assertEqual(n * 2, len(requests), msg=requests) + self.assertEqual(n + 1, len(requests), msg=requests) client_id = self.database._nth_client_id channel_id = self.database._channel_id @@ -113,42 +111,6 @@ def select1(): "/google.spanner.v1.Spanner/BatchCreateSessions", (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 1, 1), ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 3, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 5, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 7, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 9, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 11, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 13, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 15, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 17, 1), - ), - ( - "/google.spanner.v1.Spanner/GetSession", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 19, 1), - ), ] assert got_unary_segments == want_unary_segments @@ -159,39 +121,39 @@ def select1(): ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 4, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 3, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 6, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 4, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 8, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 5, 1), ), ( 
"/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 10, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 6, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 12, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 7, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 14, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 8, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 16, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 9, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 18, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 10, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 20, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 11, 1), ), ] assert got_stream_segments == want_stream_segments @@ -207,10 +169,7 @@ def select_in_txn(txn): if counters["aborted"] < want_failed_attempts: counters["aborted"] += 1 - raise Aborted( - "Thrown from ClientInterceptor for testing", - errors=[FauxCall(code_pb2.ABORTED)], - ) + add_error(SpannerServicer.Commit.__name__, aborted_status()) add_select1_result() if not getattr(self.database, "_interceptors", None): @@ -254,24 +213,98 @@ def test_database_execute_partitioned_dml_request_id(self): assert got_unary_segments == want_unary_segments assert got_stream_segments == want_stream_segments - def canonicalize_request_id_headers(self): - src = self.database._x_goog_request_id_interceptor - return src._stream_req_segments, src._unary_req_segments + def test_unary_retryable_error(self): + add_select1_result() + add_error(SpannerServicer.BatchCreateSessions.__name__, unavailable_status()) + + if not getattr(self.database, "_interceptors", None): + self.database._interceptors = MockServerTestBase._interceptors + with self.database.snapshot() as snapshot: + results = snapshot.execute_sql("select 1") + result_list = [] + for row in results: + result_list.append(row) + self.assertEqual(1, row[0]) + self.assertEqual(1, len(result_list)) + + requests = self.spanner_service.requests + self.assertEqual(3, len(requests), msg=requests) + self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest)) + self.assertTrue(isinstance(requests[1], BatchCreateSessionsRequest)) + self.assertTrue(isinstance(requests[2], ExecuteSqlRequest)) + + NTH_CLIENT = self.database._nth_client_id + CHANNEL_ID = self.database._channel_id + # Now ensure monotonicity of the received request-id segments. 
+ got_stream_segments, got_unary_segments = self.canonicalize_request_id_headers() + want_unary_segments = [ + ( + "/google.spanner.v1.Spanner/BatchCreateSessions", + (1, REQ_RAND_PROCESS_ID, NTH_CLIENT, CHANNEL_ID, 1, 1), + ), + ( + "/google.spanner.v1.Spanner/BatchCreateSessions", + (1, REQ_RAND_PROCESS_ID, NTH_CLIENT, CHANNEL_ID, 1, 2), + ), + ] + want_stream_segments = [ + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, NTH_CLIENT, CHANNEL_ID, 2, 1), + ) + ] + assert got_unary_segments == want_unary_segments + assert got_stream_segments == want_stream_segments -class FauxCall: - def __init__(self, code, details="FauxCall"): - self._code = code - self._details = details + def test_streaming_retryable_error(self): + add_select1_result() + # TODO: UNAVAILABLE errors are not correctly handled by the client lib. + # This is probably the reason behind + # https://github.com/googleapis/python-spanner/issues/1150. + # The fix + add_error(SpannerServicer.ExecuteStreamingSql.__name__, unavailable_status()) - def initial_metadata(self): - return {} + if not getattr(self.database, "_interceptors", None): + self.database._interceptors = MockServerTestBase._interceptors + with self.database.snapshot() as snapshot: + results = snapshot.execute_sql("select 1") + result_list = [] + for row in results: + result_list.append(row) + self.assertEqual(1, row[0]) + self.assertEqual(1, len(result_list)) - def trailing_metadata(self): - return {} + requests = self.spanner_service.requests + self.assertEqual(3, len(requests), msg=requests) + self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest)) + self.assertTrue(isinstance(requests[1], ExecuteSqlRequest)) + self.assertTrue(isinstance(requests[2], ExecuteSqlRequest)) - def code(self): - return self._code + NTH_CLIENT = self.database._nth_client_id + CHANNEL_ID = self.database._channel_id + # Now ensure monotonicity of the received request-id segments. 
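The expectations that follow rely on the wrapped-method pattern used throughout this series: an AtomicCounter seeded at 0 whose increment() runs on every (re)invocation, so the retried stream reports attempt 2 under the same request ordinal. A self-contained sketch of that mechanic, with stand-in names in place of the real api and database objects:

    import threading

    class Counter:  # stand-in for the library's AtomicCounter
        def __init__(self):
            self._lock, self._value = threading.Lock(), 0

        def increment(self):
            with self._lock:
                self._value += 1
                return self._value

    attempt = Counter()

    def wrapped_restart():
        # Rebuild the metadata with a fresh attempt number on every call.
        return f"1.rand.1.1.2.{attempt.increment()}"

    assert wrapped_restart() == "1.rand.1.1.2.1"  # first try
    assert wrapped_restart() == "1.rand.1.1.2.2"  # retry after UNAVAILABLE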
+ got_stream_segments, got_unary_segments = self.canonicalize_request_id_headers() + want_unary_segments = [ + ( + "/google.spanner.v1.Spanner/BatchCreateSessions", + (1, REQ_RAND_PROCESS_ID, NTH_CLIENT, CHANNEL_ID, 1, 1), + ), + ] + want_stream_segments = [ + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, NTH_CLIENT, CHANNEL_ID, 2, 1), + ), + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, REQ_RAND_PROCESS_ID, NTH_CLIENT, CHANNEL_ID, 2, 2), + ), + ] + + assert got_unary_segments == want_unary_segments + assert got_stream_segments == want_stream_segments - def details(self): - return self._details + def canonicalize_request_id_headers(self): + src = self.database._x_goog_request_id_interceptor + return src._stream_req_segments, src._unary_req_segments From 11043637d2a923f398fbb71dfb609fdc7cd98d9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?= Date: Fri, 27 Dec 2024 20:28:25 +0100 Subject: [PATCH 13/16] fix: revert default Python version --- noxfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/noxfile.py b/noxfile.py index c7c3074bc7..f32c24f1e3 100644 --- a/noxfile.py +++ b/noxfile.py @@ -32,7 +32,7 @@ ISORT_VERSION = "isort==5.11.0" LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] -DEFAULT_PYTHON_VERSION = "3.12" +DEFAULT_PYTHON_VERSION = "3.8" DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION = "3.12" UNIT_TEST_PYTHON_VERSIONS: List[str] = [ From 9960af2cc3b6883c23a1854004cab45a11e0b6aa Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Fri, 3 Jan 2025 22:04:58 -0800 Subject: [PATCH 14/16] Fix discrepancy with api.batch_create_sessions automatically retrying assuming idempotency with headers too --- google/cloud/spanner_v1/pool.py | 78 +++++++++++++++---- google/cloud/spanner_v1/snapshot.py | 5 +- .../cloud/spanner_v1/testing/database_test.py | 5 ++ .../cloud/spanner_v1/testing/interceptors.py | 3 +- .../mockserver_tests/mock_server_test_base.py | 1 + .../test_request_id_header.py | 21 +++-- 6 files changed, 87 insertions(+), 26 deletions(-) diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py index 95d4fb2c46..eab2dbe809 100644 --- a/google/cloud/spanner_v1/pool.py +++ b/google/cloud/spanner_v1/pool.py @@ -15,10 +15,12 @@ """Pools managing shared Session objects.""" import datetime +import random import queue import time from google.cloud.exceptions import NotFound +from google.api_core.exceptions import ServiceUnavailable from google.cloud.spanner_v1 import BatchCreateSessionsRequest from google.cloud.spanner_v1 import Session from google.cloud.spanner_v1._helpers import ( @@ -251,13 +253,23 @@ def bind(self, database): f"Creating {request.session_count} sessions", span_event_attributes, ) - all_metadata = database.metadata_with_request_id( - database._next_nth_request, 1, metadata - ) - resp = api.batch_create_sessions( - request=request, - metadata=all_metadata, - ) + nth_req = database._next_nth_request + + def create_sessions(attempt): + all_metadata = database.metadata_with_request_id( + nth_req, attempt, metadata + ) + return api.batch_create_sessions( + request=request, + metadata=all_metadata, + # Manually passing retry=None because otherwise any + # UNAVAILABLE retry will be retried without replenishing + # the metadata, hence this allows us to manually update + # the metadata using retry_on_unavailable. 
+ retry=None, + ) + + resp = retry_on_unavailable(create_sessions) add_span_event( span, @@ -556,14 +568,26 @@ def bind(self, database): "CloudSpanner.PingingPool.BatchCreateSessions", observability_options=observability_options, ) as span: - returned_session_count = 0 - while returned_session_count < self.size: - resp = api.batch_create_sessions( - request=request, - metadata=database.metadata_with_request_id( - database._next_nth_request, 1, metadata - ), - ) + while created_session_count < self.size: + nth_req = database._next_nth_request + + def create_sessions(attempt): + all_metadata = database.metadata_with_request_id( + nth_req, attempt, metadata + ) + return api.batch_create_sessions( + request=request, + metadata=all_metadata, + # Manually passing retry=None because otherwise any + # UNAVAILABLE retry will be retried without replenishing + # the metadata, hence this allows us to manually update + # the metadata using retry_on_unavailable. + # TODO: Figure out how to intercept and monkey patch the internals + # of the gRPC transport. + retry=None, + ) + + resp = retry_on_unavailable(create_sessions) add_span_event( span, @@ -572,13 +596,14 @@ def bind(self, database): for session_pb in resp.session: session = self._new_session() - returned_session_count += 1 session._session_id = session_pb.name.split("/")[-1] self.put(session) + created_session_count += len(resp.session) + add_span_event( span, - f"Requested for {requested_session_count} sessions, returned {returned_session_count}", + f"Requested for {requested_session_count} sessions, returned {created_session_count}", span_event_attributes, ) @@ -801,3 +826,22 @@ def __enter__(self): def __exit__(self, *ignored): self._pool.put(self._session) + + +def retry_on_unavailable(fn, max=6): + """ + Retries `fn` to a maximum of `max` times on encountering UNAVAILABLE exceptions, + each time passing in the iteration's ordinal number to signal + the nth attempt. It retries with exponential backoff with jitter. 
+ """ + last_exc = None + for i in range(max): + try: + return fn(i + 1) + except ServiceUnavailable as exc: + last_exc = exc + time.sleep(i**2 + random.random()) + except: + raise + + raise last_exc diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py index 63556c07b9..6743c9bd79 100644 --- a/google/cloud/spanner_v1/snapshot.py +++ b/google/cloud/spanner_v1/snapshot.py @@ -357,6 +357,7 @@ def wrapped_restart(*args, **kwargs): trace_attributes, transaction=self, observability_options=observability_options, + attempt=attempt, ) self._read_request_count += 1 if self._multi_use: @@ -379,6 +380,7 @@ def wrapped_restart(*args, **kwargs): trace_attributes, transaction=self, observability_options=observability_options, + attempt=attempt, ) self._read_request_count += 1 @@ -556,12 +558,11 @@ def execute_sql( attempt = AtomicCounter(0) def wrapped_restart(*args, **kwargs): - attempt.increment() restart = functools.partial( api.execute_streaming_sql, request=request, metadata=database.metadata_with_request_id( - nth_request, attempt.value, metadata + nth_request, attempt.increment(), metadata ), retry=retry, timeout=timeout, diff --git a/google/cloud/spanner_v1/testing/database_test.py b/google/cloud/spanner_v1/testing/database_test.py index 80f040d7e0..4a6e94c88b 100644 --- a/google/cloud/spanner_v1/testing/database_test.py +++ b/google/cloud/spanner_v1/testing/database_test.py @@ -79,6 +79,7 @@ def spanner_api(self): channel = grpc.insecure_channel(self._instance.emulator_host) self._x_goog_request_id_interceptor = XGoogRequestIDHeaderInterceptor() self._interceptors.append(self._x_goog_request_id_interceptor) + # print("self._interceptors", self._interceptors) channel = grpc.intercept_channel(channel, *self._interceptors) transport = SpannerGrpcTransport(channel=channel) self._spanner_api = SpannerClient( @@ -115,3 +116,7 @@ def _create_spanner_client_for_tests(self, client_options, credentials): client_options=client_options, transport=transport, ) + + def reset(self): + if self._x_goog_request_id_interceptor: + self._x_goog_request_id_interceptor.reset() diff --git a/google/cloud/spanner_v1/testing/interceptors.py b/google/cloud/spanner_v1/testing/interceptors.py index 4cd3abd306..a1ab53af40 100644 --- a/google/cloud/spanner_v1/testing/interceptors.py +++ b/google/cloud/spanner_v1/testing/interceptors.py @@ -90,7 +90,9 @@ def intercept(self, method, request_or_iterator, call_details): ) response_or_iterator = method(request_or_iterator, call_details) + print("call_details", call_details, "\n", response_or_iterator) streaming = getattr(response_or_iterator, "__iter__", None) is not None + print("x_append", call_details.method, x_goog_request_id) with self.__lock: if streaming: self._stream_req_segments.append( @@ -114,7 +116,6 @@ def stream_request_ids(self): def reset(self): self._stream_req_segments.clear() self._unary_req_segments.clear() - pass def parse_request_id(request_id_str): diff --git a/tests/mockserver_tests/mock_server_test_base.py b/tests/mockserver_tests/mock_server_test_base.py index 24bbac0861..2f89415b55 100644 --- a/tests/mockserver_tests/mock_server_test_base.py +++ b/tests/mockserver_tests/mock_server_test_base.py @@ -57,6 +57,7 @@ def aborted_status() -> _Status: ) return status + # Creates an UNAVAILABLE status with the smallest possible retry delay. 
def unavailable_status() -> _Status: error = status_pb2.Status( diff --git a/tests/mockserver_tests/test_request_id_header.py b/tests/mockserver_tests/test_request_id_header.py index 494a4f3879..306d2cbf93 100644 --- a/tests/mockserver_tests/test_request_id_header.py +++ b/tests/mockserver_tests/test_request_id_header.py @@ -24,7 +24,10 @@ from google.cloud.spanner_v1.testing.mock_spanner import SpannerServicer from tests.mockserver_tests.mock_server_test_base import ( MockServerTestBase, - add_select1_result, aborted_status, add_error, unavailable_status, + add_select1_result, + aborted_status, + add_error, + unavailable_status, ) @@ -70,6 +73,13 @@ def test_snapshot_execute_sql(self): assert got_stream_segments == want_stream_segments def test_snapshot_read_concurrent(self): + # Trigger BatchCreateSessions firstly. + with self.database.snapshot() as snapshot: + rows = snapshot.execute_sql("select 1") + for row in rows: + _ = row + + # The other requests can then proceed. def select1(): with self.database.snapshot() as snapshot: rows = snapshot.execute_sql("select 1") @@ -100,7 +110,7 @@ def select1(): break requests = self.spanner_service.requests - self.assertEqual(n + 1, len(requests), msg=requests) + self.assertEqual(2 + n * 2, len(requests), msg=requests) client_id = self.database._nth_client_id channel_id = self.database._channel_id @@ -112,6 +122,7 @@ def select1(): (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 1, 1), ), ] + print("got_unary", got_unary_segments) assert got_unary_segments == want_unary_segments want_stream_segments = [ @@ -254,15 +265,13 @@ def test_unary_retryable_error(self): ) ] + print("got_unary_segments", got_unary_segments) assert got_unary_segments == want_unary_segments assert got_stream_segments == want_stream_segments def test_streaming_retryable_error(self): add_select1_result() - # TODO: UNAVAILABLE errors are not correctly handled by the client lib. - # This is probably the reason behind - # https://github.com/googleapis/python-spanner/issues/1150. 
- # The fix + add_error(SpannerServicer.ExecuteStreamingSql.__name__, unavailable_status()) add_error(SpannerServicer.ExecuteStreamingSql.__name__, unavailable_status()) if not getattr(self.database, "_interceptors", None): From 4d13c9be66b5ab0fcaf7048d08b335f503252f90 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Fri, 3 Jan 2025 22:58:56 -0800 Subject: [PATCH 15/16] Take into account current behavior of /GetSession /BatchCreateSession in tests --- .../cloud/spanner_v1/testing/database_test.py | 1 - .../cloud/spanner_v1/testing/interceptors.py | 2 - .../test_request_id_header.py | 73 +++++++++++++++---- 3 files changed, 58 insertions(+), 18 deletions(-) diff --git a/google/cloud/spanner_v1/testing/database_test.py b/google/cloud/spanner_v1/testing/database_test.py index 4a6e94c88b..5af89fea42 100644 --- a/google/cloud/spanner_v1/testing/database_test.py +++ b/google/cloud/spanner_v1/testing/database_test.py @@ -79,7 +79,6 @@ def spanner_api(self): channel = grpc.insecure_channel(self._instance.emulator_host) self._x_goog_request_id_interceptor = XGoogRequestIDHeaderInterceptor() self._interceptors.append(self._x_goog_request_id_interceptor) - # print("self._interceptors", self._interceptors) channel = grpc.intercept_channel(channel, *self._interceptors) transport = SpannerGrpcTransport(channel=channel) self._spanner_api = SpannerClient( diff --git a/google/cloud/spanner_v1/testing/interceptors.py b/google/cloud/spanner_v1/testing/interceptors.py index a1ab53af40..4fe4ed147d 100644 --- a/google/cloud/spanner_v1/testing/interceptors.py +++ b/google/cloud/spanner_v1/testing/interceptors.py @@ -90,9 +90,7 @@ def intercept(self, method, request_or_iterator, call_details): ) response_or_iterator = method(request_or_iterator, call_details) - print("call_details", call_details, "\n", response_or_iterator) streaming = getattr(response_or_iterator, "__iter__", None) is not None - print("x_append", call_details.method, x_goog_request_id) with self.__lock: if streaming: self._stream_req_segments.append( diff --git a/tests/mockserver_tests/test_request_id_header.py b/tests/mockserver_tests/test_request_id_header.py index 306d2cbf93..d0b4ae1ec3 100644 --- a/tests/mockserver_tests/test_request_id_header.py +++ b/tests/mockserver_tests/test_request_id_header.py @@ -73,15 +73,16 @@ def test_snapshot_execute_sql(self): assert got_stream_segments == want_stream_segments def test_snapshot_read_concurrent(self): + db = self.database # Trigger BatchCreateSessions firstly. - with self.database.snapshot() as snapshot: + with db.snapshot() as snapshot: rows = snapshot.execute_sql("select 1") for row in rows: _ = row # The other requests can then proceed. 
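The warm-up query above pins the BatchCreateSessions ordinal to 1; each concurrent snapshot afterwards issues a GetSession followed by an ExecuteStreamingSql, which is why the expectations below alternate odd and even request ordinals. Those ordinals stay unique because every RPC draws from a single shared atomic counter on the client; a self-contained sketch of that property:

    import threading

    lock, state, seen = threading.Lock(), [0], []

    def next_nth_request():
        with lock:
            state[0] += 1
            return state[0]

    threads = [
        threading.Thread(target=lambda: seen.append(next_nth_request()))
        for _ in range(10)
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert sorted(seen) == list(range(1, 11))  # unique, gap-free ordinals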
def select1(): - with self.database.snapshot() as snapshot: + with db.snapshot() as snapshot: rows = snapshot.execute_sql("select 1") res_list = [] for row in rows: @@ -112,8 +113,8 @@ def select1(): requests = self.spanner_service.requests self.assertEqual(2 + n * 2, len(requests), msg=requests) - client_id = self.database._nth_client_id - channel_id = self.database._channel_id + client_id = db._nth_client_id + channel_id = db._channel_id got_stream_segments, got_unary_segments = self.canonicalize_request_id_headers() want_unary_segments = [ @@ -121,8 +122,47 @@ def select1(): "/google.spanner.v1.Spanner/BatchCreateSessions", (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 1, 1), ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 3, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 5, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 7, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 9, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 11, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 13, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 15, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 17, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 19, 1), + ), + ( + "/google.spanner.v1.Spanner/GetSession", + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 21, 1), + ), ] - print("got_unary", got_unary_segments) assert got_unary_segments == want_unary_segments want_stream_segments = [ @@ -132,39 +172,43 @@ def select1(): ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 3, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 4, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 4, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 6, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 5, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 8, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 6, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 10, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 7, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 12, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 8, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 14, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 9, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 16, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 10, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 18, 1), ), ( "/google.spanner.v1.Spanner/ExecuteStreamingSql", - (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 11, 1), + (1, REQ_RAND_PROCESS_ID, client_id, channel_id, 20, 1), + ), + ( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + (1, 
REQ_RAND_PROCESS_ID, client_id, channel_id, 22, 1),
+            ),
         ]
         assert got_stream_segments == want_stream_segments

@@ -265,7 +309,6 @@ def test_unary_retryable_error(self):
             )
         ]

-        print("got_unary_segments", got_unary_segments)
         assert got_unary_segments == want_unary_segments
         assert got_stream_segments == want_stream_segments

From f92c51db0c097fd29eea77020f68bb5d8213a6cf Mon Sep 17 00:00:00 2001
From: Emmanuel T Odeke
Date: Tue, 14 Jan 2025 23:32:00 -0800
Subject: [PATCH 16/16] Implement interceptor to wrap and increase x-goog-spanner-request-id attempts per retry

This monkey patches SpannerClient methods with an interceptor that
increments the x-goog-spanner-request-id attempt count on each retry.
The precondition, though, is that callers must pass in an attempt value
of 0 so that each pass through the interceptor correctly increments the
attempt field's value.
---
 google/cloud/spanner_v1/_helpers.py |  33 ++++++-
 google/cloud/spanner_v1/batch.py    |  46 ++++-----
 google/cloud/spanner_v1/database.py |  74 +++++++++------
 google/cloud/spanner_v1/pool.py     |  13 +--
 google/cloud/spanner_v1/snapshot.py | 140 ++++++++++++----------------
 tests/unit/test_snapshot.py         |   1 +
 6 files changed, 160 insertions(+), 147 deletions(-)

diff --git a/google/cloud/spanner_v1/_helpers.py b/google/cloud/spanner_v1/_helpers.py
index 756ab13ab1..8209c89a06 100644
--- a/google/cloud/spanner_v1/_helpers.py
+++ b/google/cloud/spanner_v1/_helpers.py
@@ -32,10 +32,11 @@
 from google.cloud.spanner_v1 import TypeCode
 from google.cloud.spanner_v1 import ExecuteSqlRequest
 from google.cloud.spanner_v1 import JsonObject
-from google.cloud.spanner_v1.request_id_header import with_request_id
+from google.cloud.spanner_v1.request_id_header import REQ_ID_HEADER_KEY, with_request_id
 from google.rpc.error_details_pb2 import RetryInfo

 import random
+from typing import Callable

 # Validation error messages
 NUMERIC_MAX_SCALE_ERR_MSG = (
@@ -648,3 +649,33 @@ def reset(self):

 def _metadata_with_request_id(*args, **kwargs):
     return with_request_id(*args, **kwargs)
+
+
+class InterceptingHeaderInjector:
+    def __init__(self, original_callable: Callable):
+        self._original_callable = original_callable
+
+    def __call__(self, *args, **kwargs):
+        metadata = kwargs.get("metadata", [])
+        # Find all the headers that match the x-goog-spanner-request-id
+        # header and, on each retry, increment the attempt value.
+        all_metadata = []
+        for key, value in metadata:
+            if key == REQ_ID_HEADER_KEY:
+                # Increment the count for the attempt number.
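+                # The request-id is dot-separated; per the tests above its
+                # components are (version, rand_process_id, client_id,
+                # channel_id, nth_request, attempt), so the attempt number
+                # is the final component.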
+                splits = value.split(".")
+                attempt_plus_one = int(splits[-1]) + 1
+                splits[-1] = str(attempt_plus_one)
+                value = ".".join(splits)
+
+            all_metadata.append(
+                (
+                    key,
+                    value,
+                )
+            )
+
+        kwargs["metadata"] = all_metadata
+        return self._original_callable(*args, **kwargs)
diff --git a/google/cloud/spanner_v1/batch.py b/google/cloud/spanner_v1/batch.py
index 8ec84b75ab..094e2530db 100644
--- a/google/cloud/spanner_v1/batch.py
+++ b/google/cloud/spanner_v1/batch.py
@@ -231,18 +231,16 @@ def commit(
             attempt = AtomicCounter(0)
             next_nth_request = database._next_nth_request

-            def wrapped_method(*args, **kwargs):
-                all_metadata = database.metadata_with_request_id(
-                    next_nth_request,
-                    attempt.increment(),
-                    metadata,
-                )
-                method = functools.partial(
-                    api.commit,
-                    request=request,
-                    metadata=all_metadata,
-                )
-                return method(*args, **kwargs)
+            all_metadata = database.metadata_with_request_id(
+                next_nth_request,
+                attempt.increment(),
+                metadata,
+            )
+            method = functools.partial(
+                api.commit,
+                request=request,
+                metadata=all_metadata,
+            )

             deadline = time.time() + kwargs.get(
                 "timeout_secs", DEFAULT_RETRY_TIMEOUT_SECS
@@ -361,21 +359,17 @@ def batch_write(self, request_options=None, exclude_txn_from_change_streams=False):
             trace_attributes,
             observability_options=observability_options,
         ):
-            attempt = AtomicCounter(0)
             next_nth_request = database._next_nth_request
-
-            def wrapped_method(*args, **kwargs):
-                all_metadata = database.metadata_with_request_id(
-                    next_nth_request,
-                    attempt.increment(),
-                    metadata,
-                )
-                method = functools.partial(
-                    api.batch_write,
-                    request=request,
-                    metadata=all_metadata,
-                )
-                return method(*args, **kwargs)
+            all_metadata = database.metadata_with_request_id(
+                next_nth_request,
+                0,
+                metadata,
+            )
+            method = functools.partial(
+                api.batch_write,
+                request=request,
+                metadata=all_metadata,
+            )

             response = _retry(
                 method,
diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py
index e40c0d54a3..8a53a6b05f 100644
--- a/google/cloud/spanner_v1/database.py
+++ b/google/cloud/spanner_v1/database.py
@@ -54,6 +54,7 @@
     _metadata_with_prefix,
     _metadata_with_leader_aware_routing,
     _metadata_with_request_id,
+    InterceptingHeaderInjector,
 )
 from google.cloud.spanner_v1.batch import Batch
 from google.cloud.spanner_v1.batch import MutationGroups
@@ -427,6 +428,43 @@ def logger(self):

     @property
     def spanner_api(self):
+        """Helper for session-related API calls."""
+        api = self._generate_spanner_api()
+        if not api:
+            return api
+
+        # Now wrap each method's __call__ method with our wrapped one.
+        # There are no proper client-side gRPC interceptors for Python,
+        # hence the remedy is to replace the callables
+        # with our custom wrapper.
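+        # Each public bound method (e.g. execute_streaming_sql, commit) is
+        # rebound to an InterceptingHeaderInjector, which bumps the trailing
+        # attempt component of x-goog-spanner-request-id on each invocation.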
+        attrs = dir(api)
+        for attr_name in attrs:
+            mangled = attr_name.startswith("__")
+            if mangled:
+                continue
+
+            non_public = attr_name.startswith("_")
+            if non_public:
+                continue
+
+            attr = getattr(api, attr_name)
+            callable_attr = callable(attr)
+            if not callable_attr:
+                continue
+
+            # We should only be looking at bound methods of SpannerClient,
+            # as those are the RPC-invoking methods that need to be wrapped.
+            is_method = type(attr).__name__ == "method"
+            if not is_method:
+                continue
+
+            setattr(api, attr_name, InterceptingHeaderInjector(attr))
+
+        return api
+
+    def _generate_spanner_api(self):
         """Helper for session-related API calls."""
         if self._spanner_api is None:
             client_info = self._instance._client._client_info
@@ -757,11 +795,11 @@ def execute_pdml():
             ) as span:
                 with SessionCheckout(self._pool) as session:
                     add_span_event(span, "Starting BeginTransaction")
-                    begin_txn_attempt.increment()
                     txn = api.begin_transaction(
-                        session=session.name, options=txn_options,
+                        session=session.name,
+                        options=txn_options,
                         metadata=self.metadata_with_request_id(
-                            begin_txn_nth_request, begin_txn_attempt.value, metadata
+                            begin_txn_nth_request, begin_txn_attempt.increment(), metadata
                         ),
                     )

@@ -776,36 +814,20 @@ def execute_pdml():
                         request_options=request_options,
                     )

-                    def wrapped_method(*args, **kwargs):
-                        partial_attempt.increment()
-                        method = functools.partial(
-                            api.execute_streaming_sql,
-                            metadata=self.metadata_with_request_id(
-                                partial_nth_request, partial_attempt.value, metadata
-                            ),
-                        )
-                        return method(*args, **kwargs)
+                    method = functools.partial(
+                        api.execute_streaming_sql,
+                        metadata=self.metadata_with_request_id(
+                            partial_nth_request, partial_attempt.increment(), metadata
+                        ),
+                    )

                     iterator = _restart_on_unavailable(
-                        method=wrapped_method,
+                        method=method,
                         trace_name="CloudSpanner.ExecuteStreamingSql",
                         request=request,
                         transaction_selector=txn_selector,
                         observability_options=self.observability_options,
-                        attempt=begin_txn_attempt,
                     )
-<<<<<<< HEAD
-=======
-                        return method(*args, **kwargs)
-
-                    iterator = _restart_on_unavailable(
-                        method=wrapped_method,
-                        trace_name="CloudSpanner.ExecuteStreamingSql",
-                        request=request,
-                        transaction_selector=txn_selector,
-                        observability_options=self.observability_options,
-                    )
->>>>>>> 54df502... Update tests
                     result_set = StreamedResultSet(iterator)
                     list(result_set)  # consume all partials

diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py
index eab2dbe809..401538b685 100644
--- a/google/cloud/spanner_v1/pool.py
+++ b/google/cloud/spanner_v1/pool.py
@@ -262,11 +262,6 @@ def create_sessions(attempt):
             return api.batch_create_sessions(
                 request=request,
                 metadata=all_metadata,
-                # Manually passing retry=None because otherwise any
-                # UNAVAILABLE retry will be retried without replenishing
-                # the metadata, hence this allows us to manually update
-                # the metadata using retry_on_unavailable.
- retry=None, ) resp = retry_on_unavailable(create_sessions) @@ -568,6 +563,7 @@ def bind(self, database): "CloudSpanner.PingingPool.BatchCreateSessions", observability_options=observability_options, ) as span: + created_session_count = 0 while created_session_count < self.size: nth_req = database._next_nth_request @@ -578,13 +574,6 @@ def create_sessions(attempt): return api.batch_create_sessions( request=request, metadata=all_metadata, - # Manually passing retry=None because otherwise any - # UNAVAILABLE retry will be retried without replenishing - # the metadata, hence this allows us to manually update - # the metadata using retry_on_unavailable. - # TODO: Figure out how to intercept and monkey patch the internals - # of the gRPC transport. - retry=None, ) resp = retry_on_unavailable(create_sessions) diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py index 6743c9bd79..f68d73df95 100644 --- a/google/cloud/spanner_v1/snapshot.py +++ b/google/cloud/spanner_v1/snapshot.py @@ -326,22 +326,17 @@ def read( ) nth_request = getattr(database, "_next_nth_request", 0) - attempt = AtomicCounter(0) - - def wrapped_restart(*args, **kwargs): - attempt.increment() - all_metadata = database.metadata_with_request_id( - nth_request, attempt.value, metadata - ) + all_metadata = database.metadata_with_request_id( + nth_request, 1, metadata + ) - restart = functools.partial( - api.streaming_read, - request=request, - metadata=all_metadata, - retry=retry, - timeout=timeout, - ) - return restart(*args, **kwargs) + restart = functools.partial( + api.streaming_read, + request=request, + metadata=all_metadata, + retry=retry, + timeout=timeout, + ) trace_attributes = {"table_id": table, "columns": columns} observability_options = getattr(database, "observability_options", None) @@ -350,14 +345,13 @@ def wrapped_restart(*args, **kwargs): # lock is added to handle the inline begin for first rpc with self._lock: iterator = _restart_on_unavailable( - wrapped_restart, + restart, request, f"CloudSpanner.{type(self).__name__}.read", self._session, trace_attributes, transaction=self, observability_options=observability_options, - attempt=attempt, ) self._read_request_count += 1 if self._multi_use: @@ -373,14 +367,13 @@ def wrapped_restart(*args, **kwargs): ) else: iterator = _restart_on_unavailable( - wrapped_restart, + restart, request, f"CloudSpanner.{type(self).__name__}.read", self._session, trace_attributes, transaction=self, observability_options=observability_options, - attempt=attempt, ) self._read_request_count += 1 @@ -555,20 +548,18 @@ def execute_sql( ) nth_request = getattr(database, "_next_nth_request", 0) - attempt = AtomicCounter(0) - - def wrapped_restart(*args, **kwargs): - restart = functools.partial( - api.execute_streaming_sql, - request=request, - metadata=database.metadata_with_request_id( - nth_request, attempt.increment(), metadata - ), - retry=retry, - timeout=timeout, - ) - - return restart(*args, **kwargs) + if not isinstance(nth_request, int): + raise Exception(f"failed to get an integer back: {nth_request}") + + restart = functools.partial( + api.execute_streaming_sql, + request=request, + metadata=database.metadata_with_request_id( + nth_request, 1, metadata + ), + retry=retry, + timeout=timeout, + ) trace_attributes = {"db.statement": sql} observability_options = getattr(database, "observability_options", None) @@ -577,7 +568,7 @@ def wrapped_restart(*args, **kwargs): # lock is added to handle the inline begin for first rpc with self._lock: return 
self._get_streamed_result_set( - wrapped_restart, + restart, request, trace_attributes, column_info, @@ -586,7 +577,7 @@ def wrapped_restart(*args, **kwargs): ) else: return self._get_streamed_result_set( - wrapped_restart, + restart, request, trace_attributes, column_info, @@ -714,24 +705,19 @@ def partition_read( observability_options=getattr(database, "observability_options", None), ): nth_request = getattr(database, "_next_nth_request", 0) - attempt = AtomicCounter(0) - - def wrapped_method(*args, **kwargs): - attempt.increment() - all_metadata = database.metadata_with_request_id( - nth_request, attempt.value, metadata - ) - method = functools.partial( - api.partition_read, - request=request, - metadata=all_metadata, - retry=retry, - timeout=timeout, - ) - return method(*args, **kwargs) + all_metadata = database.metadata_with_request_id( + nth_request, 1, metadata + ) + method = functools.partial( + api.partition_read, + request=request, + metadata=all_metadata, + retry=retry, + timeout=timeout, + ) response = _retry( - wrapped_method, + method, allowed_exceptions={InternalServerError: _check_rst_stream_error}, ) @@ -827,24 +813,19 @@ def partition_query( observability_options=getattr(database, "observability_options", None), ): nth_request = getattr(database, "_next_nth_request", 0) - attempt = AtomicCounter(0) - - def wrapped_method(*args, **kwargs): - attempt.increment() - all_metadata = database.metadata_with_request_id( - nth_request, attempt.value, metadata - ) - method = functools.partial( - api.partition_query, - request=request, - metadata=all_metadata, - retry=retry, - timeout=timeout, - ) - return method(*args, **kwargs) + all_metadata = database.metadata_with_request_id( + nth_request, 1, metadata + ) + method = functools.partial( + api.partition_query, + request=request, + metadata=all_metadata, + retry=retry, + timeout=timeout, + ) response = _retry( - wrapped_method, + method, allowed_exceptions={InternalServerError: _check_rst_stream_error}, ) @@ -983,23 +964,18 @@ def begin(self): observability_options=getattr(database, "observability_options", None), ): nth_request = getattr(database, "_next_nth_request", 0) - attempt = AtomicCounter(0) - - def wrapped_method(*args, **kwargs): - attempt.increment() - all_metadata = database.metadata_with_request_id( - nth_request, attempt.value, metadata - ) - method = functools.partial( - api.begin_transaction, - session=self._session.name, - options=txn_selector.begin, - metadata=all_metadata, - ) - return method(*args, **kwargs) + all_metadata = database.metadata_with_request_id( + nth_request, 1, metadata + ) + method = functools.partial( + api.begin_transaction, + session=self._session.name, + options=txn_selector.begin, + metadata=all_metadata, + ) response = _retry( - wrapped_method, + method, allowed_exceptions={InternalServerError: _check_rst_stream_error}, ) self._transaction_id = response.id diff --git a/tests/unit/test_snapshot.py b/tests/unit/test_snapshot.py index e07c77d09f..1c1539ef84 100644 --- a/tests/unit/test_snapshot.py +++ b/tests/unit/test_snapshot.py @@ -1878,6 +1878,7 @@ def __init__(self, directed_read_options=None): def observability_options(self): return dict(db_name=self.name) + @property def _next_nth_request(self): return self._instance._client._next_nth_request
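
For illustration, a minimal standalone sketch of how InterceptingHeaderInjector behaves, assuming the dot-separated request-id layout exercised by the tests above; fake_rpc and the literal header value here are hypothetical stand-ins, not part of the patch:

from google.cloud.spanner_v1._helpers import InterceptingHeaderInjector
from google.cloud.spanner_v1.request_id_header import REQ_ID_HEADER_KEY

seen = []

def fake_rpc(*args, **kwargs):
    # Stand-in for a wrapped SpannerClient method: record the outgoing header.
    seen.append(dict(kwargs["metadata"])[REQ_ID_HEADER_KEY])

wrapped = InterceptingHeaderInjector(fake_rpc)

# Callers seed the attempt (final) component with 0, per the commit message:
# <version>.<rand_process_id>.<client_id>.<channel_id>.<nth_request>.<attempt>
wrapped(metadata=[(REQ_ID_HEADER_KEY, "1.deadbeef.1.1.7.0")])

assert seen == ["1.deadbeef.1.1.7.1"]  # the injector bumped the attempt 0 -> 1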