GitHub Actions / JUnit Test Report
failed
Apr 17, 2024 in 0s
18 tests run, 15 passed, 0 skipped, 3 failed.
Annotations
Check failure on line 109 in pbm-functional/pytest/test_replicaset.py
github-actions / JUnit Test Report
test_replicaset.test_logical_timeseries_PBM_T224
AssertionError: Starting restore 2024-04-17T06:25:56.449523336Z to point-in-time 2024-04-17T06:25:26 from '2024-04-17T06:24:36Z'...Started logical restore.
Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1713335123.1713335153: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1713335124,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"test.system.views","Object":[{"Key":"_id","Value":"test.test2"},{"Key":"viewOn","Value":"system.buckets.test2"},{"Key":"pipeline","Value":[[{"Key":"$_internalUnpackBucket","Value":[{"Key":"timeField","Value":"timestamp"},{"Key":"metaField","Value":"data"},{"Key":"bucketMaxSpanSeconds","Value":3600}]}]]}],"Query":[{"Key":"_id","Value":"test.test2"}],"UI":{"Subtype":4,"Data":"ZoBbprWeR/2FLoWfPqd/hQ=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null} | merr <nil>: applyOps: (NamespaceNotFound) Failed to apply operation due to missing collection (66805ba6-b59e-47fd-852e-859f3ea77f85): { ts: Timestamp(1713335124, 2), t: 1, v: 2, op: "i", ns: "test.system.views", o: { _id: "test.test2", viewOn: "system.buckets.test2", pipeline: [ { $_internalUnpackBucket: { timeField: "timestamp", metaField: "data", bucketMaxSpanSeconds: 3600 } } ] }, o2: { _id: "test.test2" }, ui: UUID("66805ba6-b59e-47fd-852e-859f3ea77f85"), h: 0, wall: new Date(0) }
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f21c98e7a50>
    @pytest.mark.timeout(300,func_only=True)
    def test_logical_timeseries_PBM_T224(start_cluster,cluster):
        cluster.check_pbm_status()
        client=pymongo.MongoClient(cluster.connection)
        mongod_version=client.server_info()["version"]
        if version.parse(mongod_version) < version.parse("5.0.0"):
            pytest.skip("Unsupported version for timeseries")
        pymongo.MongoClient(cluster.connection)["test"].create_collection('test1',timeseries={'timeField':'timestamp','metaField': 'data'})
        for i in range(10):
            pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_one({"timestamp": datetime.now(), "data": i})
            time.sleep(0.1)
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
        time.sleep(30)
        # create new timeseries collection
        pymongo.MongoClient(cluster.connection)["test"].create_collection('test2',timeseries={'timeField':'timestamp','metaField': 'data'})
        for i in range(10):
            pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_one({"timestamp": datetime.now(), "data": i})
            pymongo.MongoClient(cluster.connection)["test"]["test2"].insert_one({"timestamp": datetime.now(), "data": i})
            time.sleep(0.1)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        backup="--time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(30)
        pymongo.MongoClient(cluster.connection).drop_database('test')
>       cluster.make_restore(backup,check_pbm_status=True)
test_replicaset.py:109:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <cluster.Cluster object at 0x7f21c98e7a50>
name = '--time=2024-04-17T06:25:26', kwargs = {'check_pbm_status': True}
result = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f21cd13ad10>, exit_status=1, command=b'time...o2: { _id: "test.test2" }, ui: UUID("66805ba6-b59e-47fd-852e-859f3ea77f85"), h: 0, wall: new Date(0) }\n', _stderr=b'')
n = <testinfra.host.Host docker://rs101>, timeout = 240, error = ''
host = 'rs103', container = <Container: 3d883f269d13>
    def make_restore(self, name, **kwargs):
        if self.layout == "sharded":
            client = pymongo.MongoClient(self.connection)
            result = client.admin.command("balancerStop")
            client.close()
            Cluster.log("Stopping balancer: " + str(result))
            self.stop_mongos()
        self.stop_arbiters()
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 60
        while True:
            if not self.get_status()['running']:
                break
            if time.time() > timeout:
                assert False, "Cannot start restore, another operation running"
            time.sleep(1)
        Cluster.log("Restore started")
        timeout=kwargs.get('timeout', 240)
        result = n.run('timeout ' + str(timeout) + ' pbm restore ' + name + ' --wait')
        if result.rc == 0:
            Cluster.log(result.stdout)
        else:
            # try to catch possible failures if timeout exceeded
            error=''
            for host in self.mongod_hosts:
                try:
                    container = docker.from_env().containers.get(host)
                    get_logs = container.exec_run(
                        'cat /var/lib/mongo/pbm.restore.log', stderr=False)
                    if get_logs.exit_code == 0:
                        Cluster.log(
                            "!!!!Possible failure on {}, file pbm.restore.log was found:".format(host))
                        logs = get_logs.output.decode('utf-8')
                        Cluster.log(logs)
                        if '"s":"F"' in logs:
                            error = logs
                except docker.errors.APIError:
                    pass
            if error:
                assert False, result.stdout + result.stderr + "\n" + error
            else:
>               assert False, result.stdout + result.stderr
E AssertionError: Starting restore 2024-04-17T06:25:56.449523336Z to point-in-time 2024-04-17T06:25:26 from '2024-04-17T06:24:36Z'...Started logical restore.
E Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1713335123.1713335153: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1713335124,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"test.system.views","Object":[{"Key":"_id","Value":"test.test2"},{"Key":"viewOn","Value":"system.buckets.test2"},{"Key":"pipeline","Value":[[{"Key":"$_internalUnpackBucket","Value":[{"Key":"timeField","Value":"timestamp"},{"Key":"metaField","Value":"data"},{"Key":"bucketMaxSpanSeconds","Value":3600}]}]]}],"Query":[{"Key":"_id","Value":"test.test2"}],"UI":{"Subtype":4,"Data":"ZoBbprWeR/2FLoWfPqd/hQ=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null} | merr <nil>: applyOps: (NamespaceNotFound) Failed to apply operation due to missing collection (66805ba6-b59e-47fd-852e-859f3ea77f85): { ts: Timestamp(1713335124, 2), t: 1, v: 2, op: "i", ns: "test.system.views", o: { _id: "test.test2", viewOn: "system.buckets.test2", pipeline: [ { $_internalUnpackBucket: { timeField: "timestamp", metaField: "data", bucketMaxSpanSeconds: 3600 } } ] }, o2: { _id: "test.test2" }, ui: UUID("66805ba6-b59e-47fd-852e-859f3ea77f85"), h: 0, wall: new Date(0) }
cluster.py:462: AssertionError
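
Analysis note: the entry that fails to apply is an insert into test.system.views for test2, the timeseries collection created after the base backup. MongoDB represents a timeseries collection as a view definition in system.views backed by a system.buckets.<name> collection, so the PITR oplog replay hits NamespaceNotFound when the view insert is applied while the backing collection is still missing. A minimal sketch of the two namespaces a timeseries create produces (requires MongoDB 5.0+; the URI is an assumption for illustration):

    # Minimal sketch: the namespaces behind a timeseries collection.
    # Assumes a local MongoDB 5.0+ instance; the URI is illustrative only.
    import pymongo

    client = pymongo.MongoClient("mongodb://localhost:27017")
    db = client["test"]

    # Same call as in the failing test.
    db.create_collection("test2", timeseries={"timeField": "timestamp", "metaField": "data"})

    # The create produces a view definition plus a backing bucket collection;
    # the restore above replays the system.views insert while
    # system.buckets.test2 does not yet exist, hence NamespaceNotFound.
    print(list(db["system.views"].find({"_id": "test.test2"})))
    print("system.buckets.test2" in db.list_collection_names())
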
Check failure on line 64 in pbm-functional/pytest/test_timeseries.py
github-actions / JUnit Test Report
test_timeseries.test_logical_with_data
AssertionError: Starting restore 2024-04-17T06:35:47.307298567Z from '2024-04-17T06:35:18Z'...Started logical restore.
Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1713335717.1713335727: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1713335717,"I":16},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"test.system.buckets.test2","Object":[{"Key":"_id","Value":"661f6d947c1648bb3be41fa8"},{"Key":"control","Value":[{"Key":"version","Value":1},{"Key":"min","Value":[{"Key":"_id","Value":"661f6da5396521b1b3bf9a90"},{"Key":"timestamp","Value":"2024-04-17T06:35:00Z"}]},{"Key":"max","Value":[{"Key":"_id","Value":"661f6da5396521b1b3bf9a90"},{"Key":"timestamp","Value":"2024-04-17T06:35:17.94Z"}]}]},{"Key":"meta","Value":9},{"Key":"data","Value":[{"Key":"_id","Value":[{"Key":"0","Value":"661f6da5396521b1b3bf9a90"}]},{"Key":"timestamp","Value":[{"Key":"0","Value":"2024-04-17T06:35:17.94Z"}]}]}],"Query":[{"Key":"meta","Value":9},{"Key":"_id","Value":"661f6d947c1648bb3be41fa8"}],"UI":null,"LSID":"SAAAAAVpZAAQAAAABGkCAI8LAEqGr//KjejsAloFdWlkACAAAAAAY5mrDaxi8gv8RmdTsQ+1j7fmkr7JUsabhNmXAheU0fgA","TxnNumber":1,"PrevOpTime":"HAAAABF0cwAAAAAAAAAAABJ0AP//////////AA=="} | merr <nil>: applyOps: (NamespaceNotFound) cannot apply insert or update operation on a non-existent namespace test.system.buckets.test2: { ts: Timestamp(1713335717, 16), t: 1, v: 2, op: "i", ns: "test.system.buckets.test2", o: { _id: ObjectId('661f6d947c1648bb3be41fa8'), control: { version: 1, min: { _id: ObjectId('661f6da5396521b1b3bf9a90'), timestamp: new Date(1713335700000) }, max: { _id: ObjectId('661f6da5396521b1b3bf9a90'), timestamp: new Date(1713335717940) } }, meta: 9, data: { _id: { 0: ObjectId('661f6da5396521b1b3bf9a90') }, timestamp: { 0: new Date(1713335717940) } } }, o2: { meta: 9, _id: ObjectId('661f6d947c1648bb3be41fa8') }, lsid: { id: UUID("6902008f-0b00-4a86-afff-ca8de8ec025a"), uid: BinData(0, 6399AB0DAC62F20BFC466753B10FB58FB7E692BEC952C69B84D997021794D1F8) }, txnNumber: 1, prevOpTime: { ts: Timestamp(0, 0), t: -1 } }
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f21cbb7ab50>
    @pytest.mark.timeout(600,func_only=True)
    def test_logical_with_data(start_cluster,cluster):
        cluster.check_pbm_status()
        assert pymongo.MongoClient(cluster.connection)["test"].command("collstats", "test1").get("sharded", False)
        assert pymongo.MongoClient(cluster.connection)["test"].command("collstats", "test2").get("sharded", False)
        for i in range(10):
            pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_one({"timestamp": datetime.now(), "data": i})
            pymongo.MongoClient(cluster.connection)["test"]["test2"].insert_one({"timestamp": datetime.now(), "data": i})
            time.sleep(0.1)
        backup=cluster.make_backup("logical")
        pymongo.MongoClient(cluster.connection).drop_database('test')
>       cluster.make_restore(backup,check_pbm_status=True)
test_timeseries.py:64:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <cluster.Cluster object at 0x7f21cbb7ab50>, name = '2024-04-17T06:35:18Z'
kwargs = {'check_pbm_status': True}
client = MongoClient(host=['mongos:27017'], document_class=dict, tz_aware=False, connect=True)
result = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f21cbc5cc10>, exit_status=1, command=b'time...10FB58FB7E692BEC952C69B84D997021794D1F8) }, txnNumber: 1, prevOpTime: { ts: Timestamp(0, 0), t: -1 } }\n', _stderr=b'')
n = <testinfra.host.Host docker://rscfg01>, timeout = 240, error = ''
host = 'rscfg01', container = <Container: 3f9ec2278920>
    def make_restore(self, name, **kwargs):
        if self.layout == "sharded":
            client = pymongo.MongoClient(self.connection)
            result = client.admin.command("balancerStop")
            client.close()
            Cluster.log("Stopping balancer: " + str(result))
            self.stop_mongos()
        self.stop_arbiters()
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 60
        while True:
            if not self.get_status()['running']:
                break
            if time.time() > timeout:
                assert False, "Cannot start restore, another operation running"
            time.sleep(1)
        Cluster.log("Restore started")
        timeout=kwargs.get('timeout', 240)
        result = n.run('timeout ' + str(timeout) + ' pbm restore ' + name + ' --wait')
        if result.rc == 0:
            Cluster.log(result.stdout)
        else:
            # try to catch possible failures if timeout exceeded
            error=''
            for host in self.mongod_hosts:
                try:
                    container = docker.from_env().containers.get(host)
                    get_logs = container.exec_run(
                        'cat /var/lib/mongo/pbm.restore.log', stderr=False)
                    if get_logs.exit_code == 0:
                        Cluster.log(
                            "!!!!Possible failure on {}, file pbm.restore.log was found:".format(host))
                        logs = get_logs.output.decode('utf-8')
                        Cluster.log(logs)
                        if '"s":"F"' in logs:
                            error = logs
                except docker.errors.APIError:
                    pass
            if error:
                assert False, result.stdout + result.stderr + "\n" + error
            else:
>               assert False, result.stdout + result.stderr
E AssertionError: Starting restore 2024-04-17T06:35:47.307298567Z from '2024-04-17T06:35:18Z'...Started logical restore.
E Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1713335717.1713335727: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1713335717,"I":16},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"test.system.buckets.test2","Object":[{"Key":"_id","Value":"661f6d947c1648bb3be41fa8"},{"Key":"control","Value":[{"Key":"version","Value":1},{"Key":"min","Value":[{"Key":"_id","Value":"661f6da5396521b1b3bf9a90"},{"Key":"timestamp","Value":"2024-04-17T06:35:00Z"}]},{"Key":"max","Value":[{"Key":"_id","Value":"661f6da5396521b1b3bf9a90"},{"Key":"timestamp","Value":"2024-04-17T06:35:17.94Z"}]}]},{"Key":"meta","Value":9},{"Key":"data","Value":[{"Key":"_id","Value":[{"Key":"0","Value":"661f6da5396521b1b3bf9a90"}]},{"Key":"timestamp","Value":[{"Key":"0","Value":"2024-04-17T06:35:17.94Z"}]}]}],"Query":[{"Key":"meta","Value":9},{"Key":"_id","Value":"661f6d947c1648bb3be41fa8"}],"UI":null,"LSID":"SAAAAAVpZAAQAAAABGkCAI8LAEqGr//KjejsAloFdWlkACAAAAAAY5mrDaxi8gv8RmdTsQ+1j7fmkr7JUsabhNmXAheU0fgA","TxnNumber":1,"PrevOpTime":"HAAAABF0cwAAAAAAAAAAABJ0AP//////////AA=="} | merr <nil>: applyOps: (NamespaceNotFound) cannot apply insert or update operation on a non-existent namespace test.system.buckets.test2: { ts: Timestamp(1713335717, 16), t: 1, v: 2, op: "i", ns: "test.system.buckets.test2", o: { _id: ObjectId('661f6d947c1648bb3be41fa8'), control: { version: 1, min: { _id: ObjectId('661f6da5396521b1b3bf9a90'), timestamp: new Date(1713335700000) }, max: { _id: ObjectId('661f6da5396521b1b3bf9a90'), timestamp: new Date(1713335717940) } }, meta: 9, data: { _id: { 0: ObjectId('661f6da5396521b1b3bf9a90') }, timestamp: { 0: new Date(1713335717940) } } }, o2: { meta: 9, _id: ObjectId('661f6d947c1648bb3be41fa8') }, lsid: { id: UUID("6902008f-0b00-4a86-afff-ca8de8ec025a"), uid: BinData(0, 6399AB0DAC62F20BFC466753B10FB58FB7E692BEC952C69B84D997021794D1F8) }, txnNumber: 1, prevOpTime: { ts: Timestamp(0, 0), t: -1 } }
cluster.py:462: AssertionError
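
Analysis note: the same failure mode from the other direction: here the replayed insert targets the bucket namespace test.system.buckets.test2 directly and is rejected with NamespaceNotFound because the bucket collection is absent when the oplog chunk is applied. A hedged diagnostic sketch (URI and names are assumptions, not part of the test suite) that checks whether both namespaces of a timeseries collection survived a restore:

    # Diagnostic sketch: verify a restored timeseries collection has both its
    # view and its system.buckets backing namespace.
    # The URI and collection names are assumptions for illustration.
    import pymongo

    def timeseries_namespaces_present(client, db_name, coll):
        names = set(client[db_name].list_collection_names())
        return coll in names and f"system.buckets.{coll}" in names

    client = pymongo.MongoClient("mongodb://localhost:27017")
    # Matches the failing entry above: test.system.buckets.test2 was missing.
    print(timeseries_namespaces_present(client, "test", "test2"))
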
Check failure on line 78 in pbm-functional/pytest/test_timeseries.py
github-actions / JUnit Test Report
test_timeseries.test_logical_without_data
pymongo.errors.OperationFailure: got stale shardVersion response from shard rs2 at host rs201:27017 :: caused by :: timestamp mismatch detected for test.test2, full error: {'ok': 0.0, 'errmsg': 'got stale shardVersion response from shard rs2 at host rs201:27017 :: caused by :: timestamp mismatch detected for test.test2', 'code': 13388, 'codeName': 'StaleConfig', 'ns': 'test.test2', 'vReceived': {'t': Timestamp(1713335787, 51), 'e': ObjectId('661f6debca6f85d0c126c9ca'), 'v': Timestamp(1, 3)}, 'vWanted': {'t': Timestamp(0, 0), 'e': ObjectId('000000000000000000000000'), 'v': Timestamp(0, 0)}, 'shardId': 'rs2', '$clusterTime': {'clusterTime': Timestamp(1713335835, 30), 'signature': {'hash': b'\x04u4\xa2\xc6\x99\xf40\x01\xc6367B\xd2\xbc\x0fx?\xe3', 'keyId': 7358721064857239578}}, 'operationTime': Timestamp(1713335835, 30)}
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f21cbb7ab50>
    @pytest.mark.timeout(600,func_only=True)
    def test_logical_without_data(start_cluster,cluster):
        cluster.check_pbm_status()
        assert pymongo.MongoClient(cluster.connection)["test"].command("collstats", "test1").get("sharded", False)
        assert pymongo.MongoClient(cluster.connection)["test"].command("collstats", "test2").get("sharded", False)
        backup=cluster.make_backup("logical")
        pymongo.MongoClient(cluster.connection).drop_database('test')
        cluster.make_restore(backup,check_pbm_status=True)
        assert pymongo.MongoClient(cluster.connection)["test"].command("collstats", "test1").get("sharded", False)
>       assert pymongo.MongoClient(cluster.connection)["test"].command("collstats", "test2").get("sharded", False)
test_timeseries.py:78:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.11/site-packages/pymongo/_csot.py:107: in csot_wrapper
    return func(self, *args, **kwargs)
/usr/lib/python3.11/site-packages/pymongo/database.py:894: in command
    return self._command(
/usr/lib/python3.11/site-packages/pymongo/database.py:743: in _command
    return conn.command(
/usr/lib/python3.11/site-packages/pymongo/helpers.py:322: in inner
    return func(*args, **kwargs)
/usr/lib/python3.11/site-packages/pymongo/pool.py:968: in command
    return command(
/usr/lib/python3.11/site-packages/pymongo/network.py:192: in command
    helpers._check_command_response(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
response = {'$clusterTime': {'clusterTime': Timestamp(1713335835, 30), 'signature': {'hash': b'\x04u4\xa2\xc6\x99\xf40\x01\xc6367...rdVersion response from shard rs2 at host rs201:27017 :: caused by :: timestamp mismatch detected for test.test2', ...}
max_wire_version = 17, allowable_errors = None
parse_write_concern_error = False
    def _check_command_response(
        response: _DocumentOut,
        max_wire_version: Optional[int],
        allowable_errors: Optional[Container[Union[int, str]]] = None,
        parse_write_concern_error: bool = False,
    ) -> None:
        """Check the response to a command for errors."""
        if "ok" not in response:
            # Server didn't recognize our message as a command.
            raise OperationFailure(
                response.get("$err"),  # type: ignore[arg-type]
                response.get("code"),
                response,
                max_wire_version,
            )
        if parse_write_concern_error and "writeConcernError" in response:
            _error = response["writeConcernError"]
            _labels = response.get("errorLabels")
            if _labels:
                _error.update({"errorLabels": _labels})
            _raise_write_concern_error(_error)
        if response["ok"]:
            return
        details = response
        # Mongos returns the error details in a 'raw' object
        # for some errors.
        if "raw" in response:
            for shard in response["raw"].values():
                # Grab the first non-empty raw error from a shard.
                if shard.get("errmsg") and not shard.get("ok"):
                    details = shard
                    break
        errmsg = details["errmsg"]
        code = details.get("code")
        # For allowable errors, only check for error messages when the code is not
        # included.
        if allowable_errors:
            if code is not None:
                if code in allowable_errors:
                    return
            elif errmsg in allowable_errors:
                return
        # Server is "not primary" or "recovering"
        if code is not None:
            if code in _NOT_PRIMARY_CODES:
                raise NotPrimaryError(errmsg, response)
        elif HelloCompat.LEGACY_ERROR in errmsg or "node is recovering" in errmsg:
            raise NotPrimaryError(errmsg, response)
        # Other errors
        # findAndModify with upsert can raise duplicate key error
        if code in (11000, 11001, 12582):
            raise DuplicateKeyError(errmsg, code, response, max_wire_version)
        elif code == 50:
            raise ExecutionTimeout(errmsg, code, response, max_wire_version)
        elif code == 43:
            raise CursorNotFound(errmsg, code, response, max_wire_version)
>       raise OperationFailure(errmsg, code, response, max_wire_version)
E pymongo.errors.OperationFailure: got stale shardVersion response from shard rs2 at host rs201:27017 :: caused by :: timestamp mismatch detected for test.test2, full error: {'ok': 0.0, 'errmsg': 'got stale shardVersion response from shard rs2 at host rs201:27017 :: caused by :: timestamp mismatch detected for test.test2', 'code': 13388, 'codeName': 'StaleConfig', 'ns': 'test.test2', 'vReceived': {'t': Timestamp(1713335787, 51), 'e': ObjectId('661f6debca6f85d0c126c9ca'), 'v': Timestamp(1, 3)}, 'vWanted': {'t': Timestamp(0, 0), 'e': ObjectId('000000000000000000000000'), 'v': Timestamp(0, 0)}, 'shardId': 'rs2', '$clusterTime': {'clusterTime': Timestamp(1713335835, 30), 'signature': {'hash': b'\x04u4\xa2\xc6\x99\xf40\x01\xc6367B\xd2\xbc\x0fx?\xe3', 'keyId': 7358721064857239578}}, 'operationTime': Timestamp(1713335835, 30)}
/usr/lib/python3.11/site-packages/pymongo/helpers.py:230: OperationFailure
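
Analysis note: this failure is not about missing data; the collstats call after the restore is answered with StaleConfig (code 13388) because mongos and shard rs2 disagree on the shard-version timestamp for test.test2 until the routing metadata is refreshed, and such errors can be transient right after a restore. A hedged sketch of a retry wrapper (function name and retry budget are assumptions, not suite code) that would tolerate the mismatch while the router catches up:

    # Hedged sketch: retry a database command that transiently fails with
    # StaleConfig (code 13388) while mongos refreshes its routing table.
    # The wrapper name and retry budget are assumptions, not suite code.
    import time
    import pymongo
    from pymongo.errors import OperationFailure

    STALE_CONFIG = 13388

    def command_with_staleconfig_retry(db, *args, retries=5, delay=1.0, **kwargs):
        for attempt in range(retries):
            try:
                return db.command(*args, **kwargs)
            except OperationFailure as exc:
                if exc.code != STALE_CONFIG or attempt == retries - 1:
                    raise
                time.sleep(delay)  # give mongos time to refresh the shard version

    client = pymongo.MongoClient("mongodb://mongos:27017")
    stats = command_with_staleconfig_retry(client["test"], "collstats", "test2")
    print(stats.get("sharded", False))
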