
PBM-1308 STR #121

GitHub Actions / JUnit Test Report failed May 2, 2024 in 0s

20 tests run, 16 passed, 2 skipped, 2 failed.

Annotations

Check failure on line 86 in pbm-functional/pytest/test_PBM-1297.py


github-actions / JUnit Test Report

test_PBM-1297.test_logical_pitr_PBM_T253

AssertionError: Starting restore 2024-05-02T05:53:24.685828116Z to point-in-time 2024-05-02T05:51:53 from '2024-05-02T05:50:47Z'...Started logical restore.
Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1714629056.1714629110: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1714629081,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"config.databases","Object":[{"Key":"_id","Value":"test"},{"Key":"primary","Value":"rs2"},{"Key":"partitioned","Value":true},{"Key":"version","Value":[{"Key":"uuid","Value":{"Subtype":4,"Data":"GkGmQB6MRdGlKz/PPRielA=="}},{"Key":"timestamp","Value":{"T":1714629080,"I":16}},{"Key":"lastMod","Value":1}]}],"Query":null,"UI":{"Subtype":4,"Data":"dOHuHOzpQSGO139tV4DGmA=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null} | merr <nil>: applyOps: (NamespaceNotFound) cannot apply insert or update operation on a non-existent namespace config.databases: { ts: Timestamp(1714629081, 2), t: 1, v: 2, op: "i", ns: "config.databases", o: { _id: "test", primary: "rs2", partitioned: true, version: { uuid: UUID("1a41a640-1e8c-45d1-a52b-3fcf3d189e94"), timestamp: Timestamp(1714629080, 16), lastMod: 1 } }, ui: UUID("74e1ee1c-ece9-4121-8ed7-7f6d5780c698") }
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7fe605593f50>
newcluster = <cluster.Cluster object at 0x7fe605590310>

    @pytest.mark.timeout(600, func_only=True)
    def test_logical_pitr_PBM_T253(start_cluster,cluster,newcluster):
        cluster.check_pbm_status()
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
        #Create the first database during oplog slicing
        client=pymongo.MongoClient(cluster.connection)
        client.admin.command("enableSharding", "test")
        client.admin.command("shardCollection", "test.test", key={"_id": "hashed"})
        for i in range(100):
            pymongo.MongoClient(cluster.connection)["test"]["test"].insert_one({"doc":i})
        time.sleep(30)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        backup="--time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(30)
        cluster.disable_pitr()
        time.sleep(10)
        cluster.destroy()
    
        newcluster.create()
        newcluster.setup_pbm()
        time.sleep(10)
        newcluster.check_pbm_status()
>       newcluster.make_restore(backup,check_pbm_status=True)

test_PBM-1297.py:86: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <cluster.Cluster object at 0x7fe605590310>
name = '--time=2024-05-02T05:51:53', kwargs = {'check_pbm_status': True}
client = MongoClient(host=['newmongos:27017'], document_class=dict, tz_aware=False, connect=True)
result = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7fe60531a610>, exit_status=1, command=b'time...mestamp: Timestamp(1714629080, 16), lastMod: 1 } }, ui: UUID("74e1ee1c-ece9-4121-8ed7-7f6d5780c698") }\n', _stderr=b'')
n = <testinfra.host.Host docker://newrscfg01>, timeout = 240, error = ''
host = 'newrscfg01', container = <Container: 15389ab07c3d>

    def make_restore(self, name, **kwargs):
        if self.layout == "sharded":
            client = pymongo.MongoClient(self.connection)
            result = client.admin.command("balancerStop")
            client.close()
            Cluster.log("Stopping balancer: " + str(result))
            self.stop_mongos()
        self.stop_arbiters()
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 60
    
        while True:
            if not self.get_status()['running']:
                break
            if time.time() > timeout:
                assert False, "Cannot start restore, another operation running"
            time.sleep(1)
        Cluster.log("Restore started")
        timeout=kwargs.get('timeout', 240)
        result = n.run('timeout ' + str(timeout) + ' pbm restore ' + name + ' --wait')
    
        if result.rc == 0:
            Cluster.log(result.stdout)
        else:
            # try to catch possible failures if timeout exceeded
            error=''
            for host in self.mongod_hosts:
                try:
                    container = docker.from_env().containers.get(host)
                    get_logs = container.exec_run(
                        'cat /var/lib/mongo/pbm.restore.log', stderr=False)
                    if get_logs.exit_code == 0:
                        Cluster.log(
                            "!!!!Possible failure on {}, file pbm.restore.log was found:".format(host))
                        logs = get_logs.output.decode('utf-8')
                        Cluster.log(logs)
                        if '"s":"F"' in logs:
                            error = logs
                except docker.errors.APIError:
                    pass
            if error:
                assert False, result.stdout + result.stderr + "\n" + error
            else:
>               assert False, result.stdout + result.stderr
E               AssertionError: Starting restore 2024-05-02T05:53:24.685828116Z to point-in-time 2024-05-02T05:51:53 from '2024-05-02T05:50:47Z'...Started logical restore.
E               Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1714629056.1714629110: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1714629081,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"config.databases","Object":[{"Key":"_id","Value":"test"},{"Key":"primary","Value":"rs2"},{"Key":"partitioned","Value":true},{"Key":"version","Value":[{"Key":"uuid","Value":{"Subtype":4,"Data":"GkGmQB6MRdGlKz/PPRielA=="}},{"Key":"timestamp","Value":{"T":1714629080,"I":16}},{"Key":"lastMod","Value":1}]}],"Query":null,"UI":{"Subtype":4,"Data":"dOHuHOzpQSGO139tV4DGmA=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null} | merr <nil>: applyOps: (NamespaceNotFound) cannot apply insert or update operation on a non-existent namespace config.databases: { ts: Timestamp(1714629081, 2), t: 1, v: 2, op: "i", ns: "config.databases", o: { _id: "test", primary: "rs2", partitioned: true, version: { uuid: UUID("1a41a640-1e8c-45d1-a52b-3fcf3d189e94"), timestamp: Timestamp(1714629080, 16), lastMod: 1 } }, ui: UUID("74e1ee1c-ece9-4121-8ed7-7f6d5780c698") }

cluster.py:462: AssertionError
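
Note: the oplog entry that fails to apply is the insert into config.databases at Timestamp(1714629081, 2), which sits inside the replayed chunk 1714629056..1714629110. This is consistent with the test flow above: enableSharding for the test database runs only after the base backup, during oplog slicing, so at replay time the config.databases namespace does not yet exist on the restored cluster. A minimal diagnostic sketch for confirming where that entry landed, assuming direct access to the source cluster's config server primary before cluster.destroy() (the host name rscfg01 is an assumption inferred from the new cluster's newrscfg01 naming):

    import pymongo
    from bson.timestamp import Timestamp

    # Hypothetical URI: connect directly to the source config server primary.
    client = pymongo.MongoClient("mongodb://rscfg01:27017/?directConnection=true")
    oplog = client["local"]["oplog.rs"]

    # The failing chunk covers 1714629056..1714629110; list every oplog entry
    # touching config.databases in that window to see when the entry was written
    # relative to the base backup.
    cursor = oplog.find({
        "ns": "config.databases",
        "ts": {"$gte": Timestamp(1714629056, 0), "$lt": Timestamp(1714629111, 0)},
    }).sort("ts", pymongo.ASCENDING)

    for entry in cursor:
        print(entry["ts"], entry["op"], entry.get("o"))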

Check failure on line 109 in pbm-functional/pytest/test_replicaset.py


github-actions / JUnit Test Report

test_replicaset.test_logical_timeseries_PBM_T224

AssertionError: Starting restore 2024-05-02T06:17:25.030693666Z to point-in-time 2024-05-02T06:16:54 from '2024-05-02T06:16:05Z'...Started logical restore.
Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1714630612.1714630642: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1714630613,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"test.system.views","Object":[{"Key":"_id","Value":"test.test2"},{"Key":"viewOn","Value":"system.buckets.test2"},{"Key":"pipeline","Value":[[{"Key":"$_internalUnpackBucket","Value":[{"Key":"timeField","Value":"timestamp"},{"Key":"metaField","Value":"data"},{"Key":"bucketMaxSpanSeconds","Value":3600},{"Key":"exclude","Value":[]}]}]]}],"Query":null,"UI":{"Subtype":4,"Data":"prK5mVUyRCe7uzdGFLaRTw=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null} | merr <nil>: applyOps: (NamespaceNotFound) Failed to apply operation due to missing collection (a6b2b999-5532-4427-bbbb-374614b6914f): { ts: Timestamp(1714630613, 2), t: 1, v: 2, op: "i", ns: "test.system.views", o: { _id: "test.test2", viewOn: "system.buckets.test2", pipeline: [ { $_internalUnpackBucket: { timeField: "timestamp", metaField: "data", bucketMaxSpanSeconds: 3600, exclude: [] } } ] }, ui: UUID("a6b2b999-5532-4427-bbbb-374614b6914f"), h: 0, wall: new Date(0) }
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7fe605633450>

    @pytest.mark.timeout(300,func_only=True)
    def test_logical_timeseries_PBM_T224(start_cluster,cluster):
        cluster.check_pbm_status()
        client=pymongo.MongoClient(cluster.connection)
        mongod_version=client.server_info()["version"]
        if version.parse(mongod_version) < version.parse("5.0.0"):
            pytest.skip("Unsupported version for timeseries")
        pymongo.MongoClient(cluster.connection)["test"].create_collection('test1',timeseries={'timeField':'timestamp','metaField': 'data'})
        for i in range(10):
            pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_one({"timestamp": datetime.now(), "data": i})
            time.sleep(0.1)
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
        time.sleep(30)
        #create new timeseries collection
        pymongo.MongoClient(cluster.connection)["test"].create_collection('test2',timeseries={'timeField':'timestamp','metaField': 'data'})
        for i in range(10):
            pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_one({"timestamp": datetime.now(), "data": i})
            pymongo.MongoClient(cluster.connection)["test"]["test2"].insert_one({"timestamp": datetime.now(), "data": i})
            time.sleep(0.1)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        backup="--time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(30)
        pymongo.MongoClient(cluster.connection).drop_database('test')
>       cluster.make_restore(backup,check_pbm_status=True)

test_replicaset.py:109: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <cluster.Cluster object at 0x7fe605633450>
name = '--time=2024-05-02T06:16:54', kwargs = {'check_pbm_status': True}
result = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7fe6068abd90>, exit_status=1, command=b'time...3600, exclude: [] } } ] }, ui: UUID("a6b2b999-5532-4427-bbbb-374614b6914f"), h: 0, wall: new Date(0) }\n', _stderr=b'')
n = <testinfra.host.Host docker://rs101>, timeout = 240, error = ''
host = 'rs103', container = <Container: c40354ff1c54>

    def make_restore(self, name, **kwargs):
        if self.layout == "sharded":
            client = pymongo.MongoClient(self.connection)
            result = client.admin.command("balancerStop")
            client.close()
            Cluster.log("Stopping balancer: " + str(result))
            self.stop_mongos()
        self.stop_arbiters()
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 60
    
        while True:
            if not self.get_status()['running']:
                break
            if time.time() > timeout:
                assert False, "Cannot start restore, another operation running"
            time.sleep(1)
        Cluster.log("Restore started")
        timeout=kwargs.get('timeout', 240)
        result = n.run('timeout ' + str(timeout) + ' pbm restore ' + name + ' --wait')
    
        if result.rc == 0:
            Cluster.log(result.stdout)
        else:
            # try to catch possible failures if timeout exceeded
            error=''
            for host in self.mongod_hosts:
                try:
                    container = docker.from_env().containers.get(host)
                    get_logs = container.exec_run(
                        'cat /var/lib/mongo/pbm.restore.log', stderr=False)
                    if get_logs.exit_code == 0:
                        Cluster.log(
                            "!!!!Possible failure on {}, file pbm.restore.log was found:".format(host))
                        logs = get_logs.output.decode('utf-8')
                        Cluster.log(logs)
                        if '"s":"F"' in logs:
                            error = logs
                except docker.errors.APIError:
                    pass
            if error:
                assert False, result.stdout + result.stderr + "\n" + error
            else:
>               assert False, result.stdout + result.stderr
E               AssertionError: Starting restore 2024-05-02T06:17:25.030693666Z to point-in-time 2024-05-02T06:16:54 from '2024-05-02T06:16:05Z'...Started logical restore.
E               Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1714630612.1714630642: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1714630613,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"test.system.views","Object":[{"Key":"_id","Value":"test.test2"},{"Key":"viewOn","Value":"system.buckets.test2"},{"Key":"pipeline","Value":[[{"Key":"$_internalUnpackBucket","Value":[{"Key":"timeField","Value":"timestamp"},{"Key":"metaField","Value":"data"},{"Key":"bucketMaxSpanSeconds","Value":3600},{"Key":"exclude","Value":[]}]}]]}],"Query":null,"UI":{"Subtype":4,"Data":"prK5mVUyRCe7uzdGFLaRTw=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null} | merr <nil>: applyOps: (NamespaceNotFound) Failed to apply operation due to missing collection (a6b2b999-5532-4427-bbbb-374614b6914f): { ts: Timestamp(1714630613, 2), t: 1, v: 2, op: "i", ns: "test.system.views", o: { _id: "test.test2", viewOn: "system.buckets.test2", pipeline: [ { $_internalUnpackBucket: { timeField: "timestamp", metaField: "data", bucketMaxSpanSeconds: 3600, exclude: [] } } ] }, ui: UUID("a6b2b999-5532-4427-bbbb-374614b6914f"), h: 0, wall: new Date(0) }

cluster.py:462: AssertionError
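
Note: the entry that fails here is the insert of the view definition for test.test2 into test.system.views, written when create_collection('test2', timeseries=...) ran while PITR slicing was active; the error reports the backing collection (UUID a6b2b999-5532-4427-bbbb-374614b6914f) as missing at replay time. A small illustration of what creating a timeseries collection writes, assuming a scratch MongoDB 5.0+ deployment (the URI is a placeholder, not part of the test setup):

    import pymongo
    from datetime import datetime

    # Placeholder URI for a scratch 5.0+ instance.
    client = pymongo.MongoClient("mongodb://localhost:27017/")
    db = client["test"]

    # A timeseries collection is a view over a system.buckets.* collection;
    # the view definition is stored as a document in test.system.views, which
    # is the document the failed restore tried to replay.
    db.create_collection("test2", timeseries={"timeField": "timestamp", "metaField": "data"})
    db["test2"].insert_one({"timestamp": datetime.now(), "data": 0})

    print(db["system.views"].find_one({"_id": "test.test2"}))
    print(db["system.buckets.test2"].estimated_document_count())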