From 76382365236024f5a4b1e696c66252a1a0992eb2 Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Wed, 4 Oct 2023 16:57:59 -0600 Subject: [PATCH 01/66] uw-246 (#303) --- .github/scripts/tag-check | 8 ++++++++ .github/scripts/tag-create | 6 ++++++ .github/workflows/release.yaml | 15 +++++++++++++++ 3 files changed, 29 insertions(+) create mode 100755 .github/scripts/tag-check create mode 100755 .github/scripts/tag-create create mode 100644 .github/workflows/release.yaml diff --git a/.github/scripts/tag-check b/.github/scripts/tag-check new file mode 100755 index 000000000..77986e85c --- /dev/null +++ b/.github/scripts/tag-check @@ -0,0 +1,8 @@ +#!/bin/bash -eux + +f=recipe/meta.json +tag=v$(jq -r .version $f)-$(jq -r .buildnum $f) +if git ls-remote --tags origin | grep -q "/$tag$"; then + (set +x && echo TAG $tag ALREADY EXISTS) + exit 1 +fi diff --git a/.github/scripts/tag-create b/.github/scripts/tag-create new file mode 100755 index 000000000..d41e03ddf --- /dev/null +++ b/.github/scripts/tag-create @@ -0,0 +1,6 @@ +#!/bin/bash -eux + +f=recipe/meta.json +tag=v$(jq -r .version $f)-$(jq -r .buildnum $f) +git tag $tag +git push --tags diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..2bb4eba0a --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,15 @@ +name: release +on: + push: + branches: + - main +jobs: + release: + name: Release + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Check For Existing Tag + run: .github/scripts/tag-check + - name: Tag + run: .github/scripts/tag-create From 4ddf53cf793e9054f9ce0389c31937d0148e9880 Mon Sep 17 00:00:00 2001 From: Brian Weir <94982354+WeirAE@users.noreply.github.com> Date: Thu, 5 Oct 2023 10:41:11 -0500 Subject: [PATCH 02/66] UW-382 Create JSONSchema section for dependency section tags (#292) * First schema draft with one level, untested * draft 1 to try recursion * Combined multiple solutions * preliminary test version * ChatGPT corrections applied, still test error * Comment added to test git actions * Validator update from PR #291 * Updating to test invalid entry Additional validator changes added * Fixed time regex, looping, some errors anyOf still overmatching * Removed anyOf list and added datadep comprehension * cleaned up rocoto.jsonschema merge * Added missing requirements and patterns * quick redundant requirement edit * fixed 'some' tag * Updated to match loop pattern of UW-381 * corrected placement of dependency to under task * Updated to bring in line with UW-381 Still requires fail condition tests for dependency * fix conflict resolution error * Validation failure tests added * added regex_logged() * Update docs from #302 to fix ReadTheDocs * Overriding jq -r to restore jsonschema * Update to dependentSchemas and 2020 draft * Fixed order standards in test_validator --- .gitignore | 1 - src/uwtools/config/validator.py | 2 +- src/uwtools/resources/rocoto.jsonschema | 461 ++++++++++++------ src/uwtools/tests/config/test_validator.py | 46 +- .../tests/fixtures/hello_workflow.yaml | 2 +- src/uwtools/tests/support.py | 12 + 6 files changed, 375 insertions(+), 149 deletions(-) diff --git a/.gitignore b/.gitignore index 7403e43c5..0d4afb70a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,3 @@ -*.egg-info .coverage __pycache__ *.swp diff --git a/src/uwtools/config/validator.py b/src/uwtools/config/validator.py index 3a3920f16..0dbea610e 100644 --- a/src/uwtools/config/validator.py +++
b/src/uwtools/config/validator.py @@ -77,5 +77,5 @@ def _validation_errors(config: dict, schema: dict) -> List[str]: """ Identify schema-validation errors. """ - validator = jsonschema.Draft7Validator(schema) + validator = jsonschema.Draft202012Validator(schema) return list(validator.iter_errors(config)) diff --git a/src/uwtools/resources/rocoto.jsonschema b/src/uwtools/resources/rocoto.jsonschema index b23e9bdbc..270cf47b5 100644 --- a/src/uwtools/resources/rocoto.jsonschema +++ b/src/uwtools/resources/rocoto.jsonschema @@ -1,13 +1,166 @@ { "$defs": { "datePattern": { - "type": "integer", - "format": "string", - "pattern": "^(\\d{12}) (\\d{12})$" + "type": "integer", + "format": "string", + "pattern": "^(\\d{12}) (\\d{12})$" }, "dateTimePattern": { - "type": "string", - "pattern": "^(\\d{12}) (\\d{12}) (\\d{2}):(\\d{2}):(\\d{2})$" + "type": "string", + "pattern": "^(\\d{12}) (\\d{12}) (\\d{2}):(\\d{2}):(\\d{2})$" + }, + "dependency": { + "type": "object", + "patternProperties": { + "^(and|or|not|nand|nor|xor)(_.*)?$": { + "$ref": "#/$defs/dependency" + }, + "^some(_.*)?$": { + "type": "array", + "patternProperties": { + "^threshold(_.*)?$": { + "type": "number", + "minimum": 0, + "maximum": 1 + } + }, + "properties": { + "$ref": "#/$defs/dependency" + } + }, + "^datadep(_.*)?$": { + "type": "object", + "properties": { + "attrs": { + "type": "object", + "properties": { + "age": { + "$ref": "#/$defs/TimePattern" + }, + "minsize": { + "type": "string" + } + }, + "additionalProperties": false + }, + "text": { + "type": "string" + } + }, + "additionalProperties": false + }, + "^taskdep(_.*)?$": { + "type": "object", + "properties": { + "attrs": { + "type": "object", + "properties": { + "task": { + "type": "string" + }, + "cycle_offset": { + "$ref": "#/$defs/TimePattern" + }, + "state": { + "type": "string", + "enum": [ + "RUNNING", + "Running", + "running", + "SUCCEEDED", + "DEAD", + "Succeeded", + "Dead", + "succeeded", + "dead" + ] + } + }, + "required": [ + "task" + ], + "additionalProperties": false + } + }, + "additionalProperties": false + }, + "^(streq|strneq)(_.*)?$": { + "type": "object", + "properties": { + "left": { + "type": "string" + }, + "right": { + "type": "string" + } + }, + "required": [ + "left", + "right" + ], + "additionalProperties": false + }, + "^cycleexistdep(_.*)?$": { + "type": "object", + "properties": { + "cycle_offset": { + "$ref": "#/$defs/TimePattern" + } + }, + "additionalProperties": false + }, + "^taskvalid(_.*)?$": { + "type": "object", + "properties": { + "task": { + "type": "string" + } + }, + "additionalProperties": false + }, + "^metataskdep(_.*)?$": { + "type": "object", + "properties": { + "attrs": { + "type": "object", + "properties": { + "metatask": { + "type": "string" + }, + "cycle_offset": { + "$ref": "#/$defs/TimePattern" + }, + "state": { + "type": "string", + "enum": [ + "SUCCEEDED", + "DEAD", + "Succeeded", + "Dead", + "succeeded", + "dead" + ] + }, + "threshold": { + "type": "number", + "minimum": 0, + "maximum": 1 + } + }, + "required": [ + "metatask" + ], + "additionalProperties": false + } + }, + "additionalProperties": false + }, + "^timedep(_.*)?$": { + "$ref": "#/$defs/TimePattern" + } + }, + "additionalProperties": false, + "minProperties": 1 }, "metatask": { "type": "object", @@ -35,24 +188,24 @@ "type": "object", "properties": { "account": { - "type": "string" + "type": "string" }, "attrs": { - "cycledefs": { - "type": "string" - }, - "final": { - "type": "boolean" - }, - "maxtries": { - "type": "string", - "format": "number", - 
"minimum": 0 - }, - "throttle": { - "type": "integer", - "minimum": 0 - } + "cycledefs": { + "type": "string" + }, + "final": { + "type": "boolean" + }, + "maxtries": { + "type": "string", + "format": "number", + "minimum": 0 + }, + "throttle": { + "type": "integer", + "minimum": 0 + } }, "command": { "type": "string" @@ -62,46 +215,49 @@ "minimum": 0 }, "deadline": { - "$ref": "#/$defs/datePattern" + "$ref": "#/$defs/datePattern" + }, + "dependency": { + "$ref": "#/$defs/dependency" }, "envar": { - "type": "object" + "type": "object" }, "exclusive": { - "type": "string" + "type": "string" }, "jobname": { - "type": "string" + "type": "string" }, "join": { - "type": "string" + "type": "string" }, "memory": { - "type": "string" + "type": "string" }, "native": { - "type": "string" + "type": "string" }, "nodes": { "type": "string" }, "partition": { - "type": "string" + "type": "string" }, "queue": { - "type": "string" + "type": "string" }, "rewind": { - "type": "string" + "type": "string" }, "shared": { - "type": "string" + "type": "string" }, "stderr": { - "type": "string" + "type": "string" }, "stdout": { - "type": "string" + "type": "string" }, "walltime": { "$ref": "#/$defs/timestr" @@ -112,45 +268,65 @@ "command", "walltime" ], - "anyOf": [ - { - "not": { - "required": ["exclusive"] - } + "dependentSchemas": { + "join": { + "not": { + "required": [ + "stdout" + ] + } }, - { - "not": { - "required": ["shared"] - } - } - ], - "anyOf":[ - { - "not": { - "required": ["join"] - } + "stdout": { + "not": { + "required": [ + "join" + ] + }, + "required": [ + "stderr" + ] }, - { - "not": { - "required": ["stdout"] - } + "stderr": { + "required": [ + "stdout" + ] + }, + "shared": { + "not": { + "required": [ + "exclusive" + ] + } + }, + "exclusive": { + "not": { + "required": [ + "shared" + ] + } } - ], + }, "oneOf": [ { - "required": ["cores"] + "required": [ + "cores" + ] }, { - "required": ["native"] + "required": [ + "native" + ] }, { - "required": ["nodes"] + "required": [ + "nodes" + ] } - ], - "dependencies": { - "stdout": ["stderr"], - "stderr": ["stdout"] - } + ] + }, + "TimePattern": { + "type": "string", + "pattern": "^(\\d{2}:){3}\\d{2}$" }, "timestr": { "type": "string", @@ -169,83 +345,88 @@ }, "type": "object", "properties": { - "workflow": { - "type": "object", - "properties": { - "attrs": { - "type": "object", - "properties": { - "cyclethrottle": { - "type": "integer", - "minimum": 0 - }, - "realtime": { - "type": "boolean" - }, - "scheduler": { - "type": "string", - "enum": [ - "sge", - "lsf", - "lsfcray", - "ll", - "torque", - "moabtorque", - "moab", - "pbspro", - "slurm", - "cobalt" - ] - }, - "taskthrottle": { - "type": "integer", - "minimum": 0 - } - }, - "required": ["realtime", "scheduler"] - }, - "cycledefs": { - "type": "object", - "properties": { - "groupname": { - "type": "array", - "items": { - "$ref": "#/$defs/dateTimePattern" - } - }, - "required": ["items"] - } - }, - "entities": { - "type": "object" - }, - "log": { - "type": "string", - "format": "uri" - }, - "tasks": { - "type": "object", - "patternProperties": { - "^metatask(_[a-z0-9_]+)?$": { - "$ref": "#/$defs/metatask" - }, - "^task(_[a-z0-9_]+)?$": { - "$ref": "#/$defs/task" - } - }, - "additionalProperties": false, - "minProperties": 1 - } - }, - "additionalProperties": false, - "required": [ - "cycledefs", - "log", - "tasks" - ] + "workflow": { + "type": "object", + "properties": { + "attrs": { + "type": "object", + "properties": { + "cyclethrottle": { + "type": "integer", + "minimum": 0 + }, + "realtime": { 
+ "type": "boolean" + }, + "scheduler": { + "type": "string", + "enum": [ + "sge", + "lsf", + "lsfcray", + "ll", + "torque", + "moabtorque", + "moab", + "pbspro", + "slurm", + "cobalt" + ] + }, + "taskthrottle": { + "type": "integer", + "minimum": 0 } + }, + "required": [ + "realtime", + "scheduler" + ] + }, + "cycledefs": { + "type": "object", + "properties": { + "groupname": { + "type": "array", + "items": { + "$ref": "#/$defs/dateTimePattern" + } + }, + "required": [ + "items" + ] + } + }, + "entities": { + "type": "object" }, - "required": [ - "workflow" - ] + "log": { + "type": "string", + "format": "uri" + }, + "tasks": { + "type": "object", + "patternProperties": { + "^metatask(_[a-z0-9_]+)?$": { + "$ref": "#/$defs/metatask" + }, + "^task(_[a-z0-9_]+)?$": { + "$ref": "#/$defs/task" + } + }, + "additionalProperties": false, + "minProperties": 1 + } + }, + "additionalProperties": false, + "required": [ + "cycledefs", + "log", + "tasks" + ] + } + }, + "required": [ + "workflow" + ] } \ No newline at end of file diff --git a/src/uwtools/tests/config/test_validator.py b/src/uwtools/tests/config/test_validator.py index d7db7929f..3f6d0f31a 100644 --- a/src/uwtools/tests/config/test_validator.py +++ b/src/uwtools/tests/config/test_validator.py @@ -12,7 +12,7 @@ from pytest import fixture from uwtools.config import validator -from uwtools.tests.support import logged +from uwtools.tests.support import logged, regex_logged # Support functions @@ -140,21 +140,46 @@ def rocoto_assets(): "tasks": { "metatask": { "var": {"member": "foo bar baz"}, - "task": {"cores": 88, "command": "some-command", "walltime": "00:01:00"}, - } + "task": { + "cores": 88, + "command": "some-command", + "walltime": "00:01:00", + "dependency": { + "taskdep": { + "attrs": { + "task": "hello", + }, + }, + }, + }, + }, }, } } return kwargs, config -def test_validate_yaml_rocoto_invalid_dependency(rocoto_assets, caplog): +def test_validate_yaml_rocoto_invalid_dependency_bool(rocoto_assets, caplog): kwargs, config = rocoto_assets - config["workflow"]["tasks"]["metatask"]["task"].update({"stdout": "hello"}) + config["workflow"]["tasks"]["metatask"]["task"]["dependency"].update( + {"maybe": {"taskdep": {"attrs": {"task": "hello"}}}} + ) + with patch.object(validator, "YAMLConfig") as YAMLConfig: + YAMLConfig().data = config + assert not validator.validate_yaml(**kwargs) + assert regex_logged(caplog, "'maybe' does not match any of the regexes") + + +def test_validate_yaml_rocoto_invalid_dependency_no_task(rocoto_assets, caplog): + kwargs, config = rocoto_assets + del config["workflow"]["tasks"]["metatask"]["task"]["dependency"]["taskdep"]["attrs"]["task"] + config["workflow"]["tasks"]["metatask"]["task"]["dependency"]["taskdep"]["attrs"][ + "state" + ] = "RUNNING" with patch.object(validator, "YAMLConfig") as YAMLConfig: YAMLConfig().data = config assert not validator.validate_yaml(**kwargs) - assert logged(caplog, "'stderr' is a dependency of 'stdout'") + assert logged(caplog, "'task' is a required property") def test_validate_yaml_rocoto_invalid_no_command(rocoto_assets, caplog): @@ -184,6 +209,15 @@ def test_validate_yaml_rocoto_invalid_no_var(rocoto_assets, caplog): assert logged(caplog, "'var' is a required property") +def test_validate_yaml_rocoto_invalid_required_stderr(rocoto_assets, caplog): + kwargs, config = rocoto_assets + config["workflow"]["tasks"]["metatask"]["task"].update({"stdout": "hello"}) + with patch.object(validator, "YAMLConfig") as YAMLConfig: + YAMLConfig().data = config + assert not 
validator.validate_yaml(**kwargs) + assert logged(caplog, "'stderr' is a required property") + + def test_validate_yaml_rocoto_invalid_type(rocoto_assets, caplog): kwargs, config = rocoto_assets config["workflow"]["tasks"]["metatask"]["task"]["cores"] = "string" diff --git a/src/uwtools/tests/fixtures/hello_workflow.yaml b/src/uwtools/tests/fixtures/hello_workflow.yaml index 3f5bb1672..9db8657d7 100644 --- a/src/uwtools/tests/fixtures/hello_workflow.yaml +++ b/src/uwtools/tests/fixtures/hello_workflow.yaml @@ -34,4 +34,4 @@ dependency: taskdep: attrs: - task: hello + task: hello \ No newline at end of file diff --git a/src/uwtools/tests/support.py b/src/uwtools/tests/support.py index 8444c306c..81467e4f5 100644 --- a/src/uwtools/tests/support.py +++ b/src/uwtools/tests/support.py @@ -1,5 +1,6 @@ # pylint: disable=missing-function-docstring +import re from importlib import resources from pathlib import Path @@ -88,3 +89,14 @@ def logged(caplog: LogCaptureFixture, msg: str) -> bool: :param msg: The message sought. """ return msg in [record.message for record in caplog.records] + + +def regex_logged(caplog: LogCaptureFixture, msg: str) -> bool: + """ + Does the given text occur in the log capture? Unlike logged(), which requires an exact match + against a whole message, this searches for the text within each captured message. + + :param caplog: The pytest log capture. + :param msg: The message sought. + """ + pattern = re.compile(re.escape(msg)) + return any(pattern.search(record.message) for record in caplog.records) From 4868c49ec250dfc4272650ea27489d5b51246afc Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Thu, 5 Oct 2023 10:24:21 -0600 Subject: [PATCH 03/66] uw-317 (#305) --- .github/scripts/publish | 11 +++++++++++ .github/workflows/release.yaml | 8 ++++++++ .gitignore | 5 +++-- recipe/meta.json | 4 ++-- recipe/meta.yaml | 4 ++-- 5 files changed, 26 insertions(+), 6 deletions(-) create mode 100755 .github/scripts/publish diff --git a/.github/scripts/publish b/.github/scripts/publish new file mode 100755 index 000000000..0c209d096 --- /dev/null +++ b/.github/scripts/publish @@ -0,0 +1,11 @@ +#!/bin/bash -eux + +set +ux +source conda/etc/profile.d/conda.sh +conda activate +set -ux +f=recipe/meta.json +pkg=$(jq -r .name $f)-$(jq -r .version $f)-$(jq -r .build $f).tar.bz2 +for x in $(find conda/conda-bld -type f -name $pkg); do + anaconda -t $ANACONDA_TOKEN upload $x +done diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 2bb4eba0a..f0ef13139 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,4 +1,6 @@ name: release +env: + ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }} on: push: branches: @@ -9,7 +11,13 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + - name: Install conda + run: .github/scripts/install-conda + - name: Make Package + run: .github/scripts/make-package - name: Check For Existing Tag run: .github/scripts/tag-check - name: Tag run: .github/scripts/tag-create + - name: Publish + run: .github/scripts/publish diff --git a/.gitignore b/.gitignore index 0d4afb70a..fa050f56e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ +*.DS_Store +*.egg-info +*.swp .coverage __pycache__ -*.swp -*.DS_Store diff --git a/recipe/meta.json b/recipe/meta.json index ebdce24da..dcafd3514 100644 --- a/recipe/meta.json +++ b/recipe/meta.json @@ -1,5 +1,5 @@ { - "build": "pyh28a5fc4_0", + "build": "pyh233a6f4_0", "buildnum": "0", "name": "uwtools", "packages": { @@ -30,5 +30,5 @@ "pyyaml =6.0.*" ] }, - "version": "1.0.0" + "version":
"1.1.0" } diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 6602f6fbb..b58f4102a 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -1,12 +1,12 @@ package: name: uwtools - version: 1.0.0 + version: 1.1.0 source: path: ../src build: include_recipe: false noarch: python - number: 0 + number: 0 # <= always reset to 0 when version changes requirements: host: - pip From 2bcee703a4ca100672239902663db3f489ccc433 Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Thu, 5 Oct 2023 12:05:22 -0600 Subject: [PATCH 04/66] disconnect-jenkins (#308) Remove Jenkins CI config --- .cicd/Jenkinsfile | 75 ------------------------------------- .cicd/jobs/lint_and_test.sh | 25 ------------- 2 files changed, 100 deletions(-) delete mode 100644 .cicd/Jenkinsfile delete mode 100755 .cicd/jobs/lint_and_test.sh diff --git a/.cicd/Jenkinsfile b/.cicd/Jenkinsfile deleted file mode 100644 index 4d0ce26b2..000000000 --- a/.cicd/Jenkinsfile +++ /dev/null @@ -1,75 +0,0 @@ -pipeline { - agent none - stages { - stage('Run UFS Workflow Tools') { - parallel { - stage('Run UFS Workflow tools on Cheyenne') { - agent { - label "cheyenne" - } - steps { - cleanWs() - checkout changelog: false, poll: false, scm: scmGit(branches: [[name: 'develop']], extensions: [], gitTool: 'Default', userRemoteConfigs: [[url: 'https://github.com/ufs-community/workflow-tools']]) - sh ''' - module use /glade/work/epicufsrt/contrib/miniconda3/modulefiles - module load miniconda3/4.12.0 - conda activate workflow_tools - - ${WORKSPACE}/.cicd/jobs/lint_and_test.sh - ''' - } - } - - stage('Run UFS Workflow tools on Jet') { - agent { - label "jet" - } - steps { - cleanWs() - checkout changelog: false, poll: false, scm: scmGit(branches: [[name: 'develop']], extensions: [], gitTool: 'Default', userRemoteConfigs: [[url: 'https://github.com/ufs-community/workflow-tools']]) - sh ''' - module use /mnt/lfs4/HFIP/hfv3gfs/role.epic/miniconda3/modulefiles - module load miniconda3/4.12.0 - conda activate workflow_tools - - ${WORKSPACE}/.cicd/jobs/lint_and_test.sh - ''' - } - } - - stage('Run UFS Workflow tools on Hera') { - agent { - label "hera" - } - steps { - cleanWs() - checkout changelog: false, poll: false, scm: scmGit(branches: [[name: 'develop']], extensions: [], gitTool: 'Default', userRemoteConfigs: [[url: 'https://github.com/ufs-community/workflow-tools']]) - sh ''' - module use /scratch1/NCEPDEV/nems/role.epic/miniconda3/modulefiles - module load miniconda3/4.12.0 - conda activate workflow_tools - - ${WORKSPACE}/.cicd/jobs/lint_and_test.sh - ''' - } - } - stage('Run UFS Workflow tools on Orion') { - agent { - label "Orion" - } - steps { - cleanWs() - checkout changelog: false, poll: false, scm: scmGit(branches: [[name: 'develop']], extensions: [], gitTool: 'Default', userRemoteConfigs: [[url: 'https://github.com/ufs-community/workflow-tools']]) - sh ''' - module use /work/noaa/epic-ps/role-epic-ps/miniconda3/modulefiles - module load miniconda3/4.12.0 - conda activate workflow_tools - - ${WORKSPACE}/.cicd/jobs/lint_and_test.sh - ''' - } - } - } - } - } -} diff --git a/.cicd/jobs/lint_and_test.sh b/.cicd/jobs/lint_and_test.sh deleted file mode 100755 index 047c042d8..000000000 --- a/.cicd/jobs/lint_and_test.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -ux - -# Setup PYTHONPATH for uwtools -export PYTHONPATH=${PWD}:${PWD}/src - -# Check for pytest and pylint -for pkg in pytest pylint ; do - if hash $pkg 2>/dev/null; then - echo "$pkg installed, moving on!". 
- else - echo "$pkg is not installed" - exit 1 - fi -done - -# Run tests -pytest -k "not test_validate_yaml_salad" | tee -a ${WORKSPACE}/results.txt -status=${PIPESTATUS[0]} -test $status == 0 || ( echo "pytest failed" && exit $status ) - -# Lint -pylint --ignore-imports=y tests scripts src/uwtools -status=$? -test $status == 0 || ( echo "linting failed" && exit $status ) - From cbf3ef3dbaefcc3aa8c31899a530674df107759d Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Thu, 5 Oct 2023 15:15:55 -0600 Subject: [PATCH 05/66] ci-publish-fixes (#311) CI publish fixes from main branch --- .github/scripts/install-conda | 2 +- .github/scripts/publish | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/scripts/install-conda b/.github/scripts/install-conda index bdd882f1e..196bd3e26 100755 --- a/.github/scripts/install-conda +++ b/.github/scripts/install-conda @@ -7,5 +7,5 @@ bash $(basename $url) -bfp conda set -e +ux source conda/etc/profile.d/conda.sh conda activate - conda install -q -y -c maddenp --repodata-fn repodata.json condev=0.4.0 + conda install -q -y -c maddenp --repodata-fn repodata.json anaconda-client condev ) diff --git a/.github/scripts/publish b/.github/scripts/publish index 0c209d096..d48a2f358 100755 --- a/.github/scripts/publish +++ b/.github/scripts/publish @@ -5,7 +5,7 @@ source conda/etc/profile.d/conda.sh conda activate set -ux f=recipe/meta.json -pkg=$(jq -r .name $f)-$(jq -r .version $f)-$(jq -r .build $f).tar.bz2 -for x in $(find conda/conda-bld -type f -name $pkg); do +glob="$(jq -r .name $f)-$(jq -r .version $f)-*_$(jq -r .buildnum $f).tar.bz2" +for x in $(find conda/conda-bld -type f -name "$glob"); do anaconda -t $ANACONDA_TOKEN upload $x done From 8cf22314705ffd976d77e80125a6d6a11ff9115e Mon Sep 17 00:00:00 2001 From: Christina Holt <56881914+christinaholtNOAA@users.noreply.github.com> Date: Thu, 5 Oct 2023 16:49:34 -0600 Subject: [PATCH 06/66] UW-312 Congealing the FV3Forecast Driver (#291) Refactored the FV3Forecast methods that create model_configure, namelist, and field table files to call the same private base function. Updated docstrings in the forecast driver to follow the newer format. Added cycle and date to the input arguments for uw forecast run. Cleaned up the run() method. Added a method that sets the MPI environment variables before running the MPI command. Added a submit_job method to the scheduler class and a dump method to the BatchScript class. Updated the forecast schema to reflect the sections needed to "fully" configure a forecast. Caveat: it is fully configured only for the standalone regional use case.
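As an illustration of the refactor, each create_* method in the driver now reduces to a call to the shared helper; this is a sketch of the pattern as it appears in the forecast.py changes below (NMLConfig and OptionalPath are existing uwtools types):

    def create_namelist(self, output_path: OptionalPath) -> None:
        # Delegate to the shared helper, selecting the config class and the
        # "namelist" block of the forecast config.
        self._create_user_updated_config(
            config_class=NMLConfig,
            config_values=self._config.get("namelist", {}),
            output_path=output_path,
        )

The helper applies any user-supplied update_values on top of an optional base_file, dereferences Jinja2 expressions, and dumps the result to output_path; when no base_file is given, the update values are dumped directly.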
--- src/uwtools/cli.py | 31 +- src/uwtools/config/core.py | 42 ++- src/uwtools/drivers/driver.py | 103 +++++- src/uwtools/drivers/forecast.py | 295 ++++++++------- src/uwtools/resources/FV3Forecast.jsonschema | 141 ++++++-- src/uwtools/scheduler.py | 121 ++++--- src/uwtools/tests/config/test_core.py | 4 +- src/uwtools/tests/drivers/test_driver.py | 9 +- src/uwtools/tests/drivers/test_forecast.py | 228 +++++++----- .../tests/fixtures/FV3_GFS_v16_update.yaml | 40 +- src/uwtools/tests/fixtures/expt_dir.yaml | 11 +- src/uwtools/tests/fixtures/forecast.yaml | 39 +- .../fruit_config_similar_for_fcst.yaml | 10 + src/uwtools/tests/test_cli.py | 10 +- src/uwtools/tests/test_scheduler.py | 341 +++++++++--------- 15 files changed, 850 insertions(+), 575 deletions(-) create mode 100644 src/uwtools/tests/fixtures/fruit_config_similar_for_fcst.yaml diff --git a/src/uwtools/cli.py b/src/uwtools/cli.py index 2f5a125ad..3cac4774f 100644 --- a/src/uwtools/cli.py +++ b/src/uwtools/cli.py @@ -2,6 +2,7 @@ Modal CLI. """ +import datetime import logging import sys from argparse import ArgumentParser as Parser @@ -272,8 +273,10 @@ def _add_subparser_forecast_run(subparsers: Subparsers) -> SubmodeChecks: parser = _add_subparser(subparsers, STR.run, "Run a forecast") required = parser.add_argument_group(TITLE_REQ_ARG) _add_arg_config_file(required) + _add_arg_cycle(required) _add_arg_model(required, choices=["FV3"]) optional = _basic_setup(parser) + _add_arg_batch_script(optional) _add_arg_dry_run(optional) checks = _add_args_quiet_and_verbose(optional) return checks @@ -295,8 +298,11 @@ def _dispatch_forecast_run(args: Namespace) -> bool: :param args: Parsed command-line args. """ forecast_class = uwtools.drivers.forecast.CLASSES[args.forecast_model] - forecast_class(config_file=args.config_file).run() - return True + return forecast_class( + batch_script=args.batch_script, config_file=args.config_file, dry_run=args.dry_run + ).run( + cycle=args.cycle, + ) # Mode template @@ -366,6 +372,17 @@ def _dispatch_template_render(args: Namespace) -> bool: # pylint: disable=missing-function-docstring +def _add_arg_batch_script(group: Group, required: bool = False) -> None: + group.add_argument( + _switch(STR.batch_script), + help="Path to output batch file (defaults to stdout)", + metavar="PATH", + required=required, + default=None, + type=str, + ) + + def _add_arg_config_file(group: Group) -> None: group.add_argument( _switch(STR.cfgfile), @@ -377,6 +394,15 @@ def _add_arg_config_file(group: Group) -> None: ) +def _add_arg_cycle(group: Group) -> None: + group.add_argument( + "--cycle", + help="The cycle in ISO8601 format", + required=True, + type=datetime.datetime.fromisoformat, + ) + + def _add_arg_dry_run(group: Group) -> None: group.add_argument( _switch(STR.dryrun), @@ -670,6 +696,7 @@ class _STR: A lookup map for CLI-related strings. 
""" + batch_script: str = "batch_script" cfgfile: str = "config_file" compare: str = "compare" config: str = "config" diff --git a/src/uwtools/config/core.py b/src/uwtools/config/core.py index df38af16d..0f125bade 100644 --- a/src/uwtools/config/core.py +++ b/src/uwtools/config/core.py @@ -125,11 +125,14 @@ def characterize_values(self, values: dict, parent: str) -> Tuple[list, list, li c, e, t = self.characterize_values(val, f"{parent}{key}.") complete, empty, template = complete + c, empty + e, template + t elif isinstance(val, list): - complete.append(f" {parent}{key}") for item in val: if isinstance(item, dict): c, e, t = self.characterize_values(item, parent) complete, empty, template = complete + c, empty + e, template + t + complete.append(f" {parent}{key}") + elif "{{" in str(val) or "{%" in str(val): + template.append(f" {parent}{key}: {val}") + break elif "{{" in str(val) or "{%" in str(val): template.append(f" {parent}{key}: {val}") elif val == "" or val is None: @@ -265,9 +268,12 @@ def dereference( msg = f"{func_name}: {tmpl} raised {err}" logging.debug(msg) + for tmpl, rendered in zip(templates, data): + v_str = v_str.replace(tmpl, rendered) + # Put the full template line back together as it was, filled or not, and make a # guess on its intended type. - ref_dict[key] = self.reify_scalar_str("".join(data)) + ref_dict[key] = self.reify_scalar_str(v_str) def dereference_all(self) -> None: """ @@ -280,9 +286,9 @@ def dereference_all(self) -> None: prev = copy.deepcopy(self.data) @abstractmethod - def dump(self, path: DefinitePath) -> None: + def dump(self, path: OptionalPath) -> None: """ - Dumps the config as a file. + Dumps the config to stdout or a file. :param path: Path to dump config to. """ @@ -291,7 +297,7 @@ def dump(self, path: DefinitePath) -> None: @abstractmethod def dump_dict(path: OptionalPath, cfg: dict, opts: Optional[ns] = None) -> None: """ - Dumps a provided config dictionary as a file. + Dumps a provided config dictionary to stdout or a file. :param path: Path to dump config to. :param cfg: The in-memory config object to dump. @@ -342,7 +348,7 @@ def reify_scalar_str(self, s: str) -> Union[bool, float, int, str]: r = yaml.safe_load(s) except yaml.YAMLError: return s - return s if type(r) in [dict, list] else r + return r def update_values(self, src: Union[dict, Config], dst: Optional[Config] = None): """ @@ -427,9 +433,9 @@ def _load(self, config_file: OptionalPath) -> dict: # Public methods - def dump(self, path: DefinitePath) -> None: + def dump(self, path: OptionalPath) -> None: """ - Dumps the config as an INI file. + Dumps the config in INI format. :param path: Path to dump config to. """ @@ -438,7 +444,7 @@ def dump(self, path: DefinitePath) -> None: @staticmethod def dump_dict(path: OptionalPath, cfg: dict, opts: Optional[ns] = None) -> None: """ - Dumps a provided config dictionary as an INI file. + Dumps a provided config dictionary in INI format. :param path: Path to dump config to. :param cfg: The in-memory config object to dump. @@ -479,9 +485,9 @@ def _load(self, config_file: OptionalPath) -> dict: # Public methods - def dump(self, path: DefinitePath) -> None: + def dump(self, path: OptionalPath) -> None: """ - Dumps the config as a Fortran namelist file. + Dumps the config in Fortran namelist format. :param path: Path to dump config to. 
""" @@ -490,7 +496,7 @@ def dump(self, path: DefinitePath) -> None: @staticmethod def dump_dict(path: OptionalPath, cfg: dict, opts: Optional[ns] = None) -> None: """ - Dumps a provided config dictionary as a Fortran namelist file. + Dumps a provided config dictionary in Fortran namelist format. :param path: Path to dump config to. :param cfg: The in-memory config object to dump. @@ -564,9 +570,9 @@ def _yaml_loader(self) -> type[yaml.SafeLoader]: # Public methods - def dump(self, path: DefinitePath) -> None: + def dump(self, path: OptionalPath) -> None: """ - Dumps the config as a YAML file. + Dumps the config in YAML format. :param path: Path to dump config to. """ @@ -575,7 +581,7 @@ def dump(self, path: DefinitePath) -> None: @staticmethod def dump_dict(path: OptionalPath, cfg: dict, opts: Optional[ns] = None) -> None: """ - Dumps a provided config dictionary as a YAML file. + Dumps a provided config dictionary in YAML format. :param path: Path to dump config to. :param cfg: The in-memory config object to dump. @@ -593,9 +599,9 @@ class FieldTableConfig(YAMLConfig): # Public methods - def dump(self, path: DefinitePath) -> None: + def dump(self, path: OptionalPath) -> None: """ - Dumps the config as a Field Table file. + Dumps the config in Field Table format. :param path: Path to dump config to. """ @@ -604,7 +610,7 @@ def dump(self, path: DefinitePath) -> None: @staticmethod def dump_dict(path: OptionalPath, cfg: dict, opts: Optional[ns] = None) -> None: """ - Dumps a provided config dictionary as a Field Table file. + Dumps a provided config dictionary in Field Table format. FMS field and tracer managers must be registered in an ASCII table called 'field_table'. This table lists field type, target model and methods the querying model will ask for. See diff --git a/src/uwtools/drivers/driver.py b/src/uwtools/drivers/driver.py index 0497ee400..843b24488 100644 --- a/src/uwtools/drivers/driver.py +++ b/src/uwtools/drivers/driver.py @@ -2,13 +2,18 @@ Provides an abstract class representing drivers for various NWP tools. """ +import logging +import os +import shutil from abc import ABC, abstractmethod from collections.abc import Mapping -from typing import Optional +from datetime import datetime +from typing import Any, Dict, Optional, Type, Union from uwtools.config import validator -from uwtools.config.core import YAMLConfig -from uwtools.scheduler import BatchScript +from uwtools.config.core import Config, YAMLConfig +from uwtools.scheduler import BatchScript, JobScheduler +from uwtools.types import OptionalPath class Driver(ABC): @@ -30,14 +35,18 @@ def __init__( self._dry_run = dry_run self._batch_script = batch_script self._validate() - self._config = YAMLConfig(config_file=config_file) + self._experiment_config = YAMLConfig(config_file=config_file) + self._platform_config = self._experiment_config.get("platform", {}) + self._config: Dict[str, Any] = {} # Public methods @abstractmethod - def batch_script(self, platform_resources: Mapping) -> BatchScript: + def batch_script(self) -> BatchScript: """ Create a script for submission to the batch scheduler. + + :return: The batch script object with all run commands needed for executing the program. """ @abstractmethod @@ -53,22 +62,40 @@ def requirements(self) -> None: """ @abstractmethod - def resources(self, platform: dict) -> Mapping: + def resources(self) -> Mapping: """ Parses the config and returns a formatted dictionary for the batch script. 
""" @abstractmethod - def run(self) -> bool: + def run(self, cycle: datetime) -> bool: """ Run the NWP tool. + + :param cycle: The time stamp of the cycle to run. + :return: Did the driver exit with success status? """ - @abstractmethod - def run_cmd(self, *args, run_cmd: str, exec_name: str) -> str: + def run_cmd(self) -> str: """ The command-line command to run the NWP tool. + + :return: The fully formed string that executes the program + """ + run_cmd = self._platform_config["mpicmd"] + exec_name = self._config["exec_name"] + run_time_args = self._config["runtime_info"].get("mpi_args", []) + args_str = " ".join(str(arg) for arg in run_time_args) + return f"{run_cmd} {args_str} {exec_name}" + + @property + def scheduler(self) -> JobScheduler: + """ + The job scheduler speficied by the platform information. + + :return: The scheduler object """ + return JobScheduler.get_scheduler(self.resources()) @property @abstractmethod @@ -77,11 +104,69 @@ def schema_file(self) -> str: The path to the file containing the schema to validate the config file against. """ + @staticmethod + def stage_files( + run_directory: str, files_to_stage: Dict[str, Union[list, str]], link_files: bool = False + ) -> None: + """ + Creates destination files in run directory and copies or links contents from the source path + provided. Source paths could be provided as a single path or a list of paths to be staged in + a common directory. + + :param run_directory: Path of desired run directory. + :param files_to_stage: File names in the run directory (keys) and their source paths + (values). + :param link_files: Whether to link or copy the files. + """ + link_or_copy = os.symlink if link_files else shutil.copyfile + for dst_rel_path, src_path_or_paths in files_to_stage.items(): + dst_path = os.path.join(run_directory, dst_rel_path) + if isinstance(src_path_or_paths, list): + Driver.stage_files( + dst_path, + {os.path.basename(src): src for src in src_path_or_paths}, + link_files, + ) + else: + link_or_copy(src_path_or_paths, dst_path) # type: ignore + msg = f"File {src_path_or_paths} staged as {dst_path}" + logging.info(msg) + # Private methods + @staticmethod + def _create_user_updated_config( + config_class: Type[Config], config_values: dict, output_path: OptionalPath + ) -> None: + """ + The standard procedure for updating a file of a configuration class type with user-provided + values. + + :param config_class: The Config subclass matching the configure file type. + :param config_values: The in-memory configuration object to update base values with. + :param output_path: Optional path to dump file to. + """ + + # User-supplied values that override any settings in the base + # file. + user_values = config_values.get("update_values", {}) + + if base_file := config_values.get("base_file"): + config_obj = config_class(base_file) + config_obj.update_values(user_values) + config_obj.dereference_all() + config_obj.dump(output_path) + else: + config_class.dump_dict(path=output_path, cfg=user_values) + + msg = f"Configure file {output_path} created" + logging.info(msg) + def _validate(self) -> bool: """ Validate the user-supplied config file. + + :return: Was the input configuration file valid against its schema? 
""" return validator.validate_yaml( config_file=self._config_file, diff --git a/src/uwtools/drivers/forecast.py b/src/uwtools/drivers/forecast.py index 47221f69e..fc9b2fd71 100644 --- a/src/uwtools/drivers/forecast.py +++ b/src/uwtools/drivers/forecast.py @@ -5,18 +5,18 @@ import logging import os -import shutil import sys from collections.abc import Mapping -from functools import cached_property +from datetime import datetime from importlib import resources from pathlib import Path -from typing import Dict +from typing import Dict, Optional -from uwtools.config.core import FieldTableConfig, NMLConfig, realize_config +from uwtools.config.core import FieldTableConfig, NMLConfig, YAMLConfig from uwtools.drivers.driver import Driver -from uwtools.scheduler import BatchScript, JobScheduler -from uwtools.utils.file import FORMAT, handle_existing +from uwtools.scheduler import BatchScript +from uwtools.types import DefinitePath, OptionalPath +from uwtools.utils.file import handle_existing from uwtools.utils.processing import execute @@ -25,37 +25,43 @@ class FV3Forecast(Driver): A driver for the FV3 forecast model. """ - # Properties - @cached_property - def job_scheduler(self) -> str: + def __init__( + self, + config_file: str, + dry_run: bool = False, + batch_script: Optional[str] = None, + ): """ - Get the name of the job scheduler. - - Currently hard-coded pending additional methods + Initialize the Forecast Driver. """ - return "slurm" + + super().__init__(config_file=config_file, dry_run=dry_run, batch_script=batch_script) + self._config = self._experiment_config["forecast"] # Public methods - def batch_script(self, platform_resources: Mapping) -> BatchScript: + def batch_script(self) -> BatchScript: """ - Write to disk, for submission to the batch scheduler, a script to run FV3. + Prepare batch script contents for interaction with system scheduler. + + :return: The batch script object with all run commands needed for executing the program. """ - return JobScheduler.get_scheduler(platform_resources).batch_script + pre_run = self._mpi_env_variables("\n") + bs = self.scheduler.batch_script + bs.append(pre_run) + bs.append(self.run_cmd()) + return bs @staticmethod - def create_directory_structure(run_directory, exist_act="delete"): + def create_directory_structure(run_directory: DefinitePath, exist_act: str = "delete") -> None: """ Collects the name of the desired run directory, and has an optional flag for what to do if the run directory specified already exists. Creates the run directory and adds subdirectories INPUT and RESTART. Verifies creation of all directories. - Args: - run_directory: path of desired run directory - exist_act: - could be any of 'delete', 'rename', 'quit' - - how program should act if run directory exists - - default is to delete old run directory - Returns: None + :param run_directory: Path of desired run directory. + :param exist_act: Could be any of 'delete', 'rename', 'quit'. Sets how the program responds + to a preexisting run directory. The default is to delete the old run directory. """ # Caller should only provide correct argument. @@ -71,7 +77,7 @@ def create_directory_structure(run_directory, exist_act="delete"): # Delete or rename directory if it exists. - handle_existing(run_directory, exist_act) + handle_existing(str(run_directory), exist_act) # Create new run directory with two required subdirectories. 
@@ -80,53 +86,42 @@ def create_directory_structure(run_directory, exist_act="delete"): logging.info("Creating directory: %s", path) os.makedirs(path) - @staticmethod - def create_field_table(update_obj: dict, outfldtab_file, base_file=None): + def create_field_table(self, output_path: OptionalPath) -> None: """ - Uses an object with user supplied values and an optional base file to create an output field - table file. Will "dereference" the base file. - - Args: - update_obj: in-memory dictionary initialized by object. - values override any settings in base file - outfldtab_file: location of output field table - base_file: optional path to file to use as a base file + Uses the forecast config object to create a Field Table. + + :param output_path: Optional location of output field table. """ - if base_file: - config_obj = FieldTableConfig(base_file) - config_obj.update_values(update_obj) - config_obj.dereference_all() - config_obj.dump(outfldtab_file) - else: - # Dump update object to a Field Table file: - FieldTableConfig.dump_dict(path=outfldtab_file, cfg=update_obj) - - msg = f"Namelist file {outfldtab_file} created" - logging.info(msg) + self._create_user_updated_config( + config_class=FieldTableConfig, + config_values=self._config.get("field_table", {}), + output_path=output_path, + ) - @staticmethod - def create_namelist(update_obj, outnml_file, base_file=None): + def create_model_configure(self, output_path: OptionalPath) -> None: """ - Uses an object with user supplied values and an optional namelist base file to create an - output namelist file. Will "dereference" the base file. + Uses the forecast config object to create a model_configure. - Args: - update_obj: in-memory dictionary initialized by object. - values override any settings in base file - outnml_file: location of output namelist - base_file: optional path to file to use as a base file + :param output_path: Optional location of the output model_configure file. """ + self._create_user_updated_config( + config_class=YAMLConfig, + config_values=self._config.get("model_configure", {}), + output_path=output_path, + ) - if base_file: - config_obj = NMLConfig(base_file) - config_obj.update_values(update_obj) - config_obj.dereference_all() - config_obj.dump(outnml_file) - else: - update_obj.dump(outnml_file) + def create_namelist(self, output_path: OptionalPath) -> None: + """ + Uses an object with user supplied values and an optional namelist base file to create an + output namelist file. Will "dereference" the base file. - msg = f"Namelist file {outnml_file} created" - logging.info(msg) + :param output_path: Optional location of output namelist. + """ + self._create_user_updated_config( + config_class=NMLConfig, + config_values=self._config.get("namelist", {}), + output_path=output_path, + ) def output(self) -> None: """ @@ -138,75 +133,59 @@ def requirements(self) -> None: ??? """ - def resources(self, platform: dict) -> Mapping: + def resources(self) -> Mapping: """ - Parses the config and returns a formatted dictionary for the batch script. + Parses the experiment configuration to provide the information needed for the batch script. + + :return: A formatted dictionary needed to create a batch script """ - # Add required fields to platform. - # Currently supporting only slurm scheduler. 
+ return { - "account": platform["account"], - "nodes": 1, - "queue": "batch", - "scheduler": self.job_scheduler, - "tasks_per_node": 1, - "walltime": "00:01:00", + "account": self._experiment_config["user"]["account"], + "scheduler": self._experiment_config["platform"]["scheduler"], + **self._config["jobinfo"], } - def run(self) -> bool: + def run(self, cycle: datetime) -> bool: """ Runs FV3 either locally or via a batch-script submission. :return: Did the FV3 run exit with success status? """ - # Read in the config file. - forecast_config = self._config["forecast"] - platform_config = self._config["platform"] - # Prepare directories. - run_directory = forecast_config["RUN_DIRECTORY"] + run_directory = self._config["run_dir"] self.create_directory_structure(run_directory, "delete") - static_files = forecast_config["STATIC"] - self.stage_files(run_directory, static_files, link_files=True) - cycledep_files = forecast_config["CYCLEDEP"] - self.stage_files(run_directory, cycledep_files, link_files=True) - - # Create the job script. - platform_resources = self.resources(platform_config) - batch_script = self.batch_script(platform_resources) - args = "--export=None" - run_command = self.run_cmd( - args, - run_cmd=platform_config["MPICMD"], - exec_name=forecast_config["EXEC_NAME"], - ) - batch_script.append(run_command) + self._prepare_config_files(Path(run_directory)) - if self._dry_run: - # Apply switch to allow user to view the run command of config. - # This will not run the job. - logging.info("Batch Script:") - logging.info(batch_script) - return True + self._config["cycle-dependent"].update(self._define_boundary_files()) + + for file_category in ["static", "cycle-dependent"]: + self.stage_files(run_directory, self._config[file_category], link_files=True) - # Run the job. if self._batch_script is not None: + batch_script = self.batch_script() outpath = Path(run_directory) / self._batch_script - with open(outpath, "w+", encoding="utf-8") as file_: - print(batch_script, file=file_) - batch_command = JobScheduler.get_scheduler(platform_resources).submit_command - result = execute(cmd=f"{batch_command} {outpath}") - else: - result = execute(cmd=run_command) - return result.success - def run_cmd(self, *args, run_cmd: str, exec_name: str) -> str: - """ - Constructs the command to run FV3. - """ - args_str = " ".join(str(arg) for arg in args) - return f"{run_cmd} {args_str} {exec_name}" + if self._dry_run: + # Apply switch to allow user to view the run command of config. + # This will not run the job. + logging.info("Batch Script:") + batch_script.dump(None) + return True + + batch_script.dump(outpath) + return self.scheduler.submit_job(outpath) + + pre_run = self._mpi_env_variables(" ") + full_cmd = f"{pre_run} {self.run_cmd()}" + if self._dry_run: + logging.info("Would run: ") + print(full_cmd, file=sys.stdout) + return True + + result = execute(cmd=full_cmd) + return result.success @property def schema_file(self) -> str: @@ -216,51 +195,65 @@ def schema_file(self) -> str: with resources.as_file(resources.files("uwtools.resources")) as path: return (path / "FV3Forecast.jsonschema").as_posix() - @staticmethod - def stage_files( - run_directory: str, files_to_stage: Dict[str, str], link_files: bool = False - ) -> None: + # Private methods + + def _boundary_hours(self, lbcs_config: Dict) -> tuple[int, int, int]: """ - Takes in run directory and dictionary of file names and paths that need to be staged in the - run directory. 
+ Prepares parameters to generate the lateral boundary condition (LBCS) forecast hours from an + external input data source, e.g. GFS, RAP, etc. - Creates dst file in run directory and copies or links contents from the src path provided. + :return: The offset hours between the cycle and the external input data, the hours between + LBC ingest, and the last hour of the external input data forecast """ + offset = abs(lbcs_config["offset"]) + end_hour = self._config["length"] + offset + 1 + return offset, lbcs_config["interval_hours"], end_hour - os.makedirs(run_directory, exist_ok=True) - for dst_fn, src_path in files_to_stage.items(): - dst_path = os.path.join(run_directory, dst_fn) - if link_files: - os.symlink(src_path, dst_path) - else: - shutil.copyfile(src_path, dst_path) - msg = f"File {src_path} staged in run directory at {dst_fn}" - logging.info(msg) + def _define_boundary_files(self) -> Dict[str, str]: + """ + Maps the prepared boundary conditions to the appropriate hours for the forecast. - # Private methods + :return: A dict of boundary file names mapped to source input file paths + """ + boundary_files = {} + lbcs_config = self._experiment_config["preprocessing"]["lateral_boundary_conditions"] + boundary_file_template = lbcs_config["output_file_template"] + offset, interval, endhour = self._boundary_hours(lbcs_config) + for tile in self._config["tiles"]: + for boundary_hour in range(offset, endhour, interval): + forecast_hour = boundary_hour - offset + link_name = f"INPUT/gfs_bndy.tile{tile}.{forecast_hour:03d}.nc" + boundary_file_path = boundary_file_template.format( + tile=tile, + forecast_hour=boundary_hour, + ) + boundary_files[link_name] = boundary_file_path + + return boundary_files + + def _prepare_config_files(self, run_directory: Path) -> None: + """ + Collect all the configuration files needed for FV3. + """ + + self.create_field_table(run_directory / "field_table") + self.create_model_configure(run_directory / "model_configure") + self.create_namelist(run_directory / "input.nml") - def _create_model_config(self, base_file: str, outconfig_file: str) -> None: + def _mpi_env_variables(self, delimiter: str = " ") -> str: """ - Collects all the user inputs required to create a model config file, calling the existing - model config tools. This will be unique to the app being run and will appropriately parse - subsequent stages of the workflow. Defaults will be filled in if not provided by the user. - Equivalent references to config_default.yaml or config.community.yaml from SRW will need to - be made for the other apps. - - Args: - base_file: Path to base config file - outconfig_file: Path to output configuration file + Set the environment variables needed for the MPI job.
+ + :return: A bash string of environment variables """ - realize_config( - input_file=base_file, - input_format=FORMAT.yaml, - output_file=outconfig_file, - output_format=FORMAT.yaml, - values_file=self._config_file, - values_format=FORMAT.yaml, - ) - msg = f"Config file {outconfig_file} created" - logging.info(msg) + envvars = { + "KMP_AFFINITY": "scatter", + "OMP_NUM_THREADS": self._config["runtime_info"].get("threads", 1), + "OMP_STACKSIZE": "512m", + "MPI_TYPE_DEPTH": 20, + "ESMF_RUNTIME_COMPLIANCECHECK": "OFF:depth=4", + } + return delimiter.join([f"{k}={v}" for k, v in envvars.items()]) CLASSES = {"FV3": FV3Forecast} diff --git a/src/uwtools/resources/FV3Forecast.jsonschema b/src/uwtools/resources/FV3Forecast.jsonschema index 9a26b236a..f9b1df8f4 100644 --- a/src/uwtools/resources/FV3Forecast.jsonschema +++ b/src/uwtools/resources/FV3Forecast.jsonschema @@ -3,53 +3,128 @@ "description": "This document is to validate user-defined FV3 forecast config files", "type": "object", "properties": { - "platform": { - "description": "Expected resources for the given platform", + "forecast": { + "description": "parameters of the forecast", "type": "object", "properties": { - "MPICMD": { - "type": "string", - "enum": [ - "mpirun", - "mpiexec", - "srun" - ] + "cycle-dependent": { + "type": "object", + "propertyNames": { + "type": "string" + } }, - "ACCOUNT": { + "exec_name": { "type": "string" - } - } - }, - "forecast": { - "description": "parameters of the forecast", - "type": "object", - "properties": { - "MODEL": { - "type": "string", }, - "EXEC_NAME": { - "type": "string", + "field_table": { + "type": "object", + "properties": { + "base_file": { + "type": "string", + "format": "uri" + }, + "update_values": { + "type": "object" + } + } }, - "RUN_DIRECTORY": { - "type": "string", - "format": "uri" + "length": { + "type": "integer", + "minimum": 1 }, - "CYCLEDEP": { - "type": "string", - "format": "uri" + "model": { + "type": "string" }, - "STATIC": { + "model_configure": { + "type": "object", + "properties": { + "base_file": { + "type": "string", + "format": "uri" + }, + "update_values": { + "type": "object" + } + } + }, + "mpicmd": { + "type": "string" + }, + "namelist": { + "type": "object", + "properties": { + "base_file": { + "type": "string", + "format": "uri" + }, + "update_values": { + "type": "object" + } + } + }, + "run_dir": { "type": "string", "format": "uri" }, - "VERBOSE": { + "static": { + "type": "object", + "propertyNames": { + "type": "string" + } + }, + "tiles": { + "type": "array", + "items": { + "type": "integer" + } + } + } + }, + "platform": { + "type": "object", + "properties": { + "scheduler": { "type": "string", "enum": [ - "true", - "false" + "lsf", + "pbs", + "slurm" ] + }, + "required": ["mpicmd"] + } + }, + "preprocessing": { + "type": "object", + "properties": { + "lateral_boundary_conditions": { + "type": "object", + "properties": { + "interval_hours": { + "type": "number", + "minimum": 1, + "default": 3 + }, + "offset": { + "type": "number", + "minimum": 0, + "default": 0 + }, + "output_file_template": { + "type": "string", + "format": "uri" + } + } + } + } + }, + "user": { + "type": "object", + "properties": { + "account": { + "type": "string" } } - } + } } -} \ No newline at end of file +} diff --git a/src/uwtools/scheduler.py b/src/uwtools/scheduler.py index b3c83a73b..c55d8ea8f 100644 --- a/src/uwtools/scheduler.py +++ b/src/uwtools/scheduler.py @@ -10,7 +10,10 @@ from collections.abc import Mapping from typing import Any, Dict, List +from uwtools.types 
import DefinitePath, OptionalPath from uwtools.utils import Memory +from uwtools.utils.file import writable +from uwtools.utils.processing import execute NONEISH = [None, "", " ", "None", "none", False] IGNORED_ATTRIBS = ["scheduler"] @@ -24,8 +27,6 @@ class RequiredAttribs: ACCOUNT = "account" QUEUE = "queue" WALLTIME = "walltime" - NODES = "nodes" - TASKS_PER_NODE = "tasks_per_node" class OptionalAttribs: @@ -33,17 +34,21 @@ class OptionalAttribs: Key for optional attributes. """ - SHELL = "shell" + CORES = "cores" + DEBUG = "debug" + EXCLUSIVE = "exclusive" + EXPORT = "export" JOB_NAME = "jobname" - STDOUT = "stdout" - STDERR = "stderr" JOIN = "join" - PARTITION = "partition" - THREADS = "threads" MEMORY = "memory" - DEBUG = "debug" - EXCLUSIVE = "exclusive" + NODES = "nodes" + PARTITION = "partition" PLACEMENT = "placement" + SHELL = "shell" + STDERR = "stderr" + STDOUT = "stdout" + TASKS_PER_NODE = "tasks_per_node" + THREADS = "threads" class BatchScript(UserList): @@ -69,6 +74,15 @@ def content(self, line_separator: str = "\n") -> str: """ return line_separator.join(self) + def dump(self, output_file: OptionalPath) -> None: + """ + Write a batch script to an output location. + + :param output_file: Path to the file to write the batch script to + """ + with writable(output_file) as f: + print(str(self).strip(), file=f) + class JobScheduler(UserDict): """ @@ -129,9 +143,8 @@ def batch_script(self) -> BatchScript: self._map[key](value) if callable(self._map[key]) else self._map[key] ) scheduler_value = "" if callable(self._map[key]) else value - directive = ( - f"{self.prefix} {scheduler_flag}{self.key_value_separator}{scheduler_value}" - ) + key_value_separator = "" if callable(self._map[key]) else self.key_value_separator + directive = f"{self.prefix} {scheduler_flag}{key_value_separator}{scheduler_value}" known.append(directive.strip()) unknown = [ @@ -178,6 +191,16 @@ def get_scheduler(props: Mapping) -> JobScheduler: ) from error return scheduler(props) + def submit_job(self, script_path: DefinitePath) -> bool: + """ + Submits a job to the scheduler. + + :param script_path: Path to the batch script. + :return: Did the run exit with a success status? + """ + result = execute(cmd=f"{self.submit_command} {script_path}") + return result.success + class Slurm(JobScheduler): """ @@ -185,29 +208,25 @@ class Slurm(JobScheduler): """ prefix = "#SBATCH" + submit_command = "sbatch" _map = { RequiredAttribs.ACCOUNT: "--account", - RequiredAttribs.NODES: "--nodes", RequiredAttribs.QUEUE: "--qos", - RequiredAttribs.TASKS_PER_NODE: "--ntasks-per-node", RequiredAttribs.WALLTIME: "--time", + OptionalAttribs.CORES: "--ntasks", + OptionalAttribs.EXCLUSIVE: lambda x: "--exclusive", + OptionalAttribs.EXPORT: "--export", OptionalAttribs.JOB_NAME: "--job-name", - OptionalAttribs.STDOUT: "--output", - OptionalAttribs.STDERR: "--error", + OptionalAttribs.MEMORY: "--mem", + OptionalAttribs.NODES: "--nodes", OptionalAttribs.PARTITION: "--partition", + OptionalAttribs.STDERR: "--error", + OptionalAttribs.STDOUT: "--output", + OptionalAttribs.TASKS_PER_NODE: "--ntasks-per-node", OptionalAttribs.THREADS: "--cpus-per-task", - OptionalAttribs.MEMORY: "--mem", - OptionalAttribs.EXCLUSIVE: "--exclusive", } - @property - def submit_command(self) -> str: - """ - Returns the command for running a batch script. 
- """ - return "sbatch" - class PBS(JobScheduler): """ @@ -216,35 +235,29 @@ class PBS(JobScheduler): prefix = "#PBS" key_value_separator = " " + submit_command = "qsub" _map = { RequiredAttribs.ACCOUNT: "-A", - RequiredAttribs.NODES: lambda x: f"-l select={x}", + OptionalAttribs.NODES: lambda x: f"-l select={x}", RequiredAttribs.QUEUE: "-q", + OptionalAttribs.TASKS_PER_NODE: "mpiprocs", RequiredAttribs.WALLTIME: "-l walltime=", - RequiredAttribs.TASKS_PER_NODE: "mpiprocs", - OptionalAttribs.SHELL: "-S", + OptionalAttribs.DEBUG: lambda x: f"-l debug={str(x).lower()}", OptionalAttribs.JOB_NAME: "-N", + OptionalAttribs.MEMORY: "mem", + OptionalAttribs.SHELL: "-S", OptionalAttribs.STDOUT: "-o", - OptionalAttribs.DEBUG: lambda x: f"-l debug={str(x).lower()}", OptionalAttribs.THREADS: "ompthreads", - OptionalAttribs.MEMORY: "mem", } - @property - def submit_command(self) -> str: - """ - Returns the command for running a batch script. - """ - return "qsub" - def pre_process(self) -> Dict[str, Any]: output = self.data output.update(self._select(output)) output.update(self._placement(output)) - output.pop(RequiredAttribs.TASKS_PER_NODE, None) - output.pop(RequiredAttribs.NODES, None) + output.pop(OptionalAttribs.TASKS_PER_NODE, None) + output.pop(OptionalAttribs.NODES, None) output.pop(OptionalAttribs.THREADS, None) output.pop(OptionalAttribs.MEMORY, None) output.pop("exclusive", None) @@ -256,15 +269,15 @@ def _select(self, items) -> Dict[str, Any]: """ Select logic. """ - total_nodes = items.get(RequiredAttribs.NODES, "") - tasks_per_node = items.get(RequiredAttribs.TASKS_PER_NODE, "") + total_nodes = items.get(OptionalAttribs.NODES, "") + tasks_per_node = items.get(OptionalAttribs.TASKS_PER_NODE, "") # Set default threads=1 to address job variability with PBS threads = items.get(OptionalAttribs.THREADS, 1) memory = items.get(OptionalAttribs.MEMORY, "") select = [ f"{total_nodes}", - f"{self._map[RequiredAttribs.TASKS_PER_NODE]}={tasks_per_node}", + f"{self._map[OptionalAttribs.TASKS_PER_NODE]}={tasks_per_node}", f"{self._map[OptionalAttribs.THREADS]}={threads}", f"ncpus={int(tasks_per_node) * int(threads)}", ] @@ -308,39 +321,33 @@ class LSF(JobScheduler): prefix = "#BSUB" key_value_separator = " " + submit_command = "bsub" _map = { - RequiredAttribs.QUEUE: "-q", RequiredAttribs.ACCOUNT: "-P", + OptionalAttribs.NODES: lambda x: f"-n {x}", + RequiredAttribs.QUEUE: "-q", + OptionalAttribs.TASKS_PER_NODE: lambda x: f"-R span[ptile={x}]", RequiredAttribs.WALLTIME: "-W", - RequiredAttribs.NODES: lambda x: f"-n {x}", - RequiredAttribs.TASKS_PER_NODE: lambda x: f"-R span[ptile={x}]", - OptionalAttribs.SHELL: "-L", OptionalAttribs.JOB_NAME: "-J", + OptionalAttribs.MEMORY: lambda x: f"-R rusage[mem={x}]", + OptionalAttribs.SHELL: "-L", OptionalAttribs.STDOUT: "-o", OptionalAttribs.THREADS: lambda x: f"-R affinity[core({x})]", - OptionalAttribs.MEMORY: lambda x: f"-R rusage[mem={x}]", } - @property - def submit_command(self) -> str: - """ - Returns the command for running a batch script. 
- """ - return "bsub" - def pre_process(self) -> Dict[str, Any]: items = self.data # LSF requires threads to be set (if None is provided, default to 1) items[OptionalAttribs.THREADS] = items.get(OptionalAttribs.THREADS, 1) - nodes = items.get(RequiredAttribs.NODES, "") - tasks_per_node = items.get(RequiredAttribs.TASKS_PER_NODE, "") + nodes = items.get(OptionalAttribs.NODES, "") + tasks_per_node = items.get(OptionalAttribs.TASKS_PER_NODE, "") memory = items.get(OptionalAttribs.MEMORY, None) if memory is not None: mem_value = Memory(memory).convert("KB") items[self._map[OptionalAttribs.MEMORY](mem_value)] = "" - items[RequiredAttribs.NODES] = int(tasks_per_node) * int(nodes) + items[OptionalAttribs.NODES] = int(tasks_per_node) * int(nodes) items.pop(OptionalAttribs.MEMORY, None) return items diff --git a/src/uwtools/tests/config/test_core.py b/src/uwtools/tests/config/test_core.py index b0d928dd0..a8e969828 100644 --- a/src/uwtools/tests/config/test_core.py +++ b/src/uwtools/tests/config/test_core.py @@ -748,7 +748,7 @@ def test_values_needed_yaml(caplog): Keys that have unfilled Jinja2 templates: FV3GFS.nomads.url: https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/prod/gfs.{{ yyyymmdd }}/{{ hh }}/atmos - FV3GFS.nomads.file_names.grib2.anl: ['gfs.t{{ hh }}z.atmanl.nemsio','gfs.t{{ hh }}z.sfcanl.nemsio'] + FV3GFS.nomads.file_names.grib2.anl: ['gfs.t{{ hh }}z.atmanl.nemsio', 'gfs.t{{ hh }}z.sfcanl.nemsio'] FV3GFS.nomads.file_names.grib2.fcst: ['gfs.t{{ hh }}z.pgrb2.0p25.f{{ fcst_hr03d }}'] Keys that are set to empty: @@ -861,7 +861,7 @@ def test_Config___repr__(capsys, nml_cfgobj): def test_Config_characterize_values(nml_cfgobj): d = {1: "", 2: None, 3: "{{ n }}", 4: {"a": 88}, 5: [{"b": 99}], 6: "string"} complete, empty, template = nml_cfgobj.characterize_values(values=d, parent="p") - assert complete == [" p4", " p4.a", " p5", " pb", " p6"] + assert complete == [" p4", " p4.a", " pb", " p5", " p6"] assert empty == [" p1", " p2"] assert template == [" p3: {{ n }}"] diff --git a/src/uwtools/tests/drivers/test_driver.py b/src/uwtools/tests/drivers/test_driver.py index 9af14b7f1..1b77d0895 100644 --- a/src/uwtools/tests/drivers/test_driver.py +++ b/src/uwtools/tests/drivers/test_driver.py @@ -3,6 +3,7 @@ Tests for uwtools.drivers.driver module. """ +import datetime import logging from collections.abc import Mapping from unittest.mock import patch @@ -19,7 +20,7 @@ class ConcreteDriver(Driver): Driver subclass for testing purposes. """ - def batch_script(self, platform_resources): + def batch_script(self): pass def output(self): @@ -28,13 +29,13 @@ def output(self): def requirements(self): pass - def resources(self, platform: dict) -> Mapping: + def resources(self) -> Mapping: return {} - def run(self) -> bool: + def run(self, cycle: datetime.date) -> bool: return True - def run_cmd(self, *args, run_cmd, exec_name): + def run_cmd(self, *args): pass @property diff --git a/src/uwtools/tests/drivers/test_forecast.py b/src/uwtools/tests/drivers/test_forecast.py index 73a11b88d..52f4f0a87 100644 --- a/src/uwtools/tests/drivers/test_forecast.py +++ b/src/uwtools/tests/drivers/test_forecast.py @@ -2,45 +2,41 @@ """ Tests for forecast driver. 
""" +import datetime as dt import logging +import os from pathlib import Path from unittest.mock import patch import pytest from pytest import fixture, raises +from uwtools import scheduler from uwtools.config.core import NMLConfig, YAMLConfig from uwtools.drivers import forecast from uwtools.drivers.driver import Driver from uwtools.drivers.forecast import FV3Forecast from uwtools.tests.support import compare_files, fixture_path -from uwtools.utils.file import FORMAT -@fixture -def slurm_props(): - return { - "account": "account_name", - "nodes": 1, - "queue": "batch", - "scheduler": "slurm", - "tasks_per_node": 1, - "walltime": "00:01:00", - } - - -def test_batch_script(slurm_props): +def test_batch_script(): expected = """ -#SBATCH --account=account_name +#SBATCH --account=user_account #SBATCH --nodes=1 #SBATCH --ntasks-per-node=1 #SBATCH --qos=batch #SBATCH --time=00:01:00 +KMP_AFFINITY=scatter +OMP_NUM_THREADS=1 +OMP_STACKSIZE=512m +MPI_TYPE_DEPTH=20 +ESMF_RUNTIME_COMPLIANCECHECK=OFF:depth=4 +srun --export=NONE test_exec.py """.strip() config_file = fixture_path("forecast.yaml") with patch.object(Driver, "_validate", return_value=True): forecast = FV3Forecast(config_file=config_file) - assert forecast.batch_script(platform_resources=slurm_props).content() == expected + assert forecast.batch_script().content() == expected def test_schema_file(): @@ -56,20 +52,26 @@ def test_schema_file(): assert path.is_file() -def test_create_config(tmp_path): +def test_create_model_configure(tmp_path): """ Test that providing a YAML base input file and a config file will create and update YAML config file. """ - config_file = fixture_path("fruit_config_similar.yaml") - input_file = fixture_path("fruit_config.yaml") + config_file = fixture_path("fruit_config_similar_for_fcst.yaml") + base_file = fixture_path("fruit_config.yaml") + fcst_config_file = tmp_path / "fcst.yml" + + fcst_config = YAMLConfig(config_file) + fcst_config["forecast"]["model_configure"]["base_file"] = base_file + fcst_config.dump(fcst_config_file) + output_file = (tmp_path / "test_config_from_yaml.yaml").as_posix() with patch.object(FV3Forecast, "_validate", return_value=True): - forecast_obj = FV3Forecast(config_file=config_file) - forecast_obj._create_model_config(base_file=input_file, outconfig_file=output_file) - expected = YAMLConfig(input_file) - expected.update_values(YAMLConfig(config_file)) + forecast_obj = FV3Forecast(config_file=fcst_config_file) + forecast_obj.create_model_configure(output_file) + expected = YAMLConfig(base_file) + expected.update_values(YAMLConfig(config_file)["forecast"]["model_configure"]["update_values"]) expected_file = tmp_path / "expected_yaml.yaml" expected.dump(expected_file) assert compare_files(expected_file, output_file) @@ -121,17 +123,22 @@ def test_create_field_table_with_base_file(create_field_table_update_obj, tmp_pa base_file = fixture_path("FV3_GFS_v16.yaml") outfldtbl_file = tmp_path / "field_table_two.FV3_GFS" expected = fixture_path("field_table_from_base.FV3_GFS") - FV3Forecast.create_field_table(create_field_table_update_obj, outfldtbl_file, base_file) + config_file = tmp_path / "fcst.yaml" + forecast_config = create_field_table_update_obj + forecast_config["forecast"]["field_table"]["base_file"] = base_file + forecast_config.dump(config_file) + FV3Forecast(config_file).create_field_table(outfldtbl_file) assert compare_files(expected, outfldtbl_file) -def test_create_field_table_without_base_file(create_field_table_update_obj, tmp_path): +def 
test_create_field_table_without_base_file(tmp_path): """ Tests create_field_table without optional base file. """ outfldtbl_file = tmp_path / "field_table_one.FV3_GFS" expected = fixture_path("field_table_from_input.FV3_GFS") - FV3Forecast.create_field_table(create_field_table_update_obj, outfldtbl_file) + config_file = fixture_path("FV3_GFS_v16_update.yaml") + FV3Forecast(config_file).create_field_table(outfldtbl_file) assert compare_files(expected, outfldtbl_file) @@ -140,23 +147,18 @@ def test_create_directory_structure_bad_existing_act(): FV3Forecast.create_directory_structure(run_directory="/some/path", exist_act="foo") -def test_create_model_config(tmp_path): +def test_create_model_configure_call_private(tmp_path): basefile = str(tmp_path / "base.yaml") infile = fixture_path("forecast.yaml") outfile = str(tmp_path / "out.yaml") for path in infile, basefile: Path(path).touch() - with patch.object(forecast, "realize_config") as realize_config: + with patch.object(Driver, "_create_user_updated_config") as _create_user_updated_config: with patch.object(FV3Forecast, "_validate", return_value=True): - FV3Forecast(config_file=infile)._create_model_config( - outconfig_file=outfile, base_file=basefile - ) - assert realize_config.call_args.kwargs["input_file"] == basefile - assert realize_config.call_args.kwargs["input_format"] == FORMAT.yaml - assert realize_config.call_args.kwargs["output_file"] == outfile - assert realize_config.call_args.kwargs["output_format"] == FORMAT.yaml - assert realize_config.call_args.kwargs["values_file"] == infile - assert realize_config.call_args.kwargs["values_format"] == FORMAT.yaml + FV3Forecast(config_file=infile).create_model_configure(outfile) + _create_user_updated_config.assert_called_with( + config_class=YAMLConfig, config_values={}, output_path=outfile + ) @fixture @@ -164,13 +166,23 @@ def create_namelist_assets(tmp_path): return NMLConfig(fixture_path("simple.nml")), tmp_path / "create_out.nml" -def test_create_namelist_with_base_file(create_namelist_assets): +def test_create_namelist_with_base_file(create_namelist_assets, tmp_path): """ Tests create_namelist method with optional base file. """ update_obj, outnml_file = create_namelist_assets base_file = fixture_path("simple3.nml") - FV3Forecast.create_namelist(update_obj, outnml_file, base_file) + fcst_config = { + "forecast": { + "namelist": { + "base_file": base_file, + "update_values": update_obj.data, + }, + }, + } + fcst_config_file = tmp_path / "fcst.yml" + YAMLConfig.dump_dict(cfg=fcst_config, path=fcst_config_file) + FV3Forecast(fcst_config_file).create_namelist(outnml_file) expected = """ &salad base = 'kale' @@ -188,12 +200,21 @@ def test_create_namelist_with_base_file(create_namelist_assets): assert out_file.read() == expected -def test_create_namelist_without_base_file(create_namelist_assets): +def test_create_namelist_without_base_file(create_namelist_assets, tmp_path): """ Tests create_namelist method without optional base file. 
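+    Here the namelist contents come entirely from the update_values block of
+    the forecast config, since no base_file is given.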
""" update_obj, outnml_file = create_namelist_assets - FV3Forecast.create_namelist(update_obj, str(outnml_file)) + fcst_config = { + "forecast": { + "namelist": { + "update_values": update_obj.data, + }, + }, + } + fcst_config_file = tmp_path / "fcst.yml" + YAMLConfig.dump_dict(cfg=fcst_config, path=fcst_config_file) + FV3Forecast(fcst_config_file).create_namelist(outnml_file) expected = """ &salad base = 'kale' @@ -214,16 +235,15 @@ def test_forecast_run_cmd(): config_file = fixture_path("forecast.yaml") with patch.object(FV3Forecast, "_validate", return_value=True): fcstobj = FV3Forecast(config_file=config_file) - hera_expected = "srun --export=ALL test_exec.py" - assert hera_expected == fcstobj.run_cmd( - "--export=ALL", run_cmd="srun", exec_name="test_exec.py" - ) - cheyenne_expected = "mpirun -np 4 test_exec.py" - assert cheyenne_expected == fcstobj.run_cmd( - "-np", 4, run_cmd="mpirun", exec_name="test_exec.py" - ) - wcoss2_expected = "mpiexec -n 4 -ppn 8 --cpu-bind core -depth 2 test_exec.py" - assert wcoss2_expected == fcstobj.run_cmd( + srun_expected = "srun --export=NONE test_exec.py" + fcstobj._config["runtime_info"]["mpi_args"] = ["--export=NONE"] + assert srun_expected == fcstobj.run_cmd() + mpirun_expected = "mpirun -np 4 test_exec.py" + fcstobj._experiment_config["platform"]["mpicmd"] = "mpirun" + fcstobj._config["runtime_info"]["mpi_args"] = ["-np", 4] + assert mpirun_expected == fcstobj.run_cmd() + fcstobj._experiment_config["platform"]["mpicmd"] = "mpiexec" + fcstobj._config["runtime_info"]["mpi_args"] = [ "-n", 4, "-ppn", @@ -232,17 +252,17 @@ def test_forecast_run_cmd(): "core", "-depth", 2, - run_cmd="mpiexec", - exec_name="test_exec.py", - ) + ] + mpiexec_expected = "mpiexec -n 4 -ppn 8 --cpu-bind core -depth 2 test_exec.py" + assert mpiexec_expected == fcstobj.run_cmd() -@pytest.mark.parametrize("section", ["static", "cycledep"]) +@pytest.mark.parametrize("section", ["static", "cycle-dependent"]) @pytest.mark.parametrize("link_files", [True, False]) def test_stage_files(tmp_path, section, link_files): """ - Tests that files from static or cycledep sections of the config obj are being staged (copied or - linked) to the run directory. + Tests that files from static or cycle-dependent sections of the config obj are being staged + (copied or linked) to the run directory. """ run_directory = tmp_path / "run" @@ -252,9 +272,12 @@ def test_stage_files(tmp_path, section, link_files): # create the test files. 
src_directory.mkdir() for dst_fn, src_path in files_to_stage.items(): - fixed_src_path = src_directory / Path(src_path).name - files_to_stage[dst_fn] = str(fixed_src_path) - fixed_src_path.touch() + if isinstance(src_path, list): + files_to_stage[dst_fn] = [str(src_directory / Path(sp).name) for sp in src_path] + else: + fixed_src_path = src_directory / Path(src_path).name + files_to_stage[dst_fn] = str(fixed_src_path) + fixed_src_path.touch() # Test that none of the destination files exist yet: for dst_fn in files_to_stage.keys(): assert not (run_directory / dst_fn).is_file() @@ -262,68 +285,81 @@ def test_stage_files(tmp_path, section, link_files): FV3Forecast.create_directory_structure(run_directory) FV3Forecast.stage_files(run_directory, files_to_stage, link_files=link_files) # Test that all of the destination files now exist: - for dst_fn in files_to_stage.keys(): - if link_files: - assert (run_directory / dst_fn).is_symlink() + link_or_file = Path.is_symlink if link_files else Path.is_file + for dst_rel_path, src_paths in files_to_stage.items(): + if isinstance(src_paths, list): + dst_paths = [run_directory / dst_rel_path / os.path.basename(sp) for sp in src_paths] + assert all(link_or_file(d_fn) for d_fn in dst_paths) else: - assert (run_directory / dst_fn).is_file() + assert link_or_file(run_directory / dst_rel_path) + if section == "cycle-dependent": + assert link_or_file(run_directory / "INPUT" / "gfs_bndy.tile7.006.nc") @fixture def fv3_run_assets(tmp_path): batch_script = tmp_path / "batch.sh" config_file = fixture_path("forecast.yaml") - config = { - "platform": { - "MPICMD": "srun", - "account": "user_account", - }, - "forecast": { - "MODEL": "FV3", - "EXEC_NAME": "test_exec.py", - "RUN_DIRECTORY": tmp_path.as_posix(), - "CYCLEDEP": {"foo-file": str(tmp_path / "foo")}, - "STATIC": {"static-foo-file": str(tmp_path / "foo")}, - "VERBOSE": "False", - }, - } - return batch_script, config_file, config + config = YAMLConfig(config_file) + config["forecast"]["run_dir"] = tmp_path.as_posix() + config["forecast"]["cycle-dependent"] = {"foo-file": str(tmp_path / "foo")} + config["forecast"]["static"] = {"static-foo-file": str(tmp_path / "foo")} + return batch_script, config_file, config.data["forecast"] -def test_run_direct(fv3_run_assets): +@fixture +def fv3_mpi_assets(): + return [ + "KMP_AFFINITY=scatter", + "OMP_NUM_THREADS=1", + "OMP_STACKSIZE=512m", + "MPI_TYPE_DEPTH=20", + "ESMF_RUNTIME_COMPLIANCECHECK=OFF:depth=4", + "srun --export=NONE test_exec.py", + ] + + +def test_run_direct(fv3_mpi_assets, fv3_run_assets): _, config_file, config = fv3_run_assets + expected_command = " ".join(fv3_mpi_assets) with patch.object(FV3Forecast, "_validate", return_value=True): with patch.object(forecast, "execute") as execute: fcstobj = FV3Forecast(config_file=config_file) with patch.object(fcstobj, "_config", config): - fcstobj.run() - execute.assert_called_once_with(cmd="srun --export=None test_exec.py") + fcstobj.run(cycle=dt.datetime.now()) + execute.assert_called_once_with(cmd=expected_command) -def test_FV3Forecast_run_dry_run(caplog, fv3_run_assets): +@pytest.mark.parametrize("with_batch_script", [True, False]) +def test_FV3Forecast_run_dry_run(capsys, fv3_mpi_assets, fv3_run_assets, with_batch_script): logging.getLogger().setLevel(logging.INFO) batch_script, config_file, config = fv3_run_assets - run_expected = """ -#!/bin/bash -#SBATCH --account=user_account -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=1 -#SBATCH --qos=batch -#SBATCH --time=00:01:00 -srun --export=None test_exec.py 
-""".strip() + if with_batch_script: + batch_components = [ + "#!/bin/bash", + "#SBATCH --account=user_account", + "#SBATCH --nodes=1", + "#SBATCH --ntasks-per-node=1", + "#SBATCH --qos=batch", + "#SBATCH --time=00:01:00", + ] + fv3_mpi_assets + run_expected = "\n".join(batch_components) + else: + batch_script = None + run_expected = " ".join(fv3_mpi_assets) + with patch.object(FV3Forecast, "_validate", return_value=True): fcstobj = FV3Forecast(config_file=config_file, dry_run=True, batch_script=batch_script) with patch.object(fcstobj, "_config", config): - fcstobj.run() - assert run_expected in caplog.text + fcstobj.run(cycle=dt.datetime.now()) + assert run_expected in capsys.readouterr().out def test_run_submit(fv3_run_assets): batch_script, config_file, config = fv3_run_assets with patch.object(FV3Forecast, "_validate", return_value=True): - with patch.object(forecast, "execute") as execute: + with patch.object(scheduler, "execute") as execute: fcstobj = FV3Forecast(config_file=config_file, batch_script=batch_script) with patch.object(fcstobj, "_config", config): - fcstobj.run() + fcstobj.run(cycle=dt.datetime.now()) execute.assert_called_once_with(cmd=f"sbatch {batch_script}") diff --git a/src/uwtools/tests/fixtures/FV3_GFS_v16_update.yaml b/src/uwtools/tests/fixtures/FV3_GFS_v16_update.yaml index f7004ce66..a2b974867 100644 --- a/src/uwtools/tests/fixtures/FV3_GFS_v16_update.yaml +++ b/src/uwtools/tests/fixtures/FV3_GFS_v16_update.yaml @@ -1,19 +1,21 @@ -sphum: - longname: specific humidity - units: kg/kg - profile_type: - name: fixed - surface_value: 2.0 -liq_wat: - longname: cloud water mixing ratio - units: kg/kg - profile_type: - name: fixed - surface_value: 2.0 -rainwat: - longname: rain mixing ratio - units: kg/kg - profile_type: - name: fixed - surface_value: 2.0 - +forecast: + field_table: + update_values: + sphum: + longname: specific humidity + units: kg/kg + profile_type: + name: fixed + surface_value: 2.0 + liq_wat: + longname: cloud water mixing ratio + units: kg/kg + profile_type: + name: fixed + surface_value: 2.0 + rainwat: + longname: rain mixing ratio + units: kg/kg + profile_type: + name: fixed + surface_value: 2.0 diff --git a/src/uwtools/tests/fixtures/expt_dir.yaml b/src/uwtools/tests/fixtures/expt_dir.yaml index ea1ea7572..00aac3ae0 100644 --- a/src/uwtools/tests/fixtures/expt_dir.yaml +++ b/src/uwtools/tests/fixtures/expt_dir.yaml @@ -1,10 +1,13 @@ -cycledep: +cycle-dependent: INPUT/gfs_data.nc: path/to/gfs_data.tile7.halo0.nc INPUT/sfc_data.nc: path/to/sfc_data.tile7.halo0.nc INPUT/gfs_bndy.tile7.000.nc: path/to/gfs_bndy.tile7.000.nc - INPUT/gfs_bndy.tile7.006.nc: path/to/gfs_bndy.tile7.006.nc + INPUT/gfs_bndy.tile7.006.nc: path/to/gfs_bndy.tile7.006.nc INPUT/gfs_ctrl.nc: path/to/gfs_ctrl.nc -static: + RESTART/: + - path/to/gfs_bndy.tile7.000.nc + - path/to/gfs_bndy.tile7.006.nc +static: co2historicaldata_2010.txt: src/uwtools/drivers/global_co2historicaldata_2010.txt co2historicaldata_2011.txt: src/uwtools/drivers/global_co2historicaldata_2011.txt co2historicaldata_2012.txt: src/uwtools/drivers/global_co2historicaldata_2012.txt @@ -13,4 +16,4 @@ static: co2historicaldata_2015.txt: src/uwtools/drivers/global_co2historicaldata_2015.txt co2historicaldata_2016.txt: src/uwtools/drivers/global_co2historicaldata_2016.txt co2historicaldata_2017.txt: src/uwtools/drivers/global_co2historicaldata_2017.txt - co2historicaldata_2018.txt: src/uwtools/drivers/global_co2historicaldata_2018.txt \ No newline at end of file + co2historicaldata_2018.txt: 
src/uwtools/drivers/global_co2historicaldata_2018.txt diff --git a/src/uwtools/tests/fixtures/forecast.yaml b/src/uwtools/tests/fixtures/forecast.yaml index 7dc5f3bfb..120cbcbb1 100644 --- a/src/uwtools/tests/fixtures/forecast.yaml +++ b/src/uwtools/tests/fixtures/forecast.yaml @@ -1,17 +1,37 @@ -platform: - MPICMD: srun +user: account: user_account +platform: + mpicmd: srun + scheduler: slurm +preprocessing: + lateral_boundary_conditions: + interval_hours: 3 + offset: 0 + output_file_template: "gfs_bndy.tile{tile}.f{forecast_hour}.nc" forecast: - MODEL: FV3 - EXEC_NAME: test_exec.py - RUN_DIRECTORY: src/uwtools/tests/drivers/ - CYCLEDEP: - INPUT/gfs_data.nc: path/to/gfs_data.tile7.halo0.nc + model: FV3 + exec_name: test_exec.py + run_dir: some/path + tiles: + - 7 + length: 12 + jobinfo: + nodes: 1 + tasks_per_node: 1 + walltime: 00:01:00 + queue: batch + runtime_info: + threads: 1 + stacksize: 512m + mpi_args: + - "--export=NONE" + cycle-dependent: + INPUT/gfs_data.nc: path/to/gfs_data.tile7.halo0.nc INPUT/sfc_data.nc: path/to/sfc_data.tile7.halo0.nc INPUT/gfs_bndy.tile7.000.nc: path/to/gfs_bndy.tile7.000.nc - INPUT/gfs_bndy.tile7.006.nc: path/to/gfs_bndy.tile7.006.nc + INPUT/gfs_bndy.tile7.006.nc: path/to/gfs_bndy.tile7.006.nc INPUT/gfs_ctrl.nc: path/to/gfs_ctrl.nc - STATIC: + static: co2historicaldata_2010.txt: src/uwtools/drivers/global_co2historicaldata_2010.txt co2historicaldata_2011.txt: src/uwtools/drivers/global_co2historicaldata_2011.txt co2historicaldata_2012.txt: src/uwtools/drivers/global_co2historicaldata_2012.txt @@ -21,4 +41,3 @@ forecast: co2historicaldata_2016.txt: src/uwtools/drivers/global_co2historicaldata_2016.txt co2historicaldata_2017.txt: src/uwtools/drivers/global_co2historicaldata_2017.txt co2historicaldata_2018.txt: src/uwtools/drivers/global_co2historicaldata_2018.txt - VERBOSE: false \ No newline at end of file diff --git a/src/uwtools/tests/fixtures/fruit_config_similar_for_fcst.yaml b/src/uwtools/tests/fixtures/fruit_config_similar_for_fcst.yaml new file mode 100644 index 000000000..04d64a029 --- /dev/null +++ b/src/uwtools/tests/fixtures/fruit_config_similar_for_fcst.yaml @@ -0,0 +1,10 @@ +forecast: + model_configure: + update_values: + fruit: papaya + vegetable: peas + how_many: 17 + dressing: ranch + topping: crouton + size: large + meat: chicken diff --git a/src/uwtools/tests/test_cli.py b/src/uwtools/tests/test_cli.py index 56101fd36..060586d8f 100644 --- a/src/uwtools/tests/test_cli.py +++ b/src/uwtools/tests/test_cli.py @@ -273,14 +273,20 @@ def test__dispatch_forecast(params): def test__dispatch_forecast_run(): - args = ns() + args = ns( + batch_script=None, + cycle="2023-01-01T00:00:00", + config_file=1, + dry_run=True, + forecast_model="foo", + ) vars(args).update({STR.cfgfile: 1, "forecast_model": "foo"}) with patch.object(cli.uwtools.drivers.forecast, "FooForecast", create=True) as m: CLASSES = {"foo": getattr(cli.uwtools.drivers.forecast, "FooForecast")} with patch.object(cli.uwtools.drivers.forecast, "CLASSES", new=CLASSES): cli._dispatch_forecast_run(args) assert m.called_once_with(args) - m().run.assert_called_once_with() + m().run.assert_called_once_with(cycle="2023-01-01T00:00:00") @pytest.mark.parametrize("params", [(STR.render, "_dispatch_template_render")]) diff --git a/src/uwtools/tests/test_scheduler.py b/src/uwtools/tests/test_scheduler.py index 893984c83..115abe6a2 100644 --- a/src/uwtools/tests/test_scheduler.py +++ b/src/uwtools/tests/test_scheduler.py @@ -3,16 +3,20 @@ Tests for uwtools.scheduler module. 
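+
+A hedged usage sketch of the API under test (config values and the script
+path are illustrative):
+
+    config = {"scheduler": "slurm", "account": "me",
+              "queue": "batch", "walltime": "00:01:00"}
+    js = JobScheduler.get_scheduler(config)
+    js.batch_script.content()  # newline-joined "#SBATCH ..." directives
+    js.submit_job("/path/to/script")  # executes "sbatch /path/to/script"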
""" +from unittest.mock import patch + from pytest import fixture, raises +from uwtools import scheduler from uwtools.scheduler import JobScheduler +from uwtools.tests.support import compare_files # LFS tests @fixture def lsf_props(): - return { + config = { "account": "account_name", "nodes": 1, "queue": "batch", @@ -21,64 +25,58 @@ def lsf_props(): "threads": 1, "walltime": "00:01:00", } + expected = [ + "#BSUB -P account_name", + "#BSUB -R affinity[core(1)]", + "#BSUB -R span[ptile=1]", + "#BSUB -W 00:01:00", + "#BSUB -n 1", + "#BSUB -q batch", + ] + return config, expected def test_lsf_1(lsf_props): - expected = """ -#BSUB -P account_name -#BSUB -R affinity[core(1)] -#BSUB -R span[ptile=1] -#BSUB -W 00:01:00 -#BSUB -n 1 -#BSUB -q batch -""".strip() - assert JobScheduler.get_scheduler(lsf_props).batch_script.content() == expected + lsf_config, expected_items = lsf_props + expected = "\n".join(expected_items) + assert JobScheduler.get_scheduler(lsf_config).batch_script.content() == expected def test_lsf_2(lsf_props): - lsf_props.update({"tasks_per_node": 12}) - expected = """ -#BSUB -P account_name -#BSUB -R affinity[core(1)] -#BSUB -R span[ptile=12] -#BSUB -W 00:01:00 -#BSUB -n 12 -#BSUB -q batch -""".strip() - assert JobScheduler.get_scheduler(lsf_props).batch_script.content() == expected + lsf_config, expected_items = lsf_props + lsf_config.update({"tasks_per_node": 12}) + expected_items[2] = "#BSUB -R span[ptile=12]" + expected_items[4] = "#BSUB -n 12" + expected = "\n".join(expected_items) + assert JobScheduler.get_scheduler(lsf_config).batch_script.content() == expected def test_lsf_3(lsf_props): - lsf_props.update({"nodes": 2, "tasks_per_node": 6}) - expected = """ -#BSUB -P account_name -#BSUB -R affinity[core(1)] -#BSUB -R span[ptile=6] -#BSUB -W 00:01:00 -#BSUB -n 12 -#BSUB -q batch -""".strip() - assert JobScheduler.get_scheduler(lsf_props).batch_script.content() == expected + lsf_config, expected_items = lsf_props + lsf_config.update({"nodes": 2, "tasks_per_node": 6}) + expected_items[2] = "#BSUB -R span[ptile=6]" + expected_items[4] = "#BSUB -n 12" + expected = "\n".join(expected_items) + assert JobScheduler.get_scheduler(lsf_config).batch_script.content() == expected def test_lsf_4(lsf_props): - lsf_props.update({"memory": "1MB", "nodes": 2, "tasks_per_node": 3, "threads": 2}) - batch_script = JobScheduler.get_scheduler(lsf_props).batch_script - expected = """ -#BSUB -P account_name -#BSUB -R affinity[core(2)] -#BSUB -R rusage[mem=1000KB] -#BSUB -R span[ptile=3] -#BSUB -W 00:01:00 -#BSUB -n 6 -#BSUB -q batch -""".strip() - assert batch_script.content() == expected + lsf_config, expected_items = lsf_props + lsf_config.update({"memory": "1MB", "nodes": 2, "tasks_per_node": 3, "threads": 2}) + expected_items[1] = "#BSUB -R affinity[core(2)]" + expected_items[2] = "#BSUB -R span[ptile=3]" + expected_items[4] = "#BSUB -n 6" + new_items = [ + "#BSUB -R rusage[mem=1000KB]", + ] + expected = "\n".join(sorted(expected_items + new_items)) + assert JobScheduler.get_scheduler(lsf_config).batch_script.content() == expected def test_lsf_5(lsf_props): + lsf_config, _ = lsf_props expected = "bsub" - assert JobScheduler.get_scheduler(lsf_props).submit_command == expected + assert JobScheduler.get_scheduler(lsf_config).submit_command == expected # PBS tests @@ -86,7 +84,7 @@ def test_lsf_5(lsf_props): @fixture def pbs_props(): - return { + config = { "account": "account_name", "nodes": 1, "queue": "batch", @@ -94,102 +92,89 @@ def pbs_props(): "tasks_per_node": 1, "walltime": "00:01:00", } + 
expected = [ + "#PBS -A account_name", + "#PBS -l select=1:mpiprocs=1:ompthreads=1:ncpus=1", + "#PBS -l walltime=00:01:00", + "#PBS -q batch", + ] + return config, expected def test_pbs_1(pbs_props): - expected = """ -#PBS -A account_name -#PBS -l select=1:mpiprocs=1:ompthreads=1:ncpus=1 -#PBS -l walltime=00:01:00 -#PBS -q batch -""".strip() - assert JobScheduler.get_scheduler(pbs_props).batch_script.content() == expected + pbs_config, expected_items = pbs_props + expected = "\n".join(expected_items) + assert JobScheduler.get_scheduler(pbs_config).batch_script.content() == expected def test_pbs_2(pbs_props): - pbs_props.update({"memory": "512M", "tasks_per_node": 4}) - expected = """ -#PBS -A account_name -#PBS -l select=1:mpiprocs=4:ompthreads=1:ncpus=4:mem=512M -#PBS -l walltime=00:01:00 -#PBS -q batch -""".strip() - assert JobScheduler.get_scheduler(pbs_props).batch_script.content() == expected + pbs_config, expected_items = pbs_props + pbs_config.update({"memory": "512M", "tasks_per_node": 4}) + expected_items[1] = "#PBS -l select=1:mpiprocs=4:ompthreads=1:ncpus=4:mem=512M" + expected = "\n".join(expected_items) + assert JobScheduler.get_scheduler(pbs_config).batch_script.content() == expected def test_pbs_3(pbs_props): - pbs_props.update({"nodes": 3, "tasks_per_node": 4, "threads": 2}) - expected = """ -#PBS -A account_name -#PBS -l select=3:mpiprocs=4:ompthreads=2:ncpus=8 -#PBS -l walltime=00:01:00 -#PBS -q batch -""".strip() - assert JobScheduler.get_scheduler(pbs_props).batch_script.content() == expected + pbs_config, expected_items = pbs_props + pbs_config.update({"nodes": 3, "tasks_per_node": 4, "threads": 2}) + expected_items[1] = "#PBS -l select=3:mpiprocs=4:ompthreads=2:ncpus=8" + expected = "\n".join(expected_items) + assert JobScheduler.get_scheduler(pbs_config).batch_script.content() == expected def test_pbs_4(pbs_props): - pbs_props.update({"memory": "512M", "nodes": 3, "tasks_per_node": 4, "threads": 2}) - expected = """ -#PBS -A account_name -#PBS -l select=3:mpiprocs=4:ompthreads=2:ncpus=8:mem=512M -#PBS -l walltime=00:01:00 -#PBS -q batch -""".strip() - assert JobScheduler.get_scheduler(pbs_props).batch_script.content() == expected + pbs_config, expected_items = pbs_props + pbs_config.update({"memory": "512M", "nodes": 3, "tasks_per_node": 4, "threads": 2}) + expected_items[1] = "#PBS -l select=3:mpiprocs=4:ompthreads=2:ncpus=8:mem=512M" + expected = "\n".join(expected_items) + assert JobScheduler.get_scheduler(pbs_config).batch_script.content() == expected def test_pbs_5(pbs_props): - pbs_props.update({"exclusive": "True"}) - expected = """ -#PBS -A account_name -#PBS -l place=excl -#PBS -l select=1:mpiprocs=1:ompthreads=1:ncpus=1 -#PBS -l walltime=00:01:00 -#PBS -q batch -""".strip() - assert JobScheduler.get_scheduler(pbs_props).batch_script.content() == expected + pbs_config, expected_items = pbs_props + pbs_config.update({"exclusive": "True"}) + new_items = [ + "#PBS -l place=excl", + ] + expected = "\n".join(sorted(expected_items + new_items)) + assert JobScheduler.get_scheduler(pbs_config).batch_script.content() == expected def test_pbs_6(pbs_props): - pbs_props.update({"exclusive": False, "placement": "vscatter"}) - expected = """ -#PBS -A account_name -#PBS -l place=vscatter -#PBS -l select=1:mpiprocs=1:ompthreads=1:ncpus=1 -#PBS -l walltime=00:01:00 -#PBS -q batch -""".strip() - assert JobScheduler.get_scheduler(pbs_props).batch_script.content() == expected + pbs_config, expected_items = pbs_props + pbs_config.update({"exclusive": False, "placement": 
"vscatter"}) + new_items = [ + "#PBS -l place=vscatter", + ] + expected = "\n".join(sorted(expected_items + new_items)) + assert JobScheduler.get_scheduler(pbs_config).batch_script.content() == expected def test_pbs_7(pbs_props): - pbs_props.update({"exclusive": True, "placement": "vscatter"}) - expected = """ -#PBS -A account_name -#PBS -l place=vscatter:excl -#PBS -l select=1:mpiprocs=1:ompthreads=1:ncpus=1 -#PBS -l walltime=00:01:00 -#PBS -q batch -""".strip() - assert JobScheduler.get_scheduler(pbs_props).batch_script.content() == expected + pbs_config, expected_items = pbs_props + pbs_config.update({"exclusive": True, "placement": "vscatter"}) + new_items = [ + "#PBS -l place=vscatter:excl", + ] + expected = "\n".join(sorted(expected_items + new_items)) + assert JobScheduler.get_scheduler(pbs_config).batch_script.content() == expected def test_pbs_8(pbs_props): - pbs_props.update({"debug": "True"}) - expected = """ -#PBS -A account_name -#PBS -l debug=true -#PBS -l select=1:mpiprocs=1:ompthreads=1:ncpus=1 -#PBS -l walltime=00:01:00 -#PBS -q batch -""".strip() - assert JobScheduler.get_scheduler(pbs_props).batch_script.content() == expected + pbs_config, expected_items = pbs_props + pbs_config.update({"debug": "True"}) + new_items = [ + "#PBS -l debug=true", + ] + expected = "\n".join(sorted(expected_items + new_items)) + assert JobScheduler.get_scheduler(pbs_config).batch_script.content() == expected def test_pbs_9(pbs_props): + pbs_config, _ = pbs_props expected = "qsub" - assert JobScheduler.get_scheduler(pbs_props).submit_command == expected + assert JobScheduler.get_scheduler(pbs_config).submit_command == expected # Slurm tests @@ -197,7 +182,7 @@ def test_pbs_9(pbs_props): @fixture def slurm_props(): - return { + config = { "account": "account_name", "nodes": 1, "queue": "batch", @@ -205,81 +190,87 @@ def slurm_props(): "tasks_per_node": 1, "walltime": "00:01:00", } + expected = [ + "#SBATCH --account=account_name", + "#SBATCH --nodes=1", + "#SBATCH --ntasks-per-node=1", + "#SBATCH --qos=batch", + "#SBATCH --time=00:01:00", + ] + return config, expected def test_slurm_1(slurm_props): - expected = """ -#SBATCH --account=account_name -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=1 -#SBATCH --qos=batch -#SBATCH --time=00:01:00 -""".strip() - assert JobScheduler.get_scheduler(slurm_props).batch_script.content() == expected + slurm_config, expected_items = slurm_props + expected = "\n".join(expected_items) + assert JobScheduler.get_scheduler(slurm_config).batch_script.content() == expected def test_slurm_2(slurm_props): - slurm_props.update({"partition": "debug"}) - expected = """ -#SBATCH --account=account_name -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=1 -#SBATCH --partition=debug -#SBATCH --qos=batch -#SBATCH --time=00:01:00 -""".strip() - assert JobScheduler.get_scheduler(slurm_props).batch_script.content() == expected + slurm_config, expected_items = slurm_props + slurm_config.update({"partition": "debug"}) + new_items = [ + "#SBATCH --partition=debug", + ] + expected = "\n".join(sorted(expected_items + new_items)) + assert JobScheduler.get_scheduler(slurm_config).batch_script.content() == expected def test_slurm_3(slurm_props): - slurm_props.update({"tasks_per_node": 2, "threads": 4}) - expected = """ -#SBATCH --account=account_name -#SBATCH --cpus-per-task=4 -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=2 -#SBATCH --qos=batch -#SBATCH --time=00:01:00 -""".strip() - assert JobScheduler.get_scheduler(slurm_props).batch_script.content() == expected + slurm_config, 
expected_items = slurm_props + slurm_config.update({"tasks_per_node": 2, "threads": 4}) + expected_items[2] = "#SBATCH --ntasks-per-node=2" + new_items = [ + "#SBATCH --cpus-per-task=4", + ] + expected = "\n".join(sorted(expected_items + new_items)) + assert JobScheduler.get_scheduler(slurm_config).batch_script.content() == expected def test_slurm_4(slurm_props): - slurm_props.update({"memory": "4MB", "tasks_per_node": 2}) - expected = """ -#SBATCH --account=account_name -#SBATCH --mem=4MB -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=2 -#SBATCH --qos=batch -#SBATCH --time=00:01:00 -""".strip() - assert JobScheduler.get_scheduler(slurm_props).batch_script.content() == expected + slurm_config, expected_items = slurm_props + slurm_config.update({"memory": "4MB", "tasks_per_node": 2}) + expected_items[2] = "#SBATCH --ntasks-per-node=2" + new_items = [ + "#SBATCH --mem=4MB", + ] + expected = "\n".join(sorted(expected_items + new_items)) + assert JobScheduler.get_scheduler(slurm_config).batch_script.content() == expected def test_slurm_5(slurm_props): - slurm_props.update({"exclusive": "True"}) - expected = """ -#SBATCH --account=account_name -#SBATCH --exclusive=True -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=1 -#SBATCH --qos=batch -#SBATCH --time=00:01:00 -""".strip() - assert JobScheduler.get_scheduler(slurm_props).batch_script.content() == expected + slurm_config, expected_items = slurm_props + slurm_config.update({"exclusive": "True"}) + new_items = [ + "#SBATCH --exclusive", + ] + expected = "\n".join(sorted(expected_items + new_items)) + assert JobScheduler.get_scheduler(slurm_config).batch_script.content() == expected def test_slurm_6(slurm_props): + slurm_config, _ = slurm_props expected = "sbatch" - assert JobScheduler.get_scheduler(slurm_props).submit_command == expected + assert JobScheduler.get_scheduler(slurm_config).submit_command == expected # Generic tests using PBS support. 
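+# A hedged sketch of the dump() flow exercised below (the output path is
+# illustrative): dump() writes the shebang line plus the directives to a file.
+#
+#     bs = JobScheduler.get_scheduler(config).batch_script
+#     bs.dump("/path/to/outfile.sh")
+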
+def test_batchscript_dump(pbs_props, tmpdir): + outfile = tmpdir / "outfile.sh" + pbs_config, expected_items = pbs_props + bs = JobScheduler.get_scheduler(pbs_config).batch_script + bs.dump(outfile) + reference = tmpdir / "reference.sh" + with open(reference, "w", encoding="utf-8") as f: + f.write("\n".join(["#!/bin/bash"] + expected_items)) + assert compare_files(reference, outfile) + + def test_scheduler_bad_attr(pbs_props): - js = JobScheduler.get_scheduler(pbs_props) + pbs_config, _ = pbs_props + js = JobScheduler.get_scheduler(pbs_config) with raises(AttributeError): assert js.bad_attr @@ -291,19 +282,33 @@ def test_scheduler_bad_scheduler(): def test_scheduler_dot_notation(pbs_props): - js = JobScheduler.get_scheduler(pbs_props) + pbs_config, _ = pbs_props + js = JobScheduler.get_scheduler(pbs_config) assert js.account == "account_name" def test_scheduler_prop_not_defined_raises_key_error(pbs_props): - del pbs_props["scheduler"] + pbs_config, _ = pbs_props + del pbs_config["scheduler"] with raises(KeyError) as e: - JobScheduler.get_scheduler(pbs_props) + JobScheduler.get_scheduler(pbs_config) assert "No scheduler defined in props" in str(e.value) def test_scheduler_raises_exception_when_missing_required_attrs(pbs_props): - del pbs_props["account"] + pbs_config, _ = pbs_props + del pbs_config["account"] with raises(ValueError) as e: - JobScheduler.get_scheduler(pbs_props) + JobScheduler.get_scheduler(pbs_config) assert "Missing required attributes: [account]" in str(e.value) + + +def test_scheduler_submit_job(pbs_props): + pbs_config, _ = pbs_props + js = JobScheduler.get_scheduler(pbs_config) + submit_command = js.submit_command + outpath = "/path/to/batch/script" + expected_command = f"{submit_command} {outpath}" + with patch.object(scheduler, "execute") as execute: + js.submit_job(outpath) + execute.assert_called_once_with(cmd=expected_command) From 601ff4e8beb5f5431acdda3ed1730e33579016eb Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Fri, 6 Oct 2023 08:29:00 -0600 Subject: [PATCH 07/66] jq-formatting (#312) --- Makefile | 1 + recipe/meta.json | 1 + recipe/meta.yaml | 1 + src/uwtools/resources/FV3Forecast.jsonschema | 240 ++++++------ src/uwtools/resources/rocoto.jsonschema | 370 +++++++++---------- src/uwtools/resources/workflow.jsonschema | 196 +++++----- 6 files changed, 407 insertions(+), 402 deletions(-) diff --git a/Makefile b/Makefile index d89b0961d..dcf73d982 100644 --- a/Makefile +++ b/Makefile @@ -24,6 +24,7 @@ format: black src isort src cd src && docformatter . || test $$? -eq 3 + for x in $$(find src -type f -name "*.jsonschema"); do jq -S . 
$$x >$$x.tmp && mv $$x.tmp $$x || rm $$x.tmp; done lint: recipe/run_test.sh lint diff --git a/recipe/meta.json b/recipe/meta.json index dcafd3514..28822e3a7 100644 --- a/recipe/meta.json +++ b/recipe/meta.json @@ -11,6 +11,7 @@ "f90nml =1.4.*", "isort =5.12.*", "jinja2 =3.0.*", + "jq =1.6.*", "jsonschema =4.17.*", "lxml =4.9.*", "mypy =1.4.*", diff --git a/recipe/meta.yaml b/recipe/meta.yaml index b58f4102a..99cf8ff84 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -24,6 +24,7 @@ test: - coverage 7.2.* - docformatter 1.7.* - isort 5.12.* + - jq 1.6.* - mypy 1.4.* - pylint 2.17.* - pytest 7.4.* diff --git a/src/uwtools/resources/FV3Forecast.jsonschema b/src/uwtools/resources/FV3Forecast.jsonschema index f9b1df8f4..2e070696e 100644 --- a/src/uwtools/resources/FV3Forecast.jsonschema +++ b/src/uwtools/resources/FV3Forecast.jsonschema @@ -1,130 +1,132 @@ { - "title": "FV3 Forecast config", - "description": "This document is to validate user-defined FV3 forecast config files", - "type": "object", - "properties": { - "forecast": { - "description": "parameters of the forecast", - "type": "object", - "properties": { - "cycle-dependent": { - "type": "object", - "propertyNames": { - "type": "string" - } - }, - "exec_name": { - "type": "string" - }, - "field_table": { - "type": "object", - "properties": { - "base_file": { - "type": "string", - "format": "uri" - }, - "update_values": { - "type": "object" - } - } - }, - "length": { - "type": "integer", - "minimum": 1 - }, - "model": { - "type": "string" - }, - "model_configure": { - "type": "object", - "properties": { - "base_file": { - "type": "string", - "format": "uri" - }, - "update_values": { - "type": "object" - } - } - }, - "mpicmd": { - "type": "string" - }, - "namelist": { - "type": "object", - "properties": { - "base_file": { - "type": "string", - "format": "uri" - }, - "update_values": { - "type": "object" - } - } - }, - "run_dir": { - "type": "string", - "format": "uri" - }, - "static": { - "type": "object", - "propertyNames": { - "type": "string" - } - }, - "tiles": { - "type": "array", - "items": { - "type": "integer" - } - } + "description": "This document is to validate user-defined FV3 forecast config files", + "properties": { + "forecast": { + "description": "parameters of the forecast", + "properties": { + "cycle-dependent": { + "propertyNames": { + "type": "string" + }, + "type": "object" + }, + "exec_name": { + "type": "string" + }, + "field_table": { + "properties": { + "base_file": { + "format": "uri", + "type": "string" + }, + "update_values": { + "type": "object" } + }, + "type": "object" + }, + "length": { + "minimum": 1, + "type": "integer" }, - "platform": { - "type": "object", - "properties": { - "scheduler": { - "type": "string", - "enum": [ - "lsf", - "pbs", - "slurm" - ] - }, - "required": ["mpicmd"] + "model": { + "type": "string" + }, + "model_configure": { + "properties": { + "base_file": { + "format": "uri", + "type": "string" + }, + "update_values": { + "type": "object" } + }, + "type": "object" + }, + "mpicmd": { + "type": "string" }, - "preprocessing": { - "type": "object", - "properties": { - "lateral_boundary_conditions": { - "type": "object", - "properties": { - "interval_hours": { - "type": "number", - "minimum": 1, - "default": 3 - }, - "offset": { - "type": "number", - "minimum": 0, - "default": 0 - }, - "output_file_template": { - "type": "string", - "format": "uri" - } - } - } + "namelist": { + "properties": { + "base_file": { + "format": "uri", + "type": "string" + }, + "update_values": { + "type": 
"object" } + }, + "type": "object" }, - "user": { - "type": "object", - "properties": { - "account": { - "type": "string" - } + "run_dir": { + "format": "uri", + "type": "string" + }, + "static": { + "propertyNames": { + "type": "string" + }, + "type": "object" + }, + "tiles": { + "items": { + "type": "integer" + }, + "type": "array" + } + }, + "type": "object" + }, + "platform": { + "properties": { + "required": [ + "mpicmd" + ], + "scheduler": { + "enum": [ + "lsf", + "pbs", + "slurm" + ], + "type": "string" + } + }, + "type": "object" + }, + "preprocessing": { + "properties": { + "lateral_boundary_conditions": { + "properties": { + "interval_hours": { + "default": 3, + "minimum": 1, + "type": "number" + }, + "offset": { + "default": 0, + "minimum": 0, + "type": "number" + }, + "output_file_template": { + "format": "uri", + "type": "string" } + }, + "type": "object" + } + }, + "type": "object" + }, + "user": { + "properties": { + "account": { + "type": "string" } + }, + "type": "object" } + }, + "title": "FV3 Forecast config", + "type": "object" } diff --git a/src/uwtools/resources/rocoto.jsonschema b/src/uwtools/resources/rocoto.jsonschema index 270cf47b5..5c192c02e 100644 --- a/src/uwtools/resources/rocoto.jsonschema +++ b/src/uwtools/resources/rocoto.jsonschema @@ -1,38 +1,55 @@ { "$defs": { + "TimePattern": { + "pattern": "^(\\d{2}:){3}\\d{2}$", + "type": "string" + }, "datePattern": { - "type": "integer", "format": "string", - "pattern": "^(\\d{12}) (\\d{12})$" + "pattern": "^(\\d{12}) (\\d{12})$", + "type": "integer" }, "dateTimePattern": { - "type": "string", - "pattern": "^(\\d{12}) (\\d{12}) (\\d{2}):(\\d{2}):(\\d{2})$" + "pattern": "^(\\d{12}) (\\d{12}) (\\d{2}):(\\d{2}):(\\d{2})$", + "type": "string" }, "dependency": { - "type": "object", + "additionalProperties": false, + "minProperties": 1, "patternProperties": { "^(and|or|not|nand|nor|xor)(_.*)?$": { "$ref": "#/$defs/dependency" }, - "^some(_.*)?$": { - "type": "array", - "patternProperties": { - "^threshold(_.*)?$": { - "type": "number", - "minimum": 0, - "maximum": 1 + "^(streq|strneq)(_.*)?$": { + "additionalProperties": false, + "properties": { + "left": { + "type": "string" + }, + "right": { + "type": "string" } }, + "required": [ + "left", + "right" + ], + "type": "object" + }, + "^cycleexistdep(_.*)?$": { + "additionalProperties": false, "properties": { - "$ref": "#/$defs/dependency" - } + "cycle_offset": { + "$ref": "#/$defs/TimePattern" + } + }, + "type": "object" }, "^datadep(_.*)?$": { - "type": "object", + "additionalProperties": false, "properties": { "attrs": { - "type": "object", + "additionalProperties": false, "properties": { "age": { "$ref": "#/$defs/TimePattern" @@ -41,151 +58,193 @@ "type": "string" } }, - "additionalProperties": false + "type": "object" }, "text": { "type": "string" } }, - "additionalProperties": false + "type": "object" }, - "^taskdep(_.*)?$": { - "type": "object", + "^metataskdep(_.*)?$": { + "additionalProperties": false, "properties": { "attrs": { - "type": "object", + "additionalProperties": false, "properties": { - "task": { - "type": "string" - }, "cycle_offset": { "$ref": "#/$defs/TimePattern" }, + "metatask": { + "type": "string" + }, "state": { - "type": "string", "enum": [ - "RUNNING", - "Running", - "running", "SUCCEEDED", "DEAD", "Succeeded", "Dead", "succeeded", "dead" - ] + ], + "type": "string" + }, + "threshold": { + "maximum": 1, + "minimum": 0, + "type": "number" } }, "required": [ - "task" + "metatask" ], - "additionalProperties": false - } - }, - 
"additionalProperties": false - }, - "^(streq|strneq)(_.*)?$": { - "type": "object", - "properties": { - "left": { - "type": "string" - }, - "right": { - "type": "string" + "type": "object" } }, - "required": [ - "left", - "right" - ], - "additionalProperties": false + "type": "object" }, - "^cycleexistdep(_.*)?$": { - "type": "object", - "properties": { - "cycle_offset": { - "$ref": "#/$defs/TimePattern" + "^some(_.*)?$": { + "patternProperties": { + "^threshold(_.*)?$": { + "maximum": 1, + "minimum": 0, + "type": "number" } }, - "additionalProperties": false - }, - "^taskvalid(_.*)?$": { - "type": "object", "properties": { - "task": { - "type": "string" - } + "$ref": "#/$defs/dependency" }, - "additionalProperties": false + "type": "array" }, - "^metataskdep(_.*)?$": { - "type": "object", + "^taskdep(_.*)?$": { + "additionalProperties": false, "properties": { "attrs": { - "type": "object", + "additionalProperties": false, "properties": { - "metatask": { - "type": "string" - }, "cycle_offset": { "$ref": "#/$defs/TimePattern" }, "state": { - "type": "string", "enum": [ + "RUNNING", + "Running", + "running", "SUCCEEDED", "DEAD", "Succeeded", "Dead", "succeeded", "dead" - ] + ], + "type": "string" }, - "threshold": { - "type": "number", - "minimum": 0, - "maximum": 1 + "task": { + "type": "string" } }, "required": [ - "metatask" + "task" ], - "additionalProperties": false + "type": "object" } }, - "additionalProperties": false + "type": "object" + }, + "^taskvalid(_.*)?$": { + "additionalProperties": false, + "properties": { + "task": { + "type": "string" + } + }, + "type": "object" }, "^timedep(_.*)?$": { "$ref": "#/$defs/TimePattern" } }, - "additionalProperties": false, - "minProperties": 1 + "type": "object" }, "metatask": { - "type": "object", - "properties": { - "var": { - "$ref": "#/$defs/var" - } - }, + "additionalProperties": false, + "maxProperties": 3, + "minProperties": 2, "patternProperties": { - "^task(_[a-z0-9_]+)?$": { - "$ref": "#/$defs/task" - }, "^metatask(_[a-z0-9_]+)?$": { "$ref": "#/$defs/metatask" + }, + "^task(_[a-z0-9_]+)?$": { + "$ref": "#/$defs/task" + } + }, + "properties": { + "var": { + "$ref": "#/$defs/var" } }, - "additionalProperties": false, "required": [ "var" ], - "minProperties": 2, - "maxProperties": 3 + "type": "object" }, "task": { - "type": "object", + "additionalProperties": false, + "dependentSchemas": { + "exclusive": { + "not": { + "required": [ + "shared" + ] + } + }, + "join": { + "not": { + "required": [ + "stdout" + ] + } + }, + "shared": { + "not": { + "required": [ + "exclusive" + ] + } + }, + "stderr": { + "required": [ + "stdout" + ] + }, + "stdout": { + "not": { + "required": [ + "join" + ] + }, + "required": [ + "stderr" + ] + } + }, + "oneOf": [ + { + "required": [ + "cores" + ] + }, + { + "required": [ + "native" + ] + }, + { + "required": [ + "nodes" + ] + } + ], "properties": { "account": { "type": "string" @@ -198,21 +257,21 @@ "type": "boolean" }, "maxtries": { - "type": "string", "format": "number", - "minimum": 0 + "minimum": 0, + "type": "string" }, "throttle": { - "type": "integer", - "minimum": 0 + "minimum": 0, + "type": "integer" } }, "command": { "type": "string" }, "cores": { - "type": "integer", - "minimum": 0 + "minimum": 0, + "type": "integer" }, "deadline": { "$ref": "#/$defs/datePattern" @@ -263,103 +322,41 @@ "$ref": "#/$defs/timestr" } }, - "additionalProperties": false, "required": [ "command", "walltime" ], - "dependentSchemas": { - "join": { - "not": { - "required": [ - "stdout" - ] - } - }, - "stdout": { - 
"not": { - "required": [ - "join" - ] - }, - "required": [ - "stderr" - ] - }, - "stderr": { - "required": [ - "stdout" - ] - }, - "shared": { - "not": { - "required": [ - "exclusive" - ] - } - }, - "exclusive": { - "not": { - "required": [ - "shared" - ] - } - } - }, - "oneOf": [ - { - "required": [ - "cores" - ] - }, - { - "required": [ - "native" - ] - }, - { - "required": [ - "nodes" - ] - } - ] - }, - "TimePattern": { - "type": "string", - "pattern": "^(\\d{2}:){3}\\d{2}$" + "type": "object" }, "timestr": { - "type": "string", - "pattern": "^[0-9]{2}:[0-9]{2}:[0-9]{2}$" + "pattern": "^[0-9]{2}:[0-9]{2}:[0-9]{2}$", + "type": "string" }, "var": { - "type": "object", + "additionalProperties": false, + "minProperties": 1, "patternProperties": { "^.+$": { "type": "string" } }, - "additionalProperties": false, - "minProperties": 1 + "type": "object" } }, - "type": "object", "properties": { "workflow": { - "type": "object", + "additionalProperties": false, "properties": { "attrs": { - "type": "object", "properties": { "cyclethrottle": { - "type": "integer", - "minimum": 0 + "minimum": 0, + "type": "integer" }, "realtime": { "type": "boolean" }, "scheduler": { - "type": "string", "enum": [ "sge", "lsf", @@ -371,41 +368,44 @@ "pbspro", "slurm", "cobalt" - ] + ], + "type": "string" }, "taskthrottle": { - "type": "integer", - "minimum": 0 + "minimum": 0, + "type": "integer" } }, "required": [ "realtime", "scheduler" - ] + ], + "type": "object" }, "cycledefs": { - "type": "object", "properties": { "groupname": { - "type": "array", "items": { "$ref": "#/$defs/dateTimePattern" - } + }, + "type": "array" }, "required": [ "items" ] - } + }, + "type": "object" }, "entities": { "type": "object" }, "log": { - "type": "string", - "format": "uri" + "format": "uri", + "type": "string" }, "tasks": { - "type": "object", + "additionalProperties": false, + "minProperties": 1, "patternProperties": { "^metatask(_[a-z0-9_]+)?$": { "$ref": "#/$defs/metatask" @@ -414,19 +414,19 @@ "$ref": "#/$defs/task" } }, - "additionalProperties": false, - "minProperties": 1 + "type": "object" } }, - "additionalProperties": false, "required": [ "cycledefs", "log", "tasks" - ] + ], + "type": "object" } }, "required": [ "workflow" - ] -} \ No newline at end of file + ], + "type": "object" +} diff --git a/src/uwtools/resources/workflow.jsonschema b/src/uwtools/resources/workflow.jsonschema index 917f963f1..012908411 100644 --- a/src/uwtools/resources/workflow.jsonschema +++ b/src/uwtools/resources/workflow.jsonschema @@ -1,101 +1,101 @@ { - "title": "workflow config", - "description": "This document is to validate config files from SRW, HAFS, Global", - "type": "object", - "properties": { - "platform": { - "description": "attributes of the platform", - "type": "object", - "properties": { - "WORKFLOW_MANAGER": { - "type": "string", - "enum": [ - "rocoto", - "none" - ] - }, - "NCORES_PER_NODE": { - "type": "number" - }, - "SCHED": { - "type": "string", - "enum": [ - "slurm", - "pbspro", - "lsf", - "lsfcray", - "none" - ] - }, - "CCPA_OBS_DIR": { - "type": "string", - "format": "uri" - }, - "MRMS_OBS_DIR": { - "type": "string", - "format": "uri" - }, - "NDAS_OBS_DIR": { - "type": "string", - "format": "uri" - }, - "METPLUS_PATH": { - "type": "string", - "format": "uri" - }, - "MET_BIN_EXEC": { - "type": "string" - }, - "MET_INSTALL_DIR": { - "type": "string", - "format": "uri" - }, - "DOMAIN_PREGEN_BASEDIR": { - "type": "string", - "format": "uri" - }, - "PARTITION_DEFAULT": { - "type": "string" - }, - "QUEUE_DEFAULT": { - "type": 
"string" - }, - "PARTITION_FCST": { - "type": "string" - }, - "QUEUE_FCST": { - "type": "string" - }, - "PARTITION_HPSS": { - "type": "string" - }, - "QUEUE_HPSS": { - "type": "string" - } - } - }, - "cpl_aqm_parm": { - "description": "attributes of coupled air quality", - "type": "object", - "properties": { - "AQM_CONFIG_DIR": { - "type": "string", - "format": "uri" - }, - "AQM_BIO_DIR": { - "type": "string", - "format": "uri" - } - } - }, - "task_get_da_obs": { - "description": "task for data assimilation", - "type": "object", - "properties": { - "OBS_SUFFIX": { - "type": "string" - } - } + "description": "This document is to validate config files from SRW, HAFS, Global", + "properties": { + "cpl_aqm_parm": { + "description": "attributes of coupled air quality", + "properties": { + "AQM_BIO_DIR": { + "format": "uri", + "type": "string" + }, + "AQM_CONFIG_DIR": { + "format": "uri", + "type": "string" + } + }, + "type": "object" + }, + "platform": { + "description": "attributes of the platform", + "properties": { + "CCPA_OBS_DIR": { + "format": "uri", + "type": "string" + }, + "DOMAIN_PREGEN_BASEDIR": { + "format": "uri", + "type": "string" + }, + "METPLUS_PATH": { + "format": "uri", + "type": "string" + }, + "MET_BIN_EXEC": { + "type": "string" + }, + "MET_INSTALL_DIR": { + "format": "uri", + "type": "string" + }, + "MRMS_OBS_DIR": { + "format": "uri", + "type": "string" + }, + "NCORES_PER_NODE": { + "type": "number" + }, + "NDAS_OBS_DIR": { + "format": "uri", + "type": "string" + }, + "PARTITION_DEFAULT": { + "type": "string" + }, + "PARTITION_FCST": { + "type": "string" + }, + "PARTITION_HPSS": { + "type": "string" + }, + "QUEUE_DEFAULT": { + "type": "string" + }, + "QUEUE_FCST": { + "type": "string" + }, + "QUEUE_HPSS": { + "type": "string" + }, + "SCHED": { + "enum": [ + "slurm", + "pbspro", + "lsf", + "lsfcray", + "none" + ], + "type": "string" + }, + "WORKFLOW_MANAGER": { + "enum": [ + "rocoto", + "none" + ], + "type": "string" + } + }, + "type": "object" + }, + "task_get_da_obs": { + "description": "task for data assimilation", + "properties": { + "OBS_SUFFIX": { + "type": "string" } + }, + "type": "object" } -} \ No newline at end of file + }, + "title": "workflow config", + "type": "object" +} From 2033263c16f8eb5a030c8bc97143741ef61486dc Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Thu, 19 Oct 2023 16:26:46 +0200 Subject: [PATCH 08/66] release-workflow-case-fix (#319) --- .github/workflows/release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f0ef13139..d278229bb 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,4 +1,4 @@ -name: release +name: Release env: ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }} on: From ac31f4e455738bcdbfcdef964284e27821eb2b17 Mon Sep 17 00:00:00 2001 From: Brian Weir <94982354+WeirAE@users.noreply.github.com> Date: Thu, 19 Oct 2023 09:44:18 -0500 Subject: [PATCH 09/66] UW 322 as a user, I would like to validate my rocoto xml workflow, given the rocoto native schema (#317) * Start of CLI, need inline validation and tests * updates to fix issues Tests in progress Will recheck file.py edits * updates to testing * add inline validation of input and output * fixed tests and logic reverted file.py changes * Incorporating multiple suggestions Pending rocoto.py edits and log handling * added realize_rocoto_xml(), tests in progress * Added verbose logging, tests in progress * 
fixes done, need rocoto tests and paths * Major suggestions added, fixing minor issues * all 'as m' clarified to 'as module' * fixed path handling * Incorporating feedback and correcting validation still need to fix coverage of invalid XML * removed pragma, added temp output handling note: coverage still an issue * change naming from input_yaml to config_file * Resolving feedback, still fixing coverage * Fixing residual naming issues * invalid_xml test coverage fixed * fixed importers to handle OptionalPath * Docstrings fixed, investigating rocoto.jinja2 * Clarified schema vs template; current write error * Fixed template and task handling return Current test issue with passing over temp xml * fixed tests * Removed unnecessary declarations * Several fixes; still an issue in rocoto.jinja2 * unpatched write --- src/uwtools/cli.py | 86 ++++++++++++ src/uwtools/config/j2template.py | 13 +- src/uwtools/resources/rocoto.jinja2 | 10 +- src/uwtools/resources/rocoto.jsonschema | 10 +- src/uwtools/rocoto.py | 127 ++++++++++++++++-- src/uwtools/tests/config/test_j2template.py | 4 +- .../tests/fixtures/hello_workflow.yaml | 1 + src/uwtools/tests/test_cli.py | 93 ++++++++++--- src/uwtools/tests/test_rocoto.py | 77 ++++++++--- src/uwtools/utils/file.py | 1 + 10 files changed, 358 insertions(+), 64 deletions(-) diff --git a/src/uwtools/cli.py b/src/uwtools/cli.py index 3cac4774f..9f4acbdab 100644 --- a/src/uwtools/cli.py +++ b/src/uwtools/cli.py @@ -49,6 +49,7 @@ def main() -> None: modes = { STR.config: _dispatch_config, STR.forecast: _dispatch_forecast, + STR.rocoto: _dispatch_rocoto, STR.template: _dispatch_template, } sys.exit(0 if modes[args.mode](args) else 1) @@ -305,6 +306,89 @@ def _dispatch_forecast_run(args: Namespace) -> bool: ) +# Mode rocoto + + +def _add_subparser_rocoto(subparsers: Subparsers) -> ModeChecks: + """ + Subparser for mode: rocoto + + :param subparsers: Parent parser's subparsers, to add this subparser to. + """ + parser = _add_subparser(subparsers, STR.rocoto, "Realize and validate Rocoto XML Documents") + _basic_setup(parser) + subparsers = _add_subparsers(parser, STR.submode) + return { + STR.realize: _add_subparser_rocoto_realize(subparsers), + STR.validate: _add_subparser_rocoto_validate(subparsers), + } + + +def _add_subparser_rocoto_realize(subparsers: Subparsers) -> SubmodeChecks: + """ + Subparser for mode: rocoto realize + + :param subparsers: Parent parser's subparsers, to add this subparser to. + """ + parser = _add_subparser(subparsers, STR.realize, "Realize a Rocoto XML workflow document") + required = parser.add_argument_group(TITLE_REQ_ARG) + _add_arg_output_file(required) + optional = _basic_setup(parser) + _add_arg_input_file(optional) + checks = _add_args_quiet_and_verbose(optional) + return checks + + +def _add_subparser_rocoto_validate(subparsers: Subparsers) -> SubmodeChecks: + """ + Subparser for mode: rocoto validate + + :param subparsers: Parent parser's subparsers, to add this subparser to. + """ + parser = _add_subparser(subparsers, STR.validate, "Validate Rocoto XML") + optional = _basic_setup(parser) + _add_arg_input_file(optional) + checks = _add_args_quiet_and_verbose(optional) + return checks + + +def _dispatch_rocoto(args: Namespace) -> bool: + """ + Dispatch logic for rocoto mode. + + :param args: Parsed command-line args. 
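+    :return: True if the requested operation succeeded.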
+ """ + return { + STR.realize: _dispatch_rocoto_realize, + STR.validate: _dispatch_rocoto_validate, + }[ + args.submode + ](args) + + +def _dispatch_rocoto_realize(args: Namespace) -> bool: + """ + Dispatch logic for rocoto realize submode. Validate input and output. + + :param args: Parsed command-line args. + """ + success = uwtools.rocoto.realize_rocoto_xml( + config_file=args.input_file, rendered_output=args.output_file + ) + return success + + +def _dispatch_rocoto_validate(args: Namespace) -> bool: + """ + Dispatch logic for rocoto validate submode. + + :param args: Parsed command-line args. + """ + + success = uwtools.rocoto.validate_rocoto_xml(input_xml=args.input_file) + return success + + # Mode template @@ -675,6 +759,7 @@ def _parse_args(raw_args: List[str]) -> Tuple[Namespace, Checks]: checks = { STR.config: _add_subparser_config(subparsers), STR.forecast: _add_subparser_forecast(subparsers), + STR.rocoto: _add_subparser_rocoto(subparsers), STR.template: _add_subparser_template(subparsers), } return parser.parse_args(raw_args), checks @@ -717,6 +802,7 @@ class _STR: quiet: str = "quiet" realize: str = "realize" render: str = "render" + rocoto: str = "rocoto" run: str = "run" schemafile: str = "schema_file" submode: str = "submode" diff --git a/src/uwtools/config/j2template.py b/src/uwtools/config/j2template.py index 44cd7b86c..870d532b5 100644 --- a/src/uwtools/config/j2template.py +++ b/src/uwtools/config/j2template.py @@ -8,6 +8,9 @@ from jinja2 import BaseLoader, Environment, FileSystemLoader, Template, meta +from uwtools.types import DefinitePath, OptionalPath +from uwtools.utils.file import readable + class J2Template: """ @@ -17,7 +20,7 @@ class J2Template: def __init__( self, values: dict, - template_path: Optional[str] = None, + template_path: OptionalPath = None, template_str: Optional[str] = None, **kwargs, ) -> None: @@ -40,7 +43,7 @@ def __init__( # Public methods - def dump(self, output_path: str) -> None: + def dump(self, output_path: DefinitePath) -> None: """ Write rendered template to the path provided. @@ -70,13 +73,13 @@ def undeclared_variables(self) -> Set[str]: j2_parsed = self._j2env.parse(self._template_str) else: assert self._template_path is not None - with open(self._template_path, encoding="utf-8") as file_: + with readable(self._template_path) as file_: j2_parsed = self._j2env.parse(file_.read()) return meta.find_undeclared_variables(j2_parsed) # Private methods - def _load_file(self, template_path: str) -> Template: + def _load_file(self, template_path: OptionalPath) -> Template: """ Load the Jinja2 template from the file provided. 
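The j2template changes above let template_path be a pathlib.Path as well as a plain string. A minimal usage sketch of the updated interface, assuming an existing template file (the paths and template text below are illustrative, not part of the patch):

    from pathlib import Path

    from uwtools.config.j2template import J2Template

    # Write a tiny template, then render it via a Path-valued template_path:
    template = Path("/tmp/greeting.jinja2")
    template.write_text("Hello, {{ name }}!\n")
    j2 = J2Template(values={"name": "world"}, template_path=template)
    j2.dump(output_path=Path("/tmp/greeting.txt"))  # renders "Hello, world!"
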
@@ -85,7 +88,7 @@ def _load_file(self, template_path: str) -> Template: """ self._j2env = Environment(loader=FileSystemLoader(searchpath="/")) _register_filters(self._j2env) - return self._j2env.get_template(template_path) + return self._j2env.get_template(str(template_path)) def _load_string(self, template: str) -> Template: """ diff --git a/src/uwtools/resources/rocoto.jinja2 b/src/uwtools/resources/rocoto.jinja2 index f6e30313c..3de1adadf 100644 --- a/src/uwtools/resources/rocoto.jinja2 +++ b/src/uwtools/resources/rocoto.jinja2 @@ -62,22 +62,22 @@ {%- endfor %} ]> - + - {%- for group, cdefs in cycledefs.items() %} + {%- for group, cdefs in workflow.cycledefs.items() %} {%- for cdef in cdefs %} {{ cdef }} {%- endfor %} {%- endfor %} - {{ log }} + {{ workflow.log }} -{%- for item, settings in tasks.items() %} +{%- for item, settings in workflow.tasks.items() %} {%- if item.split("_", 1)[0] == "task" %} {{ task(name=item.split("_", 1)[-1], settings=settings ) }} {%- elif item.split("_", 1)[0] == "metatask" %} diff --git a/src/uwtools/resources/rocoto.jsonschema b/src/uwtools/resources/rocoto.jsonschema index 5c192c02e..aeb1a41a8 100644 --- a/src/uwtools/resources/rocoto.jsonschema +++ b/src/uwtools/resources/rocoto.jsonschema @@ -171,10 +171,10 @@ "maxProperties": 3, "minProperties": 2, "patternProperties": { - "^metatask(_[a-z0-9_]+)?$": { + "^metatask(_.*)?$": { "$ref": "#/$defs/metatask" }, - "^task(_[a-z0-9_]+)?$": { + "^task(_.*)?$": { "$ref": "#/$defs/task" } }, @@ -279,7 +279,7 @@ "dependency": { "$ref": "#/$defs/dependency" }, - "envar": { + "envars": { "type": "object" }, "exclusive": { @@ -407,10 +407,10 @@ "additionalProperties": false, "minProperties": 1, "patternProperties": { - "^metatask(_[a-z0-9_]+)?$": { + "^metatask(_.*)?$": { "$ref": "#/$defs/metatask" }, - "^task(_[a-z0-9_]+)?$": { + "^task(_.*)?$": { "$ref": "#/$defs/task" } }, diff --git a/src/uwtools/rocoto.py b/src/uwtools/rocoto.py index 4bf329c4d..95ae8e0ca 100644 --- a/src/uwtools/rocoto.py +++ b/src/uwtools/rocoto.py @@ -2,8 +2,18 @@ Support for creating Rocoto XML workflow documents. """ +import logging +import shutil +import tempfile +from importlib import resources + +from lxml import etree + +import uwtools.config.validator from uwtools.config.core import YAMLConfig from uwtools.config.j2template import J2Template +from uwtools.types import DefinitePath, OptionalPath +from uwtools.utils.file import readable # Private functions @@ -12,7 +22,7 @@ def _add_jobname(tree: dict) -> None: """ Add a "jobname" attribute to each "task" element in the given config tree. - :param tree: A config tree containing "task" elements.. + :param tree: A config tree containing "task" elements. """ for element, subtree in tree.items(): element_parts = element.split("_", maxsplit=1) @@ -25,20 +35,119 @@ def _add_jobname(tree: dict) -> None: _add_jobname(subtree) -# Public functions -def write_rocoto_xml(input_yaml: str, input_template: str, rendered_output: str) -> None: +def _add_jobname_to_tasks( + input_yaml: OptionalPath = None, +) -> YAMLConfig: """ - Main entry point. + Load YAML config and add job names to each defined workflow task. :param input_yaml: Path to YAML input file. - :param input_template: Path to input template file. - :param rendered_output: Path to write rendered XML file. 
""" values = YAMLConfig(input_yaml) - tasks = values["tasks"] + tasks = values["workflow"]["tasks"] if isinstance(tasks, dict): _add_jobname(tasks) + return values + + +def _rocoto_schema_xml() -> DefinitePath: + """ + The path to the file containing the schema to validate the XML file against. + """ + with resources.as_file(resources.files("uwtools.resources")) as path: + return path / "schema_with_metatasks.rng" + + +def _rocoto_schema_yaml() -> DefinitePath: + """ + The path to the file containing the schema to validate the YAML file against. + """ + with resources.as_file(resources.files("uwtools.resources")) as path: + return path / "rocoto.jsonschema" + + +def _rocoto_template_xml() -> DefinitePath: + """ + The path to the file containing the Rocoto workflow document template to render. + """ + with resources.as_file(resources.files("uwtools.resources")) as path: + return path / "rocoto.jinja2" + + +def _write_rocoto_xml( + config_file: OptionalPath, + rendered_output: DefinitePath, +) -> None: + """ + Render the Rocoto workflow defined in the given YAML to XML. + + :param config_file: Path to YAML input file. + :param rendered_output: Path to write rendered XML file. + """ + + values = _add_jobname_to_tasks(config_file) # Render the template. - template = J2Template(values=values.data, template_path=input_template) - template.dump(output_path=rendered_output) + template = J2Template(values=values.data, template_path=_rocoto_template_xml()) + template.dump(output_path=str(rendered_output)) + + +# Public functions +def realize_rocoto_xml( + config_file: OptionalPath, + rendered_output: DefinitePath, +) -> bool: + """ + Realize the Rocoto workflow defined in the given YAML as XML. Validate both the YAML input and + XML output. + + :param config_file: Path to YAML input file. + :param rendered_output: Path to write rendered XML file. + :return: Did the input and output files conform to theirr schemas? + """ + + # Validate the YAML. + if uwtools.config.validator.validate_yaml( + config_file=config_file, schema_file=_rocoto_schema_yaml() + ): + # Render the template to a temporary file. + with tempfile.NamedTemporaryFile(delete=False) as temp_file: + _write_rocoto_xml( + config_file=config_file, + rendered_output=temp_file.name, + ) + # Validate the XML. + if validate_rocoto_xml(input_xml=temp_file.name): + # If no issues were detected, save temp file and report success. + shutil.move(temp_file.name, rendered_output) + return True + logging.error("Rocoto validation errors identified in %s", temp_file.name) + return False + logging.error("YAML validation errors identified in %s", config_file) + return False + + +def validate_rocoto_xml(input_xml: OptionalPath) -> bool: + """ + Given a rendered XML file, validate it against the Rocoto schema. + + :param input_xml: Path to rendered XML file. + :return: Did the XML file conform to the schema? + """ + + # Validate the XML. + with open(_rocoto_schema_xml(), "r", encoding="utf-8") as f: + schema = etree.RelaxNG(etree.parse(f)) + with readable(input_xml) as f: + xml = f.read() + tree = etree.fromstring(bytes(xml, encoding="utf-8")) + success = schema.validate(tree) + + # Log validation errors. 
+ errors = str(etree.RelaxNG.error_log).split("\n") + log_method = logging.error if len(errors) else logging.info + log_method("%s Rocoto validation error%s found", len(errors), "" if len(errors) == 1 else "s") + for line in errors: + logging.error(line) + + return success diff --git a/src/uwtools/tests/config/test_j2template.py b/src/uwtools/tests/config/test_j2template.py index c9ad33f81..a893314d2 100644 --- a/src/uwtools/tests/config/test_j2template.py +++ b/src/uwtools/tests/config/test_j2template.py @@ -32,7 +32,7 @@ def test_bad_args(testdata): def test_dump(testdata, tmp_path): - path = str(tmp_path / "rendered.txt") + path = tmp_path / "rendered.txt" j2template = J2Template(testdata.config, template_str=testdata.template) j2template.dump(output_path=path) with open(path, "r", encoding="utf-8") as f: @@ -43,7 +43,7 @@ def test_render_file(testdata, tmp_path): path = tmp_path / "template.jinja2" with path.open("w", encoding="utf-8") as f: print(testdata.template, file=f) - validate(J2Template(testdata.config, template_path=str(path))) + validate(J2Template(testdata.config, template_path=path)) def test_render_string(testdata): diff --git a/src/uwtools/tests/fixtures/hello_workflow.yaml b/src/uwtools/tests/fixtures/hello_workflow.yaml index 9db8657d7..be42a83fb 100644 --- a/src/uwtools/tests/fixtures/hello_workflow.yaml +++ b/src/uwtools/tests/fixtures/hello_workflow.yaml @@ -1,3 +1,4 @@ +workflow: attrs: realtime: false scheduler: slurm diff --git a/src/uwtools/tests/test_cli.py b/src/uwtools/tests/test_cli.py index 060586d8f..7dbcf6141 100644 --- a/src/uwtools/tests/test_cli.py +++ b/src/uwtools/tests/test_cli.py @@ -192,17 +192,17 @@ def test__dispatch_config(params): submode, funcname = params args = ns() vars(args).update({STR.submode: submode}) - with patch.object(cli, funcname) as m: + with patch.object(cli, funcname) as func: cli._dispatch_config(args) - assert m.called_once_with(args) + assert func.called_once_with(args) def test__dispatch_config_compare(): args = ns() vars(args).update({STR.file1path: 1, STR.file1fmt: 2, STR.file2path: 3, STR.file2fmt: 4}) - with patch.object(cli.uwtools.config.core, "compare_configs") as m: + with patch.object(cli.uwtools.config.core, "compare_configs") as compare_configs: cli._dispatch_config_compare(args) - assert m.called_once_with(args) + assert compare_configs.called_once_with(args) def test__dispatch_config_realize(): @@ -219,9 +219,9 @@ def test__dispatch_config_realize(): STR.dryrun: 8, } ) - with patch.object(cli.uwtools.config.core, "realize_config") as m: + with patch.object(cli.uwtools.config.core, "realize_config") as realize_config: cli._dispatch_config_realize(args) - assert m.called_once_with(args) + assert realize_config.called_once_with(args) def test__dispatch_config_translate_arparse_to_jinja2(): @@ -235,12 +235,12 @@ def test__dispatch_config_translate_arparse_to_jinja2(): STR.dryrun: 5, } ) - with patch.object(cli.uwtools.config.atparse_to_jinja2, "convert") as m: + with patch.object(cli.uwtools.config.atparse_to_jinja2, "convert") as convert: cli._dispatch_config_translate(args) - assert m.called_once_with(args) + assert convert.called_once_with(args) -def test_dispath_config_translate_unsupported(): +def test__dispatch_config_translate_unsupported(): args = ns() vars(args).update( {STR.infile: 1, STR.infmt: "jpg", STR.outfile: 3, STR.outfmt: "png", STR.dryrun: 5} @@ -251,12 +251,12 @@ def test_dispath_config_translate_unsupported(): def test__dispatch_config_validate_yaml(): args = ns() 
vars(args).update({STR.infile: 1, STR.infmt: FORMAT.yaml, STR.schemafile: 3}) - with patch.object(cli.uwtools.config.validator, "validate_yaml") as m: + with patch.object(cli.uwtools.config.validator, "validate_yaml") as validate_yaml: cli._dispatch_config_validate(args) - assert m.called_once_with(args) + assert validate_yaml.called_once_with(args) -def test_dispath_config_validate_unsupported(): +def test__dispatch_config_validate_unsupported(): args = ns() vars(args).update({STR.infile: 1, STR.infmt: "jpg", STR.schemafile: 3}) assert cli._dispatch_config_validate(args) is False @@ -267,9 +267,9 @@ def test__dispatch_forecast(params): submode, funcname = params args = ns() vars(args).update({STR.submode: submode}) - with patch.object(cli, funcname) as m: + with patch.object(cli, funcname) as module: cli._dispatch_forecast(args) - assert m.called_once_with(args) + assert module.called_once_with(args) def test__dispatch_forecast_run(): @@ -281,12 +281,63 @@ def test__dispatch_forecast_run(): forecast_model="foo", ) vars(args).update({STR.cfgfile: 1, "forecast_model": "foo"}) - with patch.object(cli.uwtools.drivers.forecast, "FooForecast", create=True) as m: + with patch.object(cli.uwtools.drivers.forecast, "FooForecast", create=True) as FooForecast: CLASSES = {"foo": getattr(cli.uwtools.drivers.forecast, "FooForecast")} with patch.object(cli.uwtools.drivers.forecast, "CLASSES", new=CLASSES): cli._dispatch_forecast_run(args) - assert m.called_once_with(args) - m().run.assert_called_once_with(cycle="2023-01-01T00:00:00") + assert FooForecast.called_once_with(args) + FooForecast().run.assert_called_once_with(cycle="2023-01-01T00:00:00") + + +@pytest.mark.parametrize( + "params", + [ + (STR.realize, "_dispatch_rocoto_realize"), + (STR.validate, "_dispatch_rocoto_validate"), + ], +) +def test__dispatch_rocoto(params): + submode, funcname = params + args = ns() + vars(args).update({STR.submode: submode}) + with patch.object(cli, funcname) as module: + cli._dispatch_rocoto(args) + assert module.called_once_with(args) + + +def test__dispatch_rocoto_realize(): + args = ns() + vars(args).update({STR.infile: 1, STR.outfile: 2}) + with patch.object(cli.uwtools.rocoto, "realize_rocoto_xml") as module: + cli._dispatch_rocoto_realize(args) + assert module.called_once_with(args) + + +def test__dispatch_rocoto_realize_invalid(): + args = ns() + vars(args).update( + { + STR.infile: 1, + STR.outfile: 2, + } + ) + with patch.object(cli.uwtools.rocoto, "realize_rocoto_xml", return_value=False): + assert cli._dispatch_rocoto_realize(args) is False + + +def test__dispatch_rocoto_validate_xml(): + args = ns() + vars(args).update({STR.infile: 1}) + with patch.object(cli.uwtools.rocoto, "validate_rocoto_xml") as validate: + cli._dispatch_rocoto_validate(args) + assert validate.called_once_with(args) + + +def test__dispatch_rocoto_validate_xml_invalid(): + args = ns() + vars(args).update({STR.infile: 1, STR.verbose: False}) + with patch.object(cli.uwtools.rocoto, "validate_rocoto_xml", return_value=False): + assert cli._dispatch_rocoto_validate(args) is False @pytest.mark.parametrize("params", [(STR.render, "_dispatch_template_render")]) @@ -294,9 +345,9 @@ def test__dispatch_template(params): submode, funcname = params args = ns() vars(args).update({STR.submode: submode}) - with patch.object(cli, funcname) as m: + with patch.object(cli, funcname) as func: cli._dispatch_template(args) - assert m.called_once_with(args) + assert func.called_once_with(args) def test__dispatch_template_render_yaml(): @@ -312,9 +363,9 @@ 
def test__dispatch_template_render_yaml(): STR.dryrun: 7, } ) - with patch.object(cli.uwtools.config.templater, STR.render) as m: + with patch.object(cli.uwtools.config.templater, STR.render) as templater: cli._dispatch_template_render(args) - assert m.called_once_with(args) + assert templater.called_once_with(args) @pytest.mark.parametrize("quiet", [True]) diff --git a/src/uwtools/tests/test_rocoto.py b/src/uwtools/tests/test_rocoto.py index f00b74040..a0031ff05 100644 --- a/src/uwtools/tests/test_rocoto.py +++ b/src/uwtools/tests/test_rocoto.py @@ -3,19 +3,21 @@ Tests for uwtools.rocoto module. """ +import tempfile from importlib import resources +from unittest.mock import patch import pytest import yaml -from lxml import etree from uwtools import rocoto +from uwtools.config.core import YAMLConfig from uwtools.tests import support # Test functions -def test_add_jobname(): +def test__add_jobname(): expected = yaml.safe_load( """ task_hello: @@ -44,26 +46,67 @@ def test_add_jobname(): assert expected == tree -def test_write_rocoto_xml(tmp_path): - input_yaml = support.fixture_path("hello_workflow.yaml") - with resources.as_file(resources.files("uwtools.resources")) as resc: - input_template = resc / "rocoto.jinja2" +def test__add_jobname_to_tasks(): + with resources.as_file(resources.files("uwtools.tests.fixtures")) as path: + input_yaml = path / "hello_workflow.yaml" + + values = YAMLConfig(input_yaml) + tasks = values["workflow"]["tasks"] + with patch.object(rocoto, "_add_jobname") as module: + rocoto._add_jobname_to_tasks(input_yaml) + assert module.called_once_with(tasks) + + +def test__rocoto_schema_yaml(): + with resources.as_file(resources.files("uwtools.resources")) as path: + expected = path / "rocoto.jsonschema" + assert rocoto._rocoto_schema_yaml() == expected + + +def test__rocoto_schema_xml(): + with resources.as_file(resources.files("uwtools.resources")) as path: + expected = path / "schema_with_metatasks.rng" + assert rocoto._rocoto_schema_xml() == expected + + +@pytest.mark.parametrize("vals", [("hello_workflow.yaml", True), ("fruit_config.yaml", False)]) +def test_realize_rocoto_xml(vals, tmp_path): + fn, validity = vals output = tmp_path / "rendered.xml" - rocoto.write_rocoto_xml( - input_yaml=input_yaml, input_template=str(input_template), rendered_output=str(output) - ) - expected = support.fixture_path("hello_workflow.xml") - support.compare_files(expected, output) + with patch.object(rocoto, "validate_rocoto_xml", value=True): + with patch.object(rocoto.uwtools.config.validator, "_bad_paths", return_value=None): + with resources.as_file(resources.files("uwtools.tests.fixtures")) as path: + config_file = path / fn + result = rocoto.realize_rocoto_xml(config_file=config_file, rendered_output=output) + assert result is validity + + +def test_realize_rocoto_invalid_xml(): + config_file = support.fixture_path("hello_workflow.yaml") + xml = support.fixture_path("rocoto_invalid.xml") + with patch.object(rocoto, "_write_rocoto_xml", return_value=None): + with patch.object(rocoto.uwtools.config.validator, "_bad_paths", return_value=None): + with patch.object(tempfile, "NamedTemporaryFile") as context_manager: + context_manager.return_value.__enter__.return_value.name = xml + result = rocoto.realize_rocoto_xml(config_file=config_file, rendered_output=xml) + assert result is False @pytest.mark.parametrize("vals", [("hello_workflow.xml", True), ("rocoto_invalid.xml", False)]) def test_rocoto_xml_is_valid(vals): fn, validity = vals - with 
resources.as_file(resources.files("uwtools.resources")) as resc:
-        with open(resc / "schema_with_metatasks.rng", "r", encoding="utf-8") as f:
-            schema = etree.RelaxNG(etree.parse(f))
     xml = support.fixture_path(fn)
-    tree = etree.parse(xml)
-    assert schema.validate(tree) is validity
+    result = rocoto.validate_rocoto_xml(input_xml=xml)
+
+    assert result is validity
+
+
+def test__write_rocoto_xml(tmp_path):
+    config_file = support.fixture_path("hello_workflow.yaml")
+    output = tmp_path / "rendered.xml"
+
+    rocoto._write_rocoto_xml(config_file=config_file, rendered_output=output)
+
+    expected = support.fixture_path("hello_workflow.xml")
+    assert support.compare_files(expected, output) is True
diff --git a/src/uwtools/utils/file.py b/src/uwtools/utils/file.py
index c00784c7d..87c20973f 100644
--- a/src/uwtools/utils/file.py
+++ b/src/uwtools/utils/file.py
@@ -28,6 +28,7 @@ class _FORMAT:
     _ini: str = "ini"
     _jinja2: str = "jinja2"
     _nml: str = "nml"
+    _xml: str = "xml"
     _yaml: str = "yaml"

     # Variants:

From 42911558830d04e5150a27c22d791c7a16f9c47e Mon Sep 17 00:00:00 2001
From: Brian Weir <94982354+WeirAE@users.noreply.github.com>
Date: Fri, 20 Oct 2023 11:41:23 -0500
Subject: [PATCH 10/66] Fix command-line use of uw rocoto (#321)

* fixed import
---
 src/uwtools/cli.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/uwtools/cli.py b/src/uwtools/cli.py
index 9f4acbdab..787574a53 100644
--- a/src/uwtools/cli.py
+++ b/src/uwtools/cli.py
@@ -19,6 +19,7 @@
 import uwtools.config.templater
 import uwtools.config.validator
 import uwtools.drivers.forecast
+import uwtools.rocoto
 from uwtools.logging import setup_logging
 from uwtools.utils.file import FORMAT, get_file_type

From ae4ef7082fe6facde936cef8fd1f1d729b5e58f2 Mon Sep 17 00:00:00 2001
From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com>
Date: Tue, 24 Oct 2023 09:36:37 -0600
Subject: [PATCH 11/66] switchable-logging (#320)

---
 src/uwtools/apps/srw.py                     |  4 +-
 src/uwtools/cli.py                          |  5 +--
 src/uwtools/config/atparse_to_jinja2.py     |  4 +-
 src/uwtools/config/core.py                  | 45 +++++++++----------
 src/uwtools/config/j2template.py            |  4 +-
 src/uwtools/config/templater.py             | 25 +++++------
 src/uwtools/config/validator.py             |  8 ++--
 src/uwtools/drivers/driver.py               |  6 +--
 src/uwtools/drivers/forecast.py             | 10 ++---
 src/uwtools/files/gateway/s3.py             |  5 ++-
 src/uwtools/files/gateway/unix.py           |  4 +-
 src/uwtools/logging.py                      | 31 +++++++++++++
 src/uwtools/rocoto.py                       | 10 ++---
 src/uwtools/scheduler.py                    |  4 +-
 .../tests/config/test_atparse_to_jinja2.py  |  5 ++-
 src/uwtools/tests/config/test_core.py       | 27 +++++------
 src/uwtools/tests/config/test_templater.py  |  9 ++--
 src/uwtools/tests/config/test_validator.py  |  9 ++--
 src/uwtools/tests/drivers/test_driver.py    |  3 +-
 src/uwtools/tests/drivers/test_forecast.py  |  3 +-
 src/uwtools/tests/test_cli.py               |  3 +-
 src/uwtools/tests/test_logging.py           | 12 ++++-
 src/uwtools/tests/utils/test_processing.py  |  8 ++--
 src/uwtools/utils/file.py                   |  8 ++--
 src/uwtools/utils/processing.py             | 23 +++++-----
 25 files changed, 162 insertions(+), 113 deletions(-)

diff --git a/src/uwtools/apps/srw.py b/src/uwtools/apps/srw.py
index 062363fe4..2fe0e575c 100644
--- a/src/uwtools/apps/srw.py
+++ b/src/uwtools/apps/srw.py
@@ -2,11 +2,11 @@
 This file contains the specific drivers for a particular app, using the facade pattern base class.
""" -import logging import shutil from uwtools.drivers.facade import Facade from uwtools.exceptions import UWError +from uwtools.logging import log from uwtools.utils.file import FORMAT, get_file_type from uwtools.utils.processing import execute @@ -47,7 +47,7 @@ def load_config(self, config_file: str) -> None: shutil.copy2(config_file, "config.yaml") else: msg = f"Bad file type -- {file_type}. Cannot load configuration!" - logging.critical(msg) + log.critical(msg) raise ValueError(msg) def validate_config(self, config_file: str) -> None: diff --git a/src/uwtools/cli.py b/src/uwtools/cli.py index 787574a53..8fed0ef42 100644 --- a/src/uwtools/cli.py +++ b/src/uwtools/cli.py @@ -3,7 +3,6 @@ """ import datetime -import logging import sys from argparse import ArgumentParser as Parser from argparse import HelpFormatter, Namespace @@ -20,7 +19,7 @@ import uwtools.config.validator import uwtools.drivers.forecast import uwtools.rocoto -from uwtools.logging import setup_logging +from uwtools.logging import log, setup_logging from uwtools.utils.file import FORMAT, get_file_type FORMATS = [FORMAT.ini, FORMAT.nml, FORMAT.yaml] @@ -46,7 +45,7 @@ def main() -> None: for check in checks[args.mode][args.submode]: check(args) setup_logging(quiet=args.quiet, verbose=args.verbose) - logging.debug("Command: %s %s", Path(sys.argv[0]).name, " ".join(sys.argv[1:])) + log.debug("Command: %s %s", Path(sys.argv[0]).name, " ".join(sys.argv[1:])) modes = { STR.config: _dispatch_config, STR.forecast: _dispatch_forecast, diff --git a/src/uwtools/config/atparse_to_jinja2.py b/src/uwtools/config/atparse_to_jinja2.py index df371cf8f..480122c6b 100644 --- a/src/uwtools/config/atparse_to_jinja2.py +++ b/src/uwtools/config/atparse_to_jinja2.py @@ -2,10 +2,10 @@ Utilities for rendering Jinja2 templates. """ -import logging import re from typing import IO, Any, Generator, Optional +from uwtools.logging import log from uwtools.utils.file import readable, writable @@ -34,7 +34,7 @@ def write(f_out: IO) -> None: if dry_run: for line in lines(): - logging.info(line) + log.info(line) else: with writable(output_file) as f: write(f) diff --git a/src/uwtools/config/core.py b/src/uwtools/config/core.py index 0f125bade..dcf0e2bee 100644 --- a/src/uwtools/config/core.py +++ b/src/uwtools/config/core.py @@ -7,7 +7,6 @@ import configparser import copy import json -import logging import os import re import sys @@ -22,7 +21,7 @@ from uwtools.config.j2template import J2Template from uwtools.exceptions import UWConfigError -from uwtools.logging import MSGWIDTH +from uwtools.logging import MSGWIDTH, log from uwtools.types import DefinitePath, OptionalPath from uwtools.utils.file import FORMAT, get_file_type, readable, writable @@ -174,7 +173,7 @@ def compare_config(self, dict1: dict, dict2: Optional[dict] = None) -> bool: for sect, keys in diffs.items(): for key in keys: msg = f"{sect}: {key:>15}: {keys[key]}" - logging.info(msg) + log.info(msg) return not diffs @@ -260,13 +259,13 @@ def dereference( except Exception as e: # Fail on any other exception...something is probably wrong. 
msg = f"{key}: {template}" - logging.exception(msg) + log.exception(msg) raise e data.append(rendered) for tmpl, err in error_catcher.items(): msg = f"{func_name}: {tmpl} raised {err}" - logging.debug(msg) + log.debug(msg) for tmpl, rendered in zip(templates, data): v_str = v_str.replace(tmpl, rendered) @@ -670,9 +669,9 @@ def compare_configs( cfg_a = format_to_config(config_a_format)(config_a_path) cfg_b = format_to_config(config_b_format)(config_b_path) - logging.info("- %s", config_a_path) - logging.info("+ %s", config_b_path) - logging.info("-" * MSGWIDTH) + log.info("- %s", config_a_path) + log.info("+ %s", config_b_path) + log.info("-" * MSGWIDTH) return cfg_a.compare_config(cfg_b.data) @@ -754,7 +753,7 @@ def realize_config( if values_needed: return _realize_config_values_needed(input_obj) if dry_run: - logging.info(input_obj) + log.info(input_obj) else: format_to_config(output_format).dump_dict(path=output_file, cfg=input_obj.data) return True @@ -769,7 +768,7 @@ def _log_and_error(msg: str) -> Exception: :param msg: The error message to log and to associate with raised exception. """ - logging.error(msg) + log.error(msg) return UWConfigError(msg) @@ -785,7 +784,7 @@ def _realize_config_check_depths(input_obj: Config, output_format: str) -> None: output_format == FORMAT.nml and input_obj.depth != 2 ): msg = "Cannot write depth-%s input to type-'%s' output" % (input_obj.depth, output_format) - logging.error(msg) + log.error(msg) raise UWConfigError(msg) @@ -801,15 +800,15 @@ def _realize_config_update( :return: The input config, possibly updated. """ if values_file: - logging.debug("Before update, config has depth %s", input_obj.depth) + log.debug("Before update, config has depth %s", input_obj.depth) values_format = values_format or get_file_type(values_file) values_obj = format_to_config(values_format)(config_file=values_file) - logging.debug("Values config has depth %s", values_obj.depth) + log.debug("Values config has depth %s", values_obj.depth) input_obj.update_values(values_obj) input_obj.dereference_all() - logging.debug("After update, input config has depth %s", input_obj.depth) + log.debug("After update, input config has depth %s", input_obj.depth) else: - logging.debug("Input config has depth %s", input_obj.depth) + log.debug("Input config has depth %s", input_obj.depth) return input_obj @@ -820,15 +819,15 @@ def _realize_config_values_needed(input_obj: Config) -> bool: :param input_obj: The config to update. """ complete, empty, template = input_obj.characterize_values(input_obj.data, parent="") - logging.info("Keys that are complete:") + log.info("Keys that are complete:") for var in complete: - logging.info(var) - logging.info("") - logging.info("Keys that have unfilled Jinja2 templates:") + log.info(var) + log.info("") + log.info("Keys that have unfilled Jinja2 templates:") for var in template: - logging.info(var) - logging.info("") - logging.info("Keys that are set to empty:") + log.info(var) + log.info("") + log.info("Keys that are set to empty:") for var in empty: - logging.info(var) + log.info(var) return True diff --git a/src/uwtools/config/j2template.py b/src/uwtools/config/j2template.py index 870d532b5..c0707534e 100644 --- a/src/uwtools/config/j2template.py +++ b/src/uwtools/config/j2template.py @@ -2,12 +2,12 @@ Support for handling Jinja2 templates. 
""" -import logging import os from typing import List, Optional, Set from jinja2 import BaseLoader, Environment, FileSystemLoader, Template, meta +from uwtools.logging import log from uwtools.types import DefinitePath, OptionalPath from uwtools.utils.file import readable @@ -50,7 +50,7 @@ def dump(self, output_path: DefinitePath) -> None: :param output_path: Path to file to write. """ msg = f"Writing rendered template to output file: {output_path}" - logging.debug(msg) + log.debug(msg) with open(output_path, "w+", encoding="utf-8") as f: print(self.render(), file=f) diff --git a/src/uwtools/config/templater.py b/src/uwtools/config/templater.py index 5b280a440..be1ebefdd 100644 --- a/src/uwtools/config/templater.py +++ b/src/uwtools/config/templater.py @@ -2,13 +2,12 @@ Support for rendering Jinja2 templates. """ -import logging import os from typing import Dict, Optional from uwtools.config.core import format_to_config from uwtools.config.j2template import J2Template -from uwtools.logging import MSGWIDTH +from uwtools.logging import MSGWIDTH, log from uwtools.types import DefinitePath, OptionalPath from uwtools.utils.file import get_file_type, readable, writable @@ -45,9 +44,9 @@ def render( # then return. if values_needed: - logging.info("Value(s) needed to render this template are:") + log.info("Value(s) needed to render this template are:") for var in sorted(undeclared_variables): - logging.info(var) + log.info(var) return True # Check for missing values required to render the template. If found, report them and raise an @@ -56,9 +55,9 @@ def render( missing = [var for var in undeclared_variables if var not in values.keys()] if missing: msg = "Required value(s) not provided:" - logging.error(msg) + log.error(msg) for key in missing: - logging.error(key) + log.error(key) return False # In dry-run mode, display the rendered template and then return. @@ -66,7 +65,7 @@ def render( if dry_run: rendered_template = template.render() for line in rendered_template.split("\n"): - logging.info(line) + log.info(line) return True # Write rendered template to file. @@ -82,11 +81,11 @@ def _report(args: dict) -> None: :param args: The argument names and their values. """ - dashes = lambda: logging.debug("-" * MSGWIDTH) - logging.debug("Internal arguments:") + dashes = lambda: log.debug("-" * MSGWIDTH) + log.debug("Internal arguments:") dashes() for varname, value in args.items(): - logging.debug("%16s: %s", varname, value) + log.debug("%16s: %s", varname, value) dashes() @@ -108,11 +107,11 @@ def _set_up_values_obj( values_format = get_file_type(values_file) values_class = format_to_config(values_format) values = values_class(values_file).data - logging.debug("Read initial values from %s", values_file) + log.debug("Read initial values from %s", values_file) else: values = dict(os.environ) # Do not modify os.environ: Make a copy. 
- logging.debug("Initial values taken from environment") + log.debug("Initial values taken from environment") if overrides: values.update(overrides) - logging.debug("Updated values with overrides: %s", " ".join(overrides)) + log.debug("Updated values with overrides: %s", " ".join(overrides)) return values diff --git a/src/uwtools/config/validator.py b/src/uwtools/config/validator.py index 0dbea610e..022933bc8 100644 --- a/src/uwtools/config/validator.py +++ b/src/uwtools/config/validator.py @@ -3,13 +3,13 @@ """ import json -import logging from pathlib import Path from typing import List, Optional import jsonschema from uwtools.config.core import YAMLConfig +from uwtools.logging import log from uwtools.types import DefinitePath, OptionalPath # Public functions @@ -34,11 +34,11 @@ def validate_yaml( schema = json.load(f) # Collect and report on schema-validation errors. errors = _validation_errors(yaml_config.data, schema) - log_method = logging.error if errors else logging.info + log_method = log.error if errors else log.info log_method("%s schema-validation error%s found", len(errors), "" if len(errors) == 1 else "s") for error in errors: for line in str(error).split("\n"): - logging.error(line) + log.error(line) # It's pointless to evaluate an invalid config, so return now if that's the case. if errors: return False @@ -46,7 +46,7 @@ def validate_yaml( if check_paths: if bad_paths := _bad_paths(yaml_config.data, schema): for bad_path in bad_paths: - logging.error("Path does not exist: %s", bad_path) + log.error("Path does not exist: %s", bad_path) return False # If no issues were detected, report success. return True diff --git a/src/uwtools/drivers/driver.py b/src/uwtools/drivers/driver.py index 843b24488..28555a1f4 100644 --- a/src/uwtools/drivers/driver.py +++ b/src/uwtools/drivers/driver.py @@ -2,7 +2,6 @@ Provides an abstract class representing drivers for various NWP tools. """ -import logging import os import shutil from abc import ABC, abstractmethod @@ -12,6 +11,7 @@ from uwtools.config import validator from uwtools.config.core import Config, YAMLConfig +from uwtools.logging import log from uwtools.scheduler import BatchScript, JobScheduler from uwtools.types import OptionalPath @@ -130,7 +130,7 @@ def stage_files( else: link_or_copy(src_path_or_paths, dst_path) # type: ignore msg = f"File {src_path_or_paths} staged as {dst_path}" - logging.info(msg) + log.info(msg) # Private methods @@ -160,7 +160,7 @@ def _create_user_updated_config( config_class.dump_dict(path=output_path, cfg=user_values) msg = f"Configure file {output_path} created" - logging.info(msg) + log.info(msg) def _validate(self) -> bool: """ diff --git a/src/uwtools/drivers/forecast.py b/src/uwtools/drivers/forecast.py index fc9b2fd71..f37487d13 100644 --- a/src/uwtools/drivers/forecast.py +++ b/src/uwtools/drivers/forecast.py @@ -3,7 +3,6 @@ """ -import logging import os import sys from collections.abc import Mapping @@ -14,6 +13,7 @@ from uwtools.config.core import FieldTableConfig, NMLConfig, YAMLConfig from uwtools.drivers.driver import Driver +from uwtools.logging import log from uwtools.scheduler import BatchScript from uwtools.types import DefinitePath, OptionalPath from uwtools.utils.file import handle_existing @@ -72,7 +72,7 @@ def create_directory_structure(run_directory: DefinitePath, exist_act: str = "de # Exit program with error if caller chooses to quit. 
if exist_act == "quit" and os.path.isdir(run_directory): - logging.critical("User chose quit option when creating directory") + log.critical("User chose quit option when creating directory") sys.exit(1) # Delete or rename directory if it exists. @@ -83,7 +83,7 @@ def create_directory_structure(run_directory: DefinitePath, exist_act: str = "de for subdir in ("INPUT", "RESTART"): path = os.path.join(run_directory, subdir) - logging.info("Creating directory: %s", path) + log.info("Creating directory: %s", path) os.makedirs(path) def create_field_table(self, output_path: OptionalPath) -> None: @@ -170,7 +170,7 @@ def run(self, cycle: datetime) -> bool: if self._dry_run: # Apply switch to allow user to view the run command of config. # This will not run the job. - logging.info("Batch Script:") + log.info("Batch Script:") batch_script.dump(None) return True @@ -180,7 +180,7 @@ def run(self, cycle: datetime) -> bool: pre_run = self._mpi_env_variables(" ") full_cmd = f"{pre_run} {self.run_cmd()}" if self._dry_run: - logging.info("Would run: ") + log.info("Would run: ") print(full_cmd, file=sys.stdout) return True diff --git a/src/uwtools/files/gateway/s3.py b/src/uwtools/files/gateway/s3.py index 73ed85055..f9630ea40 100644 --- a/src/uwtools/files/gateway/s3.py +++ b/src/uwtools/files/gateway/s3.py @@ -2,7 +2,6 @@ Gateway for interacting with S3. """ -import logging import os import pathlib from typing import Optional @@ -10,6 +9,8 @@ import boto3 from botocore.exceptions import ClientError +from uwtools.logging import log + S3_CLIENT = boto3.client("s3") @@ -46,6 +47,6 @@ def upload_file(source_path: str, bucket: str, target_name: Optional[str] = None try: S3_CLIENT.upload_file(source_path, bucket, target_name) except ClientError as error: - logging.error(error) + log.error(error) return False return True diff --git a/src/uwtools/files/gateway/unix.py b/src/uwtools/files/gateway/unix.py index 703277407..cf23d596a 100644 --- a/src/uwtools/files/gateway/unix.py +++ b/src/uwtools/files/gateway/unix.py @@ -2,13 +2,13 @@ Unix-based, threaded, local file copying. """ -import logging import shutil from concurrent.futures import ThreadPoolExecutor, wait from pathlib import Path from typing import List, Tuple from uwtools.files.model import File +from uwtools.logging import log class Copier: @@ -41,7 +41,7 @@ def _copy(src: Path, dst: Path) -> None: Directories are copied recursively. """ - logging.debug("Copying %s to %s", src, dst) + log.debug("Copying %s to %s", src, dst) if src.is_file(): shutil.copy(src, dst) else: diff --git a/src/uwtools/logging.py b/src/uwtools/logging.py index 4f976ff51..34978bfd4 100644 --- a/src/uwtools/logging.py +++ b/src/uwtools/logging.py @@ -5,6 +5,7 @@ import logging import os import sys +from typing import Any # The logging prefix # @@ -16,6 +17,27 @@ MSGWIDTH = 69 +class _Logger: + """ + Support for swappable loggers. + """ + + def __init__(self) -> None: + self.logger = logging.getLogger() # default to Python root logger. + + def __getattr__(self, attr: str) -> Any: + """ + Delegate attribute access to the currently-used logger. + + :param attr: The attribute to access. + :returns: The requested attribute. + """ + return getattr(self.logger, attr) + + +log = _Logger() + + def setup_logging(quiet: bool = False, verbose: bool = False) -> None: """ Set up logging. 
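The _Logger shim above delegates attribute lookups such as log.info to whichever standard-library logger is currently installed, defaulting to the Python root logger; the use_logger helper added in the next hunk swaps that delegate at runtime. A minimal usage sketch (the application logger name is illustrative, not from the patch):

    import logging

    from uwtools.logging import log, setup_logging, use_logger

    setup_logging()  # default INFO-level root-logger configuration
    log.info("logged via the Python root logger")  # default delegate

    use_logger(logging.getLogger("my-app"))  # install an application logger
    log.info("now delegated to the my-app logger")
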
@@ -36,3 +58,12 @@ def setup_logging(quiet: bool = False, verbose: bool = False) -> None: **({"filename": os.devnull} if quiet else {}), } logging.basicConfig(**kwargs) + + +def use_logger(logger: logging.Logger) -> None: + """ + Log hereafter via the given logger. + + :param logger: The logger to log to. + """ + log.logger = logger diff --git a/src/uwtools/rocoto.py b/src/uwtools/rocoto.py index 95ae8e0ca..a02219dba 100644 --- a/src/uwtools/rocoto.py +++ b/src/uwtools/rocoto.py @@ -2,7 +2,6 @@ Support for creating Rocoto XML workflow documents. """ -import logging import shutil import tempfile from importlib import resources @@ -12,6 +11,7 @@ import uwtools.config.validator from uwtools.config.core import YAMLConfig from uwtools.config.j2template import J2Template +from uwtools.logging import log from uwtools.types import DefinitePath, OptionalPath from uwtools.utils.file import readable @@ -121,9 +121,9 @@ def realize_rocoto_xml( # If no issues were detected, save temp file and report success. shutil.move(temp_file.name, rendered_output) return True - logging.error("Rocoto validation errors identified in %s", temp_file.name) + log.error("Rocoto validation errors identified in %s", temp_file.name) return False - logging.error("YAML validation errors identified in %s", config_file) + log.error("YAML validation errors identified in %s", config_file) return False @@ -145,9 +145,9 @@ def validate_rocoto_xml(input_xml: OptionalPath) -> bool: # Log validation errors. errors = str(etree.RelaxNG.error_log).split("\n") - log_method = logging.error if len(errors) else logging.info + log_method = log.error if len(errors) else log.info log_method("%s Rocoto validation error%s found", len(errors), "" if len(errors) == 1 else "s") for line in errors: - logging.error(line) + log.error(line) return success diff --git a/src/uwtools/scheduler.py b/src/uwtools/scheduler.py index c55d8ea8f..98a1cbb47 100644 --- a/src/uwtools/scheduler.py +++ b/src/uwtools/scheduler.py @@ -4,12 +4,12 @@ from __future__ import annotations -import logging import re from collections import UserDict, UserList from collections.abc import Mapping from typing import Any, Dict, List +from uwtools.logging import log from uwtools.types import DefinitePath, OptionalPath from uwtools.utils import Memory from uwtools.utils.file import writable @@ -179,7 +179,7 @@ def get_scheduler(props: Mapping) -> JobScheduler: if "scheduler" not in props: raise KeyError(f"No scheduler defined in props: [{', '.join(props.keys())}]") name = props["scheduler"] - logging.debug("Getting '%s' scheduler", name) + log.debug("Getting '%s' scheduler", name) schedulers = {"slurm": Slurm, "pbs": PBS, "lsf": LSF} try: scheduler = schedulers[name] diff --git a/src/uwtools/tests/config/test_atparse_to_jinja2.py b/src/uwtools/tests/config/test_atparse_to_jinja2.py index dd5f9fe41..78fc709dd 100644 --- a/src/uwtools/tests/config/test_atparse_to_jinja2.py +++ b/src/uwtools/tests/config/test_atparse_to_jinja2.py @@ -11,6 +11,7 @@ from pytest import fixture from uwtools.config import atparse_to_jinja2 +from uwtools.logging import log # Helper functions @@ -48,7 +49,7 @@ def test_convert_input_file_to_output_file(atparsefile, capsys, jinja2txt, tmp_p def test_convert_input_file_to_logging(atparsefile, caplog, capsys, jinja2txt, tmp_path): - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) outfile = tmp_path / "outfile" atparse_to_jinja2.convert(input_file=atparsefile, dry_run=True) streams = capsys.readouterr() @@ -76,7 +77,7 @@ def 
test_convert_stdin_to_file(atparselines, capsys, jinja2txt, tmp_path):


 def test_convert_stdin_to_logging(atparselines, caplog, jinja2txt, tmp_path):
-    logging.getLogger().setLevel(logging.INFO)
+    log.setLevel(logging.INFO)
     outfile = tmp_path / "outfile"
     with patch.object(sys, "stdin", new=StringIO("\n".join(atparselines))):
         atparse_to_jinja2.convert(output_file=outfile, dry_run=True)
diff --git a/src/uwtools/tests/config/test_core.py b/src/uwtools/tests/config/test_core.py
index a8e969828..6c52a72c8 100644
--- a/src/uwtools/tests/config/test_core.py
+++ b/src/uwtools/tests/config/test_core.py
@@ -20,6 +20,7 @@
 from uwtools import exceptions
 from uwtools.config import core
 from uwtools.exceptions import UWConfigError
+from uwtools.logging import log
 from uwtools.tests.support import compare_files, fixture_path, logged
 from uwtools.utils.file import FORMAT, path_if_it_exists, writable

@@ -31,7 +32,7 @@ def test_compare_config(caplog, fmt, salad_base):
     """
     Compare two config objects.
     """
-    logging.getLogger().setLevel(logging.INFO)
+    log.setLevel(logging.INFO)
     cfgobj = core.format_to_config(fmt)(fixture_path(f"simple.{fmt}"))
     if fmt == FORMAT.ini:
         salad_base["salad"]["how_many"] = "12"  # str "12" (not int 12) for ini
@@ -55,7 +56,7 @@ def test_compare_config(caplog, fmt, salad_base):


 def test_compare_configs_good(compare_configs_assets, caplog):
-    logging.getLogger().setLevel(logging.INFO)
+    log.setLevel(logging.INFO)
     _, a, b = compare_configs_assets
     assert core.compare_configs(
         config_a_path=a, config_a_format=FORMAT.yaml, config_b_path=b, config_b_format=FORMAT.yaml
@@ -64,7 +65,7 @@ def test_compare_configs_good(compare_configs_assets, caplog):


 def test_compare_configs_changed_value(compare_configs_assets, caplog):
-    logging.getLogger().setLevel(logging.INFO)
+    log.setLevel(logging.INFO)
     d, a, b = compare_configs_assets
     d["baz"]["qux"] = 11
     with writable(b) as f:
@@ -76,7 +77,7 @@ def test_compare_configs_changed_value(compare_configs_assets, caplog):


 def test_compare_configs_missing_key(compare_configs_assets, caplog):
-    logging.getLogger().setLevel(logging.INFO)
+    log.setLevel(logging.INFO)
     d, a, b = compare_configs_assets
     del d["baz"]
     with writable(b) as f:
@@ -89,7 +90,7 @@ def test_compare_configs_missing_key(compare_configs_assets, caplog):


 def test_compare_configs_bad_format(caplog):
-    logging.getLogger().setLevel(logging.INFO)
+    log.setLevel(logging.INFO)
     with raises(UWConfigError) as e:
         core.compare_configs(
             config_a_path="/not/used",
@@ -187,7 +188,7 @@ def test_dereference_exceptions(caplog, tmp_path):
     """
     Test that dereference handles some standard mistakes.
     """
-    logging.getLogger().setLevel(logging.DEBUG)
+    log.setLevel(logging.DEBUG)
     path = tmp_path / "cfg.yaml"
     with open(path, "w", encoding="utf-8") as f:
         print(
@@ -204,7 +205,7 @@ def test_dereference_exceptions(caplog, tmp_path):
         )
     cfgobj = core.YAMLConfig(config_file=path)
     cfgobj.dereference()
-    logging.info("HELLO")
+    log.info("HELLO")
     raised = [record.message for record in caplog.records if "raised" in record.message]
     assert "ZeroDivisionError" in raised[0]
     assert "TypeError" in raised[1]
@@ -425,7 +426,7 @@ def test_realize_config_dry_run(caplog):
     """
     Test that providing a YAML base file with a dry-run flag will print a YAML config file.
""" - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) infile = fixture_path("fruit_config.yaml") yaml_config = core.YAMLConfig(infile) yaml_config.dereference_all() @@ -615,7 +616,7 @@ def test__realize_config_update(realize_config_testobj, tmp_path): def test__realize_config_values_needed(caplog, tmp_path): - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) path = tmp_path / "a.yaml" with writable(path) as f: yaml.dump({1: "complete", 2: "{{ jinja2 }}", 3: ""}, f) @@ -650,7 +651,7 @@ def test_values_needed_ini(caplog): Test that the values_needed flag logs keys completed, keys containing unfilled Jinja2 templates, and keys set to empty. """ - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) core.realize_config( input_file=fixture_path("simple3.ini"), input_format=FORMAT.ini, @@ -689,7 +690,7 @@ def test_values_needed_nml(caplog): Test that the values_needed flag logs keys completed, keys containing unfilled Jinja2 templates, and keys set to empty. """ - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) core.realize_config( input_file=fixture_path("simple3.nml"), input_format=FORMAT.nml, @@ -725,7 +726,7 @@ def test_values_needed_yaml(caplog): Test that the values_needed flag logs keys completed, keys containing unfilled Jinja2 templates, and keys set to empty. """ - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) core.realize_config( input_file=fixture_path("srw_example.yaml"), input_format=FORMAT.yaml, @@ -915,7 +916,7 @@ def test_YAMLConfig__load_paths_failure_stdin_plus_relpath(caplog): # provide YAML with an include directive specifying a relative path. Since a relative path # is meaningless relative to stdin, assert that an appropriate error is logged and exception # raised. - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) relpath = "../bar/baz.yaml" with patch.object(core.sys, "stdin", new=StringIO(f"foo: {core.INCLUDE_TAG} [{relpath}]")): with raises(UWConfigError) as e: diff --git a/src/uwtools/tests/config/test_templater.py b/src/uwtools/tests/config/test_templater.py index 497685678..9f2d9ac66 100644 --- a/src/uwtools/tests/config/test_templater.py +++ b/src/uwtools/tests/config/test_templater.py @@ -11,6 +11,7 @@ from pytest import fixture from uwtools.config import templater +from uwtools.logging import log from uwtools.tests.support import logged @@ -52,7 +53,7 @@ def test_render(values_file, template, tmp_path): def test_render_dry_run(caplog, values_file, template): - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) render_helper( input_file=template, values_file=values_file, output_file="/dev/null", dry_run=True ) @@ -61,7 +62,7 @@ def test_render_dry_run(caplog, values_file, template): def test_render_values_missing(caplog, values_file, template): # Read in the config, remove the "roses" key, then re-write it. 
- logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) with open(values_file, "r", encoding="utf-8") as f: cfgobj = yaml.safe_load(f.read()) del cfgobj["roses"] @@ -73,7 +74,7 @@ def test_render_values_missing(caplog, values_file, template): def test_render_values_needed(caplog, values_file, template): - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) render_helper( input_file=template, values_file=values_file, output_file="/dev/null", values_needed=True ) @@ -82,7 +83,7 @@ def test_render_values_needed(caplog, values_file, template): def test__report(caplog): - logging.getLogger().setLevel(logging.DEBUG) + log.setLevel(logging.DEBUG) expected = """ Internal arguments: --------------------------------------------------------------------- diff --git a/src/uwtools/tests/config/test_validator.py b/src/uwtools/tests/config/test_validator.py index 3f6d0f31a..d7baec96d 100644 --- a/src/uwtools/tests/config/test_validator.py +++ b/src/uwtools/tests/config/test_validator.py @@ -12,6 +12,7 @@ from pytest import fixture from uwtools.config import validator +from uwtools.logging import log from uwtools.tests.support import logged, regex_logged # Support functions @@ -77,7 +78,7 @@ def write_as_json(data: Dict[str, Any], path: Path) -> Path: def test_validate_yaml_fail_bad_dir_top(caplog, config, config_file, schema, schema_file, tmp_path): # Specify a non-existent directory for the topmost directory value. - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) d = str(tmp_path / "no-such-dir") config["dir"] = d write_as_json(config, config_file) @@ -90,7 +91,7 @@ def test_validate_yaml_fail_bad_dir_nested( caplog, config, config_file, schema, schema_file, tmp_path ): # Specify a non-existent directory for the nested directory value. - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) d = str(tmp_path / "no-such-dir") config["sub"]["dir"] = d write_as_json(config, config_file) @@ -101,7 +102,7 @@ def test_validate_yaml_fail_bad_dir_nested( def test_validate_yaml_fail_bad_enum_val(caplog, config, config_file, schema, schema_file): # Specify an invalid enum value. - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) config["color"] = "yellow" write_as_json(config, config_file) write_as_json(schema, schema_file) @@ -112,7 +113,7 @@ def test_validate_yaml_fail_bad_enum_val(caplog, config, config_file, schema, sc def test_validate_yaml_fail_bad_number_val(caplog, config, config_file, schema, schema_file): # Specify an invalid number value. 
- logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) config["number"] = "string" write_as_json(config, config_file) write_as_json(schema, schema_file) diff --git a/src/uwtools/tests/drivers/test_driver.py b/src/uwtools/tests/drivers/test_driver.py index 1b77d0895..95397a692 100644 --- a/src/uwtools/tests/drivers/test_driver.py +++ b/src/uwtools/tests/drivers/test_driver.py @@ -12,6 +12,7 @@ from pytest import fixture from uwtools.drivers.driver import Driver +from uwtools.logging import log from uwtools.tests.support import logged @@ -92,7 +93,7 @@ def test_validation(caplog, configs, schema, tmp_path, valid): with open(schema_file, "w", encoding="utf-8") as f: print(schema, file=f) with patch.object(ConcreteDriver, "schema_file", new=schema_file): - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) ConcreteDriver(config_file=config_file) if valid: assert logged(caplog, "0 schema-validation errors found") diff --git a/src/uwtools/tests/drivers/test_forecast.py b/src/uwtools/tests/drivers/test_forecast.py index 52f4f0a87..7cfa3a840 100644 --- a/src/uwtools/tests/drivers/test_forecast.py +++ b/src/uwtools/tests/drivers/test_forecast.py @@ -16,6 +16,7 @@ from uwtools.drivers import forecast from uwtools.drivers.driver import Driver from uwtools.drivers.forecast import FV3Forecast +from uwtools.logging import log from uwtools.tests.support import compare_files, fixture_path @@ -332,7 +333,7 @@ def test_run_direct(fv3_mpi_assets, fv3_run_assets): @pytest.mark.parametrize("with_batch_script", [True, False]) def test_FV3Forecast_run_dry_run(capsys, fv3_mpi_assets, fv3_run_assets, with_batch_script): - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) batch_script, config_file, config = fv3_run_assets if with_batch_script: batch_components = [ diff --git a/src/uwtools/tests/test_cli.py b/src/uwtools/tests/test_cli.py index 7dbcf6141..b8ba55ffe 100644 --- a/src/uwtools/tests/test_cli.py +++ b/src/uwtools/tests/test_cli.py @@ -13,6 +13,7 @@ from uwtools import cli from uwtools.cli import STR +from uwtools.logging import log from uwtools.utils.file import FORMAT # Test functions @@ -126,7 +127,7 @@ def test__check_file_vs_format_pass_implicit(fmt): def test__check_quiet_vs_verbose_fail(capsys): - logging.getLogger().setLevel(logging.INFO) + log.setLevel(logging.INFO) args = ns() vars(args).update({STR.quiet: True, STR.verbose: True}) with raises(SystemExit): diff --git a/src/uwtools/tests/test_logging.py b/src/uwtools/tests/test_logging.py index 240f3c0e8..6cba77952 100644 --- a/src/uwtools/tests/test_logging.py +++ b/src/uwtools/tests/test_logging.py @@ -1,4 +1,4 @@ -# pylint: disable=missing-function-docstring +# pylint: disable=missing-function-docstring,protected-access """ Tests for uwtools.logging module. 
""" @@ -50,3 +50,13 @@ def test_setup_logging_verbose(): format=ANY, level=logging.DEBUG, ) + + +def test_use_logger(): + with patch.object(uwtools.logging, "log", uwtools.logging._Logger()): + # Initially, uwtools logging uses the Python root logger: + assert uwtools.logging.log.logger == logging.getLogger() + # But the logger can be swapped to use a logger of choice: + test_logger = logging.getLogger("test-logger") + uwtools.logging.use_logger(test_logger) + assert uwtools.logging.log.logger == test_logger diff --git a/src/uwtools/tests/utils/test_processing.py b/src/uwtools/tests/utils/test_processing.py index 346d7db41..e082b4399 100644 --- a/src/uwtools/tests/utils/test_processing.py +++ b/src/uwtools/tests/utils/test_processing.py @@ -3,12 +3,14 @@ Tests for uwtools.utils.processing module. """ +import logging + from uwtools.tests.support import logged from uwtools.utils import processing def test_run_failure(caplog): - processing.logging.getLogger().setLevel(processing.logging.INFO) + processing.log.setLevel(logging.INFO) cmd = "expr 1 / 0" result = processing.execute(cmd=cmd) assert "division by zero" in result.output @@ -20,9 +22,9 @@ def test_run_failure(caplog): def test_run_success(caplog, tmp_path): - processing.logging.getLogger().setLevel(processing.logging.INFO) + processing.log.setLevel(logging.INFO) cmd = "echo hello $FOO" - assert processing.execute(cmd=cmd, cwd=tmp_path, env={"FOO": "bar"}, log=True) + assert processing.execute(cmd=cmd, cwd=tmp_path, env={"FOO": "bar"}, log_output=True) assert logged(caplog, "Executing: %s" % cmd) assert logged(caplog, " in %s" % tmp_path) assert logged(caplog, " with environment variables:") diff --git a/src/uwtools/utils/file.py b/src/uwtools/utils/file.py index 87c20973f..f97697ec1 100644 --- a/src/uwtools/utils/file.py +++ b/src/uwtools/utils/file.py @@ -2,7 +2,6 @@ Helpers for working with files and directories. """ -import logging import os import shutil import sys @@ -12,6 +11,7 @@ from pathlib import Path from typing import IO, Generator +from uwtools.logging import log from uwtools.types import DefinitePath, OptionalPath @@ -60,7 +60,7 @@ def get_file_type(path: DefinitePath) -> str: if fmt := vars(FORMAT).get(suffix): return fmt msg = f"Cannot deduce format of '{path}' from unknown extension '{suffix}'" - logging.critical(msg) + log.critical(msg) raise ValueError(msg) @@ -79,7 +79,7 @@ def handle_existing(directory: str, action: str) -> None: shutil.rmtree(directory) except (FileExistsError, RuntimeError) as e: msg = f"Could not delete directory {directory}" - logging.critical(msg) + log.critical(msg) raise RuntimeError(msg) from e # Try to rename existing run directory if option is rename. @@ -91,7 +91,7 @@ def handle_existing(directory: str, action: str) -> None: shutil.move(directory, save_dir) except (FileExistsError, RuntimeError) as e: msg = f"Could not rename directory {directory}" - logging.critical(msg) + log.critical(msg) raise RuntimeError(msg) from e diff --git a/src/uwtools/utils/processing.py b/src/uwtools/utils/processing.py index f83ed59e2..bbb54ca34 100644 --- a/src/uwtools/utils/processing.py +++ b/src/uwtools/utils/processing.py @@ -2,18 +2,19 @@ Utilities for interacting with external processes. 
""" -import logging from pathlib import Path from subprocess import STDOUT, CalledProcessError, check_output from types import SimpleNamespace as ns from typing import Dict, Optional, Union +from uwtools.logging import log + def execute( cmd: str, cwd: Optional[Union[Path, str]] = None, env: Optional[Dict[str, str]] = None, - log: Optional[bool] = False, + log_output: Optional[bool] = False, ) -> ns: """ Execute a command in a subshell. @@ -21,30 +22,30 @@ def execute( :param cmd: The command to execute. :param cwd: Change to this directory before executing cmd. :param env: Environment variables to set before executing cmd. - :param log: Log output from successful cmd? (Error output is always logged.) + :param log_output: Log output from successful cmd? (Error output is always logged.) :return: A result object providing combined stder/stdout output and success values. """ indent = " " - logging.info("Executing: %s", cmd) + log.info("Executing: %s", cmd) if cwd: - logging.info("%sin %s", indent, cwd) + log.info("%sin %s", indent, cwd) if env: - logging.info("%swith environment variables:", indent) + log.info("%swith environment variables:", indent) for key, val in env.items(): - logging.info("%s%s=%s", indent * 2, key, val) + log.info("%s%s=%s", indent * 2, key, val) try: output = check_output( cmd, cwd=cwd, encoding="utf=8", env=env, shell=True, stderr=STDOUT, text=True ) - logfunc = logging.info + logfunc = log.info success = True except CalledProcessError as e: output = e.output - logging.error("%sFailed with status: %s", indent, e.returncode) - logfunc = logging.error + log.error("%sFailed with status: %s", indent, e.returncode) + logfunc = log.error success = False - if output and (log or not success): + if output and (log_output or not success): logfunc("%sOutput:", indent) for line in output.split("\n"): logfunc("%s%s", indent * 2, line) From 22503a38e4ba834d22329e20783114e30805fbdd Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Thu, 26 Oct 2023 09:58:52 -0600 Subject: [PATCH 12/66] rereadable-stdin (#325) --- src/pyproject.toml | 1 - src/uwtools/config/core.py | 2 +- src/uwtools/config/validator.py | 32 +------------- .../tests/config/test_atparse_to_jinja2.py | 4 ++ src/uwtools/tests/config/test_core.py | 6 ++- src/uwtools/tests/config/test_validator.py | 42 +------------------ src/uwtools/tests/test_rocoto.py | 14 +++---- src/uwtools/tests/utils/test_file.py | 34 ++++++++++++++- src/uwtools/utils/file.py | 37 ++++++++++++++-- 9 files changed, 84 insertions(+), 88 deletions(-) diff --git a/src/pyproject.toml b/src/pyproject.toml index 80fd9f03b..139ae369f 100644 --- a/src/pyproject.toml +++ b/src/pyproject.toml @@ -37,7 +37,6 @@ disable = [ "too-many-instance-attributes", "too-many-lines", "too-many-locals", - "too-many-statements", "unnecessary-lambda-assignment", "use-dict-literal", ] diff --git a/src/uwtools/config/core.py b/src/uwtools/config/core.py index dcf0e2bee..bfd811d9f 100644 --- a/src/uwtools/config/core.py +++ b/src/uwtools/config/core.py @@ -533,7 +533,7 @@ def _load(self, config_file: OptionalPath) -> dict: loader = self._yaml_loader with readable(config_file) as f: try: - cfg = yaml.load(f, Loader=loader) + cfg = yaml.load(f.read(), Loader=loader) except yaml.constructor.ConstructorError as e: if e.problem: if "unhashable" in e.problem: diff --git a/src/uwtools/config/validator.py b/src/uwtools/config/validator.py index 022933bc8..b2ed6e660 100644 --- a/src/uwtools/config/validator.py +++ 
b/src/uwtools/config/validator.py @@ -3,8 +3,7 @@ """ import json -from pathlib import Path -from typing import List, Optional +from typing import List import jsonschema @@ -15,16 +14,13 @@ # Public functions -def validate_yaml( - schema_file: DefinitePath, config_file: OptionalPath = None, check_paths: Optional[bool] = True -) -> bool: +def validate_yaml(schema_file: DefinitePath, config_file: OptionalPath = None) -> bool: """ Check whether the given config file conforms to the given JSON Schema spec and whether any filesystem paths it identifies do not exist. :param schema_file: The JSON Schema file to use for validation. :param config_file: The YAML file to validate (stdin will be used by default) - :param check_paths: Check for filesystem paths that do not exist :return: Did the YAML file conform to the schema? """ # Load the config and schema. @@ -42,12 +38,6 @@ def validate_yaml( # It's pointless to evaluate an invalid config, so return now if that's the case. if errors: return False - # Collect and report bad paths found in config. - if check_paths: - if bad_paths := _bad_paths(yaml_config.data, schema): - for bad_path in bad_paths: - log.error("Path does not exist: %s", bad_path) - return False # If no issues were detected, report success. return True @@ -55,24 +45,6 @@ def validate_yaml( # Private functions -def _bad_paths(config: dict, schema: dict) -> List[str]: - """ - Identify non-existent config paths. - - The schema has the same shape as the config, so traverse them together, recursively, checking - values identified by the schema as having "uri" format, which denotes a path. - """ - paths = [] - for key, val in config.items(): - subschema = schema.get("properties", {}).get(key, {}) - if isinstance(val, dict): - paths += _bad_paths(val, subschema) - else: - if subschema.get("format") == "uri" and not Path(val).exists(): - paths.append(val) - return sorted(paths) - - def _validation_errors(config: dict, schema: dict) -> List[str]: """ Identify schema-validation errors. 
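With the filesystem-path check removed, validate_yaml() reduces to loading the YAML config and the JSON Schema and reporting whatever errors the jsonschema validator collects. A minimal standalone sketch of that surviving flow (the input file names here are hypothetical, chosen only for illustration):

    import json

    import jsonschema
    import yaml

    # Hypothetical input files, for illustration only:
    with open("config.yaml", "r", encoding="utf-8") as f:
        config = yaml.safe_load(f)
    with open("schema.jsonschema", "r", encoding="utf-8") as f:
        schema = json.load(f)

    # Collect, rather than raise on, validation errors -- mirroring the
    # list-building approach of _validation_errors() above:
    validator = jsonschema.Draft202012Validator(schema)
    errors = list(validator.iter_errors(config))
    for error in errors:
        print("%s: %s" % ("/".join(str(k) for k in error.path), error.message))
    print("%s schema-validation errors found" % len(errors))
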
diff --git a/src/uwtools/tests/config/test_atparse_to_jinja2.py b/src/uwtools/tests/config/test_atparse_to_jinja2.py index 78fc709dd..49a4ecc1c 100644 --- a/src/uwtools/tests/config/test_atparse_to_jinja2.py +++ b/src/uwtools/tests/config/test_atparse_to_jinja2.py @@ -12,6 +12,7 @@ from uwtools.config import atparse_to_jinja2 from uwtools.logging import log +from uwtools.utils.file import _stdinproxy # Helper functions @@ -67,6 +68,7 @@ def test_convert_input_file_to_stdout(atparsefile, capsys, jinja2txt): def test_convert_stdin_to_file(atparselines, capsys, jinja2txt, tmp_path): outfile = tmp_path / "outfile" + _stdinproxy.cache_clear() with patch.object(sys, "stdin", new=StringIO("\n".join(atparselines))): atparse_to_jinja2.convert(output_file=outfile) with open(outfile, "r", encoding="utf-8") as f: @@ -79,6 +81,7 @@ def test_convert_stdin_to_file(atparselines, capsys, jinja2txt, tmp_path): def test_convert_stdin_to_logging(atparselines, caplog, jinja2txt, tmp_path): log.setLevel(logging.INFO) outfile = tmp_path / "outfile" + _stdinproxy.cache_clear() with patch.object(sys, "stdin", new=StringIO("\n".join(atparselines))): atparse_to_jinja2.convert(output_file=outfile, dry_run=True) assert "\n".join(record.message for record in caplog.records) == jinja2txt.strip() @@ -86,6 +89,7 @@ def test_convert_stdin_to_logging(atparselines, caplog, jinja2txt, tmp_path): def test_convert_stdin_to_stdout(atparselines, capsys, jinja2txt): + _stdinproxy.cache_clear() with patch.object(sys, "stdin", new=StringIO("\n".join(atparselines))): atparse_to_jinja2.convert() streams = capsys.readouterr() diff --git a/src/uwtools/tests/config/test_core.py b/src/uwtools/tests/config/test_core.py index 6c52a72c8..4922b5b92 100644 --- a/src/uwtools/tests/config/test_core.py +++ b/src/uwtools/tests/config/test_core.py @@ -7,6 +7,7 @@ import filecmp import logging import os +import sys from collections import OrderedDict from io import StringIO from pathlib import Path @@ -22,7 +23,7 @@ from uwtools.exceptions import UWConfigError from uwtools.logging import log from uwtools.tests.support import compare_files, fixture_path, logged -from uwtools.utils.file import FORMAT, path_if_it_exists, writable +from uwtools.utils.file import FORMAT, _stdinproxy, path_if_it_exists, writable # Test functions @@ -917,8 +918,9 @@ def test_YAMLConfig__load_paths_failure_stdin_plus_relpath(caplog): # is meaningless relative to stdin, assert that an appropriate error is logged and exception # raised. log.setLevel(logging.INFO) + _stdinproxy.cache_clear() relpath = "../bar/baz.yaml" - with patch.object(core.sys, "stdin", new=StringIO(f"foo: {core.INCLUDE_TAG} [{relpath}]")): + with patch.object(sys, "stdin", new=StringIO(f"foo: {core.INCLUDE_TAG} [{relpath}]")): with raises(UWConfigError) as e: core.YAMLConfig() msg = f"Reading from stdin, a relative path was encountered: {relpath}" diff --git a/src/uwtools/tests/config/test_validator.py b/src/uwtools/tests/config/test_validator.py index d7baec96d..85cbe79db 100644 --- a/src/uwtools/tests/config/test_validator.py +++ b/src/uwtools/tests/config/test_validator.py @@ -76,30 +76,6 @@ def write_as_json(data: Dict[str, Any], path: Path) -> Path: # Test functions -def test_validate_yaml_fail_bad_dir_top(caplog, config, config_file, schema, schema_file, tmp_path): - # Specify a non-existent directory for the topmost directory value. 
- log.setLevel(logging.INFO) - d = str(tmp_path / "no-such-dir") - config["dir"] = d - write_as_json(config, config_file) - write_as_json(schema, schema_file) - assert not validator.validate_yaml(schema_file=schema_file, config_file=config_file) - assert len([x for x in caplog.records if f"Path does not exist: {d}" in x.message]) == 1 - - -def test_validate_yaml_fail_bad_dir_nested( - caplog, config, config_file, schema, schema_file, tmp_path -): - # Specify a non-existent directory for the nested directory value. - log.setLevel(logging.INFO) - d = str(tmp_path / "no-such-dir") - config["sub"]["dir"] = d - write_as_json(config, config_file) - write_as_json(schema, schema_file) - assert not validator.validate_yaml(schema_file=schema_file, config_file=config_file) - assert len([x for x in caplog.records if f"Path does not exist: {d}" in x.message]) == 1 - - def test_validate_yaml_fail_bad_enum_val(caplog, config, config_file, schema, schema_file): # Specify an invalid enum value. log.setLevel(logging.INFO) @@ -133,7 +109,7 @@ def test_validate_yaml_pass(config, config_file, schema, schema_file): def rocoto_assets(): with resources.as_file(resources.files("uwtools.resources")) as resc: schema_file = resc / "rocoto.jsonschema" - kwargs = {"schema_file": schema_file, "config_file": "/not/used", "check_paths": False} + kwargs = {"schema_file": schema_file, "config_file": "/not/used"} config = { "workflow": { "cycledefs": {"howdy": ["202209290000 202209300000 06:00:00"]}, @@ -244,22 +220,6 @@ def test_validate_yaml_rocoto_valid(rocoto_assets): assert validator.validate_yaml(**kwargs) -def test__bad_paths_top(config, schema, tmp_path): - d = str(tmp_path / "no-such-dir") - config["dir"] = d - assert validator._bad_paths(config, schema) == [d] - - -def test__bad_paths_nested(config, schema, tmp_path): - d = str(tmp_path / "no-such-dir") - config["sub"]["dir"] = d - assert validator._bad_paths(config, schema) == [d] - - -def test__bad_paths_none(config, schema): - assert not validator._bad_paths(config, schema) - - def test__validation_errors_bad_enum_value(config, schema): config["color"] = "yellow" assert len(validator._validation_errors(config, schema)) == 1 diff --git a/src/uwtools/tests/test_rocoto.py b/src/uwtools/tests/test_rocoto.py index a0031ff05..8a44e002f 100644 --- a/src/uwtools/tests/test_rocoto.py +++ b/src/uwtools/tests/test_rocoto.py @@ -75,10 +75,9 @@ def test_realize_rocoto_xml(vals, tmp_path): output = tmp_path / "rendered.xml" with patch.object(rocoto, "validate_rocoto_xml", value=True): - with patch.object(rocoto.uwtools.config.validator, "_bad_paths", return_value=None): - with resources.as_file(resources.files("uwtools.tests.fixtures")) as path: - config_file = path / fn - result = rocoto.realize_rocoto_xml(config_file=config_file, rendered_output=output) + with resources.as_file(resources.files("uwtools.tests.fixtures")) as path: + config_file = path / fn + result = rocoto.realize_rocoto_xml(config_file=config_file, rendered_output=output) assert result is validity @@ -86,10 +85,9 @@ def test_realize_rocoto_invalid_xml(): config_file = support.fixture_path("hello_workflow.yaml") xml = support.fixture_path("rocoto_invalid.xml") with patch.object(rocoto, "_write_rocoto_xml", return_value=None): - with patch.object(rocoto.uwtools.config.validator, "_bad_paths", return_value=None): - with patch.object(tempfile, "NamedTemporaryFile") as context_manager: - context_manager.return_value.__enter__.return_value.name = xml - result = rocoto.realize_rocoto_xml(config_file=config_file, 
rendered_output=xml) + with patch.object(tempfile, "NamedTemporaryFile") as context_manager: + context_manager.return_value.__enter__.return_value.name = xml + result = rocoto.realize_rocoto_xml(config_file=config_file, rendered_output=xml) assert result is False diff --git a/src/uwtools/tests/utils/test_file.py b/src/uwtools/tests/utils/test_file.py index 7b87a61dc..47a22afb3 100644 --- a/src/uwtools/tests/utils/test_file.py +++ b/src/uwtools/tests/utils/test_file.py @@ -1,10 +1,11 @@ -# pylint: disable=missing-function-docstring,redefined-outer-name +# pylint: disable=missing-function-docstring,protected-access,redefined-outer-name """ Tests for uwtools.utils.file module. """ import sys from datetime import datetime as dt +from io import StringIO from unittest.mock import patch import pytest @@ -24,6 +25,35 @@ def assets(tmp_path): return now, renamed, rundir +def test_StdinProxy(): + msg = "proxying stdin" + with patch.object(sys, "stdin", new=StringIO(msg)): + assert sys.stdin.read() == msg + # Reading from stdin a second time yields no input, as the stream has been exhausted: + assert sys.stdin.read() == "" + with patch.object(sys, "stdin", new=StringIO(msg)): + sp = file.StdinProxy() + assert sp.read() == msg + # But the stdin proxy can be read multiple times: + assert sp.read() == msg + + +def test__stdinproxy(): + file._stdinproxy.cache_clear() + msg0 = "hello world" + msg1 = "bonjour monde" + # Unsurprisingly, the first read from stdin finds the expected message: + with patch.object(sys, "stdin", new=StringIO(msg0)): + assert file._stdinproxy().read() == msg0 + # But after re-patching stdin with a new message, a second read returns the old message: + with patch.object(sys, "stdin", new=StringIO(msg1)): + assert file._stdinproxy().read() == msg0 # <-- the OLD message + # However, if the cache is cleared, the second message is then read: + file._stdinproxy.cache_clear() + with patch.object(sys, "stdin", new=StringIO(msg1)): + assert file._stdinproxy().read() == msg1 # <-- the NEW message + + def test_get_file_type(): for ext, file_type in { "atparse": "atparse", @@ -103,7 +133,7 @@ def test_readable_file(tmp_path): def test_readable_nofile(): with file.readable() as f: - assert f is sys.stdin + assert hasattr(f, "read") def test_writable_file(tmp_path): diff --git a/src/uwtools/utils/file.py b/src/uwtools/utils/file.py index f97697ec1..bd1f64567 100644 --- a/src/uwtools/utils/file.py +++ b/src/uwtools/utils/file.py @@ -8,8 +8,10 @@ from contextlib import contextmanager from dataclasses import dataclass from datetime import datetime as dt +from functools import cache +from io import StringIO from pathlib import Path -from typing import IO, Generator +from typing import IO, Any, Generator, Union from uwtools.logging import log from uwtools.types import DefinitePath, OptionalPath @@ -48,6 +50,33 @@ class _FORMAT: FORMAT = _FORMAT() +class StdinProxy: + """ + Reads stdin once but permits multiple reads of its data. + """ + + def __init__(self) -> None: + self._stdin = sys.stdin.read() + self._reset() + + def __getattr__(self, attr: str) -> Any: + self._reset() + return getattr(self._stringio, attr) + + def __iter__(self): + self._reset() + for line in self._stringio.read().split("\n"): + yield line + + def _reset(self) -> None: + self._stringio = StringIO(self._stdin) + + +@cache +def _stdinproxy(): + return StdinProxy() + + def get_file_type(path: DefinitePath) -> str: """ Returns a standardized file type given a path/filename. 
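The @cache decorator above is what makes stdin effectively re-readable process-wide: every caller of _stdinproxy() shares one StdinProxy, which replays the single real read through a fresh StringIO. A rough standalone illustration of the same memoized-proxy idea (a simplified stand-in, not the module's code; demo.py is a hypothetical script name):

    import sys
    from functools import cache
    from io import StringIO


    @cache
    def stdin_text() -> str:
        # The real stream is consumed exactly once, on the first call.
        return sys.stdin.read()


    def readable_stdin() -> StringIO:
        # Every subsequent caller gets a fresh, rewound stream over the captured text.
        return StringIO(stdin_text())


    # e.g. echo hello | python demo.py
    first = readable_stdin().read()   # "hello\n"
    second = readable_stdin().read()  # "hello\n" again -- stdin is not exhausted
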
@@ -112,7 +141,9 @@ def path_if_it_exists(path: str) -> str: @contextmanager -def readable(filepath: OptionalPath = None, mode: str = "r") -> Generator[IO, None, None]: +def readable( + filepath: OptionalPath = None, mode: str = "r" +) -> Generator[Union[IO, StdinProxy], None, None]: """ If a path to a file is specified, open it and return a readable handle; if not, return readable stdin. @@ -123,7 +154,7 @@ def readable(filepath: OptionalPath = None, mode: str = "r") -> Generator[IO, No with open(filepath, mode, encoding="utf-8") as f: yield f else: - yield sys.stdin + yield _stdinproxy() @contextmanager From 1cf4220a7f8b261f0c617554666d8fca5900a4cf Mon Sep 17 00:00:00 2001 From: Brian Weir <94982354+WeirAE@users.noreply.github.com> Date: Thu, 26 Oct 2023 13:56:05 -0500 Subject: [PATCH 13/66] UW-413: Ensure CLI functions accept file and stdin streams for both input and output (#324) * Fix rocoto and jinja output calls to handle stream * Fix required output; revert jinja to limit scope * Restored changes to j2template * missed save on import declaration * Added recommended refactor of realize_rocoto_xml * Adding additional feedback to fix validate also, no longer have to __enter__ on mock * Fix tests after merging; piping and redirect works * Fixed test mock, added feedback of renaming * reverting to etree.fromstring() * Further simplified realize() --- src/uwtools/cli.py | 5 +-- src/uwtools/config/j2template.py | 8 ++-- src/uwtools/rocoto.py | 74 ++++++++++++++------------------ src/uwtools/tests/test_rocoto.py | 17 +++++--- 4 files changed, 51 insertions(+), 53 deletions(-) diff --git a/src/uwtools/cli.py b/src/uwtools/cli.py index 8fed0ef42..10d2b8acf 100644 --- a/src/uwtools/cli.py +++ b/src/uwtools/cli.py @@ -331,10 +331,9 @@ def _add_subparser_rocoto_realize(subparsers: Subparsers) -> SubmodeChecks: :param subparsers: Parent parser's subparsers, to add this subparser to. """ parser = _add_subparser(subparsers, STR.realize, "Realize a Rocoto XML workflow document") - required = parser.add_argument_group(TITLE_REQ_ARG) - _add_arg_output_file(required) optional = _basic_setup(parser) _add_arg_input_file(optional) + _add_arg_output_file(optional) checks = _add_args_quiet_and_verbose(optional) return checks @@ -373,7 +372,7 @@ def _dispatch_rocoto_realize(args: Namespace) -> bool: :param args: Parsed command-line args. """ success = uwtools.rocoto.realize_rocoto_xml( - config_file=args.input_file, rendered_output=args.output_file + config_file=args.input_file, output_file=args.output_file ) return success diff --git a/src/uwtools/config/j2template.py b/src/uwtools/config/j2template.py index c0707534e..94b51dc00 100644 --- a/src/uwtools/config/j2template.py +++ b/src/uwtools/config/j2template.py @@ -9,7 +9,7 @@ from uwtools.logging import log from uwtools.types import DefinitePath, OptionalPath -from uwtools.utils.file import readable +from uwtools.utils.file import readable, writable class J2Template: @@ -43,7 +43,7 @@ def __init__( # Public methods - def dump(self, output_path: DefinitePath) -> None: + def dump(self, output_path: OptionalPath) -> None: """ Write rendered template to the path provided. 
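With J2Template.dump() now routed through the writable() helper, as the following hunk shows, an omitted output path sends the rendered template to stdout. A simplified stand-in for that stdout-defaulting context manager (not the helper's exact code):

    import sys
    from contextlib import contextmanager
    from typing import IO, Generator, Optional


    @contextmanager
    def writable(filepath: Optional[str] = None, mode: str = "w") -> Generator[IO, None, None]:
        # With a path, yield a managed file handle; without one, yield stdout.
        if filepath:
            with open(filepath, mode, encoding="utf-8") as f:
                yield f
        else:
            yield sys.stdout


    # print(..., file=f) then behaves identically for files and stdout:
    with writable() as f:
        print("rendered text", file=f)
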
@@ -51,7 +51,7 @@ def dump(self, output_path: DefinitePath) -> None:
         """
         msg = f"Writing rendered template to output file: {output_path}"
         log.debug(msg)
-        with open(output_path, "w+", encoding="utf-8") as f:
+        with writable(output_path) as f:
             print(self.render(), file=f)

     def render(self) -> str:
@@ -79,7 +79,7 @@ def undeclared_variables(self) -> Set[str]:

     # Private methods

-    def _load_file(self, template_path: OptionalPath) -> Template:
+    def _load_file(self, template_path: DefinitePath) -> Template:
         """
         Load the Jinja2 template from the file provided.

diff --git a/src/uwtools/rocoto.py b/src/uwtools/rocoto.py
index a02219dba..0d1d9806b 100644
--- a/src/uwtools/rocoto.py
+++ b/src/uwtools/rocoto.py
@@ -2,18 +2,18 @@
 Support for creating Rocoto XML workflow documents.
 """

-import shutil
 import tempfile
 from importlib import resources
+from pathlib import Path

 from lxml import etree

-import uwtools.config.validator
 from uwtools.config.core import YAMLConfig
 from uwtools.config.j2template import J2Template
+from uwtools.config.validator import validate_yaml
 from uwtools.logging import log
 from uwtools.types import DefinitePath, OptionalPath
-from uwtools.utils.file import readable
+from uwtools.utils.file import readable, writable

 # Private functions
@@ -76,55 +76,53 @@

 def _write_rocoto_xml(
     config_file: OptionalPath,
-    rendered_output: DefinitePath,
+    output_file: OptionalPath,
 ) -> None:
     """
     Render the Rocoto workflow defined in the given YAML to XML.

     :param config_file: Path to YAML input file.
-    :param rendered_output: Path to write rendered XML file.
+    :param output_file: Path to write rendered XML file.
     """

     values = _add_jobname_to_tasks(config_file)

     # Render the template.
     template = J2Template(values=values.data, template_path=_rocoto_template_xml())
-    template.dump(output_path=str(rendered_output))
+    template.dump(output_path=output_file)


 # Public functions


 def realize_rocoto_xml(
     config_file: OptionalPath,
-    rendered_output: DefinitePath,
+    output_file: OptionalPath = None,
 ) -> bool:
     """
     Realize the Rocoto workflow defined in the given YAML as XML. Validate both the YAML input and
     XML output.

     :param config_file: Path to YAML input file.
-    :param rendered_output: Path to write rendered XML file.
+    :param output_file: Path to write rendered XML file.
     :return: Did the input and output files conform to their schemas?
     """

-    # Validate the YAML.
-    if uwtools.config.validator.validate_yaml(
-        config_file=config_file, schema_file=_rocoto_schema_yaml()
-    ):
-        # Render the template to a temporary file.
-        with tempfile.NamedTemporaryFile(delete=False) as temp_file:
-            _write_rocoto_xml(
-                config_file=config_file,
-                rendered_output=temp_file.name,
-            )
-        # Validate the XML.
-        if validate_rocoto_xml(input_xml=temp_file.name):
-            # If no issues were detected, save temp file and report success.
- shutil.move(temp_file.name, rendered_output) - return True - log.error("Rocoto validation errors identified in %s", temp_file.name) + if not validate_yaml(config_file=config_file, schema_file=_rocoto_schema_yaml()): + log.error("YAML validation errors identified in %s", config_file) return False - log.error("YAML validation errors identified in %s", config_file) - return False + + _, temp_file = tempfile.mkstemp(suffix=".xml") + + _write_rocoto_xml(config_file=config_file, output_file=temp_file) + + if not validate_rocoto_xml(input_xml=temp_file): + log.error("Rocoto validation errors identified in %s", temp_file) + return False + + with open(temp_file, "r", encoding="utf-8") as f_in: + with writable(output_file) as f_out: + print(f_in.read(), file=f_out) + Path(temp_file).unlink() + return True def validate_rocoto_xml(input_xml: OptionalPath) -> bool: @@ -134,20 +132,14 @@ def validate_rocoto_xml(input_xml: OptionalPath) -> bool: :param input_xml: Path to rendered XML file. :return: Did the XML file conform to the schema? """ - - # Validate the XML. + with readable(input_xml) as f: + tree = etree.fromstring(bytes(f.read(), encoding="utf-8")) with open(_rocoto_schema_xml(), "r", encoding="utf-8") as f: schema = etree.RelaxNG(etree.parse(f)) - with readable(input_xml) as f: - xml = f.read() - tree = etree.fromstring(bytes(xml, encoding="utf-8")) - success = schema.validate(tree) - - # Log validation errors. - errors = str(etree.RelaxNG.error_log).split("\n") - log_method = log.error if len(errors) else log.info - log_method("%s Rocoto validation error%s found", len(errors), "" if len(errors) == 1 else "s") - for line in errors: - log.error(line) - - return success + valid = schema.validate(tree) + nerr = len(schema.error_log) + log_method = log.info if valid else log.error + log_method("%s Rocoto validation error%s found", nerr, "" if nerr == 1 else "s") + for err in list(schema.error_log): + log.error(err) + return valid diff --git a/src/uwtools/tests/test_rocoto.py b/src/uwtools/tests/test_rocoto.py index 8a44e002f..02081963f 100644 --- a/src/uwtools/tests/test_rocoto.py +++ b/src/uwtools/tests/test_rocoto.py @@ -77,17 +77,24 @@ def test_realize_rocoto_xml(vals, tmp_path): with patch.object(rocoto, "validate_rocoto_xml", value=True): with resources.as_file(resources.files("uwtools.tests.fixtures")) as path: config_file = path / fn - result = rocoto.realize_rocoto_xml(config_file=config_file, rendered_output=output) + result = rocoto.realize_rocoto_xml(config_file=config_file, output_file=output) assert result is validity +def test_realize_rocoto_default_output(): + with patch.object(rocoto, "validate_rocoto_xml", value=True): + with resources.as_file(resources.files("uwtools.tests.fixtures")) as path: + config_file = path / "hello_workflow.yaml" + result = rocoto.realize_rocoto_xml(config_file=config_file) + assert result is True + + def test_realize_rocoto_invalid_xml(): config_file = support.fixture_path("hello_workflow.yaml") xml = support.fixture_path("rocoto_invalid.xml") with patch.object(rocoto, "_write_rocoto_xml", return_value=None): - with patch.object(tempfile, "NamedTemporaryFile") as context_manager: - context_manager.return_value.__enter__.return_value.name = xml - result = rocoto.realize_rocoto_xml(config_file=config_file, rendered_output=xml) + with patch.object(tempfile, "mkstemp", return_value=(None, xml)): + result = rocoto.realize_rocoto_xml(config_file=config_file, output_file=xml) assert result is False @@ -104,7 +111,7 @@ def test__write_rocoto_xml(tmp_path): 
config_file = support.fixture_path("hello_workflow.yaml") output = tmp_path / "rendered.xml" - rocoto._write_rocoto_xml(config_file=config_file, rendered_output=output) + rocoto._write_rocoto_xml(config_file=config_file, output_file=output) expected = support.fixture_path("hello_workflow.xml") assert support.compare_files(expected, output) is True From e90fac42166be6225942324d61e7d60697d5dfc5 Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Tue, 31 Oct 2023 07:13:59 -0600 Subject: [PATCH 14/66] UW-376 (#326) --- src/uwtools/drivers/driver.py | 3 +- src/uwtools/drivers/forecast.py | 8 +- src/uwtools/resources/rocoto.jinja2 | 88 ---- .../resources/schema_with_metatasks.rng | 400 ++++++++---------- src/uwtools/rocoto.py | 369 ++++++++++++---- src/uwtools/tests/config/test_validator.py | 5 +- src/uwtools/tests/drivers/test_driver.py | 5 +- src/uwtools/tests/drivers/test_experiment.py | 36 -- .../tests/fixtures/hello_workflow.yaml | 2 +- src/uwtools/tests/support.py | 42 +- src/uwtools/tests/test_rocoto.py | 283 +++++++++---- src/uwtools/tests/utils/test_file.py | 4 + src/uwtools/utils/file.py | 12 + 13 files changed, 703 insertions(+), 554 deletions(-) delete mode 100644 src/uwtools/resources/rocoto.jinja2 diff --git a/src/uwtools/drivers/driver.py b/src/uwtools/drivers/driver.py index 28555a1f4..d9a669815 100644 --- a/src/uwtools/drivers/driver.py +++ b/src/uwtools/drivers/driver.py @@ -7,6 +7,7 @@ from abc import ABC, abstractmethod from collections.abc import Mapping from datetime import datetime +from pathlib import Path from typing import Any, Dict, Optional, Type, Union from uwtools.config import validator @@ -99,7 +100,7 @@ def scheduler(self) -> JobScheduler: @property @abstractmethod - def schema_file(self) -> str: + def schema_file(self) -> Path: """ The path to the file containing the schema to validate the config file against. """ diff --git a/src/uwtools/drivers/forecast.py b/src/uwtools/drivers/forecast.py index f37487d13..e58f8e402 100644 --- a/src/uwtools/drivers/forecast.py +++ b/src/uwtools/drivers/forecast.py @@ -7,7 +7,6 @@ import sys from collections.abc import Mapping from datetime import datetime -from importlib import resources from pathlib import Path from typing import Dict, Optional @@ -16,7 +15,7 @@ from uwtools.logging import log from uwtools.scheduler import BatchScript from uwtools.types import DefinitePath, OptionalPath -from uwtools.utils.file import handle_existing +from uwtools.utils.file import handle_existing, resource_pathobj from uwtools.utils.processing import execute @@ -188,12 +187,11 @@ def run(self, cycle: datetime) -> bool: return result.success @property - def schema_file(self) -> str: + def schema_file(self) -> Path: """ The path to the file containing the schema to validate the config file against. 
""" - with resources.as_file(resources.files("uwtools.resources")) as path: - return (path / "FV3Forecast.jsonschema").as_posix() + return resource_pathobj("FV3Forecast.jsonschema") # Private methods diff --git a/src/uwtools/resources/rocoto.jinja2 b/src/uwtools/resources/rocoto.jinja2 deleted file mode 100644 index 3de1adadf..000000000 --- a/src/uwtools/resources/rocoto.jinja2 +++ /dev/null @@ -1,88 +0,0 @@ -{%- macro dependency_tree(dep_dict) %} -{%- if dep_dict is mapping %} - {%- for tag, values in dep_dict.items() %} - {%- set tag_type = tag.split("_")[0] %} - {%- if values is mapping %} - <{{ tag_type -}} {% for attr, val in values.pop("attrs", {}).items() %} {{ attr }}="{{ val }}"{%- endfor -%}{%- if tag_type in ["taskdep", "metataskdep", "taskvalid"] %}/{%- endif %}> - {%- if values.get("text") %} - {{ values.pop("text") }} - - {%- elif values %} - {{- dependency_tree(values)|indent(2) }} - - {%- endif %} - {%- else %} - <{{ tag_type|indent(2) -}}> - {{- values -}} - - {%- endif %} - {%- endfor %} -{%- endif %} -{%- endmacro -%} - -{%- macro task(name, settings) %} - - - {%- for key, value in settings.items() -%} - {%- if key not in ["envars", "attrs", "dependency", "nnodes", "ppn"] %} - <{{ key }}>{{ value }} - {%- endif %} - {%- endfor %} - - {% for var, value in settings.get("envars", {}).items() %} - {{ var }}{{ value }} - {%- endfor %} - - {% if settings.get("dependency") -%} - - {{- dependency_tree(dep_dict=settings.get("dependency")) }} - - {%- endif %} - -{%- endmacro -%} - -{%- macro metatask(name, settings) %} - - {% for varname, value in settings.get("var", {}).items() %} - {{ value }} - {%- endfor %} - {%- for item, task_settings in settings.items() %} - {%- if item.split("_", 1)[0] == "task" %} - {%- if task_settings.get("command") %} - {{ task(name=item.split("_", 1)[-1], settings=task_settings)|indent(2) }} - {%- endif %} - {%- elif item.split("_", 1)[0] == "metatask" %} - {{ metatask(name=item.split("_", 1)[-1], settings=task_settings)|indent(2) }} - {%- endif %} - {%- endfor %} - - -{%- endmacro -%} - - - -{%- endfor %} - -]> - - - {%- for group, cdefs in workflow.cycledefs.items() %} - {%- for cdef in cdefs %} - {{ cdef }} - {%- endfor %} - {%- endfor %} - - {{ workflow.log }} - -{%- for item, settings in workflow.tasks.items() %} - {%- if item.split("_", 1)[0] == "task" %} - {{ task(name=item.split("_", 1)[-1], settings=settings ) }} - {%- elif item.split("_", 1)[0] == "metatask" %} - {{ metatask(name=item.split("_", 1)[-1], settings=settings ) }} - {%- endif %} -{%- endfor %} - - diff --git a/src/uwtools/resources/schema_with_metatasks.rng b/src/uwtools/resources/schema_with_metatasks.rng index e0b3aba01..cb022ba0d 100644 --- a/src/uwtools/resources/schema_with_metatasks.rng +++ b/src/uwtools/resources/schema_with_metatasks.rng @@ -1,10 +1,8 @@ - - - + + - - @@ -20,7 +18,6 @@ TRUE - @@ -36,368 +33,376 @@ cobalt - - + - - + - - + - - + - - + - - - + - + - - + - + - - - - - - + + + + + + - - - + + + - - - + + + - - - + + - - - - + - - - + - + - - + - + - + - + - - \s*(\d+|(#[^#\s]+#))+\s* - \s*(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* - \s*(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* - \s*(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* + + \s*(\d+|(#[^#\s]+#))+\s* + + + \s*(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* + + + \s*(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* + + + \s*(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* + - - \s*-?(\d+|(#[^#\s]+#))+\s* - 
\s*-?(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* - \s*-?(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* - \s*-?(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* + + \s*-?(\d+|(#[^#\s]+#))+\s* + + + \s*-?(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* + + + \s*-?(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* + + + \s*-?(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+:(\d+|(#[^#\s]+#))+\s* + - - - \s*\d\d\d\d\d\d\d\d\d\d\d\d\s* + + \s*\d\d\d\d\d\d\d\d\d\d\d\d\s* + - - \* - \*/\d+ - (\d+|\d+-\d+|\d+-\d+/\d+)(,\d+|,\d+-\d+|,\d+-\d+/\d+)* + + \* + + + \*/\d+ + + + (\d+|\d+-\d+|\d+-\d+/\d+)(,\d+|,\d+-\d+|,\d+-\d+/\d+)* + - - - + + + + + + + + + - - + - - + + - - + - + - RUNNING - Running - running - SUCCEEDED + RUNNING + Running + running + SUCCEEDED DEAD - Succeeded + Succeeded Dead - succeeded + succeeded dead - + - + - + - + - + - + - + - + - + - + - + - + - + - + - SUCCEEDED + SUCCEEDED DEAD - Succeeded + Succeeded Dead - succeeded + succeeded dead - 0.01.0 + + 0.0 + 1.0 + - + - + - + - + - + - + - + - + - + - + - + - 0.01.0 + + 0.0 + 1.0 + - + - - - + - - + - - + - - + - @@ -412,89 +417,77 @@ true True TRUE - + - - - - + + - - + + - - - - - + + + + - + - - + - - + - - + - - + - - - - - - - - - + + + + + + + - - - - - - + + + + + - + - - + - @@ -503,121 +496,102 @@ - + - + - + - - + - - + - - + - - + - - + - - + - - - - - - + - - + - - + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + - - diff --git a/src/uwtools/rocoto.py b/src/uwtools/rocoto.py index 0d1d9806b..4d6f547d0 100644 --- a/src/uwtools/rocoto.py +++ b/src/uwtools/rocoto.py @@ -2,97 +2,25 @@ Support for creating Rocoto XML workflow documents. """ +import re import tempfile -from importlib import resources +from dataclasses import dataclass from pathlib import Path +from typing import Optional, Tuple +import yaml +from jinja2 import DebugUndefined, Template from lxml import etree +from lxml.etree import Element, SubElement from uwtools.config.core import YAMLConfig -from uwtools.config.j2template import J2Template from uwtools.config.validator import validate_yaml +from uwtools.exceptions import UWConfigError from uwtools.logging import log -from uwtools.types import DefinitePath, OptionalPath -from uwtools.utils.file import readable, writable +from uwtools.types import OptionalPath +from uwtools.utils.file import readable, resource_pathobj, writable -# Private functions - -def _add_jobname(tree: dict) -> None: - """ - Add a "jobname" attribute to each "task" element in the given config tree. - - :param tree: A config tree containing "task" elements. - """ - for element, subtree in tree.items(): - element_parts = element.split("_", maxsplit=1) - element_type = element_parts[0] - if element_type == "task": - # Use the provided attribute if it is present, otherwise use the name in the key. - task_name = element_parts[1] - tree[element]["jobname"] = subtree.get("attrs", {}).get("name") or task_name - elif element_type == "metatask": - _add_jobname(subtree) - - -def _add_jobname_to_tasks( - input_yaml: OptionalPath = None, -) -> YAMLConfig: - """ - Load YAML config and add job names to each defined workflow task. - - :param input_yaml: Path to YAML input file. - """ - values = YAMLConfig(input_yaml) - tasks = values["workflow"]["tasks"] - if isinstance(tasks, dict): - _add_jobname(tasks) - return values - - -def _rocoto_schema_xml() -> DefinitePath: - """ - The path to the file containing the schema to validate the XML file against. 
- """ - with resources.as_file(resources.files("uwtools.resources")) as path: - return path / "schema_with_metatasks.rng" - - -def _rocoto_schema_yaml() -> DefinitePath: - """ - The path to the file containing the schema to validate the YAML file against. - """ - with resources.as_file(resources.files("uwtools.resources")) as path: - return path / "rocoto.jsonschema" - - -def _rocoto_template_xml() -> DefinitePath: - """ - The path to the file containing the Rocoto workflow document template to render. - """ - with resources.as_file(resources.files("uwtools.resources")) as path: - return path / "rocoto.jinja2" - - -def _write_rocoto_xml( - config_file: OptionalPath, - output_file: OptionalPath, -) -> None: - """ - Render the Rocoto workflow defined in the given YAML to XML. - - :param config_file: Path to YAML input file. - :param output_file: Path to write rendered XML file. - """ - - values = _add_jobname_to_tasks(config_file) - - # Render the template. - template = J2Template(values=values.data, template_path=_rocoto_template_xml()) - template.dump(output_path=output_file) - - -# Public functions def realize_rocoto_xml( config_file: OptionalPath, output_file: OptionalPath = None, @@ -106,13 +34,9 @@ def realize_rocoto_xml( :return: Did the input and output files conform to theirr schemas? """ - if not validate_yaml(config_file=config_file, schema_file=_rocoto_schema_yaml()): - log.error("YAML validation errors identified in %s", config_file) - return False - _, temp_file = tempfile.mkstemp(suffix=".xml") - _write_rocoto_xml(config_file=config_file, output_file=temp_file) + _RocotoXML(config_file).dump(temp_file) if not validate_rocoto_xml(input_xml=temp_file): log.error("Rocoto validation errors identified in %s", temp_file) @@ -134,7 +58,7 @@ def validate_rocoto_xml(input_xml: OptionalPath) -> bool: """ with readable(input_xml) as f: tree = etree.fromstring(bytes(f.read(), encoding="utf-8")) - with open(_rocoto_schema_xml(), "r", encoding="utf-8") as f: + with open(resource_pathobj("schema_with_metatasks.rng"), "r", encoding="utf-8") as f: schema = etree.RelaxNG(etree.parse(f)) valid = schema.validate(tree) nerr = len(schema.error_log) @@ -143,3 +67,274 @@ def validate_rocoto_xml(input_xml: OptionalPath) -> bool: for err in list(schema.error_log): log.error(err) return valid + + +class _RocotoXML: + """ + Generate a Rocoto XML document from a UW YAML config. + """ + + def __init__(self, config_file: OptionalPath = None) -> None: + self._config_validate(config_file) + self._config = YAMLConfig(config_file).data + self._add_workflow(self._config) + + def dump(self, path: OptionalPath = None) -> None: + """ + Emit Rocoto XML document to file or stdout. + + :param path: Optional path to write XML document to. + """ + # Render the tree to a string, fix mangled entities (e.g. "&FOO;" -> "&FOO;"), insert + # !DOCTYPE block, then write final XML. + xml = etree.tostring( + self._root, pretty_print=True, encoding="utf-8", xml_declaration=True + ).decode() + xml = re.sub(r"&([^;]+);", r"&\1;", xml) + xml = self._insert_doctype(xml) + with writable(path) as f: + f.write(xml.strip()) + + @property + def _doctype(self) -> Optional[str]: + """ + Generate the block with definitions. + + :return: The block if entities are defined, otherwise None. 
+ """ + if entities := self._config[STR.workflow].get(STR.entities): + tags = (f' ' for key, val in entities.items()) + return "" % "\n".join(tags) + return None + + def _config_validate(self, config_file: OptionalPath) -> None: + """ + Validate the given YAML config. + + :param config_file: Path to the YAML config (defaults to stdin). + """ + if not validate_yaml( + config_file=config_file, schema_file=resource_pathobj("rocoto.jsonschema") + ): + raise UWConfigError("YAML validation errors identified in %s" % config_file) + + def _add_metatask(self, e: Element, config: dict, taskname: str) -> None: + """ + Add a element to the . + + :param e: The parent element to add the new element to. + :param config: Configuration data for this element. + :param taskname: The name of the metatask being defined. + """ + e = SubElement(e, STR.metatask, name=taskname) + for key, val in config.items(): + tag, taskname = self._tag_name(key) + if tag == STR.metatask: + self._add_metatask(e, val, taskname) + elif tag == STR.task: + self._add_task(e, val, taskname) + elif tag == STR.var: + for name, value in val.items(): + SubElement(e, STR.var, name=name).text = value + + def _add_task(self, e: Element, config: dict, taskname: str) -> None: + """ + Add a element to the . + + :param e: The parent element to add the new element to. + :param config: Configuration data for this element. + :param taskname: The name of the task being defined. + """ + e = SubElement(e, STR.task, name=taskname) + self._set_attrs(e, config) + self._set_and_render_jobname(config, taskname) + for tag in ( + STR.account, + STR.command, + STR.cores, + STR.deadline, + STR.exclusive, + STR.jobname, + STR.join, + STR.memory, + STR.native, + STR.nodes, + STR.nodesize, + STR.partition, + STR.queue, + STR.rewind, + STR.shared, + STR.stderr, + STR.stdout, + STR.walltime, + ): + if tag in config: + SubElement(e, tag).text = config[tag] + for name, value in config.get(STR.envars, {}).items(): + self._add_task_envar(e, name, value) + if STR.dependency in config: + self._add_task_dependency(e, config[STR.dependency]) + + def _add_task_dependency(self, e: Element, config: dict) -> None: + """ + Add a element to the . + + :param e: The parent element to add the new element to. + :param config: Configuration data for this element. + """ + e = SubElement(e, STR.dependency) + for key, block in config.items(): + tag, _ = self._tag_name(key) + if tag == STR.taskdep: + self._set_attrs(SubElement(e, STR.taskdep), block) + else: + raise UWConfigError("Unhandled dependency type %s" % tag) + + def _add_task_envar(self, e: Element, name: str, value: str) -> None: + """ + Add a element to the . + + :param e: The parent element to add the new element to. + :param config: Configuration data for this element. + """ + e = SubElement(e, STR.envar) + SubElement(e, STR.name).text = name + SubElement(e, STR.value).text = value + + def _add_workflow(self, config: dict) -> None: + """ + Create the root element. + + :param config: Configuration data for this element. + """ + config, e = config[STR.workflow], Element(STR.workflow) + self._set_attrs(e, config) + self._add_workflow_cycledefs(e, config[STR.cycledefs]) + self._add_workflow_log(e, config[STR.log]) + self._add_workflow_tasks(e, config[STR.tasks]) + self._root: Element = e + + def _add_workflow_cycledefs(self, e: Element, config: dict) -> None: + """ + Add element(s) to the . + + :param e: The parent element to add the new element to. + :param config: Configuration data for this element. 
+ """ + for name, coords in config.items(): + for coord in coords: + SubElement(e, STR.cycledef, group=name).text = coord + + def _add_workflow_log(self, e: Element, logfile: str) -> None: + """ + Add element(s) to the . + + :param e: The parent element to add the new element to. + :param logfile: The path to the log file. + """ + SubElement(e, STR.log).text = logfile + + def _add_workflow_tasks(self, e: Element, config: dict) -> None: + """ + Add and/or element(s) to the . + + :param e: The parent element to add the new element to. + :param config: Configuration data for these elements. + """ + for key, block in config.items(): + tag, name = self._tag_name(key) + {STR.metatask: self._add_metatask, STR.task: self._add_task}[tag](e, block, name) + + def _insert_doctype(self, xml: str) -> str: + """ + Return the given XML document with an Inserted block. + + :param xml: The XML document rendered as a string. + """ + lines = xml.split("\n") + if doctype := self._doctype: + lines.insert(1, doctype) + return "\n".join(lines) + + def _set_and_render_jobname(self, config: dict, taskname: str) -> dict: + """ + In the given config, ensure 'jobname' is set, then render {{ jobname }}. + + :param config: Configuration data for this element. + :param taskname: The name of the task being defined. + """ + if STR.jobname not in config: + config[STR.jobname] = taskname + return yaml.safe_load( + Template(yaml.dump(config), undefined=DebugUndefined).render( + jobname=config[STR.jobname] + ) + ) + + def _set_attrs(self, e: Element, config: dict) -> None: + """ + Set attributes on an element. + + :param e: The element to set the attributes on. + :param config: A config containing the attribute definitions. + """ + for attr, val in config[STR.attrs].items(): + e.set(attr, str(val)) + + def _tag_name(self, key: str) -> Tuple[str, str]: + """ + Return the tag and metadata extracted from a metadata-bearing key. + + :param key: A string of the form "tag_metadata" (or simply STR.tag). + """ + # For example, key "task_foo"bar" will be split into tag "task" and name "foo_bar". + parts = key.split("_") + tag = parts[0] + name = "_".join(parts[1:]) if parts[1:] else "" + return tag, name + + +@dataclass(frozen=True) +class _STR: + """ + A lookup map for Rocoto-related strings. 
+ """ + + account: str = "account" + attrs: str = "attrs" + command: str = "command" + cores: str = "cores" + cycledef: str = "cycledef" + cycledefs: str = "cycledefs" + deadline: str = "deadline" + dependency: str = "dependency" + entities: str = "entities" + envar: str = "envar" + envars: str = "envars" + exclusive: str = "exclusive" + jobname: str = "jobname" + join: str = "join" + log: str = "log" + memory: str = "memory" + metatask: str = "metatask" + name: str = "name" + native: str = "native" + nodes: str = "nodes" + nodesize: str = "nodesize" + partition: str = "partition" + queue: str = "queue" + rewind: str = "rewind" + shared: str = "shared" + stderr: str = "stderr" + stdout: str = "stdout" + tag: str = "tag" + task: str = "task" + taskdep: str = "taskdep" + tasks: str = "tasks" + value: str = "value" + var: str = "var" + walltime: str = "walltime" + workflow: str = "workflow" + + +STR = _STR() diff --git a/src/uwtools/tests/config/test_validator.py b/src/uwtools/tests/config/test_validator.py index 85cbe79db..e698c745b 100644 --- a/src/uwtools/tests/config/test_validator.py +++ b/src/uwtools/tests/config/test_validator.py @@ -4,7 +4,6 @@ """ import json import logging -from importlib import resources from pathlib import Path from typing import Any, Dict from unittest.mock import patch @@ -14,6 +13,7 @@ from uwtools.config import validator from uwtools.logging import log from uwtools.tests.support import logged, regex_logged +from uwtools.utils.file import resource_pathobj # Support functions @@ -107,8 +107,7 @@ def test_validate_yaml_pass(config, config_file, schema, schema_file): @fixture def rocoto_assets(): - with resources.as_file(resources.files("uwtools.resources")) as resc: - schema_file = resc / "rocoto.jsonschema" + schema_file = resource_pathobj("rocoto.jsonschema") kwargs = {"schema_file": schema_file, "config_file": "/not/used"} config = { "workflow": { diff --git a/src/uwtools/tests/drivers/test_driver.py b/src/uwtools/tests/drivers/test_driver.py index 95397a692..47a757ed1 100644 --- a/src/uwtools/tests/drivers/test_driver.py +++ b/src/uwtools/tests/drivers/test_driver.py @@ -6,6 +6,7 @@ import datetime import logging from collections.abc import Mapping +from pathlib import Path from unittest.mock import patch import pytest @@ -40,8 +41,8 @@ def run_cmd(self, *args): pass @property - def schema_file(self) -> str: - return "" + def schema_file(self) -> Path: + return Path() @fixture diff --git a/src/uwtools/tests/drivers/test_experiment.py b/src/uwtools/tests/drivers/test_experiment.py index bdfcde1b6..14696d8c3 100644 --- a/src/uwtools/tests/drivers/test_experiment.py +++ b/src/uwtools/tests/drivers/test_experiment.py @@ -3,7 +3,6 @@ Tests for uwtools.drivers.experiment module. """ -import pytest from pytest import fixture from uwtools.drivers import experiment @@ -16,38 +15,3 @@ def SRWExperiment(): def test_SRWExperiment_load_config(SRWExperiment): assert SRWExperiment - - -@pytest.mark.skip(reason="no way of currently testing this") -def test_load_config(): - """ - Test that YAML load, update, and dump work with a basic YAML file. - """ - - -@pytest.mark.skip(reason="no way of currently testing this") -def test_validate_config(): - """ - Test that the YAML file is validated correctly. - """ - - -@pytest.mark.skip(reason="no way of currently testing this") -def test_create_experiment(): - """ - Test that the experiment directory and manager files are created. 
- """ - - -@pytest.mark.skip(reason="no way of currently testing this") -def test_create_manager(): - """ - Test that the manager files are created. - """ - - -@pytest.mark.skip(reason="no way of currently testing this") -def test_link_fix_files(): - """ - Test that the fix files are linked. - """ diff --git a/src/uwtools/tests/fixtures/hello_workflow.yaml b/src/uwtools/tests/fixtures/hello_workflow.yaml index be42a83fb..6be824346 100644 --- a/src/uwtools/tests/fixtures/hello_workflow.yaml +++ b/src/uwtools/tests/fixtures/hello_workflow.yaml @@ -35,4 +35,4 @@ workflow: dependency: taskdep: attrs: - task: hello \ No newline at end of file + task: hello diff --git a/src/uwtools/tests/support.py b/src/uwtools/tests/support.py index 81467e4f5..801831d0c 100644 --- a/src/uwtools/tests/support.py +++ b/src/uwtools/tests/support.py @@ -12,16 +12,9 @@ def compare_files(path1: str, path2: str) -> bool: Determines whether the two given files are identical up to any number of trailing newlines, which are ignored. Print the contents of both files when they do not match. - Parameters - ---------- - path1 - Path to first file - path2 - Path to second file - - Returns - ------- - A bool indicating whether or not the files match. + :param path1: Path to first file. + :param path2: Path to second file. + :return: Do the files match? """ with open(path1, "r", encoding="utf-8") as f: content1 = f.read().rstrip("\n") @@ -41,12 +34,9 @@ def fixture_pathobj(suffix: str = "") -> Path: """ Returns a pathlib Path object to a test-fixture resource file. - Parameters - ---------- - suffix - A subpath relative to the location of the unit-test fixture resource - files. The prefix path to the resources files is known to Python and - varies based on installation location. + :param suffix: A subpath relative to the location of the unit-test fixture resource files. The + prefix path to the resources files is known to Python and varies based on installation + location. """ with resources.as_file(resources.files("uwtools.tests.fixtures")) as prefix: path = prefix / suffix @@ -57,12 +47,9 @@ def fixture_path(suffix: str = "") -> str: """ Returns a POSIX path to a test-fixture resource file. - Parameters - ---------- - suffix - A subpath relative to the location of the unit-test fixture resource - files. The prefix path to the resources files is known to Python and - varies based on installation location. + :param suffix: A subpath relative to the location of the unit-test fixture resource files. The + prefix path to the resources files is known to Python and varies based on installation + location. """ return fixture_pathobj(suffix).as_posix() @@ -71,12 +58,9 @@ def fixture_uri(suffix: str = "") -> str: """ Returns a file:// URI path to a test-fixture resource file. - Parameters - ---------- - suffix - A subpath relative to the location of the unit-test fixture resource - files. The prefix path to the resources files is known to Python and - varies based on installation location. + :param suffix: A subpath relative to the location of the unit-test fixture resource files. The + prefix path to the resources files is known to Python and varies based on installation + location. """ return fixture_pathobj(suffix).as_uri() @@ -87,6 +71,7 @@ def logged(caplog: LogCaptureFixture, msg: str) -> bool: :param caplog: The pytest log capture. :param msg: The message sought. + :return: Does it? 
""" return msg in [record.message for record in caplog.records] @@ -97,6 +82,7 @@ def regex_logged(caplog: LogCaptureFixture, msg: str) -> bool: :param caplog: The pytest log capture. :param msg: The message sought. + :return: Does it? """ pattern = re.compile(re.escape(msg)) return any(pattern.search(record.message) for record in caplog.records) diff --git a/src/uwtools/tests/test_rocoto.py b/src/uwtools/tests/test_rocoto.py index 02081963f..21019aa75 100644 --- a/src/uwtools/tests/test_rocoto.py +++ b/src/uwtools/tests/test_rocoto.py @@ -1,117 +1,220 @@ -# pylint: disable=missing-function-docstring, protected-access +# pylint: disable=missing-function-docstring,protected-access,redefined-outer-name """ Tests for uwtools.rocoto module. """ -import tempfile -from importlib import resources -from unittest.mock import patch +import shutil +from unittest.mock import DEFAULT as D +from unittest.mock import PropertyMock, patch import pytest -import yaml +from pytest import fixture, raises from uwtools import rocoto -from uwtools.config.core import YAMLConfig -from uwtools.tests import support - -# Test functions - - -def test__add_jobname(): - expected = yaml.safe_load( - """ -task_hello: - command: echo hello - jobname: hello -metatask_howdy: - foo: bar - task_howdy_#mem#: - command: echo hello - jobname: howdy_#mem# -""" - ) - - tree = yaml.safe_load( - """ -task_hello: - command: echo hello -metatask_howdy: - foo: bar - task_howdy_#mem#: - command: echo hello -""" - ) +from uwtools.exceptions import UWConfigError +from uwtools.tests.support import fixture_path - rocoto._add_jobname(tree) - assert expected == tree +# Fixtures -def test__add_jobname_to_tasks(): - with resources.as_file(resources.files("uwtools.tests.fixtures")) as path: - input_yaml = path / "hello_workflow.yaml" +@fixture +def assets(tmp_path): + return fixture_path("hello_workflow.yaml"), tmp_path / "rocoto.xml" - values = YAMLConfig(input_yaml) - tasks = values["workflow"]["tasks"] - with patch.object(rocoto, "_add_jobname") as module: - rocoto._add_jobname_to_tasks(input_yaml) - assert module.called_once_with(tasks) +@fixture +def instance(assets): + cfgfile, _ = assets + return rocoto._RocotoXML(config_file=cfgfile) -def test__rocoto_schema_yaml(): - with resources.as_file(resources.files("uwtools.resources")) as path: - expected = path / "rocoto.jsonschema" - assert rocoto._rocoto_schema_yaml() == expected +@fixture +def root(): + return rocoto.Element("root") -def test__rocoto_schema_xml(): - with resources.as_file(resources.files("uwtools.resources")) as path: - expected = path / "schema_with_metatasks.rng" - assert rocoto._rocoto_schema_xml() == expected +# Tests -@pytest.mark.parametrize("vals", [("hello_workflow.yaml", True), ("fruit_config.yaml", False)]) -def test_realize_rocoto_xml(vals, tmp_path): - fn, validity = vals - output = tmp_path / "rendered.xml" - with patch.object(rocoto, "validate_rocoto_xml", value=True): - with resources.as_file(resources.files("uwtools.tests.fixtures")) as path: - config_file = path / fn - result = rocoto.realize_rocoto_xml(config_file=config_file, output_file=output) - assert result is validity +def test_realize_rocoto_xml_to_file(assets): + cfgfile, outfile = assets + assert rocoto.realize_rocoto_xml(config_file=cfgfile, output_file=outfile) is True -def test_realize_rocoto_default_output(): - with patch.object(rocoto, "validate_rocoto_xml", value=True): - with resources.as_file(resources.files("uwtools.tests.fixtures")) as path: - config_file = path / "hello_workflow.yaml" - 
result = rocoto.realize_rocoto_xml(config_file=config_file) - assert result is True +def test_realize_rocoto_xml_to_stdout(capsys, assets): + cfgfile, outfile = assets + assert rocoto.realize_rocoto_xml(config_file=cfgfile) is True + with open(outfile, "w", encoding="utf-8") as f: + f.write(capsys.readouterr().out) + assert rocoto.validate_rocoto_xml(outfile) -def test_realize_rocoto_invalid_xml(): - config_file = support.fixture_path("hello_workflow.yaml") - xml = support.fixture_path("rocoto_invalid.xml") - with patch.object(rocoto, "_write_rocoto_xml", return_value=None): - with patch.object(tempfile, "mkstemp", return_value=(None, xml)): - result = rocoto.realize_rocoto_xml(config_file=config_file, output_file=xml) - assert result is False +def test_realize_rocoto_invalid_xml(assets): + cfgfile, outfile = assets + dump = lambda _, dst: shutil.copyfile(fixture_path("rocoto_invalid.xml"), dst) + with patch.object(rocoto._RocotoXML, "dump", dump): + assert rocoto.realize_rocoto_xml(config_file=cfgfile, output_file=outfile) is False @pytest.mark.parametrize("vals", [("hello_workflow.xml", True), ("rocoto_invalid.xml", False)]) -def test_rocoto_xml_is_valid(vals): +def test_validate_rocoto_xml(vals): fn, validity = vals - xml = support.fixture_path(fn) - result = rocoto.validate_rocoto_xml(input_xml=xml) - - assert result is validity - - -def test__write_rocoto_xml(tmp_path): - config_file = support.fixture_path("hello_workflow.yaml") - output = tmp_path / "rendered.xml" - - rocoto._write_rocoto_xml(config_file=config_file, output_file=output) - - expected = support.fixture_path("hello_workflow.xml") - assert support.compare_files(expected, output) is True + xml = fixture_path(fn) + assert rocoto.validate_rocoto_xml(input_xml=xml) is validity + + +def test__RocotoXML__doctype_entities(instance): + assert '' in instance._doctype + assert '' in instance._doctype + + +def test__RocotoXML__doctype_entities_none(instance): + del instance._config["workflow"]["entities"] + assert instance._doctype is None + + +def test__RocotoXML__config_validate(assets, instance): + cfgfile, _ = assets + instance._config_validate(config_file=cfgfile) + + +def test__RocotoXML__config_validate_fail(instance, tmp_path): + cfgfile = tmp_path / "bad.yaml" + with open(cfgfile, "w", encoding="utf-8") as f: + print("not: ok", file=f) + with raises(UWConfigError): + instance._config_validate(config_file=cfgfile) + + +def test__RocotoXML__add_metatask(instance, root): + config = {"metatask_foo": "1", "task_bar": "2", "var": {"baz": "3", "qux": "4"}} + taskname = "test-metatask" + orig = instance._add_metatask + with patch.multiple(instance, _add_metatask=D, _add_task=D) as mocks: + orig(e=root, config=config, taskname=taskname) + metatask = root[0] + assert metatask.tag == "metatask" + assert metatask.get("name") == taskname + mocks["_add_metatask"].assert_called_once_with(metatask, "1", "foo") + mocks["_add_task"].assert_called_once_with(metatask, "2", "bar") + + +def test__RocotoXML__add_task(instance, root): + config = { + "attrs": {"foo": "1", "bar": "2"}, + "account": "baz", + "dependency": "qux", + "envars": {"A": "apple"}, + } + taskname = "test-task" + with patch.multiple(instance, _add_task_dependency=D, _add_task_envar=D) as mocks: + instance._add_task(e=root, config=config, taskname=taskname) + task = root[0] + assert task.tag == "task" + assert task.get("name") == taskname + assert task.get("foo") == "1" + assert task.get("bar") == "2" + mocks["_add_task_dependency"].assert_called_once_with(task, "qux") + 
mocks["_add_task_envar"].assert_called_once_with(task, "A", "apple") + + +def test__RocotoXML__add_task_dependency(instance, root): + config = {"taskdep": {"attrs": {"task": "foo"}}} + instance._add_task_dependency(e=root, config=config) + dependency = root[0] + assert dependency.tag == "dependency" + taskdep = dependency[0] + assert taskdep.tag == "taskdep" + assert taskdep.get("task") == "foo" + + +def test__RocotoXML__add_task_dependency_fail(instance, root): + config = {"unrecognized": "whatever"} + with raises(UWConfigError): + instance._add_task_dependency(e=root, config=config) + + +def test__RocotoXML__add_task_envar(instance, root): + instance._add_task_envar(root, "foo", "bar") + envar = root[0] + name, value = envar + assert name.tag == "name" + assert name.text == "foo" + assert value.tag == "value" + assert value.text == "bar" + + +def test__RocotoXML__add_workflow(instance): + config = { + "workflow": {"attrs": {"foo": "1", "bar": "2"}, "cycledefs": "3", "log": "4", "tasks": "5"} + } + with patch.multiple( + instance, _add_workflow_cycledefs=D, _add_workflow_log=D, _add_workflow_tasks=D + ) as mocks: + instance._add_workflow(config=config) + workflow = instance._root + assert workflow.tag == "workflow" + assert workflow.get("foo") == "1" + assert workflow.get("bar") == "2" + mocks["_add_workflow_cycledefs"].assert_called_once_with(workflow, "3") + mocks["_add_workflow_log"].assert_called_once_with(workflow, "4") + mocks["_add_workflow_tasks"].assert_called_once_with(workflow, "5") + + +def test__RocotoXML__add_workflow_cycledefs(instance, root): + config = {"foo": ["1", "2"], "bar": ["3", "4"]} + instance._add_workflow_cycledefs(e=root, config=config) + for i, group, coord in [(0, "foo", "1"), (1, "foo", "2"), (2, "bar", "3"), (3, "bar", "4")]: + assert root[i].tag == "cycledef" + assert root[i].get("group") == group + assert root[i].text == coord + + +def test__RocotoXML__add_workflow_log(instance, root): + path = "/path/to/logfile" + instance._add_workflow_log(e=root, logfile=path) + log = root[0] + assert log.tag == "log" + assert log.text == path + + +def test__RocotoXML__add_workflow_tasks(instance, root): + config = {"metatask_foo": "1", "task_bar": "2"} + with patch.multiple(instance, _add_metatask=D, _add_task=D) as mocks: + instance._add_workflow_tasks(e=root, config=config) + mocks["_add_metatask"].assert_called_once_with(root, "1", "foo") + mocks["_add_task"].assert_called_once_with(root, "2", "bar") + + +def test__RocotoXML__insert_doctype(instance): + with patch.object(rocoto._RocotoXML, "_doctype", new_callable=PropertyMock) as _doctype: + _doctype.return_value = "bar" + assert instance._insert_doctype("foo\nbaz\n") == "foo\nbar\nbaz\n" + + +def test__RocotoXML__insert_doctype_none(instance): + with patch.object(rocoto._RocotoXML, "_doctype", new_callable=PropertyMock) as _doctype: + _doctype.return_value = None + assert instance._insert_doctype("foo\nbaz\n") == "foo\nbaz\n" + + +def test__RocotoXML__set_and_render_jobname(instance): + config = {"foo": "{{ jobname }}", "baz": "{{ qux }}"} + assert instance._set_and_render_jobname(config=config, taskname="bar") == { + "jobname": "bar", # set + "foo": "bar", # rendered + "baz": "{{ qux }}", # ignored + } + + +def test__RocotoXML__setattrs(instance, root): + config = {"attrs": {"foo": "1", "bar": "2"}} + instance._set_attrs(e=root, config=config) + assert root.get("foo") == "1" + assert root.get("bar") == "2" + + +def test__RocotoXML__tag_name(instance): + assert instance._tag_name("foo") == ("foo", "") + assert 
instance._tag_name("foo_bar") == ("foo", "bar") + assert instance._tag_name("foo_bar_baz") == ("foo", "bar_baz") diff --git a/src/uwtools/tests/utils/test_file.py b/src/uwtools/tests/utils/test_file.py index 47a22afb3..2494129ba 100644 --- a/src/uwtools/tests/utils/test_file.py +++ b/src/uwtools/tests/utils/test_file.py @@ -136,6 +136,10 @@ def test_readable_nofile(): assert hasattr(f, "read") +def test_resource_pathobj(): + assert file.resource_pathobj().is_dir() + + def test_writable_file(tmp_path): apath = tmp_path / "afile" with file.writable(filepath=apath) as f: diff --git a/src/uwtools/utils/file.py b/src/uwtools/utils/file.py index bd1f64567..3a51d9ac2 100644 --- a/src/uwtools/utils/file.py +++ b/src/uwtools/utils/file.py @@ -9,6 +9,7 @@ from dataclasses import dataclass from datetime import datetime as dt from functools import cache +from importlib import resources from io import StringIO from pathlib import Path from typing import IO, Any, Generator, Union @@ -157,6 +158,17 @@ def readable( yield _stdinproxy() +def resource_pathobj(suffix: str = "") -> Path: + """ + Returns a pathlib Path object to a uwtools resource file. + + :param suffix: A subpath relative to the location of the uwtools resource files. The prefix path + to the resources files is known to Python and varies based on installation location. + """ + with resources.as_file(resources.files("uwtools.resources")) as prefix: + return prefix / suffix + + @contextmanager def writable(filepath: OptionalPath = None, mode: str = "w") -> Generator[IO, None, None]: """ From a6335cee963bbc3433b72f7d5893eb87235b0c02 Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Tue, 31 Oct 2023 10:09:40 -0600 Subject: [PATCH 15/66] post-uw-376-items (#328) --- README.md | 5 +- src/uwtools/cli.py | 5 +- src/uwtools/rocoto.py | 5 +- src/uwtools/tests/test_rocoto.py | 307 +++++++++++++++---------------- src/uwtools/utils/file.py | 5 +- 5 files changed, 155 insertions(+), 172 deletions(-) diff --git a/README.md b/README.md index 6ac2b0adf..fb86e276e 100644 --- a/README.md +++ b/README.md @@ -97,8 +97,9 @@ In addition to the `make devshell` command, two other `make` targets are availab | Command | Description | | ---------------- | ---------------------------------------------------------- | -| `make package` | Builds a `uwtools` conda package | | `make env` | Creates a conda environment based on the `uwtools` code | +| `make meta` | Update `recipe/meta.json` from `recipe/meta.yaml` | +| `make package` | Builds a `uwtools` conda package | These targets work from the code in its current state in the clone. `make env` calls `make package` automatically to create a local package, then builds an environment based on the package. @@ -123,7 +124,7 @@ The following files in this repo are derived from their counterparts in the [con │   ├── build.sh │   ├── channels │   ├── conda_build_config.yaml -│   ├── .gitignore +│   ├── meta.json │   ├── meta.yaml │   └── run_test.sh ├── src diff --git a/src/uwtools/cli.py b/src/uwtools/cli.py index 10d2b8acf..3c784ee43 100644 --- a/src/uwtools/cli.py +++ b/src/uwtools/cli.py @@ -775,7 +775,7 @@ def _switch(arg: str) -> str: @dataclass(frozen=True) -class _STR: +class STR: """ A lookup map for CLI-related strings. 
""" @@ -812,6 +812,3 @@ class _STR: valsfmt: str = "values_format" valsneeded: str = "values_needed" verbose: str = "verbose" - - -STR = _STR() diff --git a/src/uwtools/rocoto.py b/src/uwtools/rocoto.py index 4d6f547d0..5b56eb7ef 100644 --- a/src/uwtools/rocoto.py +++ b/src/uwtools/rocoto.py @@ -295,7 +295,7 @@ def _tag_name(self, key: str) -> Tuple[str, str]: @dataclass(frozen=True) -class _STR: +class STR: """ A lookup map for Rocoto-related strings. """ @@ -335,6 +335,3 @@ class _STR: var: str = "var" walltime: str = "walltime" workflow: str = "workflow" - - -STR = _STR() diff --git a/src/uwtools/tests/test_rocoto.py b/src/uwtools/tests/test_rocoto.py index 21019aa75..48b0a2caf 100644 --- a/src/uwtools/tests/test_rocoto.py +++ b/src/uwtools/tests/test_rocoto.py @@ -22,17 +22,6 @@ def assets(tmp_path): return fixture_path("hello_workflow.yaml"), tmp_path / "rocoto.xml" -@fixture -def instance(assets): - cfgfile, _ = assets - return rocoto._RocotoXML(config_file=cfgfile) - - -@fixture -def root(): - return rocoto.Element("root") - - # Tests @@ -63,158 +52,160 @@ def test_validate_rocoto_xml(vals): assert rocoto.validate_rocoto_xml(input_xml=xml) is validity -def test__RocotoXML__doctype_entities(instance): - assert '' in instance._doctype - assert '' in instance._doctype - +class Test_RocotoXML: + """ + Tests for class uwtools.rocoto._RocotoXML. + """ -def test__RocotoXML__doctype_entities_none(instance): - del instance._config["workflow"]["entities"] - assert instance._doctype is None + @fixture + def instance(self, assets): + cfgfile, _ = assets + return rocoto._RocotoXML(config_file=cfgfile) + @fixture + def root(self): + return rocoto.Element("root") -def test__RocotoXML__config_validate(assets, instance): - cfgfile, _ = assets - instance._config_validate(config_file=cfgfile) + def test__doctype_entities(self, instance): + assert '' in instance._doctype + assert '' in instance._doctype + def test__doctype_entities_none(self, instance): + del instance._config["workflow"]["entities"] + assert instance._doctype is None -def test__RocotoXML__config_validate_fail(instance, tmp_path): - cfgfile = tmp_path / "bad.yaml" - with open(cfgfile, "w", encoding="utf-8") as f: - print("not: ok", file=f) - with raises(UWConfigError): + def test__config_validate(self, assets, instance): + cfgfile, _ = assets instance._config_validate(config_file=cfgfile) - -def test__RocotoXML__add_metatask(instance, root): - config = {"metatask_foo": "1", "task_bar": "2", "var": {"baz": "3", "qux": "4"}} - taskname = "test-metatask" - orig = instance._add_metatask - with patch.multiple(instance, _add_metatask=D, _add_task=D) as mocks: - orig(e=root, config=config, taskname=taskname) - metatask = root[0] - assert metatask.tag == "metatask" - assert metatask.get("name") == taskname - mocks["_add_metatask"].assert_called_once_with(metatask, "1", "foo") - mocks["_add_task"].assert_called_once_with(metatask, "2", "bar") - - -def test__RocotoXML__add_task(instance, root): - config = { - "attrs": {"foo": "1", "bar": "2"}, - "account": "baz", - "dependency": "qux", - "envars": {"A": "apple"}, - } - taskname = "test-task" - with patch.multiple(instance, _add_task_dependency=D, _add_task_envar=D) as mocks: - instance._add_task(e=root, config=config, taskname=taskname) - task = root[0] - assert task.tag == "task" - assert task.get("name") == taskname - assert task.get("foo") == "1" - assert task.get("bar") == "2" - mocks["_add_task_dependency"].assert_called_once_with(task, "qux") - 
mocks["_add_task_envar"].assert_called_once_with(task, "A", "apple") - - -def test__RocotoXML__add_task_dependency(instance, root): - config = {"taskdep": {"attrs": {"task": "foo"}}} - instance._add_task_dependency(e=root, config=config) - dependency = root[0] - assert dependency.tag == "dependency" - taskdep = dependency[0] - assert taskdep.tag == "taskdep" - assert taskdep.get("task") == "foo" - - -def test__RocotoXML__add_task_dependency_fail(instance, root): - config = {"unrecognized": "whatever"} - with raises(UWConfigError): + def test__config_validate_fail(self, instance, tmp_path): + cfgfile = tmp_path / "bad.yaml" + with open(cfgfile, "w", encoding="utf-8") as f: + print("not: ok", file=f) + with raises(UWConfigError): + instance._config_validate(config_file=cfgfile) + + def test__add_metatask(self, instance, root): + config = {"metatask_foo": "1", "task_bar": "2", "var": {"baz": "3", "qux": "4"}} + taskname = "test-metatask" + orig = instance._add_metatask + with patch.multiple(instance, _add_metatask=D, _add_task=D) as mocks: + orig(e=root, config=config, taskname=taskname) + metatask = root[0] + assert metatask.tag == "metatask" + assert metatask.get("name") == taskname + mocks["_add_metatask"].assert_called_once_with(metatask, "1", "foo") + mocks["_add_task"].assert_called_once_with(metatask, "2", "bar") + + def test__add_task(self, instance, root): + config = { + "attrs": {"foo": "1", "bar": "2"}, + "account": "baz", + "dependency": "qux", + "envars": {"A": "apple"}, + } + taskname = "test-task" + with patch.multiple(instance, _add_task_dependency=D, _add_task_envar=D) as mocks: + instance._add_task(e=root, config=config, taskname=taskname) + task = root[0] + assert task.tag == "task" + assert task.get("name") == taskname + assert task.get("foo") == "1" + assert task.get("bar") == "2" + mocks["_add_task_dependency"].assert_called_once_with(task, "qux") + mocks["_add_task_envar"].assert_called_once_with(task, "A", "apple") + + def test__add_task_dependency(self, instance, root): + config = {"taskdep": {"attrs": {"task": "foo"}}} instance._add_task_dependency(e=root, config=config) - - -def test__RocotoXML__add_task_envar(instance, root): - instance._add_task_envar(root, "foo", "bar") - envar = root[0] - name, value = envar - assert name.tag == "name" - assert name.text == "foo" - assert value.tag == "value" - assert value.text == "bar" - - -def test__RocotoXML__add_workflow(instance): - config = { - "workflow": {"attrs": {"foo": "1", "bar": "2"}, "cycledefs": "3", "log": "4", "tasks": "5"} - } - with patch.multiple( - instance, _add_workflow_cycledefs=D, _add_workflow_log=D, _add_workflow_tasks=D - ) as mocks: - instance._add_workflow(config=config) - workflow = instance._root - assert workflow.tag == "workflow" - assert workflow.get("foo") == "1" - assert workflow.get("bar") == "2" - mocks["_add_workflow_cycledefs"].assert_called_once_with(workflow, "3") - mocks["_add_workflow_log"].assert_called_once_with(workflow, "4") - mocks["_add_workflow_tasks"].assert_called_once_with(workflow, "5") - - -def test__RocotoXML__add_workflow_cycledefs(instance, root): - config = {"foo": ["1", "2"], "bar": ["3", "4"]} - instance._add_workflow_cycledefs(e=root, config=config) - for i, group, coord in [(0, "foo", "1"), (1, "foo", "2"), (2, "bar", "3"), (3, "bar", "4")]: - assert root[i].tag == "cycledef" - assert root[i].get("group") == group - assert root[i].text == coord - - -def test__RocotoXML__add_workflow_log(instance, root): - path = "/path/to/logfile" - 
instance._add_workflow_log(e=root, logfile=path) - log = root[0] - assert log.tag == "log" - assert log.text == path - - -def test__RocotoXML__add_workflow_tasks(instance, root): - config = {"metatask_foo": "1", "task_bar": "2"} - with patch.multiple(instance, _add_metatask=D, _add_task=D) as mocks: - instance._add_workflow_tasks(e=root, config=config) - mocks["_add_metatask"].assert_called_once_with(root, "1", "foo") - mocks["_add_task"].assert_called_once_with(root, "2", "bar") - - -def test__RocotoXML__insert_doctype(instance): - with patch.object(rocoto._RocotoXML, "_doctype", new_callable=PropertyMock) as _doctype: - _doctype.return_value = "bar" - assert instance._insert_doctype("foo\nbaz\n") == "foo\nbar\nbaz\n" - - -def test__RocotoXML__insert_doctype_none(instance): - with patch.object(rocoto._RocotoXML, "_doctype", new_callable=PropertyMock) as _doctype: - _doctype.return_value = None - assert instance._insert_doctype("foo\nbaz\n") == "foo\nbaz\n" - - -def test__RocotoXML__set_and_render_jobname(instance): - config = {"foo": "{{ jobname }}", "baz": "{{ qux }}"} - assert instance._set_and_render_jobname(config=config, taskname="bar") == { - "jobname": "bar", # set - "foo": "bar", # rendered - "baz": "{{ qux }}", # ignored - } - - -def test__RocotoXML__setattrs(instance, root): - config = {"attrs": {"foo": "1", "bar": "2"}} - instance._set_attrs(e=root, config=config) - assert root.get("foo") == "1" - assert root.get("bar") == "2" - - -def test__RocotoXML__tag_name(instance): - assert instance._tag_name("foo") == ("foo", "") - assert instance._tag_name("foo_bar") == ("foo", "bar") - assert instance._tag_name("foo_bar_baz") == ("foo", "bar_baz") + dependency = root[0] + assert dependency.tag == "dependency" + taskdep = dependency[0] + assert taskdep.tag == "taskdep" + assert taskdep.get("task") == "foo" + + def test__add_task_dependency_fail(self, instance, root): + config = {"unrecognized": "whatever"} + with raises(UWConfigError): + instance._add_task_dependency(e=root, config=config) + + def test__add_task_envar(self, instance, root): + instance._add_task_envar(root, "foo", "bar") + envar = root[0] + name, value = envar + assert name.tag == "name" + assert name.text == "foo" + assert value.tag == "value" + assert value.text == "bar" + + def test__add_workflow(self, instance): + config = { + "workflow": { + "attrs": {"foo": "1", "bar": "2"}, + "cycledefs": "3", + "log": "4", + "tasks": "5", + } + } + with patch.multiple( + instance, _add_workflow_cycledefs=D, _add_workflow_log=D, _add_workflow_tasks=D + ) as mocks: + instance._add_workflow(config=config) + workflow = instance._root + assert workflow.tag == "workflow" + assert workflow.get("foo") == "1" + assert workflow.get("bar") == "2" + mocks["_add_workflow_cycledefs"].assert_called_once_with(workflow, "3") + mocks["_add_workflow_log"].assert_called_once_with(workflow, "4") + mocks["_add_workflow_tasks"].assert_called_once_with(workflow, "5") + + def test__add_workflow_cycledefs(self, instance, root): + config = {"foo": ["1", "2"], "bar": ["3", "4"]} + instance._add_workflow_cycledefs(e=root, config=config) + for i, group, coord in [(0, "foo", "1"), (1, "foo", "2"), (2, "bar", "3"), (3, "bar", "4")]: + assert root[i].tag == "cycledef" + assert root[i].get("group") == group + assert root[i].text == coord + + def test__add_workflow_log(self, instance, root): + path = "/path/to/logfile" + instance._add_workflow_log(e=root, logfile=path) + log = root[0] + assert log.tag == "log" + assert log.text == path + + def 
test__add_workflow_tasks(self, instance, root): + config = {"metatask_foo": "1", "task_bar": "2"} + with patch.multiple(instance, _add_metatask=D, _add_task=D) as mocks: + instance._add_workflow_tasks(e=root, config=config) + mocks["_add_metatask"].assert_called_once_with(root, "1", "foo") + mocks["_add_task"].assert_called_once_with(root, "2", "bar") + + def test__insert_doctype(self, instance): + with patch.object(rocoto._RocotoXML, "_doctype", new_callable=PropertyMock) as _doctype: + _doctype.return_value = "bar" + assert instance._insert_doctype("foo\nbaz\n") == "foo\nbar\nbaz\n" + + def test__insert_doctype_none(self, instance): + with patch.object(rocoto._RocotoXML, "_doctype", new_callable=PropertyMock) as _doctype: + _doctype.return_value = None + assert instance._insert_doctype("foo\nbaz\n") == "foo\nbaz\n" + + def test__set_and_render_jobname(self, instance): + config = {"foo": "{{ jobname }}", "baz": "{{ qux }}"} + assert instance._set_and_render_jobname(config=config, taskname="bar") == { + "jobname": "bar", # set + "foo": "bar", # rendered + "baz": "{{ qux }}", # ignored + } + + def test__setattrs(self, instance, root): + config = {"attrs": {"foo": "1", "bar": "2"}} + instance._set_attrs(e=root, config=config) + assert root.get("foo") == "1" + assert root.get("bar") == "2" + + def test__tag_name(self, instance): + assert instance._tag_name("foo") == ("foo", "") + assert instance._tag_name("foo_bar") == ("foo", "bar") + assert instance._tag_name("foo_bar_baz") == ("foo", "bar_baz") diff --git a/src/uwtools/utils/file.py b/src/uwtools/utils/file.py index 3a51d9ac2..a1b29dd3b 100644 --- a/src/uwtools/utils/file.py +++ b/src/uwtools/utils/file.py @@ -19,7 +19,7 @@ @dataclass(frozen=True) -class _FORMAT: +class FORMAT: """ A mapping from config format names to literal strings. """ @@ -48,9 +48,6 @@ class _FORMAT: yml: str = _yaml -FORMAT = _FORMAT() - - class StdinProxy: """ Reads stdin once but permits multiple reads of its data. 
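A note on the `_STR` → `STR` (and `_FORMAT` → `FORMAT`) change in the patch above: dropping the module-level `STR = _STR()` instantiation works because a frozen dataclass whose fields all carry defaults already exposes those values as plain class attributes. A minimal sketch of the pattern follows, using a hypothetical `COLOR` namespace (not part of the patches) analogous to `STR` and `FORMAT`:

```python
# Sketch of the constant-namespace pattern adopted above (hypothetical names).
# Fields with defaults are ordinary class attributes, so no instance is needed;
# frozen=True makes any instances that are created read-only.
from dataclasses import FrozenInstanceError, dataclass


@dataclass(frozen=True)
class COLOR:
    """
    A lookup map for color-related strings (illustration only).
    """

    red: str = "red"
    green: str = "green"


assert COLOR.red == "red"  # class-level access, no COLOR() required
try:
    COLOR().red = "blue"  # instance mutation is rejected
except FrozenInstanceError:
    print("instances are read-only")
```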
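The next patch (`cli-test-fixup`) replaces assertions of the form `assert mock.called_once_with(...)` with `mock.assert_called_once_with(...)` throughout the test suite. The distinction is easy to miss and worth spelling out: `Mock` fabricates unknown attributes on demand, so `called_once_with` is just an auto-created child mock whose call returns another (truthy) mock, and the surrounding `assert` can never fail. A short self-contained sketch (not part of the patches) demonstrates:

```python
# Why "assert m.called_once_with(...)" is a no-op check, fixed in PATCH 16.
from unittest.mock import Mock

m = Mock()
m("right")

assert m.called_once_with("wrong")  # passes silently: truthy child mock, not a real check

m.assert_called_once_with("right")  # genuine assertion: verifies the recorded call
try:
    m.assert_called_once_with("wrong")
except AssertionError:
    print("genuine assertion catches the bad expectation")
```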
From ac5879ad2c863d57f7966532d1a0056e9a7facdb Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Tue, 31 Oct 2023 14:42:35 -0600 Subject: [PATCH 16/66] cli-test-fixup (#331) --- src/uwtools/cli.py | 137 ++++++++++++----------- src/uwtools/tests/test_cli.py | 205 ++++++++++++++++------------------ 2 files changed, 163 insertions(+), 179 deletions(-) diff --git a/src/uwtools/cli.py b/src/uwtools/cli.py index 3c784ee43..c9cff1d34 100644 --- a/src/uwtools/cli.py +++ b/src/uwtools/cli.py @@ -5,13 +5,13 @@ import datetime import sys from argparse import ArgumentParser as Parser -from argparse import HelpFormatter, Namespace +from argparse import HelpFormatter from argparse import _ArgumentGroup as Group from argparse import _SubParsersAction as Subparsers from dataclasses import dataclass from functools import partial from pathlib import Path -from typing import Callable, Dict, List, Tuple +from typing import Any, Callable, Dict, List, Tuple import uwtools.config.atparse_to_jinja2 import uwtools.config.core @@ -25,7 +25,8 @@ FORMATS = [FORMAT.ini, FORMAT.nml, FORMAT.yaml] TITLE_REQ_ARG = "Required arguments" -SubmodeChecks = List[Callable[[Namespace], Namespace]] +Args = Dict[str, Any] +SubmodeChecks = List[Callable[[Args], Args]] ModeChecks = Dict[str, SubmodeChecks] Checks = Dict[str, ModeChecks] @@ -42,9 +43,9 @@ def main() -> None: setup_logging(quiet=True) try: args, checks = _parse_args(sys.argv[1:]) - for check in checks[args.mode][args.submode]: + for check in checks[args[STR.mode]][args[STR.submode]]: check(args) - setup_logging(quiet=args.quiet, verbose=args.verbose) + setup_logging(quiet=args[STR.quiet], verbose=args[STR.verbose]) log.debug("Command: %s %s", Path(sys.argv[0]).name, " ".join(sys.argv[1:])) modes = { STR.config: _dispatch_config, @@ -52,7 +53,7 @@ def main() -> None: STR.rocoto: _dispatch_rocoto, STR.template: _dispatch_template, } - sys.exit(0 if modes[args.mode](args) else 1) + sys.exit(0 if modes[args[STR.mode]](args) else 1) except Exception as e: # pylint: disable=broad-exception-caught _abort(str(e)) @@ -170,7 +171,7 @@ def _add_subparser_config_validate(subparsers: Subparsers) -> SubmodeChecks: ] -def _dispatch_config(args: Namespace) -> bool: +def _dispatch_config(args: Args) -> bool: """ Dispatch logic for config mode. @@ -181,67 +182,69 @@ def _dispatch_config(args: Namespace) -> bool: STR.realize: _dispatch_config_realize, STR.translate: _dispatch_config_translate, STR.validate: _dispatch_config_validate, - }[args.submode](args) + }[args[STR.submode]](args) -def _dispatch_config_compare(args: Namespace) -> bool: +def _dispatch_config_compare(args: Args) -> bool: """ Dispatch logic for config compare submode. :param args: Parsed command-line args. """ return uwtools.config.core.compare_configs( - config_a_path=args.file_1_path, - config_a_format=args.file_1_format, - config_b_path=args.file_2_path, - config_b_format=args.file_2_format, + config_a_path=args[STR.file1path], + config_a_format=args[STR.file1fmt], + config_b_path=args[STR.file2path], + config_b_format=args[STR.file2fmt], ) -def _dispatch_config_realize(args: Namespace) -> bool: +def _dispatch_config_realize(args: Args) -> bool: """ Dispatch logic for config realize submode. :param args: Parsed command-line args. 
""" return uwtools.config.core.realize_config( - input_file=args.input_file, - input_format=args.input_format, - output_file=args.output_file, - output_format=args.output_format, - values_file=args.values_file, - values_format=args.values_format, - values_needed=args.values_needed, - dry_run=args.dry_run, + input_file=args[STR.infile], + input_format=args[STR.infmt], + output_file=args[STR.outfile], + output_format=args[STR.outfmt], + values_file=args[STR.valsfile], + values_format=args[STR.valsfmt], + values_needed=args[STR.valsneeded], + dry_run=args[STR.dryrun], ) -def _dispatch_config_translate(args: Namespace) -> bool: +def _dispatch_config_translate(args: Args) -> bool: """ Dispatch logic for config translate submode. :param args: Parsed command-line args. """ success = True - if args.input_format == FORMAT.atparse and args.output_format == FORMAT.jinja2: + if args[STR.infmt] == FORMAT.atparse and args[STR.outfmt] == FORMAT.jinja2: uwtools.config.atparse_to_jinja2.convert( - input_file=args.input_file, output_file=args.output_file, dry_run=args.dry_run + input_file=args[STR.infile], + output_file=args[STR.outfile], + dry_run=args[STR.dryrun], ) else: success = False return success -def _dispatch_config_validate(args: Namespace) -> bool: +def _dispatch_config_validate(args: Args) -> bool: """ Dispatch logic for config validate submode. :param args: Parsed command-line args. """ success = True - if args.input_format == FORMAT.yaml: + if args[STR.infmt] == FORMAT.yaml: success = uwtools.config.validator.validate_yaml( - config_file=args.input_file, schema_file=args.schema_file + config_file=args[STR.infile], schema_file=args[STR.schemafile] ) else: success = False @@ -283,27 +286,27 @@ def _add_subparser_forecast_run(subparsers: Subparsers) -> SubmodeChecks: return checks -def _dispatch_forecast(args: Namespace) -> bool: +def _dispatch_forecast(args: Args) -> bool: """ Dispatch logic for forecast mode. :param args: Parsed command-line args. """ - return {STR.run: _dispatch_forecast_run}[args.submode](args) + return {STR.run: _dispatch_forecast_run}[args[STR.submode]](args) -def _dispatch_forecast_run(args: Namespace) -> bool: +def _dispatch_forecast_run(args: Args) -> bool: """ Dispatch logic for forecast run submode. :param args: Parsed command-line args. """ - forecast_class = uwtools.drivers.forecast.CLASSES[args.forecast_model] + forecast_class = uwtools.drivers.forecast.CLASSES[args[STR.model]] return forecast_class( - batch_script=args.batch_script, config_file=args.config_file, dry_run=args.dry_run - ).run( - cycle=args.cycle, - ) + batch_script=args[STR.batch_script], + config_file=args[STR.cfgfile], + dry_run=args[STR.dryrun], + ).run(cycle=args[STR.cycle]) # Mode rocoto @@ -351,7 +354,7 @@ def _add_subparser_rocoto_validate(subparsers: Subparsers) -> SubmodeChecks: return checks -def _dispatch_rocoto(args: Namespace) -> bool: +def _dispatch_rocoto(args: Args) -> bool: """ Dispatch logic for rocoto mode. @@ -361,30 +364,30 @@ def _dispatch_rocoto(args: Namespace) -> bool: STR.realize: _dispatch_rocoto_realize, STR.validate: _dispatch_rocoto_validate, }[ - args.submode + args[STR.submode] ](args) -def _dispatch_rocoto_realize(args: Namespace) -> bool: +def _dispatch_rocoto_realize(args: Args) -> bool: """ Dispatch logic for rocoto realize submode. Validate input and output. :param args: Parsed command-line args. 
""" success = uwtools.rocoto.realize_rocoto_xml( - config_file=args.input_file, output_file=args.output_file + config_file=args[STR.infile], output_file=args[STR.outfile] ) return success -def _dispatch_rocoto_validate(args: Namespace) -> bool: +def _dispatch_rocoto_validate(args: Args) -> bool: """ Dispatch logic for rocoto validate submode. :param args: Parsed command-line args. """ - success = uwtools.rocoto.validate_rocoto_xml(input_xml=args.input_file) + success = uwtools.rocoto.validate_rocoto_xml(input_xml=args[STR.infile]) return success @@ -424,29 +427,29 @@ def _add_subparser_template_render(subparsers: Subparsers) -> SubmodeChecks: return checks + [_check_template_render_vals_args] -def _dispatch_template(args: Namespace) -> bool: +def _dispatch_template(args: Args) -> bool: """ Dispatch logic for template mode. :param args: Parsed command-line args. """ - return {STR.render: _dispatch_template_render}[args.submode](args) + return {STR.render: _dispatch_template_render}[args[STR.submode]](args) -def _dispatch_template_render(args: Namespace) -> bool: +def _dispatch_template_render(args: Args) -> bool: """ Dispatch logic for template render submode. :param args: Parsed command-line args. """ return uwtools.config.templater.render( - input_file=args.input_file, - output_file=args.output_file, - values_file=args.values_file, - values_format=args.values_format, - overrides=_dict_from_key_eq_val_strings(args.key_eq_val_pairs), - values_needed=args.values_needed, - dry_run=args.dry_run, + input_file=args[STR.infile], + output_file=args[STR.outfile], + values_file=args[STR.valsfile], + values_format=args[STR.valsfmt], + overrides=_dict_from_key_eq_val_strings(args[STR.keyvalpairs]), + values_needed=args[STR.valsneeded], + dry_run=args[STR.dryrun], ) @@ -479,7 +482,7 @@ def _add_arg_config_file(group: Group) -> None: def _add_arg_cycle(group: Group) -> None: group.add_argument( - "--cycle", + _switch(STR.cycle), help="The cycle in ISO8601 format", required=True, type=datetime.datetime.fromisoformat, @@ -698,31 +701,28 @@ def _basic_setup(parser: Parser) -> Group: return optional -def _check_file_vs_format(file_arg: str, format_arg: str, args: Namespace) -> Namespace: - a = vars(args) - if a[format_arg] is None: - if a[file_arg] is None: +def _check_file_vs_format(file_arg: str, format_arg: str, args: Args) -> Args: + if args.get(format_arg) is None: + if args.get(file_arg) is None: _abort("Specify %s when %s is not specified" % (_switch(format_arg), _switch(file_arg))) - a[format_arg] = get_file_type(a[file_arg]) + args[format_arg] = get_file_type(args[file_arg]) return args -def _check_quiet_vs_verbose(args) -> Namespace: - a = vars(args) - if a.get(STR.quiet) and a.get(STR.verbose): +def _check_quiet_vs_verbose(args) -> Args: + if args.get(STR.quiet) and args.get(STR.verbose): _abort("Specify at most one of %s, %s" % (_switch(STR.quiet), _switch(STR.verbose))) return args -def _check_template_render_vals_args(args: Namespace) -> Namespace: +def _check_template_render_vals_args(args: Args) -> Args: # In "template render" mode, a values file is optional, as values used to render the template # will be taken from the environment or from key=value command-line pairs by default. But if a # values file IS specified, its format must either be explicitly specified, or deduced from its # extension. 
- a = vars(args) - if a.get(STR.valsfile) is not None: - if a.get(STR.valsfmt) is None: - a[STR.valsfmt] = get_file_type(a[STR.valsfile]) + if args.get(STR.valsfile) is not None: + if args.get(STR.valsfmt) is None: + args[STR.valsfmt] = get_file_type(args[STR.valsfile]) return args @@ -743,7 +743,7 @@ def _formatter(prog: str) -> HelpFormatter: return HelpFormatter(prog, max_help_position=8) -def _parse_args(raw_args: List[str]) -> Tuple[Namespace, Checks]: +def _parse_args(raw_args: List[str]) -> Tuple[Args, Checks]: """ Parse command-line arguments. @@ -761,7 +761,7 @@ def _parse_args(raw_args: List[str]) -> Tuple[Namespace, Checks]: STR.rocoto: _add_subparser_rocoto(subparsers), STR.template: _add_subparser_template(subparsers), } - return parser.parse_args(raw_args), checks + return vars(parser.parse_args(raw_args)), checks def _switch(arg: str) -> str: @@ -784,6 +784,7 @@ class STR: cfgfile: str = "config_file" compare: str = "compare" config: str = "config" + cycle: str = "cycle" dryrun: str = "dry_run" file1fmt: str = "file_1_format" file1path: str = "file_1_path" diff --git a/src/uwtools/tests/test_cli.py b/src/uwtools/tests/test_cli.py index b8ba55ffe..5bbc5b7d2 100644 --- a/src/uwtools/tests/test_cli.py +++ b/src/uwtools/tests/test_cli.py @@ -3,7 +3,6 @@ import logging import sys from argparse import ArgumentParser as Parser -from argparse import Namespace as ns from argparse import _SubParsersAction from typing import List from unittest.mock import patch @@ -90,8 +89,7 @@ def test__check_file_vs_format_fail(capsys, vals): # When reading/writing from/to stdin/stdout, the data format must be specified, since there is # no filename to deduce it from. file_arg, format_arg = vals - args = ns() - vars(args).update({file_arg: None, format_arg: None}) + args = dict(file_arg=None, format_arg=None) with raises(SystemExit): cli._check_file_vs_format(file_arg=file_arg, format_arg=format_arg, args=args) assert ( @@ -103,33 +101,30 @@ def test__check_file_vs_format_fail(capsys, vals): def test__check_file_vs_format_pass_explicit(): # Accept explcitly-specified format, whatever it is. fmt = "jpg" - args = ns() - vars(args).update({STR.infile: "/path/to/input.txt", STR.infmt: fmt}) + args = {STR.infile: "/path/to/input.txt", STR.infmt: fmt} args = cli._check_file_vs_format( file_arg=STR.infile, format_arg=STR.infmt, args=args, ) - assert args.input_format == fmt + assert args[STR.infmt] == fmt @pytest.mark.parametrize("fmt", vars(FORMAT).keys()) def test__check_file_vs_format_pass_implicit(fmt): # The format is correctly deduced for a file with a known extension. - args = ns() - vars(args).update({STR.infile: f"/path/to/input.{fmt}", STR.infmt: None}) + args = {STR.infile: f"/path/to/input.{fmt}", STR.infmt: None} args = cli._check_file_vs_format( file_arg=STR.infile, format_arg=STR.infmt, args=args, ) - assert args.input_format == vars(FORMAT)[fmt] + assert args[STR.infmt] == vars(FORMAT)[fmt] def test__check_quiet_vs_verbose_fail(capsys): log.setLevel(logging.INFO) - args = ns() - vars(args).update({STR.quiet: True, STR.verbose: True}) + args = {STR.quiet: True, STR.verbose: True} with raises(SystemExit): cli._check_quiet_vs_verbose(args) assert ( @@ -139,14 +134,13 @@ def test__check_quiet_vs_verbose_fail(capsys): def test__check_quiet_vs_verbose_ok(): - args = ns(foo=88) + args = {"foo": 88} assert cli._check_quiet_vs_verbose(args) == args def test__check_template_render_vals_args_implicit_fail(): # The values-file format cannot be deduced from the filename. 
- args = ns() - vars(args)[STR.valsfile] = "a.jpg" + args = {STR.valsfile: "a.jpg"} with raises(ValueError) as e: cli._check_template_render_vals_args(args) assert "Cannot deduce format" in str(e.value) @@ -154,24 +148,20 @@ def test__check_template_render_vals_args_implicit_fail(): def test__check_template_render_vals_args_implicit_pass(): # The values-file format is deduced from the filename. - args = ns() - vars(args)[STR.valsfile] = "a.yaml" + args = {STR.valsfile: "a.yaml"} checked = cli._check_template_render_vals_args(args) - assert vars(checked)[STR.valsfmt] == FORMAT.yaml + assert checked[STR.valsfmt] == FORMAT.yaml def test__check_template_render_vals_args_noop_no_valsfile(): # No values file is provided, so format is irrelevant. - args = ns() - vars(args)[STR.valsfile] = None + args = {STR.valsfile: None} assert cli._check_template_render_vals_args(args) == args def test__check_template_render_vals_args_noop_explicit_valsfmt(): # An explicit values format is honored, valid or not. - args = ns() - vars(args)[STR.valsfile] = "a.txt" - vars(args)[STR.valsfmt] = "jpg" + args = {STR.valsfile: "a.txt", STR.valsfmt: "jpg"} assert cli._check_template_render_vals_args(args) == args @@ -191,102 +181,101 @@ def test__dict_from_key_eq_val_strings(): ) def test__dispatch_config(params): submode, funcname = params - args = ns() - vars(args).update({STR.submode: submode}) + args = {STR.submode: submode} with patch.object(cli, funcname) as func: cli._dispatch_config(args) - assert func.called_once_with(args) + func.assert_called_once_with(args) def test__dispatch_config_compare(): - args = ns() - vars(args).update({STR.file1path: 1, STR.file1fmt: 2, STR.file2path: 3, STR.file2fmt: 4}) + args = {STR.file1path: 1, STR.file1fmt: 2, STR.file2path: 3, STR.file2fmt: 4} with patch.object(cli.uwtools.config.core, "compare_configs") as compare_configs: cli._dispatch_config_compare(args) - assert compare_configs.called_once_with(args) + compare_configs.assert_called_once_with( + config_a_path=args[STR.file1path], + config_a_format=args[STR.file1fmt], + config_b_path=args[STR.file2path], + config_b_format=args[STR.file2fmt], + ) def test__dispatch_config_realize(): - args = ns() - vars(args).update( - { - STR.infile: 1, - STR.infmt: 2, - STR.outfile: 3, - STR.outfmt: 4, - STR.valsfile: 5, - STR.valsfmt: 6, - STR.valsneeded: 7, - STR.dryrun: 8, - } - ) + args = { + STR.infile: 1, + STR.infmt: 2, + STR.outfile: 3, + STR.outfmt: 4, + STR.valsfile: 5, + STR.valsfmt: 6, + STR.valsneeded: 7, + STR.dryrun: 8, + } with patch.object(cli.uwtools.config.core, "realize_config") as realize_config: cli._dispatch_config_realize(args) - assert realize_config.called_once_with(args) + realize_config.assert_called_once_with( + input_file=1, + input_format=2, + output_file=3, + output_format=4, + values_file=5, + values_format=6, + values_needed=7, + dry_run=8, + ) def test__dispatch_config_translate_arparse_to_jinja2(): - args = ns() - vars(args).update( - { - STR.infile: 1, - STR.infmt: FORMAT.atparse, - STR.outfile: 3, - STR.outfmt: FORMAT.jinja2, - STR.dryrun: 5, - } - ) + args = { + STR.infile: 1, + STR.infmt: FORMAT.atparse, + STR.outfile: 3, + STR.outfmt: FORMAT.jinja2, + STR.dryrun: 5, + } with patch.object(cli.uwtools.config.atparse_to_jinja2, "convert") as convert: cli._dispatch_config_translate(args) - assert convert.called_once_with(args) + convert.assert_called_once_with(input_file=1, output_file=3, dry_run=5) def test__dispatch_config_translate_unsupported(): - args = ns() - vars(args).update( - {STR.infile: 1, 
STR.infmt: "jpg", STR.outfile: 3, STR.outfmt: "png", STR.dryrun: 5} - ) + args = {STR.infile: 1, STR.infmt: "jpg", STR.outfile: 3, STR.outfmt: "png", STR.dryrun: 5} assert cli._dispatch_config_translate(args) is False def test__dispatch_config_validate_yaml(): - args = ns() - vars(args).update({STR.infile: 1, STR.infmt: FORMAT.yaml, STR.schemafile: 3}) + args = {STR.infile: 1, STR.infmt: FORMAT.yaml, STR.schemafile: 3} with patch.object(cli.uwtools.config.validator, "validate_yaml") as validate_yaml: cli._dispatch_config_validate(args) - assert validate_yaml.called_once_with(args) + validate_yaml.assert_called_once_with(config_file=1, schema_file=3) def test__dispatch_config_validate_unsupported(): - args = ns() - vars(args).update({STR.infile: 1, STR.infmt: "jpg", STR.schemafile: 3}) + args = {STR.infile: 1, STR.infmt: "jpg", STR.schemafile: 3} assert cli._dispatch_config_validate(args) is False @pytest.mark.parametrize("params", [(STR.run, "_dispatch_forecast_run")]) def test__dispatch_forecast(params): submode, funcname = params - args = ns() - vars(args).update({STR.submode: submode}) + args = {STR.submode: submode} with patch.object(cli, funcname) as module: cli._dispatch_forecast(args) - assert module.called_once_with(args) + module.assert_called_once_with(args) def test__dispatch_forecast_run(): - args = ns( - batch_script=None, - cycle="2023-01-01T00:00:00", - config_file=1, - dry_run=True, - forecast_model="foo", - ) - vars(args).update({STR.cfgfile: 1, "forecast_model": "foo"}) + args = { + STR.batch_script: None, + STR.cfgfile: 1, + STR.cycle: "2023-01-01T00:00:00", + STR.dryrun: True, + STR.model: "foo", + } with patch.object(cli.uwtools.drivers.forecast, "FooForecast", create=True) as FooForecast: CLASSES = {"foo": getattr(cli.uwtools.drivers.forecast, "FooForecast")} with patch.object(cli.uwtools.drivers.forecast, "CLASSES", new=CLASSES): cli._dispatch_forecast_run(args) - assert FooForecast.called_once_with(args) + FooForecast.assert_called_once_with(batch_script=None, config_file=1, dry_run=True) FooForecast().run.assert_called_once_with(cycle="2023-01-01T00:00:00") @@ -299,44 +288,34 @@ def test__dispatch_forecast_run(): ) def test__dispatch_rocoto(params): submode, funcname = params - args = ns() - vars(args).update({STR.submode: submode}) + args = {STR.submode: submode} with patch.object(cli, funcname) as module: cli._dispatch_rocoto(args) - assert module.called_once_with(args) + module.assert_called_once_with(args) def test__dispatch_rocoto_realize(): - args = ns() - vars(args).update({STR.infile: 1, STR.outfile: 2}) - with patch.object(cli.uwtools.rocoto, "realize_rocoto_xml") as module: + args = {STR.infile: 1, STR.outfile: 2} + with patch.object(cli.uwtools.rocoto, "realize_rocoto_xml") as realize_rocoto_xml: cli._dispatch_rocoto_realize(args) - assert module.called_once_with(args) + realize_rocoto_xml.assert_called_once_with(config_file=1, output_file=2) def test__dispatch_rocoto_realize_invalid(): - args = ns() - vars(args).update( - { - STR.infile: 1, - STR.outfile: 2, - } - ) + args = {STR.infile: 1, STR.outfile: 2} with patch.object(cli.uwtools.rocoto, "realize_rocoto_xml", return_value=False): assert cli._dispatch_rocoto_realize(args) is False def test__dispatch_rocoto_validate_xml(): - args = ns() - vars(args).update({STR.infile: 1}) - with patch.object(cli.uwtools.rocoto, "validate_rocoto_xml") as validate: + args = {STR.infile: 1} + with patch.object(cli.uwtools.rocoto, "validate_rocoto_xml") as validate_rocoto_xml: cli._dispatch_rocoto_validate(args) - 
assert validate.called_once_with(args) + validate_rocoto_xml.assert_called_once_with(input_xml=1) def test__dispatch_rocoto_validate_xml_invalid(): - args = ns() - vars(args).update({STR.infile: 1, STR.verbose: False}) + args = {STR.infile: 1, STR.verbose: False} with patch.object(cli.uwtools.rocoto, "validate_rocoto_xml", return_value=False): assert cli._dispatch_rocoto_validate(args) is False @@ -344,29 +323,33 @@ def test__dispatch_rocoto_validate_xml_invalid(): @pytest.mark.parametrize("params", [(STR.render, "_dispatch_template_render")]) def test__dispatch_template(params): submode, funcname = params - args = ns() - vars(args).update({STR.submode: submode}) + args = {STR.submode: submode} with patch.object(cli, funcname) as func: cli._dispatch_template(args) - assert func.called_once_with(args) + func.assert_called_once_with(args) def test__dispatch_template_render_yaml(): - args = ns() - vars(args).update( - { - STR.infile: 1, - STR.outfile: 2, - STR.valsfile: 3, - STR.valsfmt: 4, - STR.keyvalpairs: ["foo=88", "bar=99"], - STR.valsneeded: 6, - STR.dryrun: 7, - } - ) - with patch.object(cli.uwtools.config.templater, STR.render) as templater: + args = { + STR.infile: 1, + STR.outfile: 2, + STR.valsfile: 3, + STR.valsfmt: 4, + STR.keyvalpairs: ["foo=88", "bar=99"], + STR.valsneeded: 6, + STR.dryrun: 7, + } + with patch.object(cli.uwtools.config.templater, "render") as render: cli._dispatch_template_render(args) - assert templater.called_once_with(args) + render.assert_called_once_with( + input_file=1, + output_file=2, + values_file=3, + values_format=4, + overrides={"foo": "88", "bar": "99"}, + values_needed=6, + dry_run=7, + ) @pytest.mark.parametrize("quiet", [True]) From 8957f80f209f61cf408ff34dd4d36926d86ab29b Mon Sep 17 00:00:00 2001 From: Brian Weir <94982354+WeirAE@users.noreply.github.com> Date: Wed, 1 Nov 2023 10:14:15 -0500 Subject: [PATCH 17/66] UW-412: expand tests to also not provide optional args (#330) * added cli tests without optional args _dict_from_key_eq_val_strings now can be optional * tests now use parse_args, reverting None option * removed test_cli use of STR.render for "render" * merging fixed; translate/validate tests still fail * tests fixed with hardcoded formats * Removed use of _parse_args() from all cli tests * fixed test typing --- src/uwtools/tests/test_cli.py | 90 +++++++++++++++++++++++++++++++++-- 1 file changed, 86 insertions(+), 4 deletions(-) diff --git a/src/uwtools/tests/test_cli.py b/src/uwtools/tests/test_cli.py index 5bbc5b7d2..a8369e141 100644 --- a/src/uwtools/tests/test_cli.py +++ b/src/uwtools/tests/test_cli.py @@ -224,7 +224,32 @@ def test__dispatch_config_realize(): ) -def test__dispatch_config_translate_arparse_to_jinja2(): +def test__dispatch_config_realize_no_optional(): + args = { + STR.infile: None, + STR.infmt: None, + STR.outfile: None, + STR.outfmt: None, + STR.valsfile: "/foo.vals", + STR.valsfmt: None, + STR.valsneeded: False, + STR.dryrun: False, + } + with patch.object(cli.uwtools.config.core, "realize_config") as realize_config: + cli._dispatch_config_realize(args) + realize_config.assert_called_once_with( + input_file=None, + input_format=None, + output_file=None, + output_format=None, + values_file="/foo.vals", + values_format=None, + values_needed=False, + dry_run=False, + ) + + +def test__dispatch_config_translate_atparse_to_jinja2(): args = { STR.infile: 1, STR.infmt: FORMAT.atparse, @@ -237,16 +262,29 @@ def test__dispatch_config_translate_arparse_to_jinja2(): convert.assert_called_once_with(input_file=1, 
output_file=3, dry_run=5) +def test__dispatch_config_translate_no_optional(): + args = { + STR.dryrun: False, + STR.infile: None, + STR.infmt: FORMAT.atparse, + STR.outfile: None, + STR.outfmt: FORMAT.jinja2, + } + with patch.object(cli.uwtools.config.atparse_to_jinja2, "convert") as convert: + cli._dispatch_config_translate(args) + convert.assert_called_once_with(input_file=None, output_file=None, dry_run=False) + + def test__dispatch_config_translate_unsupported(): args = {STR.infile: 1, STR.infmt: "jpg", STR.outfile: 3, STR.outfmt: "png", STR.dryrun: 5} assert cli._dispatch_config_translate(args) is False -def test__dispatch_config_validate_yaml(): - args = {STR.infile: 1, STR.infmt: FORMAT.yaml, STR.schemafile: 3} +def test__dispatch_config_validate_no_optional(): + args = {STR.infile: None, STR.infmt: FORMAT.yaml, STR.schemafile: "/foo.schema"} with patch.object(cli.uwtools.config.validator, "validate_yaml") as validate_yaml: cli._dispatch_config_validate(args) - validate_yaml.assert_called_once_with(config_file=1, schema_file=3) + validate_yaml.assert_called_once_with(config_file=None, schema_file="/foo.schema") def test__dispatch_config_validate_unsupported(): @@ -254,6 +292,13 @@ def test__dispatch_config_validate_unsupported(): assert cli._dispatch_config_validate(args) is False +def test__dispatch_config_validate_yaml(): + args = {STR.infile: 1, STR.infmt: FORMAT.yaml, STR.schemafile: 3} + with patch.object(cli.uwtools.config.validator, "validate_yaml") as validate_yaml: + cli._dispatch_config_validate(args) + validate_yaml.assert_called_once_with(config_file=1, schema_file=3) + + @pytest.mark.parametrize("params", [(STR.run, "_dispatch_forecast_run")]) def test__dispatch_forecast(params): submode, funcname = params @@ -307,6 +352,13 @@ def test__dispatch_rocoto_realize_invalid(): assert cli._dispatch_rocoto_realize(args) is False +def test__dispatch_rocoto_realize_no_optional(): + args = {STR.infile: None, STR.outfile: None} + with patch.object(cli.uwtools.rocoto, "realize_rocoto_xml") as module: + cli._dispatch_rocoto_realize(args) + module.assert_called_once_with(config_file=None, output_file=None) + + def test__dispatch_rocoto_validate_xml(): args = {STR.infile: 1} with patch.object(cli.uwtools.rocoto, "validate_rocoto_xml") as validate_rocoto_xml: @@ -320,6 +372,13 @@ def test__dispatch_rocoto_validate_xml_invalid(): assert cli._dispatch_rocoto_validate(args) is False +def test__dispatch_rocoto_validate_xml_no_optional(): + args = {STR.infile: None, STR.verbose: False} + with patch.object(cli.uwtools.rocoto, "validate_rocoto_xml") as validate: + cli._dispatch_rocoto_validate(args) + validate.assert_called_once_with(input_xml=None) + + @pytest.mark.parametrize("params", [(STR.render, "_dispatch_template_render")]) def test__dispatch_template(params): submode, funcname = params @@ -329,6 +388,29 @@ def test__dispatch_template(params): func.assert_called_once_with(args) +def test__dispatch_template_render_no_optional(): + args: dict = { + STR.infile: None, + STR.outfile: None, + STR.valsfile: None, + STR.valsfmt: None, + STR.keyvalpairs: [], + STR.valsneeded: False, + STR.dryrun: False, + } + with patch.object(cli.uwtools.config.templater, "render") as render: + cli._dispatch_template_render(args) + render.assert_called_once_with( + input_file=None, + output_file=None, + values_file=None, + values_format=None, + overrides={}, + values_needed=False, + dry_run=False, + ) + + def test__dispatch_template_render_yaml(): args = { STR.infile: 1, From 
5c1477606bbad273952d2fe38045a80cf4da3089 Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Wed, 1 Nov 2023 11:54:24 -0600 Subject: [PATCH 18/66] UW-365 (#332) --- .github/scripts/build-documentation | 47 ------- .github/scripts/install-conda | 2 +- .github/workflows/docs.yaml | 33 ----- .../{make-package.yaml => test.yaml} | 6 +- README.md | 35 +++-- docs/Users_Guide/getting_started.rst | 7 - recipe/meta.json | 2 - recipe/meta.yaml | 1 - src/pyproject.toml | 1 - src/uwtools/files/__init__.py | 2 - src/uwtools/files/gateway/__init__.py | 0 src/uwtools/files/gateway/s3.py | 52 -------- src/uwtools/files/gateway/unix.py | 50 ------- src/uwtools/files/interface/__init__.py | 1 - src/uwtools/files/interface/file_manager.py | 53 -------- src/uwtools/files/model/__init__.py | 1 - src/uwtools/files/model/file.py | 124 ------------------ src/uwtools/scheduler.py | 2 +- src/uwtools/tests/files/__init__.py | 0 src/uwtools/tests/files/gateway/__init__.py | 0 src/uwtools/tests/files/gateway/test_s3.py | 52 -------- src/uwtools/tests/files/gateway/test_unix.py | 95 -------------- src/uwtools/tests/files/interface/__init__.py | 0 .../tests/files/interface/test___init__.py | 14 -- .../files/interface/test_file_manager.py | 46 ------- src/uwtools/tests/files/model/__init__.py | 0 .../tests/files/model/test___init__.py | 15 --- src/uwtools/tests/files/model/test_file.py | 65 --------- src/uwtools/tests/files/test___init__.py | 18 --- src/uwtools/tests/utils/test_memory.py | 2 +- src/uwtools/utils/__init__.py | 1 - 31 files changed, 22 insertions(+), 705 deletions(-) delete mode 100755 .github/scripts/build-documentation delete mode 100644 .github/workflows/docs.yaml rename .github/workflows/{make-package.yaml => test.yaml} (86%) delete mode 100644 src/uwtools/files/__init__.py delete mode 100644 src/uwtools/files/gateway/__init__.py delete mode 100644 src/uwtools/files/gateway/s3.py delete mode 100644 src/uwtools/files/gateway/unix.py delete mode 100644 src/uwtools/files/interface/__init__.py delete mode 100644 src/uwtools/files/interface/file_manager.py delete mode 100644 src/uwtools/files/model/__init__.py delete mode 100644 src/uwtools/files/model/file.py delete mode 100644 src/uwtools/tests/files/__init__.py delete mode 100644 src/uwtools/tests/files/gateway/__init__.py delete mode 100644 src/uwtools/tests/files/gateway/test_s3.py delete mode 100644 src/uwtools/tests/files/gateway/test_unix.py delete mode 100644 src/uwtools/tests/files/interface/__init__.py delete mode 100644 src/uwtools/tests/files/interface/test___init__.py delete mode 100644 src/uwtools/tests/files/interface/test_file_manager.py delete mode 100644 src/uwtools/tests/files/model/__init__.py delete mode 100644 src/uwtools/tests/files/model/test___init__.py delete mode 100644 src/uwtools/tests/files/model/test_file.py delete mode 100644 src/uwtools/tests/files/test___init__.py diff --git a/.github/scripts/build-documentation b/.github/scripts/build-documentation deleted file mode 100755 index 1e37d576d..000000000 --- a/.github/scripts/build-documentation +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -# Set up build environment. - -set -e -source conda/etc/profile.d/conda.sh -conda activate -pip install sphinx sphinx-gallery sphinx_rtd_theme -set +e - -# Path to docs directory relative to top level of repository. $GITHUB_WORKSPACE -# is set if the actions/checkout@v2 action is run first. - -DOCS_DIR=$GITHUB_WORKSPACE/docs - -# Build the documentation. 
- -buildcmd="make -C $DOCS_DIR clean html" -$buildcmd -status=$? - -# Copy HTML output to create an artifact. - -dstdir=artifact/documentation -mkdir -pv $dstdir -cp -r $DOCS_DIR/_build/html/* $dstdir/ - -# If the build failed and/or the warnings log has content, copy it to the -# artifact and documentation directories, and print a report about issues that -# were encountered during the build. - -warnings=$DOCS_DIR/_build/warnings.log - -if [[ $status != 0 || -s $warnings ]]; then - test $status -eq 0 && echo "ERROR: '$buildcmd' failed with status $status." - cp -v $warnings artifact/ - cp -v $warnings $dstdir/ - echo ERROR: Found warnings/errors! - echo Summary: - grep WARNING $warnings - grep ERROR $warnings - grep CRITICAL $warnings - echo Review this log file or download $(basename $warnings) artifact. - exit 1 -fi - -echo INFO: Documentation was built successfully. diff --git a/.github/scripts/install-conda b/.github/scripts/install-conda index 196bd3e26..43ae78f50 100755 --- a/.github/scripts/install-conda +++ b/.github/scripts/install-conda @@ -7,5 +7,5 @@ bash $(basename $url) -bfp conda set -e +ux source conda/etc/profile.d/conda.sh conda activate - conda install -q -y -c maddenp --repodata-fn repodata.json anaconda-client condev + conda install -q -y -c maddenp --repodata-fn repodata.json anaconda-client condev=0.6.0 ) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml deleted file mode 100644 index 58f816d3d..000000000 --- a/.github/workflows/docs.yaml +++ /dev/null @@ -1,33 +0,0 @@ -name: Documentation -on: - push: - branches: - - develop - - feature/* - - main/* - - bugfix/* - paths: - - docs/** - pull_request: - types: [opened, reopened, synchronize] -jobs: - create_documentation: - runs-on: ubuntu-latest - name: Deploy Documentation - steps: - - uses: actions/checkout@v3 - - name: Install conda - run: .github/scripts/install-conda - - name: Build Documentation - run: .github/scripts/build-documentation - - uses: actions/upload-artifact@v3 - if: always() - with: - name: Upload Documentation Artifacts - path: artifact/documentation - - uses: actions/upload-artifact@v3 - if: failure() - with: - name: Upload Warnings Log - path: artifact/doc_warnings.log - if-no-files-found: ignore diff --git a/.github/workflows/make-package.yaml b/.github/workflows/test.yaml similarity index 86% rename from .github/workflows/make-package.yaml rename to .github/workflows/test.yaml index 0bb913b29..c3e66298e 100644 --- a/.github/workflows/make-package.yaml +++ b/.github/workflows/test.yaml @@ -1,4 +1,4 @@ -name: Make Package +name: Test on: pull_request: branches: @@ -10,8 +10,8 @@ on: branches: - develop jobs: - make-package: - name: Make Package + test: + name: Test runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 diff --git a/README.md b/README.md index fb86e276e..2fedf978f 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,15 @@ -[![Documentation](https://github.com/ufs-community/workflow-tools/actions/workflows/docs.yaml/badge.svg)](https://github.com/ufs-community/workflow-tools/actions/workflows/docs.yaml) +[![docs](https://readthedocs.org/projects/unified-workflow/badge/?version=latest)](https://readthedocs.org/projects/unified-workflow/builds/) # workflow-tools -Unified Workflow tools for use with applications with UFS and beyond +Unified Workflow tools for use with UFS applications and beyond + +## Documentation + +Comprehensive documentation is available [here](https://unified-workflow.readthedocs.io/en/latest/). 
## User Installation + - If you are a Developer, skip the User Installation and continue on to the Development section The recommended installation mechanism uses the Python package and virtual-environment manager [conda](https://docs.conda.io/en/latest/). Specifically, these instructions detail the use of the minimal [Miniforge](https://github.com/conda-forge/miniforge) variant of [Miniconda](https://docs.conda.io/en/latest/miniconda.html), built to use, by default, packages from the [conda-forge](https://conda-forge.org/) project. Users of the original Miniconda (or the [Anaconda](https://anaconda.org/) distribution) may need to add the flags `-c conda-forge --override-channels` to `conda build`, `conda create`, and `conda install` commands to specify the use of conda-forge packages. @@ -58,7 +63,7 @@ bash Miniforge3-Linux-aarch64.sh -bfp ~/conda rm Miniforge3-Linux-aarch64.sh source ~/conda/etc/profile.d/conda.sh conda activate -conda install -y -c maddenp condev=0.5.3 +conda install -y -c maddenp condev=0.6.0 cd /to/your/workflow-tools/clone make devshell ``` @@ -121,23 +126,15 @@ The following files in this repo are derived from their counterparts in the [con ``` sh ├── Makefile ├── recipe -│   ├── build.sh -│   ├── channels -│   ├── conda_build_config.yaml -│   ├── meta.json -│   ├── meta.yaml -│   └── run_test.sh +│ ├── build.sh +│ ├── channels +│ ├── conda_build_config.yaml +│ ├── meta.json +│ ├── meta.yaml +│ └── run_test.sh ├── src -│   ├── pyproject.toml -│   ├── setup.py +│ ├── pyproject.toml +│ ├── setup.py ``` Behaviors described in previous sections may rely on these files continuing to follow `condev` conventions. - -## Documentation - -Documentation is automatically generated through [Read the Docs](https://readthedocs.org/) when [develop](https://github.com/ufs-community/workflow-tools/tree/develop) is updated and available [here](https://unified-workflow.readthedocs.io/en/latest/). 
- -[Developer Status](https://github.com/orgs/ufs-community/projects/1) - -[UW Tools Github Pages Site](https://ufs-community.github.io/workflow-tools/) diff --git a/docs/Users_Guide/getting_started.rst b/docs/Users_Guide/getting_started.rst index ddf7075b2..5199a5670 100644 --- a/docs/Users_Guide/getting_started.rst +++ b/docs/Users_Guide/getting_started.rst @@ -22,13 +22,6 @@ Package Dependencies - Source - Description - * - boto3 - - >=1.22.13 - - https://anaconda.org/conda-forge/boto3 - - - allows Python developers to write software - that makes use of services like Amazon - S3 and Amazon EC2 * - black - - diff --git a/recipe/meta.json b/recipe/meta.json index 28822e3a7..e4a3311ed 100644 --- a/recipe/meta.json +++ b/recipe/meta.json @@ -5,7 +5,6 @@ "packages": { "dev": [ "black =23.3.*", - "boto3 =1.22.*", "coverage =7.2.*", "docformatter =1.7.*", "f90nml =1.4.*", @@ -22,7 +21,6 @@ "pyyaml =6.0.*" ], "run": [ - "boto3 =1.22.*", "f90nml =1.4.*", "jinja2 =3.0.*", "jsonschema =4.17.*", diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 99cf8ff84..b175d7955 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -11,7 +11,6 @@ requirements: host: - pip run: - - boto3 1.22.* - f90nml 1.4.* - jinja2 3.0.* - jsonschema 4.17.* diff --git a/src/pyproject.toml b/src/pyproject.toml index 139ae369f..eff58bfd7 100644 --- a/src/pyproject.toml +++ b/src/pyproject.toml @@ -48,4 +48,3 @@ extension-pkg-allow-list = [ recursive = true [tool.pytest.ini_options] -filterwarnings = ["ignore::DeprecationWarning:botocore.*"] diff --git a/src/uwtools/files/__init__.py b/src/uwtools/files/__init__.py deleted file mode 100644 index d3fd70487..000000000 --- a/src/uwtools/files/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .interface import FileManager, S3FileManager, UnixFileManager -from .model import S3, File, Prefixes, Unix diff --git a/src/uwtools/files/gateway/__init__.py b/src/uwtools/files/gateway/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/uwtools/files/gateway/s3.py b/src/uwtools/files/gateway/s3.py deleted file mode 100644 index f9630ea40..000000000 --- a/src/uwtools/files/gateway/s3.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Gateway for interacting with S3. -""" - -import os -import pathlib -from typing import Optional - -import boto3 -from botocore.exceptions import ClientError - -from uwtools.logging import log - -S3_CLIENT = boto3.client("s3") - - -def download_file(bucket_name: str, source_name: str, target_name: str) -> None: - """ - Download files from S3. - """ - S3_CLIENT.download_file(bucket_name, source_name, target_name) - - -def exists(_path: pathlib.Path) -> bool: - """ - Returns True if file exists. - """ - try: - S3_CLIENT.head_object(Bucket=_path.parts[0], Key="/".join(_path.parts[1:])) - except ClientError: - return False - return True - - -def upload_file(source_path: str, bucket: str, target_name: Optional[str] = None) -> bool: - """ - Upload a file to an S3 bucket. - """ - - # If S3 object_name was not specified, use file name. - - if target_name is None: - target_name = os.path.basename(source_path) - - # Upload the file. - - try: - S3_CLIENT.upload_file(source_path, bucket, target_name) - except ClientError as error: - log.error(error) - return False - return True diff --git a/src/uwtools/files/gateway/unix.py b/src/uwtools/files/gateway/unix.py deleted file mode 100644 index cf23d596a..000000000 --- a/src/uwtools/files/gateway/unix.py +++ /dev/null @@ -1,50 +0,0 @@ -""" -Unix-based, threaded, local file copying. 
-""" - -import shutil -from concurrent.futures import ThreadPoolExecutor, wait -from pathlib import Path -from typing import List, Tuple - -from uwtools.files.model import File -from uwtools.logging import log - - -class Copier: - """ - A threaded file copier. - """ - - def __init__(self, srcs: List[File], dsts: List[Path]) -> None: - self.pairs: List[Tuple[Path, Path]] = list(zip([Path(x.path) for x in srcs], dsts)) - - def run(self) -> None: - """ - Copy each src->dst pair in a thread. - """ - executor = ThreadPoolExecutor() - futures = [executor.submit(_copy, src, dst) for src, dst in self.pairs] - wait(futures) - - -def copy(srcs: List[File], dsts: List[Path]) -> None: - """ - Copies each source item to corresponding destination item. - """ - Copier(srcs, dsts).run() - - -def _copy(src: Path, dst: Path) -> None: - """ - Copies file or directory from source to destination. - - Directories are copied recursively. - """ - log.debug("Copying %s to %s", src, dst) - if src.is_file(): - shutil.copy(src, dst) - else: - if dst.is_dir(): - shutil.rmtree(dst) - shutil.copytree(src, dst) diff --git a/src/uwtools/files/interface/__init__.py b/src/uwtools/files/interface/__init__.py deleted file mode 100644 index 3889bfe69..000000000 --- a/src/uwtools/files/interface/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .file_manager import * diff --git a/src/uwtools/files/interface/file_manager.py b/src/uwtools/files/interface/file_manager.py deleted file mode 100644 index dabc37104..000000000 --- a/src/uwtools/files/interface/file_manager.py +++ /dev/null @@ -1,53 +0,0 @@ -import os -import pathlib -from abc import ABC, abstractmethod -from typing import List - -from uwtools.files.gateway import s3, unix -from uwtools.files.model import S3, File, Prefixes - - -class FileManager(ABC): - """ - Represents file operations in an environment. - """ - - @abstractmethod # pragma: no cover - def copy(self, source: List[File], destination: List): - """ - Copies source to destination. - """ - raise NotImplementedError - - @staticmethod - def get_file_manager(_type: Prefixes): - """ - Returns a file manager with source and destination. - """ - _map = { - Prefixes.UNIX: UnixFileManager, - Prefixes.S3: S3FileManager, - } - return _map[_type]() - - -class S3FileManager(FileManager): - """ - S3 based file operations. - """ - - def copy(self, source: List[File], destination: List[S3]): - """ - Copies source to destination. - """ - for src, dest in zip(source, destination): - s3.upload_file(src.path, "bucket_name_here", os.path.basename(dest.path)) - - -class UnixFileManager(FileManager): - """ - UNIX based file operations. - """ - - def copy(self, source: List[File], destination: List[str]): - unix.copy(list(source), [pathlib.Path(x) for x in list(destination)]) diff --git a/src/uwtools/files/model/__init__.py b/src/uwtools/files/model/__init__.py deleted file mode 100644 index 6dbc21905..000000000 --- a/src/uwtools/files/model/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .file import * diff --git a/src/uwtools/files/model/file.py b/src/uwtools/files/model/file.py deleted file mode 100644 index de61450ad..000000000 --- a/src/uwtools/files/model/file.py +++ /dev/null @@ -1,124 +0,0 @@ -from abc import ABC, abstractmethod -from enum import Enum -from glob import glob -from pathlib import Path -from typing import Any, List - - -class Prefixes(Enum): - """ - Supported URI file prefixes. - """ - - S3 = "s3://" - UNIX = "file://" - - -class File(ABC): - """ - Represents a file. 
- """ - - def __init__(self, uri: str): - self._uri = uri - if not isinstance(self._uri, str): - raise TypeError("Argument 'uri' must be type str but is type %s" % type(self._uri)) - if not self._uri.startswith(self.uri_prefix): - raise ValueError( - "Path %s does not start with expected URI prefix %s" % (self._uri, self.uri_prefix) - ) - if not self.exists: - raise FileNotFoundError("File not found: %s" % self._uri) - - def __str__(self) -> str: - return self._uri - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} {self._uri}/>" - - @property - def path(self) -> str: - """ - Returns the file path without a prefix. - """ - return self._uri.split("://", maxsplit=1)[1] - - @property - @abstractmethod # pragma: no cover - def dir(self) -> List[Any]: - """ - Returns the contents of the directory recursively. - """ - raise NotImplementedError() - - @property - @abstractmethod # pragma: no cover - def exists(self) -> bool: - """ - Returns true if the file exists. - """ - raise NotImplementedError() - - @property - @abstractmethod # pragma: no cover - def uri_prefix(self) -> str: - """ - The URI prefix for this file type. - """ - raise NotImplementedError() - - -class S3(File): - """ - Represents an AWS S3 file. - """ - - @property - def dir(self) -> List[Any]: - """ - Returns the contents of the directory recursively. - """ - return [] - - @property - def exists(self) -> bool: - """ - Returns true if the file exists. - """ - return True - - @property - def uri_prefix(self) -> str: - """ - The URI prefix for this file type. - """ - return Prefixes.S3.value - - -class Unix(File): - """ - Represents a Unix file. - """ - - @property - def dir(self) -> List[Any]: - """ - Returns the contents of the directory recursively. - """ - if Path(self.path).is_file(): - return glob(self.path) - return glob(str(Path(self.path) / "*")) - - @property - def exists(self) -> bool: - """ - Returns true if the file exists. - """ - return Path(self.path).exists() - - @property - def uri_prefix(self) -> str: - """ - The URI prefix for this file type. - """ - return Prefixes.UNIX.value diff --git a/src/uwtools/scheduler.py b/src/uwtools/scheduler.py index 98a1cbb47..e31b832b9 100644 --- a/src/uwtools/scheduler.py +++ b/src/uwtools/scheduler.py @@ -11,8 +11,8 @@ from uwtools.logging import log from uwtools.types import DefinitePath, OptionalPath -from uwtools.utils import Memory from uwtools.utils.file import writable +from uwtools.utils.memory import Memory from uwtools.utils.processing import execute NONEISH = [None, "", " ", "None", "none", False] diff --git a/src/uwtools/tests/files/__init__.py b/src/uwtools/tests/files/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/uwtools/tests/files/gateway/__init__.py b/src/uwtools/tests/files/gateway/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/uwtools/tests/files/gateway/test_s3.py b/src/uwtools/tests/files/gateway/test_s3.py deleted file mode 100644 index 6631d8017..000000000 --- a/src/uwtools/tests/files/gateway/test_s3.py +++ /dev/null @@ -1,52 +0,0 @@ -# pylint: disable=missing-function-docstring,redefined-outer-name -""" -Tests for uwtools.files.gateway.s3 module. 
-""" - -from pathlib import Path -from unittest.mock import patch - -from pytest import fixture - -from uwtools.files.gateway import s3 - - -@fixture -def exc(): - return s3.ClientError(operation_name="NA", error_response={}) - - -@fixture -def upload_kwargs(): - return dict(source_path="/foo/bar", bucket="bucket") - - -def test_download_file(): - kwargs = dict(bucket_name="bucket", source_name="source", target_name="target") - with patch.object(s3, "S3_CLIENT") as S3_CLIENT: - s3.download_file(**kwargs) - assert S3_CLIENT.download_file.called_once_with(*kwargs.values()) - - -def test_exists_false(exc): - with patch.object(s3, "S3_CLIENT") as S3_CLIENT: - S3_CLIENT.head_object.side_effect = exc - assert not s3.exists(Path("/foo/bar")) - - -def test_exists_true(): - with patch.object(s3, "S3_CLIENT") as S3_CLIENT: - S3_CLIENT.head_object.return_value = True - assert s3.exists(Path("/foo/bar")) - - -def test_upload_file_failure(exc, upload_kwargs): - with patch.object(s3, "S3_CLIENT") as S3_CLIENT: - S3_CLIENT.upload_file.side_effect = exc - assert not s3.upload_file(**upload_kwargs) - - -def test_upload_file_success(upload_kwargs): - with patch.object(s3, "S3_CLIENT") as S3_CLIENT: - assert s3.upload_file(**upload_kwargs) - S3_CLIENT.upload_file.assert_called_once_with(*upload_kwargs.values(), "bar") diff --git a/src/uwtools/tests/files/gateway/test_unix.py b/src/uwtools/tests/files/gateway/test_unix.py deleted file mode 100644 index 6b97fdb97..000000000 --- a/src/uwtools/tests/files/gateway/test_unix.py +++ /dev/null @@ -1,95 +0,0 @@ -# pylint: disable=missing-function-docstring,protected-access,redefined-outer-name -""" -Tests for uwtools.files.gateway.unix module. -""" - -from pytest import fixture - -from uwtools.files.gateway import unix -from uwtools.files.model.file import Unix - - -@fixture -def dirs(tmp_path): - f1 = tmp_path / "src" / "f1" - f2 = tmp_path / "src" / "subdir" / "f2" - for path in f1, f2: - path.parent.mkdir(parents=True) - with open(path, "w", encoding="utf-8") as f: - print(path.name, file=f) - assert f1.is_file() - assert f2.is_file() - src = tmp_path / "src" - dst = tmp_path / "dst" - return src, dst - - -@fixture -def files2copy(dirs): - src, dst = dirs - src1, src2 = [Unix(x.as_uri()) for x in (src / "f1", src / "subdir" / "f2")] - dst1, dst2 = dst / "f1", dst / "f2" - dst.mkdir() - return src1, src2, dst1, dst2 - - -def content(path): - return [x.relative_to(path) for x in path.glob("**/*")] - - -def test_Copier_dir(dirs): - src, dst = dirs - assert not dst.is_dir() - c = unix.Copier(srcs=[Unix(src.as_uri())], dsts=[dst]) - c.run() - assert content(src) == content(dst) - - -def test_Copier_files(files2copy): - src1, src2, dst1, dst2 = files2copy - c = unix.Copier(srcs=[src1, src2], dsts=[dst1, dst2]) - assert not dst1.is_file() - assert not dst2.is_file() - c.run() - assert dst1.is_file() - assert dst2.is_file() - - -def test_copy_dir(dirs): - src, dst = dirs - assert not dst.is_dir() - unix.copy(srcs=[Unix(src.as_uri())], dsts=[dst]) - assert content(src) == content(dst) - - -def test_copy_files(files2copy): - src1, src2, dst1, dst2 = files2copy - assert not dst1.is_file() - assert not dst2.is_file() - unix.copy(srcs=[src1, src2], dsts=[dst1, dst2]) - assert dst1.is_file() - assert dst2.is_file() - - -def test__copy_dir_create(dirs): - src, dst = dirs - assert not dst.is_dir() - unix._copy(src=src, dst=dst) - assert content(src) == content(dst) - - -def test__copy_dir_replace(dirs): - src, dst = dirs - dst.mkdir() - (dst / "junk").touch() - 
unix._copy(src=src, dst=dst) - assert content(src) == content(dst) # i.e. junk is gone - - -def test__copy_file(dirs): - src, dst = dirs - srcfile, dstfile = src / "f1", dst / "f1" - assert not dstfile.is_file() - dst.mkdir() - unix._copy(src=srcfile, dst=dstfile) - assert dstfile.is_file() diff --git a/src/uwtools/tests/files/interface/__init__.py b/src/uwtools/tests/files/interface/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/uwtools/tests/files/interface/test___init__.py b/src/uwtools/tests/files/interface/test___init__.py deleted file mode 100644 index 9609ba821..000000000 --- a/src/uwtools/tests/files/interface/test___init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# pylint: disable=missing-function-docstring - - -from uwtools.files import interface - - -def test_imports(): - # Test that the expected modules have been imported into this package: - for member in ( - "FileManager", - "S3FileManager", - "UnixFileManager", - ): - assert hasattr(interface, member) diff --git a/src/uwtools/tests/files/interface/test_file_manager.py b/src/uwtools/tests/files/interface/test_file_manager.py deleted file mode 100644 index b45642939..000000000 --- a/src/uwtools/tests/files/interface/test_file_manager.py +++ /dev/null @@ -1,46 +0,0 @@ -# pylint: disable=missing-function-docstring -""" -Tests for uwtools.files FileManager and subclasses. -""" - -import shutil -from unittest.mock import patch - -from uwtools.files import FileManager, S3FileManager, UnixFileManager -from uwtools.files.gateway import s3 -from uwtools.files.model import S3, Prefixes, Unix -from uwtools.tests.support import fixture_uri - - -def test_FileManager_constructor_S3(): - assert isinstance(FileManager.get_file_manager(Prefixes.S3), S3FileManager) - - -def test_FileManager_constructor_Unix(): - assert isinstance(FileManager.get_file_manager(Prefixes.UNIX), UnixFileManager) - - -@patch.object(s3, "upload_file", return_value=None) -def test_S3_FileManager(upload_file): - source = Unix(fixture_uri("files/a.txt")) - destination = S3("s3://tests/fixtures/files/b.txt") - fm: S3FileManager = FileManager.get_file_manager(Prefixes.S3) - fm.copy([source], [destination]) - assert upload_file.copy.called_once_with([source], [destination]) - - -@patch.object(shutil, "copy", return_value=None) -def test_Unix_FileManager(copy): - source = Unix(fixture_uri("files/a.txt")) - destination = Unix(fixture_uri("files/b.txt")) - fm: UnixFileManager = FileManager.get_file_manager(Prefixes.UNIX) - fm.copy([source], [destination.path]) - assert copy.copy.called_once_with([source], [destination]) - - -def test_Unix_FileManager_Threaded(tmp_path): - source = Unix(fixture_uri("files")) - destination = Unix(tmp_path.as_uri()) - fm: UnixFileManager = FileManager.get_file_manager(Prefixes.UNIX) - fm.copy([source], [destination.path]) - assert len(list(tmp_path.glob("*.txt"))) == 3 diff --git a/src/uwtools/tests/files/model/__init__.py b/src/uwtools/tests/files/model/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/uwtools/tests/files/model/test___init__.py b/src/uwtools/tests/files/model/test___init__.py deleted file mode 100644 index d36581790..000000000 --- a/src/uwtools/tests/files/model/test___init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# pylint: disable=missing-function-docstring - - -from uwtools.files import model - - -def test_imports(): - # Test that the expected modules have been imported into this package: - for member in ( - "File", - "Prefixes", - "S3", - "Unix", - ): - assert 
hasattr(model, member) diff --git a/src/uwtools/tests/files/model/test_file.py b/src/uwtools/tests/files/model/test_file.py deleted file mode 100644 index aad4ffab2..000000000 --- a/src/uwtools/tests/files/model/test_file.py +++ /dev/null @@ -1,65 +0,0 @@ -# pylint: disable=missing-function-docstring - -from glob import glob - -from pytest import raises - -from uwtools.files import S3, Unix -from uwtools.tests.support import fixture_path, fixture_uri - - -def test_dir_file(): - """ - Tests dir method given a file. - """ - suffix = "files/a.txt" - my_init = Unix(fixture_uri(suffix)) - assert my_init.dir == glob(fixture_path(suffix)) - - -def test_dir_path(): - """ - Tests dir method given a path, i.e. not a file. - """ - my_init = Unix(fixture_uri()) - assert my_init.dir == glob(fixture_path("*")) - - -def test_S3(): - uri = "s3://foo/bar/files/a.txt" - obj = S3(uri) - assert obj.exists - assert obj.path == "foo/bar/files/a.txt" - assert str(obj).startswith("s3://") - assert str(obj).endswith("files/a.txt") - assert repr(obj).startswith("") - assert not obj.dir - - -def test_Unix(): - uri = fixture_uri("files/a.txt") - obj = Unix(uri) - assert obj.exists - assert obj.path == uri.replace("file://", "") - assert str(obj).startswith("file://") - assert str(obj).endswith("files/a.txt") - assert repr(obj).startswith("") - - -def test_Unix_bad_file(tmp_path): - with raises(FileNotFoundError): - Unix(uri=(tmp_path / "no-such-file").as_uri()) - - -def test_Unix_bad_path_type(): - with raises(TypeError): - Unix(uri=None) # type: ignore - - -def test_Unix_bad_prefixes(): - with raises(ValueError): - Unix(uri="foo:///bad/prefix") - with raises(ValueError): - Unix(uri="/no/prefix/at/all") diff --git a/src/uwtools/tests/files/test___init__.py b/src/uwtools/tests/files/test___init__.py deleted file mode 100644 index 9db6c987d..000000000 --- a/src/uwtools/tests/files/test___init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# pylint: disable=missing-function-docstring - - -from uwtools import files - - -def test_imports(): - # Test that the expected modules have been imported into this package: - for member in ( - "File", - "FileManager", - "Prefixes", - "S3", - "S3FileManager", - "Unix", - "UnixFileManager", - ): - assert hasattr(files, member) diff --git a/src/uwtools/tests/utils/test_memory.py b/src/uwtools/tests/utils/test_memory.py index 1f76e560c..4930d2d79 100644 --- a/src/uwtools/tests/utils/test_memory.py +++ b/src/uwtools/tests/utils/test_memory.py @@ -3,7 +3,7 @@ Tests for uwtools.utils.file_helpers module. 
""" -from uwtools.utils import Memory +from uwtools.utils.memory import Memory def test_memory_conversions(): diff --git a/src/uwtools/utils/__init__.py b/src/uwtools/utils/__init__.py index db0261ebe..e69de29bb 100644 --- a/src/uwtools/utils/__init__.py +++ b/src/uwtools/utils/__init__.py @@ -1 +0,0 @@ -from .memory import Memory From 6a99d9e5485378009152d854c5ae252aac44e4c3 Mon Sep 17 00:00:00 2001 From: Emily Carpenter <137525341+elcarpenterNOAA@users.noreply.github.com> Date: Thu, 2 Nov 2023 15:46:34 -0400 Subject: [PATCH 19/66] UW-408: Refactor of forecast's run method (#327) * added dry run to prepare directories * added logging to prepare config files * added prepare batch script method * full command -> prepare & execute * WIP, dry test failing * from UW Group * failing local execution test * before-the-fact log messages * updated tests * updated run_cmd doc string * updated var name * fixed spelling * doc string updates * doc string changes --- src/uwtools/drivers/driver.py | 17 ++- src/uwtools/drivers/forecast.py | 146 +++++++++++++-------- src/uwtools/tests/drivers/test_forecast.py | 52 ++++++-- 3 files changed, 144 insertions(+), 71 deletions(-) diff --git a/src/uwtools/drivers/driver.py b/src/uwtools/drivers/driver.py index d9a669815..a0a97c6c1 100644 --- a/src/uwtools/drivers/driver.py +++ b/src/uwtools/drivers/driver.py @@ -81,7 +81,7 @@ def run_cmd(self) -> str: """ The command-line command to run the NWP tool. - :return: The fully formed string that executes the program + :return: Collated string that contains MPI command, run time arguments, and exec name. """ run_cmd = self._platform_config["mpicmd"] exec_name = self._config["exec_name"] @@ -107,7 +107,10 @@ def schema_file(self) -> Path: @staticmethod def stage_files( - run_directory: str, files_to_stage: Dict[str, Union[list, str]], link_files: bool = False + run_directory: str, + files_to_stage: Dict[str, Union[list, str]], + link_files: bool = False, + dry_run: bool = False, ) -> None: """ Creates destination files in run directory and copies or links contents from the source path @@ -129,9 +132,13 @@ def stage_files( link_files, ) else: - link_or_copy(src_path_or_paths, dst_path) # type: ignore - msg = f"File {src_path_or_paths} staged as {dst_path}" - log.info(msg) + if dry_run: + msg = f"File {src_path_or_paths} would be staged as {dst_path}" + log.info(msg) + else: + msg = f"File {src_path_or_paths} staged as {dst_path}" + log.info(msg) + link_or_copy(src_path_or_paths, dst_path) # type: ignore # Private methods diff --git a/src/uwtools/drivers/forecast.py b/src/uwtools/drivers/forecast.py index e58f8e402..c55f694c6 100644 --- a/src/uwtools/drivers/forecast.py +++ b/src/uwtools/drivers/forecast.py @@ -8,7 +8,7 @@ from collections.abc import Mapping from datetime import datetime from pathlib import Path -from typing import Dict, Optional +from typing import Dict, List, Optional, Tuple from uwtools.config.core import FieldTableConfig, NMLConfig, YAMLConfig from uwtools.drivers.driver import Driver @@ -46,13 +46,15 @@ def batch_script(self) -> BatchScript: :return: The batch script object with all run commands needed for executing the program. 
""" pre_run = self._mpi_env_variables("\n") - bs = self.scheduler.batch_script - bs.append(pre_run) - bs.append(self.run_cmd()) - return bs + batch_script = self.scheduler.batch_script + batch_script.append(pre_run) + batch_script.append(self.run_cmd()) + return batch_script @staticmethod - def create_directory_structure(run_directory: DefinitePath, exist_act: str = "delete") -> None: + def create_directory_structure( + run_directory: DefinitePath, exist_act: str = "delete", dry_run: bool = False + ) -> None: """ Collects the name of the desired run directory, and has an optional flag for what to do if the run directory specified already exists. Creates the run directory and adds @@ -63,6 +65,8 @@ def create_directory_structure(run_directory: DefinitePath, exist_act: str = "de to a preexisting run directory. The default is to delete the old run directory. """ + run_directory = Path(run_directory) + # Caller should only provide correct argument. if exist_act not in ["delete", "rename", "quit"]: @@ -70,20 +74,26 @@ def create_directory_structure(run_directory: DefinitePath, exist_act: str = "de # Exit program with error if caller chooses to quit. - if exist_act == "quit" and os.path.isdir(run_directory): + if exist_act == "quit" and run_directory.is_dir(): log.critical("User chose quit option when creating directory") sys.exit(1) # Delete or rename directory if it exists. - handle_existing(str(run_directory), exist_act) + if dry_run and run_directory.is_dir(): + log.info(f"Would {exist_act} directory") + else: + handle_existing(str(run_directory), exist_act) # Create new run directory with two required subdirectories. for subdir in ("INPUT", "RESTART"): - path = os.path.join(run_directory, subdir) - log.info("Creating directory: %s", path) - os.makedirs(path) + path = run_directory / subdir + if dry_run: + log.info("Would create directory: %s", path) + else: + log.info("Creating directory: %s", path) + os.makedirs(path) def create_field_table(self, output_path: OptionalPath) -> None: """ @@ -127,6 +137,23 @@ def output(self) -> None: ??? """ + def prepare_directories(self) -> Path: + """ + Prepares the run directory and stages static and cycle-dependent files. + """ + run_directory = self._config["run_dir"] + self.create_directory_structure(run_directory, "delete", dry_run=self._dry_run) + + self._prepare_config_files(Path(run_directory)) + + self._config["cycle-dependent"].update(self._define_boundary_files()) + + for file_category in ["static", "cycle-dependent"]: + self.stage_files( + run_directory, self._config[file_category], link_files=True, dry_run=self._dry_run + ) + return run_directory + def requirements(self) -> None: """ ??? @@ -151,40 +178,15 @@ def run(self, cycle: datetime) -> bool: :return: Did the FV3 run exit with success status? """ - # Prepare directories. - run_directory = self._config["run_dir"] - self.create_directory_structure(run_directory, "delete") - - self._prepare_config_files(Path(run_directory)) - - self._config["cycle-dependent"].update(self._define_boundary_files()) - - for file_category in ["static", "cycle-dependent"]: - self.stage_files(run_directory, self._config[file_category], link_files=True) - - if self._batch_script is not None: - batch_script = self.batch_script() - outpath = Path(run_directory) / self._batch_script - - if self._dry_run: - # Apply switch to allow user to view the run command of config. - # This will not run the job. 
- log.info("Batch Script:") - batch_script.dump(None) - return True - - batch_script.dump(outpath) - return self.scheduler.submit_job(outpath) - - pre_run = self._mpi_env_variables(" ") - full_cmd = f"{pre_run} {self.run_cmd()}" + status, output = ( + self._run_via_batch_submission() + if self._batch_script + else self._run_via_local_execution() + ) if self._dry_run: - log.info("Would run: ") - print(full_cmd, file=sys.stdout) - return True - - result = execute(cmd=full_cmd) - return result.success + for line in output: + log.info(line) + return status @property def schema_file(self) -> Path: @@ -229,15 +231,6 @@ def _define_boundary_files(self) -> Dict[str, str]: return boundary_files - def _prepare_config_files(self, run_directory: Path) -> None: - """ - Collect all the configuration files needed for FV3. - """ - - self.create_field_table(run_directory / "field_table") - self.create_model_configure(run_directory / "model_configure") - self.create_namelist(run_directory / "input.nml") - def _mpi_env_variables(self, delimiter: str = " ") -> str: """ Set the environment variables needed for the MPI job. @@ -253,5 +246,52 @@ def _mpi_env_variables(self, delimiter: str = " ") -> str: } return delimiter.join([f"{k}={v}" for k, v in envvars.items()]) + def _prepare_config_files(self, run_directory: Path) -> None: + """ + Collect all the configuration files needed for FV3. + """ + if self._dry_run: + for call in ("field_table", "model_configure", "input.nml"): + log.info(f"Would prepare: {run_directory}/{call}") + else: + self.create_field_table(run_directory / "field_table") + self.create_model_configure(run_directory / "model_configure") + self.create_namelist(run_directory / "input.nml") + + def _run_via_batch_submission(self) -> Tuple[bool, List[str]]: + """ + Prepares and submits a batch script. + + :return: A tuple containing the success status of submitting the job to the batch system, + and a list of strings that make up the batch script. + """ + run_directory = self.prepare_directories() + batch_script = self.batch_script() + batch_lines = ["Batch script:", *str(batch_script).split("\n")] + if self._dry_run: + return True, batch_lines + + assert self._batch_script is not None + outpath = run_directory / self._batch_script + + batch_script.dump(outpath) + return self.scheduler.submit_job(outpath), batch_lines + + def _run_via_local_execution(self) -> Tuple[bool, List[str]]: + """ + Collects the necessary MPI environment variables in order to construct full run command, + then executes said command. + + :return: A tuple containing a boolean of the success status of the FV3 run and a list of + strings that make up the full command line. 
+ """ + pre_run = self._mpi_env_variables(" ") + full_cmd = f"{pre_run} {self.run_cmd()}" + command_lines = ["Command:", *full_cmd.split("\n")] + if self._dry_run: + return True, command_lines + result = execute(cmd=full_cmd) + return result.success, command_lines + CLASSES = {"FV3": FV3Forecast} diff --git a/src/uwtools/tests/drivers/test_forecast.py b/src/uwtools/tests/drivers/test_forecast.py index 7cfa3a840..6a67ccf96 100644 --- a/src/uwtools/tests/drivers/test_forecast.py +++ b/src/uwtools/tests/drivers/test_forecast.py @@ -6,7 +6,8 @@ import logging import os from pathlib import Path -from unittest.mock import patch +from types import SimpleNamespace as ns +from unittest.mock import ANY, patch import pytest from pytest import fixture, raises @@ -17,7 +18,7 @@ from uwtools.drivers.driver import Driver from uwtools.drivers.forecast import FV3Forecast from uwtools.logging import log -from uwtools.tests.support import compare_files, fixture_path +from uwtools.tests.support import compare_files, fixture_path, logged def test_batch_script(): @@ -332,7 +333,7 @@ def test_run_direct(fv3_mpi_assets, fv3_run_assets): @pytest.mark.parametrize("with_batch_script", [True, False]) -def test_FV3Forecast_run_dry_run(capsys, fv3_mpi_assets, fv3_run_assets, with_batch_script): +def test_FV3Forecast_run_dry_run(caplog, fv3_mpi_assets, fv3_run_assets, with_batch_script): log.setLevel(logging.INFO) batch_script, config_file, config = fv3_run_assets if with_batch_script: @@ -344,23 +345,48 @@ def test_FV3Forecast_run_dry_run(capsys, fv3_mpi_assets, fv3_run_assets, with_ba "#SBATCH --qos=batch", "#SBATCH --time=00:01:00", ] + fv3_mpi_assets - run_expected = "\n".join(batch_components) + expected_lines = batch_components else: batch_script = None - run_expected = " ".join(fv3_mpi_assets) + expected_lines = [" ".join(fv3_mpi_assets)] with patch.object(FV3Forecast, "_validate", return_value=True): fcstobj = FV3Forecast(config_file=config_file, dry_run=True, batch_script=batch_script) with patch.object(fcstobj, "_config", config): fcstobj.run(cycle=dt.datetime.now()) - assert run_expected in capsys.readouterr().out + for line in expected_lines: + assert logged(caplog, line) -def test_run_submit(fv3_run_assets): +@pytest.mark.parametrize( + "vals", [(True, "_run_via_batch_submission"), (False, "_run_via_local_execution")] +) +def test_FV3Forecast_run(fv3_run_assets, vals): + batch_script, config_file, _ = fv3_run_assets + use_batch, helper_method = vals + fcstobj = FV3Forecast(config_file=config_file, batch_script=batch_script if use_batch else None) + with patch.object(fcstobj, helper_method) as helper: + helper.return_value = (True, None) + assert fcstobj.run(cycle=dt.datetime.now()) is True + helper.assert_called_once_with() + + +def test_FV3Forecast__run_via_batch_submission(fv3_run_assets): batch_script, config_file, config = fv3_run_assets - with patch.object(FV3Forecast, "_validate", return_value=True): - with patch.object(scheduler, "execute") as execute: - fcstobj = FV3Forecast(config_file=config_file, batch_script=batch_script) - with patch.object(fcstobj, "_config", config): - fcstobj.run(cycle=dt.datetime.now()) - execute.assert_called_once_with(cmd=f"sbatch {batch_script}") + with patch.object(scheduler, "execute") as execute: + fcstobj = FV3Forecast(config_file=config_file, batch_script=batch_script) + with patch.object(fcstobj, "_config", config): + fcstobj._run_via_batch_submission() + execute.assert_called_once_with(cmd=ANY) + + +def test_FV3Forecast__run_via_local_execution(fv3_run_assets): + 
_, config_file, config = fv3_run_assets + fcstobj = FV3Forecast(config_file=config_file) + with patch.object(fcstobj, "_config", config): + with patch.object(forecast, "execute") as execute: + execute.return_value = ns(success=True) + success, lines = fcstobj._run_via_local_execution() + assert success is True + assert lines[0] == "Command:" + execute.assert_called_once_with(cmd=ANY) From 2e4cff3044b2bc573dd79e89a9f50796dfc9fc46 Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Thu, 2 Nov 2023 14:01:15 -0600 Subject: [PATCH 20/66] UW-363 (#334) --- src/uwtools/config/core.py | 50 +++++++++++++-------------- src/uwtools/tests/config/test_core.py | 11 ------ 2 files changed, 24 insertions(+), 37 deletions(-) diff --git a/src/uwtools/config/core.py b/src/uwtools/config/core.py index bfd811d9f..fcb6c4022 100644 --- a/src/uwtools/config/core.py +++ b/src/uwtools/config/core.py @@ -303,19 +303,6 @@ def dump_dict(path: OptionalPath, cfg: dict, opts: Optional[ns] = None) -> None: :param opts: Other options required by a subclass. """ - def from_ordereddict(self, in_dict: dict) -> dict: - """ - Recursively replaces all OrderedDict objects with basic dict objects. - - :param: in_dict: A dictionary potentially containing OrderedDict objects - """ - if isinstance(in_dict, OrderedDict): - in_dict = dict(in_dict) - for sect, keys in in_dict.items(): - if isinstance(keys, OrderedDict): - in_dict[sect] = dict(keys) - return in_dict - def parse_include(self, ref_dict: Optional[dict] = None) -> None: """ Recursively process include directives in a config object. @@ -417,18 +404,17 @@ def _load(self, config_file: OptionalPath) -> dict: # The protected _sections method is the most straightforward way to get at the dict # representation of the parse config. - cfg = configparser.ConfigParser(dict_type=OrderedDict) + cfg = configparser.ConfigParser() cfg.optionxform = str # type: ignore sections = cfg._sections # type: ignore # pylint: disable=protected-access with readable(config_file) as f: raw = f.read() try: cfg.read_string(raw) - d = dict(sections) + return dict(sections) except configparser.MissingSectionHeaderError: cfg.read_string("[top]\n" + raw) - d = dict(sections.get("top")) - return self.from_ordereddict(d) + return dict(sections.get("top")) # Public methods @@ -478,9 +464,17 @@ def _load(self, config_file: OptionalPath) -> dict: :param config_file: Path to config file to load. """ + + # f90nml returns OrderedDict objects to maintain the order of namelists in the namelist + # files that it reads. But in Python 3.6+ the standard dict maintains order as well. Since + # OrderedDict can cause problems downstream when serializing to YAML, convert OrderedDict + # objects to standard dicts here. + + def from_od(d): + return {key: from_od(val) if isinstance(val, dict) else val for key, val in d.items()} + with readable(config_file) as f: - cfg = f90nml.read(f).todict(complex_tuple=False) - return self.from_ordereddict(cfg) + return from_od(f90nml.read(f).todict()) # Public methods @@ -501,12 +495,17 @@ def dump_dict(path: OptionalPath, cfg: dict, opts: Optional[ns] = None) -> None: :param cfg: The in-memory config object to dump. :param opts: Other options required by a subclass. """ - nml = OrderedDict(cfg) - for sect, keys in nml.items(): - if isinstance(keys, dict): - nml[sect] = OrderedDict(keys) + + # f90nml honors namelist and variable order if it receives an OrderedDict as input, so + # ensure that it receives one. 
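The `from_od()` conversion earlier in this hunk and the `to_od()` helper defined just below are mirror-image recursions. A standalone sketch of the round trip, using simplified stand-ins rather than the exact patch code:

```python
from collections import OrderedDict

def from_od(d):
    # Recursively replace OrderedDict (as returned by f90nml.read) with dict.
    return {k: from_od(v) if isinstance(v, dict) else v for k, v in d.items()}

def to_od(d):
    # Recursively rebuild OrderedDict so f90nml keeps namelist order on write.
    return OrderedDict((k, to_od(v) if isinstance(v, dict) else v) for k, v in d.items())

nml = OrderedDict(science=OrderedDict(dt=300, hydrostatic=False))
plain = from_od(nml)
assert type(plain) is dict and type(plain["science"]) is dict
assert to_od(plain) == nml  # order and values both survive the round trip
```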
+ + def to_od(d): + return OrderedDict( + {key: to_od(val) if isinstance(val, dict) else val for key, val in d.items()} + ) + with writable(path) as f: - f90nml.Namelist(nml).write(f, sort=False) + f90nml.Namelist(to_od(cfg)).write(f, sort=False) class YAMLConfig(Config): @@ -533,7 +532,7 @@ def _load(self, config_file: OptionalPath) -> dict: loader = self._yaml_loader with readable(config_file) as f: try: - cfg = yaml.load(f.read(), Loader=loader) + return yaml.load(f.read(), Loader=loader) except yaml.constructor.ConstructorError as e: if e.problem: if "unhashable" in e.problem: @@ -546,7 +545,6 @@ def _load(self, config_file: OptionalPath) -> dict: else: msg = str(e) raise _log_and_error(msg) from e - return self.from_ordereddict(cfg) def _yaml_include(self, loader: yaml.Loader, node: yaml.SequenceNode) -> dict: """ diff --git a/src/uwtools/tests/config/test_core.py b/src/uwtools/tests/config/test_core.py index 4922b5b92..adf43df2f 100644 --- a/src/uwtools/tests/config/test_core.py +++ b/src/uwtools/tests/config/test_core.py @@ -8,7 +8,6 @@ import logging import os import sys -from collections import OrderedDict from io import StringIO from pathlib import Path from typing import Any, Dict @@ -890,16 +889,6 @@ def test_Config_dereference_unexpected_error(nml_cfgobj): nml_cfgobj.dereference(ref_dict={"n": "{{ n }}"}) -def test_Config_from_ordereddict(nml_cfgobj): - d: dict[Any, Any] = OrderedDict([("z", 26), ("a", OrderedDict([("alpha", 1)]))]) - d = nml_cfgobj.from_ordereddict(d) - # Assert that every OrderedDict is now just a dict. The second assert is needed because - # isinstance(OrderedDict(), dict) is True. - for x in d, d["a"]: - assert isinstance(x, dict) - assert not isinstance(x, OrderedDict) - - def test_YAMLConfig__load_unexpected_error(tmp_path): cfgfile = tmp_path / "cfg.yaml" with open(cfgfile, "w", encoding="utf-8") as f: From 5de7088179d322f6d1d0b642e594a9c5ffda2396 Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Fri, 3 Nov 2023 08:18:23 -0600 Subject: [PATCH 21/66] UW-407 (#335) --- src/uwtools/drivers/driver.py | 15 +++--- src/uwtools/drivers/forecast.py | 43 +++++++-------- src/uwtools/tests/drivers/test_forecast.py | 22 ++++---- src/uwtools/tests/utils/test_file.py | 27 +++++++--- src/uwtools/types.py | 12 +++++ src/uwtools/utils/file.py | 61 ++++++++++++---------- 6 files changed, 106 insertions(+), 74 deletions(-) diff --git a/src/uwtools/drivers/driver.py b/src/uwtools/drivers/driver.py index a0a97c6c1..f2ba7f1d9 100644 --- a/src/uwtools/drivers/driver.py +++ b/src/uwtools/drivers/driver.py @@ -81,13 +81,14 @@ def run_cmd(self) -> str: """ The command-line command to run the NWP tool. - :return: Collated string that contains MPI command, run time arguments, and exec name. - """ - run_cmd = self._platform_config["mpicmd"] - exec_name = self._config["exec_name"] - run_time_args = self._config["runtime_info"].get("mpi_args", []) - args_str = " ".join(str(arg) for arg in run_time_args) - return f"{run_cmd} {args_str} {exec_name}" + :return: Collated string that contains MPI command, runtime arguments, and exec name. 
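The refactored `run_cmd()` (its body follows just below) reduces to joining the non-empty command components. A runnable sketch with illustrative config values, not real platform settings:

```python
# Illustrative values; the real dicts come from the driver's platform and
# experiment configs.
platform_config = {"mpicmd": "srun"}
config = {"exec_name": "fv3.exe", "runtime_info": {"mpi_args": ["--export=ALL", "--ntasks=6"]}}

components = [
    platform_config.get("mpicmd"),                                  # MPI run program
    *[str(x) for x in config["runtime_info"].get("mpi_args", [])],  # MPI arguments
    config["exec_name"],                                            # executable name
]
print(" ".join(filter(None, components)))  # srun --export=ALL --ntasks=6 fv3.exe
```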
+ """ + components = [ + self._platform_config.get("mpicmd"), # MPI run program + *[str(x) for x in self._config["runtime_info"].get("mpi_args", [])], # MPI arguments + self._config["exec_name"], # NWP tool executable name + ] + return " ".join(filter(None, components)) @property def scheduler(self) -> JobScheduler: diff --git a/src/uwtools/drivers/forecast.py b/src/uwtools/drivers/forecast.py index c55f694c6..4157559a5 100644 --- a/src/uwtools/drivers/forecast.py +++ b/src/uwtools/drivers/forecast.py @@ -3,7 +3,6 @@ """ -import os import sys from collections.abc import Mapping from datetime import datetime @@ -14,8 +13,8 @@ from uwtools.drivers.driver import Driver from uwtools.logging import log from uwtools.scheduler import BatchScript -from uwtools.types import DefinitePath, OptionalPath -from uwtools.utils.file import handle_existing, resource_pathobj +from uwtools.types import DefinitePath, ExistAct, OptionalPath +from uwtools.utils.file import handle_existing, resource_pathobj, validate_existing_action from uwtools.utils.processing import execute @@ -53,7 +52,7 @@ def batch_script(self) -> BatchScript: @staticmethod def create_directory_structure( - run_directory: DefinitePath, exist_act: str = "delete", dry_run: bool = False + run_directory: DefinitePath, exist_act: str = ExistAct.delete, dry_run: bool = False ) -> None: """ Collects the name of the desired run directory, and has an optional flag for what to do if @@ -61,29 +60,27 @@ def create_directory_structure( subdirectories INPUT and RESTART. Verifies creation of all directories. :param run_directory: Path of desired run directory. - :param exist_act: Could be any of 'delete', 'rename', 'quit'. Sets how the program responds - to a preexisting run directory. The default is to delete the old run directory. + :param exist_act: Action when run directory exists: "delete" (default), "quit", or "rename" """ - run_directory = Path(run_directory) - - # Caller should only provide correct argument. + validate_existing_action( + exist_act, valid_actions=[ExistAct.delete, ExistAct.quit, ExistAct.rename] + ) - if exist_act not in ["delete", "rename", "quit"]: - raise ValueError(f"Bad argument: {exist_act}") + run_directory = Path(run_directory) - # Exit program with error if caller chooses to quit. + # Exit program with error if caller specified the "quit" action. - if exist_act == "quit" and run_directory.is_dir(): - log.critical("User chose quit option when creating directory") + if exist_act == ExistAct.quit and run_directory.is_dir(): + log.critical(f"Option {exist_act} specified, exiting") sys.exit(1) - # Delete or rename directory if it exists. + # Handle a potentially pre-existing directory appropriately. if dry_run and run_directory.is_dir(): log.info(f"Would {exist_act} directory") else: - handle_existing(str(run_directory), exist_act) + handle_existing(run_directory, exist_act) # Create new run directory with two required subdirectories. @@ -93,7 +90,7 @@ def create_directory_structure( log.info("Would create directory: %s", path) else: log.info("Creating directory: %s", path) - os.makedirs(path) + path.mkdir(parents=True) def create_field_table(self, output_path: OptionalPath) -> None: """ @@ -140,14 +137,13 @@ def output(self) -> None: def prepare_directories(self) -> Path: """ Prepares the run directory and stages static and cycle-dependent files. + + :return: Path to the run directory. 
""" run_directory = self._config["run_dir"] - self.create_directory_structure(run_directory, "delete", dry_run=self._dry_run) - + self.create_directory_structure(run_directory, ExistAct.delete, dry_run=self._dry_run) self._prepare_config_files(Path(run_directory)) - self._config["cycle-dependent"].update(self._define_boundary_files()) - for file_category in ["static", "cycle-dependent"]: self.stage_files( run_directory, self._config[file_category], link_files=True, dry_run=self._dry_run @@ -176,7 +172,8 @@ def run(self, cycle: datetime) -> bool: """ Runs FV3 either locally or via a batch-script submission. - :return: Did the FV3 run exit with success status? + :param cycle: The forecast cycle to run. + :return: Did the batch submission or FV3 run exit with success status? """ status, output = ( self._run_via_batch_submission() @@ -270,10 +267,8 @@ def _run_via_batch_submission(self) -> Tuple[bool, List[str]]: batch_lines = ["Batch script:", *str(batch_script).split("\n")] if self._dry_run: return True, batch_lines - assert self._batch_script is not None outpath = run_directory / self._batch_script - batch_script.dump(outpath) return self.scheduler.submit_job(outpath), batch_lines diff --git a/src/uwtools/tests/drivers/test_forecast.py b/src/uwtools/tests/drivers/test_forecast.py index 6a67ccf96..c1245c56c 100644 --- a/src/uwtools/tests/drivers/test_forecast.py +++ b/src/uwtools/tests/drivers/test_forecast.py @@ -19,6 +19,7 @@ from uwtools.drivers.forecast import FV3Forecast from uwtools.logging import log from uwtools.tests.support import compare_files, fixture_path, logged +from uwtools.types import ExistAct def test_batch_script(): @@ -87,7 +88,7 @@ def test_create_directory_structure(tmp_path): rundir = tmp_path / "rundir" # Test delete behavior when run directory does not exist. - FV3Forecast.create_directory_structure(rundir, "delete") + FV3Forecast.create_directory_structure(rundir, ExistAct.delete) assert (rundir / "RESTART").is_dir() # Create a file in the run directory. @@ -97,18 +98,18 @@ def test_create_directory_structure(tmp_path): # Test delete behavior when run directory exists. Test file should be gone # since old run directory was deleted. - FV3Forecast.create_directory_structure(rundir, "delete") + FV3Forecast.create_directory_structure(rundir, ExistAct.delete) assert (rundir / "RESTART").is_dir() assert not test_file.is_file() # Test rename behavior when run directory exists. - FV3Forecast.create_directory_structure(rundir, "rename") + FV3Forecast.create_directory_structure(rundir, ExistAct.rename) copy_directory = next(tmp_path.glob("%s_*" % rundir.name)) assert (copy_directory / "RESTART").is_dir() # Test quit behavior when run directory exists. 
with raises(SystemExit) as pytest_wrapped_e: - FV3Forecast.create_directory_structure(rundir, "quit") + FV3Forecast.create_directory_structure(rundir, ExistAct.quit) assert pytest_wrapped_e.type == SystemExit assert pytest_wrapped_e.value.code == 1 @@ -373,11 +374,14 @@ def test_FV3Forecast_run(fv3_run_assets, vals): def test_FV3Forecast__run_via_batch_submission(fv3_run_assets): batch_script, config_file, config = fv3_run_assets - with patch.object(scheduler, "execute") as execute: - fcstobj = FV3Forecast(config_file=config_file, batch_script=batch_script) - with patch.object(fcstobj, "_config", config): - fcstobj._run_via_batch_submission() - execute.assert_called_once_with(cmd=ANY) + fcstobj = FV3Forecast(config_file=config_file, batch_script=batch_script) + with patch.object(fcstobj, "_config", config): + with patch.object(scheduler, "execute") as execute: + execute.return_value = ns(success=True) + success, lines = fcstobj._run_via_batch_submission() + assert success is True + assert lines[0] == "Batch script:" + execute.assert_called_once_with(cmd=ANY) def test_FV3Forecast__run_via_local_execution(fv3_run_assets): diff --git a/src/uwtools/tests/utils/test_file.py b/src/uwtools/tests/utils/test_file.py index 2494129ba..a6e5cc1aa 100644 --- a/src/uwtools/tests/utils/test_file.py +++ b/src/uwtools/tests/utils/test_file.py @@ -11,6 +11,7 @@ import pytest from pytest import fixture, raises +from uwtools.types import ExistAct from uwtools.utils import file @@ -75,19 +76,24 @@ def test_get_file_type_unrecignized(): file.get_file_type("a.jpg") +def test_handle_existing_bad_action(): + with raises(ValueError): + file.handle_existing(directory="unused", exist_act="foo") + + @pytest.mark.parametrize("exc", [FileExistsError, RuntimeError]) def test_handle_existing_delete_failure(exc, assets): _, _, rundir = assets with patch.object(file.shutil, "rmtree", side_effect=exc): with raises(RuntimeError) as e: - file.handle_existing(directory=rundir, action="delete") - assert "Could not delete directory" in str(e.value) + file.handle_existing(directory=rundir, exist_act=ExistAct.delete) + assert f"Could not {ExistAct.delete} directory" in str(e.value) assert rundir.is_dir() def test_handle_existing_delete_success(assets): _, _, rundir = assets - file.handle_existing(directory=rundir, action="delete") + file.handle_existing(directory=rundir, exist_act=ExistAct.delete) assert not rundir.is_dir() @@ -96,8 +102,8 @@ def test_handle_existing_rename_failure(exc, assets): _, renamed, rundir = assets with patch.object(file.shutil, "move", side_effect=exc): with raises(RuntimeError) as e: - file.handle_existing(directory=rundir, action="rename") - assert "Could not rename directory" in str(e.value) + file.handle_existing(directory=rundir, exist_act=ExistAct.rename) + assert f"Could not {ExistAct.rename} directory" in str(e.value) assert not renamed.is_dir() assert rundir.is_dir() @@ -106,7 +112,7 @@ def test_handle_existing_rename_success(assets): now, renamed, rundir = assets with patch.object(file, "dt") as dt: dt.now.return_value = now - file.handle_existing(directory=rundir, action="rename") + file.handle_existing(directory=rundir, exist_act=ExistAct.rename) assert renamed.is_dir() assert not rundir.is_dir() @@ -140,6 +146,15 @@ def test_resource_pathobj(): assert file.resource_pathobj().is_dir() +def test_validate_existing_action_fail(): + with raises(ValueError): + file.validate_existing_action(ExistAct.quit, [ExistAct.delete]) + + +def test_validate_existing_action_pass(): + 
file.validate_existing_action(ExistAct.quit, [ExistAct.delete, ExistAct.quit]) + + def test_writable_file(tmp_path): apath = tmp_path / "afile" with file.writable(filepath=apath) as f: diff --git a/src/uwtools/types.py b/src/uwtools/types.py index 657c0df44..f62d34145 100644 --- a/src/uwtools/types.py +++ b/src/uwtools/types.py @@ -1,5 +1,17 @@ +from dataclasses import dataclass from pathlib import Path from typing import Optional, Union DefinitePath = Union[Path, str] OptionalPath = Optional[DefinitePath] + + +@dataclass +class ExistAct: + """ + Possible actions to take when a directory already exists. + """ + + delete: str = "delete" + quit: str = "quit" + rename: str = "rename" diff --git a/src/uwtools/utils/file.py b/src/uwtools/utils/file.py index a1b29dd3b..08d77c40d 100644 --- a/src/uwtools/utils/file.py +++ b/src/uwtools/utils/file.py @@ -2,7 +2,6 @@ Helpers for working with files and directories. """ -import os import shutil import sys from contextlib import contextmanager @@ -12,10 +11,10 @@ from importlib import resources from io import StringIO from pathlib import Path -from typing import IO, Any, Generator, Union +from typing import IO, Any, Generator, List, Union from uwtools.logging import log -from uwtools.types import DefinitePath, OptionalPath +from uwtools.types import DefinitePath, ExistAct, OptionalPath @dataclass(frozen=True) @@ -91,35 +90,26 @@ def get_file_type(path: DefinitePath) -> str: raise ValueError(msg) -def handle_existing(directory: str, action: str) -> None: +def handle_existing(directory: DefinitePath, exist_act: str) -> None: """ - Given a run directory, and an action to do if directory exists, delete or rename directory. + Take specified action on a directory. - :param directory: The directory to delete or rename. - :param action: The action to take on an existing directory ("delete" or "rename") + :param directory: The directory to handle. + :param exist_act: Action ("delete" or "rename") to take when directory exists. """ - # Try to delete existing run directory if option is delete. - - try: - if action == "delete" and os.path.isdir(directory): - shutil.rmtree(directory) - except (FileExistsError, RuntimeError) as e: - msg = f"Could not delete directory {directory}" - log.critical(msg) - raise RuntimeError(msg) from e - - # Try to rename existing run directory if option is rename. - - try: - if action == "rename" and os.path.isdir(directory): - now = dt.now() - save_dir = "%s%s" % (directory, now.strftime("_%Y%m%d_%H%M%S")) - shutil.move(directory, save_dir) - except (FileExistsError, RuntimeError) as e: - msg = f"Could not rename directory {directory}" - log.critical(msg) - raise RuntimeError(msg) from e + validate_existing_action(exist_act, valid_actions=[ExistAct.delete, ExistAct.rename]) + if Path(directory).is_dir(): + try: + if exist_act == ExistAct.delete: + shutil.rmtree(directory) + elif exist_act == ExistAct.rename: + save_dir = "%s%s" % (directory, dt.now().strftime("_%Y%m%d_%H%M%S")) + shutil.move(directory, save_dir) + except (FileExistsError, RuntimeError) as e: + msg = f"Could not {exist_act} directory {directory}" + log.critical(msg) + raise RuntimeError(msg) from e def path_if_it_exists(path: str) -> str: @@ -166,6 +156,21 @@ def resource_pathobj(suffix: str = "") -> Path: return prefix / suffix +def validate_existing_action(exist_act: str, valid_actions: List[str]) -> None: + """ + Ensure that action specified for an existing directory is valid. + + :param exist_act: Action to check. 
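`ExistAct` and `validate_existing_action()` (whose docstring continues just below) combine into a small guard against mistyped actions. A hedged usage sketch mirroring the patch:

```python
from dataclasses import dataclass
from typing import List

@dataclass
class ExistAct:
    delete: str = "delete"
    quit: str = "quit"
    rename: str = "rename"

def validate_existing_action(exist_act: str, valid_actions: List[str]) -> None:
    # Reject any action not valid in the caller's context.
    if exist_act not in valid_actions:
        raise ValueError(
            'Specify one of %s as exist_act, not "%s"'
            % (", ".join(f'"{x}"' for x in valid_actions), exist_act)
        )

validate_existing_action(ExistAct.delete, [ExistAct.delete, ExistAct.rename])  # passes
try:
    validate_existing_action("archive", [ExistAct.delete, ExistAct.rename])
except ValueError as e:
    print(e)  # Specify one of "delete", "rename" as exist_act, not "archive"
```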
+ :param valid_actions: Actions valid for the caller's context. + :raises: ValueError if specified action is invalid. + """ + if exist_act not in valid_actions: + raise ValueError( + 'Specify one of %s as exist_act, not "%s"' + % (", ".join(f'"{x}"' for x in valid_actions), exist_act) + ) + + @contextmanager def writable(filepath: OptionalPath = None, mode: str = "w") -> Generator[IO, None, None]: """ From 953e9a7da389cbe85ff18b85f3f6d07c4448ce02 Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Mon, 6 Nov 2023 09:04:47 -0700 Subject: [PATCH 22/66] UW-364 (#338) Co-authored-by: Naureen Bharwani --- .github/CODEOWNERS | 4 ++-- .github/scripts/format-check.sh | 18 ++++++++++++++++++ .github/scripts/install-conda | 11 ----------- .github/scripts/install-conda.sh | 11 +++++++++++ .github/scripts/make-package | 5 ----- .github/scripts/make-package.sh | 6 ++++++ .github/scripts/{publish => publish.sh} | 5 ++--- .github/scripts/{tag-check => tag-check.sh} | 3 ++- .github/scripts/{tag-create => tag-create.sh} | 3 ++- .github/workflows/release.yaml | 13 ++++++++----- .github/workflows/test.yaml | 6 ++++-- README.md | 2 +- recipe/meta.json | 1 + recipe/meta.yaml | 1 + 14 files changed, 58 insertions(+), 31 deletions(-) create mode 100755 .github/scripts/format-check.sh delete mode 100755 .github/scripts/install-conda create mode 100755 .github/scripts/install-conda.sh delete mode 100755 .github/scripts/make-package create mode 100755 .github/scripts/make-package.sh rename .github/scripts/{publish => publish.sh} (71%) rename .github/scripts/{tag-check => tag-check.sh} (76%) rename .github/scripts/{tag-create => tag-create.sh} (63%) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8b058dbd9..6c04a87b5 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,10 +1,10 @@ # See https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners -# Side-wide code owners: +# Code owners: * @NaureenBharwaniNOAA @christinaholtNOAA @elcarpenterNOAA @fgabelmannjr @maddenp-noaa @weirae -# Documentation-system config owners: +# Documentation owners: .readthedocs.yaml @christinaholtNOAA @jprestop @maddenp-noaa docs/* @christinaholtNOAA @jprestop @maddenp-noaa diff --git a/.github/scripts/format-check.sh b/.github/scripts/format-check.sh new file mode 100755 index 000000000..078fca6e2 --- /dev/null +++ b/.github/scripts/format-check.sh @@ -0,0 +1,18 @@ +# Actions invokes script with: bash -e