diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 466c0b4..9f38d8f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: types_or: [yaml, markdown, css, scss] # https://docs.astral.sh/ruff/integrations/#pre-commit - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.1 + rev: v0.6.3 hooks: # Run the linter - id: ruff diff --git a/poetry.lock b/poetry.lock index 893cb23..1c2b896 100644 --- a/poetry.lock +++ b/poetry.lock @@ -35,29 +35,29 @@ files = [ [[package]] name = "filelock" -version = "3.13.4" +version = "3.15.4" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.13.4-py3-none-any.whl", hash = "sha256:404e5e9253aa60ad457cae1be07c0f0ca90a63931200a47d9b6a6af84fd7b45f"}, - {file = "filelock-3.13.4.tar.gz", hash = "sha256:d13f466618bfde72bd2c18255e269f72542c6e70e7bac83a0232d6b1cc5c8cf4"}, + {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, + {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] typing = ["typing-extensions (>=4.8)"] [[package]] name = "identify" -version = "2.5.36" +version = "2.6.0" description = "File identification library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "identify-2.5.36-py2.py3-none-any.whl", hash = "sha256:37d93f380f4de590500d9dba7db359d0d3da95ffe7f9de1753faa159e71e7dfa"}, - {file = "identify-2.5.36.tar.gz", hash = "sha256:e5e00f54165f9047fbebeb4a560f9acfb8af4c88232be60a488e9b68d122745d"}, + {file = "identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0"}, + {file = "identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf"}, ] [package.extras] @@ -171,18 +171,15 @@ resolved_reference = "0707be900a103eee1de40c9ae0c411072da84ee5" [[package]] name = "nodeenv" -version = "1.8.0" +version = "1.9.1" description = "Node.js virtual environment builder" optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ - {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, - {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, ] -[package.dependencies] -setuptools = "*" - [[package]] name = "pathspec" version = "0.11.2" @@ -196,28 +193,29 @@ files = [ [[package]] name = "platformdirs" -version = "4.2.0" -description = "A small Python package for 
determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +version = "4.2.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, - {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] [[package]] name = "pre-commit" -version = "3.7.0" +version = "3.8.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." optional = false python-versions = ">=3.9" files = [ - {file = "pre_commit-3.7.0-py2.py3-none-any.whl", hash = "sha256:5eae9e10c2b5ac51577c3452ec0a490455c45a0533f7960f993a0d01e59decab"}, - {file = "pre_commit-3.7.0.tar.gz", hash = "sha256:e209d61b8acdcf742404408531f0c37d49d2c734fd7cff2d6076083d191cb060"}, + {file = "pre_commit-3.8.0-py2.py3-none-any.whl", hash = "sha256:9a90a53bf82fdd8778d58085faf8d83df56e40dfe18f45b19446e26bf1b3a63f"}, + {file = "pre_commit-3.8.0.tar.gz", hash = "sha256:8bb6494d4a20423842e198980c9ecf9f96607a07ea29549e180eef9ae80fe7af"}, ] [package.dependencies] @@ -229,78 +227,75 @@ virtualenv = ">=20.10.0" [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" -files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = 
"PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, -] - -[[package]] -name = "setuptools" -version = "69.5.1" -description = "Easily download, build, install, upgrade, and uninstall Python packages" -optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-69.5.1-py3-none-any.whl", hash = "sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32"}, - {file = "setuptools-69.5.1.tar.gz", hash = "sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file 
= "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] - [[package]] name = "virtualenv" -version = "20.25.3" +version = "20.26.3" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.25.3-py3-none-any.whl", hash = 
"sha256:8aac4332f2ea6ef519c648d0bc48a5b1d324994753519919bddbb1aff25a104e"}, - {file = "virtualenv-20.25.3.tar.gz", hash = "sha256:7bb554bbdfeaacc3349fa614ea5bff6ac300fc7c335e9facf3a3bcfc703f45be"}, + {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, + {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, ] [package.dependencies] diff --git a/rull.toml b/ruff.toml similarity index 100% rename from rull.toml rename to ruff.toml diff --git a/urbanopt_des/analysis_instance.py b/urbanopt_des/analysis_instance.py index 28d7be9..0d4cd47 100644 --- a/urbanopt_des/analysis_instance.py +++ b/urbanopt_des/analysis_instance.py @@ -10,9 +10,7 @@ def __init__(self) -> None: # the machine name of the variable self.variables: dict = {} - def add_variable_instance( - self, variable_name: str, variable_value: Any, **kwargs - ) -> None: + def add_variable_instance(self, variable_name: str, variable_value: Any, **kwargs) -> None: """Store the variable instance and value in a dictionary. There can only be one variable_name per instance of the analysis @@ -71,9 +69,7 @@ def create_unique_variable_instance_name(self, prepend_str: str = "sim") -> str: return result - def save_analysis_name_to_file( - self, filename: Path, override_name: Union[None, str] = None - ) -> None: + def save_analysis_name_to_file(self, filename: Path, override_name: Union[None, str] = None) -> None: """Save off the analysis name to a file that can be used for later reference and post processing. Right now this is a simple file but ideally the instance of the analysis that is written should be the same @@ -99,6 +95,4 @@ def save_variables_to_file(self, filename: Path) -> None: with open(filename, "w") as f: for variable_name in self.variables: - f.write( - f"{variable_name},{self.variables[variable_name]['value']},{self.variables[variable_name]['short_name']}\n" - ) + f.write(f"{variable_name},{self.variables[variable_name]['value']},{self.variables[variable_name]['short_name']}\n") diff --git a/urbanopt_des/emissions.py b/urbanopt_des/emissions.py index 3fa950e..c12f025 100644 --- a/urbanopt_des/emissions.py +++ b/urbanopt_des/emissions.py @@ -39,21 +39,14 @@ def __init__( path = path / "with_distribution_losses" else: path = path / "without_distribution_losses" - path = ( - path - / "future" - / "hourly" - / f"future_hourly_{emissions_type}_co2e_{future_year}.csv" - ) + path = path / "future" / "hourly" / f"future_hourly_{emissions_type}_co2e_{future_year}.csv" if not path.exists(): raise Exception(f"Future emissions data file does not exist: {path}") # verify that the eGRID subregion is valid if egrid_subregion not in self.region_names(): - raise Exception( - f"Invalid eGRID subregion: {egrid_subregion}, expected one of {self.region_names()}" - ) + raise Exception(f"Invalid eGRID subregion: {egrid_subregion}, expected one of {self.region_names()}") if analysis_year is None: analysis_year = future_year @@ -61,21 +54,15 @@ def __init__( self.data = pd.read_csv(path, header=0) # create two new columns, one for the datetime based on the future_year and one based on the analysis_year. 
- self.data["datetime"] = datetime.datetime(future_year, 1, 1) + pd.to_timedelta( - self.data["hour"], unit="h" - ) + self.data["datetime"] = datetime.datetime(future_year, 1, 1) + pd.to_timedelta(self.data["hour"], unit="h") # If the year is a leap year, then shift the datetime by one day, effectively eliminating the leap day. # This isn't working yet, moving on... # if self.data['datetime'][0].is_leap_year: # after 2/28/future_year, shift all hours back by 24 hours # self.data.loc[self.data['datetime'] > datetime.datetime(future_year, 3, 1), 'datetime'] = self.data.loc[self.data['datetime'] > datetime.datetime(future_year, 3, 1), 'datetime'] - pd.to_timedelta(1, unit='d') - self.data["analysis_datetime_end"] = datetime.datetime( - analysis_year, 1, 1 - ) + pd.to_timedelta(self.data["hour"], unit="h") - self.data["analysis_datetime_start"] = self.data[ - "analysis_datetime_end" - ] - pd.to_timedelta(1, unit="h") + self.data["analysis_datetime_end"] = datetime.datetime(analysis_year, 1, 1) + pd.to_timedelta(self.data["hour"], unit="h") + self.data["analysis_datetime_start"] = self.data["analysis_datetime_end"] - pd.to_timedelta(1, unit="h") # move the datetime columns to the front cols = self.data.columns.tolist() @@ -98,26 +85,20 @@ def __init__( # remove the MBtu column, and then transpose # if the column exists, then drop it if "emission_kg_per_mbtu" in self.other_fuel_data.columns: - self.other_fuel_data = self.other_fuel_data.drop( - columns=["emission_kg_per_mbtu"] - ) + self.other_fuel_data = self.other_fuel_data.drop(columns=["emission_kg_per_mbtu"]) self.other_fuel_data = self.other_fuel_data.T # make the first row the column names self.other_fuel_data.columns = self.other_fuel_data.iloc[0] # drop the first row self.other_fuel_data = self.other_fuel_data.drop(self.other_fuel_data.index[0]) - self.other_fuel_data.reset_index(inplace=True) - self.other_fuel_data.drop(columns=["index"], inplace=True) + self.other_fuel_data = self.other_fuel_data.reset_index() + self.other_fuel_data = self.other_fuel_data.drop(columns=["index"]) # copy the self.data and remove all the columns except self.other_fuels = self.data.copy() # drop all columns except 'analysis_datetime_start', 'analysis_datetime_end', 'hour' - to_drop = [ - col - for col in self.other_fuels.columns - if col not in ["analysis_datetime_start", "analysis_datetime_end", "hour"] - ] + to_drop = [col for col in self.other_fuels.columns if col not in ["analysis_datetime_start", "analysis_datetime_end", "hour"]] self.other_fuels = self.other_fuels.drop(columns=to_drop) # merge in the other_fuels_data with the self.other_fuels and fill down diff --git a/urbanopt_des/measures/set_argument_value/measure.py b/urbanopt_des/measures/set_argument_value/measure.py index 4fe1006..ed75822 100644 --- a/urbanopt_des/measures/set_argument_value/measure.py +++ b/urbanopt_des/measures/set_argument_value/measure.py @@ -32,18 +32,10 @@ def arguments(self): "The model_name in the ModelicaProject passed into the run method.", units="string", ) - self.measure_args.add_argument( - "type", "Name of the type being modified", units="string" - ) - self.measure_args.add_argument( - "identifier", "Name of the type identifier being modified", units="string" - ) - self.measure_args.add_argument( - "argument_name", "Name of the argument to set", units="string" - ) - self.measure_args.add_argument( - "value", "Value to set the argument to", units="float" - ) + self.measure_args.add_argument("type", "Name of the type being modified", units="string") + 
self.measure_args.add_argument("identifier", "Name of the type identifier being modified", units="string") + self.measure_args.add_argument("argument_name", "Name of the argument to set", units="string") + self.measure_args.add_argument("value", "Value to set the argument to", units="float") return self.measure_args def run(self, project: ModelicaProject, user_arguments: list[dict]): @@ -61,9 +53,7 @@ def run(self, project: ModelicaProject, user_arguments: list[dict]): # add actions to manipulate the model # store the previous value, for fun - previous_value = model.get_component_argument_value( - type_, identifier, argument_name - ) + previous_value = model.get_component_argument_value(type_, identifier, argument_name) self.measure_attributes.register_value( model_name, self.unique_name, @@ -71,15 +61,11 @@ def run(self, project: ModelicaProject, user_arguments: list[dict]): previous_value, ) - model.update_component_modifications( - type_, identifier, {argument_name: new_value} - ) + model.update_component_modifications(type_, identifier, {argument_name: new_value}) for args in user_arguments.get_args_with_register_values(): # register the value that was set after the fact - self.measure_attributes.register_value( - model_name, self.unique_name, args["name"], args["value"] - ) + self.measure_attributes.register_value(model_name, self.unique_name, args["name"], args["value"]) # execute the actions model.execute() diff --git a/urbanopt_des/measures/set_extended_type_argument_value/measure.py b/urbanopt_des/measures/set_extended_type_argument_value/measure.py index 98a21cf..3992d20 100644 --- a/urbanopt_des/measures/set_extended_type_argument_value/measure.py +++ b/urbanopt_des/measures/set_extended_type_argument_value/measure.py @@ -32,24 +32,12 @@ def arguments(self): "The model_name in the ModelicaProject passed into the run method.", units="string", ) - self.measure_args.add_argument( - "extended_type", "Name of the extended type being modified", units="string" - ) - self.measure_args.add_argument( - "type", "Name of the type being modified", units="string" - ) - self.measure_args.add_argument( - "identifier", "Name of the type identifier being modified", units="string" - ) - self.measure_args.add_argument( - "object_name", "Name of the data object being modified", units="string" - ) - self.measure_args.add_argument( - "argument_name", "Name of the argument to set", units="string" - ) - self.measure_args.add_argument( - "value", "Value to set the argument to", units="float" - ) + self.measure_args.add_argument("extended_type", "Name of the extended type being modified", units="string") + self.measure_args.add_argument("type", "Name of the type being modified", units="string") + self.measure_args.add_argument("identifier", "Name of the type identifier being modified", units="string") + self.measure_args.add_argument("object_name", "Name of the data object being modified", units="string") + self.measure_args.add_argument("argument_name", "Name of the argument to set", units="string") + self.measure_args.add_argument("value", "Value to set the argument to", units="float") return self.measure_args def run(self, project: ModelicaProject, user_arguments: list[dict]): @@ -68,15 +56,11 @@ def run(self, project: ModelicaProject, user_arguments: list[dict]): model = project.get_model(model_name) # add actions to manipulate the model - model.update_extended_component_modification( - extended_type, type_, identifier, object_name, argument_name, str(new_value) - ) + 
model.update_extended_component_modification(extended_type, type_, identifier, object_name, argument_name, str(new_value)) for args in user_arguments.get_args_with_register_values(): # register the value that was set after the fact - self.measure_attributes.register_value( - model_name, self.unique_name, args["name"], args["value"] - ) + self.measure_attributes.register_value(model_name, self.unique_name, args["name"], args["value"]) # execute the actions model.execute() diff --git a/urbanopt_des/measures/set_parameter_value/measure.py b/urbanopt_des/measures/set_parameter_value/measure.py index 483c0dd..57d1d5c 100644 --- a/urbanopt_des/measures/set_parameter_value/measure.py +++ b/urbanopt_des/measures/set_parameter_value/measure.py @@ -32,15 +32,9 @@ def arguments(self): "The model_name in the ModelicaProject passed into the run method.", units="string", ) - self.measure_args.add_argument( - "type", "Name of the type being modified", units="string" - ) - self.measure_args.add_argument( - "identifier", "Name of the type identifier being modified", units="string" - ) - self.measure_args.add_argument( - "value", "Value to set the argument to", units="float" - ) + self.measure_args.add_argument("type", "Name of the type being modified", units="string") + self.measure_args.add_argument("identifier", "Name of the type identifier being modified", units="string") + self.measure_args.add_argument("value", "Value to set the argument to", units="float") return self.measure_args def run(self, project: ModelicaProject, user_arguments: list[dict]): @@ -59,15 +53,11 @@ def run(self, project: ModelicaProject, user_arguments: list[dict]): model.update_parameter(type_, identifier, new_value) - self.measure_attributes.register_value( - model_name, self.unique_name, f"{identifier}_previous_value", previous_value - ) + self.measure_attributes.register_value(model_name, self.unique_name, f"{identifier}_previous_value", previous_value) for args in user_arguments.get_args_with_register_values(): # register the value that was set after the fact - self.measure_attributes.register_value( - model_name, self.unique_name, args["name"], args["value"] - ) + self.measure_attributes.register_value(model_name, self.unique_name, args["name"], args["value"]) # execute the actions model.execute() diff --git a/urbanopt_des/measures/set_parameter_value_multiplier/measure.py b/urbanopt_des/measures/set_parameter_value_multiplier/measure.py index a854e78..0916ce7 100644 --- a/urbanopt_des/measures/set_parameter_value_multiplier/measure.py +++ b/urbanopt_des/measures/set_parameter_value_multiplier/measure.py @@ -32,15 +32,9 @@ def arguments(self): "The model_name in the ModelicaProject passed into the run method.", units="string", ) - self.measure_args.add_argument( - "type", "Name of the type being modified", units="string" - ) - self.measure_args.add_argument( - "identifier", "Name of the type identifier being modified", units="string" - ) - self.measure_args.add_argument( - "value", "Value to multiply the argument by", units="float" - ) + self.measure_args.add_argument("type", "Name of the type being modified", units="string") + self.measure_args.add_argument("identifier", "Name of the type identifier being modified", units="string") + self.measure_args.add_argument("value", "Value to multiply the argument by", units="float") return self.measure_args def run(self, project: ModelicaProject, user_arguments: list[dict]): @@ -60,15 +54,11 @@ def run(self, project: ModelicaProject, user_arguments: list[dict]): to_set_value = 
float(previous_value) * float(new_value) model.update_parameter(type_, identifier, str(to_set_value)) - self.measure_attributes.register_value( - model_name, self.unique_name, f"{identifier}_previous_value", previous_value - ) + self.measure_attributes.register_value(model_name, self.unique_name, f"{identifier}_previous_value", previous_value) for args in user_arguments.get_args_with_register_values(): # register the value that was set after the fact - self.measure_attributes.register_value( - model_name, self.unique_name, args["name"], str(to_set_value) - ) + self.measure_attributes.register_value(model_name, self.unique_name, args["name"], str(to_set_value)) # execute the actions model.execute() diff --git a/urbanopt_des/modelica_results.py b/urbanopt_des/modelica_results.py index 6236b84..c443113 100644 --- a/urbanopt_des/modelica_results.py +++ b/urbanopt_des/modelica_results.py @@ -155,37 +155,20 @@ def resample_and_convert_to_df( (_, ets_q_cooling) = self.modelica_data.values(f"bui[{n_b}].QCoo_flow") (_, ets_q_heating) = self.modelica_data.values(f"bui[{n_b}].QHea_flow") - agg_columns["ETS Pump Electricity Total"].append( - f"ETS Pump Electricity Building {building_id}" - ) - agg_columns["ETS Heat Pump Electricity Total"].append( - f"ETS Heat Pump Electricity Building {building_id}" - ) - agg_columns["ETS Thermal Cooling Total"].append( - f"ETS Thermal Cooling Building {building_id}" - ) - agg_columns["ETS Thermal Heating Total"].append( - f"ETS Thermal Heating Building {building_id}" - ) - building_data[f"ETS Pump Electricity Building {building_id}"] = ( - ets_pump_data - ) - building_data[f"ETS Heat Pump Electricity Building {building_id}"] = ( - ets_hp_data - ) + agg_columns["ETS Pump Electricity Total"].append(f"ETS Pump Electricity Building {building_id}") + agg_columns["ETS Heat Pump Electricity Total"].append(f"ETS Heat Pump Electricity Building {building_id}") + agg_columns["ETS Thermal Cooling Total"].append(f"ETS Thermal Cooling Building {building_id}") + agg_columns["ETS Thermal Heating Total"].append(f"ETS Thermal Heating Building {building_id}") + building_data[f"ETS Pump Electricity Building {building_id}"] = ets_pump_data + building_data[f"ETS Heat Pump Electricity Building {building_id}"] = ets_hp_data building_data[f"ETS Thermal Cooling Building {building_id}"] = ets_q_cooling building_data[f"ETS Thermal Heating Building {building_id}"] = ets_q_heating # convert time to timestamps for pandas - time = [ - datetime(year_of_data, 1, 1, 0, 0, 0) + timedelta(seconds=int(t)) - for t in time1 - ] + time = [datetime(year_of_data, 1, 1, 0, 0, 0) + timedelta(seconds=int(t)) for t in time1] # convert into data frame - df_energy = pd.pandas.DataFrame( - {"datetime": time, "Total DES Electricity": total_energy} - ) + df_energy = pd.pandas.DataFrame({"datetime": time, "Total DES Electricity": total_energy}) df_energy = df_energy.set_index("datetime") df_energy = df_energy.resample("60min").max() # set the index name so that it exports nicely @@ -208,37 +191,22 @@ def resample_and_convert_to_df( if len(other_var_data) == len(time): data[other_var] = other_var_data else: - print( - f'Other var "{other_var}" length does not match {len(other_var_data)} != {len(time)}' - ) + print(f'Other var "{other_var}" length does not match {len(other_var_data)} != {len(time)}') df_power = pd.pandas.DataFrame(data) # create aggregation columns for total pumps, total heat pumps, and total - df_power["ETS Pump Electricity Total"] = df_power[ - agg_columns["ETS Pump Electricity Total"] - ].sum(axis=1) 
- df_power["ETS Heat Pump Electricity Total"] = df_power[ - agg_columns["ETS Heat Pump Electricity Total"] - ].sum(axis=1) - df_power["Total Thermal Cooling Energy"] = df_power[ - agg_columns["ETS Thermal Cooling Total"] - ].sum(axis=1) - df_power["Total Thermal Heating Energy"] = df_power[ - agg_columns["ETS Thermal Heating Total"] - ].sum(axis=1) + df_power["ETS Pump Electricity Total"] = df_power[agg_columns["ETS Pump Electricity Total"]].sum(axis=1) + df_power["ETS Heat Pump Electricity Total"] = df_power[agg_columns["ETS Heat Pump Electricity Total"]].sum(axis=1) + df_power["Total Thermal Cooling Energy"] = df_power[agg_columns["ETS Thermal Cooling Total"]].sum(axis=1) + df_power["Total Thermal Heating Energy"] = df_power[agg_columns["ETS Thermal Heating Total"]].sum(axis=1) # Calculate the District Loop Power - if the columns exists # check if multiple columns are in a dataframe - if all( - column in df_power.columns - for column in ["TDisWatRet.port_a.m_flow", "TDisWatRet.T", "TDisWatSup.T"] - ): + if all(column in df_power.columns for column in ["TDisWatRet.port_a.m_flow", "TDisWatRet.T", "TDisWatSup.T"]): # \dot{m} * c_p * \Delta T with Water at (4186 J/kg/K) df_power["District Loop Energy"] = ( - df_power["TDisWatRet.port_a.m_flow"] - * 4186 - * abs(df_power["TDisWatRet.T"] - df_power["TDisWatSup.T"]) + df_power["TDisWatRet.port_a.m_flow"] * 4186 * abs(df_power["TDisWatRet.T"] - df_power["TDisWatSup.T"]) ) column_names = [ @@ -252,7 +220,7 @@ def resample_and_convert_to_df( # sum up all ETS data (pump and heat pump) df_power.to_csv(self.path / "power_original.csv") - df_power.drop_duplicates(subset="datetime", inplace=True) + df_power = df_power.drop_duplicates(subset="datetime") df_power = df_power.set_index("datetime") # upsample to 1min with filling the last. This will @@ -293,11 +261,7 @@ def combine_with_openstudio_results( "ExteriorEquipment:Electricity Building", "InteriorEquipment:NaturalGas Building", ] - meter_names = [ - f"{meter_name} {building_id}" - for building_id in building_ids - for meter_name in building_meter_names - ] + meter_names = [f"{meter_name} {building_id}" for building_id in building_ids for meter_name in building_meter_names] # add in the end use totals that are non-HVAC meter_names += [ "Total Building Interior Lighting", @@ -308,21 +272,15 @@ def combine_with_openstudio_results( "Total Building Interior Equipment", ] - self.min_60_with_buildings = pd.concat( - [self.min_60, openstudio_df[meter_names]], axis=1, join="inner" - ) + self.min_60_with_buildings = pd.concat([self.min_60, openstudio_df[meter_names]], axis=1, join="inner") self.min_60_with_buildings.index.name = "datetime" # also conduct this for the 15 minute time step - self.min_15_with_buildings = pd.concat( - [self.min_15, openstudio_df_15[meter_names]], axis=1, join="inner" - ) + self.min_15_with_buildings = pd.concat([self.min_15, openstudio_df_15[meter_names]], axis=1, join="inner") self.min_15_with_buildings.index.name = "datetime" # should we resort the columns? - return None - def create_summary(self): """Create an annual summary by selecting key variables and values and transposing them for easy comparison""" # now create the summary table @@ -467,9 +425,7 @@ def create_summary(self): for column in summary_columns: # check if the column exists in the data frame and if not, then set the value to zero! 
if column["name"] in self.annual.columns: - self.end_use_summary[self.display_name][column["display_name"]] = float( - self.annual[column["name"]].iloc[0] - ) + self.end_use_summary[self.display_name][column["display_name"]] = float(self.annual[column["name"]].iloc[0]) else: self.end_use_summary[self.display_name][column["display_name"]] = 0.0 @@ -507,30 +463,20 @@ def calculate_carbon_emissions( # Calculate the natural gas emissions, emissions data is in kg/MWh so Wh->MWh, then divide by another 1000 to get mtCO2e self.min_60_with_buildings["Total Building Natural Gas Carbon Emissions"] = ( - self.min_60_with_buildings["Total Building Natural Gas"] - * hourly_emissions_data.other_fuels["natural_gas"] - / 1e6 - / 1000 - ) - self.min_60_with_buildings["Total Natural Gas Carbon Emissions"] = ( - self.min_60_with_buildings["Total Building Natural Gas Carbon Emissions"] + self.min_60_with_buildings["Total Building Natural Gas"] * hourly_emissions_data.other_fuels["natural_gas"] / 1e6 / 1000 ) + self.min_60_with_buildings["Total Natural Gas Carbon Emissions"] = self.min_60_with_buildings[ + "Total Building Natural Gas Carbon Emissions" + ] # Calculate the electricity carbon emissions, emissions data is in kg/MWh, so Wh->Mwh, then divide by another 1000 to get mtCO2e - self.min_60_with_buildings[ - f"Total Electricity Carbon Emissions {future_year}" - ] = ( - self.min_60_with_buildings["Total Electricity"] - * hourly_emissions_data.data[lookup_egrid_subregion] - / 1e6 - / 1000 + self.min_60_with_buildings[f"Total Electricity Carbon Emissions {future_year}"] = ( + self.min_60_with_buildings["Total Electricity"] * hourly_emissions_data.data[lookup_egrid_subregion] / 1e6 / 1000 ) # Sum the total carbon emissions self.min_60_with_buildings[f"Total Carbon Emissions {future_year}"] = ( self.min_60_with_buildings["Total Natural Gas Carbon Emissions"] - + self.min_60_with_buildings[ - f"Total Electricity Carbon Emissions {future_year}" - ] + + self.min_60_with_buildings[f"Total Electricity Carbon Emissions {future_year}"] ) def calculate_grid_metrics( @@ -553,9 +499,7 @@ def calculate_grid_metrics( # warm up times that have yet to be resolved. n_days = 2 skip_time = n_days * 96 - self.min_15_with_buildings_to_process = ( - self.min_15_with_buildings_to_process.iloc[skip_time:] - ) + self.min_15_with_buildings_to_process = self.min_15_with_buildings_to_process.iloc[skip_time:] # # END NEED TO FIX # # THIS IS HARD CODED -- NEED TO FIX! 
@@ -576,9 +520,7 @@ def calculate_grid_metrics( self.grid_metrics_daily = None for meter in meters: df_tmp = self.min_15_with_buildings_to_process.copy() - df_tmp = df_tmp.groupby([pd.Grouper(freq="1d")])[meter].agg( - ["max", "idxmax", "min", "idxmin", "mean", "sum"] - ) + df_tmp = df_tmp.groupby([pd.Grouper(freq="1d")])[meter].agg(["max", "idxmax", "min", "idxmin", "mean", "sum"]) # update the column names and save back into the results data frame df_tmp.columns = [ @@ -594,28 +536,19 @@ def calculate_grid_metrics( df_tmp[f"{meter} PVR"] = df_tmp[f"{meter} Max"] / df_tmp[f"{meter} Min"] # calculate the load factor - df_tmp[f"{meter} Load Factor"] = ( - df_tmp[f"{meter} Mean"] / df_tmp[f"{meter} Max"] - ) + df_tmp[f"{meter} Load Factor"] = df_tmp[f"{meter} Mean"] / df_tmp[f"{meter} Max"] # add in the system ramping, which has to be calculated from the original data frame df_tmp2 = self.min_15_with_buildings_to_process.copy() df_tmp2[f"{meter} System Ramping"] = df_tmp2[meter].diff().abs().fillna(0) - df_tmp2 = ( - df_tmp2.groupby([pd.Grouper(freq="1d")])[f"{meter} System Ramping"].agg( - ["sum"] - ) - / 1e6 - ) + df_tmp2 = df_tmp2.groupby([pd.Grouper(freq="1d")])[f"{meter} System Ramping"].agg(["sum"]) / 1e6 df_tmp2.columns = [f"{meter} System Ramping"] df_tmp = pd.concat([df_tmp, df_tmp2], axis=1, join="inner") if self.grid_metrics_daily is None: self.grid_metrics_daily = df_tmp else: - self.grid_metrics_daily = pd.concat( - [self.grid_metrics_daily, df_tmp], axis=1, join="inner" - ) + self.grid_metrics_daily = pd.concat([self.grid_metrics_daily, df_tmp], axis=1, join="inner") # aggregate the df_daily daily data to annual metrics. For the maxes/mins, we only want the max of the max # and the min of the min. @@ -637,13 +570,9 @@ def calculate_grid_metrics( # there is only one year of data, so grab the idmax/idmin of the first element. 
If # we expand to multiple years, then this will need to be updated id_lookup = df_tmp[f"{meter} Max idxmax"][0] - df_tmp[f"{meter} Max idxmax"] = self.grid_metrics_daily.loc[id_lookup][ - f"{meter} Max Datetime" - ] + df_tmp[f"{meter} Max idxmax"] = self.grid_metrics_daily.loc[id_lookup][f"{meter} Max Datetime"] id_lookup = df_tmp[f"{meter} Min idxmin"][0] - df_tmp[f"{meter} Min idxmin"] = self.grid_metrics_daily.loc[id_lookup][ - f"{meter} Min Datetime" - ] + df_tmp[f"{meter} Min idxmin"] = self.grid_metrics_daily.loc[id_lookup][f"{meter} Min Datetime"] # rename these two columns to remove the idxmax/idxmin nomenclature df_tmp = df_tmp.rename( columns={ @@ -653,23 +582,13 @@ def calculate_grid_metrics( ) # Add the MWh related metrics, can't sum up the 15 minute data, so we have to sum up the hourly - df_tmp["Total Electricity"] = ( - self.min_60_with_buildings["Total Electricity"].resample("1y").sum() / 1e6 - ) # MWh - df_tmp["Total Natural Gas"] = ( - self.min_60_with_buildings["Total Natural Gas"].resample("1y").sum() / 1e6 - ) # MWh + df_tmp["Total Electricity"] = self.min_60_with_buildings["Total Electricity"].resample("1y").sum() / 1e6 # MWh + df_tmp["Total Natural Gas"] = self.min_60_with_buildings["Total Natural Gas"].resample("1y").sum() / 1e6 # MWh df_tmp["Total Thermal Cooling Energy"] = ( - self.min_60_with_buildings["Total Thermal Cooling Energy"] - .resample("1y") - .sum() - / 1e6 + self.min_60_with_buildings["Total Thermal Cooling Energy"].resample("1y").sum() / 1e6 ) # MWh df_tmp["Total Thermal Heating Energy"] = ( - self.min_60_with_buildings["Total Thermal Heating Energy"] - .resample("1y") - .sum() - / 1e6 + self.min_60_with_buildings["Total Thermal Heating Energy"].resample("1y").sum() / 1e6 ) # MWh # graph the top 5 peak values for each of the meters @@ -684,9 +603,9 @@ def calculate_grid_metrics( df_to_proc = self.min_15_with_buildings_to_process.copy() if "Cooling" in meter: # values are negative, so ascending is actually descending - df_to_proc.sort_values(by=meter, ascending=True, inplace=True) + df_to_proc = df_to_proc.sort_values(by=meter, ascending=True) else: - df_to_proc.sort_values(by=meter, ascending=False, inplace=True) + df_to_proc = df_to_proc.sort_values(by=meter, ascending=False) df_to_proc = df_to_proc.head(50) # save the top 5 values to the df_tmp @@ -738,20 +657,10 @@ def save_dataframes( self.min_15.to_csv(self.path / "power_15min.csv") if self.min_60 is not None and "min_60" in dfs_to_save: self.min_60.to_csv(self.path / "power_60min.csv") - if ( - self.min_15_with_buildings is not None - and "min_15_with_buildings" in dfs_to_save - ): - self.min_15_with_buildings.to_csv( - self.path / "power_15min_with_buildings.csv" - ) - if ( - self.min_60_with_buildings is not None - and "min_60_with_buildings" in dfs_to_save - ): - self.min_60_with_buildings.to_csv( - self.path / "power_60min_with_buildings.csv" - ) + if self.min_15_with_buildings is not None and "min_15_with_buildings" in dfs_to_save: + self.min_15_with_buildings.to_csv(self.path / "power_15min_with_buildings.csv") + if self.min_60_with_buildings is not None and "min_60_with_buildings" in dfs_to_save: + self.min_60_with_buildings.to_csv(self.path / "power_60min_with_buildings.csv") # save the monthly and annual if self.monthly is not None and "monthly" in dfs_to_save: @@ -766,8 +675,5 @@ def save_dataframes( # save the metrics if self.grid_metrics_daily is not None and "grid_metrics_daily" in dfs_to_save: self.grid_metrics_daily.to_csv(self.path / "grid_metrics_daily.csv") - if ( - 
self.grid_metrics_annual is not None - and "grid_metrics_annual" in dfs_to_save - ): + if self.grid_metrics_annual is not None and "grid_metrics_annual" in dfs_to_save: self.grid_metrics_annual.to_csv(self.path / "grid_metrics_annual.csv") diff --git a/urbanopt_des/uo_cli_wrapper.py b/urbanopt_des/uo_cli_wrapper.py index f4d3769..75380ac 100644 --- a/urbanopt_des/uo_cli_wrapper.py +++ b/urbanopt_des/uo_cli_wrapper.py @@ -5,7 +5,7 @@ from pathlib import Path -class UOCliWrapper(object): +class UOCliWrapper: """Wrapper for running the UO CLI from within Python. If you are testing this locally, then you might need to configure your URBANopt CLI. @@ -43,19 +43,17 @@ def _run_command(self, command): app_root = f"/Applications/URBANoptCLI_{self.uo_version}" new_env["GEM_HOME"] = f"{app_root}/gems/ruby/2.7.0" new_env["GEM_PATH"] = f"{app_root}/gems/ruby/2.7.0" - new_env["PATH"] = ( - f"{app_root}/ruby/bin:{app_root}/gems/ruby/2.7.0/bin:{os.environ['PATH']}" - ) + new_env["PATH"] = f"{app_root}/ruby/bin:{app_root}/gems/ruby/2.7.0/bin:{os.environ['PATH']}" new_env["RUBYLIB"] = f"{app_root}/OpenStudio/Ruby" new_env["RUBY_DLL_PATH"] = f"{app_root}/OpenStudio/Ruby" # For REopt new_env["GEM_DEVELOPER_KEY"] = os.environ["GEM_DEVELOPER_KEY"] - result = subprocess.run( + result = subprocess.run( # noqa: S602 command, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + capture_output=True, shell=True, env=new_env, + check=False, ) log.write(result.stdout.decode("utf-8")) log.write(result.stderr.decode("utf-8")) @@ -69,9 +67,7 @@ def create(self, remove_example_project=False): self._run_command(f"uo create -p {self.uo_project}") else: print(f"Project {self.uo_project} already exists, skipping creation") - print( - f"Remove the project folder if you want to recreate it, {self.working_dir / self.uo_project}" - ) + print(f"Remove the project folder if you want to recreate it, {self.working_dir / self.uo_project}") if (self.working_dir / self.uo_project / "example_project.json").exists(): os.remove(self.working_dir / self.uo_project / "example_project.json") @@ -96,14 +92,10 @@ def create_scenarios(self, feature_file): def create_reopt_scenario(self, feature_file, baseline_scenario): """Create a scenario file for REopt assumptions based on the baseline scenario""" - self._run_command( - f"uo create -f {self.uo_project}/{feature_file} -r {self.uo_project}/{baseline_scenario}" - ) + self._run_command(f"uo create -f {self.uo_project}/{feature_file} -r {self.uo_project}/{baseline_scenario}") def run(self, feature_file, scenario_name): - self._run_command( - f"uo run -f {self.uo_project}/{feature_file} -s {self.uo_project}/{scenario_name}" - ) + self._run_command(f"uo run -f {self.uo_project}/{feature_file} -s {self.uo_project}/{scenario_name}") def info(self): print(f"Template path: {self.template_dir}") @@ -114,28 +106,18 @@ def info(self): def process_scenario(self, feature_file, scenario_name): # -d is for the default settings and needs to be used (most of the time) - self._run_command( - f"uo process -d -f {self.uo_project}/{feature_file} -s {self.uo_project}/{scenario_name}" - ) + self._run_command(f"uo process -d -f {self.uo_project}/{feature_file} -s {self.uo_project}/{scenario_name}") - def process_reopt_scenario( - self, feature_file, scenario_name, individual_features=False - ): + def process_reopt_scenario(self, feature_file, scenario_name, individual_features=False): # In UO, the -r flag is used for the aggregated load analysis, whereas # the -e flag if for (e)aach individual feature. 
if not individual_features: - self._run_command( - f"uo process -r -f {self.uo_project}/{feature_file} -s {self.uo_project}/{scenario_name}" - ) + self._run_command(f"uo process -r -f {self.uo_project}/{feature_file} -s {self.uo_project}/{scenario_name}") else: - self._run_command( - f"uo process -e -f {self.uo_project}/{feature_file} -s {self.uo_project}/{scenario_name}" - ) + self._run_command(f"uo process -e -f {self.uo_project}/{feature_file} -s {self.uo_project}/{scenario_name}") def visualize_scenario(self, feature_file, scenario_name): - self._run_command( - f"uo visualize -f {self.uo_project}/{feature_file} -s {self.uo_project}/{scenario_name}" - ) + self._run_command(f"uo visualize -f {self.uo_project}/{feature_file} -s {self.uo_project}/{scenario_name}") def visualize_feature(self, feature_file): # -d is for the default settings @@ -145,10 +127,7 @@ def visualize_feature(self, feature_file): if (self.working_dir / self.uo_project / "run" / "scenarioData.js").exists(): shutil.copy( self.working_dir / self.uo_project / "run" / "scenarioData.js", - self.working_dir - / self.uo_project - / "visualization" - / "scenarioData.js", + self.working_dir / self.uo_project / "visualization" / "scenarioData.js", ) def set_number_parallel(self, num): @@ -160,33 +139,19 @@ def set_number_parallel(self, num): with open(self.working_dir / self.uo_project / "runner.conf", "w") as f: json.dump(data, f, indent=2) - def replace_weather_file_in_mapper( - self, mapper_file, weather_file_name, climate_zone - ): + def replace_weather_file_in_mapper(self, mapper_file, weather_file_name, climate_zone): """Replace the weather file in the mapper file with the given weather file name""" mapper_filepath = self.working_dir / self.uo_project / "mappers" / mapper_file if not mapper_filepath.exists(): raise Exception(f"Mapper file {mapper_filepath} does not exist") # verify that the weather_file exists in the weather path - if not ( - self.working_dir / self.uo_project / "weather" / f"{weather_file_name}.epw" - ).exists(): - raise Exception( - f"Weather file {weather_file_name}.epw does not exist in the weather path" - ) - if not ( - self.working_dir / self.uo_project / "weather" / f"{weather_file_name}.ddy" - ).exists(): - raise Exception( - f"Weather file {weather_file_name}.ddy does not exist in the weather path" - ) - if not ( - self.working_dir / self.uo_project / "weather" / f"{weather_file_name}.stat" - ).exists(): - raise Exception( - f"Weather file {weather_file_name}.stat does not exist in the weather path" - ) + if not (self.working_dir / self.uo_project / "weather" / f"{weather_file_name}.epw").exists(): + raise Exception(f"Weather file {weather_file_name}.epw does not exist in the weather path") + if not (self.working_dir / self.uo_project / "weather" / f"{weather_file_name}.ddy").exists(): + raise Exception(f"Weather file {weather_file_name}.ddy does not exist in the weather path") + if not (self.working_dir / self.uo_project / "weather" / f"{weather_file_name}.stat").exists(): + raise Exception(f"Weather file {weather_file_name}.stat does not exist in the weather path") with open(mapper_filepath) as f: data = json.load(f) diff --git a/urbanopt_des/urbanopt_analysis.py b/urbanopt_des/urbanopt_analysis.py index 4e18ea6..b70b9a0 100644 --- a/urbanopt_des/urbanopt_analysis.py +++ b/urbanopt_des/urbanopt_analysis.py @@ -15,9 +15,7 @@ class URBANoptAnalysis: - def __init__( - self, geojson_file: Path, analysis_dir: Path, year_of_data: int = 2017, **kwargs - ) -> None: + def __init__(self, geojson_file: 
Path, analysis_dir: Path, year_of_data: int = 2017, **kwargs) -> None: """Class to hold contents from a comprehensive UO analysis. The analysis can include contents from both URBANopt (OpenStudio/EnergyPlus) and URBANopt DES (Modelica). @@ -106,49 +104,29 @@ def retrieve_scaling_factors(self, df_scaling, year, building_id): } # collect some statistics about the meter readings to inform which are heating and cooling variables - filter = (df_scaling["building_id"] == building_id) & ( - df_scaling.index.year == year - ) + filter = (df_scaling["building_id"] == building_id) & (df_scaling.index.year == year) ng_filter = df_scaling["meter_type"] == "Natural Gas" el_filter = df_scaling["meter_type"] == "Electric - Grid" - number_of_meters = len( - df_scaling[df_scaling["building_id"] == building_id]["meter_type"].unique() - ) + number_of_meters = len(df_scaling[df_scaling["building_id"] == building_id]["meter_type"].unique()) if number_of_meters == 1: # then just heating, so the scaling should apply to both heating and cooling fields = ["end_time", "scaling_factor_electricity"] - scaling_factors["heating"] += ( - df_scaling[filter & el_filter][fields] - .reset_index() - .to_dict("records", index=True) - ) + scaling_factors["heating"] += df_scaling[filter & el_filter][fields].reset_index().to_dict("records", index=True) for item in scaling_factors["heating"]: item["scaling_factor"] = item.pop("scaling_factor_electricity") - scaling_factors["cooling"] += ( - df_scaling[filter & el_filter][fields] - .reset_index() - .to_dict("records", index=True) - ) + scaling_factors["cooling"] += df_scaling[filter & el_filter][fields].reset_index().to_dict("records", index=True) for item in scaling_factors["cooling"]: item["scaling_factor"] = item.pop("scaling_factor_electricity") # set the variables to the be max scaling factor - scaling_factors["variables"]["Peak space cooling load"] = df_scaling[ - filter & el_filter - ]["scaling_factor_electricity"].max() - scaling_factors["variables"]["Peak space heating load"] = df_scaling[ - filter & el_filter - ]["scaling_factor_electricity"].max() + scaling_factors["variables"]["Peak space cooling load"] = df_scaling[filter & el_filter]["scaling_factor_electricity"].max() + scaling_factors["variables"]["Peak space heating load"] = df_scaling[filter & el_filter]["scaling_factor_electricity"].max() elif number_of_meters == 2: # exclude zero values in the calculation zero_filter = df_scaling["converted_value"] != 0 - min_ng_non_zero = df_scaling[ - filter & zero_filter & (df_scaling["meter_type"] == "Natural Gas") - ]["converted_value"].min() - max_ng_non_zero = df_scaling[ - filter & zero_filter & (df_scaling["meter_type"] == "Natural Gas") - ]["converted_value"].max() + min_ng_non_zero = df_scaling[filter & zero_filter & (df_scaling["meter_type"] == "Natural Gas")]["converted_value"].min() + max_ng_non_zero = df_scaling[filter & zero_filter & (df_scaling["meter_type"] == "Natural Gas")]["converted_value"].max() ng_pvr = max_ng_non_zero / min_ng_non_zero # max_ng = df_scaling[filter & (df_scaling['meter_type'] == 'Natural Gas')]['converted_value'].max() # max_el = df_scaling[filter & (df_scaling['meter_type'] == 'Electric - Grid')]['converted_value'].max() @@ -166,41 +144,23 @@ def retrieve_scaling_factors(self, df_scaling, year, building_id): if ng_pvr > 1.5: # if the peak to valley ration of the natural gas is high, then assume that it has an impact on the heating load only fields = ["end_time", "scaling_factor_natural_gas"] - scaling_factors["heating"] += ( - 
df_scaling[filter & ng_filter][fields] - .reset_index() - .to_dict("records", index=True) - ) + scaling_factors["heating"] += df_scaling[filter & ng_filter][fields].reset_index().to_dict("records", index=True) for item in scaling_factors["heating"]: item["scaling_factor"] = item.pop("scaling_factor_natural_gas") - scaling_factors["variables"]["Peak space heating load"] = df_scaling[ - filter & ng_filter - ]["scaling_factor_natural_gas"].max() + scaling_factors["variables"]["Peak space heating load"] = df_scaling[filter & ng_filter]["scaling_factor_natural_gas"].max() else: # just use the electric load scaling factors for everything fields = ["end_time", "scaling_factor_electricity"] - scaling_factors["heating"] += ( - df_scaling[filter & el_filter][fields] - .reset_index() - .to_dict("records", index=True) - ) + scaling_factors["heating"] += df_scaling[filter & el_filter][fields].reset_index().to_dict("records", index=True) for item in scaling_factors["heating"]: item["scaling_factor"] = item.pop("scaling_factor_electricity") - scaling_factors["variables"]["Peak space heating load"] = df_scaling[ - filter & el_filter - ]["scaling_factor_electricity"].max() + scaling_factors["variables"]["Peak space heating load"] = df_scaling[filter & el_filter]["scaling_factor_electricity"].max() fields = ["end_time", "scaling_factor_electricity"] - scaling_factors["cooling"] += ( - df_scaling[filter & el_filter][fields] - .reset_index() - .to_dict("records", index=True) - ) + scaling_factors["cooling"] += df_scaling[filter & el_filter][fields].reset_index().to_dict("records", index=True) for item in scaling_factors["cooling"]: item["scaling_factor"] = item.pop("scaling_factor_electricity") - scaling_factors["variables"]["Peak space cooling load"] = df_scaling[ - filter & el_filter - ]["scaling_factor_electricity"].max() + scaling_factors["variables"]["Peak space cooling load"] = df_scaling[filter & el_filter]["scaling_factor_electricity"].max() # # plot sf_ng vs sf_el # plt.clf() @@ -219,9 +179,7 @@ def add_urbanopt_results(self, path_to_urbanopt: Path, scenario_name: str) -> No scenario_name (str): Name of the scenario that was run with URBANopt. """ self.urbanopt = URBANoptResults(path_to_urbanopt, scenario_name) - self.urbanopt.process_results( - self.geojson.get_building_ids(), year_of_data=self.year_of_data - ) + self.urbanopt.process_results(self.geojson.get_building_ids(), year_of_data=self.year_of_data) # note that the number of buildings in the geojson will match here since the file being passed # into the process_results method is the geojson file that was used to run the analysis. 
So no need @@ -233,18 +191,13 @@ def scale_urbanopt_results(self, path_to_urbanopt: Path) -> None: The name of the scaling CSV files and format is very specific to this method.""" if not self.urbanopt: - raise Exception( - "URBANopt results are not loaded, run `add_urbanopt_results` method" - ) + raise Exception("URBANopt results are not loaded, run `add_urbanopt_results` method") for building_id in self.geojson.get_building_ids(): # retrieve the scaling factors, fixed at electric_grid and natural_gas for meter_type in ["electric_grid", "natural_gas"]: filepath = path_to_urbanopt / "output" - filepath = ( - filepath - / f"building_{building_id}_scaling_factors_{meter_type}.csv" - ) + filepath = filepath / f"building_{building_id}_scaling_factors_{meter_type}.csv" if filepath.exists(): # load into a data frame, and load only the columns that @@ -263,11 +216,9 @@ def scale_urbanopt_results(self, path_to_urbanopt: Path) -> None: # set start_time and end time to be datetime objects df_scalars["start_time"] = pd.to_datetime(df_scalars["start_time"]) # add midnight to the start_time - df_scalars["start_time"] = df_scalars["start_time"].apply( - lambda x: x.replace(hour=0, minute=0, second=0) - ) + df_scalars["start_time"] = df_scalars["start_time"].apply(lambda x: x.replace(hour=0, minute=0, second=0)) df_scalars["end_time"] = pd.to_datetime(df_scalars["end_time"]) - df_scalars.reset_index(inplace=True) + df_scalars = df_scalars.reset_index() self.urbanopt.scale_results(df_scalars, self.year_of_data, 2021) @@ -302,14 +253,12 @@ def sort_modelica_results_order(self, analysis_names: list[str]) -> None: def save_urbanopt_results_in_modelica_paths(self): """Iterate through each of the modelica result paths and save a copy of the URBANopt OpenStudio data frames into the path.""" - for analysis_name in self.modelica.keys(): - self.urbanopt.data.to_csv( - self.modelica[analysis_name].path / "openstudio_df.csv" - ) + for analysis_name in self.modelica: + self.urbanopt.data.to_csv(self.modelica[analysis_name].path / "openstudio_df.csv") def combine_modelica_and_openstudio_results(self) -> None: """Combine the modelica and openstudio results into a single data frame for each analysis_name""" - for analysis_name in self.modelica.keys(): + for analysis_name in self.modelica: self.modelica[analysis_name].combine_with_openstudio_results( self.geojson.get_building_ids(), self.urbanopt.data, @@ -330,35 +279,20 @@ def resample_actual_data(self) -> None: for meter in meters: # print(f"Processing meter {meter} for building {building_id}") - meter_readings = self.geojson.get_meter_readings_for_building( - building_id, meter - ) + meter_readings = self.geojson.get_meter_readings_for_building(building_id, meter) # add the meter_type to all the json objects - [ - meter_reading.update( - {"meter_type": meter, "building_id": building_id} - ) - for meter_reading in meter_readings - ] + [meter_reading.update({"meter_type": meter, "building_id": building_id}) for meter_reading in meter_readings] # print(f"Found {len(meter_readings)} meter readings") # save the readings into a dataframe with end_time as the index - self.actual_data = pd.concat( - [self.actual_data, pd.DataFrame(meter_readings)] - ) + self.actual_data = pd.concat([self.actual_data, pd.DataFrame(meter_readings)]) self.actual_data["start_time"] = pd.to_datetime(self.actual_data["start_time"]) - self.actual_data["start_time"] = self.actual_data["start_time"].apply( - lambda x: x.replace(tzinfo=None) - ) + self.actual_data["start_time"] = 
self.actual_data["start_time"].apply(lambda x: x.replace(tzinfo=None)) self.actual_data["end_time"] = pd.to_datetime(self.actual_data["end_time"]) - self.actual_data["end_time"] = self.actual_data["end_time"].apply( - lambda x: x.replace(tzinfo=None) - ) + self.actual_data["end_time"] = self.actual_data["end_time"].apply(lambda x: x.replace(tzinfo=None)) # check if there is a time on the end_time and if not make it 23:59:59 - self.actual_data["end_time"] = self.actual_data["end_time"].apply( - lambda x: x.replace(hour=23, minute=59, second=59) - ) + self.actual_data["end_time"] = self.actual_data["end_time"].apply(lambda x: x.replace(hour=23, minute=59, second=59)) self.actual_data = self.actual_data.set_index(["start_time"]) # monthly agg across each building_id, meter_type (and other non-important fields) @@ -372,20 +306,12 @@ def resample_actual_data(self) -> None: ] drop_cols = ["end_time", "id"] # drop the columns first, then run the groupby - self.actual_data_monthly = ( - self.actual_data.drop(columns=drop_cols) - .groupby([pd.Grouper(freq="M"), *groupby_cols]) - .sum() - ) - self.actual_data_monthly.reset_index(inplace=True) - self.actual_data_monthly.set_index(["start_time"], inplace=True) - self.actual_data_yearly = ( - self.actual_data.drop(columns=drop_cols) - .groupby([pd.Grouper(freq="Y"), *groupby_cols]) - .sum() - ) - self.actual_data_yearly.reset_index(inplace=True) - self.actual_data_yearly.set_index(["start_time"], inplace=True) + self.actual_data_monthly = self.actual_data.drop(columns=drop_cols).groupby([pd.Grouper(freq="M"), *groupby_cols]).sum() + self.actual_data_monthly = self.actual_data_monthly.reset_index() + self.actual_data_monthly = self.actual_data_monthly.set_index(["start_time"]) + self.actual_data_yearly = self.actual_data.drop(columns=drop_cols).groupby([pd.Grouper(freq="Y"), *groupby_cols]).sum() + self.actual_data_yearly = self.actual_data_yearly.reset_index() + self.actual_data_yearly = self.actual_data_yearly.set_index(["start_time"]) # for each building, create a new row with the building_id and new meter called 'total' which has the # converted_value for all the meters for that building summed together @@ -398,8 +324,8 @@ def resample_actual_data(self) -> None: "converted_units", ] new_data = self.actual_data_monthly.groupby(groupby_cols).sum() - new_data.reset_index(inplace=True) - new_data.set_index(["start_time"], inplace=True) + new_data = new_data.reset_index() + new_data = new_data.set_index(["start_time"]) new_data["meter_type"] = "Total" self.new_data = new_data # add the new_data rows to the existing self.actual_monthly dataframe, mapping the common columns @@ -407,8 +333,8 @@ def resample_actual_data(self) -> None: # now do the same for the yearly data for the totals new_data = self.actual_data_yearly.groupby(groupby_cols).sum() - new_data.reset_index(inplace=True) - new_data.set_index(["start_time"], inplace=True) + new_data = new_data.reset_index() + new_data = new_data.set_index(["start_time"]) new_data["meter_type"] = "Total" self.new_data = new_data # add the new_data rows to the existing self.actual_monthly dataframe, mapping the common columns @@ -428,10 +354,8 @@ def resample_and_convert_modelica_results( Raises: Exception: errors""" - for analysis_name in self.modelica.keys(): - self.modelica[analysis_name].resample_and_convert_to_df( - building_ids, other_vars, self.year_of_data - ) + for analysis_name in self.modelica: + self.modelica[analysis_name].resample_and_convert_to_df(building_ids, other_vars, self.year_of_data) def 
create_building_summaries(self) -> None: """Create the summary of the results for URBANopt and each modelica simulation. This stores the data on the @@ -440,12 +364,12 @@ def create_building_summaries(self) -> None: self.urbanopt.create_summary() # create summary for each Modelica result - for analysis_name in self.modelica.keys(): + for analysis_name in self.modelica: self.modelica[analysis_name].create_summary() def save_modelica_variables(self) -> None: """For each Modelica analysis, save the variables in the location alongside the .mat file""" - for analysis_name in self.modelica.keys(): + for analysis_name in self.modelica: self.modelica[analysis_name].save_variables() def save_dataframes( @@ -469,20 +393,16 @@ def save_dataframes( self.urbanopt.save_dataframes() - for analysis_name in self.modelica.keys(): + for analysis_name in self.modelica: self.modelica[analysis_name].save_dataframes(dfs_to_save) # save the UO Analysis dataframes, which go into a summary directory if self.grid_summary is not None and "grid_summary" in dfs_to_save: self.grid_summary.to_csv(self.analysis_output_dir / "grid_summary.csv") - self.grid_metrics_annual.to_csv( - self.analysis_output_dir / "grid_metrics_annual_all.csv" - ) + self.grid_metrics_annual.to_csv(self.analysis_output_dir / "grid_metrics_annual_all.csv") if self.end_use_summary is not None and "end_use_summary" in dfs_to_save: - self.end_use_summary.to_csv( - self.analysis_output_dir / "annual_end_use_summary.csv" - ) + self.end_use_summary.to_csv(self.analysis_output_dir / "annual_end_use_summary.csv") def calculate_carbon_emissions( self, @@ -518,15 +438,11 @@ def calculate_carbon_emissions( ) # calculate the carbon emission on the URBANopt results - self.urbanopt.calculate_carbon_emissions( - hourly_emissions_data, future_year=future_year - ) + self.urbanopt.calculate_carbon_emissions(hourly_emissions_data, future_year=future_year) # Now for each of the modelica results - for analysis_name in self.modelica.keys(): - self.modelica[analysis_name].calculate_carbon_emissions( - hourly_emissions_data, future_year=future_year - ) + for analysis_name in self.modelica: + self.modelica[analysis_name].calculate_carbon_emissions(hourly_emissions_data, future_year=future_year) def calculate_all_grid_metrics(self) -> None: """Call each Modelica analysis to create the grid metric""" @@ -535,7 +451,7 @@ def calculate_all_grid_metrics(self) -> None: # skip n-days at the beginning of the grid metrics, due to # warm up times that have yet to be resolved. - for analysis_name in self.modelica.keys(): + for analysis_name in self.modelica: self.modelica[analysis_name].calculate_grid_metrics() def calculate_utility_cost(self, **kwargs) -> None: @@ -547,7 +463,7 @@ def calculate_utility_cost(self, **kwargs) -> None: # create three columns of utility costs, hot water, chilled water, and ambient water # 1 ton-hour = 3.5 kWh, 1 ton=3.5kW # energy, energy charges, demand charges, transition rate, and taxes - # - Demand is based on a building’s multiple one-hour peaks from June through September of the previous two 12-month periods. + # - Demand is based on a building's multiple one-hour peaks from June through September of the previous two 12-month periods. # hot water, 0.094 per ton-hour, 30.02 per ton per month, 0.394 per ton-hour, 3.5% # get the utility rates for thermal host water and chilled water @@ -559,16 +475,12 @@ def create_modelica_aggregations(self) -> None: df_60min_with_buildings. 
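As an aside on the calculate_utility_cost comments in the hunk above: the ton-hour conversion there is plain arithmetic. The sketch below applies the quoted hot-water rates (0.094 per ton-hour energy, 30.02 per ton per month demand, 0.394 per ton-hour transition, 3.5% tax) to invented monthly figures, taking 1 ton = 3.5 kW as the comment does; only the rates come from the document, the load values are hypothetical:

# Hypothetical monthly hot-water figures; only the rates come from the comment above.
monthly_thermal_kwh = 120_000.0  # delivered hot-water energy for the month
peak_thermal_kw = 450.0          # highest one-hour thermal demand

ton_hours = monthly_thermal_kwh / 3.5  # 1 ton ~= 3.5 kW, so 1 ton-hour ~= 3.5 kWh
peak_tons = peak_thermal_kw / 3.5

energy_charge = 0.094 * ton_hours      # $/ton-hour
demand_charge = 30.02 * peak_tons      # $/ton per month
transition_charge = 0.394 * ton_hours  # $/ton-hour
total_cost = (energy_charge + demand_charge + transition_charge) * 1.035  # plus 3.5% tax
print(f"Estimated monthly hot water cost: ${total_cost:,.2f}")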
This does not act on the URBANopt results since this method must be called after the combine_modelica_and_openstudio_results method.""" # First check that the data are in the dataframes - for analysis_name in self.modelica.keys(): + for analysis_name in self.modelica: if self.modelica[analysis_name].min_15_with_buildings is None: - raise Exception( - "Must call combine_modelica_and_openstudio_results() before calling create_aggregations()" - ) + raise Exception("Must call combine_modelica_and_openstudio_results() before calling create_aggregations()") if self.modelica[analysis_name].min_60_with_buildings is None: - raise Exception( - "Must call combine_modelica_and_openstudio_results() before calling create_aggregations()" - ) + raise Exception("Must call combine_modelica_and_openstudio_results() before calling create_aggregations()") # try block is here for folding in IDE :) # Note that the order of aggregations matter if a new aggregation is dependent on another @@ -633,26 +545,20 @@ def create_modelica_aggregations(self) -> None: "Total Building and ETS Energy", "Total DES Electricity", ] - building_aggs["Total Natural Gas"]["agg_columns"] = [ - "Total Building Natural Gas" - ] + building_aggs["Total Natural Gas"]["agg_columns"] = ["Total Building Natural Gas"] building_aggs["Total Energy"]["agg_columns"] = [ "Total Electricity", "Total Natural Gas", ] - building_aggs["Total Thermal Energy Cooling"]["agg_columns"] = [ - "Total Thermal Cooling Energy" - ] - building_aggs["Total Thermal Energy Heating"]["agg_columns"] = [ - "Total Thermal Heating Energy" - ] + building_aggs["Total Thermal Energy Cooling"]["agg_columns"] = ["Total Thermal Cooling Energy"] + building_aggs["Total Thermal Energy Heating"]["agg_columns"] = ["Total Thermal Heating Energy"] finally: pass # Do this for each of the analyses' 15 and 60 min dataframes - for analysis_name in self.modelica.keys(): + for analysis_name in self.modelica: for resolution in ["min_15_with_buildings", "min_60_with_buildings"]: - df = getattr(self.modelica[analysis_name], resolution) + temp_df = getattr(self.modelica[analysis_name], resolution) # Go through each building_aggs and create the aggregation for key, value in building_aggs.items(): @@ -662,16 +568,14 @@ def create_modelica_aggregations(self) -> None: # sum up the columns in the agg_columns defined above for the dataframe of # the analysis - df[key] = df[value["agg_columns"]].sum(axis=1) + temp_df[key] = temp_df[value["agg_columns"]].sum(axis=1) def create_rollups(self) -> None: """Rollups take the 60 minute data sets and roll up to monthly and annual""" # make sure that the data exist in the correct dataframes - for analysis_name in self.modelica.keys(): + for analysis_name in self.modelica: if self.modelica[analysis_name].min_60_with_buildings is None: - raise Exception( - f"Data do not exist in {analysis_name} for min_60_with_buildings." 
- ) + raise Exception(f"Data do not exist in {analysis_name} for min_60_with_buildings.") # confirm that URBANopt has the results too if self.urbanopt.data is None: @@ -682,13 +586,9 @@ def create_rollups(self) -> None: self.urbanopt.data_annual = self.urbanopt.data.resample("Y").sum() # roll up the Modelica results (each analysis) - for analysis_name in self.modelica.keys(): - self.modelica[analysis_name].monthly = ( - self.modelica[analysis_name].min_60_with_buildings.resample("M").sum() - ) - self.modelica[analysis_name].annual = ( - self.modelica[analysis_name].min_60_with_buildings.resample("Y").sum() - ) + for analysis_name in self.modelica: + self.modelica[analysis_name].monthly = self.modelica[analysis_name].min_60_with_buildings.resample("M").sum() + self.modelica[analysis_name].annual = self.modelica[analysis_name].min_60_with_buildings.resample("Y").sum() def __getitem__(self, key: str) -> ModelicaResults: # Accessor to the self.modelica dictionary that takes the key value as in the input @@ -733,9 +633,7 @@ def update_geojson_from_seed_data(self, **kwargs) -> dict: "weather_filename", "USA_VA_Arlington-Ronald.Reagan.Washington.Natl.AP.724050_TMY3.epw", ) - site_origin = kwargs.get( - "site_origin", [-77.03896375997412, 38.901950685284746] - ) + site_origin = kwargs.get("site_origin", [-77.03896375997412, 38.901950685284746]) # Add in the Project information project_info = { @@ -774,9 +672,7 @@ def update_geojson_from_seed_data(self, **kwargs) -> dict: # order the keys # Skip the new_dict = { - "type": geojson[ - "type" - ], # ignore for now since FeatureCollection doesn't validate in URBANopt + "type": geojson["type"], # ignore for now since FeatureCollection doesn't validate in URBANopt "name": geojson["name"], "project": geojson["project"], "features": [], @@ -836,16 +732,12 @@ def update_geojson_from_seed_data(self, **kwargs) -> dict: ) # map to the terms that UO expects - new_feature["properties"]["name"] = new_feature["properties"][ - "Property Name" - ] + new_feature["properties"]["name"] = new_feature["properties"]["Property Name"] new_feature["properties"]["id"] = f"{feature_count}" # process the floor area if gfa_location: - new_feature["properties"]["floor_area"] = new_feature["properties"][ - "Gross Floor Area" - ] + new_feature["properties"]["floor_area"] = new_feature["properties"]["Gross Floor Area"] else: raise Exception("No GFA found in the data, which is required!") @@ -853,22 +745,13 @@ def update_geojson_from_seed_data(self, **kwargs) -> dict: if footprint_location and stories_location: # print("Found footprint area and number of stories, using those values.") # We know everything about the building areas, so just store the data in the right location - new_feature["properties"]["footprint_area"] = new_feature[ - "properties" - ][footprint_location] - new_feature["properties"]["number_of_stories"] = new_feature[ - "properties" - ][stories_location] + new_feature["properties"]["footprint_area"] = new_feature["properties"][footprint_location] + new_feature["properties"]["number_of_stories"] = new_feature["properties"][stories_location] elif footprint_location and not stories_location: - new_feature["properties"]["footprint_area"] = new_feature[ - "properties" - ][footprint_location] + new_feature["properties"]["footprint_area"] = new_feature["properties"][footprint_location] # Calculate the stories by dividing out the GFA by the footprint area - number_of_stories = math.ceil( - new_feature["properties"]["floor_area"] - / new_feature["properties"]["Footprint 
Area"] - ) + number_of_stories = math.ceil(new_feature["properties"]["floor_area"] / new_feature["properties"]["Footprint Area"]) if number_of_stories > 18: print( f"WARNING: number of stories ({number_of_stories}) is greater than 18, which is not likely in Washington DC!, setting to 18 for this analysis." @@ -881,18 +764,12 @@ def update_geojson_from_seed_data(self, **kwargs) -> dict: # Calculate the footprint area from the GFA and number of stories new_feature["properties"]["floor_area"] / number_of_stories else: - print( - "Unknown footprint area and number of stories, inferring from GFA and 18 stories." - ) + print("Unknown footprint area and number of stories, inferring from GFA and 18 stories.") new_feature["properties"]["number_of_stories"] = 18 - new_feature["properties"]["footprint_area"] = ( - new_feature["properties"]["floor_area"] / number_of_stories - ) + new_feature["properties"]["footprint_area"] = new_feature["properties"]["floor_area"] / number_of_stories # Data for residential properties - new_feature["properties"]["number_of_stories_above_ground"] = ( - new_feature["properties"]["number_of_stories"] - ) + new_feature["properties"]["number_of_stories_above_ground"] = new_feature["properties"]["number_of_stories"] new_feature["properties"]["foundation_type"] = "slab" new_feature["properties"]["attic_type"] = "flat roof" @@ -919,30 +796,18 @@ def update_geojson_from_seed_data(self, **kwargs) -> dict: } lookup_value = new_feature["properties"]["Property Type"] if lookup_value in mapping: - new_feature["properties"]["building_type"] = mapping[ - lookup_value - ] + new_feature["properties"]["building_type"] = mapping[lookup_value] else: - raise Exception( - f"No property type mapping for building type: {lookup_value}" - ) + raise Exception(f"No property type mapping for building type: {lookup_value}") if new_feature["properties"].get("Year Built"): - new_feature["properties"]["year_built"] = new_feature["properties"][ - "Year Built" - ] + new_feature["properties"]["year_built"] = new_feature["properties"]["Year Built"] if new_feature.get("geometry"): if new_feature.get("geometry").get("type") == "GeometryCollection": # grab the one that is a polygon and save to the new_dict - index_geom = [ - i - for i, x in enumerate(new_feature["geometry"]["geometries"]) - if x["type"] == "Polygon" - ][0] - new_feature["geometry"] = new_feature["geometry"]["geometries"][ - index_geom - ] + index_geom = next(i for i, x in enumerate(new_feature["geometry"]["geometries"]) if x["type"] == "Polygon") + new_feature["geometry"] = new_feature["geometry"]["geometries"][index_geom] elif new_feature.get("geometry").get("type") == "Point": # remove the point del new_feature["geometry"] @@ -984,87 +849,51 @@ def update_geojson_from_seed_data(self, **kwargs) -> dict: if reading["converted_value"] > peaks["electricity"]: # kwh peaks["electricity"] = reading["converted_value"] - peaks["electricity_month"] = ( - datetime.datetime.fromisoformat( - reading["start_time"] - ).month - ) + peaks["electricity_month"] = datetime.datetime.fromisoformat(reading["start_time"]).month elif meter["type"] == "Natural Gas": meter_info["number_of_gas_meters"] += 1 for reading in meter["readings"]: if reading["converted_value"] > peaks["natural_gas"]: peaks["natural_gas"] = reading["converted_value"] - peaks["natural_gas_month"] = ( - datetime.datetime.fromisoformat( - reading["start_time"] - ).month - ) + peaks["natural_gas_month"] = datetime.datetime.fromisoformat(reading["start_time"]).month else: 
meter_info["number_of_other_meters"] += 1 - print( - f"WARNING: Not calculating peak for meter type: {meter['type']}" - ) + print(f"WARNING: Not calculating peak for meter type: {meter['type']}") new_feature["properties"]["electricity_peak"] = peaks["electricity"] - new_feature["properties"]["electricity_peak_month"] = peaks[ - "electricity_month" - ] + new_feature["properties"]["electricity_peak_month"] = peaks["electricity_month"] new_feature["properties"]["natural_gas_peak"] = peaks["natural_gas"] - new_feature["properties"]["natural_gas_peak_month"] = peaks[ - "natural_gas_month" - ] + new_feature["properties"]["natural_gas_peak_month"] = peaks["natural_gas_month"] no_meters = False else: - print( - f"WARNING: No meters found for building {index}, assuming NG heating." - ) + print(f"WARNING: No meters found for building {index}, assuming NG heating.") if no_meters: # determine the system type by floor_area only, assume Gas is # available if new_feature["properties"].get("floor_area", 0) > 125000: - new_feature["properties"]["system_type"] = ( - "VAV chiller with gas boiler reheat" - ) + new_feature["properties"]["system_type"] = "VAV chiller with gas boiler reheat" elif new_feature["properties"].get("floor_area", 0) > 75000: - new_feature["properties"]["system_type"] = ( - "PVAV with gas heat with electric reheat" - ) + new_feature["properties"]["system_type"] = "PVAV with gas heat with electric reheat" else: - new_feature["properties"]["system_type"] = ( - "PSZ-AC with gas coil" - ) - else: - # based on the meters, select a system type - if new_feature["properties"].get("floor_area", 0) > 125000: - if meter_info["number_of_gas_meters"] > 0: - new_feature["properties"]["system_type"] = ( - "VAV chiller with gas boiler reheat" - ) - else: - # no gas - new_feature["properties"]["system_type"] = ( - "VAV chiller with PFP boxes" - ) - elif new_feature["properties"].get("floor_area", 0) > 75000: - if meter_info["number_of_gas_meters"] > 0: - new_feature["properties"]["system_type"] = ( - "PVAV with gas heat with electric reheat" - ) - else: - # no gas - new_feature["properties"]["system_type"] = ( - "PVAV with PFP boxes" - ) + new_feature["properties"]["system_type"] = "PSZ-AC with gas coil" + elif new_feature["properties"].get("floor_area", 0) > 125000: + if meter_info["number_of_gas_meters"] > 0: + new_feature["properties"]["system_type"] = "VAV chiller with gas boiler reheat" else: - # small building - if meter_info["number_of_gas_meters"] > 0: - new_feature["properties"]["system_type"] = ( - "PSZ-AC with gas coil" - ) - else: - # no gas - new_feature["properties"]["system_type"] = "PSZ-HP" + # no gas + new_feature["properties"]["system_type"] = "VAV chiller with PFP boxes" + elif new_feature["properties"].get("floor_area", 0) > 75000: + if meter_info["number_of_gas_meters"] > 0: + new_feature["properties"]["system_type"] = "PVAV with gas heat with electric reheat" + else: + # no gas + new_feature["properties"]["system_type"] = "PVAV with PFP boxes" + elif meter_info["number_of_gas_meters"] > 0: + new_feature["properties"]["system_type"] = "PSZ-AC with gas coil" + else: + # no gas + new_feature["properties"]["system_type"] = "PSZ-HP" # # set the construction template based on the year built if new_feature["properties"].get("year_built"): @@ -1125,7 +954,7 @@ def create_summary_results(self) -> None: # only save off the useful columns for the summary table year_end = f"{self.year_of_data}-12-31" summary_data_columns = ["Metric", "Units"] - for analysis_name in ["Non-Connected"] + 
list(self.modelica.keys()):
+ for analysis_name in ["Non-Connected", *self.modelica.keys()]:
 summary_data_columns.append(analysis_name)
 if analysis_name == "Non-Connected":
 self.urbanopt.grid_metrics_daily
@@ -1135,87 +964,37 @@
 df_annual = self.modelica[analysis_name].grid_metrics_annual
 # print(df_annual)
- summary_data["Electricity Consumption"].append(
- df_annual[year_end]["Total Electricity"]
- )
- summary_data["Electricity Peak Demand"].append(
- df_annual[year_end]["Total Electricity Peak 1"]
- )
- summary_data["Electricity Peak Demand Date Time"].append(
- df_annual[year_end]["Total Electricity Peak Date Time 1"]
- )
-
- summary_data["Natural Gas Consumption"].append(
- df_annual[year_end]["Total Natural Gas"]
- )
- summary_data["Natural Gas Peak Demand"].append(
- df_annual[year_end]["Total Natural Gas Peak 1"]
- )
- summary_data["Natural Gas Peak Demand Date Time"].append(
- df_annual[year_end]["Total Natural Gas Peak Date Time 1"]
- )
-
- summary_data["Thermal Cooling"].append(
- df_annual[year_end]["Total Thermal Cooling Energy"]
- )
- summary_data["Thermal Heating"].append(
- df_annual[year_end]["Total Thermal Heating Energy"]
- )
-
- summary_data["Peak to Valley Ratio (Max)"].append(
- df_annual[year_end]["Total Electricity PVR max"]
- )
- summary_data["Peak to Valley Ratio (Min)"].append(
- df_annual[year_end]["Total Electricity PVR min"]
- )
- summary_data["Peak to Valley Ratio (Mean)"].append(
- df_annual[year_end]["Total Electricity PVR mean"]
- )
- summary_data["Load Factor (Max)"].append(
- df_annual[year_end]["Total Electricity Load Factor max"]
- )
- summary_data["Load Factor (Min)"].append(
- df_annual[year_end]["Total Electricity Load Factor min"]
- )
- summary_data["Load Factor (Mean)"].append(
- df_annual[year_end]["Total Electricity Load Factor mean"]
- )
- summary_data["System Ramping (Max)"].append(
- df_annual[year_end]["Total Electricity System Ramping max"]
- )
- summary_data["System Ramping (Sum)"].append(
- df_annual[year_end]["Total Electricity System Ramping sum"]
- )
- summary_data["System Ramping Cooling (Max)"].append(
- df_annual[year_end][
- "Total Thermal Cooling Energy System Ramping max"
- ]
- )
- summary_data["System Ramping Cooling (Sum)"].append(
- df_annual[year_end][
- "Total Thermal Cooling Energy System Ramping sum"
- ]
- )
- summary_data["System Ramping Heating (Max)"].append(
- df_annual[year_end][
- "Total Thermal Heating Energy System Ramping max"
- ]
- )
- summary_data["System Ramping Heating (Sum)"].append(
- df_annual[year_end][
- "Total Thermal Heating Energy System Ramping sum"
- ]
- )
+ summary_data["Electricity Consumption"].append(df_annual[year_end]["Total Electricity"])
+ summary_data["Electricity Peak Demand"].append(df_annual[year_end]["Total Electricity Peak 1"])
+ summary_data["Electricity Peak Demand Date Time"].append(df_annual[year_end]["Total Electricity Peak Date Time 1"])
+
+ summary_data["Natural Gas Consumption"].append(df_annual[year_end]["Total Natural Gas"])
+ summary_data["Natural Gas Peak Demand"].append(df_annual[year_end]["Total Natural Gas Peak 1"])
+ summary_data["Natural Gas Peak Demand Date Time"].append(df_annual[year_end]["Total Natural Gas Peak Date Time 1"])
+
+ summary_data["Thermal Cooling"].append(df_annual[year_end]["Total Thermal Cooling Energy"])
+ summary_data["Thermal Heating"].append(df_annual[year_end]["Total Thermal Heating Energy"])
+
+ summary_data["Peak to Valley Ratio (Max)"].append(df_annual[year_end]["Total Electricity PVR max"])
+
summary_data["Peak to Valley Ratio (Min)"].append(df_annual[year_end]["Total Electricity PVR min"]) + summary_data["Peak to Valley Ratio (Mean)"].append(df_annual[year_end]["Total Electricity PVR mean"]) + summary_data["Load Factor (Max)"].append(df_annual[year_end]["Total Electricity Load Factor max"]) + summary_data["Load Factor (Min)"].append(df_annual[year_end]["Total Electricity Load Factor min"]) + summary_data["Load Factor (Mean)"].append(df_annual[year_end]["Total Electricity Load Factor mean"]) + summary_data["System Ramping (Max)"].append(df_annual[year_end]["Total Electricity System Ramping max"]) + summary_data["System Ramping (Sum)"].append(df_annual[year_end]["Total Electricity System Ramping sum"]) + summary_data["System Ramping Cooling (Max)"].append(df_annual[year_end]["Total Thermal Cooling Energy System Ramping max"]) + summary_data["System Ramping Cooling (Sum)"].append(df_annual[year_end]["Total Thermal Cooling Energy System Ramping sum"]) + summary_data["System Ramping Heating (Max)"].append(df_annual[year_end]["Total Thermal Heating Energy System Ramping max"]) + summary_data["System Ramping Heating (Sum)"].append(df_annual[year_end]["Total Thermal Heating Energy System Ramping sum"]) # need to convert the summary_data into format: [['tom', 10, 15], ['nicholas', 15, 17], ['julian', 14, 30]] new_summary_data = [] for key, value in summary_data.items(): - new_summary_data.append([key] + value) + new_summary_data.append([key, *value]) - self.grid_summary = pd.DataFrame( - data=new_summary_data, columns=summary_data_columns - ) - self.grid_summary.set_index(["Metric", "Units"], inplace=True) + self.grid_summary = pd.DataFrame(data=new_summary_data, columns=summary_data_columns) + self.grid_summary = self.grid_summary.set_index(["Metric", "Units"]) finally: pass @@ -1223,7 +1002,7 @@ def create_summary_results(self) -> None: try: # grab the end_use_summary data from each analysis, starting with urbanopt then each modelica analysis self.end_use_summary = self.urbanopt.end_use_summary - for analysis_name in self.modelica.keys(): + for analysis_name in self.modelica: self.end_use_summary = pd.concat( [ self.end_use_summary, @@ -1233,9 +1012,7 @@ def create_summary_results(self) -> None: ) # check if there are duplicate units columns, and if so, only keep the first - self.end_use_summary = self.end_use_summary.loc[ - :, ~self.end_use_summary.columns.duplicated() - ] + self.end_use_summary = self.end_use_summary.loc[:, ~self.end_use_summary.columns.duplicated()] finally: pass @@ -1245,7 +1022,7 @@ def create_summary_results(self) -> None: # set the column name to the analysis name self.grid_metrics_annual.columns = ["Non-Connected"] - for analysis_name in self.modelica.keys(): + for analysis_name in self.modelica: self.grid_metrics_annual = pd.concat( [ self.grid_metrics_annual, @@ -1254,19 +1031,14 @@ def create_summary_results(self) -> None: axis=1, ) # rename the column to the analysis name - self.grid_metrics_annual.rename( - columns={self.grid_metrics_annual.columns[-1]: analysis_name}, - inplace=True, - ) + self.grid_metrics_annual = self.grid_metrics_annual.rename(columns={self.grid_metrics_annual.columns[-1]: analysis_name}) finally: pass return True @classmethod - def get_list_of_valid_result_folders( - cls, root_analysis_path: Path - ) -> Tuple[dict, dict]: + def get_list_of_valid_result_folders(cls, root_analysis_path: Path) -> Tuple[dict, dict]: """Parse through the root_analysis_path and return a dict of valid result folders that can be loaded and processed. 
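As a usage note for the classmethod whose docstring this hunk shows (get_list_of_valid_result_folders): a hedged sketch of how it is typically called. The directory path is hypothetical, and the tuple order (valid results first, failed or empty folders second) is assumed from the docstring:

from pathlib import Path

from urbanopt_des.urbanopt_analysis import URBANoptAnalysis

# Scans each simulation folder, flags runs whose dslog.txt reports an error (or is
# missing) or that lack a district.mat file, and keys the valid runs by the name
# stored in analysis_name.txt.
results, bad_or_empty = URBANoptAnalysis.get_list_of_valid_result_folders(Path("des_analyses"))
for name, info in results.items():
    print(f"OK: {name} -> {info['path_to_analysis']}")
for folder, info in bad_or_empty.items():
    print(f"Skipped: {folder} ({info['error']})")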
Also return dict of folders that have simulation errors or empty results @@ -1288,45 +1060,31 @@ def get_list_of_valid_result_folders( dslog_file = sim_folder.parent / "dslog.txt" error = False if dslog_file.exists(): - with open(dslog_file, "r") as f: + with open(dslog_file) as f: lines = f.readlines() for line in lines: if "Error" in line: error = True bad_or_empty_results[sim_folder.parent] = {} - bad_or_empty_results[sim_folder.parent][ - "path_to_analysis" - ] = sim_folder.parent + bad_or_empty_results[sim_folder.parent]["path_to_analysis"] = sim_folder.parent # from folder - bad_or_empty_results[sim_folder.parent]["name"] = ( - sim_folder.parent.name - ) - bad_or_empty_results[sim_folder.parent]["error"] = ( - "Error in dslog.txt" - ) + bad_or_empty_results[sim_folder.parent]["name"] = sim_folder.parent.name + bad_or_empty_results[sim_folder.parent]["error"] = "Error in dslog.txt" break if 'Integration terminated before reaching "StopTime"' in line: error = True bad_or_empty_results[sim_folder.parent] = {} - bad_or_empty_results[sim_folder.parent][ - "path_to_analysis" - ] = sim_folder.parent + bad_or_empty_results[sim_folder.parent]["path_to_analysis"] = sim_folder.parent # from folder - bad_or_empty_results[sim_folder.parent]["name"] = ( - sim_folder.parent.name - ) - bad_or_empty_results[sim_folder.parent]["error"] = ( - "Error did not reach the stop time" - ) + bad_or_empty_results[sim_folder.parent]["name"] = sim_folder.parent.name + bad_or_empty_results[sim_folder.parent]["error"] = "Error did not reach the stop time" break else: # hmm, no dslog.txt file, then this is an empty folder error = True bad_or_empty_results[sim_folder.parent] = {} - bad_or_empty_results[sim_folder.parent]["path_to_analysis"] = ( - sim_folder.parent - ) + bad_or_empty_results[sim_folder.parent]["path_to_analysis"] = sim_folder.parent # from folder bad_or_empty_results[sim_folder.parent]["name"] = sim_folder.parent.name bad_or_empty_results[sim_folder.parent]["error"] = "No dslog.txt" @@ -1338,14 +1096,10 @@ def get_list_of_valid_result_folders( mat_file = sim_folder.parent / "district.mat" if not mat_file.exists(): bad_or_empty_results[sim_folder.parent] = {} - bad_or_empty_results[sim_folder.parent]["path_to_analysis"] = ( - sim_folder.parent - ) + bad_or_empty_results[sim_folder.parent]["path_to_analysis"] = sim_folder.parent # from folder bad_or_empty_results[sim_folder.parent]["name"] = sim_folder.parent.name - bad_or_empty_results[sim_folder.parent]["error"] = ( - "Does not contain a district.mat file" - ) + bad_or_empty_results[sim_folder.parent]["error"] = "Does not contain a district.mat file" continue # If we are here then there is likely a successful simulation. 
Now store it in a @@ -1354,12 +1108,10 @@ def get_list_of_valid_result_folders( # Get the simulation name from the analysis_name.txt file analysis_name_file = sim_folder.parent / "analysis_name.txt" if analysis_name_file.exists(): - with open(analysis_name_file, "r") as f: + with open(analysis_name_file) as f: analysis_name = f.read().strip() else: - print( - f"Error: could not load analysis_name.txt file for {mat_file.parent}" - ) + print(f"Error: could not load analysis_name.txt file for {mat_file.parent}") results[analysis_name] = { "path_to_analysis": sim_folder.parent, diff --git a/urbanopt_des/urbanopt_geojson.py b/urbanopt_des/urbanopt_geojson.py index ab65b2b..63d8fa8 100644 --- a/urbanopt_des/urbanopt_geojson.py +++ b/urbanopt_des/urbanopt_geojson.py @@ -8,7 +8,7 @@ def __init__(self, filename: Path): self.data = None # read in the JSON file and store it in data - with open(filename, "r") as f: + with open(filename) as f: self.data = json.load(f) def get_building_paths(self, scenario_name: str) -> list[Path]: @@ -16,12 +16,7 @@ def get_building_paths(self, scenario_name: str) -> list[Path]: result = [] for feature in self.data["features"]: if feature["properties"]["type"] == "Building": - building_path = ( - self._filename.parent - / "run" - / scenario_name - / feature["properties"]["id"] - ) + building_path = self._filename.parent / "run" / scenario_name / feature["properties"]["id"] result.append(building_path) # result.append(Path(feature["properties"]["file"])) @@ -36,23 +31,15 @@ def get_building_ids(self) -> list: """Return a list of building names""" result = [] for feature in self.data["features"]: - if ( - "type" in feature["properties"] - and feature["properties"]["type"] == "Building" - ): + if "type" in feature["properties"] and feature["properties"]["type"] == "Building": result.append(feature["properties"]["id"]) + elif "name" in feature["properties"] and feature["properties"]["name"] == "Site Origin": + pass else: - # check if the name is site origin, if so, then this is okay - if ( - "name" in feature["properties"] - and feature["properties"]["name"] == "Site Origin" - ): - pass - else: - # need to implement a reasonable logger. - pass - # print(f"Feature does not have a type Building: {feature}") - # print("Did you forget to call the `update_geojson_from_seed_data` method?") + # need to implement a reasonable logger. 
+ pass + # print(f"Feature does not have a type Building: {feature}") + # print("Did you forget to call the `update_geojson_from_seed_data` method?") return result @@ -78,9 +65,8 @@ def get_building_properties_by_id(self, building_id: str) -> dict: """ result = {} for feature in self.data["features"]: - if feature["properties"]["type"] == "Building": - if feature["properties"]["id"] == building_id: - result = feature["properties"] + if feature["properties"]["type"] == "Building" and feature["properties"]["id"] == building_id: + result = feature["properties"] return result @@ -88,24 +74,20 @@ def get_meters_for_building(self, building_id: str) -> list: """Return a list of meters for the building_id""" result = [] for feature in self.data["features"]: - if feature["properties"]["type"] == "Building": - if feature["properties"]["id"] == building_id: - for meter in feature["properties"].get("meters", []): - result.append(meter["type"]) + if feature["properties"]["type"] == "Building" and feature["properties"]["id"] == building_id: + for meter in feature["properties"].get("meters", []): + result.append(meter["type"]) return result - def get_meter_readings_for_building( - self, building_id: str, meter_type: str - ) -> list: + def get_meter_readings_for_building(self, building_id: str, meter_type: str) -> list: """Return a list of meter readings for the building_id""" result = [] for feature in self.data["features"]: - if feature["properties"]["type"] == "Building": - if feature["properties"]["id"] == building_id: - for meter in feature["properties"].get("meters", []): - if meter["type"] == meter_type: - result = meter["readings"] + if feature["properties"]["type"] == "Building" and feature["properties"]["id"] == building_id: + for meter in feature["properties"].get("meters", []): + if meter["type"] == meter_type: + result = meter["readings"] return result @@ -113,8 +95,7 @@ def get_monthly_readings(self, building_id: str, meter_type: str) -> list: """Return a list of monthly electricity consumption for the building_id""" result = [] for feature in self.data["features"]: - if feature["properties"]["type"] == "Building": - if feature["properties"]["id"] == building_id: - result = feature["properties"]["monthly_electricity"] + if feature["properties"]["type"] == "Building" and feature["properties"]["id"] == building_id: + result = feature["properties"]["monthly_electricity"] return result diff --git a/urbanopt_des/urbanopt_results.py b/urbanopt_des/urbanopt_results.py index c0e3ee5..af8bd0b 100644 --- a/urbanopt_des/urbanopt_results.py +++ b/urbanopt_des/urbanopt_results.py @@ -32,15 +32,11 @@ def __init__(self, uo_path: Path, scenario_name: str) -> None: self.path = uo_path self.scenario_name = scenario_name if not self.path.exists(): - raise Exception( - f"Could not find {self.path} for the URBANopt results. Will not continue." - ) + raise Exception(f"Could not find {self.path} for the URBANopt results. Will not continue.") # check if the run with the scenario name exists if not (self.path / "run" / f"{scenario_name}").exists(): - raise Exception( - f"Could not find {self.path / 'run' / scenario_name} for the URBANopt results. Will not continue." - ) + raise Exception(f"Could not find {self.path / 'run' / scenario_name} for the URBANopt results. 
Will not continue.") # path to store outputs not specific to the scenario self.output_path = self.path / "output" @@ -88,9 +84,7 @@ def calculate_grid_metrics( for meter in meters: df_tmp = self.data_15min_to_process.copy() - df_tmp = df_tmp.groupby([pd.Grouper(freq="1d")])[meter].agg( - ["max", "idxmax", "min", "idxmin", "mean", "sum"] - ) + df_tmp = df_tmp.groupby([pd.Grouper(freq="1d")])[meter].agg(["max", "idxmax", "min", "idxmin", "mean", "sum"]) # update the column names and save back into the results data frame df_tmp.columns = [ @@ -106,28 +100,19 @@ def calculate_grid_metrics( df_tmp[f"{meter} PVR"] = df_tmp[f"{meter} Max"] / df_tmp[f"{meter} Min"] # calculate the load factor - df_tmp[f"{meter} Load Factor"] = ( - df_tmp[f"{meter} Mean"] / df_tmp[f"{meter} Max"] - ) + df_tmp[f"{meter} Load Factor"] = df_tmp[f"{meter} Mean"] / df_tmp[f"{meter} Max"] # add in the system ramping, which has to be calculated from the original data frame df_tmp2 = self.data_15min_to_process.copy() df_tmp2[f"{meter} System Ramping"] = df_tmp2[meter].diff().abs().fillna(0) - df_tmp2 = ( - df_tmp2.groupby([pd.Grouper(freq="1d")])[f"{meter} System Ramping"].agg( - ["sum"] - ) - / 1e6 - ) + df_tmp2 = df_tmp2.groupby([pd.Grouper(freq="1d")])[f"{meter} System Ramping"].agg(["sum"]) / 1e6 df_tmp2.columns = [f"{meter} System Ramping"] df_tmp = pd.concat([df_tmp, df_tmp2], axis=1, join="inner") if self.grid_metrics_daily is None: self.grid_metrics_daily = df_tmp else: - self.grid_metrics_daily = pd.concat( - [self.grid_metrics_daily, df_tmp], axis=1, join="inner" - ) + self.grid_metrics_daily = pd.concat([self.grid_metrics_daily, df_tmp], axis=1, join="inner") # aggregate the df_daily daily data to annual metrics. For the maxes/mins, we only want the max of the max # and the min of the min. @@ -149,13 +134,9 @@ def calculate_grid_metrics( # there is only one year of data, so grab the idmax/idmin of the first element. 
If # we expand to multiple years, then this will need to be updated id_lookup = df_tmp[f"{meter} Max idxmax"][0] - df_tmp[f"{meter} Max idxmax"] = self.grid_metrics_daily.loc[id_lookup][ - f"{meter} Max Datetime" - ] + df_tmp[f"{meter} Max idxmax"] = self.grid_metrics_daily.loc[id_lookup][f"{meter} Max Datetime"] id_lookup = df_tmp[f"{meter} Min idxmin"][0] - df_tmp[f"{meter} Min idxmin"] = self.grid_metrics_daily.loc[id_lookup][ - f"{meter} Min Datetime" - ] + df_tmp[f"{meter} Min idxmin"] = self.grid_metrics_daily.loc[id_lookup][f"{meter} Min Datetime"] # rename these two columns to remove the idxmax/idxmin nomenclature df_tmp = df_tmp.rename( columns={ @@ -165,18 +146,10 @@ def calculate_grid_metrics( ) # Add the MWh related metrics, can't sum up the 15 minute data, so we have to sum up the hourly - df_tmp["Total Electricity"] = ( - self.data["Total Electricity"].resample("1y").sum() / 1e6 - ) # MWh - df_tmp["Total Natural Gas"] = ( - self.data["Total Natural Gas"].resample("1y").sum() / 1e6 - ) # MWh - df_tmp["Total Thermal Cooling Energy"] = ( - self.data["Total Thermal Cooling Energy"].resample("1y").sum() / 1e6 - ) # MWh - df_tmp["Total Thermal Heating Energy"] = ( - self.data["Total Thermal Heating Energy"].resample("1y").sum() / 1e6 - ) # MWh + df_tmp["Total Electricity"] = self.data["Total Electricity"].resample("1y").sum() / 1e6 # MWh + df_tmp["Total Natural Gas"] = self.data["Total Natural Gas"].resample("1y").sum() / 1e6 # MWh + df_tmp["Total Thermal Cooling Energy"] = self.data["Total Thermal Cooling Energy"].resample("1y").sum() / 1e6 # MWh + df_tmp["Total Thermal Heating Energy"] = self.data["Total Thermal Heating Energy"].resample("1y").sum() / 1e6 # MWh # graph the top 5 peak values for each of the meters meters = [ @@ -190,9 +163,9 @@ def calculate_grid_metrics( df_to_proc = self.data_15min_to_process.copy() if "Cooling" in meter: # values are negative, so ascending is actually descending - df_to_proc.sort_values(by=meter, ascending=True, inplace=True) + df_to_proc = df_to_proc.sort_values(by=meter, ascending=True) else: - df_to_proc.sort_values(by=meter, ascending=False, inplace=True) + df_to_proc = df_to_proc.sort_values(by=meter, ascending=False) df_to_proc = df_to_proc.head(50) # save the top 5 values to the df_tmp @@ -362,9 +335,7 @@ def create_summary(self): for column in summary_columns: # check if the column exists in the data frame and if not, then set the value to zero! 
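Stepping back to the grid-metrics hunk above: the derived quantities are simple ratios of the daily aggregates (peak-to-valley ratio = daily max / daily min, load factor = daily mean / daily max, system ramping = daily sum of absolute 15-minute changes, divided by 1e6). A self-contained sketch of the same arithmetic on an invented 15-minute series:

import numpy as np
import pandas as pd

# One day of fake 15-minute electricity data standing in for data_15min_to_process.
index = pd.date_range("2017-01-01", periods=96, freq="15min")
demand = pd.Series(1_000_000 + 500_000 * np.sin(np.linspace(0, 2 * np.pi, 96)), index=index)

daily = demand.groupby(pd.Grouper(freq="1d")).agg(["max", "min", "mean"])
daily["PVR"] = daily["max"] / daily["min"]           # peak-to-valley ratio
daily["Load Factor"] = daily["mean"] / daily["max"]  # average load relative to peak
# System ramping: daily sum of absolute period-to-period changes, divided by 1e6 as in the hunk.
daily["System Ramping"] = demand.diff().abs().fillna(0).groupby(pd.Grouper(freq="1d")).sum() / 1e6
print(daily.round(3))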
if column["name"] in self.data_annual.columns: - self.end_use_summary["Non-Connected"][column["display_name"]] = float( - self.data_annual[column["name"]].iloc[0] - ) + self.end_use_summary["Non-Connected"][column["display_name"]] = float(self.data_annual[column["name"]].iloc[0]) else: self.end_use_summary["Non-Connected"][column["display_name"]] = 0.0 @@ -384,9 +355,7 @@ def save_dataframes(self) -> None: self.grid_metrics_daily.to_csv(self.output_path / "grid_metrics_daily.csv") if self.grid_metrics_annual is not None: - self.grid_metrics_annual.to_csv( - self.output_path / "grid_metrics_annual.csv" - ) + self.grid_metrics_annual.to_csv(self.output_path / "grid_metrics_annual.csv") def create_aggregations(self, building_names: list[str]) -> None: """Aggregate the results from all the buildings together to get the totals @@ -436,61 +405,35 @@ def create_aggregations(self, building_names: list[str]) -> None: for i in building_names: # By fuels - building_aggs["Total Building Electricity"]["agg_columns"].append( - f"Electricity:Facility Building {i}" - ) - building_aggs["Total Building Natural Gas"]["agg_columns"].append( - f"NaturalGas:Facility Building {i}" - ) + building_aggs["Total Building Electricity"]["agg_columns"].append(f"Electricity:Facility Building {i}") + building_aggs["Total Building Natural Gas"]["agg_columns"].append(f"NaturalGas:Facility Building {i}") # Building level HVAC aggregations - building_aggs["Total Building Cooling Electricity"][ - "agg_columns" - ].append(f"Cooling:Electricity Building {i}") - building_aggs["Total Building Heating Electricity"][ - "agg_columns" - ].append(f"Heating:Electricity Building {i}") - building_aggs["Total Building Heating Natural Gas"][ - "agg_columns" - ].append( + building_aggs["Total Building Cooling Electricity"]["agg_columns"].append(f"Cooling:Electricity Building {i}") + building_aggs["Total Building Heating Electricity"]["agg_columns"].append(f"Heating:Electricity Building {i}") + building_aggs["Total Building Heating Natural Gas"]["agg_columns"].append( f"Heating:NaturalGas Building {i}", ) - building_aggs["Total Building Fans Electricity"]["agg_columns"].append( - f"Fans:Electricity Building {i}" - ) - building_aggs["Total Building Pumps Electricity"]["agg_columns"].append( - f"Pumps:Electricity Building {i}" - ) - building_aggs["Total Building Heat Rejection Electricity"][ - "agg_columns" - ].append(f"HeatRejection:Electricity Building {i}") - building_aggs["Total Building Heat Rejection Natural Gas"][ - "agg_columns" - ].append(f"HeatRejection:NaturalGas Building {i}") - building_aggs["Total Building Water Systems Natural Gas"][ - "agg_columns" - ].append(f"WaterSystems:NaturalGas Building {i}") - building_aggs["Total Building Water Systems Electricity"][ - "agg_columns" - ].append(f"WaterSystems:Electricity Building {i}") + building_aggs["Total Building Fans Electricity"]["agg_columns"].append(f"Fans:Electricity Building {i}") + building_aggs["Total Building Pumps Electricity"]["agg_columns"].append(f"Pumps:Electricity Building {i}") + building_aggs["Total Building Heat Rejection Electricity"]["agg_columns"].append(f"HeatRejection:Electricity Building {i}") + building_aggs["Total Building Heat Rejection Natural Gas"]["agg_columns"].append(f"HeatRejection:NaturalGas Building {i}") + building_aggs["Total Building Water Systems Natural Gas"]["agg_columns"].append(f"WaterSystems:NaturalGas Building {i}") + building_aggs["Total Building Water Systems Electricity"]["agg_columns"].append(f"WaterSystems:Electricity Building {i}") # 
Interior and exterior lighting
-                building_aggs["Total Building Interior Lighting"]["agg_columns"].append(
-                    f"InteriorLights:Electricity Building {i}"
-                )
-                building_aggs["Total Building Exterior Lighting"]["agg_columns"].append(
-                    f"ExteriorLights:Electricity Building {i}"
-                )
+                building_aggs["Total Building Interior Lighting"]["agg_columns"].append(f"InteriorLights:Electricity Building {i}")
+                building_aggs["Total Building Exterior Lighting"]["agg_columns"].append(f"ExteriorLights:Electricity Building {i}")
                 # Interior and exterior equipment
-                building_aggs["Total Building Interior Equipment Electricity"][
-                    "agg_columns"
-                ].append(f"InteriorEquipment:Electricity Building {i}")
-                building_aggs["Total Building Interior Equipment Natural Gas"][
-                    "agg_columns"
-                ].append(f"InteriorEquipment:NaturalGas Building {i}")
-                building_aggs["Total Building Exterior Equipment Electricity"][
-                    "agg_columns"
-                ].append(f"ExteriorEquipment:Electricity Building {i}")
+                building_aggs["Total Building Interior Equipment Electricity"]["agg_columns"].append(
+                    f"InteriorEquipment:Electricity Building {i}"
+                )
+                building_aggs["Total Building Interior Equipment Natural Gas"]["agg_columns"].append(
+                    f"InteriorEquipment:NaturalGas Building {i}"
+                )
+                building_aggs["Total Building Exterior Equipment Electricity"]["agg_columns"].append(
+                    f"ExteriorEquipment:Electricity Building {i}"
+                )
                 building_aggs["Total Building Interior Equipment"]["agg_columns"] += [
                     f"InteriorEquipment:Electricity Building {i}",
                     f"InteriorEquipment:NaturalGas Building {i}",
@@ -536,13 +479,9 @@ def create_aggregations(self, building_names: list[str]) -> None:
             # Since the dataframe needs to be consistent with the Modelica and DES dataframes, add in the
             # following columns, which have no totaling or aggregating
             self.data["Total Electricity"] = self.data["Total Building Electricity"]
-            self.data_15min["Total Electricity"] = self.data_15min[
-                "Total Building Electricity"
-            ]
+            self.data_15min["Total Electricity"] = self.data_15min["Total Building Electricity"]
             self.data["Total Natural Gas"] = self.data["Total Building Natural Gas"]
-            self.data_15min["Total Natural Gas"] = self.data_15min[
-                "Total Building Natural Gas"
-            ]
+            self.data_15min["Total Natural Gas"] = self.data_15min["Total Building Natural Gas"]
             self.data["Total ETS Electricity"] = 0
             self.data_15min["Total ETS Electricity"] = 0
             self.data["Total Thermal Cooling Energy"] = 0
@@ -552,17 +491,10 @@ def create_aggregations(self, building_names: list[str]) -> None:
             self.data["District Loop Energy"] = 0
             self.data_15min["District Loop Energy"] = 0
             # Now mix energy types for the totals
-            self.data["Total Energy"] = (
-                self.data["Total Electricity"] + self.data["Total Natural Gas"]
-            )
-            self.data_15min["Total Energy"] = (
-                self.data_15min["Total Electricity"]
-                + self.data_15min["Total Natural Gas"]
-            )
+            self.data["Total Energy"] = self.data["Total Electricity"] + self.data["Total Natural Gas"]
+            self.data_15min["Total Energy"] = self.data_15min["Total Electricity"] + self.data_15min["Total Natural Gas"]
             self.data["Total Building and ETS Energy"] = (
-                self.data["Total Building Electricity"]
-                + self.data["Total Building Natural Gas"]
-                + self.data["Total ETS Electricity"]
+                self.data["Total Building Electricity"] + self.data["Total Building Natural Gas"] + self.data["Total ETS Electricity"]
             )
             self.data_15min["Total Building and ETS Energy"] = (
                 self.data_15min["Total Building Electricity"]
@@ -573,9 +505,7 @@ def create_aggregations(self, building_names: list[str]) -> None:
         finally:
             pass

-    def process_results(
-        self, building_names: list[str], year_of_data: int = 2017
-    ) -> None:
+    def process_results(self, building_names: list[str], year_of_data: int = 2017) -> None:
         """The building-by-building end uses are only available in each run
         directory's feature report. This method will create a dataframe with the end uses
         for each building.
@@ -590,9 +520,7 @@ def process_results(
         self.data = None
         for building_id in building_names:
             print(f"Processing building {building_id}")
-            feature_report = self.get_urbanopt_default_feature_report(
-                self.path / "run" / f"{self.scenario_name}" / f"{building_id}"
-            )
+            feature_report = self.get_urbanopt_default_feature_report(self.path / "run" / f"{self.scenario_name}" / f"{building_id}")
             # print(feature_report.head())
             # rename and convert units in the feature_report before concatenating with the others
             for (
@@ -603,20 +531,14 @@ def process_results(
                     continue
                 # set the new column name to include the building number
                 new_column_name = f"{feature_column['name']} Building {building_id}"
-                feature_report[new_column_name] = (
-                    feature_report[column_name] * feature_column["conversion"]
-                )
+                feature_report[new_column_name] = feature_report[column_name] * feature_column["conversion"]
                 feature_report = feature_report.drop(columns=[column_name])

             # convert Datetime column in data frame to be datetime from the string. The year
             # should be set to a year that has the day of week starting correctly for the real data
             # This defaults to year_of_data
-            feature_report["Datetime"] = pd.to_datetime(
-                feature_report["Datetime"], format="%Y/%m/%d %H:%M:%S"
-            )
-            feature_report["Datetime"] = feature_report["Datetime"].apply(
-                lambda x: x.replace(year=year_of_data)
-            )
+            feature_report["Datetime"] = pd.to_datetime(feature_report["Datetime"], format="%Y/%m/%d %H:%M:%S")
+            feature_report["Datetime"] = feature_report["Datetime"].apply(lambda x: x.replace(year=year_of_data))

             # set the datetime column and make it the index
             feature_report = feature_report.set_index("Datetime")
@@ -672,26 +594,17 @@ def calculate_carbon_emissions(

         # Calculate the natural gas emissions, emissions data is in kg/MWh so Wh->MWh, then divide by another 1000 to get mtCO2e
         self.data["Total Building Natural Gas Carbon Emissions"] = (
-            self.data["Total Building Natural Gas"]
-            * hourly_emissions_data.other_fuels["natural_gas"]
-            / 1e6
-            / 1000
+            self.data["Total Building Natural Gas"] * hourly_emissions_data.other_fuels["natural_gas"] / 1e6 / 1000
         )
-        self.data["Total Natural Gas Carbon Emissions"] = self.data[
-            "Total Building Natural Gas Carbon Emissions"
-        ]
+        self.data["Total Natural Gas Carbon Emissions"] = self.data["Total Building Natural Gas Carbon Emissions"]

         # Calculate the electricity carbon emissions, emissions data is in kg/MWh, so Wh->Mwh, then divide by another 1000 to get mtCO2e
         self.data[f"Total Electricity Carbon Emissions {future_year}"] = (
-            self.data["Total Electricity"]
-            * hourly_emissions_data.data[lookup_egrid_subregion]
-            / 1e6
-            / 1000
+            self.data["Total Electricity"] * hourly_emissions_data.data[lookup_egrid_subregion] / 1e6 / 1000
         )
         # units are in kg, convert to metric tons
         self.data[f"Total Carbon Emissions {future_year}"] = (
-            self.data["Total Natural Gas Carbon Emissions"]
-            + self.data[f"Total Electricity Carbon Emissions {future_year}"]
+            self.data["Total Natural Gas Carbon Emissions"] + self.data[f"Total Electricity Carbon Emissions {future_year}"]
         )

     def scale_results(
@@ -731,59 +644,31 @@ def scale_results(

         meter_names_for_building = []
         for building_id in scalars["building_id"].unique():
-            meter_names_for_building = [
-                meter_name + f" {building_id}" for meter_name in meter_names
-            ]
+            meter_names_for_building = [meter_name + f" {building_id}" for meter_name in meter_names]
             for df in [self.data, self.data_15min]:
                 # for each building_id in the scalar dataframe. Be careful not
                 # to apply scaling factors to the same building twice from multiple
                 # files.
-                elec_meters = [
-                    meter_name
-                    for meter_name in meter_names_for_building
-                    if "Electricity" in meter_name
-                ]
-                ng_meters = [
-                    meter_name
-                    for meter_name in meter_names_for_building
-                    if "NaturalGas" in meter_name
-                ]
+                elec_meters = [meter_name for meter_name in meter_names_for_building if "Electricity" in meter_name]
+                ng_meters = [meter_name for meter_name in meter_names_for_building if "NaturalGas" in meter_name]
                 for meter_type in ["Electricity", "NaturalGas"]:
                     # for each row in the analysis results dataframe, grab the scalar and multiply it by the meter
                     # print(f"Applying scalars for meter year {year_of_meters}, sim year {year_of_data}, building {building_id}, and meter {meter_type}")
-                    for _, scalar in scalars[
-                        scalars["start_time"].dt.year == year_of_meters
-                    ].iterrows():
+                    for _, scalar in scalars[scalars["start_time"].dt.year == year_of_meters].iterrows():
                         # this is strange, but we compare the year of the meter with the year of the simulation, which
                         # can be different. So convert the 'start_time' and 'end_time' of the meters to be the year of the
                         # dataframe data
-                        scalar["start_time"] = scalar["start_time"].replace(
-                            year=year_of_data
-                        )
+                        scalar["start_time"] = scalar["start_time"].replace(year=year_of_data)
                         scalar["end_time"] = scalar["end_time"].replace(year=year_of_data)
-                        row_filter = (df.index >= scalar["start_time"]) & (
-                            df.index <= scalar["end_time"]
-                        )
+                        row_filter = (df.index >= scalar["start_time"]) & (df.index <= scalar["end_time"])
                         elec_scalar = scalar["scaling_factor_electricity"]
                         ng_scalar = scalar["scaling_factor_natural_gas"]
                         # print(f"data range: {scalar['start_time']} to {scalar['end_time']} with elec scalar {elec_scalar} and ng scalar {ng_scalar}")
-                        if (
-                            meter_type == "Electricity"
-                            and elec_scalar is not None
-                            and not pd.isna(elec_scalar)
-                        ):
-                            df.loc[row_filter, elec_meters] = (
-                                df.loc[row_filter, elec_meters] * elec_scalar
-                            )
-                        elif (
-                            meter_type == "NaturalGas"
-                            and ng_scalar is not None
-                            and not pd.isna(ng_scalar)
-                        ):
-                            df.loc[row_filter, ng_meters] = (
-                                df.loc[row_filter, ng_meters] * ng_scalar
-                            )
+                        if meter_type == "Electricity" and elec_scalar is not None and not pd.isna(elec_scalar):
+                            df.loc[row_filter, elec_meters] = df.loc[row_filter, elec_meters] * elec_scalar
+                        elif meter_type == "NaturalGas" and ng_scalar is not None and not pd.isna(ng_scalar):
+                            df.loc[row_filter, ng_meters] = df.loc[row_filter, ng_meters] * ng_scalar

     def get_urbanopt_feature_report_columns(self) -> dict[str, dict[str, object]]:
         """Return the feature report columns with the metadata such as
@@ -873,19 +758,13 @@ def get_urbanopt_feature_report_columns(self) -> dict[str, dict[str, object]]:
                 columns[key]["conversion"] = 1000.0
                 columns[key]["name"] = key
                 columns[key]["description"] = key
-            elif "NaturalGas" in key:
+            elif "NaturalGas" in key or ("DistrictCooling" in key or "DistrictHeating" in key):
                 columns[key]["unit_original"] = "kBtu"
                 columns[key]["units"] = "Wh"
                 columns[key]["conversion"] = 293.071  # 1 kBtu = 293.071 Wh
                 columns[key]["name"] = key
                 columns[key]["description"] = key
-            elif "DistrictCooling" in key or "DistrictHeating" in key:
-                columns[key]["unit_original"] = "kBtu"
-                columns[key]["units"] = "Wh"
-                columns[key]["conversion"] = 293.071  # 1 kBtu = 293.071 Wh
-                columns[key]["name"] = key
-                columns[key]["description"] = key
-            elif "Datetime" == key:
+            elif key == "Datetime":
                 continue
             else:
                 raise Exception(f"Could not find units for {key}")
@@ -911,13 +790,9 @@ def get_urbanopt_default_feature_report(self, search_dir: Path):
         if len(dirs) == 1:
             report_file = dirs[0] / "default_feature_reports.csv"
         elif len(dirs) == 0:
-            raise Exception(
-                f"Could not find default_feature_report.csv in {search_dir}"
-            )
+            raise Exception(f"Could not find default_feature_report.csv in {search_dir}")
         else:
-            raise Exception(
-                f"More than one default_feature_reports.csv found in dirs: {dirs}"
-            )
+            raise Exception(f"More than one default_feature_reports.csv found in dirs: {dirs}")

         if report_file.exists():
             # read the header row of the CSV file and grab the column names
@@ -931,18 +806,15 @@ def get_urbanopt_default_feature_report(self, search_dir: Path):
             for column in columns:
                 column_wo_units = column.split("(")[0]
                 units = column.split("(")[-1].split(")")[0]
-                if column_wo_units not in desired_columns.keys():
+                if column_wo_units not in desired_columns:
                     # then move on, because we don't care about this column
                     # print(f'Column {column_wo_units} not desired.')
                     continue

                 # extract the units if they exist and check against desired. It is okay if units are blank, we
                 # just assume that they are what we wanted.
-                if not units == "" and units is not None:
-                    if units != desired_columns[column_wo_units]["unit_original"]:
-                        raise Exception(
-                            f"Units of {units} for {column_wo_units} are not {desired_columns[column_wo_units]['unit_original']}"
-                        )
+                if units not in ["", None, desired_columns[column_wo_units]["unit_original"]]:
+                    raise Exception(f"Units of {units} for {column_wo_units} are not {desired_columns[column_wo_units]['unit_original']}")

                 # add the column to the rename mapping
                 rename_mapping[column] = column_wo_units
@@ -956,6 +828,4 @@ def get_urbanopt_default_feature_report(self, search_dir: Path):
             report[cols[1:]] = report[cols[1:]].apply(pd.to_numeric, errors="coerce")
             return report
         else:
-            raise Exception(
-                f"Could not find default_feature_report.csv in {search_dir}"
-            )
+            raise Exception(f"Could not find default_feature_report.csv in {search_dir}")