diff --git a/Pipfile b/Pipfile index 85a607db..e7b246c8 100644 --- a/Pipfile +++ b/Pipfile @@ -10,6 +10,7 @@ urllib3 = "*" pint = "*" pandas = "*" scipy = "*" +onshape_client = "*" [dev-packages] aguaclara = {editable = true, path = "."} diff --git a/Pipfile.lock b/Pipfile.lock index 61ec20d5..95eb9e94 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "021335e1e4c104ffbb8bdcec7948b81f893d4fc802451eff36c15cdf8694a8a1" + "sha256": "633ae50217b910e195624ee3dbf63eb4b5d73b5aaafe5fe3817f1dc2c2186948" }, "pipfile-spec": 6, "requires": { @@ -16,6 +16,20 @@ ] }, "default": { + "certifi": { + "hashes": [ + "sha256:1f422849db327d534e3d0c5f02a263458c3955ec0aae4ff09b95f195c59f4edd", + "sha256:f05def092c44fbf25834a51509ef6e631dc19765ab8a57b4e7ab85531f0a9cf4" + ], + "version": "==2020.11.8" + }, + "chardet": { + "hashes": [ + "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", + "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" + ], + "version": "==3.0.4" + }, "cycler": { "hashes": [ "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d", @@ -23,6 +37,14 @@ ], "version": "==0.10.0" }, + "idna": { + "hashes": [ + "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", + "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.10" + }, "importlib-metadata": { "hashes": [ "sha256:77a540690e24b0305878c37ffd421785a6f7e53c8b5720d211b211de8d0e95da", @@ -108,6 +130,13 @@ "index": "pypi", "version": "==3.3.3" }, + "nulltype": { + "hashes": [ + "sha256:16ae565745118e37e0558441f5821c76351d8c3a789640b5bca277cf65b2271b", + "sha256:64aa3cb2ab5e904d1b37175b9b922bea268c13f9ce32e3d373313150ab5ef272" + ], + "version": "==2.3.1" + }, "numpy": { "hashes": [ "sha256:08308c38e44cc926bdfce99498b21eec1f848d24c302519e64203a8da99a97db", @@ -148,6 +177,22 @@ "markers": "python_version >= '3.6'", "version": "==1.19.4" }, + "oauthlib": { + "hashes": [ + "sha256:bee41cc35fcca6e988463cacc3bcb8a96224f470ca547e697b604cc697b2f889", + "sha256:df884cd6cbe20e32633f1db1072e9356f53638e4361bef4e8b03c9127c9328ea" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==3.1.0" + }, + "onshape-client": { + "hashes": [ + "sha256:48e08936b37e49c4cd33f66cf7f32ef09e9c34e97165f103218eaf105e568254", + "sha256:95024b20f998523a8638627b8003fa47e5808e554bf924968aee3f6b22660f59" + ], + "index": "pypi", + "version": "==1.6.3" + }, "packaging": { "hashes": [ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", @@ -251,6 +296,59 @@ ], "version": "==2020.4" }, + "requests": { + "hashes": [ + "sha256:7f1a0b932f4a60a1a65caa4263921bb7d9ee911957e0ae4a23a6dd08185ad5f8", + "sha256:e786fa28d8c9154e6a4de5d46a1d921b8749f8b74e28bde23768e5e16eece998" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==2.25.0" + }, + "requests-oauthlib": { + "hashes": [ + "sha256:7f71572defaecd16372f9006f33c2ec8c077c3cfa6f5911a9a90202beb513f3d", + "sha256:b4261601a71fd721a8bd6d7aa1cc1d6a8a93b4a9f5e96626f8e4d91e8beeaa6a", + "sha256:fa6c47b933f01060936d87ae9327fead68768b69c6c9ea2109c48be30f2d4dbc" + ], + "version": "==1.3.0" + }, + "ruamel.yaml": { + "hashes": [ + "sha256:012b9470a0ea06e4e44e99e7920277edf6b46eee0232a04487ea73a7386340a5", + 
"sha256:076cc0bc34f1966d920a49f18b52b6ad559fbe656a0748e3535cf7b3f29ebf9e" + ], + "version": "==0.16.12" + }, + "ruamel.yaml.clib": { + "hashes": [ + "sha256:058a1cc3df2a8aecc12f983a48bda99315cebf55a3b3a5463e37bb599b05727b", + "sha256:2602e91bd5c1b874d6f93d3086f9830f3e907c543c7672cf293a97c3fabdcd91", + "sha256:28116f204103cb3a108dfd37668f20abe6e3cafd0d3fd40dba126c732457b3cc", + "sha256:2d24bd98af676f4990c4d715bcdc2a60b19c56a3fb3a763164d2d8ca0e806ba7", + "sha256:30dca9bbcbb1cc858717438218d11eafb78666759e5094dd767468c0d577a7e7", + "sha256:44c7b0498c39f27795224438f1a6be6c5352f82cb887bc33d962c3a3acc00df6", + "sha256:464e66a04e740d754170be5e740657a3b3b6d2bcc567f0c3437879a6e6087ff6", + "sha256:4df5019e7783d14b79217ad9c56edf1ba7485d614ad5a385d1b3c768635c81c0", + "sha256:4e52c96ca66de04be42ea2278012a2342d89f5e82b4512fb6fb7134e377e2e62", + "sha256:5254af7d8bdf4d5484c089f929cb7f5bafa59b4f01d4f48adda4be41e6d29f99", + "sha256:52ae5739e4b5d6317b52f5b040b1b6639e8af68a5b8fd606a8b08658fbd0cab5", + "sha256:53b9dd1abd70e257a6e32f934ebc482dac5edb8c93e23deb663eac724c30b026", + "sha256:73b3d43e04cc4b228fa6fa5d796409ece6fcb53a6c270eb2048109cbcbc3b9c2", + "sha256:74161d827407f4db9072011adcfb825b5258a5ccb3d2cd518dd6c9edea9e30f1", + "sha256:839dd72545ef7ba78fd2aa1a5dd07b33696adf3e68fae7f31327161c1093001b", + "sha256:8e8fd0a22c9d92af3a34f91e8a2594eeb35cba90ab643c5e0e643567dc8be43e", + "sha256:a873e4d4954f865dcb60bdc4914af7eaae48fb56b60ed6daa1d6251c72f5337c", + "sha256:ab845f1f51f7eb750a78937be9f79baea4a42c7960f5a94dde34e69f3cce1988", + "sha256:b1e981fe1aff1fd11627f531524826a4dcc1f26c726235a52fcb62ded27d150f", + "sha256:b4b0d31f2052b3f9f9b5327024dc629a253a83d8649d4734ca7f35b60ec3e9e5", + "sha256:c6ac7e45367b1317e56f1461719c853fd6825226f45b835df7436bb04031fd8a", + "sha256:daf21aa33ee9b351f66deed30a3d450ab55c14242cfdfcd377798e2c0d25c9f1", + "sha256:e9f7d1d8c26a6a12c23421061f9022bb62704e38211fe375c645485f38df34a2", + "sha256:f6061a31880c1ed6b6ce341215336e2f3d0c1deccd84957b6fa8ca474b41e89f" + ], + "markers": "python_version < '3.9' and platform_python_implementation == 'CPython'", + "version": "==0.2.2" + }, "scipy": { "hashes": [ "sha256:168c45c0c32e23f613db7c9e4e780bc61982d71dcd406ead746c7c7c2f2004ce", @@ -327,6 +425,14 @@ "markers": "python_version >= '3.5'", "version": "==2.4.2" }, + "atomicwrites": { + "hashes": [ + "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197", + "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a" + ], + "markers": "sys_platform == 'win32'", + "version": "==1.4.0" + }, "attrs": { "hashes": [ "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6", @@ -373,6 +479,14 @@ "index": "pypi", "version": "==2.1.10" }, + "colorama": { + "hashes": [ + "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b", + "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2" + ], + "markers": "sys_platform == 'win32' and sys_platform == 'win32' and sys_platform == 'win32'", + "version": "==0.4.4" + }, "coverage": { "hashes": [ "sha256:0203acd33d2298e19b57451ebb0bed0ab0c602e5cf5a818591b4918b1f97d516", @@ -625,6 +739,13 @@ ], "version": "==0.6.1" }, + "nulltype": { + "hashes": [ + "sha256:16ae565745118e37e0558441f5821c76351d8c3a789640b5bca277cf65b2271b", + "sha256:64aa3cb2ab5e904d1b37175b9b922bea268c13f9ce32e3d373313150ab5ef272" + ], + "version": "==2.3.1" + }, "numpy": { "hashes": [ "sha256:08308c38e44cc926bdfce99498b21eec1f848d24c302519e64203a8da99a97db", @@ -665,6 +786,22 @@ "markers": "python_version >= '3.6'", 
"version": "==1.19.4" }, + "oauthlib": { + "hashes": [ + "sha256:bee41cc35fcca6e988463cacc3bcb8a96224f470ca547e697b604cc697b2f889", + "sha256:df884cd6cbe20e32633f1db1072e9356f53638e4361bef4e8b03c9127c9328ea" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==3.1.0" + }, + "onshape-client": { + "hashes": [ + "sha256:48e08936b37e49c4cd33f66cf7f32ef09e9c34e97165f103218eaf105e568254", + "sha256:95024b20f998523a8638627b8003fa47e5808e554bf924968aee3f6b22660f59" + ], + "index": "pypi", + "version": "==1.6.3" + }, "packaging": { "hashes": [ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", @@ -837,9 +974,17 @@ "sha256:7f1a0b932f4a60a1a65caa4263921bb7d9ee911957e0ae4a23a6dd08185ad5f8", "sha256:e786fa28d8c9154e6a4de5d46a1d921b8749f8b74e28bde23768e5e16eece998" ], - "index": "pypi", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.25.0" }, + "requests-oauthlib": { + "hashes": [ + "sha256:7f71572defaecd16372f9006f33c2ec8c077c3cfa6f5911a9a90202beb513f3d", + "sha256:b4261601a71fd721a8bd6d7aa1cc1d6a8a93b4a9f5e96626f8e4d91e8beeaa6a", + "sha256:fa6c47b933f01060936d87ae9327fead68768b69c6c9ea2109c48be30f2d4dbc" + ], + "version": "==1.3.0" + }, "rope": { "hashes": [ "sha256:786b5c38c530d4846aa68a42604f61b4e69a493390e3ca11b88df0fbfdc3ed04" @@ -847,6 +992,43 @@ "index": "pypi", "version": "==0.18.0" }, + "ruamel.yaml": { + "hashes": [ + "sha256:012b9470a0ea06e4e44e99e7920277edf6b46eee0232a04487ea73a7386340a5", + "sha256:076cc0bc34f1966d920a49f18b52b6ad559fbe656a0748e3535cf7b3f29ebf9e" + ], + "version": "==0.16.12" + }, + "ruamel.yaml.clib": { + "hashes": [ + "sha256:058a1cc3df2a8aecc12f983a48bda99315cebf55a3b3a5463e37bb599b05727b", + "sha256:2602e91bd5c1b874d6f93d3086f9830f3e907c543c7672cf293a97c3fabdcd91", + "sha256:28116f204103cb3a108dfd37668f20abe6e3cafd0d3fd40dba126c732457b3cc", + "sha256:2d24bd98af676f4990c4d715bcdc2a60b19c56a3fb3a763164d2d8ca0e806ba7", + "sha256:30dca9bbcbb1cc858717438218d11eafb78666759e5094dd767468c0d577a7e7", + "sha256:44c7b0498c39f27795224438f1a6be6c5352f82cb887bc33d962c3a3acc00df6", + "sha256:464e66a04e740d754170be5e740657a3b3b6d2bcc567f0c3437879a6e6087ff6", + "sha256:4df5019e7783d14b79217ad9c56edf1ba7485d614ad5a385d1b3c768635c81c0", + "sha256:4e52c96ca66de04be42ea2278012a2342d89f5e82b4512fb6fb7134e377e2e62", + "sha256:5254af7d8bdf4d5484c089f929cb7f5bafa59b4f01d4f48adda4be41e6d29f99", + "sha256:52ae5739e4b5d6317b52f5b040b1b6639e8af68a5b8fd606a8b08658fbd0cab5", + "sha256:53b9dd1abd70e257a6e32f934ebc482dac5edb8c93e23deb663eac724c30b026", + "sha256:73b3d43e04cc4b228fa6fa5d796409ece6fcb53a6c270eb2048109cbcbc3b9c2", + "sha256:74161d827407f4db9072011adcfb825b5258a5ccb3d2cd518dd6c9edea9e30f1", + "sha256:839dd72545ef7ba78fd2aa1a5dd07b33696adf3e68fae7f31327161c1093001b", + "sha256:8e8fd0a22c9d92af3a34f91e8a2594eeb35cba90ab643c5e0e643567dc8be43e", + "sha256:a873e4d4954f865dcb60bdc4914af7eaae48fb56b60ed6daa1d6251c72f5337c", + "sha256:ab845f1f51f7eb750a78937be9f79baea4a42c7960f5a94dde34e69f3cce1988", + "sha256:b1e981fe1aff1fd11627f531524826a4dcc1f26c726235a52fcb62ded27d150f", + "sha256:b4b0d31f2052b3f9f9b5327024dc629a253a83d8649d4734ca7f35b60ec3e9e5", + "sha256:c6ac7e45367b1317e56f1461719c853fd6825226f45b835df7436bb04031fd8a", + "sha256:daf21aa33ee9b351f66deed30a3d450ab55c14242cfdfcd377798e2c0d25c9f1", + "sha256:e9f7d1d8c26a6a12c23421061f9022bb62704e38211fe375c645485f38df34a2", + "sha256:f6061a31880c1ed6b6ce341215336e2f3d0c1deccd84957b6fa8ca474b41e89f" + ], + 
"markers": "python_version < '3.9' and platform_python_implementation == 'CPython'", + "version": "==0.2.2" + }, "scipy": { "hashes": [ "sha256:168c45c0c32e23f613db7c9e4e780bc61982d71dcd406ead746c7c7c2f2004ce", diff --git a/aguaclara/core/onshape_parser.py b/aguaclara/core/onshape_parser.py new file mode 100644 index 00000000..031140f2 --- /dev/null +++ b/aguaclara/core/onshape_parser.py @@ -0,0 +1,492 @@ +"""Parser to obtain a dictionary of key-value pairs from an Onshape model's URL +and add those variables to an RST file. + +Relies on the Onshape Documenter feature: +https://cad.onshape.com/documents/6b5c9b74e331c4d03a7c6b01/w/6f98333f14625dd1bdcac2f7/e/35b3d3018f18ec53eeecded7 +""" + +import json +import math +import os +from shutil import copyfile +from onshape_client.oas import BTFeatureScriptEvalCall2377 +from onshape_client.onshape_url import OnshapeElement +from onshape_client import Client +from aguaclara.core.units import u + +ureg = u + +msg_str = "message" +val_str = "value" +key_str = "key" + +# create global roles using this: https://stackoverflow.com/questions/9698702/how-do-i-create-a-global-role-roles-in-sphinx +# If this grows too much, we'll need to add a global rst as described in the post above. +def parse_quantity(q): + """Parse an Onshape units definition + + Args: + q: an Onshape units definition... for instance: + { + 'typeTag': '', + 'unitToPower': [ + { + 'key': 'METER', + 'value': 1 + } + ], + 'value': 0.0868175271040671 + } + + Returns: + a string that can be converted to any other unit engine. + """ + units_s = q[val_str] + for unit in q["unitToPower"]: + units_s = units_s * ureg(unit[key_str].lower()) ** unit[val_str] + try: + log = math.floor(math.log10(units_s.magnitude)) + except: + log = 0 + if unit[key_str] == 'METER' and unit[val_str] == 1: + if log >= 3: + units_s = units_s.to(ureg.kilometer) + elif log >= -2 and log <= -1: + units_s = units_s.to(ureg.centimeter) + elif log <= -3: + units_s = units_s.to(ureg.millimeter) + elif unit[key_str] == 'METER' and unit[val_str] == 2: + if log >= 6: + units_s = units_s.to(ureg.kilometer**2) + elif log >= -4 and log <= -1: + units_s = units_s.to(ureg.centimeter**2) + elif log <= -5: + units_s = units_s.to(ureg.millimeter**2) + elif unit[key_str] == 'METER' and unit[val_str] == 3: + log += 3 + if log >= 3: + units_s = units_s.to(ureg.kiloliter) + elif log <= -1: + units_s = units_s.to(ureg.milliliter) + else: + units_s = units_s.to(ureg.liter) + return f'{round(units_s, 2):~}' + +def is_fs_type(candidate, type_name): + """Checks if the a JSON entry is of a specific FeatureScript type. + + Args: + candidate: decoded JSON object to check the type of + type_name: string of the FeatureScript Type to check for + + Returns: + result: True if candidate is of type_name, False otherwise + """ + result = False + try: + if isinstance(type_name, str): + result = type_name == candidate["typeName"] + elif isinstance(type_name, list): + result = any( + [type_name_one == candidate["typeName"] for type_name_one in type_name] + ) + except Exception: + result = False + return result + +def copy_to_docs(file_path, base="doc_files"): + """Copies a file to the current working directory. The new file's path + will be identical to the old file's path relative to the base path. 
+ + Args: + file_path: path to the file to be copied + base: base path to use in creating relative file path of the copy + + Returns: + none + """ + file = os.path.basename(file_path) + dir = os.path.dirname(file_path) + while os.path.basename(dir) != base: + file = os.path.basename(dir) + "/" + file + dir = os.path.dirname(dir) + try: + copyfile(file_path, file) + except IOError as io_err: + os.makedirs(os.path.dirname(file)) + copyfile(file_path, file) + +def parse_variables_from_list(unparsed): + """Helper function for parse_variables_from_map parses values from a list + instead of a map. + + Args: + unparsed: portion of deserialized JSON which has yet to be parsed + + Returns: + measurement_list: list of parsed values + """ + measurement_list = [] + + for to_parse in unparsed: + if is_fs_type(to_parse, "BTFSValueWithUnits"): + measurement_list.append(parse_quantity(to_parse[msg_str])) + elif is_fs_type(to_parse, ["BTFSValueNumber", "BTFSValueString"]): + measurement_list.append(to_parse[msg_str][val_str]) + + return measurement_list + +def merge_index_sections(new_section, old_section): + """Helper function for merge_indexes which loops through each section and + combines them. + + Args: + new_section: section which is being added to if line from old_section is absent + old_section: section which is pulled from + + Returns: + none + """ + for line in old_section: + if line in new_section: + continue + else: + new_section.append(line) + + return new_section + +def find_index_section_limits(filename, section_start=".. toctree::\n", + section_end="\n"): + """Helper function for merge_indexes which loops through the + file and marks the beginning and end of each section. + + Args: + filename: path to file to be modified + section_start: string which marks the start of each section + Default: '.. toctree::\n' + section_end: string which marks the end of each section + Default: '\n' + + Returns: + lines: list of strings of each line in the file + section_limits: list of the form [[start1, end1], [start2, end2]] + which marks the separation between sections + """ + section_limits = [] + start = 0 + first_newline = True + index_file = open(filename, "r+") + lines = index_file.readlines() + index_file.close() + + for i, line in enumerate(lines): + if line == section_start: + start = i + if line == section_end and start != 0: + if first_newline: + first_newline = False + else: + end = i + section_limits.append([start, end]) + start = end = 0 + first_newline = True + + return lines, section_limits + +def merge_indexes(new_index, old_index): + """Merges two indexes by comparing the two files, index.rst and new_index.rst + section by section and adding pieces which exist in index.rst but are missing + from new_index.rst . At the end, the one which was added to is maintained as + index.rst and new_index.rst is deleted. 
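+
+    Sections are the ".. toctree::" blocks located by find_index_section_limits,
+    and two sections are treated as the same section when the line directly
+    after ".. toctree::" (the ":caption:" line in these indexes) matches.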
+ + Args: + new_index: path to index file which is being merged from + old_index: path to existing index file which is being merged into + + Returns: + none + True + """ + old_lines, old_section_limits = find_index_section_limits(old_index) + new_lines, new_section_limits = find_index_section_limits(new_index) + + for start, end in old_section_limits: + included = False + caption = old_lines[start+1] + for new_start, new_end in new_section_limits: + if new_lines[new_start+1] == caption: + new_section = merge_index_sections(new_lines[new_start:new_end], old_lines[start:end]) + del new_lines[new_start:new_end] + i = new_start + for line in new_section: + new_lines.insert(i, line) + i += 1 + included = True + if not included: + i = new_end + new_lines.insert(i, "\n") + for line in old_lines[start:end]: + i += 1 + new_lines.insert(i, line) + + old_index_file = open(old_index, "w+") + old_index_file.write("".join(new_lines)) + old_index_file.close() + + os.remove(new_index) + +def find_treatment_section_limits(filename, section_delimiter=".. _heading"): + """Helper function for merge_treatment_processes which loops through the + file and marks the beginning and end of each section. + + Args: + filename: path to file to be modified + section_delimiter: string which marks the separation between sections + Default: '.. _heading' + + Returns: + lines: list of strings of each line in the file + section_limits: list of the form [[start1, end1], [start2, end2]] + which marks the separation between sections + """ + section_limits = [] + start = 0 + file = open(filename, "r+") + lines = file.readlines() + file.close() + + for i, line in enumerate(lines): + if section_delimiter in line: + end = i - 1 + section_limits.append([start, end]) + start = i + + section_limits.append([start,len(lines)]) + + return lines, section_limits + +def merge_treatment_processes(new_processes, old_processes): + """Merges two treatment process descriptions by comparing the two files + section by section and adding pieces which exist in new_processes but are missing + from old_processes. + + Args: + new_processes: path to treatment process file which is being merged from + old_processes: path to existing treatment process file which is being merged into + + Returns: + none + """ + old_lines, old_section_limits = find_treatment_section_limits(old_processes) + new_lines, new_section_limits = find_treatment_section_limits(new_processes) + + for start, end in new_section_limits: + included = False + heading = new_lines[start] + for old_start, old_end in old_section_limits: + if old_lines[old_start] == heading: + included = True + if not included: + i = old_end + old_lines.insert(i, "\n") + for line in new_lines[start:end]: + i += 1 + old_lines.insert(i, line) + + old_file = open(old_processes, "w+") + old_file.write("".join(old_lines)) + old_file.close() + +def parse_variables_from_map(unparsed, default_key=""): + """Helper function for parse_attributes which loops through an unparsed map + that matched one of the desired fields + + Args: + unparsed: portion of deserialized JSON which has yet to be parsed + default_key: key for the field. Used to detect special entries like index + + Returns: + parsed_variables: dictionary of parsed variables + templates: list of templates to move from doc_files and render in the + design specs. 
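+
+    Note: the special default_key values "template", "index" and "process" are
+    not parsed into the returned dictionary; they trigger copying or merging of
+    the corresponding documentation files instead.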
+ """ + parsed_variables = {} + value = None + templates = [] + + if default_key == "template": + copy_to_docs(unparsed) + templates.append(unparsed) + return parsed_variables, templates + elif default_key == "index": + if unparsed != "" and unparsed is not None: + if os.path.exists('index.rst'): + copyfile(unparsed, 'new_index.rst') + merge_indexes('new_index.rst', 'index.rst') + else: + copyfile(unparsed, 'index.rst') + return parsed_variables, templates + elif default_key == "process": + if unparsed != "" and unparsed is not None: + file = "Introduction/Treatment_Process.rst" + file_path = "../../../doc_files/Introduction/Treatment_Process_" + unparsed + ".rst" + if os.path.exists(file): + merge_treatment_processes(file_path, file) + else: + try: + copyfile(file_path, file) + except IOError as io_err: + os.makedirs(os.path.dirname(file)) + copyfile(file_path, file) + return parsed_variables, templates + + if isinstance(unparsed, list): + for to_parse in unparsed: + if is_fs_type(to_parse, "BTFSValueMapEntry"): + key = to_parse[msg_str][key_str][msg_str][val_str] + candidate_message = to_parse[msg_str][val_str] + if is_fs_type(candidate_message, "BTFSValueMap"): + value, template = parse_variables_from_map(candidate_message[msg_str][val_str]) + templates.extend(template) + elif is_fs_type(candidate_message, "BTFSValueArray"): + value = parse_variables_from_list(candidate_message[msg_str][val_str]) + elif is_fs_type(candidate_message, "BTFSValueWithUnits"): + value = parse_quantity(candidate_message[msg_str]) + elif is_fs_type(candidate_message, ["BTFSValueNumber", "BTFSValueString"]): + value = candidate_message[msg_str][val_str] + parsed_variables[key] = value + else: + parsed_variables[default_key] = unparsed + + return parsed_variables, templates + +def parse_attributes(attributes, fields, type_tag="Documenter"): + """Helper function for get_parsed_measurements which loops through the + atributes, parsing only the specified fields. + + Args: + attributes: deserialized JSON object returned by Onshape link + fields: fields which we are interested in parsing, e.g. 'variables' or 'index' + type_tag: type from Onshape of the configuration we are parsing for + Default: 'Documenter' + + Returns: + measurements: dictionary of parsed variables + templates: list of templates to move from doc_files and render in the + design specs. + """ + measurements = {} + templates = [] + + for attr in attributes: + if is_fs_type(attr, "BTFSValueMap"): + if attr[msg_str]["typeTag"] == type_tag: + for attr2 in attr[msg_str][val_str]: + docs = attr2[msg_str][val_str][msg_str][val_str] + for doc in docs: + for unparsed in doc[msg_str][val_str]: + if is_fs_type(unparsed, "BTFSValueMapEntry"): + key = unparsed[msg_str][key_str][msg_str][val_str] + for field in fields: + if key == field: + new_measure, new_templates = parse_variables_from_map(unparsed[msg_str][val_str][msg_str][val_str], key) + measurements.update(new_measure) + templates.extend(new_templates) + + for i in range(len(templates)): + new_template = './' + os.path.basename(os.path.dirname(templates[i])) + \ + '/' + os.path.basename(templates[i]) + templates[i] = new_template + + return measurements, templates + +def get_parsed_measurements(link): + """Parses the output of the Onshape Documenter feature found in the Onshape + document at the given url. + + Args: + link: URL of Onshape document + + Returns: + measurements: dictionary of parsed variables + templates: list of templates to move from doc_files and render in the + design specs. 
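+
+    Example (illustrative only, not a doctest, since calling this function
+    hits the Onshape API; the URL below is a placeholder):
+
+        link = 'https://cad.onshape.com/documents/<did>/w/<wid>/e/<eid>'
+        measurements, templates = get_parsed_measurements(link)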
+ """ + script = r""" + function (context is Context, queries is map) + { + return getAttributes(context, { + "entities" : qEverything(), + }); + } + """ + + client = Client( + configuration = { + "base_url": "https://cad.onshape.com", + "access_key": "ekAHCj04TtODlvlI9yWj2bjB", + "secret_key": "sS11vEOD5CavkLVcZshLBgfBlB5aBvnpz6v3oEvC0bN0zxhW" + } + ) + + element = OnshapeElement(link) + + script_call = BTFeatureScriptEvalCall2377(script=script) + response = client.part_studios_api.eval_feature_script( + element.did, + element.wvm, + element.wvmid, + element.eid, + bt_feature_script_eval_call_2377=script_call, + _preload_content=False, + ) + + attributes = json.loads(response.data.decode("utf-8"))["result"][msg_str][val_str] + fields = ["variables", "template", "index", "process"] + + measurements, templates = parse_attributes(attributes, fields) + + return measurements, templates + +# from https://stackoverflow.com/questions/5914627/prepend-line-to-beginning-of-a-file +def line_prepender(filename, line): + """Prepends a file with the given line. + + Args: + filename: path to file to be modified + line: string of text to prepend to the file + + Returns: + none + """ + with open(filename, 'r+') as f: + content = f.read() + f.seek(0, 0) + f.write(line.rstrip('\r\n') + '\n' + content) + +def make_replace_list(parsed_dict, filename, var_attachment=''): + """Adds the dictionary of variables which have been parsed to the top of the + given file. + + Args: + parsed_dict: dictionary of variables parsed from Onshape document + filename: path to file to be modified + var_attachment: string to prepend to all variables, e.g. "LFOM" + Default: '' + + Returns: + none + """ + prefix = '.. |' + suffix = '| replace:: ' + + for var in parsed_dict: + if type(parsed_dict[var]) == dict: + make_replace_list(parsed_dict[var], filename, var_attachment + var + "_") + else: + line = prefix + var_attachment + str(var) + suffix + str(parsed_dict[var]) + line_prepender(filename, line) + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/setup.py b/setup.py index d690c87a..c0e28347 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ setup( name = 'aguaclara', - version = '0.2.6', + version = '0.2.7', description = ( 'An open-source Python package for designing and performing research ' 'on AguaClara water treatment plants.' 
@@ -20,6 +20,7 @@ 'pint', 'pandas', 'scipy', + 'onshape_client', ], setup_requires=["pytest-runner"], diff --git a/tests/core/test_onshape_parser.py b/tests/core/test_onshape_parser.py new file mode 100644 index 00000000..3d00b30c --- /dev/null +++ b/tests/core/test_onshape_parser.py @@ -0,0 +1,101 @@ +import unittest +import json +import os +from aguaclara.core.units import u +from aguaclara.core import onshape_parser as parse + +os.chdir(os.path.dirname(__file__)) + + +class OnshapeParserTest(unittest.TestCase): + def test_parse_quanity(self): + d0 = {'value': 0.1414213562373095, + 'unitToPower': [{'value': 1, 'key': 'METER'}], 'typeTag': ''} + d1 = {'value': 0.1414213562373095, + 'unitToPower': [{'value': 3, 'key': 'MILLIMETER'}], 'typeTag': ''} + + self.assertEqual(parse.parse_quantity(d0), '14.14 cm') + self.assertEqual(parse.parse_quantity(d1), '0.14 mm ** 3') + + def test_is_fs_type(self): + test_json = json.loads('{"type": 2077, "typeName": "BTFSValueMapEntry", "message": {}}') + + self.assertTrue(parse.is_fs_type(test_json, "BTFSValueMapEntry")) + self.assertFalse(parse.is_fs_type(test_json, "BTFSValueNumber")) + + def test_merge_index_sections(self): + new_section = ['test_line', 'test_line2'] + old_section = ['test_line', 'test_line3'] + result = parse.merge_index_sections(new_section, old_section) + + self.assertEqual(result, ['test_line', 'test_line2', 'test_line3']) + + def test_find_index_section_limits(self): + index0 = '../rst_files/index_lfom.rst' + _, limits0 = parse.find_index_section_limits(index0) + index1 = '../rst_files/index_lfom_ET.rst' + _, limits1 = parse.find_index_section_limits(index1) + + self.assertEqual(limits0, [[18, 26], [27, 32]]) + self.assertEqual(limits1, [[18, 26], [27, 33]]) + + def test_merge_indexes(self): + old_index = '../rst_files/index_lfom.rst' + new_index = '../rst_files/new_index_ET.rst' + parse.merge_indexes(new_index, old_index) + index_file = open(old_index, "r+") + lines = index_file.readlines() + test_file = open('../rst_files/index_lfom_ET.rst') + test_lines = test_file.readlines() + + self.assertEqual(test_lines, lines) + + def test_find_treatment_section_limits(self): + process0 = '../rst_files/Treatment_Process_ET.rst' + _, limits0 = parse.find_treatment_section_limits(process0) + process1 = '../rst_files/Treatment_Process_ET_Floc.rst' + lines, limits1 = parse.find_treatment_section_limits(process1) + + self.assertEqual(limits0, [[0, 14], [15, 20]]) + self.assertEqual(limits1, [[0, 14], [15, 20], [21, 26]]) + + def test_merge_treatment_processes(self): + old_processes = '../rst_files/Treatment_Process_ET.rst' + new_processes = '../rst_files/Treatment_Process_Floc.rst' + parse.merge_treatment_processes(new_processes, old_processes) + file = open(old_processes, "r+") + lines = file.readlines() + test_file = open('../rst_files/Treatment_Process_ET_Floc.rst') + test_lines = test_file.readlines() + + self.assertEqual(test_lines, lines) + # + # def test_get_parsed_measurements(self): + # link = 'https://cad.onshape.com/documents/c3a8ce032e33ebe875b9aab4/v/dc76b3f674d3d5d4f6237f35/e/d75b2f7a41dde39791b154e8' + # measurements, templates = parse.get_parsed_measurements(link) + # + # self.assertEqual(templates, ['./Entrance_Tank/LFOM.rst']) + # self.assertEqual(measurements['N.LfomOrifices'], + # [17.0, 4.0, 6.0, 3.0, 4.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 1.0]) + # self.assertEqual(measurements['HL.Lfom'], '20.0 cm') + # self.assertEqual( + # measurements['H.LfomOrifices'], + # ['7.94 mm', '2.47 cm', '4.14 cm', '5.82 cm', '7.49 cm', '9.16 cm', 
+ # '10.84 cm', '12.51 cm', '14.18 cm', '15.86 cm', '17.53 cm', '19.21 cm'] + # ) + # self.assertEqual(measurements['D.LfomOrifices'], '1.59 cm') + # self.assertEqual(measurements['B.LfomRows'], '1.67 cm') + + def test_make_replace_list(self): + var_dict = {'test': '3.0 cm'} + file_path = "../rst_files/test_prepend.rst" + parse.make_replace_list(var_dict, file_path) + file = open(file_path, "r+") + lines = file.readlines() + test_file = open('../rst_files/test_prepend_result.rst') + test_lines = test_file.readlines() + + self.assertEqual(test_lines, lines) + +if __name__ == '__main__': + unittest.main() diff --git a/tests/rst_files/Treatment_Process_ET.rst b/tests/rst_files/Treatment_Process_ET.rst new file mode 100644 index 00000000..9dbe55cb --- /dev/null +++ b/tests/rst_files/Treatment_Process_ET.rst @@ -0,0 +1,20 @@ +.. _title_Procesos_de_Tratamiento: + +*********************** +Procesos de Tratamiento +*********************** +Las plantas producen agua limpia y segura, tras la remoción de sedimentos y patógenos. La tecnología AguaClara emplea los procesos unitarios de coagulación, floculación, sedimentación, filtración rápida con arena, y desinfección con cloro (:numref:`figure_process`). + +.. _figure_process: + +.. figure:: Images/process.png + :width: 650px + :align: center + + Los procesos de tratamiento que se utilizan en la planta AguaClara. + +.. _heading_el_tanque_de_entrada: + +El tanque de entrada +-------------------- +El proceso inicia en el **tanque de entrada**, que sirve tanto para quitar del agua el material grueso como para medir el caudal de agua para la dosificación de los químicos. El tanque de entrada funciona como tanque de sedimentación en que las partículas gruesas se caen al fondo del tanque por gravedad. Debido al diseño especial de la salida, el nivel de agua en el tanque varía en proporción al caudal de agua en la planta. Este nivel de agua está conectado con el sistema semi-automático de dosificación de químicos, de tal forma que las dosis del coagulante y del cloro se mantienen aun cuando cambia el caudal de agua en la planta. Mediante el dosificador de químicos, en la salida del tanque de entrada se aplica un coagulante, que se une con el agua cruda en la **mezcla rápida**. diff --git a/tests/rst_files/Treatment_Process_ET_Floc.rst b/tests/rst_files/Treatment_Process_ET_Floc.rst new file mode 100644 index 00000000..440336d2 --- /dev/null +++ b/tests/rst_files/Treatment_Process_ET_Floc.rst @@ -0,0 +1,26 @@ +.. _title_Procesos_de_Tratamiento: + +*********************** +Procesos de Tratamiento +*********************** +Las plantas producen agua limpia y segura, tras la remoción de sedimentos y patógenos. La tecnología AguaClara emplea los procesos unitarios de coagulación, floculación, sedimentación, filtración rápida con arena, y desinfección con cloro (:numref:`figure_process`). + +.. _figure_process: + +.. figure:: Images/process.png + :width: 650px + :align: center + + Los procesos de tratamiento que se utilizan en la planta AguaClara. + +.. _heading_el_tanque_de_entrada: + +El tanque de entrada +-------------------- +El proceso inicia en el **tanque de entrada**, que sirve tanto para quitar del agua el material grueso como para medir el caudal de agua para la dosificación de los químicos. El tanque de entrada funciona como tanque de sedimentación en que las partículas gruesas se caen al fondo del tanque por gravedad. Debido al diseño especial de la salida, el nivel de agua en el tanque varía en proporción al caudal de agua en la planta. 
Este nivel de agua está conectado con el sistema semi-automático de dosificación de químicos, de tal forma que las dosis del coagulante y del cloro se mantienen aun cuando cambia el caudal de agua en la planta. Mediante el dosificador de químicos, en la salida del tanque de entrada se aplica un coagulante, que se une con el agua cruda en la **mezcla rápida**. + +.. _heading_floculación: + +Floculación +----------- +Luego la mezcla de agua y coagulante entra en el **floculador**, una serie de canales con deflectores que dirigen el flujo de agua. La mezcla suave en el flujo del floculador promueve choques entre partículas, y estas se quedan pegadas por el efecto de las nanopartículas del coagulante. Durante este proceso, que se llama la floculación, las partículas crecen, formando aglomeraciones (**flóculos**). Al final han alcanzado un tamaño visible, y tienen una velocidad terminal suficiente alta para eliminarse en el siguiente proceso, la **sedimentación**. diff --git a/tests/rst_files/Treatment_Process_Floc.rst b/tests/rst_files/Treatment_Process_Floc.rst new file mode 100644 index 00000000..b1bb6060 --- /dev/null +++ b/tests/rst_files/Treatment_Process_Floc.rst @@ -0,0 +1,20 @@ +.. _title_Procesos_de_Tratamiento: + +*********************** +Procesos de Tratamiento +*********************** +Las plantas producen agua limpia y segura, tras la remoción de sedimentos y patógenos. La tecnología AguaClara emplea los procesos unitarios de coagulación, floculación, sedimentación, filtración rápida con arena, y desinfección con cloro (:numref:`figure_process`). + +.. _figure_process: + +.. figure:: Images/process.png + :width: 650px + :align: center + + Los procesos de tratamiento que se utilizan en la planta AguaClara. + +.. _heading_floculación: + +Floculación +----------- +Luego la mezcla de agua y coagulante entra en el **floculador**, una serie de canales con deflectores que dirigen el flujo de agua. La mezcla suave en el flujo del floculador promueve choques entre partículas, y estas se quedan pegadas por el efecto de las nanopartículas del coagulante. Durante este proceso, que se llama la floculación, las partículas crecen, formando aglomeraciones (**flóculos**). Al final han alcanzado un tamaño visible, y tienen una velocidad terminal suficiente alta para eliminarse en el siguiente proceso, la **sedimentación**. diff --git a/tests/rst_files/index_ET.rst b/tests/rst_files/index_ET.rst new file mode 100644 index 00000000..e40c19a6 --- /dev/null +++ b/tests/rst_files/index_ET.rst @@ -0,0 +1,38 @@ +.. _toc: + +=============== +Memoria Tecnica +=============== +Este documento está escrito y mantenido en `Github `_ via `Sphinx `_. Utiliza y se refiere a código y funciones de AguaClara en `AguaClara `_. A continuación se enumeran las versiones de los programas que utilizamos: + +.. _software_versions: +.. csv-table:: Estas son las versiones de software utilizadas para compilar esta documentación. + :header: "Software", "version" + :widths: 10, 10 + :align: center + + "Sphinx", "1.7.5" + "aguaclara", "0.1.8" + "Anaconda", "4.5.4" + "Python", "3.6.5" + +.. toctree:: + :caption: Introducción a la Tecnología AguaClara + :maxdepth: 1 + + Introduction/History.rst + Introduction/Treatment_Process.rst + Introduction/Requirements.rst + Introduction/AIDE_Tools.rst + +.. toctree:: + :caption: Tanque de Entrada + :maxdepth: 1 + + Entrance_Tank/Tank_Design_Algorithm.rst + +`Las versiones de PDF y LaTeX `_ [#pdf_warning]_. + +.. rubric:: **Notas** + +.. 
[#pdf_warning] Las versiones de PDF y LaTeX pueden contener rarezas visuales porque se genera automáticamente. El sitio web es la forma recomendada de leer este documento. `Por favor visite nuestro GitHub `_ para enviar un problema, contribuir o comentar. diff --git a/tests/rst_files/index_lfom.rst b/tests/rst_files/index_lfom.rst new file mode 100644 index 00000000..d87749db --- /dev/null +++ b/tests/rst_files/index_lfom.rst @@ -0,0 +1,38 @@ +.. _toc: + +=============== +Memoria Tecnica +=============== +Este documento está escrito y mantenido en `Github `_ via `Sphinx `_. Utiliza y se refiere a código y funciones de AguaClara en `AguaClara `_. A continuación se enumeran las versiones de los programas que utilizamos: + +.. _software_versions: +.. csv-table:: Estas son las versiones de software utilizadas para compilar esta documentación. + :header: "Software", "version" + :widths: 10, 10 + :align: center + + "Sphinx", "1.7.5" + "aguaclara", "0.1.8" + "Anaconda", "4.5.4" + "Python", "3.6.5" + +.. toctree:: + :caption: Introducción a la Tecnología AguaClara + :maxdepth: 1 + + Introduction/History.rst + Introduction/Treatment_Process.rst + Introduction/Requirements.rst + Introduction/AIDE_Tools.rst + +.. toctree:: + :caption: Tanque de Entrada + :maxdepth: 1 + + Entrance_Tank/LFOM.rst + +`Las versiones de PDF y LaTeX `_ [#pdf_warning]_. + +.. rubric:: **Notas** + +.. [#pdf_warning] Las versiones de PDF y LaTeX pueden contener rarezas visuales porque se genera automáticamente. El sitio web es la forma recomendada de leer este documento. `Por favor visite nuestro GitHub `_ para enviar un problema, contribuir o comentar. diff --git a/tests/rst_files/index_lfom_ET.rst b/tests/rst_files/index_lfom_ET.rst new file mode 100644 index 00000000..85166319 --- /dev/null +++ b/tests/rst_files/index_lfom_ET.rst @@ -0,0 +1,39 @@ +.. _toc: + +=============== +Memoria Tecnica +=============== +Este documento está escrito y mantenido en `Github `_ via `Sphinx `_. Utiliza y se refiere a código y funciones de AguaClara en `AguaClara `_. A continuación se enumeran las versiones de los programas que utilizamos: + +.. _software_versions: +.. csv-table:: Estas son las versiones de software utilizadas para compilar esta documentación. + :header: "Software", "version" + :widths: 10, 10 + :align: center + + "Sphinx", "1.7.5" + "aguaclara", "0.1.8" + "Anaconda", "4.5.4" + "Python", "3.6.5" + +.. toctree:: + :caption: Introducción a la Tecnología AguaClara + :maxdepth: 1 + + Introduction/History.rst + Introduction/Treatment_Process.rst + Introduction/Requirements.rst + Introduction/AIDE_Tools.rst + +.. toctree:: + :caption: Tanque de Entrada + :maxdepth: 1 + + Entrance_Tank/Tank_Design_Algorithm.rst + Entrance_Tank/LFOM.rst + +`Las versiones de PDF y LaTeX `_ [#pdf_warning]_. + +.. rubric:: **Notas** + +.. [#pdf_warning] Las versiones de PDF y LaTeX pueden contener rarezas visuales porque se genera automáticamente. El sitio web es la forma recomendada de leer este documento. `Por favor visite nuestro GitHub `_ para enviar un problema, contribuir o comentar. diff --git a/tests/rst_files/new_index_ET.rst b/tests/rst_files/new_index_ET.rst new file mode 100644 index 00000000..e38eff6e --- /dev/null +++ b/tests/rst_files/new_index_ET.rst @@ -0,0 +1,38 @@ +.. _toc: + +=============== +Memoria Tecnica +=============== +Este documento está escrito y mantenido en `Github `_ via `Sphinx `_. Utiliza y se refiere a código y funciones de AguaClara en `AguaClara `_. 
A continuación se enumeran las versiones de los programas que utilizamos: + +.. _software_versions: +.. csv-table:: Estas son las versiones de software utilizadas para compilar esta documentación. + :header: "Software", "version" + :widths: 10, 10 + :align: center + + "Sphinx", "1.7.5" + "aguaclara", "0.1.8" + "Anaconda", "4.5.4" + "Python", "3.6.5" + +.. toctree:: + :caption: Introducción a la Tecnología AguaClara + :maxdepth: 1 + + Introduction/History.rst + Introduction/Treatment_Process.rst + Introduction/Requirements.rst + Introduction/AIDE_Tools.rst + +.. toctree:: + :caption: Tanque de Entrada + :maxdepth: 1 + + Entrance_Tank/Tank_Design_Algorithm.rst + +`Las versiones de PDF y LaTeX `_ [#pdf_warning]_. + +.. rubric:: **Notas** + +.. [#pdf_warning] Las versiones de PDF y LaTeX pueden contener rarezas visuales porque se genera automáticamente. El sitio web es la forma recomendada de leer este documento. `Por favor visite nuestro GitHub `_ para enviar un problema, contribuir o comentar. diff --git a/tests/rst_files/test_prepend.rst b/tests/rst_files/test_prepend.rst new file mode 100644 index 00000000..e6685e44 --- /dev/null +++ b/tests/rst_files/test_prepend.rst @@ -0,0 +1,7 @@ + +.. _title_Procesos_de_Tratamiento: + +*********************** +Procesos de Tratamiento +*********************** +Las plantas producen agua limpia y segura, tras la remoción de sedimentos y patógenos. La tecnología AguaClara emplea los procesos unitarios de coagulación, floculación, sedimentación, filtración rápida con arena, y desinfección con cloro (:numref:`figure_process`). diff --git a/tests/rst_files/test_prepend_result.rst b/tests/rst_files/test_prepend_result.rst new file mode 100644 index 00000000..d0aada74 --- /dev/null +++ b/tests/rst_files/test_prepend_result.rst @@ -0,0 +1,8 @@ +.. |test| replace:: 3.0 cm + +.. _title_Procesos_de_Tratamiento: + +*********************** +Procesos de Tratamiento +*********************** +Las plantas producen agua limpia y segura, tras la remoción de sedimentos y patógenos. La tecnología AguaClara emplea los procesos unitarios de coagulación, floculación, sedimentación, filtración rápida con arena, y desinfección con cloro (:numref:`figure_process`).
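The new onshape_parser module is meant to feed Onshape's Documenter output into the Sphinx-built design documentation handled by the index and template logic above. A minimal usage sketch of that flow, assuming only that the Onshape document uses the Documenter feature (the URL below is a placeholder):

    from aguaclara.core import onshape_parser as parse

    # Any Onshape document that uses the Documenter feature.
    link = "https://cad.onshape.com/documents/<did>/w/<wid>/e/<eid>"

    # Dictionary of named measurements plus the list of RST templates the
    # model asks to have rendered into the design specs.
    measurements, templates = parse.get_parsed_measurements(link)

    # Prepend ".. |name| replace:: value" substitution definitions so the
    # copied templates can refer to model dimensions as |name|.
    parse.make_replace_list(measurements, "index.rst")

make_replace_list flattens nested dictionaries by joining keys with underscores, so an entry such as {'LFOM': {'HL': '20.0 cm'}} (illustrative names) is written as the substitution |LFOM_HL|.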