diff --git a/cellpack/autopack/Analysis.py b/cellpack/autopack/Analysis.py
index 7f7aa6eec..899b4f4fb 100644
--- a/cellpack/autopack/Analysis.py
+++ b/cellpack/autopack/Analysis.py
@@ -34,6 +34,7 @@
 from cellpack.autopack.upy import colors as col
 from cellpack.autopack.upy.colors import map_colors
 from cellpack.autopack.utils import check_paired_key, get_paired_key
+from cellpack.autopack.writers import MarkdownWriter


 class Analysis:
@@ -969,7 +970,7 @@ def get_dict_from_glob(

     def run_distance_analysis(
         self,
-        report_md,
+        md_object: MarkdownWriter,
         recipe_data,
         pairwise_distance_dict,
         figure_path,
@@ -991,19 +992,17 @@ def run_distance_analysis(
             pairwise_distance_dict
         )

-        report_md.new_header(level=1, title="Distance analysis")
-        report_md.new_line(
+        md_object.add_header(level=1, header="Distance analysis")
+        md_object.report_md.new_line(
             f"Expected minimum distance: {expected_minimum_distance:.2f}"
         )
-        report_md.new_line(
+        md_object.report_md.new_line(
             f"Actual minimum distance: {packed_minimum_distance:.2f}\n"
         )

         if expected_minimum_distance > packed_minimum_distance:
-            report_md.new_header(
-                level=2, title="Possible errors", add_table_of_contents="n"
-            )
-            report_md.new_list(
+            md_object.add_header(header="Possible errors")
+            md_object.report_md.new_list(
                 [
                     f"Packed minimum distance {packed_minimum_distance:.2f}"
                     " is less than the "
@@ -1011,31 +1010,28 @@ def run_distance_analysis(
                 ]
             )

-        num_keys = len(all_pairwise_distances.keys())
         img_list = []
         for ingr_key in all_pairwise_distances:
             ingr_distance_histo_path = figure_path.glob(
                 f"{ingr_key}_pairwise_distances_*.png"
             )
             for img_path in ingr_distance_histo_path:
                 img_list.append(
-                    report_md.new_inline_image(
-                        text=f"Distance distribution {ingr_key}",
-                        path=f"{output_image_location}/{img_path.name}",
+                    md_object.report_md.new_inline_image(
+                        text=f"Distance distribution {ingr_key}",
+                        path=f"{md_object.output_image_location}/{img_path.name}",
                     )
                 )
-        text_list = [
-            "Ingredient key",
-            "Pairwise distance distribution",
-            *[
-                val
-                for pair in zip(all_pairwise_distances.keys(), img_list)
-                for val in pair
-            ],
-        ]
-
-        report_md.new_table(
-            columns=2, rows=(num_keys + 1), text=text_list, text_align="center"
+        df = pd.DataFrame()
+        df["Ingredient key"] = list(all_pairwise_distances.keys())
+        df["Pairwise distance distribution"] = img_list
+
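+        # the DataFrame column names become the markdown table's header row
+        # (see MarkdownWriter.add_table)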
"Partner pair", - "Touching radius", - "Binding probability", - "Close packed fraction", - *val_list, - ] - report_md.new_table( - columns=4, - rows=(len(partner_pair_dict) + 1), - text=text_list, - text_align="center", + paired_keys.append(paired_key) + touching_radii.append(partner_values["touching_radius"]) + binding_probabilities.append(partner_values["binding_probability"]) + close_fractions.append(close_fraction) + + df = pd.DataFrame() + df["Partner pair"] = paired_keys + df["Touching radius"] = touching_radii + df["Binding Probability"] = binding_probabilities + df["Close packing fraction"] = close_fractions + + md_object.add_table( + header="", + table=df, ) def create_report( @@ -1172,23 +1166,7 @@ def create_report( run_*_analysis: bool whether to run specific analysis """ - if report_output_path is None: - report_output_path = self.output_path - report_output_path = Path(report_output_path) - - report_md = MdUtils( - file_name=f"{report_output_path}/analysis_report", - title="Packing analysis report", - ) - report_md.new_header( - level=2, - title=f"Analysis for packing results located at {self.packing_results_path}", - add_table_of_contents="n", - ) - - if not hasattr(self, "ingredient_key_dict"): - self.ingredient_key_dict = self.get_dict_from_glob("ingredient_keys_*") - + self.ingredient_key_dict = self.get_dict_from_glob("ingredient_keys_*") if ingredient_keys is None: ingredient_keys = list(self.ingredient_key_dict.keys()) @@ -1196,32 +1174,33 @@ def create_report( ingredient_keys=ingredient_keys ) ingredient_radii = self.get_ingredient_radii(recipe_data=recipe_data) - - if not hasattr(self, "pairwise_distance_dict"): - self.pairwise_distance_dict = self.get_dict_from_glob( - "pairwise_distances_*.json" + pairwise_distance_dict = self.get_dict_from_glob("pairwise_distances_*.json") + combined_pairwise_distance_dict = self.combine_results_from_seeds( + pairwise_distance_dict + ) + if not hasattr(self, "pairwise_distance_dict"): + self.pairwise_distance_dict = self.get_dict_from_glob( + "pairwise_distances_*.json" ) - combined_pairwise_distance_dict = self.combine_results_from_seeds( - self.pairwise_distance_dict + df = pd.DataFrame() + df['Ingredient name'] = list(ingredient_keys) + df["Encapsulating radius"] = list(ingredient_radii.values()) + df["Average number packed"] = list(avg_num_packed.values()) + + # path to save report and other outputs + if output_image_location is None: + output_image_location = self.output_path + + md_object = MarkdownWriter( + title="Packing analysis report", + output_path=self.output_path, + output_image_location=output_image_location, + report_name="analysis_report" ) - val_list = [] - for key, radius, num_packed in zip( - ingredient_keys, ingredient_radii.values(), avg_num_packed.values() - ): - val_list.extend([key, radius, num_packed]) - text_list = [ - "Ingredient name", - "Encapsulating radius", - "Average number packed", - *val_list, - ] - report_md.new_table( - columns=3, - rows=(len(ingredient_keys) + 1), - text=text_list, - text_align="center", + md_object.add_header( + header=f"Analysis for packing results located at {self.packing_results_path}" ) # path to save report and other outputs @@ -1232,21 +1211,17 @@ def create_report( packing_results_path = self.packing_results_path figure_path = packing_results_path / "figures" - report_md.new_header(level=1, title="Packing image") - glob_to_packing_image = figure_path.glob("packing_image_*.png") - for img_path in glob_to_packing_image: - report_md.new_line( - 
+        md_object.add_images(
+            header="Packing image",
+            image_text=["Packing image"],
+            filepaths=list(figure_path.glob("packing_image_*.png")),
+        )

         if run_distance_analysis:
             # TODO: take packing distance dict as direct input for live mode
             self.run_distance_analysis(
-                report_md,
+                md_object,
                 recipe_data,
                 self.pairwise_distance_dict,
                 figure_path,
@@ -1255,14 +1230,14 @@ def create_report(

         if run_partner_analysis:
             self.run_partner_analysis(
-                report_md,
+                md_object,
                 recipe_data,
                 combined_pairwise_distance_dict,
                 ingredient_radii,
                 avg_num_packed,
             )

-        report_md.create_md_file()
+        md_object.write_file()

     def run_analysis_workflow(
         self,
@@ -1925,7 +1900,10 @@ def add_ingredient_positions_to_plot(
                 )  # plot the sphere
             if ingr.use_rbsphere:
-                (ext_recipe, pts,) = ingr.getInterpolatedSphere(
+                (
+                    ext_recipe,
+                    pts,
+                ) = ingr.getInterpolatedSphere(
                     seed_ingredient_positions[-i - 1],
                     seed_ingredient_positions[-i],
                 )
diff --git a/cellpack/autopack/FirebaseHandler.py b/cellpack/autopack/FirebaseHandler.py
index 4dab8ad7d..69ec0db0b 100644
--- a/cellpack/autopack/FirebaseHandler.py
+++ b/cellpack/autopack/FirebaseHandler.py
@@ -85,7 +85,32 @@ def upload_doc(self, collection, data):
         return self.db.collection(collection).add(data)

     @staticmethod
-    def is_firebase_obj(obj):
-        return isinstance(
-            obj, (firestore.DocumentReference, firestore.DocumentSnapshot)
-        )
+    def create_path(collection, doc_id):
+        return f"firebase:{collection}/{doc_id}"
+
+    # `doc` is a DocumentSnapshot object
+    # `doc_ref` is a DocumentReference object to perform operations on the doc
+    def get_doc_by_id(self, collection, id):
+        doc_ref = self.db.collection(collection).document(id)
+        doc = doc_ref.get()
+        if doc.exists:
+            return doc.to_dict(), doc_ref
+        else:
+            return None, None
+
+    @staticmethod
+    def get_collection_id_from_path(path):
+        # path example = firebase:composition/uid_1
+        components = path.split(":")[1].split("/")
+        collection = components[0]
+        id = components[1]
+        return collection, id
+
+    @staticmethod
+    def update_reference_on_doc(doc_ref, index, new_item_ref):
+        doc_ref.update({index: new_item_ref})
+
+    @staticmethod
+    def update_elements_in_array(doc_ref, index, new_item_ref, remove_item):
+        doc_ref.update({index: firestore.ArrayRemove([remove_item])})
+        doc_ref.update({index: firestore.ArrayUnion([new_item_ref])})
diff --git a/cellpack/autopack/firebase.py b/cellpack/autopack/firebase.py
new file mode 100644
index 000000000..8449d74c0
--- /dev/null
+++ b/cellpack/autopack/firebase.py
@@ -0,0 +1,229 @@
+import copy
+
+import firebase_admin
+from firebase_admin import credentials
+from firebase_admin import firestore
+
+
+# placeholder path to a local service-account key; replace with your own
+default_creds = r"/Users/Ruge/Desktop/Allen Internship/cellPACK/cellpack-data-582d6-firebase-adminsdk-3pkkz-27a3ec0777.json"
+
+
+class FirebaseHandler(object):
+    def __init__(self, cred_path=default_creds):
+        # fetch the service account key JSON file; the parameter is named
+        # cred_path so it does not shadow the firebase_admin credentials module
+        cred = credentials.Certificate(cred_path)
+        # initialize firebase
+        firebase_admin.initialize_app(cred)
+        # connect to db
+        self.db = firestore.client()
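+
+    # Minimal usage sketch (assumes a valid service-account key file):
+    #   handler = FirebaseHandler(cred_path="path/to/service-key.json")
+    #   path = handler.upload_recipe(recipe_data)
+    #   # -> "firebase:recipes/<name>_v<version>"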
"packing_mode": "random", + "principal_vector": [ + 0, + 0, + 1 + ], + "rejection_threshold": 50, + "place_method": "jitter", + "cutoff_surface": 42, + "rotation_axis": [ + 0, + 0, + 1 + ], + "available_regions": { + "interior": {}, + "surface": {}, + "outer_leaflet": {}, + "inner_leaflet": {} + } + }, + "sphere_25": { + "type": "single_sphere", + "inherit": "base", + "color": [ + 0.5, + 0.5, + 0.5 + ], + "radius": 25, + "max_jitter": [ + 1, + 1, + 0 + ] + } + }, + "composition": { + "space": { + "regions": { + "interior": [[["A", "B", "C"], ["A"], ["C"]], [["foo"], ["bar"]]] + } + }, + "A": {"object": "sphere_25", "count": 1}, + }, +} + + + + +def reconstruct_dict(d): + # Initialize an empty dictionary to store the modified version of d + modified_d = {} + + # Iterate over the key-value pairs in d + for k, v in d.items(): + # If the value is a list, convert it to a dictionary with keys "array_0", "array_1", etc. + if isinstance(v, list): + arr_dict = {} + for i, element in enumerate(v): + # Check if element is a list, in which case we need to convert it to a dictionary as well + if isinstance(element, list): + nested_arr_dict = {} + for j, nested_element in enumerate(element): + nested_arr_dict["array_{}".format(j)] = nested_element + arr_dict["array_{}".format(i)] = nested_arr_dict + # Otherwise, element is not a list, so we can just add it to arr_dict + else: + arr_dict["array_{}".format(i)] = element + modified_d[k] = arr_dict + # If the value is a dictionary, recursively convert its nested lists to dictionaries + elif isinstance(v, dict): + modified_d[k] = reconstruct_dict(v) + # Otherwise, the value is not a list or a dictionary, so we can just add it to modified_d + else: + modified_d[k] = v + + # Return the modified dictionary + return modified_d + +print(reconstruct_dict(d)) + + + + +# helper function -- we need to convert 2d array(bbox) into dict before storing in firestore +# def convert_nested_array_to_dict(data): +# for k, v in data.items(): +# if isinstance(v, list): +# if any(isinstance(ele, list) for ele in v): +# converted_dict = dict(zip(["array_" + str(i) for i in range(len(v))], v)) +# data[k] = converted_dict +# print(data) +# return data + +# convert_nested_array_to_dict(data) + + def convert_to_firebase_data(self, data): + modified_data = reconstruct_dict(data) + + # add documents with known IDs + def save_to_firestore(self, collection, data, id=None): + if id is None: + # use random id + name = data["name"] + # but first check db for same name + # ref = self.db.collection(collection) + # "query" query_ref = cities_ref.where(u'name', u'==', name) + # then check if all data is the same, (deep equals) + # if all data the same, don't upload + # else, upload with new id + id = create_random_id + self.db.collection(collection).document(id).set(modified_data) + else: + self.db.collection(collection).document(id).set(modified_data) + return f"firebase:{collection}/{id}" + + def upload_recipe(self, recipe_data): + key = f"{recipe_data["name"]}_v{recipe_data["version"]}" + # check if recipe exists + # doc = self.db.collection(collection).document(id) + # if doc.exists() + # if it already does, throw error, tell user to version the recipe + # if they still want to upload it + # LONGER TERM: could check to see if all the data is the same and let the user know + path = self.save_to_firestore("recipes", recipe_data, id=key) + # log("successfully uploaded to path:", path) + +def divide_recipe_into_collections(self, recipe_meta_data, recipe_data): + recipe_to_save = 
+
+
+# sample recipe used to exercise the conversion and upload helpers
+d = {
+    "version": "1.0.0",
+    "format_version": "2.0",
+    "name": "one_sphere",
+    "bounding_box": [
+        [0, 0, 0],
+        [100, 100, 100],
+    ],
+    "objects": {
+        "base": {
+            "jitter_attempts": 10,
+            "orient_bias_range": [-3.1415927, 3.1415927],
+            "rotation_range": 6.2831,
+            "cutoff_boundary": 0,
+            "max_jitter": [0.2, 0.2, 0.01],
+            "perturb_axis_amplitude": 0.1,
+            "packing_mode": "random",
+            "principal_vector": [0, 0, 1],
+            "rejection_threshold": 50,
+            "place_method": "jitter",
+            "cutoff_surface": 42,
+            "rotation_axis": [0, 0, 1],
+            "available_regions": {
+                "interior": {},
+                "surface": {},
+                "outer_leaflet": {},
+                "inner_leaflet": {},
+            },
+        },
+        "sphere_25": {
+            "type": "single_sphere",
+            "inherit": "base",
+            "color": [0.5, 0.5, 0.5],
+            "radius": 25,
+            "max_jitter": [1, 1, 0],
+        },
+    },
+    "composition": {
+        "space": {
+            "regions": {
+                "interior": [[["A", "B", "C"], ["A"], ["C"]], [["foo"], ["bar"]]]
+            }
+        },
+        "A": {"object": "sphere_25", "count": 1},
+    },
+}
+
+
+if __name__ == "__main__":
+    # manual smoke test of the array-to-dict conversion
+    print(reconstruct_dict(d))
diff --git a/cellpack/autopack/ingredient/single_sphere.py b/cellpack/autopack/ingredient/single_sphere.py
index 51be95d7e..971a3dcd1 100644
--- a/cellpack/autopack/ingredient/single_sphere.py
+++ b/cellpack/autopack/ingredient/single_sphere.py
@@ -149,7 +149,10 @@ def collision_jitter(
                 distance_to_packing_location - radius_of_ing_being_packed
             )

-            (insidePoints, newDistPoints,) = self.get_new_distances_and_inside_points(
+            (
+                insidePoints,
+                newDistPoints,
+            ) = self.get_new_distances_and_inside_points(
                 env,
                 jtrans,
                 rotMat,
diff --git a/cellpack/autopack/loaders/recipe_loader.py b/cellpack/autopack/loaders/recipe_loader.py
index d9fc6da90..c31848dca 100644
--- a/cellpack/autopack/loaders/recipe_loader.py
+++ b/cellpack/autopack/loaders/recipe_loader.py
@@ -130,6 +130,26 @@ def _sanitize_format_version(recipe_data):
         else:
             format_version = recipe_data["format_version"]
         return format_version
+
+    def get_only_recipe_metadata(self):
+        recipe_meta_data = {
+            "format_version": self.recipe_data["format_version"],
+            "version": self.recipe_data["version"],
+            "name": self.recipe_data["name"],
+            "bounding_box": self.recipe_data["bounding_box"],
+            "composition": {},
+        }
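+        # e.g. for a recipe like one_sphere this yields
+        # {"format_version": "2.0", "version": "1.0.0", "name": "one_sphere",
+        #  "bounding_box": [[0, 0, 0], [100, 100, 100]], "composition": {}}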
"composition": {}, + } + return recipe_meta_data def get_only_recipe_metadata(self): recipe_meta_data = { @@ -187,10 +207,16 @@ def _read(self): atomic=reps.get("atomic", None), packing=reps.get("packing", None), ) - partner_settings = obj["partners"] if "partners" in obj else [] - obj["partners"] = Partners(partner_settings) - if "type" in obj and not INGREDIENT_TYPE.is_member(obj["type"]): - raise TypeError(f"{obj['type']} is not an allowed type") + if not INGREDIENT_TYPE.is_member(obj["type"]): + raise TypeError(f"{obj['type']} is not an allowed type") + if "composition" in recipe_data: + collection = "composition" + data = recipe_data["composition"] + # save_to_firestore(collection, id, data) + partner_settings = obj["partners"] if "partners" in obj else [] + obj["partners"] = Partners(partner_settings) + if "type" in obj and not INGREDIENT_TYPE.is_member(obj["type"]): + raise TypeError(f"{obj['type']} is not an allowed type") # handle gradients if "gradients" in recipe_data: diff --git a/cellpack/autopack/writers/__init__.py b/cellpack/autopack/writers/__init__.py index f4e8b3d1d..7867507f6 100644 --- a/cellpack/autopack/writers/__init__.py +++ b/cellpack/autopack/writers/__init__.py @@ -7,10 +7,166 @@ import os import numpy from collections import OrderedDict +import typing from cellpack import autopack +from mdutils.mdutils import MdUtils +import pandas as pd +from pathlib import Path +class MarkdownWriter(object): + + def __init__(self, title:str, output_path:Path, output_image_location:Path, report_name:str): + self.title = title + self.output_path = output_path + self.output_image_location = output_image_location + self.report_md = MdUtils( + file_name=str(self.output_path / report_name ), + title=title, + ) + + # level is the header style, can only be 1 or 2 + def add_header(self, header:str, level:int=2): + self.report_md.new_header( + level = level, + title = header, + add_table_of_contents="n" + ) + + def add_table(self, header:str, table:pd.DataFrame, text_align="center"): + self.report_md.new_header( + level=1, + title=header, + add_table_of_contents="n", + ) + + text_list = [] + for row in table.values.tolist(): + for item in row: + text_list.append(item) + + self.report_md.new_table( + columns=table.shape[1], + rows=table.shape[0], + text=self.text_list, + text_align=text_align + ) + + def add_table_from_csv(self, header:str, filepath:Path, text_align="center"): + self.report_md.new_header( + level=1, + title=header, + add_table_of_contents="n", + ) + + table = pd.read_csv(filepath) + + text_list = [] + for row in table.values.tolist(): + for item in row: + text_list.append(item) + + self.report_md.new_table( + columns=table.shape[1], + rows=table.shape[0], + text=self.text_list, + text_align=text_align + ) + def write_file(self): + self.report_md.create_md_file() + +from mdutils.mdutils import MdUtils +import pandas as pd +from pathlib import Path + + +class MarkdownWriter(object): + + def __init__(self, title:str, output_path:Path, output_image_location:Path, report_name:str): + self.title = title + self.output_path = output_path + self.output_image_location = output_image_location + self.report_md = MdUtils( + file_name=str(self.output_path / report_name ), + title=title, + ) + + # level is the header style, can only be 1 or 2 + def add_header(self, header:str, level:int=2): + self.report_md.new_header( + level = level, + title = header, + add_table_of_contents="n" + ) + + def add_table(self, header:str, table:pd.DataFrame, text_align="center"): + 
+

 class IOingredientTool(object):
     # parser that can return an ingredient
     def __init__(self, env=None):
diff --git a/cellpack/bin/upload.py b/cellpack/bin/upload.py
index 3f6fede2f..96ea0b2fd 100644
--- a/cellpack/bin/upload.py
+++ b/cellpack/bin/upload.py
@@ -1,7 +1,9 @@
 from enum import Enum
 import fire
-from cellpack.autopack.FirebaseHandler import FirebaseHandler
-from cellpack.autopack.DBRecipeHandler import DBRecipeHandler
+from cellpack.autopack.firebase import FirebaseHandler
 from cellpack.autopack.loaders.recipe_loader import RecipeLoader


@@ -22,14 +24,19 @@ def upload(
     :return: void
     """
     if db_id == DATABASE_IDS.FIREBASE:
-        cred_path = cred_path
-
         # fetch the service key json file
+        db_handler = FirebaseHandler(cred_path)
+
         recipe_loader = RecipeLoader(recipe_path)
         recipe_full_data = recipe_loader.recipe_data
         recipe_meta_data = recipe_loader.get_only_recipe_metadata()
-        recipe_db_handler = DBRecipeHandler(db_handler)
-        recipe_db_handler.upload_recipe(recipe_meta_data, recipe_full_data)
+        db_handler.divide_recipe_into_collections(
+            recipe_meta_data, recipe_full_data
+        )


 def main():
diff --git a/setup.py b/setup.py
index baa49d464..9c9ecf9bc 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,8 @@
 ]

 test_requirements = [
     "black>=19.10b0, <=23.0",
+    "codecov>=2.1.4",
     "flake8>=3.8.3",
     "flake8-debugger>=3.2.1",
     "mdutils>=1.4.0",