Documentation and clean code #187

Open · wants to merge 19 commits into base: master
867 changes: 216 additions & 651 deletions app.py

Large diffs are not rendered by default.

17 changes: 16 additions & 1 deletion metrics/A11_Impl.py
@@ -5,7 +5,7 @@

class A11_Impl(AbstractFAIRMetrics):
"""
- GOAL : retrieve embedded semantic annotations
+ GOAL: retrieve embedded semantic annotations
Check how the classes and properties used are known in major standards, as reported in LOV:
1. extract RDF annotations from web page
2. list all used RDFS / OWL classes : ?class matching triple pattern ( ?x rdf:type ?class)
@@ -14,6 +14,9 @@ class A11_Impl(AbstractFAIRMetrics):
"""

def __init__(self, web_resource=None):
"""
The constructor of the metric implementation
"""
super().__init__(web_resource)
self.name = "Open resolution protocol"
self.id = "15"
@@ -26,12 +29,24 @@ def __init__(self, web_resource=None):
"""

def weak_evaluate(self):
"""
The weak evaluation for the A11 metric; it does nothing specific at the moment, only the strong evaluation is defined

Returns:
Evaluation: The Evaluation object containing any new information
"""
eval = self.get_evaluation()
eval.set_implem(self.implem)
eval.set_metrics(self.principle_tag)
return eval

def strong_evaluate(self):
"""
The strong evaluation for the A11 metric; checks that the resource is accessible via an open protocol (HTTP here)

Returns:
Evaluation: The Evaluation object containing any new information
"""
eval = self.get_evaluation()
eval.set_implem(self.implem)
eval.set_metrics(self.principle_tag)
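As context for this part of the diff, the class docstring above describes the A1.1 check as extracting embedded RDF annotations from the web page and listing every RDFS/OWL class used, i.e. every ?class matching the triple pattern (?x rdf:type ?class), before checking how those terms are known in LOV. The lines below are only a sketch of that triple-pattern step using rdflib; the helper name list_used_classes, the json-ld input format and the omission of the LOV lookup are assumptions for illustration, not code from this pull request.

from rdflib import Graph

def list_used_classes(rdf_payload, rdf_format="json-ld"):
    """Sketch only: list the distinct classes used in embedded annotations.

    Parses RDF extracted from a web page and returns every ?class matching
    the triple pattern (?x rdf:type ?class), as described in the A11_Impl
    docstring. Hypothetical helper, not part of this PR.
    """
    graph = Graph()
    graph.parse(data=rdf_payload, format=rdf_format)
    query = """
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        SELECT DISTINCT ?class WHERE { ?x rdf:type ?class }
    """
    # each result row holds a single binding: the class URI
    return [str(row[0]) for row in graph.query(query)]

Per the docstring, the real metric additionally checks how those classes and properties are known in major standards via LOV, which is out of scope for this sketch.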
113 changes: 111 additions & 2 deletions metrics/AbstractFAIRMetrics.py
@@ -6,6 +6,12 @@


class AbstractFAIRMetrics(ABC):
"""
The AbstractFAIRMetrics class

Args:
ABC (ABC): Generic abstract class
"""

COMMON_SPARQL_PREFIX = """
PREFIX schema: <http://schema.org/>
@@ -24,6 +30,12 @@ class AbstractFAIRMetrics:
cache = {}

def __init__(self, web_resource=None):
"""
The constructor of the AbstractFAIRMetrics

Args:
web_resource (WebResource, optional): A WebResource that will be evaluated. Defaults to None.
"""
self.name = "My metric name"
self.id = "My id"
self.desc = "My desc"
@@ -40,55 +52,146 @@ def __init__(self, web_resource=None):

# name
def get_name(self):
"""
Get the name of the metric

Returns:
str: The name of the metric
"""
return self.name

# desc
def get_desc(self):
"""
Get the description of the metric

Returns:
str: The description of the metric
"""
return self.desc
# print(f'Description: {self.desc}')

def get_id(self):
"""
Get the ID of the metric

Returns:
str: The ID of the metric
"""
return self.id

def get_principle(self):
"""
Get the principle to which the metric applies

Returns:
str: The principle of the metric
"""
return self.principle

def get_principle_tag(self):
"""
Get the principle tag of the metric (e.g. F1A, I2)

Returns:
str: The principle TAG of the metric
"""
return self.principle_tag

def get_creator(self):
"""
Get the creator name of the metric

Returns:
str: The creator of the metric
"""
return self.creator

def get_implem(self):
"""
Get the implementer of the metric (e.g. FAIR-Checker, FAIRMetrics)

Returns:
str: The implementer of the metric
"""
return self.implem

def get_creation_date(self):
"""
Get the creation date of the metric

Returns:
str: The creation date of the metric
"""
return self.created_at

def get_update_date(self):
"""
Get the latest update date of the metric

Returns:
str: The update date of the metric
"""
return self.updated_at

def get_requests_status_code(self):
"""
Get the status_code returned when requesting the URL of the evaluated WebResource instance

Returns:
int: The status_code of the resource
"""
return self.requests_status_code

def get_web_resource(self):
"""
Get the WebResource instance that is evaluated

Returns:
WebResource: The WebResource instance
"""
return self.web_resource

def get_evaluation(self):
"""
Get the Evaluation instance for a metric and a specific resource

Returns:
Evaluation: The evaluation instance
"""
return self.evaluation

def set_id(self, id):
"""
Set the ID of the FAIR metric

Args:
id (str): The ID of the metric
"""
self.id = id

def set_web_resource(self, web_resource):
"""
Set the WebResource that will be evaluated, using an existing instance

Args:
web_resource (WebResource): The WebResource instance
"""
self.web_resource = web_resource

def set_new_evaluation(self):
"""
Set a new instance of an evaluation
"""
self.evaluation = Evaluation()

def evaluate(self) -> Evaluation:
"""
Evaluate a WebResource using the FAIR metric implementation

Returns:
Evaluation: The Evaluation instance completed with the evaluation results and metadata
"""

# print([cls.get_implem(self) for cls in AbstractFAIRMetrics.__subclasses__()])
logging.debug(f"Evaluating metrics {self.get_name()}")
logging.debug(f"Evaluating metrics {self.get_principle_tag()}")
self.set_new_evaluation()
@@ -151,6 +254,12 @@ def strong_evaluate(self) -> Evaluation:
pass

def __str__(self):
"""
The string representation of the metric, displaying its main information.

Returns:
str: The string representation of the metric
"""
return (
f"FAIR metrics {self.id} : "
f"\n\t {self.principle} "
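To tie the pieces of this diff together, the evaluate() method documented above is the entry point called on a metric instance: it creates a fresh Evaluation and, presumably, delegates to the weak or strong evaluation defined by the subclass. A minimal usage sketch follows; the WebResource import path and the example URL are assumptions for illustration, not taken from this pull request.

# Minimal usage sketch (assumed import paths and URL, not part of this PR).
from metrics.A11_Impl import A11_Impl
from metrics.WebResource import WebResource  # assumed module path

resource = WebResource("https://example.org/dataset")  # placeholder URL
metric = A11_Impl(web_resource=resource)

evaluation = metric.evaluate()  # the Evaluation instance with results and metadata

print(metric)            # uses the __str__ method shown in this diff
print(type(evaluation))  # the Evaluation object returned by evaluate()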