
Commit

add in dataframes for building loads only
nllong committed Aug 27, 2024
1 parent d78891c commit a1f35be
Showing 3 changed files with 188 additions and 9 deletions.
8 changes: 7 additions & 1 deletion urbanopt_des/urbanopt_analysis.py
@@ -52,6 +52,7 @@ def __init__(
        self.number_of_buildings = len(self.geojson.get_building_ids())

        # Container for URBANopt results
        # TODO: make this a list in the future to hold multiple URBANopt results
        self.urbanopt = None

        # Container for Modelica results
@@ -681,7 +682,10 @@ def create_rollups(self) -> None:
        # roll up the URBANopt results (single analysis)
        self.urbanopt.data_monthly = self.urbanopt.data.resample("M").sum()
        self.urbanopt.data_annual = self.urbanopt.data.resample("Y").sum()

        # roll up the building loads
        self.urbanopt.data_loads_monthly = self.urbanopt.data_loads.resample("M").sum()
        self.urbanopt.data_loads_annual = self.urbanopt.data_loads.resample("Y").sum()

        # roll up the Modelica results (each analysis)
        for analysis_name in self.modelica.keys():
            self.modelica[analysis_name].monthly = (
@@ -736,6 +740,8 @@ def create_building_level_results(self) -> None:
        # combine all the data together for the final dataframe. The list comprehension
        # here will create the table that is shown in the docstring above
        df = pd.DataFrame([data[key] for key in data.keys()])
        # set the index to be the metric and the unit
        df.set_index(["Metric", "Unit"], inplace=True)

        return df
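
As a reading aid, here is a minimal sketch of how the (Metric, Unit)-indexed frame built above might be consumed downstream; the metric names and values are placeholders, not actual URBANopt output:

import pandas as pd

# Placeholder rows mimicking the structure assembled above (illustrative values only)
data = {
    "electricity": {"Metric": "Total Electricity", "Unit": "kWh", "Building 1": 1200.0},
    "peak": {"Metric": "Peak Demand", "Unit": "kW", "Building 1": 45.0},
}
df = pd.DataFrame([data[key] for key in data.keys()])
df.set_index(["Metric", "Unit"], inplace=True)

# Rows are now addressed by their (Metric, Unit) pair
peak_row = df.loc[("Peak Demand", "kW")]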

47 changes: 47 additions & 0 deletions urbanopt_des/urbanopt_geojson.py
@@ -65,6 +65,19 @@ def get_building_names(self) -> list:

        return result

    def get_buildings(self, ids: list[str] = None, keep_properties: list[str] = None) -> list:
        """Return a list of all the features of type Building, optionally filtered to the
        requested ids and trimmed to the properties listed in keep_properties."""
        result = []
        for feature in self.data["features"]:
            if feature["properties"]["type"] == "Building":
                # check if it is in the list of ids
                if ids is None or feature["properties"]["id"] in ids:
                    # only keep the fields that are in the keep_properties list
                    if keep_properties is not None:
                        feature = dict(feature)
                        feature["properties"] = {k: v for k, v in feature["properties"].items() if k in keep_properties}
                    result.append(feature)

        return result
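
A minimal usage sketch of the new method; the class name UrbanOptGeoJson, the file path, and the property names here are assumptions for illustration only:

from pathlib import Path

# Hypothetical usage; the constructor, path, and property names are assumed
geojson = UrbanOptGeoJson(Path("example_project.json"))
all_buildings = geojson.get_buildings()
subset = geojson.get_buildings(ids=["5", "7"], keep_properties=["id", "floor_area"])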

    def get_building_properties_by_id(self, building_id: str) -> dict:
        """Get the properties of a building by its id. The Building id is what
        is used in URBANopt as the identifier. It is common that this is used to name
@@ -118,3 +131,37 @@ def get_monthly_readings(self, building_id: str, meter_type: str) -> list:
                result = feature["properties"]["monthly_electricity"]

        return result

    def set_property_on_building_id(self, building_id: str, property_name: str, property_value: str, overwrite=True) -> None:
        """Set a property on the feature with the given building_id"""
        for feature in self.data["features"]:
            if feature["properties"]["type"] == "Building" and feature["properties"]["id"] == building_id:
                if overwrite or property_name not in feature["properties"]:
                    feature["properties"][property_name] = property_value

    def get_property_on_building_id(self, building_id: str, property_name: str) -> str:
        """Get a property from the feature with the given building_id"""
        for feature in self.data["features"]:
            if feature["properties"]["type"] == "Building" and feature["properties"]["id"] == building_id:
                return feature["properties"].get(property_name, None)

    def get_site_lat_lon(self) -> tuple:
        """Return the site's latitude and longitude"""
        for feature in self.data["features"]:
            if feature["properties"].get("name") == "Site Origin":
                # coordinates are stored as (longitude, latitude); reverse to (lat, lon)
                return feature["geometry"]["coordinates"][::-1]

    def save(self) -> None:
        """Save the GeoJSON file in place"""
        self.save_as(self._filename)

    def save_as(self, filename: Path) -> None:
        """Save the GeoJSON file to the given filename"""
        with open(filename, "w") as f:
            json.dump(self.data, f, indent=2)
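
A round-trip sketch of the new property setter/getter and save methods; the class name, ids, and property values are assumptions for illustration:

# Hypothetical round trip (class name, ids, and property values assumed)
geojson = UrbanOptGeoJson(Path("example_project.json"))
geojson.set_property_on_building_id("5", "load_model", "rc", overwrite=False)
assert geojson.get_property_on_building_id("5", "load_model") == "rc"
geojson.save_as(Path("example_project_tagged.json"))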
142 changes: 134 additions & 8 deletions urbanopt_des/urbanopt_results.py
@@ -59,13 +59,20 @@ def __init__(self, uo_path: Path, scenario_name: str) -> None:

        # This is the default data resolution, which has to be 60 minutes!
        self.data = None
        # object to store the 15-minute resolution data
        self.data_15min = None

        self.data_monthly = None
        self.data_annual = None

        # objects to store building loads
        self.data_loads = None
        self.data_loads_15min = None
        self.data_loads_monthly = None
        self.data_loads_annual = None

        # end use summaries
        self.end_use_summary = None

        # grid metrics
        self.grid_metrics_daily = None
        self.grid_metrics_annual = None

@@ -390,13 +397,28 @@ def save_dataframes(self) -> None:
        if self.data_annual is not None:
            self.data_annual.to_csv(self.output_path / "power_annual.csv")

        # building loads
        if self.data_loads is not None:
            self.data_loads.to_csv(self.output_path / "loads_60min.csv")

        if self.data_loads_15min is not None:
            self.data_loads_15min.to_csv(self.output_path / "loads_15min.csv")

        if self.data_loads_monthly is not None:
            self.data_loads_monthly.to_csv(self.output_path / "loads_monthly.csv")

        if self.data_loads_annual is not None:
            self.data_loads_annual.to_csv(self.output_path / "loads_annual.csv")

        if self.grid_metrics_daily is not None:
            self.grid_metrics_daily.to_csv(self.output_path / "grid_metrics_daily.csv")

        if self.grid_metrics_annual is not None:
            self.grid_metrics_annual.to_csv(self.output_path / "grid_metrics_annual.csv")

    def create_aggregations(self, building_names: list[str]) -> None:
        """Aggregate the results from all the buildings together to get the totals
@@ -583,9 +605,7 @@ def create_aggregations(self, building_names: list[str]) -> None:
        finally:
            pass

    def process_results(self, building_names: list[str], year_of_data: int = 2017) -> bool:
        """The building-by-building end uses are only available in each run directory's feature
        report. This method will create a dataframe with the end uses for each building.
@@ -659,6 +679,63 @@ def process_results(
        # TODO: add variables to the urbanopt_single_feature_file_variables.json

        return True

    def process_load_results(self, building_names: list[str], year_of_data: int = 2017) -> bool:
        """The building-by-building loads are results of an OpenStudio measure. The data are only
        available in each run directory's modelica_report. This method will create a dataframe with
        the loads for each building.

        Args:
            building_names (list[str]): Must be passed since the names come from the GeoJSON, which we don't load
            year_of_data (int): Year of the data. This is used to set the year of the datetime index. Defaults to 2017
        """
        # reset any previously processed loads; self.data_loads is initialized in the constructor
        self.data_loads = None

        for building_id in building_names:
            print(f"Processing building time series loads for {building_id}")
            load_report = self.get_urbanopt_export_building_loads(
                self.path / "run" / self.scenario_name / building_id
            )

            # update the column names to include the building id, skipping the Datetime column
            load_report = load_report.rename(
                columns={c: f"{c} Building {building_id}" for c in load_report.columns if c != "Datetime"}
            )

            # convert the Datetime column from a string to a datetime. The year should be set to a
            # year whose days of the week line up correctly with the real data; this defaults
            # to year_of_data.
            load_report["Datetime"] = pd.to_datetime(load_report["Datetime"], format="%m/%d/%Y %H:%M")
            load_report["Datetime"] = load_report["Datetime"].apply(lambda x: x.replace(year=year_of_data))

            # make the datetime column the index
            load_report = load_report.set_index("Datetime")

            if self.data_loads is None:
                self.data_loads = load_report
            else:
                # join this building's columns onto the existing frame on the shared datetime index
                self.data_loads = pd.concat([self.data_loads, load_report], axis=1, join="inner")

        # aggregate the building-level columns to create district totals
        self.data_loads["TotalCoolingSensibleLoad"] = self.data_loads.filter(like="TotalCoolingSensibleLoad").sum(axis=1)
        self.data_loads["TotalHeatingSensibleLoad"] = self.data_loads.filter(like="TotalHeatingSensibleLoad").sum(axis=1)
        self.data_loads["TotalWaterHeating"] = self.data_loads.filter(like="TotalWaterHeating").sum(axis=1)
        self.data_loads["TotalSensibleLoad"] = self.data_loads["TotalCoolingSensibleLoad"] + self.data_loads["TotalHeatingSensibleLoad"]
        self.data_loads["TotalSensibleLoadWithWaterHeating"] = self.data_loads["TotalSensibleLoad"] + self.data_loads["TotalWaterHeating"]

        # self.data_loads["Total Building Natural Gas"] = self.data_loads.filter(like="NaturalGas").sum(axis=1)

        # Upsample to 15 minutes to provide higher resolution data for comparing end uses.
        # This only works for specific variables such as energy (kWh, Btu, etc.)
        self.data_loads_15min = self.data_loads.resample("15min").ffill()

        return True
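
A sketch of the intended call sequence, assuming the class is constructed with the project path and scenario name as in __init__ above; the class name URBANoptResults, paths, and building ids are placeholders:

# Hypothetical driver code; class name, paths, scenario, and building ids assumed
results = URBANoptResults(Path("./uo_project"), "baseline_scenario")
results.process_load_results(["1", "2", "3"], year_of_data=2017)
results.save_dataframes()  # writes loads_60min.csv and loads_15min.csv to output_path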

    def calculate_carbon_emissions(
        self,
@@ -921,21 +998,38 @@ def save_urbanopt_variables(self, save_filename: Path) -> None:
        with open(self.path / save_filename, "w") as f:
            json.dump(self.get_urbanopt_feature_report_columns(), f, indent=2)

    def _search_for_file_in_reports(self, search_dir: Path, filename: str, measure_name: str = None) -> Path:
        """Search for a report file in a directory and return the path, if it exists.
        If the filename has more than one period, e.g., .tar.gz, then this will not work
        as expected.

        Args:
            search_dir (Path): Path for where to start looking for the file
            filename (str): Name of the file to search for
            measure_name (str): Name of the measure directory to search in. Defaults to None.
        """
        # FIXME: this method needs tests and can certainly be cleaned up
        report_file = search_dir / "feature_reports" / filename
        if not report_file.exists():
            filename = Path(filename)
            # OpenStudio puts the results in a directory named with the filename without the extension
            dirs = list(search_dir.glob(f"*_{filename.stem}"))
            if len(dirs) == 1:
                report_file = dirs[0] / filename
            elif len(dirs) == 0:
                # If we are here, then it is likely that the report is in
                # another measure directory which we need to find. This is
                # where measure_name is used, to make sure we return the
                # file from the appropriate measure, since the file could be
                # in multiple measure directories.
                dirs_2 = list(search_dir.glob(f"*_{measure_name}"))
                if len(dirs_2) == 1:
                    report_file = dirs_2[0] / filename
                elif len(dirs_2) == 0:
                    raise Exception(f"Could not find {filename} in {search_dir} with measure name {measure_name}")
                else:
                    raise Exception(f"More than one {filename} found in dirs: {dirs_2}")
            else:
                raise Exception(f"More than one {filename} found in dirs: {dirs}")

@@ -998,3 +1092,35 @@ def get_urbanopt_default_feature_report(self, search_dir: Path) -> pd.DataFrame:
            return report
        else:
            raise Exception(f"Could not find default_feature_report.csv in {search_dir}")  # noqa

    def get_urbanopt_export_building_loads(self, search_dir: Path) -> pd.DataFrame:
        """Return the contents of the building_loads.csv file as a DataFrame.

        Args:
            search_dir (Path): Path for where to start looking for the file

        Returns:
            pd.DataFrame: DataFrame of the building loads
        """
        report_file = self._search_for_file_in_reports(search_dir, "building_loads.csv", measure_name="export_modelica_loads")
        print(f"Processing building loads from {report_file}")
        if report_file.exists():
            # only grab the columns that we care about
            columns_to_keep_and_map = {
                "Date Time": "Datetime",
                "TotalSensibleLoad": "TotalSensibleLoad (W)",
                "TotalCoolingSensibleLoad": "TotalCoolingSensibleLoad (W)",
                "TotalHeatingSensibleLoad": "TotalHeatingSensibleLoad (W)",
                "TotalWaterHeating": "TotalWaterHeating (W)",
            }

            # read the file keeping only those columns and rename them to include the units
            report = pd.read_csv(report_file, usecols=columns_to_keep_and_map.keys())
            report = report.rename(columns=columns_to_keep_and_map)

            # convert all values to floats except the first column, which is the date time
            cols = report.columns
            report[cols[1:]] = report[cols[1:]].apply(pd.to_numeric, errors="coerce")
            return report
        else:
            raise Exception(f"Could not find building_loads.csv in {search_dir}")  # noqa
