Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Integrates Timeslices, change time-series to PU #32

Merged
merged 16 commits into from
Sep 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 13 additions & 13 deletions src/r2x/defaults/config.json
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,9 @@
},
"default_num_units": 1,
"default_reserve_types": [
"Spinning",
"Flexibility",
"Regulation"
"SPINNING",
"FLEXIBILITY",
"REGULATION"
],
"device_inference_string": {},
"distribution_losses": 1,
Expand Down Expand Up @@ -260,24 +260,24 @@
},
"reserve_types": {
"1": {
"direction": "Up",
"type": "Spinning"
"direction": "UP",
"type": "SPINNING"
},
"2": {
"direction": "Down",
"type": "Spinning"
"direction": "DOWN",
"type": "SPINNING"
},
"3": {
"direction": "Up",
"type": "Regulation"
"direction": "UP",
"type": "REGULATION"
},
"4": {
"direction": "Down",
"type": "Regulation"
"direction": "DOWN",
"type": "REGULATION"
},
"default": {
"direction": "Up",
"type": "Spinning"
"direction": "UP",
"type": "SPINNING"
}
},
"reserve_vors": {
Expand Down
4 changes: 2 additions & 2 deletions src/r2x/defaults/plexos_input.json
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
"Load Risk": "load_risk",
"Loss Incr": "losses",
"Maintenance Rate": "planned_outage_rate",
"Max Capacity": "active_power",
"Max Capacity": "rating",
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why is this rating?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I changed this to rating since active_power in Sienna only refers to the initial set point of active power (MW). Although rating in Sienna is in MVA, I still think rating makes more sense than active power, since we're assuming these plexos models are not running models with reactive power.

I considered using max_active_power, but I reserved that for the plexos Rating field since that is typically used to specify time-varying max_active_power limits in combination with the Rating Factor field.

"Max Flow": "max_power_flow",
"Max Ramp Down": "ramp_down",
"Max Ramp Up": "ramp_up",
Expand All @@ -34,7 +34,7 @@
"Production Rate": "rate",
"Pump Efficiency": "pump_efficiency",
"Pump Load": "pump_load",
"Rating": "rating",
"Rating": "max_active_power",
"Reactance": "reactance",
"Resistance": "resistance",
"Start Cost": "startup_cost",
Expand Down
6 changes: 5 additions & 1 deletion src/r2x/exporter/handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,10 +145,14 @@ def export_data_files(self, time_series_folder: str = "Data") -> None:
config_dict["component_type"] = component_type
csv_fname = string_template.safe_substitute(config_dict)
csv_table = np.column_stack([date_time_column, *time_series_arrays])
header = '"DateTime",' + ",".join(
[f'"{name}"' for name in self.time_series_name_by_type[component_type]]
)

np.savetxt(
csv_fpath / csv_fname,
csv_table,
header="DateTime," + ",".join(self.time_series_name_by_type[component_type]),
header=header,
delimiter=",",
comments="",
fmt="%s",
Expand Down
24 changes: 1 addition & 23 deletions src/r2x/exporter/sienna.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,6 @@
from loguru import logger

# Local imports
from infrasys.time_series_models import SingleTimeSeries
from pint import Quantity
from r2x.exporter.handler import BaseExporter
from r2x.models import (
ACBranch,
Expand Down Expand Up @@ -228,6 +226,7 @@ def process_gen_data(self, fname="gen.csv"):
"prime_mover_type",
"bus_id",
"fuel",
"base_mva",
"rating",
"unit_type",
"active_power",
Expand Down Expand Up @@ -436,27 +435,6 @@ def create_timeseries_pointers(self) -> None:
logger.info("File timeseries_pointers.json created.")
return

def create_extra_data_json(self) -> None:
    """Create extra_data.json file.

    Serializes the ``ext`` dictionary of every component in the system to
    ``extra_data.json`` in the output folder, grouped by component type.
    ``Quantity`` values are converted with ``to_tuple()`` so they are JSON
    serializable; time-series entries are skipped because they are exported
    separately.
    """
    extra_data = []
    # One entry per concrete component type present in the system.
    for model in self.system.get_component_types():
        model_type_name = model.__name__
        component_dict = {
            component.name: {
                # pint Quantities are not JSON serializable; to_tuple()
                # yields (magnitude, units) pairs that are.
                item: value.to_tuple() if isinstance(value, Quantity) else value
                for item, value in component.ext.items()
                # Time series are written via the time-series export path,
                # not duplicated into extra_data.json.
                if not isinstance(value, SingleTimeSeries)
            }
            for component in self.system.get_components(model)
        }
        extra_data.append({model_type_name: component_dict})

    with open(os.path.join(self.output_folder, "extra_data.json"), mode="w") as f:
        json.dump(extra_data, f)

    logger.info("File extra_data.json created.")
    return

def export_data(self) -> None:
"""Export csv data to specified folder from output_data attribute."""
logger.debug("Saving Sienna data and timeseries files.")
Expand Down
22 changes: 22 additions & 0 deletions src/r2x/parser/parser_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from loguru import logger
import polars as pl
import pandas as pd
from datetime import datetime


def pl_filter_year(df, year: int | None = None, year_columns=["t", "year"], **kwargs):
Expand All @@ -22,6 +23,27 @@ def pl_filter_year(df, year: int | None = None, year_columns=["t", "year"], **kw
return df.filter(pl.col(matching_names[0]) == year)


def filter_property_dates(system_data: pl.DataFrame, study_year: int):
    """Filter property rows whose validity window covers the study year.

    A row is kept when ``date_from`` is null or on/before Jan 1 of
    ``study_year`` AND ``date_to`` is null or on/after that date.

    NOTE: only the first day of the study year is checked; partial-year
    validity windows are not handled yet — revisit this at some point.
    """
    cutoff = datetime(study_year, 1, 1)

    # Parse the ISO-8601 date strings into proper date columns.
    system_data = system_data.with_columns(
        [
            pl.col("date_from").str.strptime(pl.Datetime, "%Y-%m-%dT%H:%M:%S").cast(pl.Date),
            pl.col("date_to").str.strptime(pl.Datetime, "%Y-%m-%dT%H:%M:%S").cast(pl.Date),
        ]
    )

    # Each bound passes when it is absent or brackets the cutoff date.
    starts_in_time = pl.col("date_from").is_null() | (pl.col("date_from") <= cutoff)
    ends_in_time = pl.col("date_to").is_null() | (pl.col("date_to") >= cutoff)
    return system_data.filter(starts_in_time & ends_in_time)


def pl_lowercase(df: pl.DataFrame, **kwargs):
logger.trace("Lowercase columns: {}", df.collect_schema().names())
result = df.with_columns(pl.col(pl.String).str.to_lowercase()).rename(
Expand Down
Loading