-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
149 lines (111 loc) · 5.66 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
# Import models
from models.LSTM import LSTMModel
from models.GRU import GRUModel
from models.Transformer import TransformerModel
# Import functions for data preprocessing
from data.data_functions import data_preprocess
from data.data_functions import create_sequences
from data.data_functions import create_data_loaders
# Import functions for model training and evaluation
from train.train import model_train
from train.evaluation import model_evaluate
# Import argument parser
from utils.argparser import arg_parse
def _build_model(args, input_size):
    """Construct the model selected by ``args.model``.

    Args:
        args: Parsed CLI namespace; reads ``model``, ``hidden_size``,
            ``number_of_layers``, ``model_dimension`` and ``attention_heads``.
        input_size: Number of input features (last dim of the sequence tensor).

    Returns:
        An un-trained ``nn.Module`` (LSTM, GRU or Transformer variant).

    Raises:
        ValueError: If ``args.model`` is not one of the supported names —
            previously an unknown name silently fell through and crashed
            later with a NameError on ``model``.
    """
    output_size = 1  # single regression target: power consumption
    if args.model == 'LSTM':
        return LSTMModel(input_size, args.hidden_size, args.number_of_layers, output_size)
    if args.model == 'GRU':
        return GRUModel(input_size, args.hidden_size, args.number_of_layers, output_size)
    if args.model == 'Transformer':
        return TransformerModel(input_size, args.model_dimension,
                                args.attention_heads, args.number_of_layers,
                                output_size)
    raise ValueError(f"Unknown model type: {args.model!r} "
                     "(expected 'LSTM', 'GRU' or 'Transformer')")


if __name__ == "__main__":
    # Parse the command-line arguments
    args = arg_parse()
    # Pick GPU when available, otherwise fall back to CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Load and preprocess the power-consumption dataset
    data = data_preprocess("data/powerconsumption.csv")
    # Slice the data into (sequence, target) training pairs
    sequences, targets = create_sequences(data, args.sequence_length,
                                          args.prediction_step,
                                          args.prediction_target)
    # Wrap the pairs in train/test DataLoaders
    train_loader, test_loader = create_data_loaders(sequences, targets)
    # Number of features per timestep; assumes sequences is (N, seq_len, features)
    input_size = sequences.shape[2]

    if args.train:
        # --- Train a fresh model, then evaluate it ---
        print(f"Training the {args.model} model")
        model = _build_model(args, input_size).to(device)
        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
        # Use the explicit --model_name when given, else the architecture name
        model_name = args.model_name if args.model_name != '' else args.model
        model_train(model, criterion, optimizer, args.epochs,
                    train_loader, device, model_name)
        model_evaluate(model, test_loader, criterion, device)
    elif args.load != '':
        # --- Load a saved checkpoint and evaluate it ---
        print(f"Using the {args.model} model")
        model = _build_model(args, input_size)
        # map_location lets a checkpoint saved on GPU load on a CPU-only host
        model.load_state_dict(torch.load(args.load, map_location=device))
        print(f'Model loaded from {args.load}\n')
        model.to(device)
        model.eval()
        criterion = nn.MSELoss()
        model_evaluate(model, test_loader, criterion, device)