diff --git a/heuristic.py b/heuristic.py
index 004fe26..0ed9802 100644
--- a/heuristic.py
+++ b/heuristic.py
@@ -19,10 +19,10 @@ def __col_fil_movies(self, user, item):
             else:
                 continue
             data = self.graph.get_edge_data(movie['label'], item['label'])
-            print(data)
+            # print(data)
             if data is None:
                 continue
-            print(edge, data)
+            # print(edge, data)
             k += data['similarity']
             sim_r += data['similarity'] * edge[2]['rating']
         return (1 / k) * sim_r if k else 0
@@ -40,10 +40,10 @@ def __col_fil_users(self, user, item):
             else:
                 continue
             data = self.graph.get_edge_data(user_1['label'], user['label'])
-            print(data)
+            # print(data)
            if data is None:
                 continue
-            print(edge, data)
+            # print(edge, data)
             k += data['similarity']
             sim_r += data['similarity'] * edge[2]['rating']
         return (1 / k) * sim_r if k else 0
diff --git a/learning.py b/learning.py
index a66b94b..31bb4b7 100644
--- a/learning.py
+++ b/learning.py
@@ -63,7 +63,7 @@ def get_x_y(self, edges):
                         self.clustering[movie['label']], self.degree_centrality[movie['label']],
                         self.closeness_centrality[movie['label']], self.betweenness_centrality[movie['label']]]
             for genre in self.embedding.transform(movie['genres']):
-                print(genre)
+                # print(genre)
                 features.append(float(genre))
             # print(features)
             x.append(features)  # [useId, movieId, genre1, genre2, ...]
diff --git a/main.py b/main.py
index ce56e38..19d828d 100644
--- a/main.py
+++ b/main.py
@@ -29,7 +29,7 @@ def get_mlps(input_1, input_2):
     # graph.init(dataset_directory='data/dataset/')
     # graph.export_gexf(directory='data/graph/')
     # exit(0)
-    graph.read_gexf('data/graph/graph.gexf')
+    graph.read_gexf('data/graph/graph0_6.gexf')
     graph_train, test_edges = graph.split_train_test(0.2)
     print(len(test_edges))

@@ -54,11 +54,13 @@ def get_mlps(input_1, input_2):
     hybrid_features, hybrid_target = hybrid.get_x_y(graph_train.edges(data=True))
     mlps = get_mlps(learning_features, hybrid_features)
     mlps.fit([learning_features, hybrid_features], learning_target, batch_size=1000, epochs=70)
-    loss = mlps.evaluate([test_x_l, test_x_h], test_y_h)
+    rec_m = mlps.predict([test_x_l, test_x_h])
+    # loss = mlps.evaluate([test_x_l, test_x_h], test_y_h)

    ##### EVALUATION #####
    Evaluation.model_evaluation(test_edges, rec_e)
    Evaluation.model_evaluation(test_edges, rec_l)
    Evaluation.model_evaluation(test_edges, rec_h)
+    Evaluation.model_evaluation(test_edges, rec_m)

-    print(f"Mean squared error: {loss}")
+    # print(f"Mean squared error: {loss}")