Merge pull request #172 from geometric-intelligence/scikitapi
Scikitapi
franciscoeacosta authored Aug 31, 2024
2 parents ed70902 + 759996b commit cf76348
Showing 36 changed files with 31,631 additions and 45,147 deletions.
4 changes: 1 addition & 3 deletions neurometry/datasets/piRNNs/dual_agent/dual_agent_activity.py
@@ -123,9 +123,7 @@ def compute_all_scores(options, file_path, res, rate_map_dual_agent):
)

scores_dir = os.path.join(file_path, "scores")
np.save(
scores_dir + f"score_60_dual_agent_epoch_{epoch}.npy", score_60_dual_agent
)
np.save(scores_dir + f"score_60_dual_agent_epoch_{epoch}.npy", score_60_dual_agent)
np.save(
scores_dir + f"border_scores_dual_agent_epoch_{epoch}.npy",
border_scores_dual_agent,
@@ -175,4 +175,3 @@ def compute_all_scores(options, file_path, res, rate_map_single_agent):
score_60_single_agent, border_scores_single_agent, band_scores_single_agent = (
compute_all_scores(options, file_path, res, rate_map_single_agent)
)

2 changes: 1 addition & 1 deletion neurometry/datasets/piRNNs/dual_agent/visualize.py
@@ -94,7 +94,7 @@ def compute_ratemaps(
all_activations = np.zeros([Ng, res, res, n_avg])
counts = np.zeros([res, res])

#model = model.double()
# model = model.double()
model.eval()

for index in tqdm(range(n_avg), desc="Processing"):
87 changes: 49 additions & 38 deletions neurometry/datasets/piRNNs/load_rnn_grid_cells.py
@@ -17,27 +17,31 @@


def load_rate_maps(run_id, step):
#XU_RNN
model_dir = os.path.join(os.getcwd(), "curvature/grid-cells-curvature/models/xu_rnn")
# XU_RNN
model_dir = os.path.join(
os.getcwd(), "curvature/grid-cells-curvature/models/xu_rnn"
)
run_dir = os.path.join(model_dir, f"logs/rnn_isometry/{run_id}")
activations_file = os.path.join(run_dir, f"ckpt/activations/activations-step{step}.pkl")
activations_file = os.path.join(
run_dir, f"ckpt/activations/activations-step{step}.pkl"
)
with open(activations_file, "rb") as f:
return pickle.load(f)


def load_config(run_id):
model_dir = os.path.join(os.getcwd(), "curvature/grid-cells-curvature/models/xu_rnn")
model_dir = os.path.join(
os.getcwd(), "curvature/grid-cells-curvature/models/xu_rnn"
)
run_dir = os.path.join(model_dir, f"logs/rnn_isometry/{run_id}")
config_file = os.path.join(run_dir, "config.txt")

with open(config_file) as file:
return yaml.safe_load(file)





def extract_tensor_events(event_file, verbose=True):
#XU_RNN
# XU_RNN
records = []
losses = []
try:
@@ -67,7 +71,6 @@ def extract_tensor_events(event_file, verbose=True):
return records, losses



def _compute_scores(activations, config):
block_size = config["model"]["block_size"]
num_neurons = config["model"]["num_neurons"]
Expand All @@ -81,23 +84,23 @@ def _compute_scores(activations, config):

score_list = np.zeros(shape=[len(activations["v"])], dtype=np.float32)
scale_list = np.zeros(shape=[len(activations["v"])], dtype=np.float32)
#orientation_list = np.zeros(shape=[len(weights)], dtype=np.float32)
# orientation_list = np.zeros(shape=[len(weights)], dtype=np.float32)
sac_list = []

for i in range(len(activations["v"])):
rate_map = activations["v"][i]
rate_map = (rate_map - rate_map.min()) / (rate_map.max() - rate_map.min())

score_60, score_90, max_60_mask, max_90_mask, sac, _ = scorer.get_scores(
activations["v"][i])
activations["v"][i]
)
sac_list.append(sac)

score_list[i] = score_60
# scale_list[i] = scale
scale_list[i] = max_60_mask[1]
# orientation_list[i] = orientation


scale_tensor = torch.from_numpy(scale_list)
score_tensor = torch.from_numpy(score_list)
max_scale = torch.max(scale_tensor[score_list > 0.37])
Expand All @@ -109,9 +112,12 @@ def _compute_scores(activations, config):
score_tensor = torch.mean(score_tensor)
sac_array = np.array(sac_list)

return {"sac":sac_array, "scale":scale_tensor, "score": score_tensor, "max_scale": max_scale}


return {
"sac": sac_array,
"scale": scale_tensor,
"score": score_tensor,
"max_scale": max_scale,
}


def get_scores(run_dir, activations, config):
Expand All @@ -131,7 +137,7 @@ def get_scores(run_dir, activations, config):


def load_activations(epochs, file_path, version="single", verbose=True, save=True):
#SORSCHER RNN
# SORSCHER RNN
activations = []
rate_maps = []
state_points = []
Expand All @@ -154,14 +160,13 @@ def load_activations(epochs, file_path, version="single", verbose=True, save=Tru
# activations_dir, f"g_{version}_agent_epoch_{epoch}.npy"
# )

if (
os.path.exists(activations_epoch_path)
and os.path.exists(rate_map_epoch_path)
if os.path.exists(activations_epoch_path) and os.path.exists(
rate_map_epoch_path
):
activations.append(np.load(activations_epoch_path))
rate_maps.append(np.load(rate_map_epoch_path))
#positions.append(np.load(positions_epoch_path))
#g_s.append(np.load(gs_epoch_path))
# positions.append(np.load(positions_epoch_path))
# g_s.append(np.load(gs_epoch_path))
if verbose:
print(f"Epoch {epoch} found.")
else:
Expand All @@ -178,8 +183,8 @@ def load_activations(epochs, file_path, version="single", verbose=True, save=Tru
) = single_agent_activity.main(options, file_path, epoch=epoch)
activations.append(activations_single_agent)
rate_maps.append(rate_map_single_agent)
#positions.append(positions_single_agent)
#g_s.append(g_single_agent)
# positions.append(positions_single_agent)
# g_s.append(g_single_agent)
elif version == "dual":
(
activations_dual_agent,
Expand All @@ -189,14 +194,14 @@ def load_activations(epochs, file_path, version="single", verbose=True, save=Tru
) = dual_agent_activity.main(options, file_path, epoch=epoch)
activations.append(activations_dual_agent)
rate_maps.append(rate_map_dual_agent)
#positions.append(positions_dual_agent)
#g_s.append(g_dual_agent)
# positions.append(positions_dual_agent)
# g_s.append(g_dual_agent)

if save:
np.save(activations_epoch_path, activations[-1])
np.save(rate_map_epoch_path, rate_maps[-1])
#np.save(positions_epoch_path, positions[-1])
#np.save(gs_epoch_path, g_s[-1])
# np.save(positions_epoch_path, positions[-1])
# np.save(gs_epoch_path, g_s[-1])

state_points_epoch = activations[-1].reshape(activations[-1].shape[0], -1)
state_points.append(state_points_epoch)
Expand All @@ -212,9 +217,9 @@ def load_activations(epochs, file_path, version="single", verbose=True, save=Tru
print(
f"rate_maps has shape {rate_maps[0].shape}. There are {rate_maps[0].shape[1]} data points averaged over {activations[0].shape[3]} trajectories in the {rate_maps[0].shape[0]}-dimensional state space."
)
#print(f"positions has shape {positions[0].shape}.")
# print(f"positions has shape {positions[0].shape}.")

return activations, rate_maps, state_points#, positions, g_s
return activations, rate_maps, state_points # , positions, g_s


def plot_rate_map(indices, num_plots, activations, title, seed=None):
@@ -251,11 +256,10 @@ def plot_rate_map(indices, num_plots, activations, title, seed=None):
axes[i * cols + j].axis("off")

fig.suptitle(title, fontsize=30)
#plt.tight_layout()
# plt.tight_layout()
plt.show()



def draw_heatmap(activations, title):
# activations should be a 4-D tensor: [M, N, H, W]
nrow, ncol = activations.shape[0], activations.shape[1]
@@ -295,8 +299,6 @@ def draw_heatmap(activations, title):
return np.expand_dims(image_from_plot, axis=0)




def _z_standardize(matrix):
return (matrix - np.mean(matrix, axis=0)) / np.std(matrix, axis=0)

Expand All @@ -319,7 +321,9 @@ def umap_dbscan(activations, run_dir, config, sac_array=None, plot=True):
if sac_array is None:
sac_array = get_scores(run_dir, activations, config)["sac"]

spatial_autocorrelation_matrix = _vectorized_spatial_autocorrelation_matrix(sac_array)
spatial_autocorrelation_matrix = _vectorized_spatial_autocorrelation_matrix(
sac_array
)

umap_reducer_2d = umap.UMAP(n_components=2, random_state=10)
umap_embedding = umap_reducer_2d.fit_transform(spatial_autocorrelation_matrix.T)
Expand All @@ -340,16 +344,24 @@ def umap_dbscan(activations, run_dir, config, sac_array=None, plot=True):
# col = [0, 0, 0, 1]
continue

class_member_mask = (umap_dbscan.labels_ == k)
class_member_mask = umap_dbscan.labels_ == k

xy = umap_embedding[class_member_mask]
if plot:
axes[0].plot(xy[:, 0], xy[:, 1], "o", markerfacecolor=tuple(col), markeredgecolor="none", markersize=5, label=f"Cluster {k}")
axes[0].plot(
xy[:, 0],
xy[:, 1],
"o",
markerfacecolor=tuple(col),
markeredgecolor="none",
markersize=5,
label=f"Cluster {k}",
)

umap_cluster_labels = umap_dbscan.fit_predict(umap_embedding)
clusters = {}
for i in np.unique(umap_cluster_labels):
#cluster = _get_data_from_cluster(activations,i, umap_cluster_labels)
# cluster = _get_data_from_cluster(activations,i, umap_cluster_labels)
cluster = activations[umap_cluster_labels == i]
clusters[i] = cluster

Expand All @@ -366,4 +378,3 @@ def umap_dbscan(activations, run_dir, config, sac_array=None, plot=True):
plt.tight_layout()
plt.show()
return clusters, umap_cluster_labels

86 changes: 44 additions & 42 deletions neurometry/datasets/piRNNs/saliency/default_config.py
@@ -7,57 +7,59 @@


###-----VARIABLE PARAMETERS-----###
#training
lr=[3e-4,6e-4,9e-4,3e-3,6e-3,9e-3,3e-2]
#model
rnn_step=[10,20,40,60]#,20] #10
w_trans=[0.1,0.3,0.5,0.9,2]#,0.5] #0.1
s_0 = [1,10,100,1000,10000]#,1000]
x_saliency = [0.5,0.8]#,0.8]
sigma_saliency = [0.05,0.1,0.15,0.2,0.5]#,0.5]
# training
lr = [3e-4, 6e-4, 9e-4, 3e-3, 6e-3, 9e-3, 3e-2]
# model
rnn_step = [10, 20, 40, 60] # ,20] #10
w_trans = [0.1, 0.3, 0.5, 0.9, 2] # ,0.5] #0.1
s_0 = [1, 10, 100, 1000, 10000] # ,1000]
x_saliency = [0.5, 0.8] # ,0.8]
sigma_saliency = [0.05, 0.1, 0.15, 0.2, 0.5] # ,0.5]
freeze_decoder = True
#integration
n_inte_step=[50,75,100]#,100] # 50
# integration
n_inte_step = [50, 75, 100] # ,100] # 50

###-----TRAINING PARAMETERS-----###
load_pretrain=True
pretrain_path=os.path.join(os.getcwd(),"pretrained/20240418-180712/ckpt/model/checkpoint-step25000.pth")
num_steps_train=10000#7500 # 10000
lr_decay_from=10000
steps_per_logging=20
steps_per_large_logging=500 # 500
steps_per_integration=1000 #2000
norm_v=True
positive_v=True
positive_u=False
optimizer_type="adam"
load_pretrain = True
pretrain_path = os.path.join(
os.getcwd(), "pretrained/20240418-180712/ckpt/model/checkpoint-step25000.pth"
)
num_steps_train = 10000 # 7500 # 10000
lr_decay_from = 10000
steps_per_logging = 20
steps_per_large_logging = 500 # 500
steps_per_integration = 1000 # 2000
norm_v = True
positive_v = True
positive_u = False
optimizer_type = "adam"

###-----SIMULATED DATA PARAMETERS-----###
max_dr_trans=3.0
max_dr_isometry=15.0
batch_size=10000
sigma_data=0.48
add_dx_0=False
small_int=False
max_dr_trans = 3.0
max_dr_isometry = 15.0
batch_size = 10000
sigma_data = 0.48
add_dx_0 = False
small_int = False

###-----MODEL PARAMETERS-----###
trans_type="nonlinear_simple"
num_grid=40
num_neurons=1800
block_size=12
sigma=0.07
w_kernel=1.05
w_isometry=0.005
w_reg_u=0.2
reg_decay_until=15000
adaptive_dr=True
trans_type = "nonlinear_simple"
num_grid = 40
num_neurons = 1800
block_size = 12
sigma = 0.07
w_kernel = 1.05
w_isometry = 0.005
w_reg_u = 0.2
reg_decay_until = 15000
adaptive_dr = True
reward_step = 10000
saliency_type = "gaussian"

###-----PATH INTEGRATION PARAMETERS-----###
n_traj=100
n_inte_step_vis=50
n_traj_vis=5
n_traj = 100
n_inte_step_vis = 50
n_traj_vis = 5

###-----WORK DIRECTORY-----###
work_dir = os.path.join(os.getcwd(), "results")
Expand All @@ -80,5 +82,5 @@
os.makedirs(figs_dir)

###-----RAY TUNE PARAMETERS-----###
sweep_metric= "error_reencode"
num_samples = 1000#1000
sweep_metric = "error_reencode"
num_samples = 1000 # 1000