diff --git a/abm/data/metaprotocol/experiments/scripts/organize_distributed_experiment.py b/abm/data/metaprotocol/experiments/scripts/organize_distributed_experiment.py
index 133e9841..3cf749b8 100644
--- a/abm/data/metaprotocol/experiments/scripts/organize_distributed_experiment.py
+++ b/abm/data/metaprotocol/experiments/scripts/organize_distributed_experiment.py
@@ -4,7 +4,7 @@
 # folder in which the individual hashed exp folders are
 # distributed_exp_path = "/home/david/Desktop/clustermount/ABM/abm/data/simulation_data/figExp1N100"
-distributed_exp_path = "/home/david/Desktop/database/figExp2A/figExp2AintermedN25NoColl"
+distributed_exp_path = "/home/david/Desktop/database/figExp2AintermedN100NoColl"
 
 hashed_subfolders = glob(os.path.join(distributed_exp_path, "*/"), recursive=False)
 batch_num = 0
diff --git a/abm/load_data_test.py b/abm/load_data_test.py
index 42f2efa4..490b70a8 100644
--- a/abm/load_data_test.py
+++ b/abm/load_data_test.py
@@ -1,6 +1,6 @@
 from abm.replay.replay import ExperimentReplay
 
-loaded_experiment = ExperimentReplay("/home/david/Desktop/database/figExp0NoColl/figExp0N25NoColl")
+loaded_experiment = ExperimentReplay("/media/david/DMezeySCIoI/ABMData/VisFlock/VFExp4c")
 loaded_experiment.start()
 # loaded_experiment.experiment.calculate_search_efficiency()
 # m_eff = np.mean(loaded_experiment.experiment.efficiency, axis=0)
diff --git a/abm/loader/data_loader.py b/abm/loader/data_loader.py
index 16dfd077..abd33ac3 100644
--- a/abm/loader/data_loader.py
+++ b/abm/loader/data_loader.py
@@ -15,6 +15,8 @@ import numpy as np
 from matplotlib import pyplot as plt
 import zarr
+from fastcluster import linkage
+from scipy.cluster.hierarchy import dendrogram
 
 
 class LoadedAgent(Agent):
@@ -1020,6 +1022,91 @@ def calculate_pairwise_pol_matrix_supervect(self, t, undersample=1, batchi=0):
         pol_matrix = np.moveaxis(pol_matrix, -1, agent_dim)
         return pol_matrix
 
+    def return_clustering_distnace(self, condition_idx, t_real, undersample, pm=None):
+        """Returns the clustering distance calculated from orientation and inter-individual distance (iid)"""
+        t_closest = int(t_real / undersample)
+        # print("Using t_closest=", t_closest, "for t_real=", t_real)
+        max_dist = (self.env.get("ENV_WIDTH") ** 2 + self.env.get("ENV_HEIGHT") ** 2) ** 0.5 / 2
+        niidm = self.iid_matrix[condition_idx + tuple([slice(None), slice(None), t_closest])]  # /max_dist
+        niidm = np.abs(np.median(niidm)-niidm) / max_dist
+        # niidm = (niidm - mean_iidm) / std_iidm
+        # niidm = (niidm - np.mean(niidm)) / max_dist
+        # # punishing large distances more
+        # niidm = niidm ** 2
+        # make lower triangle elements the same as upper triangle elements
+        niidm = niidm + niidm.T
+
+        if pm is None:
+            pm = self.calculate_pairwise_pol_matrix_vectorized(condition_idx, int(t_closest*undersample))
+
+        # standardizing pm and normiidm so that their mean is 0 and their std is 1
+        # todo: here we normalize with the mean and std of the slice and not the whole matrix
+        # pm = (pm - np.mean(pm)) / np.std(pm)
+        dist_pm = 1 - pm.astype('float')
+        # # squaring distances to punish large distances more
+        # dist_pm = dist_pm ** 2
+
+        # calculating the final distance matrix as a weighted average of the two
+        dist = (dist_pm + niidm) / 2
+
+        return niidm, dist_pm, dist
+
+    def return_dendogram_data(self, linkage, labels, thr=1, show_leaf_counts=True, no_plot=True):
+        """Returning the clustering data according to the linkage matrix"""
+        ret = dendrogram(linkage, color_threshold=thr, labels=labels,
+                         show_leaf_counts=show_leaf_counts, no_plot=no_plot)
+        return ret
+
+    def plot_clustering_in_current_t(self, idx, with_plotting=True):
+        """Calculating clustering in a given time step
+        param: idx: index tuple of the currently viewed slice from the replay tool
+            idx = (batchi, , slice(None), slice(None), t)
+            idx will be used to index both the IID and polarization matrices to get an NxN matrix of pairwise distances
+            for both the interindividual distance and the pairwise polarization."""
+        if self.iid_matrix is None:
+            self.calculate_interindividual_distance()
+
+        num_agents = int(self.env.get("N"))
+        # The shape will depend on the IID matrix which is costly.
+        # If we use undersample there, we also use undersample here
+        num_timesteps_orig = self.env.get("T")
+        num_timesteps = self.iid_matrix.shape[-1]
+        undersample = int(num_timesteps_orig // num_timesteps)
+        print(f"Num varying params: {len(self.varying_params)}")
+        num_varying_params = len(self.varying_params)
+
+        condition_idx = idx[0:-1]
+        t = idx[-1]
+        t_closest = int(t / undersample)
+
+        # calculating the clustering distance
+        niidm, dist_pm, dist = self.return_clustering_distnace(condition_idx, t, undersample)
+
+        # clustering
+        linkage_matrix = linkage(dist, "ward")
+        # print(linkage_matrix.shape, linkage_matrix)
+        ret = self.return_dendogram_data(linkage_matrix, [i for i in range(num_agents)], no_plot=not with_plotting)
+        colors = [color for _, color in sorted(zip(ret['leaves'], ret['leaves_color_list']))]
+        # print(colors)
+        # print(len(list(set(colors))))
+        group_ids = np.array([int(a.split("C")[1]) for a in colors])
+        for i, gid in enumerate(group_ids):
+            # when gid is zero we make it the maximum element
+            # because it means it is an independent leaf
+            if gid == 0:
+                group_ids[i] = np.max(group_ids) + 1
+        group_ids -= 1
+        # print(group_ids)
+
+        fig, ax = plt.subplots(1, 2)
+        ax[0].imshow(dist_pm, cmap="viridis")
+        ax[0].set_title("Pairwise polarization")
+        ax[1].imshow(niidm, cmap="viridis")
+        ax[1].set_title("Interindividual distance")
+        plt.show()
+
+
     def calculate_clustering(self):
         """Using hierarhical clustering according to inter-individual distance and order scores to get
         number of subgroups"""
@@ -1061,16 +1148,10 @@ def calculate_clustering(self):
                 print("Calculating polarizations first")
         t_slice = slice(0, num_timesteps_orig, undersample)
 
-        # calculating the normalized iid distance matrix with punishing large distances more
-        niidm = self.iid_matrix.copy()
-        niidm /= max_dist
-        niidm = (niidm - np.mean(niidm)) / np.std(niidm)
-        # punishing large distances more
-        niidm = niidm ** 2
 
         for batchi in range(self.agent_summary["orientation"].shape[0]):
             print("Batchi: ", batchi)
-            # if batchi < 5:
+            # if batchi < 3:
             #     pass
             # else:
             #     break
@@ -1083,21 +1164,10 @@ def calculate_clustering(self):
                     idx = tuple(list(idx_base) + [slice(None), slice(None), t])
                     idx_ = tuple(list(idx_base_) + [slice(None), slice(None), t])
 
-                    # getting the distance matrix for current condition and timestep
-                    normiidm = niidm[idx]
+                    # calculating the clustering distance
+                    normiidm, dist_pm, dist = self.return_clustering_distnace(idx_base, t*undersample, undersample, pm=pol_m_large[idx_])
 
-                    # calculating the distance matrix for polarization
-                    pm = pol_m_large[idx_]
-                    # standardizing pm and normiidm so that their mean is 0 and their std is 1
-                    pm = (pm - np.mean(pm)) / np.std(pm)
-                    dist_pm = 1 - pm.astype('float')
-                    # squaring distances to punish large distances more
-                    dist_pm = dist_pm ** 2
-
-                    # calculating the final distance matrix as a weighted average of the two
-                    dist = (dist_pm + normiidm) / 2
-
-                    if idx_base == (0, 1, 
5, 2) and 326 < t < 310: + if idx_base == (0, 0, 0, 0) and 5 < t < 3: with_plotting = True print(dist_pm) print(normiidm) @@ -1106,8 +1176,7 @@ def calculate_clustering(self): # clustering linkage_matrix = linkage(dist, "ward") - ret = dendrogram(linkage_matrix, color_threshold=15, labels=[i for i in range(num_agents)], - show_leaf_counts=True, no_plot=not with_plotting) + ret = self.return_dendogram_data(linkage_matrix, [i for i in range(num_agents)], no_plot=not with_plotting) colors = [color for _, color in sorted(zip(ret['leaves'], ret['leaves_color_list']))] group_ids = np.array([int(a.split("C")[1]) for a in colors]) for i, gid in enumerate(group_ids): @@ -1117,7 +1186,7 @@ def calculate_clustering(self): group_ids[i] = np.max(group_ids) + 1 group_ids -= 1 - clustering_data[tuple(list(idx_base) + [t])] = len(list(set(colors))) + clustering_data[tuple(list(idx_base) + [t])] = len(list(set(group_ids))) largest_clustering_data[tuple(list(idx_base) + [t])] = self.calculate_largest_subcluster_size(group_ids) clustering_ids[tuple(list(idx_base) + [slice(None), t])] = group_ids if with_plotting: @@ -1148,7 +1217,7 @@ def plot_largest_subclusters(self): experiments.""" cbar = None self.calculate_clustering() - self.mean_largest_clusters = np.mean(self.largest_clustering_data, axis=0) + self.mean_largest_clusters = np.mean(np.mean(self.largest_clustering_data, axis=0), axis=-1) min_data = np.min(self.mean_largest_clusters) max_data = np.max(self.mean_largest_clusters) @@ -1222,7 +1291,7 @@ def plot_clustering(self): experiments.""" cbar = None self.calculate_clustering() - self.mean_clusters = np.mean(self.clustering_data, axis=0) + self.mean_clusters = np.mean(np.mean(self.clustering_data, axis=0), axis=-1) min_data = np.min(self.mean_clusters) max_data = np.max(self.mean_clusters) @@ -1479,6 +1548,9 @@ def calculate_mean_NN_dist(self, undersample=1, avg_over_time=False): num_agents = iid.shape[-2] for i in range(num_agents): iid[..., i, i, :] = np.nan + # setting lower triangle equal to upper triangular + iid[..., i:, i, :] = iid[..., i, i:, :] + nearest_per_agents = np.nanmin(iid, axis=-2) mean_nearest = np.mean(nearest_per_agents, axis=-2) self.mean_nn_dist = np.mean(mean_nearest, axis=0) @@ -1693,6 +1765,9 @@ def calculate_pairwise_pol_matrix_vectorized(self, condition_idx, t): # Get the orientations of all agents at time t agent_ori = self.agent_summary["orientation"][condition_idx + (slice(None), t)] + # plt.figure() + # plt.plot(agent_ori) + # plt.show() # print(agent_ori.shape) # input() @@ -1981,53 +2056,36 @@ def plot_mean_NN_dist(self, from_script=False, undersample=1): # self.calculate_interindividual_distance(undersample=undersample) self.calculate_mean_NN_dist(undersample=undersample) + print(self.mean_nn_dist.shape) + _mean_nn_dist = np.mean(self.mean_nn_dist, axis=-1) + batch_dim = 0 num_var_params = len(list(self.varying_params.keys())) agent_dim = batch_dim + num_var_params + 1 time_dim = agent_dim + 1 if num_var_params == 1: - fig, ax = plt.subplots(1, 1) - plt.title("Inter-individual distance (mean)") - plt.plot(self.mean_nn_dist) - num_agents = self.iid_matrix.shape[-2] - restr_m = self.iid_matrix[..., np.triu_indices(num_agents, k=1)[0], np.triu_indices(num_agents, k=1)[1]] - for run_i in range(self.iid_matrix.shape[0]): - plt.plot(restr_m[run_i, :, :], marker=".", linestyle='None') - ax.set_xticks(range(len(self.varying_params[list(self.varying_params.keys())[0]]))) - ax.set_xticklabels(self.varying_params[list(self.varying_params.keys())[0]]) - 
plt.xlabel(list(self.varying_params.keys())[0]) + raise Exception("Not implemented yet!") elif num_var_params == 2: - fig, ax = plt.subplots(1, 1) - plt.title("Inter-individual distance (mean)") - keys = sorted(list(self.varying_params.keys())) - im = ax.imshow(self.mean_nn_dist) - - ax.set_yticks(range(len(self.varying_params[keys[0]]))) - ax.set_yticklabels(self.varying_params[keys[0]]) - ax.set_ylabel(keys[0]) - - ax.set_xticks(range(len(self.varying_params[keys[1]]))) - ax.set_xticklabels(self.varying_params[keys[1]]) - ax.set_xlabel(keys[1]) + raise Exception("Not implemented yet!") elif num_var_params == 3 or num_var_params == 4: - if len(self.mean_nn_dist.shape) == 4: + if len(_mean_nn_dist.shape) == 4: # reducing the number of variables to 3 by connecting 2 of the dimensions - self.new_mean_nn_dist = np.zeros((self.mean_nn_dist.shape[0:3])) + self.new_mean_nn_dist = np.zeros((_mean_nn_dist.shape[0:3])) print(self.new_mean_nn_dist.shape) - for j in range(self.mean_nn_dist.shape[0]): - for i in range(self.mean_nn_dist.shape[1]): - self.new_mean_nn_dist[j, i, :] = self.mean_nn_dist[j, i, :, i] - self.mean_nn_dist = self.new_mean_nn_dist + for j in range(_mean_nn_dist.shape[0]): + for i in range(_mean_nn_dist.shape[1]): + self.new_mean_nn_dist[j, i, :] = _mean_nn_dist[j, i, :, i] + _mean_nn_dist = self.new_mean_nn_dist if self.collapse_plot is None: - num_plots = self.mean_nn_dist.shape[0] + num_plots = _mean_nn_dist.shape[0] fig, ax = plt.subplots(1, num_plots, sharex=True, sharey=True) keys = sorted(list(self.varying_params.keys())) for i in range(num_plots): - img = ax[i].imshow(self.mean_nn_dist[i, :, :]) + img = ax[i].imshow(_mean_nn_dist[i, :, :]) ax[i].set_title(f"{keys[0]}={self.varying_params[keys[0]][i]}") if i == 0: @@ -2046,7 +2104,7 @@ def plot_mean_NN_dist(self, from_script=False, undersample=1): fig, ax = plt.subplots(1, 1, sharex=True, sharey=True) keys = sorted(list(self.varying_params.keys())) - collapsed_data, labels = self.collapse_mean_data(self.mean_nn_dist, save_name="coll_iid.npy") + collapsed_data, labels = self.collapse_mean_data(_mean_nn_dist, save_name="coll_iid.npy") img = ax.imshow(collapsed_data) ax.set_yticks(range(len(self.varying_params[keys[self.collapse_fixedvar_ind]]))) @@ -2077,6 +2135,9 @@ def plot_mean_iid(self, from_script=False, undersample=1): if self.iid_matrix is None: self.calculate_interindividual_distance(undersample=undersample) + _mean_iid = np.mean(self.mean_iid, axis=-1) + print("shape:", _mean_iid.shape) + batch_dim = 0 num_var_params = len(list(self.varying_params.keys())) agent_dim = batch_dim + num_var_params + 1 @@ -2085,7 +2146,7 @@ def plot_mean_iid(self, from_script=False, undersample=1): if num_var_params == 1: fig, ax = plt.subplots(1, 1) plt.title("Inter-individual distance (mean)") - plt.plot(self.mean_iid) + plt.plot(_mean_iid) num_agents = self.iid_matrix.shape[-2] restr_m = self.iid_matrix[..., np.triu_indices(num_agents, k=1)[0], np.triu_indices(num_agents, k=1)[1]] for run_i in range(self.iid_matrix.shape[0]): @@ -2098,7 +2159,7 @@ def plot_mean_iid(self, from_script=False, undersample=1): fig, ax = plt.subplots(1, 1) plt.title("Inter-individual distance (mean)") keys = sorted(list(self.varying_params.keys())) - im = ax.imshow(self.mean_iid) + im = ax.imshow(_mean_iid) ax.set_yticks(range(len(self.varying_params[keys[0]]))) ax.set_yticklabels(self.varying_params[keys[0]]) @@ -2109,21 +2170,21 @@ def plot_mean_iid(self, from_script=False, undersample=1): ax.set_xlabel(keys[1]) elif num_var_params == 3 or num_var_params 
== 4: - if len(self.mean_iid.shape) == 4: + if len(_mean_iid.shape) == 4: # reducing the number of variables to 3 by connecting 2 of the dimensions - self.new_mean_iid = np.zeros((self.mean_iid.shape[0:3])) + self.new_mean_iid = np.zeros((_mean_iid.shape[0:3])) print(self.new_mean_iid.shape) - for j in range(self.mean_iid.shape[0]): - for i in range(self.mean_iid.shape[1]): - self.new_mean_iid[j, i, :] = self.mean_iid[j, i, :, i] - self.mean_iid = self.new_mean_iid + for j in range(_mean_iid.shape[0]): + for i in range(_mean_iid.shape[1]): + self.new_mean_iid[j, i, :] = _mean_iid[j, i, :, i] + _mean_iid = self.new_mean_iid if self.collapse_plot is None: - num_plots = self.mean_iid.shape[0] + num_plots = _mean_iid.shape[0] fig, ax = plt.subplots(1, num_plots, sharex=True, sharey=True) keys = sorted(list(self.varying_params.keys())) for i in range(num_plots): - img = ax[i].imshow(self.mean_iid[i, :, :], vmin=np.min(self.mean_iid), vmax=np.max(self.mean_iid)) + img = ax[i].imshow(_mean_iid[i, :, :], vmin=np.min(_mean_iid), vmax=np.max(_mean_iid)) ax[i].set_title(f"{keys[0]}={self.varying_params[keys[0]][i]}") if i == 0: @@ -2142,7 +2203,7 @@ def plot_mean_iid(self, from_script=False, undersample=1): fig, ax = plt.subplots(1, 1, sharex=True, sharey=True) keys = sorted(list(self.varying_params.keys())) - collapsed_data, labels = self.collapse_mean_data(self.mean_iid, save_name="coll_iid.npy") + collapsed_data, labels = self.collapse_mean_data(_mean_iid, save_name="coll_iid.npy") img = ax.imshow(collapsed_data) ax.set_yticks(range(len(self.varying_params[keys[self.collapse_fixedvar_ind]]))) diff --git a/abm/projects/visual_flocking/vf_agent/vf_agent.py b/abm/projects/visual_flocking/vf_agent/vf_agent.py index f74b80ee..309cdfc8 100644 --- a/abm/projects/visual_flocking/vf_agent/vf_agent.py +++ b/abm/projects/visual_flocking/vf_agent/vf_agent.py @@ -145,36 +145,37 @@ def draw_update(self): self.image = pygame.Surface([self.radius * 2, self.radius * 2]) self.image.fill(colors.BACKGROUND) self.image.set_colorkey(colors.BACKGROUND) - if self.is_moved_with_cursor: - pygame.gfxdraw.filled_circle( - self.image, - self.radius, - self.radius, - self.radius, - self.selected_color - ) - pygame.gfxdraw.aacircle(self.image, - self.radius, - self.radius, - self.radius, - colors.BACKGROUND) - else: - pygame.gfxdraw.filled_circle( - self.image, - self.radius, - self.radius, - self.radius-1, - self.color[0:-1] + try: + if self.is_moved_with_cursor: + pygame.gfxdraw.filled_circle( + self.image, + self.radius, + self.radius, + self.radius, + self.selected_color + ) + pygame.gfxdraw.aacircle(self.image, + self.radius, + self.radius, + self.radius, + colors.BACKGROUND) + else: + pygame.gfxdraw.filled_circle( + self.image, + self.radius, + self.radius, + self.radius-1, + self.color[0:-1] + ) + pygame.gfxdraw.aacircle(self.image, + self.radius, + self.radius, + self.radius-1, + colors.BLACK) + except: + pygame.draw.circle( + self.image, self.color, (self.radius, self.radius), self.radius ) - pygame.gfxdraw.aacircle(self.image, - self.radius, - self.radius, - self.radius-1, - colors.BLACK) - - # pygame.draw.circle( - # self.image, self.color, (self.radius, self.radius), self.radius - # ) # showing agent orientation with a line towards agent orientation new_white = (255, 255, 254) diff --git a/abm/projects/visual_flocking/vf_agent/vf_supcalc.py b/abm/projects/visual_flocking/vf_agent/vf_supcalc.py index 54d7aa52..9814a3b0 100644 --- a/abm/projects/visual_flocking/vf_agent/vf_supcalc.py +++ 
b/abm/projects/visual_flocking/vf_agent/vf_supcalc.py @@ -6,6 +6,7 @@ from scipy import integrate + def distance_infinite(p1, p2, L=500, dim=2): """ Returns the distance vector of two position vectors x,y by tanking periodic boundary conditions into account. @@ -13,10 +14,11 @@ def distance_infinite(p1, p2, L=500, dim=2): """ distvec = p2 - p1 distvec_periodic = np.copy(distvec) - distvec_periodic[distvec < -0.5*L] += L - distvec_periodic[distvec > 0.5*L] -= L + distvec_periodic[distvec < -0.5 * L] += L + distvec_periodic[distvec > 0.5 * L] -= L return distvec_periodic + def projection_field(fov, v_field_resolution, position, radius, orientation, object_positions, object_sizes=None, boundary_cond="walls", arena_width=None, arena_height=None, vision_range=None, ag_id=0): @@ -68,12 +70,12 @@ def projection_field(fov, v_field_resolution, position, radius, # in case torus, positions might change if boundary_cond == "infinite": - if np.abs(v2[0]) > arena_width/2: + if np.abs(v2[0]) > arena_width / 2: if agents_center[0] < object_center[0]: object_center[0] -= arena_width elif agents_center[0] > object_center[0]: object_center[0] += arena_width - if np.abs(v2[1]) > arena_height/2: + if np.abs(v2[1]) > arena_height / 2: if agents_center[1] < object_center[1]: object_center[1] -= arena_height elif agents_center[1] > object_center[1]: @@ -128,7 +130,6 @@ def projection_field(fov, v_field_resolution, position, radius, v_field[i, proj_start:proj_end] = 1 - # post_processing and limiting FOV # flip field data along second dimension v_field_post = np.flip(v_field, axis=1) @@ -138,7 +139,6 @@ def projection_field(fov, v_field_resolution, position, radius, return v_field_post - def calculate_closed_angle(v1, v2): """ Calculating closed angle between two vectors v1 and v2. Rotated with the orientation of the agent as it is relative. 
@@ -157,6 +157,7 @@ def calculate_closed_angle(v1, v2): closed_angle = 2 * np.pi - closed_angle return closed_angle + # Functions needed for VSWRM functionality def VSWRM_flocking_state_variables(vel_now, Phi, V_now, vf_params, t_now=None, V_prev=None, t_prev=None, verbose=False, ALP0=None, BET0=None, V0=None): @@ -226,24 +227,41 @@ def VSWRM_flocking_state_variables(vel_now, Phi, V_now, vf_params, t_now=None, V # dpsi = vf_params.BET0 * integrate.trapz(np.sin(Phi) * G_psi, Phi) + \ # vf_params.BET0 * vf_params.BET1 * np.sum(np.sin(FOV_rescaling_sin * Phi) * G_psi_spike) * dPhi - - if not verbose: + # without reacling edge information + # dvel = vf_params.GAM * (V0 - vel_now) + \ + # ALP0 * integrate.trapz(np.cos(Phi) * G_vel, Phi) + \ + # ALP0 * vf_params.ALP1 * np.sum(np.cos(Phi) * G_vel_spike) + # + # dpsi = BET0 * integrate.trapz(np.sin(Phi) * G_psi, Phi) + \ + # BET0 * vf_params.BET1 * np.sum(np.sin(Phi) * G_psi_spike) + + alpha_blob = ALP0 * integrate.trapz(cos_sigmoid(Phi, 3*np.pi) * G_vel, Phi) + alpha_edge = ALP0 * vf_params.ALP1 * np.sum(cos_sigmoid(Phi, 3*np.pi) * G_vel_spike) + + beta_blob = BET0 * integrate.trapz(sin_sigmoid(Phi, 3*np.pi) * G_psi, Phi) + beta_edge = BET0 * vf_params.BET1 * np.sum(sin_sigmoid(Phi, 3*np.pi) * G_psi_spike) # without reacling edge information dvel = vf_params.GAM * (V0 - vel_now) + \ - ALP0 * integrate.trapz(np.cos(Phi) * G_vel, Phi) + \ - ALP0 * vf_params.ALP1 * np.sum(np.cos(Phi) * G_vel_spike) + alpha_blob + \ + alpha_edge - dpsi = BET0 * integrate.trapz(np.sin(Phi) * G_psi, Phi) + \ - BET0 * vf_params.BET1 * np.sum(np.sin(Phi) * G_psi_spike) + dpsi = beta_blob + \ + beta_edge return dvel, dpsi else: - alpha_blob = ALP0 * integrate.trapz(np.cos(Phi) * G_vel, Phi) - alpha_edge = ALP0 * vf_params.ALP1 * np.sum(np.cos(Phi) * G_vel_spike) + # alpha_blob = ALP0 * integrate.trapz(np.cos(Phi) * G_vel, Phi) + # alpha_edge = ALP0 * vf_params.ALP1 * np.sum(np.cos(Phi) * G_vel_spike) + # + # beta_blob = BET0 * integrate.trapz(np.sin(Phi) * G_psi, Phi) + # beta_edge = BET0 * vf_params.BET1 * np.sum(np.sin(Phi) * G_psi_spike) + + alpha_blob = ALP0 * integrate.trapz(cos_sigmoid(Phi, 3*np.pi) * G_vel, Phi) + alpha_edge = ALP0 * vf_params.ALP1 * np.sum(cos_sigmoid(Phi, 3*np.pi) * G_vel_spike) - beta_blob = BET0 * integrate.trapz(np.sin(Phi) * G_psi, Phi) - beta_edge = BET0 * vf_params.BET1 * np.sum(np.sin(Phi) * G_psi_spike) + beta_blob = BET0 * integrate.trapz(sin_sigmoid(Phi, 3*np.pi) * G_psi, Phi) + beta_edge = BET0 * vf_params.BET1 * np.sum(sin_sigmoid(Phi, 3*np.pi) * G_psi_spike) # without reacling edge information dvel = vf_params.GAM * (V0 - vel_now) + \ alpha_blob + \ @@ -254,6 +272,39 @@ def VSWRM_flocking_state_variables(vel_now, Phi, V_now, vf_params, t_now=None, V return dvel, dpsi, alpha_blob, alpha_edge, beta_blob, beta_edge +def sigmoid(x, steepness): + return 2 / (1 + np.exp(-steepness * x)) - 1 + + +def cos_sigmoid(x, s): + # left part + left = 2 / (1 + np.exp(-s * (x + (np.pi / 2)))) - 1 + right = -2 / (1 + np.exp(-s * (x - (np.pi / 2)))) + 1 + final = [] + for xid, xi in enumerate(list(x)): + if xi < 0: + final.append(left[xid]) + else: + final.append(right[xid]) + return final + + +def sin_sigmoid(x, s): + # left part + middle = 2 / (1 + np.exp(-s * (x))) - 1 + left = -2 / (1 + np.exp(-s * (x + (np.pi)))) + 1 + right = -2 / (1 + np.exp(-s * (x - (np.pi)))) + 1 + final = [] + for xid, xi in enumerate(list(x)): + if -np.pi / 2 < xi < np.pi / 2: + final.append(middle[xid]) + elif xi < -np.pi / 2: + final.append(left[xid]) + else: + 
final.append(right[xid]) + return final + + def dPhi_V_of(Phi, V): """Calculating derivative of VPF according to Phi visual angle array at a given timepoint t Args: @@ -273,9 +324,10 @@ def dPhi_V_of(Phi, V): else: dPhi_V_raw = dPhi_V_raw[1:, ...] - dPhi_V = dPhi_V_raw #/ (Phi[-1] - Phi[-2]) + dPhi_V = dPhi_V_raw # / (Phi[-1] - Phi[-2]) return dPhi_V + def distance_coords(x1, y1, x2, y2, vectorized=False): """Distance between 2 points in 2D space calculated from point coordinates. if vectorized is True, we use multidimensional (i.e. vectorized) form of distance @@ -293,13 +345,13 @@ def distance_coords(x1, y1, x2, y2, vectorized=False): def follow_lines_local(agposition, agradius, agorientation, linemap, agvel, sensor_radius=10, sensor_distance=5): """Following line with 2 sensors""" sensor1_pos = [agposition[1] + agradius - sensor_distance + ( - 1 + np.sin(agorientation + (3*np.pi / 4))) * sensor_distance, + 1 + np.sin(agorientation + (3 * np.pi / 4))) * sensor_distance, agposition[0] + agradius - sensor_distance + ( - 1 - np.cos(agorientation + (3*np.pi / 4))) * sensor_distance] + 1 - np.cos(agorientation + (3 * np.pi / 4))) * sensor_distance] sensor2_pos = [agposition[1] + agradius - sensor_distance + ( - 1 + np.sin(agorientation - (3*np.pi / 4))) * sensor_distance, + 1 + np.sin(agorientation - (3 * np.pi / 4))) * sensor_distance, agposition[0] + agradius - sensor_distance + ( - 1 - np.cos(agorientation - (3*np.pi / 4))) * sensor_distance] + 1 - np.cos(agorientation - (3 * np.pi / 4))) * sensor_distance] # superline = [] # for line in lines: # superline.extend(line) @@ -307,15 +359,16 @@ def follow_lines_local(agposition, agradius, agorientation, linemap, agvel, sens # line = superline # points_s1_range = np.array([point for point in line if distance_coords(sensor1_pos[1], sensor1_pos[0], point[1], point[0]) s2: @@ -326,4 +379,4 @@ def follow_lines_local(agposition, agradius, agorientation, linemap, agvel, sens if s1 != 0: return 0.01 else: - return 0 \ No newline at end of file + return 0 diff --git a/abm/replay/replay.py b/abm/replay/replay.py index 72f84280..ae03eb9a 100644 --- a/abm/replay/replay.py +++ b/abm/replay/replay.py @@ -384,6 +384,58 @@ def __init__(self, data_folder_path, undersample=1, t_start=None, t_end=None, co 'MIN-1'], direction='down', textHAlign='centre' ) + # Clustering buttons for VSWRM version + if self.experiment.env.get("APP_VERSION") == "VisualFlocking": + button_start_y += self.button_height + self.plot_clus_b = Button( + # Mandatory Parameters + self.screen, # Surface to place button on + self.slider_start_x, # X-coordinate of top left corner + button_start_y, # Y-coordinate of top left corner + int(self.slider_width / 2), # Width + self.button_height, # Height + + # Optional Parameters + text='Plot No. 
Clusters ', # Text to display + fontSize=20, # Size of font + margin=20, # Minimum distance between text/image and edge of button + inactiveColour=colors.GREY, + onClick=lambda: self.experiment.plot_clustering(), # Function to call when clicked on + borderThickness=1 + ) + self.plot_largest_clus_b = Button( + # Mandatory Parameters + self.screen, # Surface to place button on + self.button_start_x_2, # X-coordinate of top left corner + button_start_y, # Y-coordinate of top left corner + int(self.slider_width / 2), # Width + self.button_height, # Height + + # Optional Parameters + text='Plot largest clus.', # Text to display + fontSize=20, # Size of font + margin=20, # Minimum distance between text/image and edge of button + inactiveColour=colors.GREY, + onClick=lambda: self.experiment.plot_largest_subclusters(), # Function to call when clicked on + borderThickness=1 + ) + self.dendogram_b = Button( + # Mandatory Parameters + self.screen, # Surface to place button on + self.button_start_x_2 + int(self.slider_width / 2), # X-coordinate of top left corner + button_start_y, # Y-coordinate of top left corner + int(self.slider_width / 2), # Width + self.button_height, # Height + + # Optional Parameters + text='Dendogram', # Text to display + fontSize=20, # Size of font + margin=20, # Minimum distance between text/image and edge of button + inactiveColour=colors.GREY, + onClick=lambda: self.experiment.plot_clustering_in_current_t(self.index + tuple([self.t])), # Function to call when clicked on + borderThickness=1 + ) + # Plotting Details Button Line button_start_y += self.button_height self.t_start = None @@ -626,8 +678,9 @@ def on_print_NNdist(self, with_read_collapse_param=True, used_batches=None): f"Experiment longer than 1000 timesteps! To calculate iid reducing timesteps to 1000 with undersampling rate {undersample}.") else: undersample = 1 - fig, ax, cbar = self.experiment.plot_mean_NN_dist(from_script=self.from_script, undersample=undersample) - return fig, ax, cbar + # fig, ax, cbar = self.experiment.plot_mean_NN_dist(from_script=self.from_script, undersample=undersample) + # return fig, ax, cbar + self.experiment.plot_mean_NN_dist(from_script=self.from_script, undersample=undersample) def on_set_t_start(self): """Setting starting timestep for plotting and calculations""" @@ -904,7 +957,6 @@ def update_frame_data(self): t_ind_cl = int(t_ind / 25) clusters_idx = tuple(list(self.index) + [slice(None), t_ind_cl]) clusters = self.experiment.clustering_ids[clusters_idx] - print(clusters) else: clusters = None
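
Reviewer note (not part of the patch): the new clustering path in data_loader.py builds a combined distance matrix from pairwise polarization and inter-individual distance, feeds it to fastcluster.linkage, and recovers subgroup labels from the dendrogram leaf colours. A minimal, self-contained sketch of that flow, using a toy Euclidean distance matrix in place of (dist_pm + niidm) / 2 and illustrative variable names, assuming scipy >= 1.7 for leaves_color_list:

import numpy as np
from fastcluster import linkage
from scipy.cluster.hierarchy import dendrogram
from scipy.spatial.distance import squareform

# toy stand-in for the combined NxN distance matrix (dist_pm + niidm) / 2
rng = np.random.default_rng(0)
positions = rng.random((10, 2))
dist = np.linalg.norm(positions[:, None, :] - positions[None, :, :], axis=-1)

# fastcluster/scipy expect a condensed distance vector for precomputed distances
linkage_matrix = linkage(squareform(dist, checks=False), "ward")

# same leaf-colour -> group-id mapping as calculate_clustering()
ret = dendrogram(linkage_matrix, color_threshold=1, labels=list(range(10)), no_plot=True)
colors = [c for _, c in sorted(zip(ret["leaves"], ret["leaves_color_list"]))]
group_ids = np.array([int(c.split("C")[1]) for c in colors])
for i, gid in enumerate(group_ids):
    if gid == 0:  # "C0" marks an above-threshold (independent) leaf
        group_ids[i] = np.max(group_ids) + 1
group_ids -= 1
print("group ids:", group_ids, "number of subgroups:", len(set(group_ids.tolist())))

Note that calculate_clustering() and plot_clustering_in_current_t() pass the square NxN matrix straight to linkage, which fastcluster/scipy then treat as an array of observation vectors rather than as precomputed distances; condensing it first with squareform (as above) may or may not be the intended behaviour, but it is worth double-checking.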
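Reviewer note (not part of the patch): cos_sigmoid and sin_sigmoid in vf_supcalc.py replace the plain cos(Phi)/sin(Phi) weights in VSWRM_flocking_state_variables with saturated, sigmoid-shaped versions, but they return Python lists and loop over every element of Phi. A possible vectorized equivalent with the same piecewise definition, assuming Phi is a NumPy array in [-pi, pi] (names cos_sigmoid_vec / sin_sigmoid_vec are illustrative):

import numpy as np

def cos_sigmoid_vec(x, s):
    # saturated cosine-like weight: plateau near +1 around 0, near -1 towards +/-pi
    left = 2 / (1 + np.exp(-s * (x + np.pi / 2))) - 1
    right = -2 / (1 + np.exp(-s * (x - np.pi / 2))) + 1
    return np.where(x < 0, left, right)

def sin_sigmoid_vec(x, s):
    # saturated sine-like weight: plateau sign follows the sign of x, zero at 0 and +/-pi
    middle = 2 / (1 + np.exp(-s * x)) - 1
    left = -2 / (1 + np.exp(-s * (x + np.pi))) + 1
    right = -2 / (1 + np.exp(-s * (x - np.pi))) + 1
    return np.where(np.abs(x) < np.pi / 2, middle, np.where(x < -np.pi / 2, left, right))

if __name__ == "__main__":
    Phi = np.linspace(-np.pi, np.pi, 9)
    s = 3 * np.pi  # steepness used in the patch
    print(cos_sigmoid_vec(Phi, s))
    print(sin_sigmoid_vec(Phi, s))

Returning arrays instead of lists would also avoid the implicit list-to-array conversion that currently happens inside integrate.trapz and np.sum.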