Commit 2d262ff: final commit

pblankley committed Dec 8, 2017
1 parent 2ade031
Showing 7 changed files with 77 additions and 98 deletions.
8 changes: 7 additions & 1 deletion README.md
@@ -22,4 +22,10 @@ Barron, E. G., Kaplan, G. H., Bangert, J., Bartlett, J. L., Puatua, W., Harris,

Beyond these packages, the user is expected to have the usual Python numerical libraries installed.

Once you have these installed, go to the linkitf directory of the repo and run the demo.py file.

To run the entire codebase, you will need to download two files from Google Drive and put them in a folder named 'data' in the linkitf directory:

https://drive.google.com/drive/u/0/folders/1Dkzs4HMFHf-AaG5wrHtMsWi0zHhWsrlI

The files are itf_new_1_line.txt and itf_new_1_line_ec.mpc; enter the paths to these files when you run driver.py.
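
Assuming a POSIX shell and that the two files are already downloaded, the steps look roughly like this (paths illustrative):

    cd linkitf
    mkdir -p data
    # move itf_new_1_line.txt and itf_new_1_line_ec.mpc into data/ first
    python demo.py     # runs the training-data demo
    python driver.py   # prompts for the paths to the two ITF files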
100 changes: 11 additions & 89 deletions linkitf/demo.py
@@ -27,19 +27,17 @@

infilename=os.path.join(BASE_DIR, 'demo_train/UnnObs_Training_1_line_A_ec_labelled_2457308.5_pm15.0_r2.5.trans')

# pickle_filename = infilename.rstrip('.trans') + '.pickle'

print('Based on our tuning, the best dt is {0} and best cluster radius is {1}'.format(dt,cr))

# true_count_set, mergedCounter_dict, mergedTime_dict = accessible_clusters(pixels, infilename=infilename)
# true_count = len(true_count_set)
# print('True count of clusters: {}'.format(true_count))
#
# right, wrong, ids_right, ids_wrong = test_clusters(pixels, infilename, util.lunation_center(n), \
# dt=dt,rad=cr)
#
# print('Using our optimal parameters we got {0} percent of clusters with {1} percent errors.'.format(right/true_count,wrong/true_count))
# print('We got',right,'right and',wrong,'wrong out of total',true_count)
true_count_set, mergedCounter_dict, mergedTime_dict = accessible_clusters(pixels, infilename=infilename)
true_count = len(true_count_set)
print('True count of clusters: {}'.format(true_count))

right, wrong, ids_right, ids_wrong = test_clusters(pixels, infilename, util.lunation_center(n), \
                                                   dt=dt, rad=cr)

print('Using our optimal parameters we recovered {0:.1f}% of clusters with {1:.1f}% errors.'.format(100*right/true_count, 100*wrong/true_count))
print('We got', right, 'right and', wrong, 'wrong out of total', true_count)

print("Now that we have shown our performance on training data, let's run on the ITF.")

@@ -49,37 +47,17 @@
itf_nside = 8
itf_pixels = range(hp.nside2npix(itf_nside))

# itf_raw_results, itf_clust_ids = find_clusters(pixels, itf_file, util.lunation_center(itf_n), g_gdots=g_gdots,dt=dt,rad=cr)

# with open(itf_pickle, 'wb') as handle:
# pickle.dump(itf_raw_results, handle, protocol=pickle.HIGHEST_PROTOCOL)
#
# with open('demo_itf/itf_clust_ids.pickle', 'wb') as handle:
# pickle.dump(itf_clust_ids, handle, protocol=pickle.HIGHEST_PROTOCOL)
#
# get results back
with open(itf_pickle, 'rb') as handle:
itf_raw_results = pickle.load(handle)
#
with open('demo_itf/itf_clust_ids.pickle', 'rb') as handle:
itf_clust_ids = pickle.load(handle)
# cluster the ITF tracklets directly (replacing the cached-pickle workflow above)
itf_raw_results, itf_clust_ids = find_clusters(pixels, itf_file, util.lunation_center(itf_n), g_gdots=g_gdots, dt=dt, rad=cr)

check_mpc = os.path.join(BASE_DIR, 'demo_itf/itf_new_1_line_ec_subset.mpc')
check_txt = os.path.join(BASE_DIR, 'demo_itf/itf_new_1_line_subset.txt')
itf_tracklets_dict = util.get_original_tracklets_dict(check_mpc)
itf_obs_array = util.get_original_observation_array(check_txt)
#

obs_dict = {}
for cluster_key in itf_raw_results.keys():
    obs_dict[cluster_key] = util.get_observations(cluster_key, itf_tracklets_dict, itf_obs_array)

# with open('demo_itf/itf_results', 'wb') as handle:
# pickle.dump(obs_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
#
# # get results back
# with open('demo_itf/itf_results', 'rb') as handle:
# itf_results = pickle.load(handle)
#
print('We clustered {0} asteroids from the ITF!'.format(len(obs_dict.keys())))
print('NOTE: We only count clusters with 3 or more tracklets.')
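
# A hedged sketch (not in the original demo) of that 3-tracklet cutoff, assuming
# cluster keys are '|'-joined tracklet ids (an assumption based on how
# util.get_observations consumes them):
#   big_clusters = {k: v for k, v in obs_dict.items() if len(k.split('|')) >= 3}
#   print('{} clusters with 3 or more tracklets'.format(len(big_clusters)))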

@@ -104,59 +82,3 @@

print('\n')
print('Check out the pdf result in the current directory!')
# generate_sky_region_files('demo_itf/itf_new_1_line_2457308.5_pm15.0_r2.5.trans', nside=nside, n=-14)

# visual.make_figure('demo_itf/itf_new_1_line_2457308.5_pm15.0_r2.5_hp_017_g0.40_gdot+0.0e+00')

# pull in the orbit fitting results then plot revised with orbit fitting (drop non-members)

# after fitting orbits, we have found these asteroids.

# whole run is -825 to 10
# TODO activate above this

# gs = [0.4]
# gdots = [-0.004, -0.003, -0.002, -0.001, 0.0, 0.001, 0.002, 0.003, 0.004]
# g_gdots = [(x,y) for x in gs for y in gdots]
#
# pixels= range(hp.nside2npix(nside))
# #
# real_run, clust_ids = find_clusters(pixels, infilename, util.lunation_center(n), g_gdots=g_gdots, dt=15)
# print(clust_ids)
# # print('How many in the run? There were {}.'.format(len(real_run)))
# #
# # with open('demo_data/demo_results.pickle', 'wb') as handle:
# # pickle.dump(real_run, handle, protocol=pickle.HIGHEST_PROTOCOL)
# #
# generate_sky_region_files(infilename, [281], nside=nside, n=n)
# generate_sky_region_files(infilename, [281], nside=nside, n=n, cluster_id_dict=clust_ids)
#
# visual.make_figure('demo_train/UnnObs_Training_1_line_A_ec_labelled_2457308.5_pm15.0_r2.5_hp_281_g0.40_gdot+0.0e+00')
# visual.make_figure('demo_train/UnnObs_Training_1_line_A_ec_labelled_2457308.5_pm15.0_r2.5_hp_281_g0.40_gdot+0.0e+00_cid',cluster=True)
# visual.make_figure(path_makefig)
# dne = []
# for p in range(hp.nside2npix(nside)):
# path_makefig = 'demo_itf/itf_new_1_line_2457308.5_pm15.0_r2.5_hp_{}_g0.40_gdot+0.0e+00'.format(('%03d' % (p)))
# try:
# visual.make_figure(path_makefig)
# except:
# dne.append(p)
# pass
# print('There were {} that had no clusters.'.format(len(dne)))

# dne = []
# for p in range(hp.nside2npix(nside)):
# path_makefig = 'demo_train/UnnObs_Training_1_line_A_ec_labelled_2457308.5_pm15.0_r2.5_hp_{}_g0.40_gdot+0.0e+00'.format(('%03d' % (p)))
# try:
# visual.make_figure(path_makefig)
# except:
# dne.append(p)
# pass
# print('There were {} that had no clusters.'.format(len(dne)))

#############################################################################


# earth_vec = Observatories.getObservatoryPosition('500', util.lunation_center(n))
# pixels, infilename, t_ref,g_gdots=g_gdots, mincount=3,dt=15,rad=0.00124
# errs, clusts, trues = util.do_test_run(pix_runs, true_count_dict, earth_vec, dt=15, nside=nside)
Binary file removed linkitf/demo_itf/itf_clust_ids.pickle
Binary file removed linkitf/demo_itf/itf_results
58 changes: 50 additions & 8 deletions linkitf/driver.py
@@ -22,11 +22,53 @@
import util
import cleaning as cl
from clustering import find_clusters
from itf_clean import clean_itf_data

# clean_itf_data('data/here_is_itf.txt')
with open('demo_itf/itf_clust_ids.pickle', 'rb') as handle:
itf_clust_ids = pickle.load(handle)
print(type(itf_clust_ids))
print(list(itf_clust_ids.keys())[:10])
print(list(itf_clust_ids.values())[:10])
from itf_clean import clean_itf_data_mpc
import os


print('Enter the path to the large .mpc file (no quotes):')
mpcpath = input()
mpc_path = os.path.abspath(mpcpath)
print('Enter the path to the corresponding one-line .txt observation file (no quotes):')
txtpath = input()
txt_path = os.path.abspath(txtpath)
# assumed: the cleaned per-lunation .trans files are written beside the .mpc file
home_dir = os.path.dirname(mpc_path)
clean_itf_data_mpc(mpc_path)

# search grid: g appears to be the assumed inverse heliocentric distance
# (g = 0.4 -> r = 2.5 AU, matching the _r2.5 file names) and gdot its rate
gs = [0.4]
gdots = [-0.004, -0.002, 0.0, 0.002, 0.004]
g_gdots = [(x, y) for x in gs for y in gdots]
nside = 8
dt = 15.0        # best dt from tuning (see demo.py)
cr = 0.00124     # best cluster radius from tuning
pixels = range(hp.nside2npix(nside))


for n in range(-825, 14):
    itf_file = os.path.join(home_dir, 'itf_new_1_line_ec_{}_pm15.0_r2.5.trans'.format(str(util.lunation_center(n))))
    itf_raw_results, itf_clust_ids = find_clusters(pixels, itf_file, util.lunation_center(n), g_gdots=g_gdots, dt=dt, rad=cr)

    itf_tracklets_dict = util.get_original_tracklets_dict(mpc_path)
    itf_obs_array = util.get_original_observation_array(txt_path)

    # map each cluster key to the observations it links
    obs_dict = {}
    for cluster_key in itf_raw_results.keys():
        obs_dict[cluster_key] = util.get_observations(cluster_key, itf_tracklets_dict, itf_obs_array)

    with open('result_for_{}.pickle'.format(n), 'wb') as handle:
        pickle.dump(obs_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)


# with open('demo_itf/itf_clust_ids.pickle', 'rb') as handle:
# itf_clust_ids = pickle.load(handle)
# print(type(itf_clust_ids))
# print(list(itf_clust_ids.keys())[:10])
# print(list(itf_clust_ids.values())[:10])
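
# A minimal read-back sketch (not in the original file); the file name follows
# the pattern written by the loop above:
#   import pickle
#   with open('result_for_-14.pickle', 'rb') as handle:
#       obs_dict = pickle.load(handle)
#   print('{} clusters recovered for lunation -14'.format(len(obs_dict)))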
9 changes: 9 additions & 0 deletions linkitf/itf_clean.py
@@ -22,6 +22,15 @@

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

def clean_itf_data_mpc(path):
    mpc_path = os.path.abspath(path)
    # home_dir = os.path.dirname(itf_path)
    # sort the ITF tracklets in time, split them into +/- 15-day windows around
    # each lunation center, and index sky positions assuming r = 2.5 AU
    tracklets, tracklets_jd_dict, sortedTracklets = cl.get_sorted_tracklets(mpc_path)
    cl.separate_time_windows(tracklets, sortedTracklets, tracklets_jd_dict, \
                             file_stem=mpc_path, dt=15.)
    for n in range(-825, 14):
        cl.index_positions(n, lambda t: 2.5, file_stem=mpc_path, dt=15.)
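
# A hedged usage sketch (not in the original file): driver.py calls this once on
# the downloaded ITF .mpc file before clustering, e.g.
#   clean_itf_data_mpc('data/itf_new_1_line_ec.mpc')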

def clean_itf_data(path):
    itf_path = os.path.abspath(path)
    home_dir = os.path.dirname(itf_path)
