"""
Scout
-----
.Genera tonos de llamado random
.Monitorea los sensores IR
.Si detecta una presencia:
.Inicia modo de percepción/clasificación
.Si se trata de especimenes de la especie de interés
.Estampa de tiempo con especie, sonido, video
.Inicia registro de video
emmanuel@interspecifics.cc
2020.01.29 // v.ii.x_:x
>> OTRAS NOTAS
https://www.raspberrypi.org/forums/viewtopic.php?t=240200
https://learn.adafruit.com/adafruit-amg8833-8x8-thermal-camera-sensor/raspberry-pi-thermal-camera
check devices
$ v4l2-ctl --list-devices
try recording
$ ffmpeg -i /dev/video7 -vcodec copy capture/cinco.mkv # 6.5Mbps sin reencodear
rangos de frecuencias
A- Hz(179-243)
B- Hz(158-174)
C- Hz(142-148)
D- Hz(128-139)
E- Hz(117-124)
F- Hz(106-115)
G- Hz(90-104)
"""
import busio, board, adafruit_amg88xx
import time, argparse, collections, random
import operator, re, os, subprocess
import cv2
import cvtf
import numpy as np
import tflite_runtime.interpreter as tflite
from PIL import Image
from oscpy.client import OSCClient
# minimum temperature difference (in C) between a pixel and the frame mean
# for that pixel to count as occupied
MIN_TEMP_DIFF = 2
# minimum number of occupied pixels for a reading to count as a presence
MIN_MASA_DETEC = 3
# -create objects to communicate with the two sensors
i2c_bus = busio.I2C(board.SCL, board.SDA)
sensor_a = adafruit_amg88xx.AMG88XX(i2c_bus, 0x68)
sensor_b = adafruit_amg88xx.AMG88XX(i2c_bus, 0x69)
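# 0x69 is the AMG88xx default I2C address; 0x68 is selected by pulling the
# sensor's AD_SELECT pin low, which is what lets both sensors share one bus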
Category = collections.namedtuple('Category', ['id', 'score'])
# img utils
def create_blank(w, h, rgb_color=(0, 0, 0)):
    """ create a new image (numpy array) filled with a solid color in rgb """
    image = np.zeros((h, w, 3), np.uint8)
    # opencv orders channels as bgr, so reverse the rgb tuple before filling
    image[:] = tuple(reversed(rgb_color))
    return image
# sensor functions
def read_sensor_pixels(sensor, verbose=False):
    """ Reads the temperature pixels from a sensor.
    Returns the mean temperature and a flat list of temperatures.
    The verbose option prints the values.
    """
    array_temps = []
    for row in sensor.pixels:
        array_temps.extend(row)
    mean_temp = sum(array_temps) / len(array_temps)
    if verbose:
        print("\n")
        print('[Tm]: {0:.2f}'.format(mean_temp))
        # print the reading we already took instead of polling the sensor again
        for iy in range(8):
            ls = ['{0:.1f}'.format(temp) for temp in array_temps[iy * 8:(iy + 1) * 8]]
            print(' '.join(ls))
        print("\n")
    return mean_temp, array_temps
def dual_detect(arg_name, verbose=False):
    """ Calls read_sensor_pixels once per sensor.
    Returns the number of occupied cells in each sensor (plus data_sens for the log).
    With verbose it prints the detection panels.
    """
    m_ta, arr_ta = read_sensor_pixels(sensor_a)
    m_tb, arr_tb = read_sensor_pixels(sensor_b)
    na = len(list(filter(lambda x: (x - m_ta) >= MIN_TEMP_DIFF, arr_ta)))
    nb = len(list(filter(lambda x: (x - m_tb) >= MIN_TEMP_DIFF, arr_tb)))
    if verbose:
        print("\n")
        print('[t{2}]:{0:.1f}\t[t{3}]:{1:.1f}'.format(m_tb, m_ta, arg_name[1], arg_name[0]))
    sens_a = ""
    sens_b = ""
    for ix in range(8):
        la = ''.join(['.' if (arr_ta[iy * 8 + ix] - m_ta) < MIN_TEMP_DIFF else '+' for iy in range(8)])
        lb = ''.join(['.' if (arr_tb[iy * 8 + ix] - m_tb) < MIN_TEMP_DIFF else '+' for iy in range(8)])
        sens_a += la + '\n'
        sens_b += lb + '\n'
        if verbose:
            print(lb, '\t', la)
    if verbose:
        print('[o{2}]:{0:d}\t\t[o{3}]:{1:d}'.format(nb, na, arg_name[1], arg_name[0]))
        print("\n")
    return na, nb, [sens_a, sens_b, m_ta, m_tb]
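# Verbose panel sketch (values are illustrative): each 8x8 grid marks pixels
# at least MIN_TEMP_DIFF above that sensor's mean with '+', the rest with '.':
#   [tE]:21.3   [tN]:20.8
#   ........    ........
#   ..++....    ........
#   (six more rows)
#   [oE]:4      [oN]:0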
# detection functions
def load_labels(path):
p = re.compile(r'\s*(\d+)(.+)')
with open(path, 'r', encoding='utf-8') as f:
lines = (p.match(line).groups() for line in f.readlines())
return {int(num): text.strip() for num, text in lines}
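# Label files are expected to hold one "<numeric id> <name>" pair per line,
# as implied by the regex above; for example (ids are hypothetical, the names
# are the ones checked in main()):
#   0 Jaguar
#   1 MexicanGrayWolf
#   2 Human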
def get_output(interpreter, top_k, score_threshold):
"""Returns no more than top_k categories with score >= score_threshold."""
scores = cvtf.output_tensor(interpreter, 0)
categories = [
Category(i, scores[i])
for i in np.argpartition(scores, -top_k)[-top_k:]
if scores[i] >= score_threshold
]
return sorted(categories, key=operator.itemgetter(1), reverse=True)
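# Note: np.argpartition places the indices of the top_k highest scores last
# without fully sorting, in O(n); the final sorted() then orders only those
# surviving categories by score.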
def append_results_to_img(cv2_im, results, labels):
height, width, channels = cv2_im.shape
for ii, res in enumerate(results):
percent = int(100 * res.score)
label = '{}% {}'.format(percent, labels[res[0]])
cv2_im = cv2.putText(cv2_im, label, (600, 20+ii*30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 1)
return cv2_im
# define callbacks
def human_callback(witch, arg_path, arg_name, arg_recfile, data_sens):
    """ Logs and records a human detection: the same routine as
    label_callback below, with the label fixed to HUMAN.
    """
    return label_callback("HUMAN", witch, arg_path, arg_name, arg_recfile, data_sens)
def label_callback(label, witch, arg_path, arg_name, arg_recfile, data_sens):
    # timestamp the detection
    timetag = time.strftime("%Y%m%d_%H%M%S")
    # log to record file
    record_file = open(arg_recfile, 'a+')
    if (witch == 1):
        record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[0]))
    elif (witch == 2):
        record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[1]))
    record_file.write(">> [label]: {}\n".format(label))
    record_file.write('>> [sensor.name]:[{0}]\n'.format(arg_name[1]))
    record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[3]))
    record_file.write('>> [data]: \n')
    record_file.write(data_sens[1])
    record_file.write('>> [sensor.name]:[{0}]\n'.format(arg_name[0]))
    record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[2]))
    record_file.write('>> [data]: \n')
    record_file.write(data_sens[0])
    # build the capture command; witch selects which video device records
    if (witch == 1):
        out_filename = arg_path + timetag + "_" + label + "_" + arg_name[0] + ".mkv"
        # cmd = "ffmpeg -i /dev/video6 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
        cmd = "ffmpeg -i /dev/video6 -t 15 -vcodec copy " + out_filename
    elif (witch == 2):
        out_filename = arg_path + timetag + "_" + label + "_" + arg_name[1] + ".mkv"
        # cmd = "ffmpeg -i /dev/video2 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
        cmd = "ffmpeg -i /dev/video2 -t 15 -vcodec copy " + out_filename
    else:
        # no active device: close the log without capturing
        record_file.close()
        return ''
    list_cmd = cmd.split(' ')
    # update and close the log
    record_file.write('>> [video.capture]:{0}\n\n\n'.format(out_filename))
    record_file.close()
    # run ffmpeg and return its output
    cmd_out = subprocess.run(list_cmd, stdout=subprocess.PIPE)
    return cmd_out.stdout.decode('utf-8')
# soundsys
def update_soundsystem(arg_recfile, arg_name, osc_c):
    """
    Sends messages to sc that trigger random notes within the established
    frequency ranges, and logs the notes to the record file.
    """
    # generate a note and send the osc message
    note_val = random.randint(0, 6)
    synthnames = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
    ruta = '/scout/note/' + arg_name + '/' + synthnames[note_val]
    ruta = ruta.encode()
    osc_c.send_message(ruta, [1])
    # log to record file
    timetag = time.strftime("%Y%m%d_%H%M%S")
    record_file = open(arg_recfile, 'a+')
    record_file.write("\n[scout.note]: <{0}> {1}\n".format(timetag, ruta.decode()))
    record_file.close()
    return
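# Example: note_val == 2 for scout "NE" sends the OSC message
# '/scout/note/NE/C' with the argument [1]; the receiving patch (sc listens
# on port 57120 by default) is expected to map synth 'C' to the 142-148 Hz
# range listed in the header notes.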
# -main
def main():
# -parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='Path of capture folder', default="/media/pi/DATA/capture/video/")
    parser.add_argument('--recfile', help='Path of record/log folder', default="/media/pi/DATA/capture/record/")
parser.add_argument('--name', help='Name of the directions to scout [NE || SW]', default="NE")
    parser.add_argument('--verbose', help='Show additional info for debugging', action='store_true')
    parser.add_argument('--show', help='Show video', action='store_true')
parser.add_argument('--ip', help='OSC ip', default="192.168.1.207")
parser.add_argument('--port', help='OSC port', default="57120")
args = parser.parse_args()
# -init osc client
osc_addr = args.ip
osc_port = int(args.port)
osc_client = OSCClient(osc_addr, osc_port)
# -load model and labels for detection
default_model_dir = '/home/pi/Dev/animals/train'
default_model = 'animals_duo_model.tflite'
default_labels = 'animals_duo_model.txt'
args_model = os.path.join(default_model_dir, default_model)
args_labels = os.path.join(default_model_dir, default_labels)
args_top_k = 1
args_camera_idx = 0
args_threshold = 0.1
os.makedirs(args.path, exist_ok=True)
os.makedirs(args.recfile, exist_ok=True)
# -create the detection interpreter
    print('Loading {} with {} object categories.'.format(args_model, args_labels))
interpreter = cvtf.make_interpreter(args_model)
interpreter.allocate_tensors()
labels = load_labels(args_labels)
# -record file
timetag = time.strftime("%Y%m%d_%H%M%S")
arg_recfile = args.recfile + timetag + ".log"
record_file = open(arg_recfile, 'w+')
record_file.write("[scout.record.start]:\t----\t----\t-- <{0}>: \n".format(timetag));
record_file.close()
    # -capture state; later a capture object connects to the active cam
    #  witch selects the device: 0 = idle, 1 = camera for sensor A, 2 = camera for sensor B
    cam = None
    witch = 0
    empty = create_blank(640, 480, rgb_color=(0, 0, 0))
    # rolling buffer of single-character classification results
    buffstream = ''
# -the loop (hole)
t0 = time.time()
t2 = time.time()
nc_a, nc_b, data_sens = dual_detect(args.name, args.verbose)
while True:
# -check sensors,
if (time.time()-t0 > 1):
nc_a, nc_b, data_sens = dual_detect(args.name, args.verbose)
t0 = time.time()
# -then setup capture device
if (witch == 0):
if (nc_a > MIN_MASA_DETEC):
cam = cv2.VideoCapture(4)
witch = 1
elif(nc_b > MIN_MASA_DETEC):
cam = cv2.VideoCapture(0)
witch = 2
            else:
                time.sleep(1)
        elif (witch == 1):
            if (nc_a > MIN_MASA_DETEC):
                pass  # keep recording from camera A
elif(nc_b > MIN_MASA_DETEC):
cam.release()
cam = cv2.VideoCapture(0)
witch = 2
else:
cam.release()
witch = 0
elif(witch == 2):
if (nc_a > MIN_MASA_DETEC):
cam.release()
cam = cv2.VideoCapture(4)
witch = 1
            elif (nc_b > MIN_MASA_DETEC):
                pass  # keep recording from camera B
else:
cam.release()
witch = 0
        # then, once a device is active
if (witch > 0):
if (cam.isOpened()):
# read and convert
ret, frame = cam.read()
if not ret:
print("-.-* No Video Source")
break
cv2_im = frame
cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im_rgb)
# make the classification
cvtf.set_input(interpreter, pil_im)
interpreter.invoke()
results = get_output(interpreter, args_top_k, args_threshold)
# parse and print results, compare, count!
# cv2_im = append_results_to_img(cv2_im, results, labels)
label = labels[results[0][0]]
percent = int(100 * results[0].score)
tag = '{}% {}'.format(percent, label)
ch = ''
if (label=='Jaguar'): ch='J'
elif(label=='MexicanGrayWolf'): ch='w'
elif(label=='Human'): ch='H'
else: ch = ' '
# update the buffstream
buffstream += ch
if (len(buffstream) > 20):
buffstream = buffstream[1:]
                if args.verbose:
                    print(buffstream + '\n')
# count and trigger events, reset buff
c_J = buffstream.count('J')
c_W = buffstream.count('w')
                c_H = buffstream.count('H')
if (c_J>15):
lab = "JAGUAR"
print("\n\n[->] {0}\n".format(lab))
label_callback(lab, witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
if (c_W>15):
lab = "MexGrayWOLF"
print("\n\n[->] {0}\n".format(lab))
label_callback(lab, witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
if (c_H>15):
print("\n\n[->] .. t[._.]H\n")
human_callback(witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
                # draw image
                if args.show:
                    cv2.imshow('frame', cv2_im)
            else:
                if args.show:
                    cv2.imshow('frame', empty)
        # update the sound machine
if (time.time() - t2 > 30):
update_soundsystem(arg_recfile, args.name, osc_client)
t2 = time.time()
# - detect break key
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
    if cam is not None:
        cam.release()
cv2.destroyAllWindows()
# ----
if __name__ == '__main__':
main()