% projekt-20-literatura-bibliography.bib
@book{burtbikefit,
title = {Bike Fit 2nd Edition: Optimise Your Bike Position for High Performance and Injury Avoidance},
author = {Burt, Phil},
year = {2022},
publisher = {Bloomsbury Publishing}
}
@article{retulReliability,
title = {Does the Retül System provide reliable kinematics information for cycling analysis?},
volume = {11},
url = {https://www.jsc-journal.com/index.php/JSC/article/view/759},
doi = {10.28985/1322.jsc.15},
abstract = {The Retül Vantage system is a popular tool to assess dynamic positioning of cyclists. Despite of using a low sampling rate (18 Hz) to record position data, Retül measures shows a moderate to very high correlation with data from gold-standard tridimensional camera systems reaching higher sampling rates, but its reliability has not been tested. Here we assess the reliability of the Retül Vantage system for kinematic assessment of cyclists. This cross-sectional study had two phases. Phase 1 included a survey with certified Retül bike fitters to select the most common variables used in cycling kinematics assessment. Phase 2 involved assessment of the selected cycling kinematics variables to check for intra-examiner reliability. Ten bike fitters answered the online survey (response rate of 47.6\%) and 7 variables were identified as the most common to conduct during bike fitting analysis. Then, ten cyclists were submitted to kinematic assessments and Vantage system variables were checked for inter-examiner reliability and standard error of the variables. Good to excellent inter-tester reliability levels were found for all the 7 kinematics variables tested. Standard error of angular variables was lower than 3º for all as well as lower than 5 mm for the linear variable tested. The minimal detectable difference values ranged from 2.15 to 6.55º for angular variables and of 15.51 mm for linear variables. A high and very high degree of intra-rater reliability can be achieved using Retül Vantage system for kinematics assessment of the most common variables included in bike fitting.},
number = {3},
journal = {Journal of Science and Cycling},
author = {Ribeiro Branco, Guilherme and De Michelis Mendonça, Luciana and Alves Resende, Renan and Pivetta Carpes, Felipe},
year = {2022},
month = {Dec.},
pages = {76--84}
}
@misc{retulMarkersYoutube,
title = {XY Fit Steps 1-8: Markers and Harness},
author = {{Retül Technology}},
url = {https://www.youtube.com/watch?v=k-FsejpibKE},
year = {2021}
}
@misc{retulScreenYoutube,
title = {XY Fit Step 4: Saddle Fore-Aft},
author = {{Retül Technology}},
url = {https://www.youtube.com/watch?v=hZgI87DUUbU},
year = {2021}
}
@misc{bikeFastFitElitev2Youtube,
title = {Bike Fast Fit Elite v2 - Basic Bike Fitting},
author = {{Bike Fast Fit}},
url = {https://www.youtube.com/watch?v=4xhFhvIM7R4},
year = {2023}
}
@misc{mmpose2020,
title = {OpenMMLab Pose Estimation Toolbox and Benchmark},
author = {MMPose Contributors},
howpublished = {\url{https://github.com/open-mmlab/mmpose}},
year = {2020}
}
@inproceedings{simcc,
title = {{SimCC}: A simple coordinate classification perspective for human pose estimation},
author = {Li, Yanjie and Yang, Sen and Liu, Peidong and Zhang, Shoukui and Wang, Yunxiao and Wang, Zhicheng and Yang, Wankou and Xia, Shu-Tao},
booktitle = {European Conference on Computer Vision},
pages = {89--106},
year = {2022},
organization = {Springer}
}
@inproceedings{vipnas,
title = {{ViPNAS}: Efficient video pose estimation via neural architecture search},
author = {Xu, Lumin and Guan, Yingda and Jin, Sheng and Liu, Wentao and Qian, Chen and Luo, Ping and Ouyang, Wanli and Wang, Xiaogang},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
pages = {16072--16081},
year = {2021}
}
@inproceedings{coco,
author = {Lin, Tsung-Yi
and Maire, Michael
and Belongie, Serge
and Hays, James
and Perona, Pietro
and Ramanan, Deva
and Doll{\'a}r, Piotr
and Zitnick, C. Lawrence},
editor = {Fleet, David
and Pajdla, Tomas
and Schiele, Bernt
and Tuytelaars, Tinne},
title = {Microsoft COCO: Common Objects in Context},
booktitle = {Computer Vision -- ECCV 2014},
year = {2014},
publisher = {Springer International Publishing},
address = {Cham},
pages = {740--755},
abstract = {We present a new dataset with the goal of advancing the state-of-the-art in object recognition by placing the question of object recognition in the context of the broader question of scene understanding. This is achieved by gathering images of complex everyday scenes containing common objects in their natural context. Objects are labeled using per-instance segmentations to aid in precise object localization. Our dataset contains photos of 91 objects types that would be easily recognizable by a 4 year old. With a total of 2.5 million labeled instances in 328k images, the creation of our dataset drew upon extensive crowd worker involvement via novel user interfaces for category detection, instance spotting and instance segmentation. We present a detailed statistical analysis of the dataset in comparison to PASCAL, ImageNet, and SUN. Finally, we provide baseline performance analysis for bounding box and segmentation detection results using a Deformable Parts Model.},
isbn = {978-3-319-10602-1}
}
@inproceedings{hrnet,
author = {Sun, Ke and Xiao, Bin and Liu, Dong and Wang, Jingdong},
title = {Deep High-Resolution Representation Learning for Human Pose Estimation},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2019}
}
@article{rtmpose,
title = {RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose},
author = {Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai},
journal = {arXiv preprint arXiv:2303.07399},
year = {2023}
}
@article{rtmdet,
title = {{RTMDet}: An empirical study of designing real-time object detectors},
author = {Lyu, Chengqi and Zhang, Wenwei and Huang, Haian and Zhou, Yue and Wang, Yudong and Liu, Yanyi and Zhang, Shilong and Chen, Kai},
journal = {arXiv preprint arXiv:2212.07784},
year = {2022}
}
@inproceedings{yoloPose,
title = {{YOLO}-Pose: Enhancing {YOLO} for multi person pose estimation using object keypoint similarity loss},
author = {Maji, Debapriya and Nagori, Soyeb and Mathew, Manu and Poddar, Deepak},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
pages = {2637--2646},
year = {2022}
}
@article{yolox,
title = {{YOLOX}: Exceeding {YOLO} series in 2021},
author = {Ge, Zheng and Liu, Songtao and Wang, Feng and Li, Zeming and Sun, Jian},
journal = {arXiv preprint arXiv:2107.08430},
year = {2021}
}
@article{mmdetection,
title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
journal = {arXiv preprint arXiv:1906.07155},
year = {2019}
}
@inproceedings{gau,
title = {Transformer quality in linear time},
author = {Hua, Weizhe and Dai, Zihang and Liu, Hanxiao and Le, Quoc},
booktitle = {International Conference on Machine Learning},
pages = {9099--9117},
year = {2022},
organization = {PMLR}
}
@article{attentionIsAllYouNeed,
title = {Attention is all you need},
author = {Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N and Kaiser, {\L}ukasz and Polosukhin, Illia},
journal = {Advances in neural information processing systems},
volume = {30},
year = {2017}
}
@inproceedings{coco-wholebody,
title = {Whole-Body Human Pose Estimation in the Wild},
author = {Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping},
booktitle = {Proceedings of the European Conference on Computer Vision (ECCV)},
year = {2020}
}
@article{ai_challenger,
title = {{AI} Challenger: A large-scale dataset for going deeper in image understanding},
author = {Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others},
journal = {arXiv preprint arXiv:1711.06475},
year = {2017}
}
@inproceedings{crowdpose,
author = {Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu},
title = {CrowdPose: Efficient Crowded Scenes Pose Estimation and a New Benchmark},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2019}
}
@inproceedings{mpii,
author = {Andriluka, Mykhaylo and Pishchulin, Leonid and Gehler, Peter and Schiele, Bernt},
title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2014}
}
@inproceedings{JHMDB,
title = {Towards understanding action recognition},
author = {Jhuang, H. and Gall, J. and Zuffi, S. and Schmid, C. and Black, M. J.},
booktitle = {International Conference on Computer Vision (ICCV)},
month = {Dec.},
pages = {3192--3199},
year = {2013}
}
@inproceedings{posetrack,
author = {Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt},
title = {PoseTrack: A Benchmark for Human Pose Estimation and Tracking},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2018}
}
@inproceedings{ochuman,
author = {Zhang, Song-Hai and Li, Ruilong and Dong, Xin and Rosin, Paul and Cai, Zixi and Han, Xi and Yang, Dingcheng and Huang, Haozhi and Hu, Shi-Min},
title = {Pose2Seg: Detection Free Human Instance Segmentation},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2019}
}
@article{halpe,
author = {Fang, Hao-Shu and Li, Jiefeng and Tang, Hongyang and Xu, Chao and Zhu, Haoyi and Xiu, Yuliang and Li, Yong-Lu and Lu, Cewu},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
title = {AlphaPose: Whole-Body Regional Multi-Person Pose Estimation and Tracking in Real-Time},
year = {2022}
}
@inproceedings{ubody,
title = {One-Stage 3D Whole-Body Mesh Recovery with Component Aware Transformer},
author = {Lin, Jing and Zeng, Ailing and Wang, Haoqian and Zhang, Lei and Li, Yu},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
pages = {21159--21168},
year = {2023}
}
@misc{yolov5,
author = {Ultralytics},
title = {{YOLOv5}: {A} state-of-the-art real-time object detection system},
year = {2021},
howpublished = {\url{https://docs.ultralytics.com}},
note = {Accessed: 14.01.2024}
}
@inproceedings{mobilenetv3,
title = {Searching for {MobileNetV3}},
author = {Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and others},
booktitle = {Proceedings of the IEEE/CVF international conference on computer vision},
pages = {1314--1324},
year = {2019}
}
@inproceedings{opencvjs,
title = {{OpenCV.js}: Computer vision processing for the open web platform},
author = {Taheri, Sajjad and Veidenbaum, Alexander and Nicolau, Alexandru and Hu, Ningxin and Haghighat, Mohammad R},
booktitle = {Proceedings of the 9th ACM Multimedia Systems Conference},
pages = {478--483},
year = {2018}
}
@inproceedings{sort,
title = {Simple online and realtime tracking},
author = {Bewley, Alex and Ge, Zongyuan and Ott, Lionel and Ramos, Fabio and Upcroft, Ben},
booktitle = {2016 IEEE international conference on image processing (ICIP)},
pages = {3464--3468},
year = {2016},
organization = {IEEE}
}
@inproceedings{kmeans,
title = {Some methods for classification and analysis of multivariate observations},
author = {MacQueen, James and others},
booktitle = {Proceedings of the fifth Berkeley symposium on mathematical statistics and probability},
volume = {1},
number = {14},
pages = {281--297},
year = {1967},
organization = {Oakland, CA, USA}
}
@inproceedings{ns-inpainting,
author = {Bertalmio, M. and Bertozzi, A.L. and Sapiro, G.},
booktitle = {Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001},
title = {Navier-stokes, fluid dynamics, and image and video inpainting},
year = {2001},
volume = {1},
pages = {I-I},
keywords = {Fluid dynamics;Streaming media;Filling;Image restoration;Motion pictures;Laplace equations;Algorithm design and analysis;Navier-Stokes equations;Computational fluid dynamics;Computer vision},
doi = {10.1109/CVPR.2001.990497}
}
@article{fast-marching-inpainting,
title = {An Image Inpainting Technique Based on the Fast Marching Method},
author = {Telea, Alexandru Cristian},
journal = {Journal of Graphics Tools},
year = {2004},
volume = {9},
pages = {23--34},
url = {https://api.semanticscholar.org/CorpusID:5908881}
}
@article{opencv,
author = {Bradski, G.},
journal = {Dr. Dobb's Journal of Software Tools},
title = {{The OpenCV Library}},
year = {2000}
}
@inproceedings{oks,
author = {Ronchi, Matteo Ruggero and Perona, Pietro},
title = {Benchmarking and Error Diagnosis in Multi-instance Pose Estimation},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision (ICCV)},
month = {Oct.},
year = {2017},
doi = {10.1109/ICCV.2017.48}
}
@article{adamw,
title = {Decoupled weight decay regularization},
author = {Loshchilov, Ilya and Hutter, Frank},
journal = {arXiv preprint arXiv:1711.05101},
year = {2017}
}
@misc{mmdeploy,
title = {OpenMMLab's Model Deployment Toolbox.},
author = {MMDeploy Contributors},
howpublished = {\url{https://github.com/open-mmlab/mmdeploy}},
year = {2021}
}
@misc{onnx2tf,
title = {Tools to convert ONNX files (NCHW) to TensorFlow format (NHWC)},
author = {Hyodo, Katsuya},
orcid = {https://orcid.org/0000-0002-6163-6251},
year = {2022},
url = {https://github.com/PINTO0309/onnx2tf}
}
@inproceedings{ssd,
author = {Liu, Wei
and Anguelov, Dragomir
and Erhan, Dumitru
and Szegedy, Christian
and Reed, Scott
and Fu, Cheng-Yang
and Berg, Alexander C.},
editor = {Leibe, Bastian
and Matas, Jiri
and Sebe, Nicu
and Welling, Max},
title = {SSD: Single Shot MultiBox Detector},
booktitle = {Computer Vision -- ECCV 2016},
year = {2016},
publisher = {Springer International Publishing},
address = {Cham},
pages = {21--37},
abstract = {We present a method for detecting objects in images using a single deep neural network. Our approach, named SSD, discretizes the output space of bounding boxes into a set of default boxes over different aspect ratios and scales per feature map location. At prediction time, the network generates scores for the presence of each object category in each default box and produces adjustments to the box to better match the object shape. Additionally, the network combines predictions from multiple feature maps with different resolutions to naturally handle objects of various sizes. SSD is simple relative to methods that require object proposals because it completely eliminates proposal generation and subsequent pixel or feature resampling stages and encapsulates all computation in a single network. This makes SSD easy to train and straightforward to integrate into systems that require a detection component. Experimental results on the PASCAL VOC, COCO, and ILSVRC datasets confirm that SSD has competitive accuracy to methods that utilize an additional object proposal step and is much faster, while providing a unified framework for both training and inference. For $300 \times 300$ input, SSD achieves 74.3\% mAP on VOC2007 test at 59 FPS on a Nvidia Titan X and for $512 \times 512$ input, SSD achieves 76.9\% mAP, outperforming a comparable state of the art Faster R-CNN model. Compared to other single stage methods, SSD has much better accuracy even with a smaller input image size. Code is available at https://github.com/weiliu89/caffe/tree/ssd.},
isbn = {978-3-319-46448-0}
}
@article{savgol,
author = {Savitzky, Abraham. and Golay, M. J. E.},
title = {Smoothing and Differentiation of Data by Simplified Least Squares Procedures.},
journal = {Analytical Chemistry},
volume = {36},
number = {8},
pages = {1627--1639},
year = {1964},
doi = {10.1021/ac60214a047},
url = {https://doi.org/10.1021/ac60214a047}
}