-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathworkflow.yaml
162 lines (151 loc) · 5.01 KB
/
workflow.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
# License-plate OCR workflow (Onepanel / Argo Workflows).
# Pipeline: process-input-data (placeholder) -> detect-license-plate -> detect-ocr.
arguments:
  parameters:
    # CVAT annotation dump used as the input dataset.
    - name: cvat-annotation-path
      value: annotation-dump/license-workflow-test
      displayName: Path to dataset
      visibility: private
    # Destination prefix for workflow output artifacts.
    - name: cvat-output-path
      value: workflow-data/output/test-workflow-output
      visibility: private
    # NOTE(review): dump-format is declared but not referenced by any template
    # visible in this file — confirm it is consumed by the CVAT integration.
    - name: dump-format
      value: cvat_coco
      displayName: CVAT dump format
      visibility: public
    # Frozen TF object-detection model used by the license-detector template.
    - name: detector-path
      value: rush/workflow-data/output/license-detector/tf-object-detection-training/frcnn-res101-coco/tf-object-detection-training-66pnc/
      displayName: Path to object detector model
      visibility: public
    # Attention-OCR checkpoint used by the ocr-detector template.
    - name: ocr-model-path
      value: savan/workflow-data/output/license-plate-ocr-output1/attention-ocr-training-vstcx/
      displayName: Path to ocr detector model
      visibility: public
    # TensorFlow base image for both detector containers (CPU or GPU variant).
    - name: tf-image
      value: tensorflow/tensorflow:1.13.1-py3
      type: select.select
      displayName: Select TensorFlow image
      visibility: public
      hint: Select the GPU image if you are running on a GPU node pool
      options:
        - name: 'TensorFlow 1.13.1 CPU Image'
          value: 'tensorflow/tensorflow:1.13.1-py3'
        - name: 'TensorFlow 1.13.1 GPU Image'
          value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
entrypoint: main
templates:
  # Top-level DAG: each task runs one of the container templates below.
  - name: main
    dag:
      tasks:
        - name: process-input-data
          template: bash
        - name: detect-license-plate
          dependencies: [process-input-data]
          template: license-detector
        - name: detect-ocr
          dependencies: [detect-license-plate]
          template: ocr-detector
  # Runs attention-OCR inference on the license-plate boxes produced by
  # the license-detector step (read back from S3 as output-data).
  - name: ocr-detector
    inputs:
      artifacts:
        # OCR training/inference scripts.
        - name: tsrc
          path: /mnt/src/train
          git:
            repo: 'https://github.com/onepanelio/LicensePlateOcr.git'
        # TensorFlow models repo (provides research/attention_ocr).
        - name: src
          path: /mnt/src/tf
          git:
            repo: 'https://github.com/tensorflow/models.git'
        - name: data
          path: /mnt/data/datasets/
          s3:
            key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
        - name: ocr-model
          path: /mnt/data/models/
          s3:
            key: '{{workflow.parameters.ocr-model-path}}'
        # Detection results uploaded by the license-detector step.
        - name: output-data
          path: /mnt/data/outputdata/
          s3:
            key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
    outputs:
      artifacts:
        # NOTE(review): this uploads /mnt/output, but unlike license-detector
        # no 'output' volume is mounted here — confirm whether a volumeMounts
        # entry is missing or the artifact is intentionally best-effort
        # (optional: true tolerates the path being absent).
        - name: model
          path: /mnt/output
          optional: true
          s3:
            key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
    container:
      image: '{{workflow.parameters.tf-image}}'
      command: [sh, -c]
      args:
        - |
          apt-get update && \
          apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
          cd /mnt/src/tf/research && \
          export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
          cd /mnt/src/train && \
          pip install -r requirements.txt && \
          cp -f custom.py /mnt/src/tf/research/attention_ocr/python/datasets/ && \
          cp -f __init__.py /mnt/src/tf/research/attention_ocr/python/datasets/ && \
          cp -f demo_inference.py /mnt/src/tf/research/attention_ocr/python/ && \
          cp -f ./data/charset_size.txt /mnt/data/datasets/ && \
          cd /mnt/src/tf/research/attention_ocr/python/ && \
          export PYTHONPATH=$PYTHONPATH:./datasets/ && \
          python demo_inference.py \
            --dataset_name=custom \
            --checkpoint=/mnt/data/models/ \
            --batch_size=1 \
            --license_boxes_json_path=/mnt/data/outputdata/output.json
      workingDir: /mnt/src
  # Runs the frozen Faster R-CNN detector over the dataset images and
  # writes detections to /mnt/output (uploaded via the 'model' artifact).
  - name: license-detector
    inputs:
      artifacts:
        - name: src
          path: /mnt/src
          git:
            repo: 'https://github.com/onepanelio/LicensePlateOcr.git'
        - name: data
          path: /mnt/data/datasets/
          s3:
            key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
        - name: models
          path: /mnt/data/models
          s3:
            key: '{{workflow.parameters.detector-path}}'
    outputs:
      artifacts:
        - name: model
          path: /mnt/output
          optional: true
          s3:
            key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
    container:
      image: '{{workflow.parameters.tf-image}}'
      command: [sh, -c]
      args:
        - |
          ls /mnt/data/ \
          && apt update \
          && apt install libgl1-mesa-glx ffmpeg libsm6 libxext6 libglib2.0-0 libxext6 libxrender-dev wget unzip -y \
          && cd /mnt/src/ \
          && pip install -r requirements.txt \
          && python license_detection.py --weights=/mnt/data/models/frozen_inference_graph.pb --dataset=/mnt/data/datasets/images/
      workingDir: /mnt/src
      volumeMounts:
        - name: output
          mountPath: /mnt/output
  # Placeholder pre-processing step; currently just sleeps.
  - name: bash
    container:
      image: bash
      command:
        - bash
        - -c
      args:
        - sleep 15
# Scratch volume shared into license-detector as /mnt/output.
volumeClaimTemplates:
  - metadata:
      name: output
    spec:
      accessModes: ['ReadWriteOnce']
      resources:
        requests:
          storage: 2Gi