feat(v2): iterator and sub-DAG basic support #6985

Merged · 9 commits · Dec 9, 2021
63 changes: 62 additions & 1 deletion samples/core/loop_output/loop_output_test.py
@@ -12,11 +12,72 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import unittest
import kfp
import kfp_server_api
from ml_metadata.proto import Execution
from .loop_output import my_pipeline
from ...test.util import run_pipeline_func, TestCase
from .loop_output_v2 import my_pipeline as my_pipeline_v2
from ...test.util import KfpTask, run_pipeline_func, TestCase


def verify(t: unittest.TestCase, run: kfp_server_api.ApiRun,
tasks: dict[str, KfpTask], **kwargs):
t.assertEqual(run.status, 'Succeeded')
# assert DAG structure
t.assertCountEqual(tasks.keys(), ['args-generator-op', 'for-loop-1'])
t.assertCountEqual(
['for-loop-1-#0', 'for-loop-1-#1', 'for-loop-1-#2'],
tasks['for-loop-1'].children.keys(),
)
# assert all iteration parameters
t.assertCountEqual(
[1.1, 1.2, 1.3],
[
x.inputs
.parameters['pipelinechannel--args-generator-op-Output-loop-item']
for x in tasks['for-loop-1'].children.values()
],
)
# assert 1 iteration task
t.assertEqual(
{
'name': 'for-loop-1-#0',
'type': 'system.DAGExecution',
'state':
Execution.State.RUNNING, # TODO(Bobgy): this should be COMPLETE
'inputs': {
'parameters': {
'pipelinechannel--args-generator-op-Output-loop-item': 1.1
}
}
},
tasks['for-loop-1'].children['for-loop-1-#0'].get_dict(),
)
t.assertEqual(
{
'name': 'print-op',
'type': 'system.ContainerExecution',
'state': Execution.State.COMPLETE,
'inputs': {
'parameters': {
's': 1.1
}
}
},
tasks['for-loop-1'].children['for-loop-1-#0'].children['print-op']
.get_dict(),
)


run_pipeline_func([
TestCase(
pipeline_func=my_pipeline_v2,
mode=kfp.dsl.PipelineExecutionMode.V2_ENGINE,
verify_func=verify,
),
TestCase(
pipeline_func=my_pipeline,
mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY,
41 changes: 41 additions & 0 deletions samples/core/loop_output/loop_output_v2.py
@@ -0,0 +1,41 @@
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from kfp.v2 import dsl

# In tests, we install a KFP package from the PR under test. Users should not
# normally need to specify `kfp_package_path` in their component definitions.
_KFP_PACKAGE_PATH = os.getenv('KFP_PACKAGE_PATH')


@dsl.component(kfp_package_path=_KFP_PACKAGE_PATH)
def args_generator_op() -> str:
return '[1.1, 1.2, 1.3]'


# TODO(Bobgy): how can we make this component with type float?
# got error: kfp.v2.components.types.type_utils.InconsistentTypeException:
# Incompatible argument passed to the input "s" of component "Print op": Argument
# type "STRING" is incompatible with the input type "NUMBER_DOUBLE"
@dsl.component(kfp_package_path=_KFP_PACKAGE_PATH)
def print_op(s: str):
print(s)


@dsl.pipeline(name='pipeline-with-loop-output-v2')
def my_pipeline():
args_generator = args_generator_op()
with dsl.ParallelFor(args_generator.output) as item:
print_op(s=item)
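
As a quick sanity check outside the diff, the new sample can be compiled locally with the v2 compiler. This is a minimal sketch, not part of this PR, and the output filename is just a placeholder:

from kfp.v2 import compiler

# Compile the sample into a v2 pipeline spec (JSON) that a v2-enabled
# backend can execute; the package_path here is an arbitrary example.
compiler.Compiler().compile(
    pipeline_func=my_pipeline,
    package_path='pipeline_with_loop_output_v2.json',
)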
30 changes: 30 additions & 0 deletions samples/core/loop_static/loop_static_v2.py
@@ -0,0 +1,30 @@
from kfp.v2 import components, dsl
from typing import List


@dsl.component
def print_op(text: str) -> str:
print(text)
return text


@dsl.component
def concat_op(a: str, b: str) -> str:
print(a + b)
return a + b


_DEFAULT_LOOP_ARGUMENTS = [{'a': '1', 'b': '2'}, {'a': '10', 'b': '20'}]


@dsl.pipeline(name='pipeline-with-loop-static')
def my_pipeline(
static_loop_arguments: List[dict] = _DEFAULT_LOOP_ARGUMENTS,
greeting: str = 'this is a test for looping through parameters',
):
print_task = print_op(text=greeting)

with dsl.ParallelFor(static_loop_arguments) as item:
concat_task = concat_op(a=item.a, b=item.b)
concat_task.after(print_task)
print_task_2 = print_op(text=concat_task.output)
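
For reference, a pipeline like this would typically be submitted through a KFP client in one of the v2 execution modes, which is what the shared run_pipeline_func helper does for the sample tests. A rough sketch, assuming a KFP 1.8-era SDK; the host URL is a placeholder:

import kfp

# Hypothetical endpoint; point this at a real KFP API server.
client = kfp.Client(host='http://localhost:8888')

# V2_COMPATIBLE runs the v2 DSL on the classic backend; the tests in this PR
# exercise V2_ENGINE via run_pipeline_func instead.
client.create_run_from_pipeline_func(
    my_pipeline,
    arguments={},
    mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE,
)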
6 changes: 2 additions & 4 deletions samples/test/lightweight_python_functions_v2_pipeline_test.py
@@ -39,7 +39,6 @@ def verify(run: kfp_server_api.ApiRun, mlmd_connection_config, **kwargs):
t.assertEqual(
{
'inputs': {
'artifacts': [],
'parameters': {
'message': 'message',
}
@@ -93,8 +92,8 @@ def verify(run: kfp_server_api.ApiRun, mlmd_connection_config, **kwargs):
'parameters': {
'input_bool': True,
'input_dict': {
"A": 1,
"B": 2
"A": 1.0,
"B": 2.0,
},
'input_list': ["a", "b", "c"],
'message': 'message'
@@ -110,7 +109,6 @@ def verify(run: kfp_server_api.ApiRun, mlmd_connection_config, **kwargs):
'name': 'model',
'type': 'system.Model'
}],
'parameters': {}
},
'type': 'system.ContainerExecution',
'state': Execution.State.COMPLETE,
22 changes: 0 additions & 22 deletions samples/test/metrics_visualization_v2_test.py
@@ -40,10 +40,6 @@ def verify(t: unittest.TestCase, run: kfp_server_api.ApiRun,

t.assertEqual(
{
'inputs': {
'artifacts': [],
'parameters': {}
},
Comment on lines -43 to -46
Contributor (Author): I removed all the empty structs for more concise comparison
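
In practice this means the expected-task dicts no longer spell out empty collections, which implies the test helper's get_dict() output omits them as well. An illustrative (not verbatim) before/after:

# Illustrative only; not copied from the test files.
expected_before = {
    'name': 'digit-classification',
    'inputs': {'artifacts': [], 'parameters': {}},  # empty structs had to be listed
    'type': 'system.ContainerExecution',
}
expected_after = {
    'name': 'digit-classification',  # empty inputs are simply omitted now
    'type': 'system.ContainerExecution',
}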

'name': 'wine-classification',
'outputs': {
'artifacts': [{
@@ -100,15 +96,13 @@ def verify(t: unittest.TestCase, run: kfp_server_api.ApiRun,
'name': 'metrics',
'type': 'system.ClassificationMetrics'
}],
'parameters': {}
},
'type': 'system.ContainerExecution',
'state': Execution.State.COMPLETE,
}, wine_classification.get_dict())
t.assertEqual(
{
'inputs': {
'artifacts': [],
'parameters': {
'test_samples_fraction': 0.3
}
@@ -144,7 +138,6 @@ def verify(t: unittest.TestCase, run: kfp_server_api.ApiRun,
'name': 'metrics',
'type': 'system.ClassificationMetrics'
}],
'parameters': {}
},
'type': 'system.ContainerExecution',
'state': Execution.State.COMPLETE,
@@ -160,10 +153,6 @@ def verify(t: unittest.TestCase, run: kfp_server_api.ApiRun,

t.assertEqual(
{
'inputs': {
'artifacts': [],
'parameters': {}
},
'name': 'digit-classification',
'outputs': {
'artifacts': [{
@@ -174,18 +163,13 @@ def verify(t: unittest.TestCase, run: kfp_server_api.ApiRun,
'name': 'metrics',
'type': 'system.Metrics'
}],
'parameters': {}
},
'type': 'system.ContainerExecution',
'state': Execution.State.COMPLETE,
}, digit_classification.get_dict())

t.assertEqual(
{
'inputs': {
'artifacts': [],
'parameters': {}
},
'name': 'html-visualization',
'outputs': {
'artifacts': [{
@@ -195,18 +179,13 @@ def verify(t: unittest.TestCase, run: kfp_server_api.ApiRun,
'name': 'html_artifact',
'type': 'system.HTML'
}],
'parameters': {}
},
'state': Execution.State.COMPLETE,
'type': 'system.ContainerExecution'
}, html_visualization.get_dict())

t.assertEqual(
{
'inputs': {
'artifacts': [],
'parameters': {}
},
'name': 'markdown-visualization',
'outputs': {
'artifacts': [{
@@ -216,7 +195,6 @@ def verify(t: unittest.TestCase, run: kfp_server_api.ApiRun,
'name': 'markdown_artifact',
'type': 'system.Markdown'
}],
'parameters': {}
},
'state': Execution.State.COMPLETE,
'type': 'system.ContainerExecution'
16 changes: 1 addition & 15 deletions samples/test/two_step_with_uri_placeholder_test.py
@@ -26,24 +26,16 @@


def verify_tasks(t: unittest.TestCase, tasks: Dict[str, KfpTask]):
task_names = [*tasks.keys()]
t.assertCountEqual(task_names, ['read-from-gcs', 'write-to-gcs'],
t.assertCountEqual(tasks.keys(), ['read-from-gcs', 'write-to-gcs'],
'task names')

write_task = tasks['write-to-gcs']
read_task = tasks['read-from-gcs']

pprint('======= preprocess task =======')
pprint(write_task.get_dict())
pprint('======= train task =======')
pprint(read_task.get_dict())
pprint('==============')

t.assertEqual(
{
'name': 'write-to-gcs',
'inputs': {
'artifacts': [],
'parameters': {
'msg': 'Hello world!',
}
@@ -56,7 +48,6 @@ def verify_tasks(t: unittest.TestCase, tasks: Dict[str, KfpTask]):
'name': 'artifact',
'type': 'system.Artifact'
}],
'parameters': {}
},
'type': 'system.ContainerExecution',
'state': Execution.State.COMPLETE,
@@ -72,11 +63,6 @@ def verify_tasks(t: unittest.TestCase, tasks: Dict[str, KfpTask]):
'name': 'artifact',
'type': 'system.Artifact',
}],
'parameters': {}
},
'outputs': {
'artifacts': [],
'parameters': {}
},
'type': 'system.ContainerExecution',
'state': Execution.State.COMPLETE,