From 12678b81c12e6e8c08e21ace5a8e740ce52ebe71 Mon Sep 17 00:00:00 2001 From: etaques <97463920+etaques@users.noreply.github.com> Date: Tue, 18 Oct 2022 10:38:04 -0300 Subject: [PATCH 01/94] new development version --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 5a03fb737..885415662 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.20.0 +0.21.0 From 349309dda0d0876a4ef97a4e538e7d2d0a8d45be Mon Sep 17 00:00:00 2001 From: manrodrigues <78241475+manrodrigues@users.noreply.github.com> Date: Tue, 18 Oct 2022 13:23:29 -0300 Subject: [PATCH 02/94] propagation of policies status "unknown" and backend state check (#1886) propagation of policies status "unknown" and backend state check (#1886) --- python-test/features/environment.py | 2 + python-test/features/integration.feature | 133 ++++++++++++++++ .../features/steps/control_plane_agents.py | 150 ++++++++++++++++-- python-test/requirements.txt | 1 + 4 files changed, 276 insertions(+), 10 deletions(-) diff --git a/python-test/features/environment.py b/python-test/features/environment.py index 0f81f2e18..54c174b75 100644 --- a/python-test/features/environment.py +++ b/python-test/features/environment.py @@ -9,6 +9,8 @@ def before_scenario(context, scenario): def after_scenario(context, scenario): + if 'access_denied' in context and context.access_denied is True: + scenario.set_status(Status.skipped) if scenario.status != Status.failed: context.execute_steps(''' Then stop the orb-agent container diff --git a/python-test/features/integration.feature b/python-test/features/integration.feature index 6268a05e5..c5d702c57 100644 --- a/python-test/features/integration.feature +++ b/python-test/features/integration.feature @@ -42,6 +42,7 @@ Scenario: Apply multiple advanced policies to an agent Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -56,6 +57,7 @@ Scenario: Apply two simple policies to an agent Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -72,6 +74,7 @@ Scenario: apply one policy using multiple datasets to the same group Given the Orb user has a registered account And the Orb user logs in And that an agent with 2 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -88,6 +91,7 @@ Scenario: Remove group to which agent is linked Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -107,6 +111,7 @@ Scenario: Remove policy from agent Given the Orb user has a registered account And the Orb user logs in And that an agent with 3 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this 
agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -127,6 +132,7 @@ Scenario: Remove dataset from agent with just one dataset linked Given the Orb user has a registered account And the Orb user logs in And that an agent with 3 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -145,6 +151,7 @@ Scenario: Remove dataset from agent with more than one dataset linked Given the Orb user has a registered account And the Orb user logs in And that an agent with 4 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -249,6 +256,7 @@ Scenario: Sink with invalid endpoint Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink with invalid endpoint already exists @@ -268,6 +276,7 @@ Scenario: Unapplying policies that failed by editing agent orb tags to unsubscri Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -287,6 +296,7 @@ Scenario: Unapplying policies that failed by editing group tags to unsubscribe a Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -306,6 +316,7 @@ Scenario: Unapplying policies that failed by removing group Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -326,6 +337,7 @@ Scenario: Sink with invalid username Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink with invalid username already exists @@ -345,6 +357,7 @@ Scenario: Sink with invalid password Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink with invalid password already exists @@ -431,6 +444,7 @@ Scenario: Agent subscription to multiple group with policies after editing orb a Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred 
agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -451,6 +465,7 @@ Scenario: Agent subscription to group with policies after editing orb agent's ta Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -472,6 +487,7 @@ Scenario: Remove one of the groups that applies the same policy on the agent Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 2 groups And this agent's heartbeat shows that 2 groups are matching the agent And that a sink already exists @@ -492,6 +508,7 @@ Scenario: Remove one of the datasets that applies the same policy on the agent Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 2 groups And this agent's heartbeat shows that 2 groups are matching the agent And that a sink already exists @@ -527,6 +544,7 @@ Scenario: Edit agent name and apply policies to then Given the Orb user has a registered account And the Orb user logs in And that an agent with 5 orb tag(s) already exists and is online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And 1 agent must be matching on response field matching_agents of the last group created And that a sink already exists @@ -556,6 +574,7 @@ Scenario: Editing tags of an Agent Group with policies (subscription - provision Given the Orb user has a registered account And the Orb user logs in And that an agent with 2 orb tag(s) already exists and is online + And pktvisor state is running And 1 Agent Group(s) is created with 1 orb tag(s) and without description And that a sink already exists And 2 simple policies are applied to the group @@ -605,6 +624,7 @@ Scenario: Editing tags of an Agent and Agent Group with policies (unsubscription Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And 1 Agent Group(s) is created with same tag as the agent and without description And that a sink already exists And 2 simple policies are applied to the group @@ -622,6 +642,7 @@ Scenario: Editing tags of an Agent and Agent Group with policies (subscription - Given the Orb user has a registered account And the Orb user logs in And that an agent with 3 orb tag(s) already exists and is online + And pktvisor state is running And 1 Agent Group(s) is created with 1 orb tag(s) and without description And that a sink already exists And 2 simple policies are applied to the group @@ -676,6 +697,7 @@ Scenario: Edit an advanced policy with handler dns changing the handler to net And the Orb user logs in And that a sink already exists And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And a new policy is created using: handler=dns, description='policy_dns', bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.foo.com/ .example.com], 
only_rcode=0 And 1 new dataset is created using the policy, last group and 1 sink @@ -696,6 +718,7 @@ Scenario: Edit an advanced policy with handler dns changing the handler to dhcp And the Orb user logs in And that a sink already exists And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And this agent's heartbeat shows that 1 groups are matching the agent And a new policy is created using: handler=dns, host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.foo.com/ .example.com], only_rcode=2 @@ -714,6 +737,7 @@ Scenario: Edit a simple policy with handler dhcp changing the handler to net And the Orb user logs in And that a sink already exists And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And this agent's heartbeat shows that 1 groups are matching the agent And a new policy is created using: handler=dhcp @@ -731,6 +755,7 @@ Scenario: Edit a simple policy with handler net changing the handler to dns and And the Orb user logs in And that a sink already exists And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And a new policy is created using: handler=net And 1 new dataset is created using the policy, last group and 1 sink @@ -751,6 +776,7 @@ Scenario: remove 1 sink from a dataset with 2 sinks And the Orb user logs in And that 2 sinks already exists And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And this agent's heartbeat shows that 1 groups are matching the agent And a new policy is created using: handler=dhcp @@ -768,6 +794,7 @@ Scenario: remove 1 sink from a dataset with 1 sinks And the Orb user logs in And that 2 sinks already exists And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And a new policy is created using: handler=dhcp And 1 new dataset is created using the policy, last group and 1 sinks @@ -785,6 +812,7 @@ Scenario: remove one sink from a dataset with 1 sinks, edit the dataset and inse And the Orb user logs in And that 2 sinks already exists And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And a new policy is created using: handler=dns And 1 new dataset is created using the policy, last group and 1 sinks @@ -804,6 +832,7 @@ Scenario: Remotely restart agents with policies applied Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -823,6 +852,7 @@ Scenario: Remotely restart agents without policies applied Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 
groups are matching the agent And that a sink already exists @@ -842,6 +872,7 @@ Scenario: Create duplicated policy Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink already exists @@ -941,6 +972,7 @@ Scenario: Edit sink with invalid username and use valid one Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink with invalid username already exists @@ -962,6 +994,7 @@ Scenario: Edit sink with password and use valid one Given the Orb user has a registered account And the Orb user logs in And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running And referred agent is subscribed to 1 group And this agent's heartbeat shows that 1 groups are matching the agent And that a sink with invalid password already exists @@ -978,6 +1011,76 @@ Scenario: Edit sink with password and use valid one And 4 dataset(s) have validity valid and 0 have validity invalid in 30 seconds +@smoke +Scenario: Check policies status when agent backend stops running + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + And 5 mixed policies are applied to the group + And this agent's heartbeat shows that 5 policies are applied and all has status running + And the container logs contain the message "policy applied successfully" referred to each policy within 30 seconds + And 5 dataset(s) have validity valid and 0 have validity invalid in 30 seconds + When agent backend (pktvisor) stops running + And pktvisor state is backend_error + Then this agent's heartbeat shows that 5 policies are applied and all has status unknown + + +@smoke +Scenario: Check backend status when agent backend stops running + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + When agent backend (pktvisor) stops running + Then pktvisor state is backend_error + And pktvisor error is failed to retrieve backend status: signal: killed + + +@smoke +Scenario: Check auto reset after pktvisor stops running + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + And 10 mixed policies are applied to the group + Then this agent's heartbeat shows that 10 policies are applied and all has status running + When agent backend (pktvisor) stops running + And pktvisor state is backend_error + And pktvisor error is failed to retrieve backend status: signal: killed + And this agent's heartbeat shows that 10 policies are applied and all has status unknown + Then agent backend pktvisor restart_count is 1 + And
pktvisor state is running + And this agent's heartbeat shows that 10 policies are applied and all has status running + + +@smoke +Scenario: Check new policies applied after pktvisor stops running + Given the Orb user has a registered account + And the Orb user logs in + And that an agent with 1 orb tag(s) already exists and is online + And pktvisor state is running + And referred agent is subscribed to 1 group + And this agent's heartbeat shows that 1 groups are matching the agent + And that a sink already exists + And 10 mixed policies are applied to the group + And this agent's heartbeat shows that 10 policies are applied and all has status running + And agent backend (pktvisor) stops running + And pktvisor state is backend_error + And pktvisor error is failed to retrieve backend status: signal: killed + And this agent's heartbeat shows that 10 policies are applied and all has status unknown + And agent backend pktvisor restart_count is 1 + And pktvisor state is running + And this agent's heartbeat shows that 10 policies are applied and all has status running + When 2 mixed policies are applied to the group + Then this agent's heartbeat shows that 12 policies are applied and all has status running + ### AGENTS PROVISIONED USING CONFIGURATION FILES: ########### tap_selector @@ -988,6 +1091,7 @@ Scenario: tap_selector - any - matching 0 of all tags from an agent And the Orb user logs in And that a sink already exists When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And a net policy pcap with tap_selector matching any of 0 agent tap tags ands settings: geoloc_notfound=False is applied to the group Then 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1003,6 +1107,7 @@ Scenario: tap_selector - any - matching 1 of all tags from an agent And the Orb user logs in And that a sink already exists When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And a net policy pcap with tap_selector matching any of 1 agent (1 tag matching) tap tags ands settings: geoloc_notfound=False is applied to the group Then 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1020,6 +1125,7 @@ Scenario: tap_selector - any - matching 1 of all tags (plus 1 random tag) from a And the Orb user logs in And that a sink already exists When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And a net policy pcap with tap_selector matching any of 1 agent (1 tag matching + 1 random tag) tap tags ands settings: geoloc_notfound=False is applied to the group Then 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1037,6 +1143,7 @@ Scenario: tap_selector - all - matching 0 of all tags from an agent And the Orb user logs in And that a sink already exists When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And
pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And a net policy pcap with tap_selector matching all of 0 agent tap tags ands settings: geoloc_notfound=False is applied to the group Then 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1052,6 +1159,7 @@ Scenario: tap_selector - all - matching 1 of all tags from an agent And the Orb user logs in And that a sink already exists When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And a net policy pcap with tap_selector matching all of 1 agent (1 tag matching) tap tags ands settings: geoloc_notfound=False is applied to the group Then 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1070,6 +1178,7 @@ Scenario: tap_selector - all - matching all tags from an agent And the Orb user logs in And that a sink already exists When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And a net policy pcap with tap_selector matching all of an agent tap tags ands settings: geoloc_notfound=False is applied to the group Then 1 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1089,6 +1198,7 @@ Scenario: agent pcap with only agent tags subscription to a group with policies And the Orb user logs in And that a sink already exists When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are applied to the group Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1110,6 +1220,7 @@ Scenario: agent pcap with only agent tags subscription to a group with policies And 3 simple policies pcap are applied to the group And a new agent is created with 0 orb tag(s) When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -1126,6 +1237,7 @@ Scenario: agent pcap with mixed tags subscription to a group with policies creat And the Orb user logs in And that a sink already exists When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And edit the orb tags on agent and use 2 orb tag(s) And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are applied to the group @@ -1148,6 +1260,7 @@ Scenario: agent pcap with mixed tags subscription to a group with policies creat And 3 simple policies pcap are applied to the 
group And a new agent is created with 2 orb tag(s) When an agent(input_type:pcap, settings: {"iface":"default"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -1164,6 +1277,7 @@ Scenario: agent pcap with only agent tags subscription to a group with policies And that a sink already exists And a new agent is created with 0 orb tag(s) When an agent(input_type:pcap, settings: {"iface":"default"}) is provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are applied to the group Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1185,6 +1299,7 @@ Scenario: agent pcap with only agent tags subscription to a group with policies And 3 simple policies pcap are applied to the group And a new agent is created with 0 orb tag(s) When an agent(input_type:pcap, settings: {"iface":"default"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -1201,6 +1316,7 @@ Scenario: agent pcap with mixed tags subscription to a group with policies creat And that a sink already exists And a new agent is created with 2 orb tag(s) When an agent(input_type:pcap, settings: {"iface":"default"}) is provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And edit the orb tags on agent and use 2 orb tag(s) And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are applied to the group @@ -1223,6 +1339,7 @@ Scenario: agent pcap with mixed tags subscription to a group with policies creat And 3 simple policies pcap are applied to the group And a new agent is created with 2 orb tag(s) When an agent(input_type:pcap, settings: {"iface":"default"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -1241,6 +1358,7 @@ Scenario: agent flow with only agent tags subscription to a group with policies And the Orb user logs in And that a sink already exists When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are 
applied to the group Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1262,6 +1380,7 @@ Scenario: agent flow with only agent tags subscription to a group with policies And 3 simple policies flow are applied to the group And a new agent is created with 0 orb tag(s) When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -1278,6 +1397,7 @@ Scenario: agent flow with mixed tags subscription to a group with policies creat And the Orb user logs in And that a sink already exists When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And edit the orb tags on agent and use 2 orb tag(s) And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are applied to the group @@ -1300,6 +1420,7 @@ Scenario: agent flow with mixed tags subscription to a group with policies creat And 3 simple policies flow are applied to the group And a new agent is created with 2 orb tag(s) When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -1316,6 +1437,7 @@ Scenario: agent flow with only agent tags subscription to a group with policies And that a sink already exists And a new agent is created with 0 orb tag(s) When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are applied to the group Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1337,6 +1459,7 @@ Scenario: agent flow with only agent tags subscription to a group with policies And 3 simple policies flow are applied to the group And a new agent is created with 0 orb tag(s) When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -1353,6 +1476,7 @@ Scenario: agent flow with mixed tags subscription to a group with policies creat And that a sink already exists And a new agent is created 
with 2 orb tag(s) When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And edit the orb tags on agent and use 2 orb tag(s) And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are applied to the group @@ -1375,6 +1499,7 @@ Scenario: agent flow with mixed tags subscription to a group with policies creat And 3 simple policies flow are applied to the group And a new agent is created with 2 orb tag(s) When an agent(input_type:flow, settings: {"bind":"0.0.0.0", "port":"available_port"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -1394,6 +1519,7 @@ Scenario: agent dnstap with only agent tags subscription to a group with policie And the Orb user logs in And that a sink already exists When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are applied to the group Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1415,6 +1541,7 @@ Scenario: agent dnstap with only agent tags subscription to a group with policie And 3 simple policies dnstap are applied to the group And a new agent is created with 0 orb tag(s) When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -1431,6 +1558,7 @@ Scenario: agent dnstap with mixed tags subscription to a group with policies cre And the Orb user logs in And that a sink already exists When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is self-provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And edit the orb tags on agent and use 2 orb tag(s) And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are applied to the group @@ -1453,6 +1581,7 @@ Scenario: agent dnstap with mixed tags subscription to a group with policies cre And 3 simple policies dnstap are applied to the group And a new agent is created with 2 orb tag(s) When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is self-provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 
dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -1469,6 +1598,7 @@ Scenario: agent dnstap with only agent tags subscription to a group with policie And that a sink already exists And a new agent is created with 0 orb tag(s) When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are applied to the group Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds @@ -1490,6 +1620,7 @@ Scenario: agent dnstap with only agent tags subscription to a group with policie And 3 simple policies dnstap are applied to the group And a new agent is created with 0 orb tag(s) When an agent(input_type:dnstap, settings: {{"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds @@ -1506,6 +1637,7 @@ Scenario: agent dnstap with mixed tags subscription to a group with policies cre And that a sink already exists And a new agent is created with 2 orb tag(s) When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is provisioned via a configuration file on port available with 3 agent tags and has status online + And pktvisor state is running And edit the orb tags on agent and use 2 orb tag(s) And 1 Agent Group(s) is created with all tags contained in the agent And 3 simple policies same input_type as created via config file are applied to the group @@ -1528,6 +1660,7 @@ Scenario: agent dnstap with mixed tags subscription to a group with policies cre And 3 simple policies dnstap are applied to the group And a new agent is created with 2 orb tag(s) When an agent(input_type:dnstap, settings: {"tcp":"0.0.0.0:available_port", "only_hosts":"0.0.0.0/32"}) is provisioned via a configuration file on port available with matching 1 group agent tags and has status online + And pktvisor state is running Then 3 dataset(s) have validity valid and 0 have validity invalid in 30 seconds And this agent's heartbeat shows that 1 groups are matching the agent And the container logs should contain the message "completed RPC subscription to group" within 30 seconds diff --git a/python-test/features/steps/control_plane_agents.py b/python-test/features/steps/control_plane_agents.py index 435ad998e..dfe1a9ba3 100644 --- a/python-test/features/steps/control_plane_agents.py +++ b/python-test/features/steps/control_plane_agents.py @@ -1,19 +1,19 @@ -import random from test_config import TestConfig -from utils import random_string, filter_list_by_parameter_start_with, generate_random_string_with_predefined_prefix, \ - create_tags_set, find_files, threading_wait_until, return_port_to_run_docker_container, validate_json +from utils import * from local_agent import 
run_local_agent_container, run_agent_config_file, get_orb_agent_logs, get_logs_and_check from control_plane_agent_groups import return_matching_groups, tags_to_match_k_groups from behave import given, then, step from hamcrest import * from datetime import datetime import requests -import os from agent_config_file import FleetAgent import yaml from yaml.loader import SafeLoader import re import json +import psutil +import os +import signal configs = TestConfig.configs() agent_name_prefix = "test_agent_name_" @@ -113,13 +113,11 @@ def multiple_dataset_for_policy(context, amount_of_datasets, time_to_wait): " has status {policies_status}") def list_policies_applied_to_an_agent_and_referred_status(context, amount_of_policies, amount_of_policies_with_status, policies_status): - list_policies_applied_to_an_agent(context, amount_of_policies) - list_of_policies_status = list() - for policy_id in context.list_agent_policies_id: - list_of_policies_status.append(context.agent['last_hb_data']['policy_state'][policy_id]["state"]) if amount_of_policies_with_status == "all": amount_of_policies_with_status = int(amount_of_policies) - amount_of_policies_applied_with_status = list_of_policies_status.count(policies_status) + context.agent, context.list_agent_policies_id, amount_of_policies_applied_with_status = \ + get_policies_applied_to_an_agent_by_status(context.token, context.agent['id'], amount_of_policies, + amount_of_policies_with_status, policies_status, timeout=180) logs = get_orb_agent_logs(context.container_id) assert_that(amount_of_policies_applied_with_status, equal_to(int(amount_of_policies_with_status)), f"{amount_of_policies_with_status} policies was supposed to have status {policies_status}. \n" @@ -362,6 +360,51 @@ def check_error_message(context, message): assert_that(context.error_message['error'], equal_to(message), "Unexpected error message") +@step("agent backend (pktvisor) stops running") +def kill_pktvisor_on_agent(context): + try: + current_proc_pid = None + for process in psutil.process_iter(): + if "pkt" in process.name(): + proc_con = process.connections() + if not proc_con: + continue # skip matching processes with no open sockets + proc_port = proc_con[0].laddr.port + if proc_port == context.port: + current_proc_pid = process.pid + process.send_signal(signal.SIGKILL) + break + assert_that(current_proc_pid, is_not(None), "Unable to find pid of pktvisor process") + except psutil.AccessDenied: + context.access_denied = True + raise ValueError("You are not allowed to run this scenario without root permissions.") + except Exception as exception: + raise exception + + +@step("{backend} state is {state}") +def check_back_state(context, backend, state): + backend_state, agent = wait_until_expected_backend_state(context.token, context.agent['id'], backend, state, + timeout=180) + logs = get_orb_agent_logs(context.container_id) + assert_that(backend_state, equal_to(state), f"Unexpected backend state on agent: {agent}. Logs: {logs}") + + +@step("{backend} error is {error}") +def check_back_error(context, backend, error): + backend_state, agent = wait_until_expected_backend_error(context.token, context.agent['id'], backend, error, + timeout=180) + logs = get_orb_agent_logs(context.container_id) + assert_that(backend_state, equal_to(error), f"Unexpected backend error on agent: {agent}. Logs: {logs}")
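These step definitions and the wait helpers below all rely on the threading_wait_until decorator imported from utils, which this diff does not touch. A minimal sketch of how such a polling decorator can be implemented (the timeout and wait_time defaults and the retry loop are assumptions, not the actual utils code):

```python
import time
import threading
from functools import wraps

def threading_wait_until(func):
    """Sketch only: poll the wrapped function until it sets the event
    or the timeout elapses; the real utils implementation may differ.

    The wrapped function must accept an `event` keyword argument and
    call `event.set()` once the expected condition is observed; its
    last return value is passed through either way.
    """
    @wraps(func)
    def wrapper(*args, timeout=30, wait_time=0.5, **kwargs):
        event = threading.Event()
        deadline = time.time() + timeout
        result = func(*args, event=event, **kwargs)
        while not event.is_set() and time.time() < deadline:
            event.wait(wait_time)  # back off briefly between polls
            result = func(*args, event=event, **kwargs)
        return result
    return wrapper
```

Under this contract, a helper such as wait_until_expected_backend_state simply calls event.set() once the fetched state matches, and is re-invoked until it does or the timeout expires.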
Logs: {logs}") + + +@step("agent backend {backend} restart_count is {restart_count}") +def check_auto_reset(context, backend, restart_count): + amount, agent = wait_until_expected_amount_of_restart_count(context.token, context.agent['id'], backend, + restart_count, timeout=400) + logs = get_orb_agent_logs(context.container_id) + assert_that(int(amount), equal_to(int(restart_count)), f"Unexpected restart count for backend {backend} on agent: " + f"{agent}. Logs: {logs}") + + @threading_wait_until def wait_until_expected_agent_status(token, agent_id, status, event=None): """ @@ -382,6 +425,75 @@ def wait_until_expected_agent_status(token, agent_id, status, event=None): return agent_status, agent +@threading_wait_until +def wait_until_expected_amount_of_restart_count(token, agent_id, backend, amount_of_restart, event=None): + """ + Keeps fetching agent data from Orb control plane until it gets to + the expected agent status or this operation times out + + :param (str) token: used for API authentication + :param (str) agent_id: whose backend state will be evaluated + :param (str) amount_of_restart: expected amount of restart state + :param (str) backend: backend to check state + :param (obj) event: threading.event + """ + + agent = get_agent(token, agent_id) + if 'restart_count' in agent["last_hb_data"]["backend_state"][backend].keys(): + amount = agent["last_hb_data"]["backend_state"][backend]['restart_count'] + if int(amount_of_restart) == int(amount): + event.set() + return amount, agent + return amount, agent + else: + return None, agent + + +@threading_wait_until +def wait_until_expected_backend_state(token, agent_id, backend, state, event=None): + """ + Keeps fetching agent data from Orb control plane until it gets to + the expected agent status or this operation times out + + :param (str) token: used for API authentication + :param (str) agent_id: whose backend state will be evaluated + :param (str) state: expected backend state + :param (str) backend: backend to check state + :param (obj) event: threading.event + """ + + agent = get_agent(token, agent_id) + backend_state = agent["last_hb_data"]["backend_state"][backend]['state'] + if backend_state == state: + event.set() + return backend_state, agent + return backend_state, agent + + +@threading_wait_until +def wait_until_expected_backend_error(token, agent_id, backend, error, event=None): + """ + Keeps fetching agent data from Orb control plane until it gets to + the expected agent status or this operation times out + + :param (str) token: used for API authentication + :param (str) agent_id: whose backend error will be evaluated + :param (str) error: expected backend error + :param (str) backend: backend to check error + :param (obj) event: threading.event + """ + + agent = get_agent(token, agent_id) + if 'error' in agent["last_hb_data"]["backend_state"][backend].keys(): + backend_error = agent["last_hb_data"]["backend_state"][backend]['error'] + if backend_error == error: + event.set() + return backend_error, agent + return backend_error, agent + else: + return None, agent + + def get_agent(token, agent_id, status_code=200): """ Gets an agent from Orb control plane @@ -540,6 +652,23 @@ def get_policies_applied_to_an_agent(token, agent_id, amount_of_policies, event= return agent, list_agent_policies_id +@threading_wait_until +def get_policies_applied_to_an_agent_by_status(token, agent_id, amount_of_policies, amount_of_policies_with_status, + status, event=None): + agent, list_agent_policies_id = get_policies_applied_to_an_agent(token, 
agent_id, amount_of_policies, timeout=180) + list_of_policies_status = list() + for policy_id in list_agent_policies_id: + list_of_policies_status.append(agent['last_hb_data']['policy_state'][policy_id]["state"]) + if amount_of_policies_with_status == "all": + amount_of_policies_with_status = int(amount_of_policies) + amount_of_policies_applied_with_status = list_of_policies_status.count(status) + if amount_of_policies_applied_with_status == amount_of_policies_with_status: + event.set() + else: + event.wait(5) + return agent, list_agent_policies_id, amount_of_policies_applied_with_status + + @threading_wait_until def get_groups_to_which_agent_is_matching(token, agent_id, groups_matching_ids, event=None): """ @@ -598,7 +727,8 @@ def create_agent_config_file(token, agent_name, iface, agent_tags, orb_url, base tags = {"tags": create_tags_set(agent_tags)} if configs.get('ignore_ssl_and_certificate_errors', 'false').lower() == 'true': mqtt_url = f"{base_orb_address}:1883" - agent_config_file, tap = FleetAgent.config_file_of_orb_agent(agent_name, token, iface, orb_url, mqtt_url, tap_name, + agent_config_file, tap = FleetAgent.config_file_of_orb_agent(agent_name, token, iface, orb_url, mqtt_url, + tap_name, tls_verify=False, auto_provision=auto_provision, orb_cloud_mqtt_id=orb_cloud_mqtt_id, orb_cloud_mqtt_key=orb_cloud_mqtt_key, diff --git a/python-test/requirements.txt b/python-test/requirements.txt index 120225f5d..bfac9974f 100644 --- a/python-test/requirements.txt +++ b/python-test/requirements.txt @@ -10,4 +10,5 @@ deepdiff==5.8.1 jsonschema==4.6.0 mkdocs==1.3.0 mkdocs-material==8.3.3 +psutil==5.9.2 webdriver-manager==3.8.3 From e36821db8a6e28d557c8ad5afab4308f30294f6e Mon Sep 17 00:00:00 2001 From: Leonardo Parente Date: Wed, 19 Oct 2022 11:21:00 -0400 Subject: [PATCH 03/94] New flow sinker (#1896) --- sinker/backend/pktvisor/pktvisor.go | 44 ++- sinker/backend/pktvisor/pktvisor_test.go | 465 +++++++++++++++-------- sinker/backend/pktvisor/types.go | 100 +++-- 3 files changed, 402 insertions(+), 207 deletions(-) diff --git a/sinker/backend/pktvisor/pktvisor.go b/sinker/backend/pktvisor/pktvisor.go index 3d283f932..b4aca2f69 100644 --- a/sinker/backend/pktvisor/pktvisor.go +++ b/sinker/backend/pktvisor/pktvisor.go @@ -207,12 +207,12 @@ func convertFlowToPromParticle(ctxt *context, statsMap map[string]interface{}, l if label == "FlowDevices" { label = strings.ReplaceAll(label, "Devices", "") - if ok := strings.Contains(key, "|"); ok { - ctxt.deviceIF = key - key = strings.Split(key, "|")[0] - } ctxt.deviceID = key convertFlowToPromParticle(ctxt, statistic, label, tsList) + } else if label == "FlowInterfaces" { + label = strings.ReplaceAll(label, "Interfaces", "") + ctxt.deviceIF = ctxt.deviceID + "|" + key + convertFlowToPromParticle(ctxt, statistic, label, tsList) } else { convertFlowToPromParticle(ctxt, statistic, label+key, tsList) } @@ -405,18 +405,30 @@ func topNMetricsParser(label string) (string, error) { mapNMetrics["TopGeoLocPackes"] = "geo_loc" mapNMetrics["TopAsnBytes"] = "asn" mapNMetrics["TopAsnPackets"] = "asn" - mapNMetrics["TopDstIpsBytes"] = "ip" - mapNMetrics["TopDstIpsPackets"] = "ip" - mapNMetrics["TopSrcIpsBytes"] = "ip" - mapNMetrics["TopSrcIpsPackets"] = "ip" - mapNMetrics["TopDstPortsBytes"] = "port" - mapNMetrics["TopDstPortsPackets"] = "port" - mapNMetrics["TopSrcPortsBytes"] = "port" - mapNMetrics["TopSrcPortsPackets"] = "port" - mapNMetrics["TopDstIpsAndPortBytes"] = "ip_port" - mapNMetrics["TopDstIpsAndPortPackets"] = "ip_port" - 
mapNMetrics["TopSrcIpsAndPortBytes"] = "ip_port" - mapNMetrics["TopSrcIpsAndPortPackets"] = "ip_port" + mapNMetrics["TopInDstIpsBytes"] = "ip" + mapNMetrics["TopInDstIpsPackets"] = "ip" + mapNMetrics["TopInSrcIpsBytes"] = "ip" + mapNMetrics["TopInSrcIpsPackets"] = "ip" + mapNMetrics["TopInDstPortsBytes"] = "port" + mapNMetrics["TopInDstPortsPackets"] = "port" + mapNMetrics["TopInSrcPortsBytes"] = "port" + mapNMetrics["TopInSrcPortsPackets"] = "port" + mapNMetrics["TopInDstIpsAndPortBytes"] = "ip_port" + mapNMetrics["TopInDstIpsAndPortPackets"] = "ip_port" + mapNMetrics["TopInSrcIpsAndPortBytes"] = "ip_port" + mapNMetrics["TopInSrcIpsAndPortPackets"] = "ip_port" + mapNMetrics["TopOutDstIpsBytes"] = "ip" + mapNMetrics["TopOutDstIpsPackets"] = "ip" + mapNMetrics["TopOutSrcIpsBytes"] = "ip" + mapNMetrics["TopOutSrcIpsPackets"] = "ip" + mapNMetrics["TopOutDstPortsBytes"] = "port" + mapNMetrics["TopOutDstPortsPackets"] = "port" + mapNMetrics["TopOutSrcPortsBytes"] = "port" + mapNMetrics["TopOutSrcPortsPackets"] = "port" + mapNMetrics["TopOutDstIpsAndPortBytes"] = "ip_port" + mapNMetrics["TopOutDstIpsAndPortPackets"] = "ip_port" + mapNMetrics["TopOutSrcIpsAndPortBytes"] = "ip_port" + mapNMetrics["TopOutSrcIpsAndPortPackets"] = "ip_port" mapNMetrics["TopConversationsBytes"] = "conversations" mapNMetrics["TopConversationsPackets"] = "conversations" mapNMetrics["TopInInterfacesBytes"] = "interface" diff --git a/sinker/backend/pktvisor/pktvisor_test.go b/sinker/backend/pktvisor/pktvisor_test.go index 81d7a6fa1..fc33439ec 100644 --- a/sinker/backend/pktvisor/pktvisor_test.go +++ b/sinker/backend/pktvisor/pktvisor_test.go @@ -3220,6 +3220,10 @@ func TestFlowCardinalityConversion(t *testing.T) { Name: "handler", Value: "policy_flow", }, + { + Name: "device", + Value: "192.168.4.7", + }, } cases := map[string]struct { @@ -3233,8 +3237,12 @@ func TestFlowCardinalityConversion(t *testing.T) { "flow": { "devices":{ "192.168.4.7": { - "cardinality": { - "dst_ips_out": 4 + "interfaces": { + "eth0": { + "cardinality": { + "dst_ips_out": 4 + } + } } } } @@ -3243,8 +3251,8 @@ func TestFlowCardinalityConversion(t *testing.T) { }`), expected: prometheus.TimeSeries{ Labels: append(prependLabel(append(commonLabels, prometheus.Label{ - Name: "device", - Value: "192.168.4.7", + Name: "device_interface", + Value: "192.168.4.7|eth0", }), prometheus.Label{ Name: "__name__", Value: "flow_cardinality_dst_ips_out", @@ -3261,10 +3269,14 @@ func TestFlowCardinalityConversion(t *testing.T) { "flow": { "devices":{ "192.168.4.7": { - "cardinality": { - "dst_ports_out": 31, - "src_ips_in": 4, - "src_ports_in": 31 + "interfaces": { + "37": { + "cardinality": { + "dst_ports_out": 31, + "src_ips_in": 4, + "src_ports_in": 31 + } + } } } } @@ -3273,8 +3285,8 @@ func TestFlowCardinalityConversion(t *testing.T) { }`), expected: prometheus.TimeSeries{ Labels: append(prependLabel(append(commonLabels, prometheus.Label{ - Name: "device", - Value: "192.168.4.7", + Name: "device_interface", + Value: "192.168.4.7|37", }), prometheus.Label{ Name: "__name__", Value: "flow_cardinality_dst_ports_out", @@ -3291,9 +3303,13 @@ func TestFlowCardinalityConversion(t *testing.T) { "flow": { "devices":{ "192.168.4.7": { - "cardinality": { - "src_ips_in": 4, - "src_ports_in": 31 + "interfaces": { + "37": { + "cardinality": { + "src_ips_in": 4, + "src_ports_in": 31 + } + } } } } @@ -3301,10 +3317,11 @@ func TestFlowCardinalityConversion(t *testing.T) { } }`), expected: prometheus.TimeSeries{ - Labels: append(prependLabel(append(commonLabels, 
prometheus.Label{ - Name: "device", - Value: "192.168.4.7", - }), prometheus.Label{ + Labels: append(prependLabel(append(commonLabels, + prometheus.Label{ + Name: "device_interface", + Value: "192.168.4.7|37", + }), prometheus.Label{ Name: "__name__", Value: "flow_cardinality_src_ips_in", })), @@ -3320,8 +3337,12 @@ func TestFlowCardinalityConversion(t *testing.T) { "flow": { "devices":{ "192.168.4.7": { - "cardinality": { - "src_ports_in": 31 + "interfaces": { + "eth0": { + "cardinality": { + "src_ports_in": 31 + } + } } } } @@ -3330,8 +3351,8 @@ func TestFlowCardinalityConversion(t *testing.T) { }`), expected: prometheus.TimeSeries{ Labels: append(prependLabel(append(commonLabels, prometheus.Label{ - Name: "device", - Value: "192.168.4.7", + Name: "device_interface", + Value: "192.168.4.7|eth0", }), prometheus.Label{ Name: "__name__", Value: "flow_cardinality_src_ports_in", @@ -3423,6 +3444,10 @@ func TestFlowConversion(t *testing.T) { Name: "handler", Value: "policy_flow", }, + { + Name: "device", + Value: "192.168.4.7", + }, } cases := map[string]struct { @@ -3434,7 +3459,11 @@ func TestFlowConversion(t *testing.T) { { "policy_flow": { "flow": { - "records_filtered": 8 + "devices":{ + "192.168.4.7": { + "records_filtered": 8 + } + } } } }`), @@ -3453,7 +3482,11 @@ func TestFlowConversion(t *testing.T) { { "policy_flow": { "flow": { - "records_total": 8 + "devices":{ + "192.168.4.7": { + "records_total": 8 + } + } } } }`), @@ -3467,14 +3500,18 @@ func TestFlowConversion(t *testing.T) { }, }, }, - "FlowPayloadIpv4": { + "FlowPayloadInIpv4Bytes": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "ipv4": 52785 + "interfaces": { + "37": { + "in_ipv4_bytes": 52785 + } + } } } } @@ -3482,25 +3519,29 @@ func TestFlowConversion(t *testing.T) { }`), expected: prometheus.TimeSeries{ Labels: append(prependLabel(append(commonLabels, prometheus.Label{ - Name: "device", - Value: "192.168.4.7", + Name: "device_interface", + Value: "192.168.4.7|37", }), prometheus.Label{ Name: "__name__", - Value: "flow_ipv4", + Value: "flow_in_ipv4_bytes", })), Datapoint: prometheus.Datapoint{ Value: 52785, }, }, }, - "FlowPayloadIpv6": { + "FlowPayloadOutIpv6Packets": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "ipv6": 52785 + "interfaces": { + "37": { + "out_ipv6_packets": 52785 + } + } } } } @@ -3508,25 +3549,29 @@ func TestFlowConversion(t *testing.T) { }`), expected: prometheus.TimeSeries{ Labels: append(prependLabel(append(commonLabels, prometheus.Label{ - Name: "device", - Value: "192.168.4.7", + Name: "device_interface", + Value: "192.168.4.7|37", }), prometheus.Label{ Name: "__name__", - Value: "flow_ipv6", + Value: "flow_out_ipv6_packets", })), Datapoint: prometheus.Datapoint{ Value: 52785, }, }, }, - "FlowPayloadOtherL4": { + "FlowPayloadInOtherL4Bytes": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "other_l4": 52785 + "interfaces": { + "37": { + "in_other_l4_bytes": 52785 + } + } } } } @@ -3534,25 +3579,29 @@ func TestFlowConversion(t *testing.T) { }`), expected: prometheus.TimeSeries{ Labels: append(prependLabel(append(commonLabels, prometheus.Label{ - Name: "device", - Value: "192.168.4.7", + Name: "device_interface", + Value: "192.168.4.7|37", }), prometheus.Label{ Name: "__name__", - Value: "flow_other_l4", + Value: "flow_in_other_l4_bytes", })), Datapoint: prometheus.Datapoint{ Value: 52785, }, }, }, - "FlowPayloadTCP": { + "FlowPayloadOutTCPPackets": { data: []byte(` { "policy_flow": { "flow": { "devices":{ 
"192.168.4.7": { - "tcp": 52785 + "interfaces": { + "37": { + "out_tcp_packets": 52785 + } + } } } } @@ -3560,25 +3609,29 @@ func TestFlowConversion(t *testing.T) { }`), expected: prometheus.TimeSeries{ Labels: append(prependLabel(append(commonLabels, prometheus.Label{ - Name: "device", - Value: "192.168.4.7", + Name: "device_interface", + Value: "192.168.4.7|37", }), prometheus.Label{ Name: "__name__", - Value: "flow_tcp", + Value: "flow_out_tcp_packets", })), Datapoint: prometheus.Datapoint{ Value: 52785, }, }, }, - "FlowPayloadUdp": { + "FlowPayloadInUdpPackets": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "udp": 52785 + "interfaces": { + "37": { + "in_udp_bytes": 52785 + } + } } } } @@ -3586,11 +3639,11 @@ func TestFlowConversion(t *testing.T) { }`), expected: prometheus.TimeSeries{ Labels: append(prependLabel(append(commonLabels, prometheus.Label{ - Name: "device", - Value: "192.168.4.7", + Name: "device_interface", + Value: "192.168.4.7|37", }), prometheus.Label{ Name: "__name__", - Value: "flow_udp", + Value: "flow_in_udp_bytes", })), Datapoint: prometheus.Datapoint{ Value: 52785, @@ -3651,19 +3704,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { data []byte expected prometheus.TimeSeries }{ - "FlowTopDstIpsAndPortBytes": { + "FlowTopInDstIpsAndPortBytes": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_dst_ips_and_port_bytes": [ - { - "estimate": 8, - "name": "10.4.2.2:5000" - } - ] + "interfaces": { + "38": { + "top_in_dst_ips_and_port_bytes": [ + { + "estimate": 8, + "name": "10.4.2.2:5000" + } + ] + } + } } } } @@ -3673,7 +3730,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_dst_ips_and_port_bytes", + Value: "flow_top_in_dst_ips_and_port_bytes", }, { Name: "instance", @@ -3703,6 +3760,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|38", + }, { Name: "ip_port", Value: "10.4.2.2:5000", @@ -3713,19 +3774,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { }, }, }, - "FlowTopDstIpsAndPortPackets": { + "FlowTopOutDstIpsAndPortPackets": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_dst_ips_and_port_packets": [ - { - "estimate": 8, - "name": "10.4.2.2:5000" - } - ] + "interfaces": { + "38": { + "top_out_dst_ips_and_port_packets": [ + { + "estimate": 8, + "name": "10.4.2.2:5000" + } + ] + } + } } } } @@ -3735,7 +3800,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_dst_ips_and_port_packets", + Value: "flow_top_out_dst_ips_and_port_packets", }, { Name: "instance", @@ -3765,6 +3830,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|38", + }, { Name: "ip_port", Value: "10.4.2.2:5000", @@ -3775,19 +3844,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { }, }, }, - "FlowTopDstIpsBytes": { + "FlowTopInDstIpsBytes": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_dst_ips_bytes": [ - { - "estimate": 8, - "name": "10.4.2.2" - } - ] + "interfaces": { + "37": { + "top_in_dst_ips_bytes": [ + { + "estimate": 8, + "name": "10.4.2.2" + } + ] + } + } } } } @@ -3797,7 +3870,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: 
"flow_top_dst_ips_bytes", + Value: "flow_top_in_dst_ips_bytes", }, { Name: "instance", @@ -3827,6 +3900,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|37", + }, { Name: "ip", Value: "10.4.2.2", @@ -3837,19 +3914,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { }, }, }, - "FlowTopDstIpsPackets": { + "FlowTopInDstIpsPackets": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_dst_ips_packets": [ - { - "estimate": 8, - "name": "10.4.2.2" - } - ] + "interfaces": { + "4": { + "top_in_dst_ips_packets": [ + { + "estimate": 8, + "name": "10.4.2.2" + } + ] + } + } } } } @@ -3859,7 +3940,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_dst_ips_packets", + Value: "flow_top_in_dst_ips_packets", }, { Name: "instance", @@ -3889,6 +3970,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|4", + }, { Name: "ip", Value: "10.4.2.2", @@ -3899,19 +3984,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { }, }, }, - "FlowTopDstPortsBytes": { + "FlowTopOutDstPortsBytes": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_dst_ports_bytes": [ - { - "estimate": 8, - "name": "5000" - } - ] + "interfaces": { + "37": { + "top_out_dst_ports_bytes": [ + { + "estimate": 8, + "name": "5000" + } + ] + } + } } } } @@ -3921,7 +4010,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_dst_ports_bytes", + Value: "flow_top_out_dst_ports_bytes", }, { Name: "instance", @@ -3951,6 +4040,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|37", + }, { Name: "port", Value: "5000", @@ -3961,19 +4054,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { }, }, }, - "FlowTopDstPortsPackets": { + "FlowTopDstInPortsPackets": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_dst_ports_packets": [ - { - "estimate": 8, - "name": "5000" - } - ] + "interfaces": { + "37": { + "top_in_dst_ports_packets": [ + { + "estimate": 8, + "name": "5000" + } + ] + } + } } } } @@ -3983,7 +4080,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_dst_ports_packets", + Value: "flow_top_in_dst_ports_packets", }, { Name: "instance", @@ -4013,6 +4110,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|37", + }, { Name: "port", Value: "5000", @@ -4147,19 +4248,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { }, }, }, - "FlowTopOutInterfacesBytes": { + "FlowTopOutSrcIpsBytes": { data: []byte(` { "policy_flow": { "flow": { "devices":{ - "192.168.4.7|37": { - "top_out_interfaces_bytes": [ - { - "estimate": 8, - "name": "200" - } - ] + "192.168.4.7": { + "interfaces": { + "37": { + "top_out_src_ips_bytes": [ + { + "estimate": 15267, + "name": "192.168.0.1" + } + ] + } + } } } } @@ -4169,7 +4274,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_out_interfaces_bytes", + Value: "flow_top_out_src_ips_bytes", }, { Name: "instance", @@ -4204,12 +4309,12 @@ func 
TestFlowTopKMetricsConversion(t *testing.T) { Value: "192.168.4.7|37", }, { - Name: "interface", - Value: "200", + Name: "ip", + Value: "192.168.0.1", }, }, Datapoint: prometheus.Datapoint{ - Value: 8, + Value: 15267, }, }, }, @@ -4274,19 +4379,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Value: 8, }, }, - }, "FlowTopSrcIpsAndPortBytes": { + }, "FlowTopInSrcIpsAndPortBytes": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_src_ips_and_port_bytes": [ - { - "estimate": 8, - "name": "10.4.2.2:5000" - } - ] + "interfaces": { + "37": { + "top_in_src_ips_and_port_bytes": [ + { + "estimate": 8, + "name": "10.4.2.2:5000" + } + ] + } + } } } } @@ -4296,7 +4405,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_src_ips_and_port_bytes", + Value: "flow_top_in_src_ips_and_port_bytes", }, { Name: "instance", @@ -4326,6 +4435,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|37", + }, { Name: "ip_port", Value: "10.4.2.2:5000", @@ -4336,19 +4449,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { }, }, }, - "FlowTopSrcIpsAndPortPackets": { + "FlowTopOutSrcIpsAndPortPackets": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_src_ips_and_port_packets": [ - { - "estimate": 8, - "name": "10.4.2.2:5000" - } - ] + "interfaces": { + "eth0": { + "top_out_src_ips_and_port_packets": [ + { + "estimate": 8, + "name": "10.4.2.2:5000" + } + ] + } + } } } } @@ -4358,7 +4475,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_src_ips_and_port_packets", + Value: "flow_top_out_src_ips_and_port_packets", }, { Name: "instance", @@ -4388,6 +4505,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|eth0", + }, { Name: "ip_port", Value: "10.4.2.2:5000", @@ -4398,19 +4519,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { }, }, }, - "FlowTopSrcIpsBytes": { + "FlowTopInSrcIpsBytes": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_src_ips_bytes": [ - { - "estimate": 8, - "name": "10.4.2.2" - } - ] + "interfaces": { + "eth1": { + "top_in_src_ips_bytes": [ + { + "estimate": 8, + "name": "10.4.2.2" + } + ] + } + } } } } @@ -4420,7 +4545,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_src_ips_bytes", + Value: "flow_top_in_src_ips_bytes", }, { Name: "instance", @@ -4450,6 +4575,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|eth1", + }, { Name: "ip", Value: "10.4.2.2", @@ -4460,19 +4589,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { }, }, }, - "FlowTopSrcIpsPackets": { + "FlowTopOutSrcIpsPackets": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_src_ips_packets": [ - { - "estimate": 8, - "name": "10.4.2.2" - } - ] + "interfaces": { + "36": { + "top_out_src_ips_packets": [ + { + "estimate": 8, + "name": "10.4.2.2" + } + ] + } + } } } } @@ -4482,7 +4615,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_src_ips_packets", + Value: "flow_top_out_src_ips_packets", }, { Name: "instance", @@ 
-4512,6 +4645,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|36", + }, { Name: "ip", Value: "10.4.2.2", @@ -4522,19 +4659,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { }, }, }, - "FlowTopSrcPortsBytes": { + "FlowTopInSrcPortsBytes": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_src_ports_bytes": [ - { - "estimate": 8, - "name": "4500" - } - ] + "interfaces": { + "38": { + "top_in_src_ports_bytes": [ + { + "estimate": 8, + "name": "4500" + } + ] + } + } } } } @@ -4544,7 +4685,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_src_ports_bytes", + Value: "flow_top_in_src_ports_bytes", }, { Name: "instance", @@ -4574,6 +4715,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|38", + }, { Name: "port", Value: "4500", @@ -4584,19 +4729,23 @@ func TestFlowTopKMetricsConversion(t *testing.T) { }, }, }, - "FlowTopSrcPortsPackets": { + "FlowTopOutSrcPortsPackets": { data: []byte(` { "policy_flow": { "flow": { "devices":{ "192.168.4.7": { - "top_src_ports_packets": [ - { - "estimate": 8, - "name": "4500" - } - ] + "interfaces": { + "eth0": { + "top_out_src_ports_packets": [ + { + "estimate": 8, + "name": "4500" + } + ] + } + } } } } @@ -4606,7 +4755,7 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Labels: []prometheus.Label{ { Name: "__name__", - Value: "flow_top_src_ports_packets", + Value: "flow_top_out_src_ports_packets", }, { Name: "instance", @@ -4636,6 +4785,10 @@ func TestFlowTopKMetricsConversion(t *testing.T) { Name: "device", Value: "192.168.4.7", }, + { + Name: "device_interface", + Value: "192.168.4.7|eth0", + }, { Name: "port", Value: "4500", diff --git a/sinker/backend/pktvisor/types.go b/sinker/backend/pktvisor/types.go index 2d3e5584f..09245c4ef 100644 --- a/sinker/backend/pktvisor/types.go +++ b/sinker/backend/pktvisor/types.go @@ -170,47 +170,77 @@ type PeriodPayload struct { // FlowPayload contains the information specifically for the Flow protocol type FlowPayload struct { Devices map[string]struct { - Cardinality struct { - Conversations int64 `mapstructure:"conversations"` - DstIpsOut int64 `mapstructure:"dst_ips_out"` - DstPortsOut int64 `mapstructure:"dst_ports_out"` - SrcIpsIn int64 `mapstructure:"src_ips_in"` - SrcPortsIn int64 `mapstructure:"src_ports_in"` - } `mapstructure:"cardinality"` RecordsFiltered int64 `mapstructure:"records_filtered"` - Ipv4 int64 `mapstructure:"ipv4"` - Ipv6 int64 `mapstructure:"ipv6"` - OtherL4 int64 `mapstructure:"other_l4"` - TCP int64 `mapstructure:"tcp"` - UDP int64 `mapstructure:"udp"` - TopGeoLocBytes []NameCount `mapstructure:"top_geoLoc_bytes"` - TopGeoLocPackets []NameCount `mapstructure:"top_geoLoc_packets"` - TopAsnBytes []NameCount `mapstructure:"top_ASN_bytes"` - TopAsnPackets []NameCount `mapstructure:"top_ASN_packets"` - TopDstIpsAndPortBytes []NameCount `mapstructure:"top_dst_ips_and_port_bytes"` - TopDstIpsAndPortPackets []NameCount `mapstructure:"top_dst_ips_and_port_packets"` - TopDstIpsBytes []NameCount `mapstructure:"top_dst_ips_bytes"` - TopDstIpsPackets []NameCount `mapstructure:"top_dst_ips_packets"` - TopDstPortsBytes []NameCount `mapstructure:"top_dst_ports_bytes"` - TopDstPortsPackets []NameCount `mapstructure:"top_dst_ports_packets"` + RecordsTotal int64 `mapstructure:"records_total"` 
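+ // NOTE: per-interface flow metrics now nest under Devices -> Interfaces, keyed
+ // by interface name or index; the sinker flattens each (device, interface)
+ // pair into a single "device_interface" label of the form
+ // "<device>|<interface>" (for example "192.168.4.7|37"), which is what the
+ // conversion tests above assert.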
TopInInterfacesBytes []NameCount `mapstructure:"top_in_interfaces_bytes"` TopInInterfacesPackets []NameCount `mapstructure:"top_in_interfaces_packets"` TopOutInterfacesBytes []NameCount `mapstructure:"top_out_interfaces_bytes"` TopOutInterfacesPackets []NameCount `mapstructure:"top_out_interfaces_packets"` - TopSrcIpsAndPortBytes []NameCount `mapstructure:"top_src_ips_and_port_bytes"` - TopSrcIpsAndPortPackets []NameCount `mapstructure:"top_src_ips_and_port_packets"` - TopConversationsBytes []NameCount `mapstructure:"top_conversations_bytes"` - TopConversationsPackets []NameCount `mapstructure:"top_conversations_packets"` - TopSrcIpsBytes []NameCount `mapstructure:"top_src_ips_bytes"` - TopSrcIpsPackets []NameCount `mapstructure:"top_src_ips_packets"` - TopSrcPortsBytes []NameCount `mapstructure:"top_src_ports_bytes"` - TopSrcPortsPackets []NameCount `mapstructure:"top_src_ports_packets"` - RecordsTotal int64 `mapstructure:"records_total"` - Udp int64 `mapstructure:"udp"` + Interfaces map[string]struct { + Cardinality struct { + Conversations int64 `mapstructure:"conversations"` + DstIpsOut int64 `mapstructure:"dst_ips_out"` + DstPortsOut int64 `mapstructure:"dst_ports_out"` + SrcIpsIn int64 `mapstructure:"src_ips_in"` + SrcPortsIn int64 `mapstructure:"src_ports_in"` + } `mapstructure:"cardinality"` + InIpv4Bytes int64 `mapstructure:"in_ipv4_bytes"` + InIpv4Packets int64 `mapstructure:"in_ipv4_packets"` + InIpv6Bytes int64 `mapstructure:"in_ipv6_bytes"` + InIpv6Packets int64 `mapstructure:"in_ipv6_packets"` + InOtherL4Bytes int64 `mapstructure:"in_other_l4_bytes"` + InOtherL4Packets int64 `mapstructure:"in_other_l4_packets"` + InTcpBytes int64 `mapstructure:"in_tcp_bytes"` + InTcpPackets int64 `mapstructure:"in_tcp_packets"` + InUdpBytes int64 `mapstructure:"in_udp_bytes"` + InUdpPackets int64 `mapstructure:"in_udp_packets"` + InBytes int64 `mapstructure:"in_bytes"` + InPackets int64 `mapstructure:"in_packets"` + OutIpv4Bytes int64 `mapstructure:"out_ipv4_bytes"` + OutIpv4Packets int64 `mapstructure:"out_ipv4_packets"` + OutIpv6Bytes int64 `mapstructure:"out_ipv6_bytes"` + OutIpv6Packets int64 `mapstructure:"out_ipv6_packets"` + OutOtherL4Bytes int64 `mapstructure:"out_other_l4_bytes"` + OutOtherL4Packets int64 `mapstructure:"out_other_l4_packets"` + OutTcpBytes int64 `mapstructure:"out_tcp_bytes"` + OutTcpPackets int64 `mapstructure:"out_tcp_packets"` + OutUdpBytes int64 `mapstructure:"out_udp_bytes"` + OutUdpPackets int64 `mapstructure:"out_udp_packets"` + OutBytes int64 `mapstructure:"out_bytes"` + OutPackets int64 `mapstructure:"out_packets"` + TopInSrcIpsBytes []NameCount `mapstructure:"top_in_src_ips_bytes"` + TopInSrcIpsPackets []NameCount `mapstructure:"top_in_src_ips_packets"` + TopInSrcPortsBytes []NameCount `mapstructure:"top_in_src_ports_bytes"` + TopInSrcPortsPackets []NameCount `mapstructure:"top_in_src_ports_packets"` + TopInSrcIpsAndPortBytes []NameCount `mapstructure:"top_in_src_ips_and_port_bytes"` + TopInSrcIpsAndPortPackets []NameCount `mapstructure:"top_in_src_ips_and_port_packets"` + TopInDstIpsBytes []NameCount `mapstructure:"top_in_dst_ips_bytes"` + TopInDstIpsPackets []NameCount `mapstructure:"top_in_dst_ips_packets"` + TopInDstPortsBytes []NameCount `mapstructure:"top_in_dst_ports_bytes"` + TopInDstPortsPackets []NameCount `mapstructure:"top_in_dst_ports_packets"` + TopInDstIpsAndPortBytes []NameCount `mapstructure:"top_in_dst_ips_and_port_bytes"` + TopInDstIpsAndPortPackets []NameCount `mapstructure:"top_in_dst_ips_and_port_packets"` + TopOutSrcIpsBytes 
[]NameCount `mapstructure:"top_out_src_ips_bytes"` + TopOutSrcIpsPackets []NameCount `mapstructure:"top_out_src_ips_packets"` + TopOutSrcPortsBytes []NameCount `mapstructure:"top_out_src_ports_bytes"` + TopOutSrcPortsPackets []NameCount `mapstructure:"top_out_src_ports_packets"` + TopOutSrcIpsAndPortBytes []NameCount `mapstructure:"top_out_src_ips_and_port_bytes"` + TopOutSrcIpsAndPortPackets []NameCount `mapstructure:"top_out_src_ips_and_port_packets"` + TopOutDstIpsBytes []NameCount `mapstructure:"top_out_dst_ips_bytes"` + TopOutDstIpsPackets []NameCount `mapstructure:"top_out_dst_ips_packets"` + TopOutDstPortsBytes []NameCount `mapstructure:"top_out_dst_ports_bytes"` + TopOutDstPortsPackets []NameCount `mapstructure:"top_out_dst_ports_packets"` + TopOutDstIpsAndPortBytes []NameCount `mapstructure:"top_out_dst_ips_and_port_bytes"` + TopOutDstIpsAndPortPackets []NameCount `mapstructure:"top_out_dst_ips_and_port_packets"` + TopConversationsBytes []NameCount `mapstructure:"top_conversations_bytes"` + TopConversationsPackets []NameCount `mapstructure:"top_conversations_packets"` + TopGeoLocBytes []NameCount `mapstructure:"top_geoLoc_bytes"` + TopGeoLocPackets []NameCount `mapstructure:"top_geoLoc_packets"` + TopAsnBytes []NameCount `mapstructure:"top_ASN_bytes"` + TopAsnPackets []NameCount `mapstructure:"top_ASN_packets"` + } `mapstructure:"interfaces"` } `mapstructure:"devices"` - RecordsFiltered int64 `mapstructure:"records_filtered"` - RecordsTotal int64 `mapstructure:"records_total"` - Period PeriodPayload `mapstructure:"period"` + Period PeriodPayload `mapstructure:"period"` } // StatSnapshot is a snapshot of a given period from pktvisord From 944895f133c5c1d19b61f4bc30c3b11962ff539b Mon Sep 17 00:00:00 2001 From: Luiz Henrique Pegoraro Date: Wed, 19 Oct 2022 15:18:39 -0300 Subject: [PATCH 04/94] feat(otel): add redis topics and mechanisms and OTEL diagrams explaining new service's specifications (#1882) * feat(sinks/sinker): wip add redis topic to handle config yaml. Signed-off-by: Luiz Pegoraro * feat(otel): add diagrams to explain new head on otel. Signed-off-by: Luiz Pegoraro * feat(otel): add create diagram. Signed-off-by: Luiz Pegoraro * feat(otel): fix missing dataset. Signed-off-by: Luiz Pegoraro * feat(otel): update diagrams Signed-off-by: Luiz Pegoraro * feat(sinks/sinker): wip add redis topic to handle config yaml. Signed-off-by: Luiz Pegoraro * feat(maestro): wip, need to fetch config from redis cache Signed-off-by: Luiz Pegoraro * feat(maestro): wip adjusting to each event and fixing collector creation/update on Sinks, only in sinker update. Signed-off-by: Luiz Pegoraro * feat(maestro): wip adjusting to each event and fixing collector creation/update on Sinks, only in sinker update. Signed-off-by: Luiz Pegoraro * feat(maestro): wip adjusting to each event and fixing collector creation/update on Sinks, only in sinker update. Signed-off-by: Luiz Pegoraro * feat(maestro): wip adjusting to each event and fixing collector creation/update on Sinks, only in sinker update. 
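For context, a minimal sketch of the Redis hash mechanism described above (not part of this patch; it assumes a go-redis v8 client, a local Redis address, and the hypothetical sink ID "sink-222"). The authoritative handlers land in maestro/redis/consumer/hashset.go further down:

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// deploymentKey mirrors the hash key used by maestro's consumer.
const deploymentKey = "orb.sinks.deployment"

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address

	// On a sink create/update event, the rendered otel-collector manifest is
	// cached in a single Redis hash, keyed by sink ID.
	if err := client.HSet(ctx, deploymentKey, "sink-222", "<rendered manifest>").Err(); err != nil {
		fmt.Println("hset failed:", err)
		return
	}

	// A later update/delete handler reads it back by the same key.
	manifest, err := client.HGet(ctx, deploymentKey, "sink-222").Result()
	if err != nil {
		fmt.Println("hget failed:", err)
		return
	}
	fmt.Printf("cached manifest: %d bytes\n", len(manifest))
}
```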
Signed-off-by: Luiz Pegoraro Signed-off-by: Luiz Pegoraro --- cmd/maestro/main.go | 2 +- maestro/config_builder.go | 313 +++++++++++++++++++++ {sinker => maestro}/config_builder_test.go | 2 +- maestro/maestro.go | 2 +- maestro/maestro_service.go | 230 ++------------- maestro/redis/consumer/events.go | 3 +- maestro/redis/consumer/hashset.go | 62 ++++ maestro/redis/consumer/streams.go | 60 +--- maestro/service.go | 9 +- sinker/config_builder.go | 142 ---------- sinker/config_state_check.go | 6 +- sinker/otel/README.md | 64 +++++ sinker/redis/consumer/streams.go | 9 +- sinks/redis/consumer/streams.go | 2 + sinks/sinks.go | 5 +- 15 files changed, 496 insertions(+), 415 deletions(-) create mode 100644 maestro/config_builder.go rename {sinker => maestro}/config_builder_test.go (99%) create mode 100644 maestro/redis/consumer/hashset.go delete mode 100644 sinker/config_builder.go diff --git a/cmd/maestro/main.go b/cmd/maestro/main.go index df8e67e35..56e14576f 100644 --- a/cmd/maestro/main.go +++ b/cmd/maestro/main.go @@ -97,7 +97,7 @@ func connectToRedis(redisURL, redisPass, redisDB string, logger *zap.Logger) *r. } func newMaestroService(logger *zap.Logger, esClient *r.Client) maestro.MaestroService { - svc := maestro.NewMaestroService(logger) + svc := maestro.NewMaestroService(logger, esClient) return svc } diff --git a/maestro/config_builder.go b/maestro/config_builder.go new file mode 100644 index 000000000..64396111e --- /dev/null +++ b/maestro/config_builder.go @@ -0,0 +1,313 @@ +package maestro + +import ( + "context" + "fmt" + "github.com/ns1labs/orb/pkg/errors" + "gopkg.in/yaml.v2" + "strings" +) + +var k8sOtelCollector = ` +{ + "kind": "List", + "apiVersion": "v1", + "metadata": {}, + "items": [ + { + "kind": "ConfigMap", + "apiVersion": "v1", + "metadata": { + "name": "otel-collector-config-SINK_ID", + "creationTimestamp": null + }, + "data": { + "config.yaml": "SINK_CONFIG" + } + }, + { + "kind": "Deployment", + "apiVersion": "apps/v1", + "metadata": { + "name": "otel-SINK_ID", + "creationTimestamp": null, + "labels": { + "app": "opentelemetry", + "component": "otel-collector" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "app": "opentelemetry", + "component": "otel-collector-SINK_ID" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "app": "opentelemetry", + "component": "otel-collector-SINK_ID" + } + }, + "spec": { + "volumes": [ + { + "name": "varlog", + "hostPath": { + "path": "/var/log", + "type": "" + } + }, + { + "name": "varlibdockercontainers", + "hostPath": { + "path": "/var/lib/docker/containers", + "type": "" + } + }, + { + "name": "data", + "configMap": { + "name": "otel-collector-config-SINK_ID", + "defaultMode": 420 + } + } + ], + "containers": [ + { + "name": "otel-collector", + "image": "otel/opentelemetry-collector-contrib:0.60.0", + "resources": { + "limits": { + "cpu": "100m", + "memory": "200Mi" + }, + "requests": { + "cpu": "100m", + "memory": "200Mi" + } + }, + "volumeMounts": [ + { + "name": "varlog", + "readOnly": true, + "mountPath": "/var/log" + }, + { + "name": "varlibdockercontainers", + "readOnly": true, + "mountPath": "/var/lib/docker/containers" + }, + { + "name": "data", + "readOnly": true, + "mountPath": "/etc/otelcol-contrib/config.yaml", + "subPath": "config.yaml" + } + ], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "IfNotPresent" + } + ], + "restartPolicy": "Always", + "terminationGracePeriodSeconds": 30, + 
"dnsPolicy": "ClusterFirst", + "securityContext": {}, + "schedulerName": "default-scheduler" + } + }, + "strategy": { + "type": "RollingUpdate", + "rollingUpdate": { + "maxUnavailable": "25%", + "maxSurge": "25%" + } + }, + "revisionHistoryLimit": 10, + "progressDeadlineSeconds": 600 + }, + "status": {} + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "otel-SINK_ID", + "creationTimestamp": null, + "labels": { + "app": "opentelemetry", + "component": "otel-collector-SINK_ID" + } + }, + "spec": { + "ports": [ + { + "name": "metrics", + "protocol": "TCP", + "port": 8888, + "targetPort": 8888 + } + ], + "selector": { + "component": "otel-collector-SINK_ID" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + } + ] +} +` + +func GetDeploymentJson(sinkId, sinkUrl, sinkUsername, sinkPassword string) (string, error) { + // prepare manifest + manifest := strings.Replace(k8sOtelCollector, "SINK_ID", sinkId, -1) + config, err := ReturnConfigYamlFromSink(context.Background(), "orb-live-stg-kafka.orb-live.svc.cluster.local:9092", sinkId, sinkUrl, sinkUsername, sinkPassword) + if err != nil { + return "", errors.Wrap(errors.New("failed to build YAML"), err) + } + manifest = strings.Replace(manifest, "SINK_CONFIG", config, -1) + return manifest, nil +} + +// ReturnConfigYamlFromSink this is the main method, which will generate the YAML file from the +func ReturnConfigYamlFromSink(_ context.Context, kafkaUrlConfig, sinkId, sinkUrl, sinkUsername, sinkPassword string) (string, error) { + + config := OtelConfigFile{ + Receivers: Receivers{ + Kafka: KafkaReceiver{ + Brokers: []string{kafkaUrlConfig}, + Topic: fmt.Sprintf("otlp_metrics-%s", sinkId), + ProtocolVersion: "2.0.0", // Leaving default of over 2.0.0 + }, + }, + Extensions: &Extensions{ + HealthCheckExtConfig: &HealthCheckExtension{}, + PProf: &PProfExtension{ + Endpoint: ":1888", // Leaving default for now, will need to change with more processes + }, + BasicAuth: &BasicAuthenticationExtension{ + ClientAuth: &struct { + Username string `json:"username" yaml:"username"` + Password string `json:"password" yaml:"password"` + }{Username: sinkUsername, Password: sinkPassword}, + }, + }, + Exporters: Exporters{ + PrometheusRemoteWrite: &PrometheusRemoteWriteExporterConfig{ + Endpoint: sinkUrl, + auth: struct { + Authenticator string `json:"authenticator" yaml:"authenticator"` + }{Authenticator: "basicauth/exporter"}, + }, + }, + Service: ServiceConfig{ + Extensions: []string{"pprof", "health_check", "basicauth/exporter"}, + Pipelines: struct { + Metrics struct { + Receivers []string `json:"receivers" yaml:"receivers"` + Processors []string `json:"processors,omitempty" yaml:"processors,omitempty"` + Exporters []string `json:"exporters" yaml:"exporters"` + } `json:"metrics" yaml:"metrics"` + }{ + Metrics: struct { + Receivers []string `json:"receivers" yaml:"receivers"` + Processors []string `json:"processors,omitempty" yaml:"processors,omitempty"` + Exporters []string `json:"exporters" yaml:"exporters"` + }{ + Receivers: []string{"kafka"}, + Exporters: []string{"prometheusremotewrite"}, + }, + }, + }, + } + marshal, err := yaml.Marshal(&config) + if err != nil { + return "", err + } + returnedString := "---\n" + string(marshal) + return returnedString, nil + +} + +type OtelConfigFile struct { + Receivers Receivers `json:"receivers" yaml:"receivers"` + Processors *Processors `json:"processors,omitempty" yaml:"processors,omitempty"` + Extensions *Extensions 
`json:"extensions,omitempty" yaml:"extensions,omitempty"` + Exporters Exporters `json:"exporters" yaml:"exporters"` + Service ServiceConfig `json:"service" yaml:"service"` +} + +// Receivers only supports Kafka for now +type Receivers struct { + Kafka KafkaReceiver `json:"kafka" yaml:"kafka"` +} + +type KafkaReceiver struct { + Brokers []string `json:"brokers" yaml:"brokers"` + Topic string `json:"topic" yaml:"topic"` + ProtocolVersion string `json:"protocol_version" yaml:"protocol_version"` +} + +type Processors struct { +} + +type Extensions struct { + HealthCheckExtConfig *HealthCheckExtension `json:"health_check,omitempty" yaml:"health_check,omitempty"` + PProf *PProfExtension `json:"pprof,omitempty" yaml:"pprof,omitempty"` + ZPages *ZPagesExtension `json:"zpages,omitempty" yaml:"zpages,omitempty"` + // Exporters Authentication + BasicAuth *BasicAuthenticationExtension `json:"basicauth/exporter,omitempty" yaml:"basicauth/exporter,omitempty"` +} + +type HealthCheckExtension struct { + CollectorPipeline *struct { + Enabled bool `json:"enabled" yaml:"enabled"` + Interval string `json:"interval" yaml:"interval"` + FailureThreshold int32 `json:"exporter_failure_threshold" yaml:"exporter_failure_threshold"` + } `json:"check_collector_pipeline,omitempty" yaml:"check_collector_pipeline,omitempty"` +} + +type PProfExtension struct { + Endpoint string `json:"endpoint" yaml:"endpoint"` +} + +type ZPagesExtension struct { + Endpoint string `json:"endpoint" yaml:"endpoint"` +} + +type BasicAuthenticationExtension struct { + ClientAuth *struct { + Username string `json:"username" yaml:"username"` + Password string `json:"password" yaml:"password"` + } `json:"client_auth" yaml:"client_auth"` +} + +type Exporters struct { + PrometheusRemoteWrite *PrometheusRemoteWriteExporterConfig `json:"prometheusremotewrite,omitempty" yaml:"prometheusremotewrite,omitempty"` +} + +type PrometheusRemoteWriteExporterConfig struct { + Endpoint string `json:"endpoint" yaml:"endpoint"` + auth struct { + Authenticator string `json:"authenticator" yaml:"authenticator"` + } +} + +type ServiceConfig struct { + Extensions []string `json:"extensions,omitempty" yaml:"extensions,omitempty"` + Pipelines struct { + Metrics struct { + Receivers []string `json:"receivers" yaml:"receivers"` + Processors []string `json:"processors,omitempty" yaml:"processors,omitempty"` + Exporters []string `json:"exporters" yaml:"exporters"` + } `json:"metrics" yaml:"metrics"` + } `json:"pipelines" yaml:"pipelines"` +} diff --git a/sinker/config_builder_test.go b/maestro/config_builder_test.go similarity index 99% rename from sinker/config_builder_test.go rename to maestro/config_builder_test.go index b69ec26eb..330de4250 100644 --- a/sinker/config_builder_test.go +++ b/maestro/config_builder_test.go @@ -1,4 +1,4 @@ -package sinker +package maestro import ( "context" diff --git a/maestro/maestro.go b/maestro/maestro.go index 37b487368..1e2731da4 100644 --- a/maestro/maestro.go +++ b/maestro/maestro.go @@ -54,7 +54,7 @@ type MaestroService interface { CreateOtelCollector(ctx context.Context, sinkID string, msg string, ownerID string) error // DeleteOtelCollector - delete an existing collector by id - DeleteOtelCollector(ctx context.Context, sinkID string, msg string, ownerID string) error + DeleteOtelCollector(ctx context.Context, sinkID string) error // UpdateOtelCollector - update an existing collector by id UpdateOtelCollector(ctx context.Context, sinkID string, msg string, ownerID string) error diff --git a/maestro/maestro_service.go
b/maestro/maestro_service.go index 5e106072f..683d8a173 100644 --- a/maestro/maestro_service.go +++ b/maestro/maestro_service.go @@ -12,221 +12,28 @@ import ( "bufio" "context" "fmt" - "os" - "os/exec" - "strings" - "github.com/ns1labs/orb/pkg/errors" "go.uber.org/zap" + "os" + "os/exec" ) var ( - otelCollectorCfg = `--- -receivers: - kafka: - brokers: - - orb-live-stg-kafka.orb-live.svc.cluster.local:9092 - topic: otlp_metrics-sink-id-222 - protocol_version: 2.0.0 -extensions: - health_check: - endpoint: :13133 - path: /health/status - check_collector_pipeline: - enabled: true - interval: 5m - exporter_failure_threshold: 5 - basicauth/exporter: - client_auth: - username: admin - password: amanda.joaquina -exporters: - prometheusremotewrite: - endpoint: https://prometheus.qa.orb.live/api/v1/write - auth: - authenticator: basicauth/exporter -service: - extensions: - - health_check - - basicauth/exporter - pipelines: - metrics: - receivers: - - kafka - exporters: - - prometheusremotewrite -` - - k8sOtelCollector = ` -{ - "kind": "List", - "apiVersion": "v1", - "metadata": {}, - "items": [ - { - "kind": "ConfigMap", - "apiVersion": "v1", - "metadata": { - "name": "otel-collector-config-SINK_ID", - "creationTimestamp": null - }, - "data": { - "config.yaml": "SINK_CONFIG" - } - }, - { - "kind": "Deployment", - "apiVersion": "apps/v1", - "metadata": { - "name": "otel-SINK_ID", - "creationTimestamp": null, - "labels": { - "app": "opentelemetry", - "component": "otel-collector" - } - }, - "spec": { - "replicas": 1, - "selector": { - "matchLabels": { - "app": "opentelemetry", - "component": "otel-collector-SINK_ID" - } - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "app": "opentelemetry", - "component": "otel-collector-SINK_ID" - } - }, - "spec": { - "volumes": [ - { - "name": "varlog", - "hostPath": { - "path": "/var/log", - "type": "" - } - }, - { - "name": "varlibdockercontainers", - "hostPath": { - "path": "/var/lib/docker/containers", - "type": "" - } - }, - { - "name": "data", - "configMap": { - "name": "otel-collector-config-SINK_ID", - "defaultMode": 420 - } - } - ], - "containers": [ - { - "name": "otel-collector", - "image": "otel/opentelemetry-collector-contrib:0.60.0", - "resources": { - "limits": { - "cpu": "100m", - "memory": "200Mi" - }, - "requests": { - "cpu": "100m", - "memory": "200Mi" - } - }, - "volumeMounts": [ - { - "name": "varlog", - "readOnly": true, - "mountPath": "/var/log" - }, - { - "name": "varlibdockercontainers", - "readOnly": true, - "mountPath": "/var/lib/docker/containers" - }, - { - "name": "data", - "readOnly": true, - "mountPath": "/etc/otelcol-contrib/config.yaml", - "subPath": "config.yaml" - } - ], - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "imagePullPolicy": "IfNotPresent" - } - ], - "restartPolicy": "Always", - "terminationGracePeriodSeconds": 30, - "dnsPolicy": "ClusterFirst", - "securityContext": {}, - "schedulerName": "default-scheduler" - } - }, - "strategy": { - "type": "RollingUpdate", - "rollingUpdate": { - "maxUnavailable": "25%", - "maxSurge": "25%" - } - }, - "revisionHistoryLimit": 10, - "progressDeadlineSeconds": 600 - }, - "status": {} - }, - { - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "otel-SINK_ID", - "creationTimestamp": null, - "labels": { - "app": "opentelemetry", - "component": "otel-collector-SINK_ID" - } - }, - "spec": { - "ports": [ - { - "name": "metrics", - "protocol": "TCP", - "port": 8888, - "targetPort": 8888 - } - ], - 
"selector": { - "component": "otel-collector-SINK_ID" - }, - "type": "ClusterIP", - "sessionAffinity": "None" - }, - "status": { - "loadBalancer": {} - } - } - ] -} -` ErrCreateMaestro = errors.New("failed to create Otel Collector") ErrConflictMaestro = errors.New("Otel collector already exists") ) -func (svc maestroService) collectorDeploy(operation string, namespace string, sinkId string, manifest string, config string) error { - // prepare manifest - manifest = strings.Replace(manifest, "SINK_ID", sinkId, -1) - config = strings.Replace(config, "\n", `\n`, -1) - manifest = strings.Replace(manifest, "SINK_CONFIG", config, -1) +func (svc maestroService) collectorDeploy(operation, namespace, manifest, sinkId, sinkUrl, sinkUsername, sinkPassword string) error { + manifest, err := GetDeploymentJson(sinkId, sinkUrl, sinkUsername, sinkPassword) + if err != nil { + svc.logger.Error("failed to get deployment json", zap.Error(err)) + return err + } fileContent := []byte(manifest) - err := os.WriteFile("/tmp/otel-collector-"+sinkId+".json", fileContent, 0644) + err = os.WriteFile("/tmp/otel-collector-"+sinkId+".json", fileContent, 0644) if err != nil { - panic(err) + svc.logger.Error("failed to write file content", zap.Error(err)) + return err } // execute action @@ -261,8 +68,14 @@ func (svc maestroService) collectorDeploy(operation string, namespace string, si return nil } +func (svc maestroService) getConfigFromSinkId(id string) (sinkUrl, sinkUsername, sinkPassword string) { + + return "", "", "" +} + func (svc maestroService) CreateOtelCollector(ctx context.Context, sinkID string, msg string, ownerID string) error { - err := svc.collectorDeploy("apply", "otelcollectors", sinkID, k8sOtelCollector, otelCollectorCfg) + sinkUrl, sinkUsername, sinkPassword := svc.getConfigFromSinkId(sinkID) + err := svc.collectorDeploy("apply", "otelcollectors", k8sOtelCollector, sinkID, sinkUrl, sinkUsername, sinkPassword) if err != nil { return err } @@ -270,15 +83,16 @@ func (svc maestroService) CreateOtelCollector(ctx context.Context, sinkID string } func (svc maestroService) UpdateOtelCollector(ctx context.Context, sinkID string, msg string, ownerID string) error { - err := svc.collectorDeploy("apply", "otelcollectors", sinkID, k8sOtelCollector, otelCollectorCfg) + sinkUrl, sinkUsername, sinkPassword := svc.getConfigFromSinkId(sinkID) + err := svc.collectorDeploy("apply", "otelcollectors", k8sOtelCollector, sinkID, sinkUrl, sinkUsername, sinkPassword) if err != nil { return err } return nil } -func (svc maestroService) DeleteOtelCollector(ctx context.Context, sinkID string, msg string, ownerID string) error { - err := svc.collectorDeploy("delete", "otelcollectors", sinkID, k8sOtelCollector, otelCollectorCfg) +func (svc maestroService) DeleteOtelCollector(ctx context.Context, sinkID string) error { + err := svc.collectorDeploy("delete", "otelcollectors", k8sOtelCollector, sinkID, "", "", "") if err != nil { return err } diff --git a/maestro/redis/consumer/events.go b/maestro/redis/consumer/events.go index d4d31d9f9..ea5dd8029 100644 --- a/maestro/redis/consumer/events.go +++ b/maestro/redis/consumer/events.go @@ -8,13 +8,14 @@ package consumer import ( + "github.com/ns1labs/orb/pkg/types" "time" ) type sinksUpdateEvent struct { ownerID string sinkID string - config string + config types.Metadata timestamp time.Time } diff --git a/maestro/redis/consumer/hashset.go b/maestro/redis/consumer/hashset.go new file mode 100644 index 000000000..762402036 --- /dev/null +++ b/maestro/redis/consumer/hashset.go @@ -0,0 +1,62 
@@ +package consumer + +import ( + "context" + "github.com/ns1labs/orb/maestro" + "go.uber.org/zap" + "time" +) + +const deploymentKey = "orb.sinks.deployment" + +// handleSinksDeleteCollector will delete Deployment Entry and force delete otel collector +func (es eventStore) handleSinksDeleteCollector(ctx context.Context, event sinksUpdateEvent) error { + es.logger.Info("Received maestro DELETE event from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) + es.client.HDel(ctx, deploymentKey, event.sinkID) + err := es.maestroService.DeleteOtelCollector(ctx, event.sinkID) + if err != nil { + return err + } + return nil +} + +// handleSinksCreateCollector will create Deployment Entry in Redis +func (es eventStore) handleSinksCreateCollector(ctx context.Context, event sinksUpdateEvent) error { + es.logger.Info("Received maestro CREATE event from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) + sinkUrl := event.config["sink_url"].(string) + sinkUsername := event.config["username"].(string) + sinkPassword := event.config["password"].(string) + deploy, err := maestro.GetDeploymentJson(event.sinkID, sinkUrl, sinkUsername, sinkPassword) + if err != nil { + es.logger.Error("error trying to get deployment json for sink ID", zap.String("sinkId", event.sinkID)) + return err + } + es.client.HSet(ctx, deploymentKey, event.sinkID, deploy) + + return nil +} + +// handleSinksUpdateCollector will update Deployment Entry in Redis +func (es eventStore) handleSinksUpdateCollector(ctx context.Context, event sinksUpdateEvent) error { + es.logger.Info("Received maestro UPDATE event from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) + sinkUrl := event.config["sink_url"].(string) + sinkUsername := event.config["username"].(string) + sinkPassword := event.config["password"].(string) + deploy, err := maestro.GetDeploymentJson(event.sinkID, sinkUrl, sinkUsername, sinkPassword) + if err != nil { + es.logger.Error("error trying to get deployment json for sink ID", zap.String("sinkId", event.sinkID)) + return err + } + es.client.HSet(ctx, deploymentKey, event.sinkID, deploy) + return nil +} + +func decodeSinksUpdate(event map[string]interface{}) sinksUpdateEvent { + val := sinksUpdateEvent{ + ownerID: read(event, "owner", ""), + sinkID: read(event, "sink_id", ""), + config: readMetadata(event, "config"), + timestamp: time.Time{}, + } + return val +} diff --git a/maestro/redis/consumer/streams.go b/maestro/redis/consumer/streams.go index 5a7d20877..12468223f 100644 --- a/maestro/redis/consumer/streams.go +++ b/maestro/redis/consumer/streams.go @@ -2,6 +2,7 @@ package consumer import ( "context" + "github.com/ns1labs/orb/pkg/types" "time" "github.com/go-redis/redis/v8" @@ -10,17 +11,12 @@ import ( ) const ( - streamID = "orb.collectors" - streamLen = 1000 - streamSinker = "orb.sinker" streamSinks = "orb.sinks" group = "orb.collectors" sinkerPrefix = "sinker." sinkerUpdate = sinkerPrefix + "update" - sinkerCreate = sinkerPrefix + "create" - sinkerDelete = sinkerPrefix + "remove" sinksPrefix = "sinks." 
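// Operation names compose as "<entity prefix><action>", e.g. "sinker.update"
// and "sinks.update"; the handlers below dispatch on them when reading events
// under the "orb.collectors" consumer group.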
sinksUpdate = sinksPrefix + "update" @@ -134,49 +130,20 @@ func (es eventStore) SubscribeSinks(context context.Context) error { } } -//Delete collector -func (es eventStore) handleSinksDeleteCollector(ctx context.Context, event sinksUpdateEvent) error { - es.logger.Info("Received maestro DELETE event from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) - err := es.maestroService.DeleteOtelCollector(ctx, event.sinkID, event.config, event.ownerID) - if err != nil { - return err - } - return nil -} - -//Create collector -func (es eventStore) handleSinksUpdateCollector(ctx context.Context, event sinksUpdateEvent) error { - es.logger.Info("Received maestro UPDATE event from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) - err := es.maestroService.CreateOtelCollector(ctx, event.sinkID, event.config, event.ownerID) - if err != nil { - return err - } - return nil -} - -//Create collector -func (es eventStore) handleSinksCreateCollector(ctx context.Context, event sinksUpdateEvent) error { - es.logger.Info("Received maestro CREATE event from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) - err := es.maestroService.CreateOtelCollector(ctx, event.sinkID, event.config, event.ownerID) - if err != nil { - return err - } - return nil -} - -//Delete collector +// Delete collector func (es eventStore) handleSinkerDeleteCollector(ctx context.Context, event sinkerUpdateEvent) error { es.logger.Info("Received maestro DELETE event from sinker, sink state=" + event.state + ", , Sink ID=" + event.sinkID + ", Owner ID=" + event.ownerID) - err := es.maestroService.DeleteOtelCollector(ctx, event.sinkID, event.state, event.ownerID) + err := es.maestroService.DeleteOtelCollector(ctx, event.sinkID) if err != nil { return err } return nil } -//Create collector +// Create collector func (es eventStore) handleSinkerCreateCollector(ctx context.Context, event sinkerUpdateEvent) error { es.logger.Info("Received maestro CREATE event from sinker, sink state=" + event.state + ", Sink ID=" + event.sinkID + ", Owner ID=" + event.ownerID) + err := es.maestroService.CreateOtelCollector(ctx, event.sinkID, event.state, event.ownerID) if err != nil { return err @@ -194,20 +161,19 @@ func decodeSinkerStateUpdate(event map[string]interface{}) sinkerUpdateEvent { return val } -func decodeSinksUpdate(event map[string]interface{}) sinksUpdateEvent { - val := sinksUpdateEvent{ - ownerID: read(event, "owner", ""), - sinkID: read(event, "sink_id", ""), - config: read(event, "config", ""), - timestamp: time.Time{}, +func read(event map[string]interface{}, key, def string) string { + val, ok := event[key].(string) + if !ok { + return def } + return val } -func read(event map[string]interface{}, key, def string) string { - val, ok := event[key].(string) +func readMetadata(event map[string]interface{}, key string) types.Metadata { + val, ok := event[key].(types.Metadata) if !ok { - return def + return types.Metadata{} } return val diff --git a/maestro/service.go b/maestro/service.go index 87ac4c9f9..5cc705e9e 100644 --- a/maestro/service.go +++ b/maestro/service.go @@ -9,17 +9,20 @@ package maestro import ( + "github.com/go-redis/redis/v8" "go.uber.org/zap" ) var _ MaestroService = (*maestroService)(nil) type maestroService struct { - logger *zap.Logger + logger *zap.Logger + redisClient *redis.Client } -func NewMaestroService(logger *zap.Logger) MaestroService { +func NewMaestroService(logger *zap.Logger, redisClient *redis.Client) MaestroService { return &maestroService{ - logger: logger, + logger: logger, + 
redisClient: redisClient, } } diff --git a/sinker/config_builder.go b/sinker/config_builder.go deleted file mode 100644 index 143d6e6ae..000000000 --- a/sinker/config_builder.go +++ /dev/null @@ -1,142 +0,0 @@ -package sinker - -import ( - "context" - "fmt" - "gopkg.in/yaml.v2" -) - -// ReturnConfigYamlFromSink this is the main method, which will generate the YAML file from the -func ReturnConfigYamlFromSink(_ context.Context, kafkaUrlConfig, sinkId, sinkUrl, sinkUsername, sinkPassword string) (string, error) { - - config := OtelConfigFile{ - Receivers: Receivers{ - Kafka: KafkaReceiver{ - Brokers: []string{kafkaUrlConfig}, - Topic: fmt.Sprintf("otlp_metrics-%s", sinkId), - ProtocolVersion: "2.0.0", // Leaving default of over 2.0.0 - }, - }, - Extensions: &Extensions{ - HealthCheckExtConfig: &HealthCheckExtension{}, - PProf: &PProfExtension{ - Endpoint: ":1888", // Leaving default for now, will need to change with more processes - }, - BasicAuth: &BasicAuthenticationExtension{ - ClientAuth: &struct { - Username string `json:"username" yaml:"username"` - Password string `json:"password" yaml:"password"` - }{Username: sinkUsername, Password: sinkPassword}, - }, - }, - Exporters: Exporters{ - PrometheusRemoteWrite: &PrometheusRemoteWriteExporterConfig{ - Endpoint: sinkUrl, - auth: struct { - Authenticator string `json:"authenticator" yaml:"authenticator"` - }{Authenticator: "basicauth/exporter"}, - }, - }, - Service: ServiceConfig{ - Extensions: []string{"pprof", "health_check", "basicauth/exporter"}, - Pipelines: struct { - Metrics struct { - Receivers []string `json:"receivers" yaml:"receivers"` - Processors []string `json:"processors,omitempty" yaml:"processors,omitempty"` - Exporters []string `json:"exporters" yaml:"exporters"` - } `json:"metrics" yaml:"metrics"` - }{ - Metrics: struct { - Receivers []string `json:"receivers" yaml:"receivers"` - Processors []string `json:"processors,omitempty" yaml:"processors,omitempty"` - Exporters []string `json:"exporters" yaml:"exporters"` - }{ - Receivers: []string{"kafka"}, - Exporters: []string{"prometheusremotewrite"}, - }, - }, - }, - } - marshal, err := yaml.Marshal(&config) - if err != nil { - return "", err - } - returnedString := "---\n" + string(marshal) - return returnedString, nil - -} - -type OtelConfigFile struct { - Receivers Receivers `json:"receivers" yaml:"receivers"` - Processors *Processors `json:"processors,omitempty" yaml:"processors,omitempty"` - Extensions *Extensions `json:"extensions,omitempty" yaml:"extensions,omitempty"` - Exporters Exporters `json:"exporters" yaml:"exporters"` - Service ServiceConfig `json:"service" yaml:"service"` -} - -// Receivers will receive only with Kafka for now -type Receivers struct { - Kafka KafkaReceiver `json:"kafka" yaml:"kafka"` -} - -type KafkaReceiver struct { - Brokers []string `json:"brokers" yaml:"brokers"` - Topic string `json:"topic" yaml:"topic"` - ProtocolVersion string `json:"protocol_version" yaml:"protocol_version"` -} - -type Processors struct { -} - -type Extensions struct { - HealthCheckExtConfig *HealthCheckExtension `json:"health_check,omitempty" yaml:"health_check,omitempty"` - PProf *PProfExtension `json:"pprof,omitempty" yaml:"pprof,omitempty"` - ZPages *ZPagesExtension `json:"zpages,omitempty" yaml:"zpages,omitempty"` - // Exporters Authentication - BasicAuth *BasicAuthenticationExtension `json:"basicauth/exporter,omitempty" yaml:"basicauth/exporter,omitempty"` -} - -type HealthCheckExtension struct { - CollectorPipeline *struct { - Enabled bool `json:"enabled" 
yaml:"enabled"` - Interval string `json:"interval" yaml:"interval"` - FailureThreshold int32 `json:"exporter_failure_threshold" yaml:"exporter_failure_threshold"` - } `json:"check_collector_pipeline,omitempty" yaml:"check_collector_pipeline,omitempty"` -} - -type PProfExtension struct { - Endpoint string `json:"endpoint" yaml:"endpoint"` -} - -type ZPagesExtension struct { - Endpoint string `json:"endpoint" yaml:"endpoint"` -} - -type BasicAuthenticationExtension struct { - ClientAuth *struct { - Username string `json:"username" yaml:"username"` - Password string `json:"password" yaml:"password"` - } `json:"client_auth" yaml:"client_auth"` -} - -type Exporters struct { - PrometheusRemoteWrite *PrometheusRemoteWriteExporterConfig `json:"prometheusremotewrite,omitempty" yaml:"prometheusremotewrite,omitempty"` -} - -type PrometheusRemoteWriteExporterConfig struct { - Endpoint string `json:"endpoint" yaml:"endpoint"` - auth struct { - Authenticator string `json:"authenticator" yaml:"authenticator"` - } -} - -type ServiceConfig struct { - Extensions []string `json:"extensions,omitempty" yaml:"extensions,omitempty"` - Pipelines struct { - Metrics struct { - Receivers []string `json:"receivers" yaml:"receivers"` - Processors []string `json:"processors,omitempty" yaml:"processors,omitempty"` - Exporters []string `json:"exporters" yaml:"exporters"` - } `json:"metrics" yaml:"metrics"` - } `json:"pipelines" yaml:"pipelines"` -} diff --git a/sinker/config_state_check.go b/sinker/config_state_check.go index 0f49e9b80..7f1693100 100644 --- a/sinker/config_state_check.go +++ b/sinker/config_state_check.go @@ -13,11 +13,11 @@ import ( const ( streamID = "orb.sinker" streamLen = 1000 - CheckerFreq = 300 * time.Second + CheckerFreq = 5 * time.Minute DefaultTimeout = 30 * time.Minute ) -func (svc *sinkerService) checkState(t time.Time) { +func (svc *sinkerService) checkState(_ time.Time) { owners, err := svc.sinkerCache.GetAllOwners() if err != nil { svc.logger.Error("failed to retrieve the list of owners") @@ -31,7 +31,7 @@ func (svc *sinkerService) checkState(t time.Time) { return } for _, cfg := range configs { - // Set idle if the sinker is more then 30 minutes not sending metrics (Remove from Redis) + // Set idle if the sinker is more than 30 minutes not sending metrics (Remove from Redis) if cfg.LastRemoteWrite.Add(DefaultTimeout).Before(time.Now()) { if cfg.State == config.Active { if err := svc.sinkerCache.Remove(cfg.OwnerID, cfg.SinkID); err != nil { diff --git a/sinker/otel/README.md b/sinker/otel/README.md index e69de29bb..c0bbe9a2f 100644 --- a/sinker/otel/README.md +++ b/sinker/otel/README.md @@ -0,0 +1,64 @@ +# Sink-Collector Sequence Diagrams + +## On Create Sink + +The new service, which will trigger K8s New pods, is called Maestro for now + +## Create + +```mermaid +sequenceDiagram + autoNumber 1 + User->>Sinks: Creates a new sink + Sinks->>Redis: add CreateSinkEvent (orb.sinks / stream) //ok + Note over Sinker,Redis: Sinker is subscribed to orb.sinks stream + Sinker->>Redis: readgroup to operation CreateSinkEvent (orb.sinks / stream) //tbd + Sinker->>Redis: add key: sink-id / val otelConfigYaml (orb.sinker.otelConfigYaml / hashmap) //tbd + +``` + +```mermaid +sequenceDiagram + autoNumber 1 + Note over Sinker: Received metrics, fetched policy, dataset, sink, retrieved sink id 222 + Sinker->>Redis: read key: sink-id / val: deploymentYaml (orb.maestro.otelCollector / hashmap) + Sinker->>Maestro: grpc: create otel-collector + Maestro->>Redis: read (orb.sinker.otelConfigYaml) with key 222 + 
Maestro->>Redis: add key: sink-id / val: deploymentYaml (orb.maestro.otelCollector / hashmap) + Maestro->>Kubernetes: Create otel-collector pod with Sink 222 config YAML + Note over Sinker,OtelCol222: Once collector is up metrics will flow + Sinker->>OtelCol222: +``` + + +## Update + +```mermaid +sequenceDiagram + autoNumber 1 + User->>Sinks: Updates Sink 222 Information + Sinks->>Redis: add UpdateSinkEvent (orb.sinks / stream) //ok + Note over Sinker,Redis: Sinker is subscribed to orb.sinks stream + Sinker->>Redis: read to operation UpdateSinkEvent (orb.sinks / stream) //ok + Sinker->>Redis: update key: sink-id / val otelConfigYaml (orb.sinker.otelConfigYaml / hashmap) //tbd + Sinker->>Maestro: grpc: update otel-collector + Maestro->>Redis: update key: 222 / val: deploymentYaml (orb.maestro.otelCollector / hashmap) + Maestro->>Kubernetes: Updates deployment with new values + Note over Sinker,OtelCol222: Once collector is synced metrics would flow again + Sinker->>OtelCol222: +``` + +## Delete + +```mermaid +sequenceDiagram + autoNumber 1 + User->>Sinks: Deletes Sink 222 + Sinks->>Redis: add DeleteSinkEvent (orb.sinks / stream) //ok + Note over Sinker,Redis: Sinker is subscribed to orb.sinks stream + Sinker->>Redis: read to operation RemoveSinkEvent (orb.sinks / stream) //tbd + Sinker->>Redis: remove key: sink-id / val otelConfigYaml (orb.sinker.otelConfigYaml / hashmap) //tbd + Sinker->>Maestro: grpc: remove otel-collector + Maestro->>Redis: remove key: 222 / val: deploymentYaml (orb.maestro.otelCollector / hashmap) + Maestro->>Kubernetes: removes deployment +``` diff --git a/sinker/redis/consumer/streams.go b/sinker/redis/consumer/streams.go index 806a39ba9..6c93d58d2 100644 --- a/sinker/redis/consumer/streams.go +++ b/sinker/redis/consumer/streams.go @@ -89,16 +89,15 @@ func decodeSinksUpdate(event map[string]interface{}) (updateSinkEvent, error) { owner: read(event, "owner", ""), timestamp: time.Time{}, } - - var config types.Metadata - if err := json.Unmarshal([]byte(read(event, "config", "")), &config); err != nil { + var metadata types.Metadata + if err := json.Unmarshal([]byte(read(event, "config", "")), &metadata); err != nil { return updateSinkEvent{}, err } - val.config = config + val.config = metadata return val, nil } -func (es eventStore) handleSinksUpdate(ctx context.Context, e updateSinkEvent) error { +func (es eventStore) handleSinksUpdate(_ context.Context, e updateSinkEvent) error { data, err := json.Marshal(e.config) if err != nil { return err diff --git a/sinks/redis/consumer/streams.go b/sinks/redis/consumer/streams.go index b1eac2257..68baea35b 100644 --- a/sinks/redis/consumer/streams.go +++ b/sinks/redis/consumer/streams.go @@ -15,6 +15,8 @@ const ( sinkerPrefix = "sinker." sinkerUpdate = sinkerPrefix + "update" + otelYamlPrefix = "otel.yaml.sinker." + exists = "BUSYGROUP Consumer Group name already exists" ) diff --git a/sinks/sinks.go b/sinks/sinks.go index 41fd8629c..c40bd765a 100644 --- a/sinks/sinks.go +++ b/sinks/sinks.go @@ -124,13 +124,12 @@ type SinkService interface { DeleteSink(ctx context.Context, token string, key string) error // ValidateSink validate a sink configuration without saving ValidateSink(ctx context.Context, token string, sink Sink) (Sink, error) - // ChangeState + // ChangeSinkStateInternal change the sink internal state from new/idle/active ChangeSinkStateInternal(ctx context.Context, sinkID string, msg string, ownerID string, state State) error } type SinkRepository interface { - // Save persists the Sink. 
Successful operation is indicated by non-nil - // error response. + // Save persists the Sink. Successful operation is indicated by non-nil error response. Save(ctx context.Context, sink Sink) (string, error) // Update performs an update to the existing sink, A non-nil error is // returned to indicate operation failure From 4084e5bab06dcb75e4f3c2027416486f51d2cb90 Mon Sep 17 00:00:00 2001 From: Mariana Cavalcante Date: Wed, 19 Oct 2022 15:19:24 -0300 Subject: [PATCH 05/94] fix(agent): add nil safe for all agent's chan (#1802) --- agent/agent.go | 6 +++++- agent/backend/pktvisor/pktvisor.go | 8 +++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 78ef84337..c7c4047ad 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -187,7 +187,11 @@ func (a *orbAgent) logoffWithHeartbeat(ctx context.Context) { a.logger.Warn("failed to unsubscribe to RPC channel", zap.Error(token.Error())) } } - defer close(a.hbDone) + defer func() { + if a.hbDone != nil { + close(a.hbDone) + } + }() } func (a *orbAgent) Stop(ctx context.Context) { a.logger.Info("routine call for stop agent", zap.Any("routine", ctx.Value("routine"))) diff --git a/agent/backend/pktvisor/pktvisor.go b/agent/backend/pktvisor/pktvisor.go index 970887944..800fbd407 100644 --- a/agent/backend/pktvisor/pktvisor.go +++ b/agent/backend/pktvisor/pktvisor.go @@ -135,6 +135,8 @@ func (p *pktvisorBackend) Start(ctx context.Context, cancelFunc context.CancelFu } // the macros should be properly configured to enable crashpad + + // the macros should be properly configured to enable crashpad // pvOptions = append(pvOptions, "--cp-token", PKTVISOR_CP_TOKEN) // pvOptions = append(pvOptions, "--cp-url", PKTVISOR_CP_URL) // pvOptions = append(pvOptions, "--cp-path", PKTVISOR_CP_PATH) @@ -152,7 +154,11 @@ func (p *pktvisorBackend) Start(ctx context.Context, cancelFunc context.CancelFu // log STDOUT and STDERR lines streaming from Cmd doneChan := make(chan struct{}) go func() { - defer close(doneChan) + defer func() { + if doneChan != nil { + close(doneChan) + } + }() for p.proc.Stdout != nil || p.proc.Stderr != nil { select { case line, open := <-p.proc.Stdout: From 2a51c9e43ec200a84f68c91e11251bf3ba79565b Mon Sep 17 00:00:00 2001 From: etaques <97463920+etaques@users.noreply.github.com> Date: Thu, 20 Oct 2022 16:15:32 -0300 Subject: [PATCH 06/94] [Feat] Add maestro microservice on pipeline (#1899) [Feat] Add maestro microservice on pipeline (#1899) --- .github/workflows/go-develop.yml | 47 ++++++++++++++++++++++++++++- .github/workflows/go-production.yml | 44 +++++++++++++++++++++++++++ 2 files changed, 90 insertions(+), 1 deletion(-) diff --git a/.github/workflows/go-develop.yml b/.github/workflows/go-develop.yml index edaaccb7e..2974d8daa 100644 --- a/.github/workflows/go-develop.yml +++ b/.github/workflows/go-develop.yml @@ -41,6 +41,8 @@ jobs: - 'cmd/sinks/**' - 'sinker/**' - 'cmd/sinker/**' + - 'maestro/**' + - 'cmd/maestro/**' ui: - 'ui/**' docs: @@ -169,7 +171,23 @@ jobs: - name: Go unit tests if: ${{ needs.prebuild.outputs.orb == 'true' }} - run: SERVICE=sinker make test_service + run: SERVICE=sinker make test_service + + test-maestro: + runs-on: ubuntu-latest + needs: prebuild + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.19 + + - name: Go unit tests + run: | + if [ "${{ needs.prebuild.outputs.orb }}" == "true" ]; then + SERVICE=maestro make test_service + fi package-agent: needs: @@ -333,6 +351,33 @@ jobs: - name: Push service 
containers if: github.event_name != 'pull_request' run: docker push -a ns1labs/orb-sinks + + package-maestro: + needs: + - prebuild + - test-maestro + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Get short commit hash to a variable + id: commit_hash + run: | + echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" + echo ${{ needs.prebuild.outputs.VERSION }} > VERSION + + - name: Build service containers + run: SERVICE=maestro make build_docker + + - name: Login to Docker Hub + uses: docker/login-action@v2 + if: github.event_name != 'pull_request' + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Push service containers + if: github.event_name != 'pull_request' + run: docker push -a ns1labs/orb-maestro package-ui-dependencies: needs: diff --git a/.github/workflows/go-production.yml b/.github/workflows/go-production.yml index 5ac6b6291..6bd6f8829 100644 --- a/.github/workflows/go-production.yml +++ b/.github/workflows/go-production.yml @@ -37,6 +37,8 @@ jobs: - 'cmd/sinks/**' - 'sinker/**' - 'cmd/sinker/**' + - 'maestro/**' + - 'cmd/maestro/**' ui: - 'ui/**' - name: Set branch name @@ -131,6 +133,22 @@ jobs: - name: Go unit tests if: ${{ needs.prebuild.outputs.orb == 'true' }} run: SERVICE=sinker make test_service + + test-maestro: + runs-on: ubuntu-latest + needs: prebuild + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.19 + + - name: Go unit tests + run: | + if [ "${{ needs.prebuild.outputs.orb }}" == "true" ]; then + SERVICE=maestro make test_service + fi package-agent: # This is just for debug agent @@ -288,6 +306,32 @@ jobs: - name: Push service containers if: github.event_name != 'pull_request' run: docker push -a ns1labs/orb-sinks + + package-maestro: + needs: + - prebuild + - test-maestro + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Get short commit hash to a variable + id: commit_hash + run: | + echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" + echo ${{ needs.prebuild.outputs.VERSION }} > VERSION + - name: Build service containers + run: SERVICE=maestro make build_docker + + - name: Login to Docker Hub + uses: docker/login-action@v2 + if: github.event_name != 'pull_request' + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Push service containers + if: github.event_name != 'pull_request' + run: docker push -a ns1labs/orb-maestro package-ui-dependencies: needs: From 792b6ff266923b2135ac50b8193a991904f0a578 Mon Sep 17 00:00:00 2001 From: etaques <97463920+etaques@users.noreply.github.com> Date: Thu, 20 Oct 2022 16:41:29 -0300 Subject: [PATCH 07/94] [Feat] Add code coverage (#1900) [Feat] Add code coverage (#1900) --- .github/workflows/go-develop.yml | 150 ++++++++++++++++++++++++++++--- Makefile | 9 ++ README.md | 1 + 3 files changed, 146 insertions(+), 14 deletions(-) diff --git a/.github/workflows/go-develop.yml b/.github/workflows/go-develop.yml index 2974d8daa..a5463e5b8 100644 --- a/.github/workflows/go-develop.yml +++ b/.github/workflows/go-develop.yml @@ -104,6 +104,7 @@ jobs: test-agent: runs-on: ubuntu-latest needs: prebuild + if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.agent == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -112,12 +113,33 @@ jobs: go-version: 1.19 - name: Go unit tests - if: ${{ needs.prebuild.outputs.agent == 'true' }} - run: SERVICE=agent make 
test_service + run: | + SERVICE=agent make test_service_cov + + - name: Install dependencies + run: | + go mod tidy + sudo apt update && sudo apt install -y build-essential jq + go install github.com/axw/gocov/gocov@latest + go install github.com/AlekSi/gocov-xml@latest + + - name: coverage to xml + run: | + echo "Current directory: ${PWD}" + echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV + gocov convert ./coverage.out | gocov-xml > ./coverage.xml + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + files: coverage.xml + name: orb + verbose: true test-fleet: runs-on: ubuntu-latest needs: prebuild + if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.orb == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -127,13 +149,32 @@ jobs: - name: Go unit tests run: | - if [ "${{ needs.prebuild.outputs.orb }}" == "true" ]; then - SERVICE=fleet make test_service - fi + SERVICE=fleet make test_service_cov + + - name: Install dependencies + run: | + go mod tidy + sudo apt update && sudo apt install -y build-essential jq + go install github.com/axw/gocov/gocov@latest + go install github.com/AlekSi/gocov-xml@latest + + - name: coverage to xml + run: | + echo "Current directory: ${PWD}" + echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV + gocov convert ./coverage.out | gocov-xml > ./coverage.xml + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + files: coverage.xml + name: orb + verbose: true test-policies: runs-on: ubuntu-latest needs: prebuild + if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.orb == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -142,12 +183,33 @@ jobs: go-version: 1.19 - name: Go unit tests - if: ${{ needs.prebuild.outputs.orb == 'true' }} - run: SERVICE=policies make test_service + run: | + SERVICE=policies make test_service_cov + + - name: Install dependencies + run: | + go mod tidy + sudo apt update && sudo apt install -y build-essential jq + go install github.com/axw/gocov/gocov@latest + go install github.com/AlekSi/gocov-xml@latest + + - name: coverage to xml + run: | + echo "Current directory: ${PWD}" + echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV + gocov convert ./coverage.out | gocov-xml > ./coverage.xml + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + files: coverage.xml + name: orb + verbose: true test-sinks: runs-on: ubuntu-latest needs: prebuild + if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.orb == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -156,12 +218,33 @@ jobs: go-version: 1.19 - name: Go unit tests - if: ${{ needs.prebuild.outputs.orb == 'true' }} - run: SERVICE=sinks make test_service + run: | + SERVICE=sinks make test_service_cov + + - name: Install dependencies + run: | + go mod tidy + sudo apt update && sudo apt install -y build-essential jq + go install github.com/axw/gocov/gocov@latest + go install github.com/AlekSi/gocov-xml@latest + + - name: coverage to xml + run: | + echo "Current directory: ${PWD}" + echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV + gocov convert ./coverage.out | gocov-xml > ./coverage.xml + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + files: coverage.xml + 
name: orb + verbose: true test-sinker: runs-on: ubuntu-latest needs: prebuild + if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.orb == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -170,12 +253,33 @@ jobs: go-version: 1.19 - name: Go unit tests - if: ${{ needs.prebuild.outputs.orb == 'true' }} - run: SERVICE=sinker make test_service + run: | + SERVICE=sinker make test_service_cov + + - name: Install dependencies + run: | + go mod tidy + sudo apt update && sudo apt install -y build-essential jq + go install github.com/axw/gocov/gocov@latest + go install github.com/AlekSi/gocov-xml@latest + + - name: coverage to xml + run: | + echo "Current directory: ${PWD}" + echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV + gocov convert ./coverage.out | gocov-xml > ./coverage.xml + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + files: coverage.xml + name: orb + verbose: true test-maestro: runs-on: ubuntu-latest needs: prebuild + if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.orb == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -185,9 +289,27 @@ jobs: - name: Go unit tests run: | - if [ "${{ needs.prebuild.outputs.orb }}" == "true" ]; then - SERVICE=maestro make test_service - fi + SERVICE=maestro make test_service_cov + + - name: Install dependencies + run: | + go mod tidy + sudo apt update && sudo apt install -y build-essential jq + go install github.com/axw/gocov/gocov@latest + go install github.com/AlekSi/gocov-xml@latest + + - name: coverage to xml + run: | + echo "Current directory: ${PWD}" + echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV + gocov convert ./coverage.out | gocov-xml > ./coverage.xml + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + files: coverage.xml + name: orb + verbose: true package-agent: needs: diff --git a/Makefile b/Makefile index b990f675f..7523cf0cc 100644 --- a/Makefile +++ b/Makefile @@ -40,6 +40,10 @@ define run_test go test -mod=mod -race -count 1 -tags test $(shell go list ./... | grep -v 'cmd' | grep '$(SERVICE)') endef +define run_test_coverage + go test -mod=mod -race -count 1 -tags test -cover -coverprofile=coverage.out -covermode=atomic $(shell go list ./... | grep -v 'cmd' | grep '$(SERVICE)') +endef + define make_docker $(eval SERVICE=$(shell [ -z "$(SERVICE)" ] && echo $(subst docker_,,$(1)) || echo $(SERVICE))) docker build \ @@ -87,9 +91,14 @@ test: run_test_service: test_service $(2) +run_test_service_cov: test_service_cov $(2) + test_service: $(call run_test,$(@)) +test_service_cov: + $(call run_test_coverage,$(@)) + proto: protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative policies/pb/policies.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative fleet/pb/fleet.proto diff --git a/README.md b/README.md index 14ef204bf..9251e22e3 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![Total alerts](https://img.shields.io/lgtm/alerts/g/ns1labs/orb.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/ns1labs/orb/alerts/) [![Go Report Card](https://goreportcard.com/badge/github.com/ns1labs/orb)](https://goreportcard.com/report/github.com/ns1labs/orb) +[![CodeCov](https://codecov.io/gh/ns1labs/orb/branch/develop/graph/badge.svg)](https://app.codecov.io/gh/ns1labs/orb/tree/develop) [![Language grade: JavaScript](https://img.shields.io/lgtm/grade/javascript/g/ns1labs/orb.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/ns1labs/orb/context:javascript) **Orb** is a new kind of observability platform that helps operators, developers, and end users understand their From f964c181f0a04d84b86016d4a3a77da5646e871b Mon Sep 17 00:00:00 2001 From: etaques <97463920+etaques@users.noreply.github.com> Date: Thu, 20 Oct 2022 17:07:24 -0300 Subject: [PATCH 08/94] [FIX] upload report (#1901) [FIX] upload report (#1901) --- .github/workflows/go-develop.yml | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/.github/workflows/go-develop.yml b/.github/workflows/go-develop.yml index a5463e5b8..1e04479cf 100644 --- a/.github/workflows/go-develop.yml +++ b/.github/workflows/go-develop.yml @@ -104,7 +104,6 @@ jobs: test-agent: runs-on: ubuntu-latest needs: prebuild - if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.agent == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -113,10 +112,12 @@ jobs: go-version: 1.19 - name: Go unit tests + if: ${{ needs.prebuild.outputs.agent == 'true' }} run: | SERVICE=agent make test_service_cov - name: Install dependencies + if: ${{ needs.prebuild.outputs.agent == 'true' }} run: | go mod tidy sudo apt update && sudo apt install -y build-essential jq @@ -124,12 +125,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml + if: ${{ needs.prebuild.outputs.agent == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov + if: ${{ needs.prebuild.outputs.agent == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml @@ -139,7 +142,6 @@ jobs: test-fleet: runs-on: ubuntu-latest needs: prebuild - if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.orb == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -148,10 +150,12 @@ jobs: go-version: 1.19 - name: Go unit tests + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | SERVICE=fleet make test_service_cov - name: Install dependencies + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | go mod tidy sudo apt update && sudo apt install -y build-essential jq @@ -159,12 +163,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov + if: ${{ needs.prebuild.outputs.orb == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml @@ -174,7 +180,6 @@ jobs: test-policies: runs-on: 
ubuntu-latest needs: prebuild - if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.orb == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -183,10 +188,12 @@ jobs: go-version: 1.19 - name: Go unit tests + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | SERVICE=policies make test_service_cov - name: Install dependencies + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | go mod tidy sudo apt update && sudo apt install -y build-essential jq @@ -194,12 +201,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov + if: ${{ needs.prebuild.outputs.orb == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml @@ -209,7 +218,6 @@ jobs: test-sinks: runs-on: ubuntu-latest needs: prebuild - if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.orb == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -218,10 +226,12 @@ jobs: go-version: 1.19 - name: Go unit tests + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | SERVICE=sinks make test_service_cov - name: Install dependencies + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | go mod tidy sudo apt update && sudo apt install -y build-essential jq @@ -229,12 +239,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov + if: ${{ needs.prebuild.outputs.orb == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml @@ -244,7 +256,6 @@ jobs: test-sinker: runs-on: ubuntu-latest needs: prebuild - if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.orb == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -253,10 +264,12 @@ jobs: go-version: 1.19 - name: Go unit tests + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | SERVICE=sinker make test_service_cov - name: Install dependencies + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | go mod tidy sudo apt update && sudo apt install -y build-essential jq @@ -264,12 +277,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov + if: ${{ needs.prebuild.outputs.orb == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml @@ -279,7 +294,6 @@ jobs: test-maestro: runs-on: ubuntu-latest needs: prebuild - if: ${{ github.event_name == 'pull_request' || needs.prebuild.outputs.orb == 'true' }} steps: - uses: actions/checkout@v2 - name: Set up Go @@ -288,10 +302,12 @@ jobs: go-version: 1.19 - name: Go unit tests + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | SERVICE=maestro make test_service_cov - name: Install dependencies + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | go mod tidy sudo apt update && sudo 
apt install -y build-essential jq @@ -299,12 +315,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml + if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov + if: ${{ needs.prebuild.outputs.orb == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml From 3d471cc9269b281f07958cea5313d71ef593b67f Mon Sep 17 00:00:00 2001 From: etaques <97463920+etaques@users.noreply.github.com> Date: Thu, 20 Oct 2022 18:30:40 -0300 Subject: [PATCH 09/94] [Feat] first codecov test (#1902) [Feat] first codecov test (#1902) --- .github/workflows/go-develop.yml | 48 ++++++++++++++++---------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/.github/workflows/go-develop.yml b/.github/workflows/go-develop.yml index 1e04479cf..52cb6a9b1 100644 --- a/.github/workflows/go-develop.yml +++ b/.github/workflows/go-develop.yml @@ -112,12 +112,12 @@ jobs: go-version: 1.19 - name: Go unit tests - if: ${{ needs.prebuild.outputs.agent == 'true' }} +# if: ${{ needs.prebuild.outputs.agent == 'true' }} run: | SERVICE=agent make test_service_cov - name: Install dependencies - if: ${{ needs.prebuild.outputs.agent == 'true' }} +# if: ${{ needs.prebuild.outputs.agent == 'true' }} run: | go mod tidy sudo apt update && sudo apt install -y build-essential jq @@ -125,14 +125,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml - if: ${{ needs.prebuild.outputs.agent == 'true' }} +# if: ${{ needs.prebuild.outputs.agent == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov - if: ${{ needs.prebuild.outputs.agent == 'true' }} +# if: ${{ needs.prebuild.outputs.agent == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml @@ -150,12 +150,12 @@ jobs: go-version: 1.19 - name: Go unit tests - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | SERVICE=fleet make test_service_cov - name: Install dependencies - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | go mod tidy sudo apt update && sudo apt install -y build-essential jq @@ -163,14 +163,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml @@ -188,12 +188,12 @@ jobs: go-version: 1.19 - name: Go unit tests - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | SERVICE=policies make test_service_cov - name: Install dependencies - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | go mod tidy sudo apt update && sudo apt install -y 
build-essential jq @@ -201,14 +201,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml @@ -226,12 +226,12 @@ jobs: go-version: 1.19 - name: Go unit tests - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | SERVICE=sinks make test_service_cov - name: Install dependencies - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | go mod tidy sudo apt update && sudo apt install -y build-essential jq @@ -239,14 +239,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml @@ -264,12 +264,12 @@ jobs: go-version: 1.19 - name: Go unit tests - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | SERVICE=sinker make test_service_cov - name: Install dependencies - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | go mod tidy sudo apt update && sudo apt install -y build-essential jq @@ -277,14 +277,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml @@ -302,12 +302,12 @@ jobs: go-version: 1.19 - name: Go unit tests - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | SERVICE=maestro make test_service_cov - name: Install dependencies - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | go mod tidy sudo apt update && sudo apt install -y build-essential jq @@ -315,14 +315,14 @@ jobs: go install github.com/AlekSi/gocov-xml@latest - name: coverage to xml - if: ${{ needs.prebuild.outputs.orb == 'true' }} +# if: ${{ needs.prebuild.outputs.orb == 'true' }} run: | echo "Current directory: ${PWD}" echo "GITHUB_PR_ISSUE_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")" >> $GITHUB_ENV gocov convert ./coverage.out | gocov-xml > ./coverage.xml - name: Upload coverage to Codecov - if: ${{ needs.prebuild.outputs.orb == 'true' }} 
+# if: ${{ needs.prebuild.outputs.orb == 'true' }} uses: codecov/codecov-action@v3 with: files: coverage.xml From ed67f47b7500877413b2f2ca76114fc720f15ea5 Mon Sep 17 00:00:00 2001 From: etaques <97463920+etaques@users.noreply.github.com> Date: Thu, 20 Oct 2022 18:57:50 -0300 Subject: [PATCH 10/94] add refresh go-report (#1903) add refresh go-report (#1903) --- .github/workflows/go-develop.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/go-develop.yml b/.github/workflows/go-develop.yml index 52cb6a9b1..f1b192958 100644 --- a/.github/workflows/go-develop.yml +++ b/.github/workflows/go-develop.yml @@ -83,10 +83,14 @@ jobs: uses: actions/checkout@v2 with: name: workspace + - name: Run go report uses: ./.github/actions/go-report with: go_report_threshold: 90.1 #grade A+ + + - name: refresh go-report + uses: creekorful/goreportcard-action@v1.0 update-api-docs: needs: prebuild From 67c739e279395536430bd8bbb3439a4da80ade6d Mon Sep 17 00:00:00 2001 From: etaques <97463920+etaques@users.noreply.github.com> Date: Tue, 25 Oct 2022 16:31:27 -0300 Subject: [PATCH 11/94] [Feat] Add Kafka fanout feature (#1904) * feat(sinks/sinker): wip add redis topic to handle config yaml. Signed-off-by: Luiz Pegoraro * feat(otel): add diagrams to explain new head on otel. Signed-off-by: Luiz Pegoraro * feat(otel): add create diagram. Signed-off-by: Luiz Pegoraro * feat(otel): fix missing dataset. Signed-off-by: Luiz Pegoraro * feat(otel): update diagrams Signed-off-by: Luiz Pegoraro * feat(sinks/sinker): wip add redis topic to handle config yaml. Signed-off-by: Luiz Pegoraro * feat(maestro): wip, need to fetch config from redis cache Signed-off-by: Luiz Pegoraro * feat(maestro): wip adjusting to each event and fixing collector creation/update on Sinks, only in sinker update. Signed-off-by: Luiz Pegoraro * feat(maestro): wip adjusting to each event and fixing collector creation/update on Sinks, only in sinker update. Signed-off-by: Luiz Pegoraro * feat(maestro): wip adjusting to each event and fixing collector creation/update on Sinks, only in sinker update. Signed-off-by: Luiz Pegoraro * feat(maestro): wip adjusting to each event and fixing collector creation/update on Sinks, only in sinker update. Signed-off-by: Luiz Pegoraro * feat(sinker): add otel-component to inject sink-id in static topic Signed-off-by: Luiz Pegoraro * feat(sinker): add code to fetch information on sink-id from agent on metric. Signed-off-by: Luiz Pegoraro * feat(policies:proto): add grpc to fetch datasets by agent group Signed-off-by: Luiz Pegoraro * feat(kafkaexp): wip fixing dependencies Signed-off-by: Luiz Pegoraro * feat(kafkaexp): wip fixing dependencies Signed-off-by: Luiz Pegoraro * feat(kafkaexp): add context sink-id to topic in kafka. Signed-off-by: Luiz Pegoraro * feat(kafkaexp): add context sink-id to topic in kafka. Signed-off-by: Luiz Pegoraro * feat(fleet): add matching groups of an agent when retrieving agent info gRPC call * feat(sinker): add middle service to hold methods to access sinker protob and cache. Signed-off-by: Luiz Pegoraro * feat(sinker): add fan out functionality to orbreceiver. Signed-off-by: Luiz Pegoraro * feat(sinker): exchange from std exporter to custom Signed-off-by: Luiz Pegoraro * feat(sinker): inject bridge service in otel. Signed-off-by: Luiz Pegoraro * feat(policies): add retrieve datasets by group gRPC * feat(sinker): wrap up redis-maestro communication.
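(Aside: the kafkaexp bullets above describe the core of the fan-out, where the exporter derives a per-sink Kafka topic from a sink ID carried in the export context. A minimal, self-contained Go sketch of that idea follows; the names here are hypothetical illustrations, not the ones used in sinker/otel/kafkafanoutexporter.)

package main

import (
	"context"
	"fmt"
)

// sinkIDKey is a hypothetical context key; the receiving side would store
// the sink ID in the context before handing a batch to the exporter.
type sinkIDKey struct{}

// WithSinkID returns a context carrying the given sink ID.
func WithSinkID(ctx context.Context, sinkID string) context.Context {
	return context.WithValue(ctx, sinkIDKey{}, sinkID)
}

// topicForSink derives the per-sink topic from the configured static topic,
// falling back to the static topic when no sink ID is present.
func topicForSink(ctx context.Context, staticTopic string) string {
	if sinkID, ok := ctx.Value(sinkIDKey{}).(string); ok && sinkID != "" {
		return fmt.Sprintf("%s-%s", staticTopic, sinkID)
	}
	return staticTopic
}

func main() {
	ctx := WithSinkID(context.Background(), "sink-123")
	fmt.Println(topicForSink(ctx, "otlp_metrics"))                  // otlp_metrics-sink-123
	fmt.Println(topicForSink(context.Background(), "otlp_metrics")) // otlp_metrics
}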
Signed-off-by: Luiz Pegoraro * feat(fleet): add agentGroupIDs to gRPC response decoder * feat(sinker): fixing tests, will let mclcavalcante fix missing test. Signed-off-by: Luiz Pegoraro * feat(maestro): fixing maestro test Signed-off-by: Luiz Pegoraro * test(policies): fix unit tests, implement missing mocked methods * feat(maestro): fix conflicts Signed-off-by: Luiz Pegoraro Co-authored-by: Luiz Pegoraro Co-authored-by: mclcavalcante --- fleet/agent_service.go | 9 + fleet/agents.go | 2 + fleet/api/grpc/client.go | 11 +- fleet/api/grpc/endpoint.go | 18 +- fleet/api/grpc/responses.go | 9 +- fleet/api/grpc/server.go | 9 +- fleet/api/http/logging.go | 14 + fleet/api/http/metrics.go | 17 + fleet/pb/fleet.pb.go | 79 ++-- fleet/pb/fleet.proto | 2 +- fleet/postgres/agents.go | 2 +- fleet/postgres/agents_test.go | 4 + fleet/redis/producer/streams.go | 4 + go.mod | 68 ++-- go.sum | 169 ++++----- maestro/maestro.go | 17 +- maestro/maestro_service.go | 33 +- maestro/redis/consumer/hashset.go | 21 +- maestro/redis/consumer/streams.go | 7 +- policies/api/grpc/client.go | 23 ++ policies/api/grpc/endpoint.go | 25 ++ policies/api/grpc/responses.go | 4 + policies/api/grpc/server.go | 30 ++ policies/api/http/logging.go | 14 + policies/api/http/metrics.go | 17 + policies/mocks/policies.go | 15 + policies/pb/policies.pb.go | 342 +++++++++++++----- policies/pb/policies.proto | 10 + policies/pb/policies_grpc.pb.go | 36 ++ policies/policies.go | 6 + policies/policy_service.go | 7 + policies/postgres/datasets_test.go | 98 +++++ policies/postgres/policies.go | 36 ++ policies/redis/producer/streams.go | 4 + sinker/backend/pktvisor/pktvisor.go | 18 +- sinker/config_state_check.go | 4 +- sinker/message_handler.go | 28 +- sinker/otel/bridgeservice/bridge.go | 67 ++++ sinker/otel/components.go | 6 +- sinker/otel/kafkafanoutexporter/Makefile | 1 + sinker/otel/kafkafanoutexporter/README.md | 90 +++++ .../kafkafanoutexporter/authentication.go | 160 ++++++++ .../authentication_test.go | 141 ++++++++ sinker/otel/kafkafanoutexporter/config.go | 133 +++++++ .../otel/kafkafanoutexporter/config_test.go | 140 +++++++ sinker/otel/kafkafanoutexporter/doc.go | 16 + sinker/otel/kafkafanoutexporter/factory.go | 204 +++++++++++ .../otel/kafkafanoutexporter/factory_test.go | 174 +++++++++ .../internal/awsmsk/doc.go | 21 ++ .../internal/awsmsk/iam_scram_client.go | 193 ++++++++++ .../internal/awsmsk/iam_scram_client_test.go | 127 +++++++ .../kafkafanoutexporter/jaeger_marshaler.go | 105 ++++++ .../jaeger_marshaler_test.go | 82 +++++ .../kafkafanoutexporter/kafka_exporter.go | 231 ++++++++++++ sinker/otel/kafkafanoutexporter/marshaler.go | 85 +++++ .../kafkafanoutexporter/marshaler_test.go | 149 ++++++++ .../kafkafanoutexporter/pdata_marshaler.go | 109 ++++++ .../otel/kafkafanoutexporter/raw_marshaler.go | 95 +++++ .../raw_marshaller_test.go | 150 ++++++++ .../otel/kafkafanoutexporter/scram_client.go | 54 +++ .../kafkafanoutexporter/testdata/config.yaml | 40 ++ sinker/otel/orbreceiver/config.go | 3 + .../orbreceiver/internal/logs/otlp_test.go | 101 ------ .../orbreceiver/internal/metrics/otlp_test.go | 102 ------ .../otel/orbreceiver/internal/testdata/log.go | 6 +- .../orbreceiver/internal/testdata/metric.go | 60 +-- .../orbreceiver/internal/trace/otlp_test.go | 99 ----- sinker/otel/orbreceiver/otlp.go | 36 +- sinker/service.go | 14 +- 69 files changed, 3541 insertions(+), 665 deletions(-) create mode 100644 sinker/otel/bridgeservice/bridge.go create mode 100644 sinker/otel/kafkafanoutexporter/Makefile create mode 100644 
sinker/otel/kafkafanoutexporter/README.md create mode 100644 sinker/otel/kafkafanoutexporter/authentication.go create mode 100644 sinker/otel/kafkafanoutexporter/authentication_test.go create mode 100644 sinker/otel/kafkafanoutexporter/config.go create mode 100644 sinker/otel/kafkafanoutexporter/config_test.go create mode 100644 sinker/otel/kafkafanoutexporter/doc.go create mode 100644 sinker/otel/kafkafanoutexporter/factory.go create mode 100644 sinker/otel/kafkafanoutexporter/factory_test.go create mode 100644 sinker/otel/kafkafanoutexporter/internal/awsmsk/doc.go create mode 100644 sinker/otel/kafkafanoutexporter/internal/awsmsk/iam_scram_client.go create mode 100644 sinker/otel/kafkafanoutexporter/internal/awsmsk/iam_scram_client_test.go create mode 100644 sinker/otel/kafkafanoutexporter/jaeger_marshaler.go create mode 100644 sinker/otel/kafkafanoutexporter/jaeger_marshaler_test.go create mode 100644 sinker/otel/kafkafanoutexporter/kafka_exporter.go create mode 100644 sinker/otel/kafkafanoutexporter/marshaler.go create mode 100644 sinker/otel/kafkafanoutexporter/marshaler_test.go create mode 100644 sinker/otel/kafkafanoutexporter/pdata_marshaler.go create mode 100644 sinker/otel/kafkafanoutexporter/raw_marshaler.go create mode 100644 sinker/otel/kafkafanoutexporter/raw_marshaller_test.go create mode 100644 sinker/otel/kafkafanoutexporter/scram_client.go create mode 100644 sinker/otel/kafkafanoutexporter/testdata/config.yaml delete mode 100644 sinker/otel/orbreceiver/internal/logs/otlp_test.go delete mode 100644 sinker/otel/orbreceiver/internal/metrics/otlp_test.go delete mode 100644 sinker/otel/orbreceiver/internal/trace/otlp_test.go diff --git a/fleet/agent_service.go b/fleet/agent_service.go index dc8b6df50..d2096ab0a 100644 --- a/fleet/agent_service.go +++ b/fleet/agent_service.go @@ -298,3 +298,12 @@ func (svc fleetService) GetPolicyState(ctx context.Context, agent Agent) (map[st return policyState, nil } + +func (svc fleetService) ViewAgentMatchingGroupsByIDInternal(ctx context.Context, agentID string, ownerID string) (MatchingGroups, error) { + matchingGroups, err := svc.agentGroupRepository.RetrieveMatchingGroups(ctx, ownerID, agentID) + if err != nil { + return MatchingGroups{}, err + } + + return matchingGroups, nil +} diff --git a/fleet/agents.go b/fleet/agents.go index 886f27839..9b7f8d602 100644 --- a/fleet/agents.go +++ b/fleet/agents.go @@ -109,6 +109,8 @@ type AgentService interface { ResetAgent(ct context.Context, token string, agentID string) error // GetPolicyState get all policies state per agent in a formatted way from a given existent agent GetPolicyState(ctx context.Context, agent Agent) (map[string]interface{}, error) + // ViewAgentMatchingGroupsByIDInternal Groups this Agent currently belongs to, according to matching agent and group tags + ViewAgentMatchingGroupsByIDInternal(ctx context.Context, agentID string, ownerID string) (MatchingGroups, error) } type AgentRepository interface { diff --git a/fleet/api/grpc/client.go b/fleet/api/grpc/client.go index c54b72c22..6a1135533 100644 --- a/fleet/api/grpc/client.go +++ b/fleet/api/grpc/client.go @@ -90,7 +90,7 @@ func (g grpcClient) RetrieveAgentInfoByChannelID(ctx context.Context, in *pb.Age } ir := res.(agentInfoRes) - return &pb.AgentInfoRes{OwnerID: ir.ownerID, AgentName: ir.agentName, AgentTags: ir.agentTags, OrbTags: ir.orbTags}, nil + return &pb.AgentInfoRes{OwnerID: ir.ownerID, AgentName: ir.agentName, AgentTags: ir.agentTags, OrbTags: ir.orbTags, AgentGroupIDs: ir.agentGroupIDs}, nil } // NewClient 
returns new gRPC client instance. @@ -193,9 +193,10 @@ func encodeRetrieveAgentInfoByChannelIDRequest(ctx context.Context, grpcReq inte func decodeAgentInfoResponse(ctx context.Context, grpcRes interface{}) (interface{}, error) { res := grpcRes.(*pb.AgentInfoRes) return agentInfoRes{ - ownerID: res.GetOwnerID(), - agentName: res.GetAgentName(), - agentTags: res.GetAgentTags(), - orbTags: res.GetOrbTags(), + ownerID: res.GetOwnerID(), + agentName: res.GetAgentName(), + agentTags: res.GetAgentTags(), + orbTags: res.GetOrbTags(), + agentGroupIDs: res.GetAgentGroupIDs(), }, nil } diff --git a/fleet/api/grpc/endpoint.go b/fleet/api/grpc/endpoint.go index 76adec99f..a6001d0fd 100644 --- a/fleet/api/grpc/endpoint.go +++ b/fleet/api/grpc/endpoint.go @@ -69,7 +69,23 @@ func retrieveAgentInfoByChannelIDEndpoint(svc fleet.Service) endpoint.Endpoint { return nil, err } - res := agentInfoRes{ownerID: agent.MFOwnerID, agentName: agent.Name.String(), agentTags: agent.AgentTags, orbTags: agent.OrbTags} + matchingGroups, err := svc.ViewAgentMatchingGroupsByIDInternal(ctx, agent.MFThingID, agent.MFOwnerID) + if err != nil { + return nil, err + } + + var groupIDs []string + for _, group := range matchingGroups.Groups { + groupIDs = append(groupIDs, group.GroupID) + } + + res := agentInfoRes{ + ownerID: agent.MFOwnerID, + agentName: agent.Name.String(), + agentTags: agent.AgentTags, + orbTags: agent.OrbTags, + agentGroupIDs: groupIDs, + } return res, nil } } diff --git a/fleet/api/grpc/responses.go b/fleet/api/grpc/responses.go index 5b7e0ffaf..35876a957 100644 --- a/fleet/api/grpc/responses.go +++ b/fleet/api/grpc/responses.go @@ -18,10 +18,11 @@ type ownerRes struct { } type agentInfoRes struct { - ownerID string - agentName string - agentTags map[string]string - orbTags map[string]string + ownerID string + agentName string + agentTags map[string]string + orbTags map[string]string + agentGroupIDs []string } type emptyRes struct { diff --git a/fleet/api/grpc/server.go b/fleet/api/grpc/server.go index 596b06ddc..93c65a36c 100644 --- a/fleet/api/grpc/server.go +++ b/fleet/api/grpc/server.go @@ -130,10 +130,11 @@ func decodeRetrieveAgentInfoByChannelIDRequest(_ context.Context, grpcReq interf func encodeAgentInfoResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { res := grpcRes.(agentInfoRes) return &pb.AgentInfoRes{ - OwnerID: res.ownerID, - AgentName: res.agentName, - AgentTags: res.agentTags, - OrbTags: res.orbTags, + OwnerID: res.ownerID, + AgentName: res.agentName, + AgentTags: res.agentTags, + OrbTags: res.orbTags, + AgentGroupIDs: res.agentGroupIDs, }, nil } diff --git a/fleet/api/http/logging.go b/fleet/api/http/logging.go index f6b6958d3..571098a16 100644 --- a/fleet/api/http/logging.go +++ b/fleet/api/http/logging.go @@ -18,6 +18,20 @@ type loggingMiddleware struct { svc fleet.Service } +func (l loggingMiddleware) ViewAgentMatchingGroupsByIDInternal(ctx context.Context, agentID string, ownerID string) (matchingGroups fleet.MatchingGroups, err error) { + defer func(begin time.Time) { + if err != nil { + l.logger.Warn("method call: view_agent_matching_groups_by_id_internal", + zap.Error(err), + zap.Duration("duration", time.Since(begin))) + } else { + l.logger.Info("method call: view_agent_matching_groups_by_id_internal", + zap.Duration("duration", time.Since(begin))) + } + }(time.Now()) + return l.svc.ViewAgentMatchingGroupsByIDInternal(ctx, agentID, ownerID) +} + func (l loggingMiddleware) ResetAgent(ct context.Context, token string, agentID string) (err error) { defer func(begin
time.Time) { if err != nil { diff --git a/fleet/api/http/metrics.go b/fleet/api/http/metrics.go index 8019113ea..3a5e4c810 100644 --- a/fleet/api/http/metrics.go +++ b/fleet/api/http/metrics.go @@ -22,6 +22,23 @@ type metricsMiddleware struct { auth mainflux.AuthServiceClient } +func (m metricsMiddleware) ViewAgentMatchingGroupsByIDInternal(ctx context.Context, agentID string, ownerID string) (fleet.MatchingGroups, error) { + defer func(begin time.Time) { + labels := []string{ + "method", "viewAgentMatchingGroupsByIDInternal", + "owner_id", ownerID, + "agent_id", agentID, + "group_id", "", + } + + m.counter.With(labels...).Add(1) + m.latency.With(labels...).Observe(float64(time.Since(begin).Microseconds())) + + }(time.Now()) + + return m.svc.ViewAgentMatchingGroupsByIDInternal(ctx, agentID, ownerID) +} + func (m metricsMiddleware) ResetAgent(ct context.Context, token string, agentID string) error { ownerID, err := m.identify(token) if err != nil { diff --git a/fleet/pb/fleet.pb.go b/fleet/pb/fleet.pb.go index 10f75b62f..43abdf1f4 100644 --- a/fleet/pb/fleet.pb.go +++ b/fleet/pb/fleet.pb.go @@ -410,10 +410,11 @@ type AgentInfoRes struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - OwnerID string `protobuf:"bytes,1,opt,name=ownerID,proto3" json:"ownerID,omitempty"` - AgentName string `protobuf:"bytes,2,opt,name=agentName,proto3" json:"agentName,omitempty"` - AgentTags map[string]string `protobuf:"bytes,3,rep,name=agentTags,proto3" json:"agentTags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - OrbTags map[string]string `protobuf:"bytes,4,rep,name=orbTags,proto3" json:"orbTags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + OwnerID string `protobuf:"bytes,1,opt,name=ownerID,proto3" json:"ownerID,omitempty"` + AgentName string `protobuf:"bytes,2,opt,name=agentName,proto3" json:"agentName,omitempty"` + AgentTags map[string]string `protobuf:"bytes,3,rep,name=agentTags,proto3" json:"agentTags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + OrbTags map[string]string `protobuf:"bytes,4,rep,name=orbTags,proto3" json:"orbTags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + AgentGroupIDs []string `protobuf:"bytes,5,rep,name=agentGroupIDs,proto3" json:"agentGroupIDs,omitempty"` } func (x *AgentInfoRes) Reset() { @@ -476,6 +477,13 @@ func (x *AgentInfoRes) GetOrbTags() map[string]string { return nil } +func (x *AgentInfoRes) GetAgentGroupIDs() []string { + if x != nil { + return x.AgentGroupIDs + } + return nil +} + var File_fleet_pb_fleet_proto protoreflect.FileDescriptor var file_fleet_pb_fleet_proto_rawDesc = []byte{ @@ -510,7 +518,7 @@ var file_fleet_pb_fleet_proto_rawDesc = []byte{ 0x18, 0x0a, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x67, - 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xbe, 0x02, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, + 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xe4, 0x02, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 
0x12, 0x1c, 0x0a, 0x09, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, @@ -522,35 +530,38 @@ var file_fleet_pb_fleet_proto_rawDesc = []byte{ 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x07, 0x6f, 0x72, 0x62, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x2e, 0x4f, 0x72, 0x62, 0x54, 0x61, 0x67, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6f, 0x72, 0x62, 0x54, 0x61, 0x67, 0x73, 0x1a, 0x3c, - 0x0a, 0x0e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, - 0x4f, 0x72, 0x62, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xb1, 0x02, 0x0a, 0x0c, 0x46, 0x6c, 0x65, - 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x74, - 0x72, 0x69, 0x65, 0x76, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x13, 0x2e, 0x66, 0x6c, 0x65, - 0x65, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, - 0x0f, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, - 0x22, 0x00, 0x12, 0x46, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x41, 0x67, - 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x18, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, - 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x49, 0x44, 0x52, - 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, - 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x18, 0x52, 0x65, - 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x79, 0x43, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x49, 0x44, 0x12, 0x1a, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2e, 0x4f, - 0x77, 0x6e, 0x65, 0x72, 0x42, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x49, 0x44, 0x52, - 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x1c, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, - 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x79, 0x43, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x49, 0x44, 0x12, 0x1e, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2e, 0x41, 0x67, - 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x13, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2e, 0x41, 0x67, - 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x22, 0x00, 0x42, 0x0a, 0x5a, 0x08, - 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6f, 0x72, 0x62, 0x54, 0x61, 0x67, 0x73, 0x12, 0x24, + 0x0a, 0x0d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x67, 0x65, 0x6e, 0x74, 
0x47, 0x72, 0x6f, 0x75, + 0x70, 0x49, 0x44, 0x73, 0x1a, 0x3c, 0x0a, 0x0e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x67, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x4f, 0x72, 0x62, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xb1, + 0x02, 0x0a, 0x0c, 0x46, 0x6c, 0x65, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, + 0x12, 0x13, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x42, 0x79, + 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2e, 0x41, 0x67, + 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, + 0x69, 0x65, 0x76, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x18, + 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, + 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x22, 0x00, + 0x12, 0x49, 0x0a, 0x18, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x4f, 0x77, 0x6e, 0x65, + 0x72, 0x42, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x49, 0x44, 0x12, 0x1a, 0x2e, 0x66, + 0x6c, 0x65, 0x65, 0x74, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x79, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x66, 0x6c, 0x65, 0x65, 0x74, + 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x1c, 0x52, + 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x42, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x49, 0x44, 0x12, 0x1e, 0x2e, 0x66, 0x6c, + 0x65, 0x65, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x79, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x13, 0x2e, 0x66, 0x6c, + 0x65, 0x65, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, + 0x22, 0x00, 0x42, 0x0a, 0x5a, 0x08, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x2f, 0x70, 0x62, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/fleet/pb/fleet.proto b/fleet/pb/fleet.proto index 8bfa52b94..f03b7fa1f 100644 --- a/fleet/pb/fleet.proto +++ b/fleet/pb/fleet.proto @@ -50,5 +50,5 @@ message AgentInfoRes { string agentName = 2; map agentTags = 3; map orbTags = 4; + repeated string agentGroupIDs = 5; } - diff --git a/fleet/postgres/agents.go b/fleet/postgres/agents.go index f867ea02e..234ad72c4 100644 --- a/fleet/postgres/agents.go +++ b/fleet/postgres/agents.go @@ -460,7 +460,7 @@ func (r agentRepository) RetrieveAgentMetadataByOwner(ctx context.Context, owner } func (r agentRepository) RetrieveAgentInfoByChannelID(ctx context.Context, channelID string) (fleet.Agent, error) { - q := `select mf_owner_id, name, agent_tags, orb_tags from agents where 
mf_channel_id = :mf_channel_id limit 1` + q := `select mf_owner_id, name, agent_tags, orb_tags, mf_thing_id from agents where mf_channel_id = :mf_channel_id limit 1` params := map[string]interface{}{ "mf_channel_id": channelID, diff --git a/fleet/postgres/agents_test.go b/fleet/postgres/agents_test.go index 5fa348909..71974a279 100644 --- a/fleet/postgres/agents_test.go +++ b/fleet/postgres/agents_test.go @@ -996,6 +996,7 @@ func TestRetrieveAgentInfoByChannelID(t *testing.T) { name string agentTags types.Tags orbTags types.Tags + agentID string err error }{ "retrieve existing agent info by channelID": { @@ -1004,10 +1005,12 @@ func TestRetrieveAgentInfoByChannelID(t *testing.T) { name: nameID.String(), agentTags: agent.AgentTags, orbTags: agent.OrbTags, + agentID: agent.MFThingID, err: nil, }, "retrieve existent agent info by non-existent channelID": { channelID: thID.String(), + agentID: "", ownerID: "", name: "", agentTags: nil, @@ -1024,6 +1027,7 @@ func TestRetrieveAgentInfoByChannelID(t *testing.T) { assert.Equal(t, tc.ownerID, ag.MFOwnerID, fmt.Sprintf("%s: expected %s got %s\n", desc, tc.ownerID, ag.MFOwnerID)) assert.Equal(t, tc.agentTags, ag.AgentTags, fmt.Sprintf("%s: expected %s got %s\n", desc, tc.agentTags, ag.AgentTags)) assert.Equal(t, tc.orbTags, ag.OrbTags, fmt.Sprintf("%s: expected %s got %s\n", desc, tc.orbTags, ag.OrbTags)) + assert.Equal(t, tc.agentID, ag.MFThingID, fmt.Sprintf("%s: expected %s got %s\n", desc, tc.agentID, ag.MFThingID)) } assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) }) diff --git a/fleet/redis/producer/streams.go b/fleet/redis/producer/streams.go index 081106752..67ecc970e 100644 --- a/fleet/redis/producer/streams.go +++ b/fleet/redis/producer/streams.go @@ -28,6 +28,10 @@ type eventStore struct { logger *zap.Logger } +func (es eventStore) ViewAgentMatchingGroupsByIDInternal(ctx context.Context, agentID string, ownerID string) (fleet.MatchingGroups, error) { + return es.svc.ViewAgentMatchingGroupsByIDInternal(ctx, agentID, ownerID) +} + func (es eventStore) ResetAgent(ct context.Context, token string, agentID string) error { return es.svc.ResetAgent(ct, token, agentID) } diff --git a/go.mod b/go.mod index 713086b94..a80251249 100644 --- a/go.mod +++ b/go.mod @@ -30,8 +30,9 @@ require ( github.com/stretchr/testify v1.8.0 github.com/uber/jaeger-client-go v2.30.0+incompatible go.uber.org/zap v1.23.0 - google.golang.org/grpc v1.49.0 + google.golang.org/grpc v1.50.0 google.golang.org/protobuf v1.28.1 + gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -39,16 +40,16 @@ require ( require ( github.com/gogo/protobuf v1.3.2 github.com/google/uuid v1.3.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.59.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.59.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.60.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.62.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.62.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.62.0 github.com/prometheus/common v0.37.0 - go.opentelemetry.io/collector v0.60.0 - go.opentelemetry.io/collector/pdata v0.60.0 - go.opentelemetry.io/otel/metric v0.31.0 - go.opentelemetry.io/otel/trace v1.9.0 - google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276 - k8s.io/client-go v0.25.0 
+ go.opentelemetry.io/collector v0.62.0 + go.opentelemetry.io/collector/pdata v0.62.0 + go.opentelemetry.io/otel/metric v0.32.1 + go.opentelemetry.io/otel/trace v1.10.0 + google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de + k8s.io/client-go v0.25.2 ) require ( @@ -66,7 +67,7 @@ require ( github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/armon/go-metrics v0.3.10 // indirect - github.com/aws/aws-sdk-go v1.44.96 // indirect + github.com/aws/aws-sdk-go v1.44.114 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect @@ -103,10 +104,10 @@ require ( github.com/gophercloud/gophercloud v0.25.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 // indirect - github.com/hashicorp/consul/api v1.14.0 // indirect + github.com/hashicorp/consul/api v1.15.2 // indirect github.com/hashicorp/cronexpr v1.1.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.3.0 // indirect + github.com/hashicorp/go-hclog v1.3.1 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect @@ -145,7 +146,7 @@ require ( github.com/nats-io/nkeys v0.3.0 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/oklog/ulid/v2 v2.0.2 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.60.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.62.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runc v1.1.0 // indirect @@ -171,27 +172,26 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/otel v1.9.0 // indirect + go.opentelemetry.io/otel v1.10.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/goleak v1.2.0 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced // indirect - golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 // indirect - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect - golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 // indirect + golang.org/x/net v0.0.0-20220927171203-f486391704dc // indirect + golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect + golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 // indirect + golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 // indirect golang.org/x/tools v0.1.12 // indirect - google.golang.org/api v0.95.0 // indirect + google.golang.org/api v0.98.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/api v0.25.0 // indirect - k8s.io/apimachinery v0.25.0 // 
indirect + k8s.io/api v0.25.2 // indirect + k8s.io/apimachinery v0.25.2 // indirect k8s.io/klog/v2 v2.70.1 // indirect k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect @@ -200,9 +200,9 @@ require ( //These libs are used to allow orb extend opentelemetry features require ( - cloud.google.com/go/compute v1.9.0 // indirect - github.com/Shopify/sarama v1.36.0 // indirect - github.com/apache/thrift v0.16.0 // indirect + cloud.google.com/go/compute v1.10.0 // indirect + github.com/Shopify/sarama v1.37.2 // indirect + github.com/apache/thrift v0.17.0 // indirect github.com/eapache/go-resiliency v1.3.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect @@ -210,13 +210,13 @@ require ( github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.21.1 // indirect + github.com/go-openapi/swag v0.22.1 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/jaegertracing/jaeger v1.37.0 // indirect + github.com/jaegertracing/jaeger v1.38.1 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect @@ -225,18 +225,18 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.60.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.59.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.60.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.62.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.62.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.62.0 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect - github.com/pierrec/lz4/v4 v4.1.15 // indirect + github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rs/cors v1.8.2 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.1 // indirect github.com/xdg-go/stringprep v1.0.3 // indirect - go.opentelemetry.io/collector/semconv v0.60.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0 // indirect + go.opentelemetry.io/collector/semconv v0.62.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.1 // indirect k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect ) diff --git a/go.sum b/go.sum index 77fc99a05..6b9d2d03c 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,8 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= 
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.9.0 h1:ED/FP4xv8GJw63v556/ASNc1CeeLUO2Bs8nzaHchkHg= -cloud.google.com/go/compute v1.9.0/go.mod h1:lWv1h/zUWTm/LozzfTJhBSkd6ShQq8la8VeeuOEGxfY= +cloud.google.com/go/compute v1.10.0 h1:aoLIYaA1fX3ywihqpBk2APQKOo20nXsp1GEZQbx5Jk4= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= @@ -99,10 +99,9 @@ github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpz github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Shopify/sarama v1.36.0 h1:0OJs3eCcnezkWniVjwBbCJVaa0B1k7ImCRS3WN6NsSk= -github.com/Shopify/sarama v1.36.0/go.mod h1:9glG3eX83tgVYJ5aVtrjVUnEsOPqQIBGx1BWfN+X51I= -github.com/Shopify/toxiproxy/v2 v2.4.0 h1:O1e4Jfvr/hefNTNu+8VtdEG5lSeamJRo4aKhMOKNM64= -github.com/Shopify/toxiproxy/v2 v2.4.0/go.mod h1:3ilnjng821bkozDRxNoo64oI/DKqM+rOyJzb564+bvg= +github.com/Shopify/sarama v1.37.2 h1:LoBbU0yJPte0cE5TZCGdlzZRmMgMtZU/XgnUKZg9Cv4= +github.com/Shopify/sarama v1.37.2/go.mod h1:Nxye/E+YPru//Bpaorfhc3JsSGYwCaDDj+R4bK52U5o= +github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -112,8 +111,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= -github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/apache/thrift v0.17.0 h1:cMd2aj52n+8VoAtvSvLn4kDC3aZ6IAkBuqWQ2IDu7wo= +github.com/apache/thrift v0.17.0/go.mod h1:OLxhMRJxomX+1I/KUw03qoV3mMz16BwaKI+d4fPBx7Q= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -122,8 +121,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.96 h1:S9paaqnJ0AJ95t5AB+iK8RM6YNZN0W0Lek1gOVJsEr8= -github.com/aws/aws-sdk-go v1.44.96/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.114 h1:plIkWc/RsHr3DXBj4MEw9sEW4CcL/e2ryokc+CKyq1I= +github.com/aws/aws-sdk-go v1.44.114/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= @@ -253,7 +252,6 @@ github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -302,8 +300,8 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34 github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.1 h1:S6xFhsBKAtvfphnJwRzeCh3OEGsTL/crXdEetSxLs0Q= +github.com/go-openapi/swag v0.22.1/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= @@ -446,7 +444,6 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 github.com/gophercloud/gophercloud v0.25.0 h1:C3Oae7y0fUVQGSsBrb3zliAjdX+riCSEh4lNMejFNI4= github.com/gophercloud/gophercloud v0.25.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -462,12 +459,12 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/hashicorp/consul/api v1.1.0/go.mod 
h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= -github.com/hashicorp/consul/api v1.14.0 h1:Y64GIJ8hYTu+tuGekwO4G4ardXoiCivX9wv1iP/kihk= -github.com/hashicorp/consul/api v1.14.0/go.mod h1:bcaw5CSZ7NE9qfOfKCI1xb7ZKjzu/MyvQkCLTfqLqxQ= +github.com/hashicorp/consul/api v1.15.2 h1:3Q/pDqvJ7udgt/60QOOW/p/PeKioQN+ncYzzCdN2av0= +github.com/hashicorp/consul/api v1.15.2/go.mod h1:v6nvB10borjOuIwNRZYPZiHKrTM/AyrGtd0WVVodKM8= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/consul/sdk v0.10.0 h1:rGLEh2AWK4K0KCMvqWAz2EYxQqgciIfMagWZ0nVe5MI= -github.com/hashicorp/consul/sdk v0.10.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= +github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -482,8 +479,8 @@ github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.3.0 h1:G0ACM8Z2WilWgPv3Vdzwm3V0BQu/kSmrkVtpe1fy9do= -github.com/hashicorp/go-hclog v1.3.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= +github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= @@ -556,8 +553,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/ionos-cloud/sdk-go/v6 v6.1.2 h1:es5R5sVmjHFrYNBbJfAeHF+16GheaJMyc63xWxIAec4= github.com/ionos-cloud/sdk-go/v6 v6.1.2/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME= -github.com/jaegertracing/jaeger v1.37.0 h1:/EY0n/IUFT/NozEM78bzW2Lm2dPoKuIF/9c9UcoMBxQ= -github.com/jaegertracing/jaeger v1.37.0/go.mod h1:2tPPMcktsOFhmsiyxoYnUE0QAlP4UC6DEsC2jdllt5g= +github.com/jaegertracing/jaeger v1.38.1 h1:IunKLJl9Imgpxh3ZL+SD+E7KHYAkaeiOnjay9YeUl3o= +github.com/jaegertracing/jaeger v1.38.1/go.mod h1:T5RFOZgRQBXR9rpQq8HsiIg39gu0DAYGQbDzpKw9gU8= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -599,7 +596,6 @@ github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1q github.com/kisielk/errcheck v1.1.0/go.mod 
h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/knadh/koanf v1.4.3 h1:rSJcSH5LSFhvzBRsAYfT3k7eLP0I4UxeZqjtAatk+wc= @@ -615,7 +611,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -635,7 +630,6 @@ github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamh github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mainflux/mainflux v0.0.0-20220415135135-92d8fb99bf82 h1:UWQLBZ7ychamG9uuBtCwVmt1tBQxPQuJ1VszC9zYFS8= @@ -729,7 +723,6 @@ github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= @@ -744,28 +737,28 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= +github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= 
-github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.59.0 h1:qE14rXsxMYeCcx6lqF/YtBc0TejmoKZZPzKKUFXLGq0= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.59.0/go.mod h1:ZrDHjmCxQgiJAYZAojjgnlFBi2+9PxtqMypUvzVxPZE= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.59.0 h1:TGu60Sg694dSzs70tixCJpPxg2aJR3GD8xnt6+B5lFw= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.59.0/go.mod h1:uQ0YiNAUhYjefM1SyxopafwTmEmzSghlzKtp8Gg0ys0= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.60.0 h1:HptgZUqv7gCEUdCuW+L3QSpqBt4TOSe+KqZuKZDf7hM= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.60.0 h1:rmwITDHPgpZ1US1AjHrPp1qUsox+OEwQ+PHsQkbO9v0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.60.0 h1:L5ug/ZKhVnyh1Bmygph/6dKS32VhsJeb7ggYbEV9EmY= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.60.0/go.mod h1:XJ02iUcUxcP94hMUkwofboLNoUHKkrb/yb1S/cNL/Nk= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.60.0 h1:B7xhbyyT1zvdYf5ne0H9fozF6fx2LoKoT5K71hyqReY= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.60.0/go.mod h1:s01aZZ3YqrNbe78Ff/3OLY2s08VjrlHCZOIxceLLzQc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.59.0 h1:+DOhvw+RKnc5QwP5h0y+W55hCvsTsccHVvF3vm1ZViI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.59.0/go.mod h1:hWJ/9y+vs86TyeTPjW1V9xicDVKWrqx+3iXT2UWKGWw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.60.0 h1:2l/uxdfAb/8hW7UW36IvJPd3HjWH4/0jidE0f8kLWGs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.60.0/go.mod h1:1r0L4iFcnxpJy23BiadqwZjTLYNd06oGUib8/WW2DqM= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.60.0 h1:PP9vbcUDTvnTg3rLOEdqEE9rdqTxki6m4KGG6Q+THyg= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.60.0 h1:mtA27VH1q9Z10KKmbRqHksQVNx1J3yR72y5/zYPcVdQ= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.60.0/go.mod h1:mrAUH2BfOgziLQg3CnN1t0AxsYNxM97dOxde7LIDOXs= +github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.62.0 h1:/jMfx1V+TfLULWVo+nJbnUsbsC4OvDY5NCmwmIyAU5o= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.62.0/go.mod h1:w/y5lJ1Emnf2imDVqImSETnMP7X+FhTz+Ucxiq/Obi4= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.62.0 h1:EoidrEk6Dmap+Cw+lXipNL7IVGicS0N6V+oCvesIj/c= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.62.0/go.mod h1:4BwkK9Fb1xZDxmXt7gSm5nxCxtVWJf61/UaCt54gVjU= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.62.0 h1:PMUgwDspM+2DX2Ol8Tj/jUBQqzvykVwnFily/HjRDPA= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.62.0 h1:4VQYgfWfPFUyTMIJe0Nkxx8UR0P8ZWtz3e3sg21XIUo= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.62.0 
h1:AYbWxIOsE+tFU62t0WjGSy8YrIKgvKl82oIA5ub65fc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.62.0/go.mod h1:ykdZjo119U+37DyYNpDjiGAi/ZaM99K91Zs6d5/t/5M= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.62.0 h1:IrHM8AUQJ07Y4EtX4aZxODYqDzJkbLBJXLD5RtubpKw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.62.0/go.mod h1:WkYDuIOuosTrqBgH8CwglhkKKd0yJH2ZIPnC4xKtrR4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.62.0 h1:Oy2PdppooZrcUiBqHAOHrKK+rk+/+wScXEPMVKdDkcc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.62.0/go.mod h1:WgMWz7+zb5KKN6BDx8rL+88M73BxvjQiRsgK9yEavis= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.62.0 h1:xad1PVsJAgkkkBl5hLmG+LqPRwOG6sl0L83S5lDeCDw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.62.0/go.mod h1:TwiXNeZvqw0ItuREnKkweLQHG9fdix/kjeo9n8kP8iU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.62.0 h1:BG2E6rZ+pbc+o/E7qU62YBFAKPIH6s1qHW4oFUbm54M= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.62.0 h1:KVqFuTSWGjkLa0bln84apNgcmOfg6y4gtOGqUpWJajc= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.62.0/go.mod h1:+hpib0V3pRKeA0sNJ1IG69T5aDBVkhz0H/Q0hSFBN/c= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -795,8 +788,8 @@ github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= +github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -859,7 +852,6 @@ github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzG github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= @@ -877,7 
+869,7 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/shirou/gopsutil/v3 v3.22.8 h1:a4s3hXogo5mE2PfdfJIonDbstO/P+9JszdfhAHSzD9Y= +github.com/shirou/gopsutil/v3 v3.22.9 h1:yibtJhIVEMcdw+tCTbOPiF1VcsuDeTE4utJ8Dm4c5eA= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -948,7 +940,6 @@ github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVK github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= @@ -994,24 +985,24 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.60.0 h1:rHndW/xILGjNoFaYIvwYpngZnRWw1oQT6GLtzxIs7pw= -go.opentelemetry.io/collector v0.60.0/go.mod h1:n2KBSgs7AakuedVxLR/Tayl3EEztmngrrjZBsYS+qBI= -go.opentelemetry.io/collector/pdata v0.60.0 h1:jCNR5jtUom2FcUu30h4tw7enZytwGnXX6fs/K2FM/A0= -go.opentelemetry.io/collector/pdata v0.60.0/go.mod h1:0hqgNMRneVXaLNelv3q0XKJbyBW9aMDwyC15pKd30+E= -go.opentelemetry.io/collector/semconv v0.60.0 h1:xy6HSukzA5CC8SR4DvFyLd28EFEOnQgxtpU1bSCM0qY= -go.opentelemetry.io/collector/semconv v0.60.0/go.mod h1:aRkHuJ/OshtDFYluKEtnG5nkKTsy1HZuvZVHmakx+Vo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0 h1:9NkMW03wwEzPtP/KciZ4Ozu/Uz5ZA7kfqXJIObnrjGU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0/go.mod h1:548ZsYzmT4PL4zWKRd8q/N4z0Wxzn/ZxUE+lkEpwWQA= -go.opentelemetry.io/contrib/propagators/b3 v1.9.0 h1:Lzb9zU98jCE2kyfCjWfSSsiQoGtvBL+COxvUBf7FNhU= -go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw= -go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo= -go.opentelemetry.io/otel/exporters/prometheus v0.31.0 h1:jwtnOGBM8dIty5AVZ+9ZCzZexCea3aVKmUfZAQcHqxs= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.9.0 h1:LNXp1vrr83fNXTHgU8eO89mhzxb/bbWAsHG6fNf3qWo= -go.opentelemetry.io/otel/sdk/metric v0.31.0 h1:2sZx4R43ZMhJdteKAlKoHvRgrMp53V1aRxvEf5lCq8Q= 
-go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc= -go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo= +go.opentelemetry.io/collector v0.62.0 h1:SdCUsT69mHhEkYKPTv3urzMemwF8pbX7zqSYP5Hyuco= +go.opentelemetry.io/collector v0.62.0/go.mod h1:Qs172nQ5pfK13EvDPNpYPLB2nvp4sR4MfF9eciUMwWE= +go.opentelemetry.io/collector/pdata v0.62.0 h1:7M2512nLih9UXR+DvWo84UQFES9M7Hh5lR3odxhAGUY= +go.opentelemetry.io/collector/pdata v0.62.0/go.mod h1:ziGuxiR4TVSZ7pT+j1t58zYFVQtWwiWi9ng9EFmp5U0= +go.opentelemetry.io/collector/semconv v0.62.0 h1:Zc5Nt+kxVZKftwkOFo9VUAVPILCtLasvdkqV2fJIH0Y= +go.opentelemetry.io/collector/semconv v0.62.0/go.mod h1:aRkHuJ/OshtDFYluKEtnG5nkKTsy1HZuvZVHmakx+Vo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.1 h1:ledXJmnPfXGbE/gO4/PWSBsJGonnq6czWLrdHfQxeTU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.1/go.mod h1:W6/Lb2w3nD2K/l+4SzaqJUr2Ibj2uHA+PdFZlO5cWus= +go.opentelemetry.io/contrib/propagators/b3 v1.10.0 h1:6AD2VV8edRdEYNaD8cNckpzgdMLU2kbV9OYyxt2kvCg= +go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +go.opentelemetry.io/otel/exporters/prometheus v0.32.1 h1:1+iSNGGCYoDAMuFDN2M+sYTwa5/wApb7yO/GpW5Vtzg= +go.opentelemetry.io/otel/metric v0.32.1 h1:ftff5LSBCIDwL0UkhBuDg8j9NNxx2IusvJ18q9h6RC4= +go.opentelemetry.io/otel/metric v0.32.1/go.mod h1:iLPP7FaKMAD5BIxJ2VX7f2KTuz//0QK2hEUyti5psqQ= +go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +go.opentelemetry.io/otel/sdk/metric v0.32.1 h1:S6AqzulzGQl+sTpYeAoVLw1SJbc2LYuKCMUmfEKG+zM= +go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1147,8 +1138,9 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= -golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced h1:3dYNDff0VT5xj+mbj2XucFst9WKk6PdGOrb9n+SbIvw= -golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220927171203-f486391704dc h1:FxpXZdoBqT8RjqTy6i1E8nXHhW21wK7ptQ/EPIGxzPQ= +golang.org/x/net v0.0.0-20220927171203-f486391704dc/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1170,8 +1162,9 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod 
h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 h1:2o1E+E8TpNLklK9nHiPiK1uzIYrIHt+cQx3ynCwq9V8= golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1184,8 +1177,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 h1:ZrnxWX62AgTKOSagEqxvb3ffipvEDX2pl7E1TdqLqIc= +golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1286,11 +1279,10 @@ golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= -golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2 h1:wM1k/lXfpc5HdkJJyW9GELpd8ERGdnh8sMGL6Gzq3Ho= +golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1422,8 +1414,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.95.0 h1:d1c24AAS01DYqXreBeuVV7ewY/U8Mnhh47pwtsgVtYg= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.98.0 h1:yxZrcxXESimy6r6mdL5Q6EnZwmewDJK2dVg3g75s5Dg= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1517,8 +1509,8 @@ google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276 h1:7PEE9xCtufpGJzrqweakEEnTh7YFELmnKm/ee+5jmfQ= -google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de h1:5ANeKFmGdtiputJJYeUVg8nTGA/1bEirx4CgzcnPSx8= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1554,8 +1546,8 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11 google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0 h1:fPVVDxY9w++VjTZsYvXWqEf9Rqar/e+9zYfxKK+W+YU= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1578,7 +1570,6 @@ gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUy gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -1618,12 +1609,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.0 h1:H+Q4ma2U/ww0iGB78ijZx6DRByPz6/733jIuFpX70e0= -k8s.io/api v0.25.0/go.mod h1:ttceV1GyV1i1rnmvzT3BST08N6nGt+dudGrquzVQWPk= -k8s.io/apimachinery v0.25.0 h1:MlP0r6+3XbkUG2itd6vp3oxbtdQLQI94fD5gCS+gnoU= -k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0= -k8s.io/client-go v0.25.0 h1:CVWIaCETLMBNiTUta3d5nzRbXvY5Hy9Dpl+VvREpu5E= -k8s.io/client-go v0.25.0/go.mod h1:lxykvypVfKilxhTklov0wz1FoaUZ8X4EwbhS6rpRfN8= +k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8= +k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0= +k8s.io/apimachinery v0.25.2 h1:WbxfAjCx+AeN8Ilp9joWnyJ6xu9OMeS/fsfjK/5zaQs= +k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA= +k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo= +k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= diff --git a/maestro/maestro.go b/maestro/maestro.go index 1e2731da4..4bf22cc76 100644 --- a/maestro/maestro.go +++ b/maestro/maestro.go @@ -49,13 +49,20 @@ type Maestro struct { Created time.Time } +type SinkConfig struct { + Id string + Url string + Username string + Password string +} + type MaestroService interface { - // CreateOtelCollector - create a existing collector by id - CreateOtelCollector(ctx context.Context, sinkID string, msg string, ownerID string) error + // CreateOtelCollector - create an existing collector by id + CreateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error - // DeleteOtelCollector - delete a existing collector by id + // DeleteOtelCollector - delete an existing collector by id DeleteOtelCollector(ctx context.Context, sinkID string) error - // UpdateOtelCollector - update a existing collector by id - UpdateOtelCollector(ctx context.Context, sinkID string, msg string, ownerID string) error + // UpdateOtelCollector - update an existing collector by id + UpdateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error } diff --git a/maestro/maestro_service.go b/maestro/maestro_service.go index 683d8a173..55d702b4b 100644 --- a/maestro/maestro_service.go +++ b/maestro/maestro_service.go @@ -23,14 +23,12 @@ var ( ErrConflictMaestro = errors.New("Otel collector already exists") ) -func 
(svc maestroService) collectorDeploy(operation, namespace, manifest, sinkId, sinkUrl, sinkUsername, sinkPassword string) error { - manifest, err := GetDeploymentJson(sinkId, sinkUrl, sinkUsername, sinkPassword) - if err != nil { - svc.logger.Error("failed to get deployment json", zap.Error(err)) - return err - } +const namespace = "otelcollectors" + +func (svc maestroService) collectorDeploy(ctx context.Context, operation, sinkId, manifest string) error { + fileContent := []byte(manifest) - err = os.WriteFile("/tmp/otel-collector-"+sinkId+".json", fileContent, 0644) + err := os.WriteFile("/tmp/otel-collector-"+sinkId+".json", fileContent, 0644) if err != nil { svc.logger.Error("failed to write file content", zap.Error(err)) return err } @@ -68,23 +66,26 @@ func (svc maestroService) collectorDeploy(operation, namespace, manifest, sinkId return nil } -func (svc maestroService) getConfigFromSinkId(id string) (sinkUrl, sinkUsername, sinkPassword string) { - return "", "", "" +func (svc maestroService) getConfigFromSinkId(config SinkConfig) (sinkID, sinkUrl, sinkUsername, sinkPassword string) { + return config.Id, config.Url, config.Username, config.Password } -func (svc maestroService) CreateOtelCollector(ctx context.Context, sinkID string, msg string, ownerID string) error { - sinkUrl, sinkUsername, sinkPassword := svc.getConfigFromSinkId(sinkID) - err := svc.collectorDeploy("apply", "otelcollectors", k8sOtelCollector, sinkID, sinkUrl, sinkUsername, sinkPassword) +func (svc maestroService) CreateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error { + err := svc.collectorDeploy(ctx, "apply", sinkID, deploymentEntry) + if err != nil { return err } return nil } -func (svc maestroService) UpdateOtelCollector(ctx context.Context, sinkID string, msg string, ownerID string) error { - sinkUrl, sinkUsername, sinkPassword := svc.getConfigFromSinkId(sinkID) - err := svc.collectorDeploy("apply", "otelcollectors", k8sOtelCollector, sinkID, sinkUrl, sinkUsername, sinkPassword) +func (svc maestroService) UpdateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error { + err := svc.DeleteOtelCollector(ctx, sinkID) + if err != nil { + return err + } + err = svc.CreateOtelCollector(ctx, sinkID, deploymentEntry) if err != nil { return err } @@ -92,7 +93,7 @@ } func (svc maestroService) DeleteOtelCollector(ctx context.Context, sinkID string) error { - err := svc.collectorDeploy("delete", "otelcollectors", k8sOtelCollector, sinkID, "", "", "") + err := svc.collectorDeploy(ctx, "delete", sinkID, "") if err != nil { return err } diff --git a/maestro/redis/consumer/hashset.go b/maestro/redis/consumer/hashset.go index 762402036..a5627601b 100644 --- a/maestro/redis/consumer/hashset.go +++ b/maestro/redis/consumer/hashset.go @@ -9,6 +9,15 @@ import ( const deploymentKey = "orb.sinks.deployment" +func (es eventStore) GetDeploymentEntryFromSinkId(ctx context.Context, sinkId string) (string, error) { + cmd := es.client.HGet(ctx, deploymentKey, sinkId) + if err := cmd.Err(); err != nil { + es.logger.Error("error during redis reading of SinkId", zap.String("sink-id", sinkId), zap.Error(err)) + return "", err + } + return cmd.Val(), nil +} + // handleSinksDeleteCollector will delete Deployment Entry and force delete otel collector func (es eventStore) handleSinksDeleteCollector(ctx context.Context, event sinksUpdateEvent) error { es.logger.Info("Received maestro DELETE event from sinks ID=" + event.sinkID +
", Owner ID=" + event.ownerID) @@ -22,7 +31,7 @@ func (es eventStore) handleSinksDeleteCollector(ctx context.Context, event sinks // handleSinksCreateCollector will create Deployment Entry in Redis func (es eventStore) handleSinksCreateCollector(ctx context.Context, event sinksUpdateEvent) error { - es.logger.Info("Received maestro CREATE event from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) + es.logger.Info("Received event to Create DeploymentEntry from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) sinkUrl := event.config["sink_url"].(string) sinkUsername := event.config["username"].(string) sinkPassword := event.config["password"].(string) @@ -36,9 +45,10 @@ func (es eventStore) handleSinksCreateCollector(ctx context.Context, event sinks return nil } -// handleSinksUpdateCollector will update Deployment Entry in Redis +// handleSinksUpdateCollector will update Deployment Entry in Redis and force update otel collector func (es eventStore) handleSinksUpdateCollector(ctx context.Context, event sinksUpdateEvent) error { - es.logger.Info("Received maestro UPDATE event from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) + es.logger.Info("Received event to Update DeploymentEntry from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) + sinkUrl := event.config["sink_url"].(string) sinkUsername := event.config["username"].(string) sinkPassword := event.config["password"].(string) @@ -48,6 +58,11 @@ func (es eventStore) handleSinksUpdateCollector(ctx context.Context, event sinks return err } es.client.HSet(ctx, deploymentKey, event.sinkID, deploy) + err = es.maestroService.UpdateOtelCollector(ctx, event.sinkID, deploy) + if err != nil { + return err + } + return nil } diff --git a/maestro/redis/consumer/streams.go b/maestro/redis/consumer/streams.go index 12468223f..13e35626d 100644 --- a/maestro/redis/consumer/streams.go +++ b/maestro/redis/consumer/streams.go @@ -143,8 +143,11 @@ func (es eventStore) handleSinkerDeleteCollector(ctx context.Context, event sink // Create collector func (es eventStore) handleSinkerCreateCollector(ctx context.Context, event sinkerUpdateEvent) error { es.logger.Info("Received maestro CREATE event from sinker, sink state=" + event.state + ", Sink ID=" + event.sinkID + ", Owner ID=" + event.ownerID) - - err := es.maestroService.CreateOtelCollector(ctx, event.sinkID, event.state, event.ownerID) + deploymentEntry, err := es.GetDeploymentEntryFromSinkId(ctx, event.sinkID) + if err != nil { + return err + } + err = es.maestroService.CreateOtelCollector(ctx, event.sinkID, deploymentEntry) if err != nil { return err } diff --git a/policies/api/grpc/client.go b/policies/api/grpc/client.go index bcc3aa344..3a19f6297 100644 --- a/policies/api/grpc/client.go +++ b/policies/api/grpc/client.go @@ -27,6 +27,29 @@ type grpcClient struct { retrievePolicy endpoint.Endpoint retrievePoliciesByGroups endpoint.Endpoint retrieveDataset endpoint.Endpoint + retrieveDatasetsByGroups endpoint.Endpoint +} + +func (client grpcClient) RetrieveDatasetsByGroups(ctx context.Context, in *pb.DatasetsByGroupsReq, opts ...grpc.CallOption) (*pb.DatasetsRes, error) { + ctx, cancel := context.WithTimeout(ctx, client.timeout) + defer cancel() + + ar := accessByGroupIDReq{ + GroupIDs: in.GroupIDs, + OwnerID: in.OwnerID, + } + res, err := client.retrieveDatasetsByGroups(ctx, ar) + if err != nil { + return nil, err + } + + ir := res.(datasetListRes) + dsList := make([]*pb.DatasetRes, len(ir.datasets)) + for i, ds := range ir.datasets { + dsList[i] = 
&pb.DatasetRes{Id: ds.id, SinkIds: ds.sinkIDs, PolicyId: ds.policyID, AgentGroupId: ds.agentGroupID} + } + return &pb.DatasetsRes{DatasetList: dsList}, nil + } func (client grpcClient) RetrievePolicy(ctx context.Context, in *pb.PolicyByIDReq, opts ...grpc.CallOption) (*pb.PolicyRes, error) { diff --git a/policies/api/grpc/endpoint.go b/policies/api/grpc/endpoint.go index 74e707e63..6930ec85e 100644 --- a/policies/api/grpc/endpoint.go +++ b/policies/api/grpc/endpoint.go @@ -91,3 +91,28 @@ func retrieveDatasetEnpoint(svc policies.Service) endpoint.Endpoint { }, nil } } + +func retrieveDatasetsByGroupsEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(accessByGroupIDReq) + if err := req.validate(); err != nil { + return nil, err + } + + dsList, err := svc.ListDatasetsByGroupIDInternal(ctx, req.GroupIDs, req.OwnerID) + if err != nil { + return datasetListRes{}, err + } + datasets := make([]datasetRes, len(dsList)) + for i, ds := range dsList { + datasets[i] = datasetRes{ + id: ds.ID, + agentGroupID: ds.AgentGroupID, + sinkIDs: ds.SinkIDs, + policyID: ds.PolicyID, + } + } + + return datasetListRes{datasets: datasets}, nil + } +} diff --git a/policies/api/grpc/responses.go b/policies/api/grpc/responses.go index 9713ba483..bcc9e6dac 100644 --- a/policies/api/grpc/responses.go +++ b/policies/api/grpc/responses.go @@ -37,6 +37,10 @@ type datasetRes struct { sinkIDs []string } +type datasetListRes struct { + datasets []datasetRes +} + type emptyRes struct { err error } diff --git a/policies/api/grpc/server.go b/policies/api/grpc/server.go index 0e933d43b..c154a77c2 100644 --- a/policies/api/grpc/server.go +++ b/policies/api/grpc/server.go @@ -27,6 +27,7 @@ type grpcServer struct { retrievePolicy kitgrpc.Handler retrievePoliciesByGroups kitgrpc.Handler retrieveDataset kitgrpc.Handler + retrieveDatasetsByGroups kitgrpc.Handler } // NewServer returns new PolicyServiceServer instance. 
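The client, endpoint, and response changes above all funnel through the same mapping step: the endpoint layer's internal datasetRes slice is copied field by field into the generated pb.DatasetRes slice before it crosses the gRPC boundary. Below is a minimal, self-contained sketch of that copy, using stand-in structs rather than the real generated types (datasetRes, pbDatasetRes, and their field names here are illustrative placeholders mirroring the patch, not the actual pb package):

package main

import "fmt"

// Stand-ins for the endpoint-layer type and the generated protobuf type;
// both mirror the shapes used in the patch but are hypothetical here.
type datasetRes struct {
	id, agentGroupID, policyID string
	sinkIDs                    []string
}

type pbDatasetRes struct {
	Id, AgentGroupId, PolicyId string
	SinkIds                    []string
}

// encodeDatasetList mirrors encodeDatasetListResponse: preallocate the
// output slice with make, then copy each internal dataset into its wire form.
func encodeDatasetList(in []datasetRes) []*pbDatasetRes {
	out := make([]*pbDatasetRes, len(in))
	for i, ds := range in {
		out[i] = &pbDatasetRes{
			Id:           ds.id,
			AgentGroupId: ds.agentGroupID,
			PolicyId:     ds.policyID,
			SinkIds:      ds.sinkIDs,
		}
	}
	return out
}

func main() {
	wire := encodeDatasetList([]datasetRes{
		{id: "ds-1", agentGroupID: "grp-1", policyID: "pol-1", sinkIDs: []string{"sink-1"}},
	})
	fmt.Println(wire[0].Id, wire[0].SinkIds) // ds-1 [sink-1]
}

The same shape appears three times in the patch (the grpcClient's RetrieveDatasetsByGroups, retrieveDatasetsByGroupsEndpoint, and encodeDatasetListResponse), which is what keeps protobuf types confined to the transport layer while the service works with plain internal structs.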
@@ -47,6 +48,11 @@ func NewServer(tracer opentracing.Tracer, svc policies.Service) pb.PolicyService decodeRetrieveDatasetRequest, encodeDatasetResponse, ), + retrieveDatasetsByGroups: kitgrpc.NewServer( + kitot.TraceServer(tracer, "retrieve_datasets_by_groups")(retrieveDatasetsByGroupsEndpoint(svc)), + decodeRetrieveDatasetsByGroupRequest, + encodeDatasetListResponse, + ), } } @@ -77,6 +83,15 @@ func (gs *grpcServer) RetrieveDataset(ctx context.Context, req *pb.DatasetByIDRe return res.(*pb.DatasetRes), nil } +func (gs *grpcServer) RetrieveDatasetsByGroups(ctx context.Context, req *pb.DatasetsByGroupsReq) (*pb.DatasetsRes, error) { + _, res, err := gs.retrieveDatasetsByGroups.ServeGRPC(ctx, req) + if err != nil { + return nil, encodeError(err) + } + + return res.(*pb.DatasetsRes), nil +} + func decodeRetrievePolicyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { req := grpcReq.(*pb.PolicyByIDReq) return accessByIDReq{PolicyID: req.PolicyID, OwnerID: req.OwnerID}, nil @@ -95,6 +110,11 @@ func decodeRetrieveDatasetRequest(_ context.Context, grpcReq interface{}) (inter }, nil } +func decodeRetrieveDatasetsByGroupRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(*pb.DatasetsByGroupsReq) + return accessByGroupIDReq{GroupIDs: req.GroupIDs, OwnerID: req.OwnerID}, nil +} + func encodePolicyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { res := grpcRes.(policyRes) return &pb.PolicyRes{ @@ -127,6 +147,16 @@ func encodeDatasetResponse(_ context.Context, grpcRes interface{}) (interface{}, }, nil } +func encodeDatasetListResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(datasetListRes) + + dsList := make([]*pb.DatasetRes, len(res.datasets)) + for i, ds := range res.datasets { + dsList[i] = &pb.DatasetRes{Id: ds.id, PolicyId: ds.policyID, AgentGroupId: ds.agentGroupID, SinkIds: ds.sinkIDs} + } + return &pb.DatasetsRes{DatasetList: dsList}, nil +} + func encodeError(err error) error { switch err { case nil: diff --git a/policies/api/http/logging.go b/policies/api/http/logging.go index 30bc82be3..615b33a2d 100644 --- a/policies/api/http/logging.go +++ b/policies/api/http/logging.go @@ -18,6 +18,20 @@ type loggingMiddleware struct { svc policies.Service } +func (l loggingMiddleware) ListDatasetsByGroupIDInternal(ctx context.Context, groupIDs []string, ownerID string) (_ []policies.Dataset, err error) { + defer func(begin time.Time) { + if err != nil { + l.logger.Warn("method call: list_datasets_by_group_id_internal", + zap.Error(err), + zap.Duration("duration", time.Since(begin))) + } else { + l.logger.Info("method call: list_datasets_by_group_id_internal", + zap.Duration("duration", time.Since(begin))) + } + }(time.Now()) + return l.svc.ListDatasetsByGroupIDInternal(ctx, groupIDs, ownerID) +} + func (l loggingMiddleware) RemoveAllDatasetsByPolicyIDInternal(ctx context.Context, token string, policyID string) (err error) { defer func(begin time.Time) { if err != nil { diff --git a/policies/api/http/metrics.go b/policies/api/http/metrics.go index 60d7d5eb5..a82fe66ff 100644 --- a/policies/api/http/metrics.go +++ b/policies/api/http/metrics.go @@ -22,6 +22,23 @@ type metricsMiddleware struct { svc policies.Service } +func (m metricsMiddleware) ListDatasetsByGroupIDInternal(ctx context.Context, groupIDs []string, ownerID string) ([]policies.Dataset, error) { + defer func(begin time.Time) { + labels := []string{ + "method", "listDatasetsByGroupIDInternal", + "owner_id", ownerID, + 
"policy_id", "", + "dataset_id", "", + } + + m.counter.With(labels...).Add(1) + m.latency.With(labels...).Observe(float64(time.Since(begin).Microseconds())) + + }(time.Now()) + + return m.svc.ListDatasetsByGroupIDInternal(ctx, groupIDs, ownerID) +} + func (m metricsMiddleware) RemoveAllDatasetsByPolicyIDInternal(ctx context.Context, token string, policyID string) error { ownerID, err := m.identify(token) if err != nil { diff --git a/policies/mocks/policies.go b/policies/mocks/policies.go index eb9d47ee1..7ff7c36e2 100644 --- a/policies/mocks/policies.go +++ b/policies/mocks/policies.go @@ -21,6 +21,21 @@ type mockPoliciesRepository struct { gdb map[string][]policies.PolicyInDataset } +func (m *mockPoliciesRepository) RetrieveDatasetsByGroupID(ctx context.Context, groupIDs []string, ownerID string) ([]policies.Dataset, error) { + var datasetList []policies.Dataset + for _, d := range m.ddb { + if d.MFOwnerID == ownerID { + for _, groupID := range groupIDs { + if groupID == d.AgentGroupID { + datasetList = append(datasetList, d) + } + } + } + } + + return datasetList, nil +} + func (m *mockPoliciesRepository) ActivateDatasetByID(ctx context.Context, datasetID string, ownerID string) error { for _, ds := range m.ddb { if ds.MFOwnerID == ownerID { diff --git a/policies/pb/policies.pb.go b/policies/pb/policies.pb.go index d2491b764..e5421415f 100644 --- a/policies/pb/policies.pb.go +++ b/policies/pb/policies.pb.go @@ -75,6 +75,61 @@ func (x *PolicyByIDReq) GetOwnerID() string { return "" } +type DatasetsByGroupsReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GroupIDs []string `protobuf:"bytes,1,rep,name=groupIDs,proto3" json:"groupIDs,omitempty"` + OwnerID string `protobuf:"bytes,2,opt,name=ownerID,proto3" json:"ownerID,omitempty"` +} + +func (x *DatasetsByGroupsReq) Reset() { + *x = DatasetsByGroupsReq{} + if protoimpl.UnsafeEnabled { + mi := &file_policies_pb_policies_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DatasetsByGroupsReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DatasetsByGroupsReq) ProtoMessage() {} + +func (x *DatasetsByGroupsReq) ProtoReflect() protoreflect.Message { + mi := &file_policies_pb_policies_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DatasetsByGroupsReq.ProtoReflect.Descriptor instead. 
+func (*DatasetsByGroupsReq) Descriptor() ([]byte, []int) { + return file_policies_pb_policies_proto_rawDescGZIP(), []int{1} +} + +func (x *DatasetsByGroupsReq) GetGroupIDs() []string { + if x != nil { + return x.GroupIDs + } + return nil +} + +func (x *DatasetsByGroupsReq) GetOwnerID() string { + if x != nil { + return x.OwnerID + } + return "" +} + type PoliciesByGroupsReq struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -87,7 +142,7 @@ type PoliciesByGroupsReq struct { func (x *PoliciesByGroupsReq) Reset() { *x = PoliciesByGroupsReq{} if protoimpl.UnsafeEnabled { - mi := &file_policies_pb_policies_proto_msgTypes[1] + mi := &file_policies_pb_policies_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -100,7 +155,7 @@ func (x *PoliciesByGroupsReq) String() string { func (*PoliciesByGroupsReq) ProtoMessage() {} func (x *PoliciesByGroupsReq) ProtoReflect() protoreflect.Message { - mi := &file_policies_pb_policies_proto_msgTypes[1] + mi := &file_policies_pb_policies_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -113,7 +168,7 @@ func (x *PoliciesByGroupsReq) ProtoReflect() protoreflect.Message { // Deprecated: Use PoliciesByGroupsReq.ProtoReflect.Descriptor instead. func (*PoliciesByGroupsReq) Descriptor() ([]byte, []int) { - return file_policies_pb_policies_proto_rawDescGZIP(), []int{1} + return file_policies_pb_policies_proto_rawDescGZIP(), []int{2} } func (x *PoliciesByGroupsReq) GetGroupIDs() []string { @@ -142,7 +197,7 @@ type DatasetByIDReq struct { func (x *DatasetByIDReq) Reset() { *x = DatasetByIDReq{} if protoimpl.UnsafeEnabled { - mi := &file_policies_pb_policies_proto_msgTypes[2] + mi := &file_policies_pb_policies_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -155,7 +210,7 @@ func (x *DatasetByIDReq) String() string { func (*DatasetByIDReq) ProtoMessage() {} func (x *DatasetByIDReq) ProtoReflect() protoreflect.Message { - mi := &file_policies_pb_policies_proto_msgTypes[2] + mi := &file_policies_pb_policies_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -168,7 +223,7 @@ func (x *DatasetByIDReq) ProtoReflect() protoreflect.Message { // Deprecated: Use DatasetByIDReq.ProtoReflect.Descriptor instead. func (*DatasetByIDReq) Descriptor() ([]byte, []int) { - return file_policies_pb_policies_proto_rawDescGZIP(), []int{2} + return file_policies_pb_policies_proto_rawDescGZIP(), []int{3} } func (x *DatasetByIDReq) GetDatasetID() string { @@ -200,7 +255,7 @@ type PolicyRes struct { func (x *PolicyRes) Reset() { *x = PolicyRes{} if protoimpl.UnsafeEnabled { - mi := &file_policies_pb_policies_proto_msgTypes[3] + mi := &file_policies_pb_policies_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -213,7 +268,7 @@ func (x *PolicyRes) String() string { func (*PolicyRes) ProtoMessage() {} func (x *PolicyRes) ProtoReflect() protoreflect.Message { - mi := &file_policies_pb_policies_proto_msgTypes[3] + mi := &file_policies_pb_policies_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -226,7 +281,7 @@ func (x *PolicyRes) ProtoReflect() protoreflect.Message { // Deprecated: Use PolicyRes.ProtoReflect.Descriptor instead. 
func (*PolicyRes) Descriptor() ([]byte, []int) { - return file_policies_pb_policies_proto_rawDescGZIP(), []int{3} + return file_policies_pb_policies_proto_rawDescGZIP(), []int{4} } func (x *PolicyRes) GetId() string { @@ -281,7 +336,7 @@ type PolicyInDSRes struct { func (x *PolicyInDSRes) Reset() { *x = PolicyInDSRes{} if protoimpl.UnsafeEnabled { - mi := &file_policies_pb_policies_proto_msgTypes[4] + mi := &file_policies_pb_policies_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -294,7 +349,7 @@ func (x *PolicyInDSRes) String() string { func (*PolicyInDSRes) ProtoMessage() {} func (x *PolicyInDSRes) ProtoReflect() protoreflect.Message { - mi := &file_policies_pb_policies_proto_msgTypes[4] + mi := &file_policies_pb_policies_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -307,7 +362,7 @@ func (x *PolicyInDSRes) ProtoReflect() protoreflect.Message { // Deprecated: Use PolicyInDSRes.ProtoReflect.Descriptor instead. func (*PolicyInDSRes) Descriptor() ([]byte, []int) { - return file_policies_pb_policies_proto_rawDescGZIP(), []int{4} + return file_policies_pb_policies_proto_rawDescGZIP(), []int{5} } func (x *PolicyInDSRes) GetId() string { @@ -370,7 +425,7 @@ type PolicyInDSListRes struct { func (x *PolicyInDSListRes) Reset() { *x = PolicyInDSListRes{} if protoimpl.UnsafeEnabled { - mi := &file_policies_pb_policies_proto_msgTypes[5] + mi := &file_policies_pb_policies_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -383,7 +438,7 @@ func (x *PolicyInDSListRes) String() string { func (*PolicyInDSListRes) ProtoMessage() {} func (x *PolicyInDSListRes) ProtoReflect() protoreflect.Message { - mi := &file_policies_pb_policies_proto_msgTypes[5] + mi := &file_policies_pb_policies_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -396,7 +451,7 @@ func (x *PolicyInDSListRes) ProtoReflect() protoreflect.Message { // Deprecated: Use PolicyInDSListRes.ProtoReflect.Descriptor instead. func (*PolicyInDSListRes) Descriptor() ([]byte, []int) { - return file_policies_pb_policies_proto_rawDescGZIP(), []int{5} + return file_policies_pb_policies_proto_rawDescGZIP(), []int{6} } func (x *PolicyInDSListRes) GetPolicies() []*PolicyInDSRes { @@ -420,7 +475,7 @@ type DatasetRes struct { func (x *DatasetRes) Reset() { *x = DatasetRes{} if protoimpl.UnsafeEnabled { - mi := &file_policies_pb_policies_proto_msgTypes[6] + mi := &file_policies_pb_policies_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -433,7 +488,7 @@ func (x *DatasetRes) String() string { func (*DatasetRes) ProtoMessage() {} func (x *DatasetRes) ProtoReflect() protoreflect.Message { - mi := &file_policies_pb_policies_proto_msgTypes[6] + mi := &file_policies_pb_policies_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -446,7 +501,7 @@ func (x *DatasetRes) ProtoReflect() protoreflect.Message { // Deprecated: Use DatasetRes.ProtoReflect.Descriptor instead. 
func (*DatasetRes) Descriptor() ([]byte, []int) { - return file_policies_pb_policies_proto_rawDescGZIP(), []int{6} + return file_policies_pb_policies_proto_rawDescGZIP(), []int{7} } func (x *DatasetRes) GetId() string { @@ -477,6 +532,53 @@ func (x *DatasetRes) GetSinkIds() []string { return nil } +type DatasetsRes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DatasetList []*DatasetRes `protobuf:"bytes,1,rep,name=datasetList,proto3" json:"datasetList,omitempty"` +} + +func (x *DatasetsRes) Reset() { + *x = DatasetsRes{} + if protoimpl.UnsafeEnabled { + mi := &file_policies_pb_policies_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DatasetsRes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DatasetsRes) ProtoMessage() {} + +func (x *DatasetsRes) ProtoReflect() protoreflect.Message { + mi := &file_policies_pb_policies_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DatasetsRes.ProtoReflect.Descriptor instead. +func (*DatasetsRes) Descriptor() ([]byte, []int) { + return file_policies_pb_policies_proto_rawDescGZIP(), []int{8} +} + +func (x *DatasetsRes) GetDatasetList() []*DatasetRes { + if x != nil { + return x.DatasetList + } + return nil +} + var File_policies_pb_policies_proto protoreflect.FileDescriptor var file_policies_pb_policies_proto_rawDesc = []byte{ @@ -487,64 +589,79 @@ var file_policies_pb_policies_proto_rawDesc = []byte{ 0x79, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, 0x4b, 0x0a, - 0x13, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x42, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x13, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x42, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x12, 0x1a, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, 0x48, 0x0a, 0x0e, 0x44, 0x61, - 0x74, 0x61, 0x73, 0x65, 0x74, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, - 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x77, - 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, - 0x65, 0x72, 0x49, 0x44, 0x22, 0x77, 0x0a, 0x09, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, - 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, - 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 
0x01, 0x28, 0x05, - 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xc0, 0x01, - 0x0a, 0x0d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x44, 0x53, 0x52, 0x65, 0x73, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x18, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x64, - 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x67, - 0x65, 0x6e, 0x74, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, - 0x22, 0x48, 0x0a, 0x11, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x44, 0x53, 0x4c, 0x69, - 0x73, 0x74, 0x52, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, - 0x65, 0x73, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x44, 0x53, 0x52, 0x65, 0x73, - 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x22, 0x7a, 0x0a, 0x0a, 0x44, 0x61, - 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x67, 0x65, 0x6e, - 0x74, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x1b, - 0x0a, 0x09, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, - 0x69, 0x6e, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, - 0x69, 0x6e, 0x6b, 0x49, 0x64, 0x73, 0x32, 0xf0, 0x01, 0x0a, 0x0d, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x52, 0x65, 0x74, 0x72, - 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x17, 0x2e, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x79, 0x49, 0x44, - 0x52, 0x65, 0x71, 0x1a, 0x13, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x52, 0x65, - 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x42, 0x79, - 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x1d, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x42, 0x79, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x1b, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x2e, 
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x44, 0x53, 0x4c, 0x69, 0x73, 0x74, 0x52, - 0x65, 0x73, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, - 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x18, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, - 0x65, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, - 0x71, 0x1a, 0x14, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x44, 0x61, 0x74, - 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x22, 0x00, 0x42, 0x0d, 0x5a, 0x0b, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, 0x4b, 0x0a, 0x13, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x42, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, + 0x71, 0x12, 0x1a, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x73, 0x12, 0x18, 0x0a, + 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, 0x48, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x61, 0x74, + 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, + 0x44, 0x22, 0x77, 0x0a, 0x09, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xc0, 0x01, 0x0a, 0x0d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x44, 0x53, 0x52, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x61, 0x67, 0x65, 0x6e, 
0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x22, 0x48, 0x0a, + 0x11, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x44, 0x53, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x12, 0x33, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x44, 0x53, 0x52, 0x65, 0x73, 0x52, 0x08, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x22, 0x7a, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x52, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x6b, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x6b, + 0x49, 0x64, 0x73, 0x22, 0x45, 0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x52, + 0x65, 0x73, 0x12, 0x36, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x4c, 0x69, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x52, 0x0b, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x32, 0xc4, 0x02, 0x0a, 0x0d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x0e, + 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x17, + 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x13, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x58, + 0x0a, 0x18, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x42, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x1d, 0x2e, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x42, 0x79, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x1b, 0x2e, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x44, 0x53, 0x4c, + 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x72, + 0x69, 0x65, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x18, 0x2e, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x42, 0x79, + 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x52, 0x0a, + 0x18, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, + 0x73, 0x42, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x1d, 0x2e, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x42, 0x79, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x1a, 
0x15, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x22, + 0x00, 0x42, 0x0d, 0x5a, 0x0b, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x70, 0x62, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -559,29 +676,34 @@ func file_policies_pb_policies_proto_rawDescGZIP() []byte { return file_policies_pb_policies_proto_rawDescData } -var file_policies_pb_policies_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_policies_pb_policies_proto_msgTypes = make([]protoimpl.MessageInfo, 9) var file_policies_pb_policies_proto_goTypes = []interface{}{ (*PolicyByIDReq)(nil), // 0: policies.PolicyByIDReq - (*PoliciesByGroupsReq)(nil), // 1: policies.PoliciesByGroupsReq - (*DatasetByIDReq)(nil), // 2: policies.DatasetByIDReq - (*PolicyRes)(nil), // 3: policies.PolicyRes - (*PolicyInDSRes)(nil), // 4: policies.PolicyInDSRes - (*PolicyInDSListRes)(nil), // 5: policies.PolicyInDSListRes - (*DatasetRes)(nil), // 6: policies.DatasetRes + (*DatasetsByGroupsReq)(nil), // 1: policies.DatasetsByGroupsReq + (*PoliciesByGroupsReq)(nil), // 2: policies.PoliciesByGroupsReq + (*DatasetByIDReq)(nil), // 3: policies.DatasetByIDReq + (*PolicyRes)(nil), // 4: policies.PolicyRes + (*PolicyInDSRes)(nil), // 5: policies.PolicyInDSRes + (*PolicyInDSListRes)(nil), // 6: policies.PolicyInDSListRes + (*DatasetRes)(nil), // 7: policies.DatasetRes + (*DatasetsRes)(nil), // 8: policies.DatasetsRes } var file_policies_pb_policies_proto_depIdxs = []int32{ - 4, // 0: policies.PolicyInDSListRes.policies:type_name -> policies.PolicyInDSRes - 0, // 1: policies.PolicyService.RetrievePolicy:input_type -> policies.PolicyByIDReq - 1, // 2: policies.PolicyService.RetrievePoliciesByGroups:input_type -> policies.PoliciesByGroupsReq - 2, // 3: policies.PolicyService.RetrieveDataset:input_type -> policies.DatasetByIDReq - 3, // 4: policies.PolicyService.RetrievePolicy:output_type -> policies.PolicyRes - 5, // 5: policies.PolicyService.RetrievePoliciesByGroups:output_type -> policies.PolicyInDSListRes - 6, // 6: policies.PolicyService.RetrieveDataset:output_type -> policies.DatasetRes - 4, // [4:7] is the sub-list for method output_type - 1, // [1:4] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 5, // 0: policies.PolicyInDSListRes.policies:type_name -> policies.PolicyInDSRes + 7, // 1: policies.DatasetsRes.datasetList:type_name -> policies.DatasetRes + 0, // 2: policies.PolicyService.RetrievePolicy:input_type -> policies.PolicyByIDReq + 2, // 3: policies.PolicyService.RetrievePoliciesByGroups:input_type -> policies.PoliciesByGroupsReq + 3, // 4: policies.PolicyService.RetrieveDataset:input_type -> policies.DatasetByIDReq + 1, // 5: policies.PolicyService.RetrieveDatasetsByGroups:input_type -> policies.DatasetsByGroupsReq + 4, // 6: policies.PolicyService.RetrievePolicy:output_type -> policies.PolicyRes + 6, // 7: policies.PolicyService.RetrievePoliciesByGroups:output_type -> policies.PolicyInDSListRes + 7, // 8: policies.PolicyService.RetrieveDataset:output_type -> policies.DatasetRes + 8, // 9: policies.PolicyService.RetrieveDatasetsByGroups:output_type -> policies.DatasetsRes + 6, // [6:10] is the sub-list for method output_type + 2, // [2:6] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, 
// [0:2] is the sub-list for field type_name } func init() { file_policies_pb_policies_proto_init() } @@ -603,7 +725,7 @@ func file_policies_pb_policies_proto_init() { } } file_policies_pb_policies_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PoliciesByGroupsReq); i { + switch v := v.(*DatasetsByGroupsReq); i { case 0: return &v.state case 1: @@ -615,7 +737,7 @@ func file_policies_pb_policies_proto_init() { } } file_policies_pb_policies_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DatasetByIDReq); i { + switch v := v.(*PoliciesByGroupsReq); i { case 0: return &v.state case 1: @@ -627,7 +749,7 @@ func file_policies_pb_policies_proto_init() { } } file_policies_pb_policies_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PolicyRes); i { + switch v := v.(*DatasetByIDReq); i { case 0: return &v.state case 1: @@ -639,7 +761,7 @@ func file_policies_pb_policies_proto_init() { } } file_policies_pb_policies_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PolicyInDSRes); i { + switch v := v.(*PolicyRes); i { case 0: return &v.state case 1: @@ -651,7 +773,7 @@ func file_policies_pb_policies_proto_init() { } } file_policies_pb_policies_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PolicyInDSListRes); i { + switch v := v.(*PolicyInDSRes); i { case 0: return &v.state case 1: @@ -663,6 +785,18 @@ func file_policies_pb_policies_proto_init() { } } file_policies_pb_policies_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PolicyInDSListRes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_policies_pb_policies_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DatasetRes); i { case 0: return &v.state @@ -674,6 +808,18 @@ func file_policies_pb_policies_proto_init() { return nil } } + file_policies_pb_policies_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DatasetsRes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -681,7 +827,7 @@ func file_policies_pb_policies_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_policies_pb_policies_proto_rawDesc, NumEnums: 0, - NumMessages: 7, + NumMessages: 9, NumExtensions: 0, NumServices: 1, }, diff --git a/policies/pb/policies.proto b/policies/pb/policies.proto index 35de3d5de..380194382 100644 --- a/policies/pb/policies.proto +++ b/policies/pb/policies.proto @@ -7,6 +7,7 @@ service PolicyService { rpc RetrievePolicy(PolicyByIDReq) returns (PolicyRes) {} rpc RetrievePoliciesByGroups(PoliciesByGroupsReq) returns (PolicyInDSListRes) {} rpc RetrieveDataset(DatasetByIDReq) returns (DatasetRes) {} + rpc RetrieveDatasetsByGroups(DatasetsByGroupsReq) returns (DatasetsRes) {} } message PolicyByIDReq { @@ -14,6 +15,11 @@ message PolicyByIDReq { string ownerID = 2; } +message DatasetsByGroupsReq { + repeated string groupIDs = 1; + string ownerID = 2; +} + message PoliciesByGroupsReq { repeated string groupIDs = 1; string ownerID = 2; @@ -52,3 +58,7 @@ message DatasetRes { string policy_id = 3; repeated string sink_ids = 4; } + +message DatasetsRes { + repeated DatasetRes datasetList = 1; +} diff --git 
a/policies/pb/policies_grpc.pb.go b/policies/pb/policies_grpc.pb.go index 270c444a1..980860d42 100644 --- a/policies/pb/policies_grpc.pb.go +++ b/policies/pb/policies_grpc.pb.go @@ -21,6 +21,7 @@ type PolicyServiceClient interface { RetrievePolicy(ctx context.Context, in *PolicyByIDReq, opts ...grpc.CallOption) (*PolicyRes, error) RetrievePoliciesByGroups(ctx context.Context, in *PoliciesByGroupsReq, opts ...grpc.CallOption) (*PolicyInDSListRes, error) RetrieveDataset(ctx context.Context, in *DatasetByIDReq, opts ...grpc.CallOption) (*DatasetRes, error) + RetrieveDatasetsByGroups(ctx context.Context, in *DatasetsByGroupsReq, opts ...grpc.CallOption) (*DatasetsRes, error) } type policyServiceClient struct { @@ -58,6 +59,15 @@ func (c *policyServiceClient) RetrieveDataset(ctx context.Context, in *DatasetBy return out, nil } +func (c *policyServiceClient) RetrieveDatasetsByGroups(ctx context.Context, in *DatasetsByGroupsReq, opts ...grpc.CallOption) (*DatasetsRes, error) { + out := new(DatasetsRes) + err := c.cc.Invoke(ctx, "/policies.PolicyService/RetrieveDatasetsByGroups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // PolicyServiceServer is the server API for PolicyService service. // All implementations must embed UnimplementedPolicyServiceServer // for forward compatibility @@ -65,6 +75,7 @@ type PolicyServiceServer interface { RetrievePolicy(context.Context, *PolicyByIDReq) (*PolicyRes, error) RetrievePoliciesByGroups(context.Context, *PoliciesByGroupsReq) (*PolicyInDSListRes, error) RetrieveDataset(context.Context, *DatasetByIDReq) (*DatasetRes, error) + RetrieveDatasetsByGroups(context.Context, *DatasetsByGroupsReq) (*DatasetsRes, error) mustEmbedUnimplementedPolicyServiceServer() } @@ -81,6 +92,9 @@ func (UnimplementedPolicyServiceServer) RetrievePoliciesByGroups(context.Context func (UnimplementedPolicyServiceServer) RetrieveDataset(context.Context, *DatasetByIDReq) (*DatasetRes, error) { return nil, status.Errorf(codes.Unimplemented, "method RetrieveDataset not implemented") } +func (UnimplementedPolicyServiceServer) RetrieveDatasetsByGroups(context.Context, *DatasetsByGroupsReq) (*DatasetsRes, error) { + return nil, status.Errorf(codes.Unimplemented, "method RetrieveDatasetsByGroups not implemented") +} func (UnimplementedPolicyServiceServer) mustEmbedUnimplementedPolicyServiceServer() {} // UnsafePolicyServiceServer may be embedded to opt out of forward compatibility for this service. @@ -148,6 +162,24 @@ func _PolicyService_RetrieveDataset_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } +func _PolicyService_RetrieveDatasetsByGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DatasetsByGroupsReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PolicyServiceServer).RetrieveDatasetsByGroups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/policies.PolicyService/RetrieveDatasetsByGroups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PolicyServiceServer).RetrieveDatasetsByGroups(ctx, req.(*DatasetsByGroupsReq)) + } + return interceptor(ctx, in, info, handler) +} + // PolicyService_ServiceDesc is the grpc.ServiceDesc for PolicyService service. 
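For orientation, here is a minimal, hypothetical Go caller for the new RetrieveDatasetsByGroups RPC. The endpoint address and IDs are placeholders, and `NewPolicyServiceClient` is assumed from the usual protoc-gen-go-grpc constructor convention; a sketch, not the service's actual wiring:

```go
package main

import (
	"context"
	"log"

	policiespb "github.com/ns1labs/orb/policies/pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder endpoint; point this at the deployed policies gRPC port.
	conn, err := grpc.Dial("localhost:8182", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := policiespb.NewPolicyServiceClient(conn)
	res, err := client.RetrieveDatasetsByGroups(context.Background(), &policiespb.DatasetsByGroupsReq{
		GroupIDs: []string{"agent-group-id"}, // placeholder group ID
		OwnerID:  "mf-owner-id",              // placeholder owner ID
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, ds := range res.GetDatasetList() {
		log.Printf("dataset %s -> policy %s, sinks %v", ds.GetId(), ds.GetPolicyId(), ds.GetSinkIds())
	}
}
```

Unlike RetrieveDataset, which resolves a single dataset by ID, this call fans out over all valid datasets attached to the given groups, which is the shape the sinker-side OTel bridge needs.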
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -167,6 +199,10 @@ var PolicyService_ServiceDesc = grpc.ServiceDesc{ MethodName: "RetrieveDataset", Handler: _PolicyService_RetrieveDataset_Handler, }, + { + MethodName: "RetrieveDatasetsByGroups", + Handler: _PolicyService_RetrieveDatasetsByGroups_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "policies/pb/policies.proto", diff --git a/policies/policies.go b/policies/policies.go index b33b8cd2e..b10a1636a 100644 --- a/policies/policies.go +++ b/policies/policies.go @@ -121,6 +121,9 @@ type Service interface { // RemoveAllDatasetsByPolicyIDInternal removes all datasets by policyID owned by the specified user RemoveAllDatasetsByPolicyIDInternal(ctx context.Context, token string, policyID string) error + + // ListDatasetsByGroupIDInternal gRPC version of retrieving list of datasets belonging to specified agent group with no token + ListDatasetsByGroupIDInternal(ctx context.Context, groupIDs []string, ownerID string) ([]Dataset, error) } type Repository interface { @@ -182,4 +185,7 @@ type Repository interface { // DeleteAllDatasetsPolicy removes all datasets by policyID DeleteAllDatasetsPolicy(ctx context.Context, policyID string, ownerID string) error + + // RetrieveDatasetsByGroupID Retrieve dataset list by group id + RetrieveDatasetsByGroupID(ctx context.Context, groupIDs []string, ownerID string) ([]Dataset, error) } diff --git a/policies/policy_service.go b/policies/policy_service.go index b3d210ed2..3ebbe2a9e 100644 --- a/policies/policy_service.go +++ b/policies/policy_service.go @@ -34,6 +34,13 @@ var ( ErrNotifyAgentGroupChannel = errors.New("failed to notify agent group channel") ) +func (s policiesService) ListDatasetsByGroupIDInternal(ctx context.Context, groupIDs []string, ownerID string) ([]Dataset, error) { + if len(groupIDs) == 0 || ownerID == "" { + return nil, ErrMalformedEntity + } + return s.repo.RetrieveDatasetsByGroupID(ctx, groupIDs, ownerID) +} + func (s policiesService) ListPolicies(ctx context.Context, token string, pm PageMetadata) (Page, error) { ownerID, err := s.identify(token) if err != nil { diff --git a/policies/postgres/datasets_test.go b/policies/postgres/datasets_test.go index c6f333038..be3b3226b 100644 --- a/policies/postgres/datasets_test.go +++ b/policies/postgres/datasets_test.go @@ -1088,6 +1088,104 @@ func TestDeleteAllDatasetsPolicy(t *testing.T) { } } +func TestDatasetsRetrieveByGroup(t *testing.T) { + dbMiddleware := postgres.NewDatabase(db) + repo := postgres.NewPoliciesRepository(dbMiddleware, logger) + + oID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + nameID, err := types.NewIdentifier("mypolicy") + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + policy := policies.Policy{ + Name: nameID, + MFOwnerID: oID.String(), + Policy: types.Metadata{"pkey1": "pvalue1"}, + } + policyID, err := repo.SavePolicy(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + + groupID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + sinkIDs := make([]string, 2) + for i := 0; i < 2; i++ { + sinkID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + sinkIDs[i] = sinkID.String() + } + + dsnameID, err := types.NewIdentifier("mydataset") + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + dataset := policies.Dataset{ + Name: 
dsnameID, + MFOwnerID: oID.String(), + Valid: true, + AgentGroupID: groupID.String(), + PolicyID: policyID, + SinkIDs: sinkIDs, + Metadata: types.Metadata{"testkey": "testvalue"}, + Created: time.Time{}, + } + id, err := repo.SaveDataset(context.Background(), dataset) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + dataset.ID = id + + cases := map[string]struct { + datasetID string + groupID []string + ownerID string + policyID string + results int + err error + }{ + "retrieve existing datasets by group ID and ownerID": { + datasetID: dataset.ID, + groupID: []string{groupID.String()}, + ownerID: policy.MFOwnerID, + policyID: policyID, + results: 1, + err: nil, + }, + "retrieve non existing datasets by group ID and ownerID": { + groupID: []string{policy.MFOwnerID}, + ownerID: policy.MFOwnerID, + policyID: policyID, + results: 0, + err: nil, + }, + "retrieve datasets by groupID with empty owner": { + groupID: []string{policy.MFOwnerID}, + ownerID: "", + policyID: policyID, + results: 0, + err: errors.ErrMalformedEntity, + }, + "retrieve datasets by groupID with empty groupID": { + groupID: []string{}, + ownerID: policy.MFOwnerID, + policyID: policyID, + results: 0, + err: errors.ErrMalformedEntity, + }, + } + + for desc, tc := range cases { + t.Run(desc, func(t *testing.T) { + dsList, err := repo.RetrieveDatasetsByGroupID(context.Background(), tc.groupID, tc.ownerID) + if err == nil { + assert.Equal(t, tc.results, len(dsList), fmt.Sprintf("%s: expected %d got %d\n", desc, tc.results, len(dsList))) + if tc.results > 0 { + assert.Equal(t, dataset.ID, dsList[0].ID, fmt.Sprintf("%s: expected %s got %s\n", desc, dataset.ID, dsList[0].ID)) + } + } + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + }) + } +} + func testSortDataset(t *testing.T, pm policies.PageMetadata, ags []policies.Dataset) { t.Helper() switch pm.Order { diff --git a/policies/postgres/policies.go b/policies/postgres/policies.go index d498bb308..bd7c7c51b 100644 --- a/policies/postgres/policies.go +++ b/policies/postgres/policies.go @@ -34,6 +34,42 @@ type policiesRepository struct { logger *zap.Logger } +func (r policiesRepository) RetrieveDatasetsByGroupID(ctx context.Context, groupIDs []string, ownerID string) ([]policies.Dataset, error) { + q := `SELECT id, agent_group_id, sink_ids, agent_policy_id + FROM datasets + WHERE valid = TRUE AND agent_group_id IN (?) AND mf_owner_id = ?` + + if len(groupIDs) == 0 || ownerID == "" { + return nil, errors.ErrMalformedEntity + } + + query, args, err := sqlx.In(q, groupIDs, ownerID) + if err != nil { + return nil, err + } + + query = r.db.Rebind(query) + + rows, err := r.db.QueryxContext(ctx, query, args...) 
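+ // Note: sqlx.In above expanded the single `IN (?)` placeholder into one bindvar per group ID, + // and Rebind rewrote the generic `?` bindvars into PostgreSQL's positional `$1..$N` form before execution.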
+ if err != nil { + return nil, errors.Wrap(errors.ErrSelectEntity, err) + } + defer rows.Close() + + var items []policies.Dataset + for rows.Next() { + dbth := dbDataset{MFOwnerID: ownerID} + if err := rows.StructScan(&dbth); err != nil { + return nil, errors.Wrap(errors.ErrSelectEntity, err) + } + + th := toDataset(dbth) + items = append(items, policies.Dataset{ID: th.ID, PolicyID: th.PolicyID, SinkIDs: th.SinkIDs, AgentGroupID: th.AgentGroupID}) + } + + return items, nil +} + func (r policiesRepository) DeletePolicy(ctx context.Context, ownerID string, policyID string) error { if ownerID == "" || policyID == "" { return policies.ErrMalformedEntity diff --git a/policies/redis/producer/streams.go b/policies/redis/producer/streams.go index 43fd8f98d..077bb7790 100644 --- a/policies/redis/producer/streams.go +++ b/policies/redis/producer/streams.go @@ -36,6 +36,10 @@ type eventStore struct { logger *zap.Logger } +func (e eventStore) ListDatasetsByGroupIDInternal(ctx context.Context, groupIDs []string, ownerID string) ([]policies.Dataset, error) { + return e.svc.ListDatasetsByGroupIDInternal(ctx, groupIDs, ownerID) +} + func (e eventStore) ViewDatasetByIDInternal(ctx context.Context, ownerID string, datasetID string) (policies.Dataset, error) { return e.svc.ViewDatasetByIDInternal(ctx, ownerID, datasetID) } diff --git a/sinker/backend/pktvisor/pktvisor.go b/sinker/backend/pktvisor/pktvisor.go index b4aca2f69..482ece40b 100644 --- a/sinker/backend/pktvisor/pktvisor.go +++ b/sinker/backend/pktvisor/pktvisor.go @@ -26,7 +26,7 @@ type pktvisorBackend struct { logger *zap.Logger } -type context struct { +type metricAppendix struct { agent *pb.AgentInfoRes agentID string policyID string @@ -34,6 +34,7 @@ type context struct { deviceID string deviceIF string handlerLabel string + format string tags map[string]string logger *zap.Logger } @@ -60,7 +61,7 @@ func (p pktvisorBackend) ProcessMetrics(agent *pb.AgentInfoRes, agentID string, tags[k] = v } - context := context{ + appendix := metricAppendix{ agent: agent, agentID: agentID, policyID: data.PolicyID, @@ -70,6 +71,7 @@ func (p pktvisorBackend) ProcessMetrics(agent *pb.AgentInfoRes, agentID string, handlerLabel: "", tags: tags, logger: p.logger, + format: "prom_sinker", } stats := make(map[string]StatSnapshot) for handlerLabel, handlerData := range metrics { @@ -115,10 +117,10 @@ func (p pktvisorBackend) ProcessMetrics(agent *pb.AgentInfoRes, agentID string, stats[handlerLabel] = sTmp } } - return parseToProm(&context, stats), nil + return parseToProm(&appendix, stats), nil } -func parseToProm(ctxt *context, statsMap map[string]StatSnapshot) prometheus.TSList { +func parseToProm(ctxt *metricAppendix, statsMap map[string]StatSnapshot) prometheus.TSList { var finalTs = prometheus.TSList{} for handlerLabel, stats := range statsMap { var tsList = prometheus.TSList{} @@ -134,7 +136,7 @@ func parseToProm(ctxt *context, statsMap map[string]StatSnapshot) prometheus.TSL return finalTs } -func convertToPromParticle(ctxt *context, statsMap map[string]interface{}, label string, tsList *prometheus.TSList) { +func convertToPromParticle(ctxt *metricAppendix, statsMap map[string]interface{}, label string, tsList *prometheus.TSList) { for key, value := range statsMap { switch statistic := value.(type) { case map[string]interface{}: @@ -198,7 +200,7 @@ func convertToPromParticle(ctxt *context, statsMap map[string]interface{}, label } } -func convertFlowToPromParticle(ctxt *context, statsMap map[string]interface{}, label string, tsList *prometheus.TSList) { +func 
convertFlowToPromParticle(ctxt *metricAppendix, statsMap map[string]interface{}, label string, tsList *prometheus.TSList) { for key, value := range statsMap { switch statistic := value.(type) { case map[string]interface{}: @@ -261,7 +263,7 @@ func convertFlowToPromParticle(ctxt *context, statsMap map[string]interface{}, l } } -func makePromParticle(ctxt *context, label string, k string, v interface{}, tsList *prometheus.TSList, quantile bool, name string) *prometheus.TSList { +func makePromParticle(ctxt *metricAppendix, label string, k string, v interface{}, tsList *prometheus.TSList, quantile bool, name string) *prometheus.TSList { mapQuantiles := make(map[string]string) mapQuantiles["P50"] = "0.5" mapQuantiles["P90"] = "0.9" @@ -354,7 +356,7 @@ func makePromParticle(ctxt *context, label string, k string, v interface{}, tsLi return tsList } -func handleParticleError(ctxt *context, err error) { +func handleParticleError(ctxt *metricAppendix, err error) { ctxt.logger.Error("failed to set prometheus element", zap.Error(err)) } diff --git a/sinker/config_state_check.go b/sinker/config_state_check.go index 7f1693100..666802af8 100644 --- a/sinker/config_state_check.go +++ b/sinker/config_state_check.go @@ -17,7 +17,7 @@ const ( DefaultTimeout = 30 * time.Minute ) -func (svc *sinkerService) checkState(_ time.Time) { +func (svc *SinkerService) checkState(_ time.Time) { owners, err := svc.sinkerCache.GetAllOwners() if err != nil { svc.logger.Error("failed to retrieve the list of owners") @@ -44,7 +44,7 @@ func (svc *sinkerService) checkState(_ time.Time) { } } -func (svc *sinkerService) checkSinker() { +func (svc *SinkerService) checkSinker() { svc.checkState(time.Now()) for { select { diff --git a/sinker/message_handler.go b/sinker/message_handler.go index c8537a4f5..6a91e46c9 100644 --- a/sinker/message_handler.go +++ b/sinker/message_handler.go @@ -21,7 +21,7 @@ import ( "time" ) -func (svc sinkerService) remoteWriteToPrometheus(tsList prometheus.TSList, ownerID string, sinkID string) error { +func (svc SinkerService) remoteWriteToPrometheus(tsList prometheus.TSList, ownerID string, sinkID string) error { cfgRepo, err := svc.sinkerCache.Get(ownerID, sinkID) if err != nil { svc.logger.Error("unable to retrieve the sink config", zap.Error(err)) @@ -73,7 +73,7 @@ func (svc sinkerService) remoteWriteToPrometheus(tsList prometheus.TSList, owner return nil } -func (svc sinkerService) encodeBase64(user string, password string) string { +func (svc SinkerService) encodeBase64(user string, password string) string { defer func(t time.Time) { svc.logger.Debug("encodeBase64 took", zap.String("execution", time.Since(t).String())) }(time.Now()) @@ -81,7 +81,7 @@ func (svc sinkerService) encodeBase64(user string, password string) string { return fmt.Sprintf("Basic %s", sEnc) } -func (svc sinkerService) handleMetrics(agentID string, channelID string, subtopic string, payload []byte) error { +func (svc SinkerService) handleMetrics(ctx context.Context, agentID string, channelID string, subtopic string, payload []byte) error { // find backend to send it to beName := strings.Split(subtopic, ".") @@ -113,9 +113,9 @@ func (svc sinkerService) handleMetrics(agentID string, channelID string, subtopi return fleet.ErrSchemaMalformed } - agentPb, err := svc.fleetClient.RetrieveAgentInfoByChannelID(context.Background(), &pb.AgentInfoByChannelIDReq{Channel: channelID}) - if err != nil { - return err + agentPb, err2 := svc.ExtractAgent(ctx, channelID) + if err2 != nil { + return err2 } agentName, _ := 
types.NewIdentifier(agentPb.AgentName) @@ -159,7 +159,15 @@ func (svc sinkerService) handleMetrics(agentID string, channelID string, subtopi return nil } -func (svc sinkerService) SinkPolicy(agent fleet.Agent, metricsPayload fleet.AgentMetricsRPCPayload, datasetSinkIDs map[string]bool, tsList []prometheus.TimeSeries) { +func (svc SinkerService) ExtractAgent(ctx context.Context, channelID string) (*pb.AgentInfoRes, error) { + agentPb, err := svc.fleetClient.RetrieveAgentInfoByChannelID(ctx, &pb.AgentInfoByChannelIDReq{Channel: channelID}) + if err != nil { + return nil, err + } + return agentPb, nil +} + +func (svc SinkerService) SinkPolicy(agent fleet.Agent, metricsPayload fleet.AgentMetricsRPCPayload, datasetSinkIDs map[string]bool, tsList []prometheus.TimeSeries) { sinkIDList := make([]string, len(datasetSinkIDs)) i := 0 for k := range datasetSinkIDs { @@ -194,7 +202,7 @@ func (svc sinkerService) SinkPolicy(agent fleet.Agent, metricsPayload fleet.Agen } } -func (svc sinkerService) GetSinks(agent fleet.Agent, agentMetricsRPCPayload fleet.AgentMetricsRPCPayload, datasetSinkIDs map[string]bool) error { +func (svc SinkerService) GetSinks(agent fleet.Agent, agentMetricsRPCPayload fleet.AgentMetricsRPCPayload, datasetSinkIDs map[string]bool) error { for _, ds := range agentMetricsRPCPayload.Datasets { if ds == "" { svc.logger.Error("malformed agent RPC: empty dataset", zap.String("agent_id", agent.MFThingID), zap.String("owner_id", agent.MFOwnerID)) @@ -237,7 +245,7 @@ func (svc sinkerService) GetSinks(agent fleet.Agent, agentMetricsRPCPayload flee return nil } -func (svc sinkerService) handleMsgFromAgent(msg messaging.Message) error { +func (svc SinkerService) handleMsgFromAgent(msg messaging.Message) error { inputContext := context.WithValue(context.Background(), "trace-id", uuid.NewString()) go func(ctx context.Context) { defer func(t time.Time) { @@ -271,7 +279,7 @@ func (svc sinkerService) handleMsgFromAgent(msg messaging.Message) error { return } - if err := svc.handleMetrics(msg.Publisher, msg.Channel, msg.Subtopic, msg.Payload); err != nil { + if err := svc.handleMetrics(ctx, msg.Publisher, msg.Channel, msg.Subtopic, msg.Payload); err != nil { svc.logger.Error("metrics processing failure", zap.Any("trace-id", ctx.Value("trace-id")), zap.Error(err)) return } diff --git a/sinker/otel/bridgeservice/bridge.go b/sinker/otel/bridgeservice/bridge.go new file mode 100644 index 000000000..810b05d9e --- /dev/null +++ b/sinker/otel/bridgeservice/bridge.go @@ -0,0 +1,67 @@ +package bridgeservice + +import ( + "context" + fleetpb "github.com/ns1labs/orb/fleet/pb" + policiespb "github.com/ns1labs/orb/policies/pb" + "github.com/ns1labs/orb/sinker/config" + "go.uber.org/zap" +) + +type BridgeService interface { + ExtractAgent(ctx context.Context, channelID string) (*fleetpb.AgentInfoRes, error) + GetDataSetsFromAgentGroups(ctx context.Context, mfOwnerId string, agentGroupIds []string) (map[string]string, error) +} + +func NewBridgeService(logger *zap.Logger, + sinkerCache config.ConfigRepo, + policiesClient policiespb.PolicyServiceClient, + fleetClient fleetpb.FleetServiceClient) SinkerOtelBridgeService { + return SinkerOtelBridgeService{ + logger: logger, + sinkerCache: sinkerCache, + policiesClient: policiesClient, + fleetClient: fleetClient, + } +} + +type SinkerOtelBridgeService struct { + logger *zap.Logger + sinkerCache config.ConfigRepo + policiesClient policiespb.PolicyServiceClient + fleetClient fleetpb.FleetServiceClient +} + +func (bs *SinkerOtelBridgeService) ExtractAgent(ctx 
context.Context, channelID string) (*fleetpb.AgentInfoRes, error) { + agentPb, err := bs.fleetClient.RetrieveAgentInfoByChannelID(ctx, &fleetpb.AgentInfoByChannelIDReq{Channel: channelID}) + if err != nil { + return nil, err + } + return agentPb, nil +} + +func (bs *SinkerOtelBridgeService) GetSinkIdsFromAgentGroups(ctx context.Context, mfOwnerId string, agentGroupIds []string) (map[string]string, error) { + policiesRes, err := bs.policiesClient.RetrievePoliciesByGroups(ctx, &policiespb.PoliciesByGroupsReq{ + GroupIDs: agentGroupIds, + OwnerID: mfOwnerId, + }) + if err != nil { + bs.logger.Error("unable to retrieve policies from agent groups", zap.Error(err)) + return nil, err + } + mapSinkIdPolicy := make(map[string]string) + for _, policy := range policiesRes.Policies { + datasetRes, err := bs.policiesClient.RetrieveDataset(ctx, &policiespb.DatasetByIDReq{ + DatasetID: policy.DatasetId, + OwnerID: mfOwnerId, + }) + if err != nil { + bs.logger.Error("unable to retrieve datasets from policy", zap.String("policy", policy.Name), zap.Error(err)) + continue + } + for _, sinkId := range datasetRes.SinkIds { + mapSinkIdPolicy[sinkId] = "active" + } + } + return mapSinkIdPolicy, nil +} diff --git a/sinker/otel/components.go b/sinker/otel/components.go index 3c455a0bd..9a42cb61d 100644 --- a/sinker/otel/components.go +++ b/sinker/otel/components.go @@ -3,8 +3,9 @@ package otel import ( "context" mfnats "github.com/mainflux/mainflux/pkg/messaging/nats" + "github.com/ns1labs/orb/sinker/otel/bridgeservice" + kafkaexporter "github.com/ns1labs/orb/sinker/otel/kafkafanoutexporter" "github.com/ns1labs/orb/sinker/otel/orbreceiver" - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configtelemetry" "go.opentelemetry.io/otel/metric/global" @@ -12,7 +13,7 @@ import ( "go.uber.org/zap" ) -func StartOtelComponents(ctx context.Context, logger *zap.Logger, kafkaUrl string, pubSub mfnats.PubSub) (context.CancelFunc, error) { +func StartOtelComponents(ctx context.Context, bridgeService *bridgeservice.SinkerOtelBridgeService, logger *zap.Logger, kafkaUrl string, pubSub mfnats.PubSub) (context.CancelFunc, error) { otelContext, otelCancelFunc := context.WithCancel(ctx) log := logger.Sugar() @@ -52,6 +53,7 @@ func StartOtelComponents(ctx context.Context, logger *zap.Logger, kafkaUrl strin receiverCfg := orbReceiverFactory.CreateDefaultConfig().(*orbreceiver.Config) receiverCfg.Logger = logger receiverCfg.PubSub = pubSub + receiverCfg.SinkerService = bridgeService receiverSet := component.ReceiverCreateSettings{ TelemetrySettings: component.TelemetrySettings{ Logger: logger, diff --git a/sinker/otel/kafkafanoutexporter/Makefile b/sinker/otel/kafkafanoutexporter/Makefile new file mode 100644 index 000000000..ded7a3609 --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/sinker/otel/kafkafanoutexporter/README.md b/sinker/otel/kafkafanoutexporter/README.md new file mode 100644 index 000000000..10e8f84c6 --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/README.md @@ -0,0 +1,90 @@ +# Kafka Exporter + +| Status | | +| ------------------------ |-----------------------| +| Stability | [beta] | +| Supported pipeline types | traces, logs, metrics | +| Distributions | [contrib] | + +Kafka exporter exports logs, metrics, and traces to Kafka. 
This exporter uses a synchronous producer +that blocks and does not batch messages; therefore, it should be used with batch and queued retry +processors for higher throughput and resiliency. Message payload encoding is configurable. + +The following settings are required: +- `protocol_version` (no default): Kafka protocol version e.g. 2.0.0 + +The following settings can be optionally configured: +- `brokers` (default = localhost:9092): The list of kafka brokers +- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the kafka topic to export to. +- `encoding` (default = otlp_proto): The encoding of the traces sent to kafka. All available encodings: + - `otlp_proto`: payload is Protobuf serialized from `ExportTraceServiceRequest` if set as a traces exporter or `ExportMetricsServiceRequest` for metrics or `ExportLogsServiceRequest` for logs. + - `otlp_json`: ** EXPERIMENTAL ** payload is JSON serialized from `ExportTraceServiceRequest` if set as a traces exporter or `ExportMetricsServiceRequest` for metrics or `ExportLogsServiceRequest` for logs. + - The following encodings are valid *only* for **traces**. + - `jaeger_proto`: the payload is serialized to a single Jaeger proto `Span`, and keyed by TraceID. + - `jaeger_json`: the payload is serialized to a single Jaeger JSON Span using `jsonpb`, and keyed by TraceID. + - The following encodings are valid *only* for **logs**. + - `raw`: if the log record body is a byte array, it is sent as is. Otherwise, it is serialized to JSON. Resource and record attributes are discarded. +- `auth` + - `plain_text` + - `username`: The username to use. + - `password`: The password to use. + - `sasl` + - `username`: The username to use. + - `password`: The password to use. + - `mechanism`: The SASL mechanism to use (SCRAM-SHA-256, SCRAM-SHA-512 or PLAIN) + - `tls` + - `ca_file`: path to the CA cert. For a client this verifies the server certificate. Should + only be used if `insecure` is set to true. + - `cert_file`: path to the TLS cert to use for TLS required connections. Should + only be used if `insecure` is set to true. + - `key_file`: path to the TLS key to use for TLS required connections. Should + only be used if `insecure` is set to true. + - `insecure` (default = false): Disable verifying the server's certificate chain and host + name (`InsecureSkipVerify` in the tls config) + - `server_name_override`: ServerName indicates the name of the server requested by the client + in order to support virtual hosting. + - `kerberos` + - `service_name`: Kerberos service name + - `realm`: Kerberos realm + - `use_keytab`: Use a keytab instead of a password; if true, the keytab file will be used instead of the password + - `username`: The Kerberos username used to authenticate with the KDC + - `password`: The Kerberos password used to authenticate with the KDC + - `config_file`: Path to the Kerberos configuration, e.g. /etc/krb5.conf + - `keytab_file`: Path to the keytab file, e.g. /etc/security/kafka.keytab +- `metadata` + - `full` (default = true): Whether to maintain a full set of metadata. + When disabled the client does not make the initial request to the broker at startup. + - `retry` + - `max` (default = 3): The number of retries to get metadata + - `backoff` (default = 250ms): How long to wait between metadata retries +- `timeout` (default = 5s): The timeout for every attempt to send data to the backend.
+- `retry_on_failure` + - `enabled` (default = true) + - `initial_interval` (default = 5s): Time to wait after the first failure before retrying; ignored if `enabled` is `false` + - `max_interval` (default = 30s): The upper bound on backoff; ignored if `enabled` is `false` + - `max_elapsed_time` (default = 120s): The maximum amount of time spent trying to send a batch; ignored if `enabled` is `false` +- `sending_queue` + - `enabled` (default = true) + - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false` + - `queue_size` (default = 5000): Maximum number of batches kept in memory before dropping data; ignored if `enabled` is `false`; + User should calculate this as `num_seconds * requests_per_second` where: + - `num_seconds` is the number of seconds to buffer in case of a backend outage + - `requests_per_second` is the average number of requests per second. +- `producer` + - `max_message_bytes` (default = 1000000) the maximum permitted size of a message in bytes + - `required_acks` (default = 1) controls when a message is regarded as transmitted. https://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#RequiredAcks + - `compression` (default = 'none') the compression used when producing messages to kafka. The options are: `none`, `gzip`, `snappy`, `lz4`, and `zstd` https://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#CompressionCodec + - `flush_max_messages` (default = 0) The maximum number of messages the producer will send in a single broker request. + +Example configuration: + +```yaml +exporters: + kafka: + brokers: + - localhost:9092 + protocol_version: 2.0.0 +``` + +[beta]:https://github.com/open-telemetry/opentelemetry-collector#beta +[contrib]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib \ No newline at end of file diff --git a/sinker/otel/kafkafanoutexporter/authentication.go b/sinker/otel/kafkafanoutexporter/authentication.go new file mode 100644 index 000000000..931976295 --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/authentication.go @@ -0,0 +1,160 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" + +import ( + "crypto/sha256" + "crypto/sha512" + "fmt" + "github.com/ns1labs/orb/sinker/otel/kafkafanoutexporter/internal/awsmsk" + + "github.com/Shopify/sarama" + "go.opentelemetry.io/collector/config/configtls" +) + +// Authentication defines authentication. +type Authentication struct { + PlainText *PlainTextConfig `mapstructure:"plain_text"` + SASL *SASLConfig `mapstructure:"sasl"` + TLS *configtls.TLSClientSetting `mapstructure:"tls"` + Kerberos *KerberosConfig `mapstructure:"kerberos"` +} + +// PlainTextConfig defines plaintext authentication.
+type PlainTextConfig struct { + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` +} + +// SASLConfig defines the configuration for the SASL authentication. +type SASLConfig struct { + // Username to be used on authentication + Username string `mapstructure:"username"` + // Password to be used on authentication + Password string `mapstructure:"password"` + // SASL Mechanism to be used, possible values are: (PLAIN, AWS_MSK_IAM, SCRAM-SHA-256 or SCRAM-SHA-512). + Mechanism string `mapstructure:"mechanism"` + + AWSMSK AWSMSKConfig `mapstructure:"aws_msk"` +} + +// AWSMSKConfig defines the additional SASL authentication +// measures needed to use the AWS_MSK_IAM mechanism +type AWSMSKConfig struct { + // Region is the AWS region the MSK cluster is based in + Region string `mapstructure:"region"` + // BrokerAddr is the broker address the client connects to in order to perform the required authentication + BrokerAddr string `mapstructure:"broker_addr"` +} + +// KerberosConfig defines Kerberos configuration. +type KerberosConfig struct { + ServiceName string `mapstructure:"service_name"` + Realm string `mapstructure:"realm"` + UseKeyTab bool `mapstructure:"use_keytab"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password" json:"-"` + ConfigPath string `mapstructure:"config_file"` + KeyTabPath string `mapstructure:"keytab_file"` +} + +// ConfigureAuthentication configures authentication in sarama.Config. +func ConfigureAuthentication(config Authentication, saramaConfig *sarama.Config) error { + if config.PlainText != nil { + configurePlaintext(*config.PlainText, saramaConfig) + } + if config.TLS != nil { + if err := configureTLS(*config.TLS, saramaConfig); err != nil { + return err + } + } + if config.SASL != nil { + if err := configureSASL(*config.SASL, saramaConfig); err != nil { + return err + } + } + + if config.Kerberos != nil { + configureKerberos(*config.Kerberos, saramaConfig) + } + return nil +} + +func configurePlaintext(config PlainTextConfig, saramaConfig *sarama.Config) { + saramaConfig.Net.SASL.Enable = true + saramaConfig.Net.SASL.User = config.Username + saramaConfig.Net.SASL.Password = config.Password +} + +func configureSASL(config SASLConfig, saramaConfig *sarama.Config) error { + + if config.Username == "" { + return fmt.Errorf("username has to be provided") + } + + if config.Password == "" { + return fmt.Errorf("password has to be provided") + } + + saramaConfig.Net.SASL.Enable = true + saramaConfig.Net.SASL.User = config.Username + saramaConfig.Net.SASL.Password = config.Password + + switch config.Mechanism { + case "SCRAM-SHA-512": + saramaConfig.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: sha512.New} } + saramaConfig.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512 + case "SCRAM-SHA-256": + saramaConfig.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: sha256.New} } + saramaConfig.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256 + case "PLAIN": + saramaConfig.Net.SASL.Mechanism = sarama.SASLTypePlaintext + case "AWS_MSK_IAM": + saramaConfig.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return awsmsk.NewIAMSASLClient(config.AWSMSK.BrokerAddr, config.AWSMSK.Region, saramaConfig.ClientID) + } + saramaConfig.Net.SASL.Mechanism = awsmsk.Mechanism + default: + return fmt.Errorf(`invalid SASL Mechanism %q: can be either "PLAIN", "AWS_MSK_IAM", "SCRAM-SHA-256" or "SCRAM-SHA-512"`, config.Mechanism) + } + + 
return nil +} + +func configureTLS(config configtls.TLSClientSetting, saramaConfig *sarama.Config) error { + tlsConfig, err := config.LoadTLSConfig() + if err != nil { + return fmt.Errorf("error loading tls config: %w", err) + } + saramaConfig.Net.TLS.Enable = true + saramaConfig.Net.TLS.Config = tlsConfig + return nil +} + +func configureKerberos(config KerberosConfig, saramaConfig *sarama.Config) { + saramaConfig.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI + saramaConfig.Net.SASL.Enable = true + if config.UseKeyTab { + saramaConfig.Net.SASL.GSSAPI.KeyTabPath = config.KeyTabPath + saramaConfig.Net.SASL.GSSAPI.AuthType = sarama.KRB5_KEYTAB_AUTH + } else { + saramaConfig.Net.SASL.GSSAPI.AuthType = sarama.KRB5_USER_AUTH + saramaConfig.Net.SASL.GSSAPI.Password = config.Password + } + saramaConfig.Net.SASL.GSSAPI.KerberosConfigPath = config.ConfigPath + saramaConfig.Net.SASL.GSSAPI.Username = config.Username + saramaConfig.Net.SASL.GSSAPI.Realm = config.Realm + saramaConfig.Net.SASL.GSSAPI.ServiceName = config.ServiceName +} diff --git a/sinker/otel/kafkafanoutexporter/authentication_test.go b/sinker/otel/kafkafanoutexporter/authentication_test.go new file mode 100644 index 000000000..36babac91 --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/authentication_test.go @@ -0,0 +1,141 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
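A short usage sketch for the vendored authentication helper above, assuming the fan-out exporter package is imported under its `kafkaexporter` package name; the credentials are placeholders:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
	kafkaexporter "github.com/ns1labs/orb/sinker/otel/kafkafanoutexporter"
)

func main() {
	cfg := sarama.NewConfig()
	auth := kafkaexporter.Authentication{
		SASL: &kafkaexporter.SASLConfig{
			Username:  "jdoe", // placeholder credentials
			Password:  "pass",
			Mechanism: "SCRAM-SHA-512",
		},
	}
	// ConfigureAuthentication mutates the sarama config in place and fails
	// on missing credentials or an unknown SASL mechanism.
	if err := kafkaexporter.ConfigureAuthentication(auth, cfg); err != nil {
		log.Fatal(err)
	}
	log.Printf("SASL enabled=%v mechanism=%s", cfg.Net.SASL.Enable, cfg.Net.SASL.Mechanism)
}
```

The table-driven tests that follow exercise these same plaintext, TLS, Kerberos, and SASL branches.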
+ +package kafkaexporter + +import ( + "testing" + + "github.com/Shopify/sarama" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/config/configtls" +) + +func TestAuthentication(t *testing.T) { + saramaPlaintext := &sarama.Config{} + saramaPlaintext.Net.SASL.Enable = true + saramaPlaintext.Net.SASL.User = "jdoe" + saramaPlaintext.Net.SASL.Password = "pass" + + saramaSASLSCRAM256Config := &sarama.Config{} + saramaSASLSCRAM256Config.Net.SASL.Enable = true + saramaSASLSCRAM256Config.Net.SASL.User = "jdoe" + saramaSASLSCRAM256Config.Net.SASL.Password = "pass" + saramaSASLSCRAM256Config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256 + + saramaSASLSCRAM512Config := &sarama.Config{} + saramaSASLSCRAM512Config.Net.SASL.Enable = true + saramaSASLSCRAM512Config.Net.SASL.User = "jdoe" + saramaSASLSCRAM512Config.Net.SASL.Password = "pass" + saramaSASLSCRAM512Config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512 + + saramaSASLPLAINConfig := &sarama.Config{} + saramaSASLPLAINConfig.Net.SASL.Enable = true + saramaSASLPLAINConfig.Net.SASL.User = "jdoe" + saramaSASLPLAINConfig.Net.SASL.Password = "pass" + + saramaSASLPLAINConfig.Net.SASL.Mechanism = sarama.SASLTypePlaintext + + saramaTLSCfg := &sarama.Config{} + saramaTLSCfg.Net.TLS.Enable = true + tlsClient := configtls.TLSClientSetting{} + tlscfg, err := tlsClient.LoadTLSConfig() + require.NoError(t, err) + saramaTLSCfg.Net.TLS.Config = tlscfg + + saramaKerberosCfg := &sarama.Config{} + saramaKerberosCfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI + saramaKerberosCfg.Net.SASL.Enable = true + saramaKerberosCfg.Net.SASL.GSSAPI.ServiceName = "foobar" + saramaKerberosCfg.Net.SASL.GSSAPI.AuthType = sarama.KRB5_USER_AUTH + + saramaKerberosKeyTabCfg := &sarama.Config{} + saramaKerberosKeyTabCfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI + saramaKerberosKeyTabCfg.Net.SASL.Enable = true + saramaKerberosKeyTabCfg.Net.SASL.GSSAPI.KeyTabPath = "/path" + saramaKerberosKeyTabCfg.Net.SASL.GSSAPI.AuthType = sarama.KRB5_KEYTAB_AUTH + + tests := []struct { + auth Authentication + saramaConfig *sarama.Config + err string + }{ + { + auth: Authentication{PlainText: &PlainTextConfig{Username: "jdoe", Password: "pass"}}, + saramaConfig: saramaPlaintext, + }, + { + auth: Authentication{TLS: &configtls.TLSClientSetting{}}, + saramaConfig: saramaTLSCfg, + }, + { + auth: Authentication{TLS: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{CAFile: "/doesnotexists"}, + }}, + saramaConfig: saramaTLSCfg, + err: "failed to load TLS config", + }, + { + auth: Authentication{Kerberos: &KerberosConfig{ServiceName: "foobar"}}, + saramaConfig: saramaKerberosCfg, + }, + { + auth: Authentication{Kerberos: &KerberosConfig{UseKeyTab: true, KeyTabPath: "/path"}}, + saramaConfig: saramaKerberosKeyTabCfg, + }, + { + auth: Authentication{SASL: &SASLConfig{Username: "jdoe", Password: "pass", Mechanism: "SCRAM-SHA-256"}}, + saramaConfig: saramaSASLSCRAM256Config, + }, + { + auth: Authentication{SASL: &SASLConfig{Username: "jdoe", Password: "pass", Mechanism: "SCRAM-SHA-512"}}, + saramaConfig: saramaSASLSCRAM512Config, + }, + + { + auth: Authentication{SASL: &SASLConfig{Username: "jdoe", Password: "pass", Mechanism: "PLAIN"}}, + saramaConfig: saramaSASLPLAINConfig, + }, + { + auth: Authentication{SASL: &SASLConfig{Username: "jdoe", Password: "pass", Mechanism: "SCRAM-SHA-222"}}, + saramaConfig: saramaSASLSCRAM512Config, + err: "invalid SASL Mechanism", + }, + { + auth: Authentication{SASL: &SASLConfig{Username: "", Password: 
"pass", Mechanism: "SCRAM-SHA-512"}}, + saramaConfig: saramaSASLSCRAM512Config, + err: "username have to be provided", + }, + { + auth: Authentication{SASL: &SASLConfig{Username: "jdoe", Password: "", Mechanism: "SCRAM-SHA-512"}}, + saramaConfig: saramaSASLSCRAM512Config, + err: "password have to be provided", + }, + } + for _, test := range tests { + t.Run("", func(t *testing.T) { + config := &sarama.Config{} + err := ConfigureAuthentication(test.auth, config) + if test.err != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), test.err) + } else { + // equalizes SCRAMClientGeneratorFunc to do assertion with the same reference. + config.Net.SASL.SCRAMClientGeneratorFunc = test.saramaConfig.Net.SASL.SCRAMClientGeneratorFunc + assert.Equal(t, test.saramaConfig, config) + } + }) + } +} diff --git a/sinker/otel/kafkafanoutexporter/config.go b/sinker/otel/kafkafanoutexporter/config.go new file mode 100644 index 000000000..9d043a092 --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/config.go @@ -0,0 +1,133 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" + +import ( + "fmt" + "time" + + "github.com/Shopify/sarama" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +// Config defines configuration for Kafka exporter. +type Config struct { + config.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + exporterhelper.QueueSettings `mapstructure:"sending_queue"` + exporterhelper.RetrySettings `mapstructure:"retry_on_failure"` + + // The list of kafka brokers (default localhost:9092) + Brokers []string `mapstructure:"brokers"` + // Kafka protocol version + ProtocolVersion string `mapstructure:"protocol_version"` + // The name of the kafka topic to export to (default otlp_spans for traces, otlp_metrics for metrics) + Topic string `mapstructure:"topic"` + + // Encoding of messages (default "otlp_proto") + Encoding string `mapstructure:"encoding"` + + // Metadata is the namespace for metadata management properties used by the + // Client, and shared by the Producer/Consumer. + Metadata Metadata `mapstructure:"metadata"` + + // Producer is the namespaces for producer properties used only by the Producer + Producer Producer `mapstructure:"producer"` + + // Authentication defines used authentication mechanism. + Authentication Authentication `mapstructure:"auth"` +} + +// Metadata defines configuration for retrieving metadata from the broker. +type Metadata struct { + // Whether to maintain a full set of metadata for all topics, or just + // the minimal set that has been necessary so far. 
+	// the minimal set that has been necessary so far. The full set is simpler
+	// and usually more convenient, but can take up a substantial amount of
+	// memory if you have many topics and partitions. Defaults to true.
+	Full bool `mapstructure:"full"`
+
+	// Retry configuration for metadata.
+	// This configuration is useful to avoid race conditions when the broker
+	// is starting up at the same time as the collector.
+	Retry MetadataRetry `mapstructure:"retry"`
+}
+
+// Producer defines configuration for the producer.
+type Producer struct {
+	// The maximum permitted size (in bytes) of a message the producer will produce.
+	MaxMessageBytes int `mapstructure:"max_message_bytes"`
+
+	// RequiredAcks is the number of acknowledgements required to assume that a message has been sent.
+	// https://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#RequiredAcks
+	// The options are:
+	//   0 -> NoResponse.   doesn't send any response
+	//   1 -> WaitForLocal. waits for only the local commit to succeed before responding (default)
+	//  -1 -> WaitForAll.   waits for all in-sync replicas to commit before responding.
+	RequiredAcks sarama.RequiredAcks `mapstructure:"required_acks"`
+
+	// Compression codec used to produce messages
+	// https://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#CompressionCodec
+	// The options are: 'none', 'gzip', 'snappy', 'lz4', and 'zstd'
+	Compression string `mapstructure:"compression"`
+
+	// The maximum number of messages the producer will send in a single
+	// broker request. Defaults to 0 for unlimited. Similar to
+	// `queue.buffering.max.messages` in the JVM producer.
+	FlushMaxMessages int `mapstructure:"flush_max_messages"`
+}
+
+// MetadataRetry defines retry configuration for Metadata.
+type MetadataRetry struct {
+	// The total number of times to retry a metadata request when the
+	// cluster is in the middle of a leader election or at startup (default 3).
+	Max int `mapstructure:"max"`
+	// How long to wait for leader election to occur before retrying
+	// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+	Backoff time.Duration `mapstructure:"backoff"`
+}
+
+var _ config.Exporter = (*Config)(nil)
+
+// Validate checks if the exporter configuration is valid.
+func (cfg *Config) Validate() error {
+	if cfg.Producer.RequiredAcks < -1 || cfg.Producer.RequiredAcks > 1 {
+		return fmt.Errorf("producer.required_acks has to be between -1 and 1. configured value %v", cfg.Producer.RequiredAcks)
+	}
+
+	_, err := saramaProducerCompressionCodec(cfg.Producer.Compression)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func saramaProducerCompressionCodec(compression string) (sarama.CompressionCodec, error) {
+	switch compression {
+	case "none":
+		return sarama.CompressionNone, nil
+	case "gzip":
+		return sarama.CompressionGZIP, nil
+	case "snappy":
+		return sarama.CompressionSnappy, nil
+	case "lz4":
+		return sarama.CompressionLZ4, nil
+	case "zstd":
+		return sarama.CompressionZSTD, nil
+	default:
+		return sarama.CompressionNone, fmt.Errorf("producer.compression should be one of 'none', 'gzip', 'snappy', 'lz4', or 'zstd'. configured value %v", compression)
+	}
+}
diff --git a/sinker/otel/kafkafanoutexporter/config_test.go b/sinker/otel/kafkafanoutexporter/config_test.go
new file mode 100644
index 000000000..11f65095e
--- /dev/null
+++ b/sinker/otel/kafkafanoutexporter/config_test.go
@@ -0,0 +1,140 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter + +import ( + "fmt" + "path/filepath" + "testing" + "time" + + "github.com/Shopify/sarama" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/service/servicetest" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.NopFactories() + assert.NoError(t, err) + + factory := NewFactory() + factories.Exporters[typeStr] = factory + cfg, err := servicetest.LoadConfigAndValidate(filepath.Join("testdata", "config.yaml"), factories) + require.NoError(t, err) + require.Equal(t, 1, len(cfg.Exporters)) + + c := cfg.Exporters[config.NewComponentID(typeStr)].(*Config) + assert.Equal(t, &Config{ + ExporterSettings: config.NewExporterSettings(config.NewComponentID(typeStr)), + TimeoutSettings: exporterhelper.TimeoutSettings{ + Timeout: 10 * time.Second, + }, + RetrySettings: exporterhelper.RetrySettings{ + Enabled: true, + InitialInterval: 10 * time.Second, + MaxInterval: 1 * time.Minute, + MaxElapsedTime: 10 * time.Minute, + }, + QueueSettings: exporterhelper.QueueSettings{ + Enabled: true, + NumConsumers: 2, + QueueSize: 10, + }, + Topic: "spans", + Encoding: "otlp_proto", + Brokers: []string{"foo:123", "bar:456"}, + Authentication: Authentication{ + PlainText: &PlainTextConfig{ + Username: "jdoe", + Password: "pass", + }, + }, + Metadata: Metadata{ + Full: false, + Retry: MetadataRetry{ + Max: 15, + Backoff: defaultMetadataRetryBackoff, + }, + }, + Producer: Producer{ + MaxMessageBytes: 10000000, + RequiredAcks: sarama.WaitForAll, + Compression: "none", + }, + }, c) +} + +func TestValidate_err_compression(t *testing.T) { + config := &Config{ + Producer: Producer{ + Compression: "idk", + }, + } + + err := config.Validate() + assert.Error(t, err) + assert.Equal(t, err.Error(), "producer.compression should be one of 'none', 'gzip', 'snappy', 'lz4', or 'zstd'. configured value idk") +} + +func Test_saramaProducerCompressionCodec(t *testing.T) { + tests := map[string]struct { + compression string + expectedCompression sarama.CompressionCodec + expectedError error + }{ + "none": { + compression: "none", + expectedCompression: sarama.CompressionNone, + expectedError: nil, + }, + "gzip": { + compression: "gzip", + expectedCompression: sarama.CompressionGZIP, + expectedError: nil, + }, + "snappy": { + compression: "snappy", + expectedCompression: sarama.CompressionSnappy, + expectedError: nil, + }, + "lz4": { + compression: "lz4", + expectedCompression: sarama.CompressionLZ4, + expectedError: nil, + }, + "zstd": { + compression: "zstd", + expectedCompression: sarama.CompressionZSTD, + expectedError: nil, + }, + "unknown": { + compression: "unknown", + expectedCompression: sarama.CompressionNone, + expectedError: fmt.Errorf("producer.compression should be one of 'none', 'gzip', 'snappy', 'lz4', or 'zstd'. 
configured value unknown"),
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			c, err := saramaProducerCompressionCodec(test.compression)
+			assert.Equal(t, c, test.expectedCompression)
+			assert.Equal(t, err, test.expectedError)
+		})
+	}
+}
diff --git a/sinker/otel/kafkafanoutexporter/doc.go b/sinker/otel/kafkafanoutexporter/doc.go
new file mode 100644
index 000000000..e9232027c
--- /dev/null
+++ b/sinker/otel/kafkafanoutexporter/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package kafkaexporter exports trace, metric, and log data to Kafka.
+package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter"
diff --git a/sinker/otel/kafkafanoutexporter/factory.go b/sinker/otel/kafkafanoutexporter/factory.go
new file mode 100644
index 000000000..747aea29c
--- /dev/null
+++ b/sinker/otel/kafkafanoutexporter/factory.go
@@ -0,0 +1,204 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter"
+
+import (
+	"context"
+	"time"
+
+	"github.com/Shopify/sarama"
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+const (
+	typeStr = "kafka"
+	// The stability level of the exporter.
+	stability           = component.StabilityLevelBeta
+	defaultTracesTopic  = "otlp_spans"
+	defaultMetricsTopic = "otlp_metrics"
+	defaultLogsTopic    = "otlp_logs"
+	defaultEncoding     = "otlp_proto"
+	defaultBroker       = "localhost:9092"
+	// default from sarama.NewConfig()
+	defaultMetadataRetryMax = 3
+	// default from sarama.NewConfig()
+	defaultMetadataRetryBackoff = time.Millisecond * 250
+	// default from sarama.NewConfig()
+	defaultMetadataFull = true
+	// default max.message.bytes for the producer
+	defaultProducerMaxMessageBytes = 1000000
+	// default required_acks for the producer
+	defaultProducerRequiredAcks = sarama.WaitForLocal
+	// default from sarama.NewConfig()
+	defaultCompression = "none"
+	// default from sarama.NewConfig()
+	defaultFlushMaxMessages = 0
+)
+
+// FactoryOption applies changes to kafkaExporterFactory.
+type FactoryOption func(factory *kafkaExporterFactory)
+
+// WithTracesMarshalers adds tracesMarshalers.
+func WithTracesMarshalers(tracesMarshalers ...TracesMarshaler) FactoryOption {
+	return func(factory *kafkaExporterFactory) {
+		for _, marshaler := range tracesMarshalers {
+			factory.tracesMarshalers[marshaler.Encoding()] = marshaler
+		}
+	}
+}
+
+// NewFactory creates the Kafka exporter factory.
+func NewFactory(options ...FactoryOption) component.ExporterFactory {
+	f := &kafkaExporterFactory{
+		tracesMarshalers:  tracesMarshalers(),
+		metricsMarshalers: metricsMarshalers(),
+		logsMarshalers:    logsMarshalers(),
+	}
+	for _, o := range options {
+		o(f)
+	}
+	return component.NewExporterFactory(
+		typeStr,
+		createDefaultConfig,
+		component.WithTracesExporter(f.createTracesExporter, stability),
+		component.WithMetricsExporter(f.createMetricsExporter, stability),
+		component.WithLogsExporter(f.createLogsExporter, stability),
+	)
+}
+
+func createDefaultConfig() config.Exporter {
+	return &Config{
+		ExporterSettings: config.NewExporterSettings(config.NewComponentID(typeStr)),
+		TimeoutSettings:  exporterhelper.NewDefaultTimeoutSettings(),
+		RetrySettings:    exporterhelper.NewDefaultRetrySettings(),
+		QueueSettings:    exporterhelper.NewDefaultQueueSettings(),
+		Brokers:          []string{defaultBroker},
+		// using an empty topic to track when it has not been set by the user; the default depends on the signal type (traces, metrics, or logs).
+		Topic:    "",
+		Encoding: defaultEncoding,
+		Metadata: Metadata{
+			Full: defaultMetadataFull,
+			Retry: MetadataRetry{
+				Max:     defaultMetadataRetryMax,
+				Backoff: defaultMetadataRetryBackoff,
+			},
+		},
+		Producer: Producer{
+			MaxMessageBytes:  defaultProducerMaxMessageBytes,
+			RequiredAcks:     defaultProducerRequiredAcks,
+			Compression:      defaultCompression,
+			FlushMaxMessages: defaultFlushMaxMessages,
+		},
+	}
+}
+
+type kafkaExporterFactory struct {
+	tracesMarshalers  map[string]TracesMarshaler
+	metricsMarshalers map[string]MetricsMarshaler
+	logsMarshalers    map[string]LogsMarshaler
+}
+
+func (f *kafkaExporterFactory) createTracesExporter(
+	ctx context.Context,
+	set component.ExporterCreateSettings,
+	cfg config.Exporter,
+) (component.TracesExporter, error) {
+	oCfg := *(cfg.(*Config)) // Clone the config
+	if oCfg.Topic == "" {
+		oCfg.Topic = defaultTracesTopic
+	}
+	if oCfg.Encoding == "otlp_json" {
+		set.Logger.Info("otlp_json is considered experimental and should not be used in a production environment")
+	}
+	exp, err := newTracesExporter(oCfg, set, f.tracesMarshalers)
+	if err != nil {
+		return nil, err
+	}
+	return exporterhelper.NewTracesExporter(
+		ctx,
+		set,
+		&oCfg,
+		exp.tracesPusher,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
+		// Disable exporterhelper Timeout, because we cannot pass a Context to the Producer,
+		// and will rely on the sarama Producer Timeout logic.
+ exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings), + exporterhelper.WithShutdown(exp.Close)) +} + +func (f *kafkaExporterFactory) createMetricsExporter( + ctx context.Context, + set component.ExporterCreateSettings, + cfg config.Exporter, +) (component.MetricsExporter, error) { + oCfg := *(cfg.(*Config)) // Clone the config + if oCfg.Topic == "" { + oCfg.Topic = defaultMetricsTopic + } + if oCfg.Encoding == "otlp_json" { + set.Logger.Info("otlp_json is considered experimental and should not be used in a production environment") + } + exp, err := newMetricsExporter(oCfg, set, f.metricsMarshalers) + if err != nil { + return nil, err + } + return exporterhelper.NewMetricsExporter( + ctx, + set, + &oCfg, + exp.metricsDataPusher, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, + // and will rely on the sarama Producer Timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings), + exporterhelper.WithShutdown(exp.Close)) +} + +func (f *kafkaExporterFactory) createLogsExporter( + ctx context.Context, + set component.ExporterCreateSettings, + cfg config.Exporter, +) (component.LogsExporter, error) { + oCfg := *(cfg.(*Config)) // Clone the config + if oCfg.Topic == "" { + oCfg.Topic = defaultLogsTopic + } + if oCfg.Encoding == "otlp_json" { + set.Logger.Info("otlp_json is considered experimental and should not be used in a production environment") + } + exp, err := newLogsExporter(oCfg, set, f.logsMarshalers) + if err != nil { + return nil, err + } + return exporterhelper.NewLogsExporter( + ctx, + set, + &oCfg, + exp.logsDataPusher, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, + // and will rely on the sarama Producer Timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings), + exporterhelper.WithShutdown(exp.Close)) +} diff --git a/sinker/otel/kafkafanoutexporter/factory_test.go b/sinker/otel/kafkafanoutexporter/factory_test.go new file mode 100644 index 000000000..8d178fe7e --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/factory_test.go @@ -0,0 +1,174 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
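+
+// The factory flow exercised by these tests, as a standalone sketch (the broker
+// address is hypothetical; Metadata.Full = false keeps the broker from being
+// contacted at creation time):
+//
+//	f := NewFactory()
+//	cfg := f.CreateDefaultConfig().(*Config)
+//	cfg.Brokers = []string{"broker:9092"}
+//	cfg.Metadata.Full = false
+//	exp, err := f.CreateTracesExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg)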
+ +package kafkaexporter + +import ( + "context" + "reflect" + "testing" + + "github.com/Shopify/sarama" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig().(*Config) + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configtest.CheckConfigStruct(cfg)) + assert.Equal(t, []string{defaultBroker}, cfg.Brokers) + assert.Equal(t, "", cfg.Topic) +} + +func TestCreateAllExporter(t *testing.T) { + cfg0 := createDefaultConfig().(*Config) + cfg1 := createDefaultConfig().(*Config) + cfg2 := createDefaultConfig().(*Config) + + cfg0.Brokers = []string{"invalid:9092"} + cfg1.Brokers = []string{"invalid:9092"} + cfg2.Brokers = []string{"invalid:9092"} + + cfg0.ProtocolVersion = "2.0.0" + cfg1.ProtocolVersion = "2.0.0" + cfg2.ProtocolVersion = "2.0.0" + + // this disables contacting the broker so we can successfully create the exporter + cfg0.Metadata.Full = false + cfg1.Metadata.Full = false + cfg2.Metadata.Full = false + + cfgClone := *cfg0 // Clone the config + + f := kafkaExporterFactory{tracesMarshalers: tracesMarshalers(), metricsMarshalers: metricsMarshalers(), logsMarshalers: logsMarshalers()} + r0, err := f.createTracesExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg0) + require.NoError(t, err) + r1, err := f.createMetricsExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg1) + require.NoError(t, err) + r2, err := f.createLogsExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg2) + require.NoError(t, err) + + // createTracesExporter should not mutate values + assert.True(t, reflect.DeepEqual(*cfg0, cfgClone), "config should not mutate") + assert.True(t, reflect.DeepEqual(*cfg1, cfgClone), "config should not mutate") + assert.True(t, reflect.DeepEqual(*cfg2, cfgClone), "config should not mutate") + assert.NotNil(t, r0) + assert.NotNil(t, r1) + assert.NotNil(t, r2) +} + +func TestCreateTracesExporter(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Brokers = []string{"invalid:9092"} + cfg.ProtocolVersion = "2.0.0" + // this disables contacting the broker so we can successfully create the exporter + cfg.Metadata.Full = false + f := kafkaExporterFactory{tracesMarshalers: tracesMarshalers()} + r, err := f.createTracesExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg) + require.NoError(t, err) + assert.NotNil(t, r) +} + +func TestCreateMetricsExport(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Brokers = []string{"invalid:9092"} + cfg.ProtocolVersion = "2.0.0" + // this disables contacting the broker so we can successfully create the exporter + cfg.Metadata.Full = false + mf := kafkaExporterFactory{metricsMarshalers: metricsMarshalers()} + mr, err := mf.createMetricsExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg) + require.NoError(t, err) + assert.NotNil(t, mr) +} + +func TestCreateLogsExport(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Brokers = []string{"invalid:9092"} + cfg.ProtocolVersion = "2.0.0" + // this disables contacting the broker so we can successfully create the exporter + cfg.Metadata.Full = false + mf := kafkaExporterFactory{logsMarshalers: logsMarshalers()} + mr, err := 
mf.createLogsExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg)
+	require.NoError(t, err)
+	assert.NotNil(t, mr)
+}
+
+func TestCreateTracesExporter_err(t *testing.T) {
+	cfg := createDefaultConfig().(*Config)
+	cfg.Brokers = []string{"invalid:9092"}
+	cfg.ProtocolVersion = "2.0.0"
+	f := kafkaExporterFactory{tracesMarshalers: tracesMarshalers()}
+	r, err := f.createTracesExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg)
+	// no available broker
+	require.Error(t, err)
+	assert.Nil(t, r)
+}
+
+func TestCreateMetricsExporter_err(t *testing.T) {
+	cfg := createDefaultConfig().(*Config)
+	cfg.Brokers = []string{"invalid:9092"}
+	cfg.ProtocolVersion = "2.0.0"
+	mf := kafkaExporterFactory{metricsMarshalers: metricsMarshalers()}
+	mr, err := mf.createMetricsExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg)
+	require.Error(t, err)
+	assert.Nil(t, mr)
+}
+
+func TestCreateLogsExporter_err(t *testing.T) {
+	cfg := createDefaultConfig().(*Config)
+	cfg.Brokers = []string{"invalid:9092"}
+	cfg.ProtocolVersion = "2.0.0"
+	mf := kafkaExporterFactory{logsMarshalers: logsMarshalers()}
+	mr, err := mf.createLogsExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg)
+	require.Error(t, err)
+	assert.Nil(t, mr)
+}
+
+func TestWithMarshalers(t *testing.T) {
+	cm := &customMarshaler{}
+	f := NewFactory(WithTracesMarshalers(cm))
+	cfg := createDefaultConfig().(*Config)
+	// disable contacting broker
+	cfg.Metadata.Full = false
+
+	t.Run("custom_encoding", func(t *testing.T) {
+		cfg.Encoding = cm.Encoding()
+		exporter, err := f.CreateTracesExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg)
+		require.NoError(t, err)
+		require.NotNil(t, exporter)
+	})
+	t.Run("default_encoding", func(t *testing.T) {
+		cfg.Encoding = defaultEncoding
+		exporter, err := f.CreateTracesExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), cfg)
+		require.NoError(t, err)
+		assert.NotNil(t, exporter)
+	})
+}
+
+type customMarshaler struct {
+}
+
+var _ TracesMarshaler = (*customMarshaler)(nil)
+
+func (c customMarshaler) Marshal(_ ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) {
+	panic("implement me")
+}
+
+func (c customMarshaler) Encoding() string {
+	return "custom"
+}
diff --git a/sinker/otel/kafkafanoutexporter/internal/awsmsk/doc.go b/sinker/otel/kafkafanoutexporter/internal/awsmsk/doc.go
new file mode 100644
index 000000000..91ef1899c
--- /dev/null
+++ b/sinker/otel/kafkafanoutexporter/internal/awsmsk/doc.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package awsmsk implements the required IAM auth used by AWS' managed Kafka platform,
+// to be used with the Sarama Kafka producer.
+// +// Further details on how the SASL connector works can be viewed here: +// +// https://github.com/aws/aws-msk-iam-auth#details +package awsmsk // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/awsmsk" diff --git a/sinker/otel/kafkafanoutexporter/internal/awsmsk/iam_scram_client.go b/sinker/otel/kafkafanoutexporter/internal/awsmsk/iam_scram_client.go new file mode 100644 index 000000000..5dab6122f --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/internal/awsmsk/iam_scram_client.go @@ -0,0 +1,193 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package awsmsk // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/awsmsk" + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/Shopify/sarama" + "github.com/aws/aws-sdk-go/aws/credentials" + sign "github.com/aws/aws-sdk-go/aws/signer/v4" + "go.uber.org/multierr" +) + +const ( + Mechanism = "AWS_MSK_IAM" + + service = "kafka-cluster" + supportedVersion = "2020_10_22" + scopeFormat = `%s/%s/%s/kafka-cluster/aws4_request` +) + +const ( + _ int32 = iota // Ignoring the zero value to ensure we start up correctly + initMessage + serverResponse + complete + failed +) + +var ( + ErrFailedServerChallenge = errors.New("failed server challenge") + ErrBadChallenge = errors.New("invalid challenge data provided") + ErrInvalidStateReached = errors.New("invalid state reached") +) + +type IAMSASLClient struct { + MSKHostname string + Region string + UserAgent string + + signer *sign.StreamSigner + + state int32 + accessKey string + secretKey string +} + +type payload struct { + Version string `json:"version"` + BrokerHost string `json:"host"` + UserAgent string `json:"user-agent"` + Action string `json:"action"` + Algorithm string `json:"x-amz-algorithm"` + Credentials string `json:"x-amz-credential"` + Date string `json:"x-amz-date"` + Expires string `json:"x-amz-expires"` + SignedHeaders string `json:"x-amz-signedheaders"` + Signature string `json:"x-amz-signature"` +} + +type response struct { + Version string `json:"version"` + RequestID string `json:"request-id"` +} + +var _ sarama.SCRAMClient = (*IAMSASLClient)(nil) + +func NewIAMSASLClient(mskhostname, region, useragent string) sarama.SCRAMClient { + return &IAMSASLClient{ + MSKHostname: mskhostname, + Region: region, + UserAgent: useragent, + } +} + +func (sc *IAMSASLClient) Begin(username, password, _ string) error { + if sc.MSKHostname == "" { + return errors.New("missing required MSK Broker hostname") + } + + if sc.Region == "" { + return errors.New("missing MSK cluster region") + } + + if sc.UserAgent == "" { + return errors.New("missing value for MSK user agent") + } + + sc.signer = sign.NewStreamSigner( + sc.Region, + service, + nil, + credentials.NewChainCredentials([]credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: username, + 
SecretAccessKey: password,
+				},
+			},
+		}),
+	)
+	sc.accessKey = username
+	sc.secretKey = password
+	sc.state = initMessage
+	return nil
+}
+
+func (sc *IAMSASLClient) Step(challenge string) (string, error) {
+	var resp string
+
+	switch sc.state {
+	case initMessage:
+		if challenge != "" {
+			sc.state = failed
+			return "", fmt.Errorf("challenge must be empty for initial request: %w", ErrBadChallenge)
+		}
+		payload, err := sc.getAuthPayload()
+		if err != nil {
+			sc.state = failed
+			return "", err
+		}
+		resp = string(payload)
+		sc.state = serverResponse
+	case serverResponse:
+		if challenge == "" {
+			sc.state = failed
+			return "", fmt.Errorf("challenge must not be empty for server response: %w", ErrBadChallenge)
+		}
+
+		var serverResp response
+		if err := json.NewDecoder(strings.NewReader(challenge)).Decode(&serverResp); err != nil {
+			sc.state = failed
+			return "", fmt.Errorf("unable to process msk challenge response: %w", multierr.Combine(err, ErrFailedServerChallenge))
+		}
+
+		if serverResp.Version != supportedVersion {
+			sc.state = failed
+			return "", fmt.Errorf("unknown version found in response: %w", ErrFailedServerChallenge)
+		}
+
+		sc.state = complete
+	default:
+		return "", fmt.Errorf("invalid invocation: %w", ErrInvalidStateReached)
+	}
+
+	return resp, nil
+}
+
+func (sc *IAMSASLClient) Done() bool { return sc.state == complete }
+
+func (sc *IAMSASLClient) getAuthPayload() ([]byte, error) {
+	ts := time.Now().UTC()
+
+	headers := []byte("host:" + sc.MSKHostname)
+
+	sig, err := sc.signer.GetSignature(headers, nil, ts)
+	if err != nil {
+		return nil, err
+	}
+
+	// Creating a timestamp in the form of: yyyyMMdd'T'HHmmss'Z'
+	date := ts.Format("20060102T150405Z")
+
+	return json.Marshal(&payload{
+		Version:       supportedVersion,
+		BrokerHost:    sc.MSKHostname,
+		UserAgent:     sc.UserAgent,
+		Action:        "kafka-cluster:Connect",
+		Algorithm:     "AWS4-HMAC-SHA256",
+		Credentials:   fmt.Sprintf(scopeFormat, sc.accessKey, date[:8], sc.Region),
+		Date:          date,
+		SignedHeaders: "host",
+		Expires:       "300", // Seconds => 5 Minutes
+		Signature:     string(sig),
+	})
+}
diff --git a/sinker/otel/kafkafanoutexporter/internal/awsmsk/iam_scram_client_test.go b/sinker/otel/kafkafanoutexporter/internal/awsmsk/iam_scram_client_test.go
new file mode 100644
index 000000000..5412d70a7
--- /dev/null
+++ b/sinker/otel/kafkafanoutexporter/internal/awsmsk/iam_scram_client_test.go
@@ -0,0 +1,127 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
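+
+// The first SASL step emits a signed JSON payload; with hypothetical values it
+// looks roughly like
+//
+//	{
+//	  "version": "2020_10_22",
+//	  "host": "b-1.msk.example.amazonaws.com",
+//	  "action": "kafka-cluster:Connect",
+//	  "x-amz-algorithm": "AWS4-HMAC-SHA256",
+//	  "x-amz-expires": "300",
+//	  "x-amz-signature": "..."
+//	}
+//
+// together with the user-agent, credential scope, date, and signed-headers
+// fields asserted below.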
+ +package awsmsk + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAuthentication(t *testing.T) { + t.Parallel() + + const ( + AccessKey = "testing" + SecretKey = "hunter2" + BrokerName = "http://localhost:8089" + UserAgent = "kafka-exporter" + region = "us-east-1" + ) + + mskAuth := NewIAMSASLClient(BrokerName, region, UserAgent).(*IAMSASLClient) + require.NotNil(t, mskAuth, "Must have a valid client") + + assert.NoError(t, mskAuth.Begin(AccessKey, SecretKey, "")) + require.NotNil(t, mskAuth.signer, "Must have a valid signer") + assert.Equal(t, initMessage, mskAuth.state, "Must be in the initial state") + + payload, err := mskAuth.Step("") // Initial Challenge + assert.NoError(t, err, "Must not error on the initial challenge") + assert.NotEmpty(t, payload, "Must have a valid payload with data") + + expectedFields := map[string]struct{}{ + "version": {}, + "host": {}, + "user-agent": {}, + "action": {}, + "x-amz-algorithm": {}, + "x-amz-credential": {}, + "x-amz-date": {}, + "x-amz-signedheaders": {}, + "x-amz-expires": {}, + "x-amz-signature": {}, + } + + var request map[string]string + assert.NoError(t, json.NewDecoder(strings.NewReader(payload)).Decode(&request)) + + for k := range expectedFields { + v, ok := request[k] + assert.True(t, ok, "Must have the expected field") + assert.NotEmpty(t, v, "Must have a value for the field") + } + + _, err = mskAuth.Step(`{"version": "2020_10_22", "request-id": "pine apple sauce"}`) + assert.NoError(t, err, "Must not error when given valid challenge") + assert.True(t, mskAuth.Done(), "Must have completed auth") +} + +func TestValidatingServerResponse(t *testing.T) { + t.Parallel() + + testCases := []struct { + scenario string + challenge string + expectErr error + expectDone bool + }{ + { + scenario: "Valid challenge payload", + challenge: `{"version": "2020_10_22", "request-id": "pine apple sauce"}`, + expectErr: nil, + expectDone: true, + }, + { + scenario: "Empty challenge response returned", + challenge: "", + expectErr: ErrBadChallenge, + expectDone: false, + }, + { + scenario: "Challenge sent with unknown field", + challenge: `{"error": "unknown data format"}`, + expectErr: ErrFailedServerChallenge, + expectDone: false, + }, + { + scenario: "Invalid version within challenge", + challenge: `{"version": "2022_10_22", "request-id": "pizza sauce"}`, + expectErr: ErrFailedServerChallenge, + expectDone: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.scenario, func(t *testing.T) { + mskauth := &IAMSASLClient{ + state: serverResponse, + } + + payload, err := mskauth.Step(tc.challenge) + + assert.ErrorIs(t, err, tc.expectErr, "Must match the expected error in scenario") + assert.Empty(t, payload, "Must return a blank string") + assert.Equal(t, tc.expectDone, mskauth.Done(), "Must be in the expected state") + }) + } + + _, err := new(IAMSASLClient).Step("") + assert.ErrorIs(t, err, ErrInvalidStateReached, "Must be an invalid step when not set up correctly") + +} diff --git a/sinker/otel/kafkafanoutexporter/jaeger_marshaler.go b/sinker/otel/kafkafanoutexporter/jaeger_marshaler.go new file mode 100644 index 000000000..3ca66281a --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/jaeger_marshaler.go @@ -0,0 +1,105 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" + +import ( + "bytes" + + "github.com/Shopify/sarama" + "github.com/gogo/protobuf/jsonpb" + jaegerproto "github.com/jaegertracing/jaeger/model" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.uber.org/multierr" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" +) + +type jaegerMarshaler struct { + marshaler jaegerSpanMarshaler +} + +var _ TracesMarshaler = (*jaegerMarshaler)(nil) + +func (j jaegerMarshaler) Marshal(traces ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) { + batches, err := jaeger.ProtoFromTraces(traces) + if err != nil { + return nil, err + } + var messages []*sarama.ProducerMessage + + var errs error + for _, batch := range batches { + for _, span := range batch.Spans { + span.Process = batch.Process + bts, err := j.marshaler.marshal(span) + // continue to process spans that can be serialized + if err != nil { + errs = multierr.Append(errs, err) + continue + } + key := []byte(span.TraceID.String()) + messages = append(messages, &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(bts), + Key: sarama.ByteEncoder(key), + }) + } + } + return messages, errs +} + +func (j jaegerMarshaler) Encoding() string { + return j.marshaler.encoding() +} + +type jaegerSpanMarshaler interface { + marshal(span *jaegerproto.Span) ([]byte, error) + encoding() string +} + +type jaegerProtoSpanMarshaler struct { +} + +var _ jaegerSpanMarshaler = (*jaegerProtoSpanMarshaler)(nil) + +func (p jaegerProtoSpanMarshaler) marshal(span *jaegerproto.Span) ([]byte, error) { + return span.Marshal() +} + +func (p jaegerProtoSpanMarshaler) encoding() string { + return "jaeger_proto" +} + +type jaegerJSONSpanMarshaler struct { + pbMarshaler *jsonpb.Marshaler +} + +var _ jaegerSpanMarshaler = (*jaegerJSONSpanMarshaler)(nil) + +func newJaegerJSONMarshaler() *jaegerJSONSpanMarshaler { + return &jaegerJSONSpanMarshaler{ + pbMarshaler: &jsonpb.Marshaler{}, + } +} + +func (p jaegerJSONSpanMarshaler) marshal(span *jaegerproto.Span) ([]byte, error) { + out := new(bytes.Buffer) + err := p.pbMarshaler.Marshal(out, span) + return out.Bytes(), err +} + +func (p jaegerJSONSpanMarshaler) encoding() string { + return "jaeger_json" +} diff --git a/sinker/otel/kafkafanoutexporter/jaeger_marshaler_test.go b/sinker/otel/kafkafanoutexporter/jaeger_marshaler_test.go new file mode 100644 index 000000000..11ca9d8aa --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/jaeger_marshaler_test.go @@ -0,0 +1,82 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter + +import ( + "bytes" + "testing" + + "github.com/Shopify/sarama" + "github.com/gogo/protobuf/jsonpb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" +) + +func TestJaegerMarshaler(t *testing.T) { + td := ptrace.NewTraces() + span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() + span.SetName("foo") + span.SetStartTimestamp(pcommon.Timestamp(10)) + span.SetEndTimestamp(pcommon.Timestamp(20)) + span.SetTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) + span.SetSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}) + batches, err := jaeger.ProtoFromTraces(td) + require.NoError(t, err) + + batches[0].Spans[0].Process = batches[0].Process + jaegerProtoBytes, err := batches[0].Spans[0].Marshal() + messageKey := []byte(batches[0].Spans[0].TraceID.String()) + require.NoError(t, err) + require.NotNil(t, jaegerProtoBytes) + + jsonMarshaler := &jsonpb.Marshaler{} + jsonByteBuffer := new(bytes.Buffer) + require.NoError(t, jsonMarshaler.Marshal(jsonByteBuffer, batches[0].Spans[0])) + + tests := []struct { + unmarshaler TracesMarshaler + encoding string + messages []*sarama.ProducerMessage + }{ + { + unmarshaler: jaegerMarshaler{ + marshaler: jaegerProtoSpanMarshaler{}, + }, + encoding: "jaeger_proto", + messages: []*sarama.ProducerMessage{{Topic: "topic", Value: sarama.ByteEncoder(jaegerProtoBytes), Key: sarama.ByteEncoder(messageKey)}}, + }, + { + unmarshaler: jaegerMarshaler{ + marshaler: jaegerJSONSpanMarshaler{ + pbMarshaler: &jsonpb.Marshaler{}, + }, + }, + encoding: "jaeger_json", + messages: []*sarama.ProducerMessage{{Topic: "topic", Value: sarama.ByteEncoder(jsonByteBuffer.Bytes()), Key: sarama.ByteEncoder(messageKey)}}, + }, + } + for _, test := range tests { + t.Run(test.encoding, func(t *testing.T) { + messages, err := test.unmarshaler.Marshal(td, "topic") + require.NoError(t, err) + assert.Equal(t, test.messages, messages) + assert.Equal(t, test.encoding, test.unmarshaler.Encoding()) + }) + } +} diff --git a/sinker/otel/kafkafanoutexporter/kafka_exporter.go b/sinker/otel/kafkafanoutexporter/kafka_exporter.go new file mode 100644 index 000000000..fea05cff2 --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/kafka_exporter.go @@ -0,0 +1,231 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
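+
+// This fork fans messages out per sink: each pusher reads a "sink-id" value from
+// the context and appends it to the configured topic. With a hypothetical id:
+//
+//	ctx := context.WithValue(context.Background(), "sink-id", "abc123")
+//	// a metrics push with Topic "otlp_metrics" then produces to "otlp_metrics_abc123"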
+
+package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/Shopify/sarama"
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/pdata/plog"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+	"go.opentelemetry.io/collector/pdata/ptrace"
+	"go.uber.org/zap"
+)
+
+var errUnrecognizedEncoding = fmt.Errorf("unrecognized encoding")
+
+// kafkaTracesProducer uses sarama to produce trace messages to Kafka.
+type kafkaTracesProducer struct {
+	producer  sarama.SyncProducer
+	topic     string
+	marshaler TracesMarshaler
+	logger    *zap.Logger
+}
+
+type kafkaErrors struct {
+	count int
+	err   string
+}
+
+func (ke kafkaErrors) Error() string {
+	return fmt.Sprintf("Failed to deliver %d messages due to %s", ke.count, ke.err)
+}
+
+func (e *kafkaTracesProducer) tracesPusher(ctx context.Context, td ptrace.Traces) error {
+	sinkId := ctx.Value("sink-id").(string)
+	topic := e.topic + "_" + sinkId
+	messages, err := e.marshaler.Marshal(td, topic)
+	if err != nil {
+		return consumererror.NewPermanent(err)
+	}
+	err = e.producer.SendMessages(messages)
+	if err != nil {
+		var prodErr sarama.ProducerErrors
+		if errors.As(err, &prodErr) {
+			if len(prodErr) > 0 {
+				return kafkaErrors{len(prodErr), prodErr[0].Err.Error()}
+			}
+		}
+		return err
+	}
+	return nil
+}
+
+func (e *kafkaTracesProducer) Close(context.Context) error {
+	return e.producer.Close()
+}
+
+// kafkaMetricsProducer uses sarama to produce metrics messages to Kafka.
+type kafkaMetricsProducer struct {
+	producer  sarama.SyncProducer
+	topic     string
+	marshaler MetricsMarshaler
+	logger    *zap.Logger
+}
+
+func (e *kafkaMetricsProducer) metricsDataPusher(ctx context.Context, md pmetric.Metrics) error {
+	sinkId := ctx.Value("sink-id").(string)
+	topic := e.topic + "_" + sinkId
+	messages, err := e.marshaler.Marshal(md, topic)
+	if err != nil {
+		return consumererror.NewPermanent(err)
+	}
+	err = e.producer.SendMessages(messages)
+	if err != nil {
+		var prodErr sarama.ProducerErrors
+		if errors.As(err, &prodErr) {
+			if len(prodErr) > 0 {
+				return kafkaErrors{len(prodErr), prodErr[0].Err.Error()}
+			}
+		}
+		return err
+	}
+	return nil
+}
+
+func (e *kafkaMetricsProducer) Close(context.Context) error {
+	return e.producer.Close()
+}
+
+// kafkaLogsProducer uses sarama to produce logs messages to Kafka.
+type kafkaLogsProducer struct {
+	producer  sarama.SyncProducer
+	topic     string
+	marshaler LogsMarshaler
+	logger    *zap.Logger
+}
+
+func (e *kafkaLogsProducer) logsDataPusher(ctx context.Context, ld plog.Logs) error {
+	sinkId := ctx.Value("sink-id").(string)
+	topic := e.topic + "_" + sinkId
+	messages, err := e.marshaler.Marshal(ld, topic)
+	if err != nil {
+		return consumererror.NewPermanent(err)
+	}
+	err = e.producer.SendMessages(messages)
+	if err != nil {
+		var prodErr sarama.ProducerErrors
+		if errors.As(err, &prodErr) {
+			if len(prodErr) > 0 {
+				return kafkaErrors{len(prodErr), prodErr[0].Err.Error()}
+			}
+		}
+		return err
+	}
+	return nil
+}
+
+func (e *kafkaLogsProducer) Close(context.Context) error {
+	return e.producer.Close()
+}
+
+func newSaramaProducer(config Config) (sarama.SyncProducer, error) {
+	c := sarama.NewConfig()
+	// These settings are required by the sarama.SyncProducer implementation.
+ c.Producer.Return.Successes = true + c.Producer.Return.Errors = true + c.Producer.RequiredAcks = config.Producer.RequiredAcks + // Because sarama does not accept a Context for every message, set the Timeout here. + c.Producer.Timeout = config.Timeout + c.Metadata.Full = config.Metadata.Full + c.Metadata.Retry.Max = config.Metadata.Retry.Max + c.Metadata.Retry.Backoff = config.Metadata.Retry.Backoff + c.Producer.MaxMessageBytes = config.Producer.MaxMessageBytes + c.Producer.Flush.MaxMessages = config.Producer.FlushMaxMessages + + if config.ProtocolVersion != "" { + version, err := sarama.ParseKafkaVersion(config.ProtocolVersion) + if err != nil { + return nil, err + } + c.Version = version + } + + if err := ConfigureAuthentication(config.Authentication, c); err != nil { + return nil, err + } + + compression, err := saramaProducerCompressionCodec(config.Producer.Compression) + if err != nil { + return nil, err + } + c.Producer.Compression = compression + + producer, err := sarama.NewSyncProducer(config.Brokers, c) + if err != nil { + return nil, err + } + return producer, nil +} + +func newMetricsExporter(config Config, set component.ExporterCreateSettings, marshalers map[string]MetricsMarshaler) (*kafkaMetricsProducer, error) { + marshaler := marshalers[config.Encoding] + if marshaler == nil { + return nil, errUnrecognizedEncoding + } + producer, err := newSaramaProducer(config) + if err != nil { + return nil, err + } + + return &kafkaMetricsProducer{ + producer: producer, + topic: config.Topic, + marshaler: marshaler, + logger: set.Logger, + }, nil + +} + +// newTracesExporter creates Kafka exporter. +func newTracesExporter(config Config, set component.ExporterCreateSettings, marshalers map[string]TracesMarshaler) (*kafkaTracesProducer, error) { + marshaler := marshalers[config.Encoding] + if marshaler == nil { + return nil, errUnrecognizedEncoding + } + producer, err := newSaramaProducer(config) + if err != nil { + return nil, err + } + return &kafkaTracesProducer{ + producer: producer, + topic: config.Topic, + marshaler: marshaler, + logger: set.Logger, + }, nil +} + +func newLogsExporter(config Config, set component.ExporterCreateSettings, marshalers map[string]LogsMarshaler) (*kafkaLogsProducer, error) { + marshaler := marshalers[config.Encoding] + if marshaler == nil { + return nil, errUnrecognizedEncoding + } + producer, err := newSaramaProducer(config) + if err != nil { + return nil, err + } + + return &kafkaLogsProducer{ + producer: producer, + topic: config.Topic, + marshaler: marshaler, + logger: set.Logger, + }, nil + +} diff --git a/sinker/otel/kafkafanoutexporter/marshaler.go b/sinker/otel/kafkafanoutexporter/marshaler.go new file mode 100644 index 000000000..4c62ad7df --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/marshaler.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
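+
+// Encodings resolve to marshalers through the maps below; a minimal lookup
+// sketch (td stands for some ptrace.Traces value, the topic is hypothetical):
+//
+//	if m, ok := tracesMarshalers()["jaeger_proto"]; ok {
+//		messages, err := m.Marshal(td, "otlp_spans_mysink")
+//		...
+//	}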
+ +package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" + +import ( + "github.com/Shopify/sarama" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +// TracesMarshaler marshals traces into Message array. +type TracesMarshaler interface { + // Marshal serializes spans into sarama's ProducerMessages + Marshal(traces ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) + + // Encoding returns encoding name + Encoding() string +} + +// MetricsMarshaler marshals metrics into Message array +type MetricsMarshaler interface { + // Marshal serializes metrics into sarama's ProducerMessages + Marshal(metrics pmetric.Metrics, topic string) ([]*sarama.ProducerMessage, error) + + // Encoding returns encoding name + Encoding() string +} + +// LogsMarshaler marshals logs into Message array +type LogsMarshaler interface { + // Marshal serializes logs into sarama's ProducerMessages + Marshal(logs plog.Logs, topic string) ([]*sarama.ProducerMessage, error) + + // Encoding returns encoding name + Encoding() string +} + +// tracesMarshalers returns map of supported encodings with TracesMarshaler. +func tracesMarshalers() map[string]TracesMarshaler { + otlpPb := newPdataTracesMarshaler(ptrace.NewProtoMarshaler(), defaultEncoding) + otlpJSON := newPdataTracesMarshaler(ptrace.NewJSONMarshaler(), "otlp_json") + jaegerProto := jaegerMarshaler{marshaler: jaegerProtoSpanMarshaler{}} + jaegerJSON := jaegerMarshaler{marshaler: newJaegerJSONMarshaler()} + return map[string]TracesMarshaler{ + otlpPb.Encoding(): otlpPb, + otlpJSON.Encoding(): otlpJSON, + jaegerProto.Encoding(): jaegerProto, + jaegerJSON.Encoding(): jaegerJSON, + } +} + +// metricsMarshalers returns map of supported encodings and MetricsMarshaler +func metricsMarshalers() map[string]MetricsMarshaler { + otlpPb := newPdataMetricsMarshaler(pmetric.NewProtoMarshaler(), defaultEncoding) + otlpJSON := newPdataMetricsMarshaler(pmetric.NewJSONMarshaler(), "otlp_json") + return map[string]MetricsMarshaler{ + otlpPb.Encoding(): otlpPb, + otlpJSON.Encoding(): otlpJSON, + } +} + +// logsMarshalers returns map of supported encodings and LogsMarshaler +func logsMarshalers() map[string]LogsMarshaler { + otlpPb := newPdataLogsMarshaler(plog.NewProtoMarshaler(), defaultEncoding) + otlpJSON := newPdataLogsMarshaler(plog.NewJSONMarshaler(), "otlp_json") + raw := newRawMarshaler() + return map[string]LogsMarshaler{ + otlpPb.Encoding(): otlpPb, + otlpJSON.Encoding(): otlpJSON, + raw.Encoding(): raw, + } +} diff --git a/sinker/otel/kafkafanoutexporter/marshaler_test.go b/sinker/otel/kafkafanoutexporter/marshaler_test.go new file mode 100644 index 000000000..600d0ec15 --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/marshaler_test.go @@ -0,0 +1,149 @@ +// Copyright The OpenTelemetry Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
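+
+// A detail worth keeping in mind when reading these tests: the pdata (OTLP)
+// marshalers emit a single ProducerMessage per batch with no key, while the
+// Jaeger marshalers emit one message per span, keyed by its trace ID.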
+ +package kafkaexporter + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" + conventions "go.opentelemetry.io/collector/semconv/v1.6.1" +) + +func TestDefaultTracesMarshalers(t *testing.T) { + expectedEncodings := []string{ + "otlp_proto", + "otlp_json", + "jaeger_proto", + "jaeger_json", + } + marshalers := tracesMarshalers() + assert.Equal(t, len(expectedEncodings), len(marshalers)) + for _, e := range expectedEncodings { + t.Run(e, func(t *testing.T) { + m, ok := marshalers[e] + require.True(t, ok) + assert.NotNil(t, m) + }) + } +} + +func TestDefaultMetricsMarshalers(t *testing.T) { + expectedEncodings := []string{ + "otlp_proto", + "otlp_json", + } + marshalers := metricsMarshalers() + assert.Equal(t, len(expectedEncodings), len(marshalers)) + for _, e := range expectedEncodings { + t.Run(e, func(t *testing.T) { + m, ok := marshalers[e] + require.True(t, ok) + assert.NotNil(t, m) + }) + } +} + +func TestDefaultLogsMarshalers(t *testing.T) { + expectedEncodings := []string{ + "otlp_proto", + "otlp_json", + "raw", + } + marshalers := logsMarshalers() + assert.Equal(t, len(expectedEncodings), len(marshalers)) + for _, e := range expectedEncodings { + t.Run(e, func(t *testing.T) { + m, ok := marshalers[e] + require.True(t, ok) + assert.NotNil(t, m) + }) + } +} + +func TestOTLPTracesJsonMarshaling(t *testing.T) { + t.Parallel() + + now := time.Unix(1, 0) + + traces := ptrace.NewTraces() + traces.ResourceSpans().AppendEmpty() + + rs := traces.ResourceSpans().At(0) + rs.SetSchemaUrl(conventions.SchemaURL) + rs.ScopeSpans().AppendEmpty() + + ils := rs.ScopeSpans().At(0) + ils.SetSchemaUrl(conventions.SchemaURL) + ils.Spans().AppendEmpty() + + span := ils.Spans().At(0) + span.SetKind(ptrace.SpanKindInternal) + span.SetName(t.Name()) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(now)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(now.Add(time.Second))) + span.SetSpanID([8]byte{0, 1, 2, 3, 4, 5, 6, 7}) + span.SetParentSpanID([8]byte{8, 9, 10, 11, 12, 13, 14}) + + marshaler, ok := tracesMarshalers()["otlp_json"] + require.True(t, ok, "Must have otlp json marshaller") + + msg, err := marshaler.Marshal(traces, t.Name()) + require.NoError(t, err, "Must have marshaled the data without error") + require.Len(t, msg, 1, "Must have one entry in the message") + + data, err := msg[0].Value.Encode() + require.NoError(t, err, "Must not error when encoding value") + require.NotNil(t, data, "Must have valid data to test") + + // Since marshaling json is not guaranteed to be in order + // within a string, using a map to compare that the expected values are there + expectedJSON := map[string]interface{}{ + "resourceSpans": []interface{}{ + map[string]interface{}{ + "resource": map[string]interface{}{}, + "scopeSpans": []interface{}{ + map[string]interface{}{ + "scope": map[string]interface{}{}, + "spans": []interface{}{ + map[string]interface{}{ + "traceId": "", + "spanId": "0001020304050607", + "parentSpanId": "08090a0b0c0d0e00", + "name": t.Name(), + "kind": ptrace.SpanKindInternal.String(), + "startTimeUnixNano": fmt.Sprint(now.UnixNano()), + "endTimeUnixNano": fmt.Sprint(now.Add(time.Second).UnixNano()), + "status": map[string]interface{}{}, + }, + }, + "schemaUrl": conventions.SchemaURL, + }, + }, + "schemaUrl": conventions.SchemaURL, + }, + }, + } + + var final map[string]interface{} + err = 
json.Unmarshal(data, &final) + require.NoError(t, err, "Must not error marshaling expected data") + + assert.Equal(t, expectedJSON, final, "Must match the expected value") +} diff --git a/sinker/otel/kafkafanoutexporter/pdata_marshaler.go b/sinker/otel/kafkafanoutexporter/pdata_marshaler.go new file mode 100644 index 000000000..2fec31e28 --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/pdata_marshaler.go @@ -0,0 +1,109 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" + +import ( + "github.com/Shopify/sarama" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +type pdataLogsMarshaler struct { + marshaler plog.Marshaler + encoding string +} + +func (p pdataLogsMarshaler) Marshal(ld plog.Logs, topic string) ([]*sarama.ProducerMessage, error) { + bts, err := p.marshaler.MarshalLogs(ld) + if err != nil { + return nil, err + } + return []*sarama.ProducerMessage{ + { + Topic: topic, + Value: sarama.ByteEncoder(bts), + }, + }, nil +} + +func (p pdataLogsMarshaler) Encoding() string { + return p.encoding +} + +func newPdataLogsMarshaler(marshaler plog.Marshaler, encoding string) LogsMarshaler { + return pdataLogsMarshaler{ + marshaler: marshaler, + encoding: encoding, + } +} + +type pdataMetricsMarshaler struct { + marshaler pmetric.Marshaler + encoding string +} + +func (p pdataMetricsMarshaler) Marshal(ld pmetric.Metrics, topic string) ([]*sarama.ProducerMessage, error) { + bts, err := p.marshaler.MarshalMetrics(ld) + if err != nil { + return nil, err + } + return []*sarama.ProducerMessage{ + { + Topic: topic, + Value: sarama.ByteEncoder(bts), + }, + }, nil +} + +func (p pdataMetricsMarshaler) Encoding() string { + return p.encoding +} + +func newPdataMetricsMarshaler(marshaler pmetric.Marshaler, encoding string) MetricsMarshaler { + return pdataMetricsMarshaler{ + marshaler: marshaler, + encoding: encoding, + } +} + +type pdataTracesMarshaler struct { + marshaler ptrace.Marshaler + encoding string +} + +func (p pdataTracesMarshaler) Marshal(td ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) { + bts, err := p.marshaler.MarshalTraces(td) + if err != nil { + return nil, err + } + return []*sarama.ProducerMessage{ + { + Topic: topic, + Value: sarama.ByteEncoder(bts), + }, + }, nil +} + +func (p pdataTracesMarshaler) Encoding() string { + return p.encoding +} + +func newPdataTracesMarshaler(marshaler ptrace.Marshaler, encoding string) TracesMarshaler { + return pdataTracesMarshaler{ + marshaler: marshaler, + encoding: encoding, + } +} diff --git a/sinker/otel/kafkafanoutexporter/raw_marshaler.go b/sinker/otel/kafkafanoutexporter/raw_marshaler.go new file mode 100644 index 000000000..c49fdda17 --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/raw_marshaler.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" + +import ( + "encoding/json" + "errors" + + "github.com/Shopify/sarama" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" +) + +var errUnsupported = errors.New("unsupported serialization") + +type rawMarshaler struct { +} + +func newRawMarshaler() rawMarshaler { + return rawMarshaler{} +} + +func (r rawMarshaler) Marshal(logs plog.Logs, topic string) ([]*sarama.ProducerMessage, error) { + var messages []*sarama.ProducerMessage + for i := 0; i < logs.ResourceLogs().Len(); i++ { + rl := logs.ResourceLogs().At(i) + for j := 0; j < rl.ScopeLogs().Len(); j++ { + sl := rl.ScopeLogs().At(j) + for k := 0; k < sl.LogRecords().Len(); k++ { + lr := sl.LogRecords().At(k) + b, err := r.logBodyAsBytes(lr.Body()) + if err != nil { + return nil, err + } + if len(b) == 0 { + continue + } + + messages = append(messages, &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(b), + }) + } + } + } + + return messages, nil +} + +func (r rawMarshaler) logBodyAsBytes(value pcommon.Value) ([]byte, error) { + switch value.Type() { + case pcommon.ValueTypeStr: + return r.interfaceAsBytes(value.Str()) + case pcommon.ValueTypeBytes: + return value.Bytes().AsRaw(), nil + case pcommon.ValueTypeBool: + return r.interfaceAsBytes(value.Bool()) + case pcommon.ValueTypeDouble: + return r.interfaceAsBytes(value.Double()) + case pcommon.ValueTypeInt: + return r.interfaceAsBytes(value.Int()) + case pcommon.ValueTypeEmpty: + return []byte{}, nil + case pcommon.ValueTypeSlice: + return r.interfaceAsBytes(value.Slice().AsRaw()) + case pcommon.ValueTypeMap: + return r.interfaceAsBytes(value.Map().AsRaw()) + default: + return nil, errUnsupported + } +} + +func (r rawMarshaler) interfaceAsBytes(value interface{}) ([]byte, error) { + if value == nil { + return []byte{}, nil + } + res, err := json.Marshal(value) + return res, err +} + +func (r rawMarshaler) Encoding() string { + return "raw" +} diff --git a/sinker/otel/kafkafanoutexporter/raw_marshaller_test.go b/sinker/otel/kafkafanoutexporter/raw_marshaller_test.go new file mode 100644 index 000000000..4f50c5b8a --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/raw_marshaller_test.go @@ -0,0 +1,150 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
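`rawMarshaler` serializes only each record's Body: strings, bools, numbers, slices, and maps are JSON-encoded, raw bytes pass through unchanged, and empty bodies produce no message at all. A minimal sketch of what it emits for a string body — the helper function and topic name below are illustrative only, not part of this patch:

```go
package kafkaexporter

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

// rawMarshalDemo shows the raw encoding for a string body: the payload is
// the JSON-quoted string `"hello"`, not the plain bytes `hello`.
func rawMarshalDemo() error {
	logs := plog.NewLogs()
	lr := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
	lr.Body().SetStr("hello")

	msgs, err := newRawMarshaler().Marshal(logs, "orb-logs")
	if err != nil {
		return err
	}
	payload, err := msgs[0].Value.Encode()
	if err != nil {
		return err
	}
	fmt.Println(string(payload)) // "hello" (with quotes)
	return nil
}
```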
+ +package kafkaexporter + +import ( + "testing" + + "github.com/Shopify/sarama" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/plog" +) + +func ptr(i int) *int { + return &i +} + +func Test_RawMarshaler(t *testing.T) { + tests := []struct { + name string + countExpected *int + logRecord func() plog.LogRecord + marshaled sarama.ByteEncoder + errorExpected bool + }{ + { + name: "string", + logRecord: func() plog.LogRecord { + lr := plog.NewLogRecord() + lr.Body().SetStr("foo") + return lr + }, + errorExpected: false, + marshaled: []byte("\"foo\""), + }, + { + name: "[]byte", + logRecord: func() plog.LogRecord { + lr := plog.NewLogRecord() + lr.Body().SetEmptyBytes().FromRaw([]byte("foo")) + return lr + }, + errorExpected: false, + marshaled: []byte("foo"), + }, + { + name: "double", + logRecord: func() plog.LogRecord { + lr := plog.NewLogRecord() + lr.Body().SetDouble(float64(1.64)) + return lr + }, + errorExpected: false, + marshaled: []byte("1.64"), + }, + { + name: "int", + logRecord: func() plog.LogRecord { + lr := plog.NewLogRecord() + lr.Body().SetInt(int64(456)) + return lr + }, + errorExpected: false, + marshaled: []byte("456"), + }, + { + name: "empty", + logRecord: func() plog.LogRecord { + lr := plog.NewLogRecord() + return lr + }, + countExpected: ptr(0), + errorExpected: false, + marshaled: []byte{}, + }, + { + name: "bool", + logRecord: func() plog.LogRecord { + lr := plog.NewLogRecord() + lr.Body().SetBool(false) + return lr + }, + errorExpected: false, + marshaled: []byte("false"), + }, + { + name: "slice", + logRecord: func() plog.LogRecord { + lr := plog.NewLogRecord() + slice := lr.Body().SetEmptySlice() + slice.AppendEmpty().SetStr("foo") + slice.AppendEmpty().SetStr("bar") + slice.AppendEmpty().SetBool(false) + return lr + }, + errorExpected: false, + marshaled: []byte(`["foo","bar",false]`), + }, + { + name: "map", + logRecord: func() plog.LogRecord { + lr := plog.NewLogRecord() + m := lr.Body().SetEmptyMap() + m.PutStr("foo", "foo") + m.PutStr("bar", "bar") + m.PutBool("foobar", false) + return lr + }, + errorExpected: false, + marshaled: []byte(`{"bar":"bar","foo":"foo","foobar":false}`), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + r := newRawMarshaler() + logs := plog.NewLogs() + lr := test.logRecord() + lr.MoveTo(logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()) + messages, err := r.Marshal(logs, "foo") + if test.errorExpected { + require.Error(t, err) + } else { + require.NoError(t, err) + } + countExpected := 1 + if test.countExpected != nil { + countExpected = *test.countExpected + } + assert.Len(t, messages, countExpected) + if countExpected > 0 { + bytes, ok := messages[0].Value.(sarama.ByteEncoder) + require.True(t, ok) + assert.Equal(t, test.marshaled, bytes) + } + }) + } +} diff --git a/sinker/otel/kafkafanoutexporter/scram_client.go b/sinker/otel/kafkafanoutexporter/scram_client.go new file mode 100644 index 000000000..d4e86eaca --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/scram_client.go @@ -0,0 +1,54 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" + +import ( + "github.com/Shopify/sarama" + "github.com/xdg-go/scram" +) + +var _ sarama.SCRAMClient = (*XDGSCRAMClient)(nil) + +// XDGSCRAMClient uses xdg-go scram to authentication conversation +type XDGSCRAMClient struct { + *scram.Client + *scram.ClientConversation + scram.HashGeneratorFcn +} + +// Begin starts the XDGSCRAMClient conversation. +func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { + x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + if err != nil { + return err + } + x.ClientConversation = x.Client.NewConversation() + return nil +} + +// Step takes a string provided from a server (or just an empty string for the +// very first conversation step) and attempts to move the authentication +// conversation forward. It returns a string to be sent to the server or an +// error if the server message is invalid. Calling Step after a conversation +// completes is also an error. +func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { + return x.ClientConversation.Step(challenge) + +} + +// Done returns true if the conversation is completed or has errored. +func (x *XDGSCRAMClient) Done() bool { + return x.ClientConversation.Done() +} diff --git a/sinker/otel/kafkafanoutexporter/testdata/config.yaml b/sinker/otel/kafkafanoutexporter/testdata/config.yaml new file mode 100644 index 000000000..8c0885165 --- /dev/null +++ b/sinker/otel/kafkafanoutexporter/testdata/config.yaml @@ -0,0 +1,40 @@ +exporters: + kafka: + topic: spans + brokers: + - "foo:123" + - "bar:456" + metadata: + full: false + retry: + max: 15 + producer: + max_message_bytes: 10000000 + required_acks: -1 # WaitForAll + timeout: 10s + auth: + plain_text: + username: jdoe + password: pass + sending_queue: + enabled: true + num_consumers: 2 + queue_size: 10 + retry_on_failure: + enabled: true + initial_interval: 10s + max_interval: 60s + max_elapsed_time: 10m + +processors: + nop: + +receivers: + nop: + +service: + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [kafka] diff --git a/sinker/otel/orbreceiver/config.go b/sinker/otel/orbreceiver/config.go index 8792e265e..87534d67d 100644 --- a/sinker/otel/orbreceiver/config.go +++ b/sinker/otel/orbreceiver/config.go @@ -16,6 +16,7 @@ package orbreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiv import ( mfnats "github.com/mainflux/mainflux/pkg/messaging/nats" + "github.com/ns1labs/orb/sinker/otel/bridgeservice" "go.uber.org/zap" "go.opentelemetry.io/collector/config" @@ -30,6 +31,8 @@ type Config struct { // Entry from Metrics PubSub mfnats.PubSub + // Entry for Accessing DataSets, AgentGroup and Sinks + SinkerService *bridgeservice.SinkerOtelBridgeService } var _ config.Receiver = (*Config)(nil) diff --git a/sinker/otel/orbreceiver/internal/logs/otlp_test.go b/sinker/otel/orbreceiver/internal/logs/otlp_test.go deleted file mode 100644 index f74fc6fb7..000000000 --- a/sinker/otel/orbreceiver/internal/logs/otlp_test.go +++ 
/dev/null @@ -1,101 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "context" - "errors" - "github.com/ns1labs/orb/sinker/otel/orbreceiver/internal/testdata" - "net" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/pdata/plog/plogotlp" -) - -func TestExport(t *testing.T) { - ld := testdata.GenerateLogs(1) - // Keep log data to compare the test result against it - // Clone needed because OTLP proto XXX_ fields are altered in the GRPC downstream - logData := ld.Clone() - req := plogotlp.NewRequestFromLogs(ld) - - logSink := new(consumertest.LogsSink) - logClient := makeLogsServiceClient(t, logSink) - resp, err := logClient.Export(context.Background(), req) - require.NoError(t, err, "Failed to export trace: %v", err) - require.NotNil(t, resp, "The response is missing") - - lds := logSink.AllLogs() - require.Len(t, lds, 1) - assert.EqualValues(t, logData, lds[0]) -} - -func TestExport_EmptyRequest(t *testing.T) { - logSink := new(consumertest.LogsSink) - - logClient := makeLogsServiceClient(t, logSink) - resp, err := logClient.Export(context.Background(), plogotlp.NewRequest()) - assert.NoError(t, err, "Failed to export trace: %v", err) - assert.NotNil(t, resp, "The response is missing") -} - -func TestExport_ErrorConsumer(t *testing.T) { - ld := testdata.GenerateLogs(1) - req := plogotlp.NewRequestFromLogs(ld) - - logClient := makeLogsServiceClient(t, consumertest.NewErr(errors.New("my error"))) - resp, err := logClient.Export(context.Background(), req) - assert.EqualError(t, err, "rpc error: code = Unknown desc = my error") - assert.Equal(t, plogotlp.Response{}, resp) -} - -func makeLogsServiceClient(t *testing.T, lc consumer.Logs) plogotlp.Client { - addr := otlpReceiverOnGRPCServer(t, lc) - cc, err := grpc.Dial(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) - require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) - t.Cleanup(func() { - require.NoError(t, cc.Close()) - }) - - return plogotlp.NewClient(cc) -} - -func otlpReceiverOnGRPCServer(t *testing.T, lc consumer.Logs) net.Addr { - ln, err := net.Listen("tcp", "localhost:") - require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) - - t.Cleanup(func() { - require.NoError(t, ln.Close()) - }) - - r := New(config.NewComponentIDWithName("otlp", "log"), lc, componenttest.NewNopReceiverCreateSettings()) - // Now run it as a gRPC server - srv := grpc.NewServer() - plogotlp.RegisterServer(srv, r) - go func() { - _ = srv.Serve(ln) - }() - - return ln.Addr() -} diff --git 
a/sinker/otel/orbreceiver/internal/metrics/otlp_test.go b/sinker/otel/orbreceiver/internal/metrics/otlp_test.go deleted file mode 100644 index 4c449737d..000000000 --- a/sinker/otel/orbreceiver/internal/metrics/otlp_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "context" - "errors" - "github.com/ns1labs/orb/sinker/otel/orbreceiver/internal/testdata" - "net" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" -) - -func TestExport(t *testing.T) { - md := testdata.GenerateMetrics(1) - // Keep metric data to compare the test result against it - // Clone needed because OTLP proto XXX_ fields are altered in the GRPC downstream - metricData := md.Clone() - req := pmetricotlp.NewRequestFromMetrics(md) - - metricSink := new(consumertest.MetricsSink) - metricsClient := makeMetricsServiceClient(t, metricSink) - resp, err := metricsClient.Export(context.Background(), req) - - require.NoError(t, err, "Failed to export metrics: %v", err) - require.NotNil(t, resp, "The response is missing") - - mds := metricSink.AllMetrics() - require.Len(t, mds, 1) - assert.EqualValues(t, metricData, mds[0]) -} - -func TestExport_EmptyRequest(t *testing.T) { - metricSink := new(consumertest.MetricsSink) - metricsClient := makeMetricsServiceClient(t, metricSink) - resp, err := metricsClient.Export(context.Background(), pmetricotlp.NewRequest()) - require.NoError(t, err) - require.NotNil(t, resp) -} - -func TestExport_ErrorConsumer(t *testing.T) { - md := testdata.GenerateMetrics(1) - req := pmetricotlp.NewRequestFromMetrics(md) - - metricsClient := makeMetricsServiceClient(t, consumertest.NewErr(errors.New("my error"))) - resp, err := metricsClient.Export(context.Background(), req) - assert.EqualError(t, err, "rpc error: code = Unknown desc = my error") - assert.Equal(t, pmetricotlp.Response{}, resp) -} - -func makeMetricsServiceClient(t *testing.T, mc consumer.Metrics) pmetricotlp.Client { - addr := otlpReceiverOnGRPCServer(t, mc) - - cc, err := grpc.Dial(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) - require.NoError(t, err, "Failed to create the MetricsServiceClient: %v", err) - t.Cleanup(func() { - require.NoError(t, cc.Close()) - }) - - return pmetricotlp.NewClient(cc) -} - -func otlpReceiverOnGRPCServer(t *testing.T, mc consumer.Metrics) net.Addr { - ln, err := net.Listen("tcp", "localhost:") - require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) - - t.Cleanup(func() { - require.NoError(t, ln.Close()) - }) - - r := 
New(config.NewComponentIDWithName("otlp", "metrics"), mc, componenttest.NewNopReceiverCreateSettings()) - // Now run it as a gRPC server - srv := grpc.NewServer() - pmetricotlp.RegisterServer(srv, r) - go func() { - _ = srv.Serve(ln) - }() - - return ln.Addr() -} diff --git a/sinker/otel/orbreceiver/internal/testdata/log.go b/sinker/otel/orbreceiver/internal/testdata/log.go index 3379cac89..9862c4f6b 100644 --- a/sinker/otel/orbreceiver/internal/testdata/log.go +++ b/sinker/otel/orbreceiver/internal/testdata/log.go @@ -50,10 +50,10 @@ func fillLogOne(log plog.LogRecord) { log.SetTraceID([16]byte{0x08, 0x04, 0x02, 0x01}) attrs := log.Attributes() - attrs.PutString("app", "server") + attrs.PutStr("app", "server") attrs.PutInt("instance_num", 1) - log.Body().SetStringVal("This is a log message") + log.Body().SetStr("This is a log message") } func fillLogTwo(log plog.LogRecord) { @@ -66,5 +66,5 @@ func fillLogTwo(log plog.LogRecord) { attrs.PutString("customer", "acme") attrs.PutString("env", "dev") - log.Body().SetStringVal("something happened") + log.Body().SetStr("something happened") } diff --git a/sinker/otel/orbreceiver/internal/testdata/metric.go b/sinker/otel/orbreceiver/internal/testdata/metric.go index 3908b0dcc..ebbf975e9 100644 --- a/sinker/otel/orbreceiver/internal/testdata/metric.go +++ b/sinker/otel/orbreceiver/internal/testdata/metric.go @@ -49,29 +49,29 @@ func GenerateMetricsAllTypesEmpty() pmetric.Metrics { ms := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() doubleGauge := ms.AppendEmpty() - initMetric(doubleGauge, TestGaugeDoubleMetricName, pmetric.MetricDataTypeGauge) + initMetric(doubleGauge, TestGaugeDoubleMetricName, pmetric.MetricTypeGauge) doubleGauge.Gauge().DataPoints().AppendEmpty() intGauge := ms.AppendEmpty() - initMetric(intGauge, TestGaugeIntMetricName, pmetric.MetricDataTypeGauge) + initMetric(intGauge, TestGaugeIntMetricName, pmetric.MetricTypeGauge) intGauge.Gauge().DataPoints().AppendEmpty() doubleSum := ms.AppendEmpty() - initMetric(doubleSum, TestSumDoubleMetricName, pmetric.MetricDataTypeSum) + initMetric(doubleSum, TestSumDoubleMetricName, pmetric.MetricTypeSum) doubleSum.Sum().DataPoints().AppendEmpty() intSum := ms.AppendEmpty() - initMetric(intSum, TestSumIntMetricName, pmetric.MetricDataTypeSum) + initMetric(intSum, TestSumIntMetricName, pmetric.MetricTypeSum) intSum.Sum().DataPoints().AppendEmpty() histogram := ms.AppendEmpty() - initMetric(histogram, TestHistogramMetricName, pmetric.MetricDataTypeHistogram) + initMetric(histogram, TestHistogramMetricName, pmetric.MetricTypeHistogram) histogram.Histogram().DataPoints().AppendEmpty() summary := ms.AppendEmpty() - initMetric(summary, TestSummaryMetricName, pmetric.MetricDataTypeSummary) + initMetric(summary, TestSummaryMetricName, pmetric.MetricTypeSummary) summary.Summary().DataPoints().AppendEmpty() return md } func GenerateMetricsMetricTypeInvalid() pmetric.Metrics { md := generateMetricsOneEmptyInstrumentationScope() - initMetric(md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().AppendEmpty(), TestSumIntMetricName, pmetric.MetricDataTypeNone) + initMetric(md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().AppendEmpty(), TestSumIntMetricName, pmetric.MetricTypeEmpty) return md } @@ -114,72 +114,72 @@ func GenerateMetrics(count int) pmetric.Metrics { } func initGaugeIntMetric(im pmetric.Metric) { - initMetric(im, TestGaugeIntMetricName, pmetric.MetricDataTypeGauge) + initMetric(im, TestGaugeIntMetricName, pmetric.MetricTypeGauge) idps := im.Gauge().DataPoints() idp0 := 
idps.AppendEmpty() initMetricAttributes1(idp0.Attributes()) idp0.SetStartTimestamp(metricStartTimestamp) idp0.SetTimestamp(metricTimestamp) - idp0.SetIntVal(123) + idp0.SetIntValue(123) idp1 := idps.AppendEmpty() initMetricAttributes2(idp1.Attributes()) idp1.SetStartTimestamp(metricStartTimestamp) idp1.SetTimestamp(metricTimestamp) - idp1.SetIntVal(456) + idp1.SetIntValue(456) } func initGaugeDoubleMetric(im pmetric.Metric) { - initMetric(im, TestGaugeDoubleMetricName, pmetric.MetricDataTypeGauge) + initMetric(im, TestGaugeDoubleMetricName, pmetric.MetricTypeGauge) idps := im.Gauge().DataPoints() idp0 := idps.AppendEmpty() initMetricAttributes12(idp0.Attributes()) idp0.SetStartTimestamp(metricStartTimestamp) idp0.SetTimestamp(metricTimestamp) - idp0.SetDoubleVal(1.23) + idp0.SetDoubleValue(1.23) idp1 := idps.AppendEmpty() initMetricAttributes13(idp1.Attributes()) idp1.SetStartTimestamp(metricStartTimestamp) idp1.SetTimestamp(metricTimestamp) - idp1.SetDoubleVal(4.56) + idp1.SetDoubleValue(4.56) } func initSumIntMetric(im pmetric.Metric) { - initMetric(im, TestSumIntMetricName, pmetric.MetricDataTypeSum) + initMetric(im, TestSumIntMetricName, pmetric.MetricTypeSum) idps := im.Sum().DataPoints() idp0 := idps.AppendEmpty() initMetricAttributes1(idp0.Attributes()) idp0.SetStartTimestamp(metricStartTimestamp) idp0.SetTimestamp(metricTimestamp) - idp0.SetIntVal(123) + idp0.SetIntValue(123) idp1 := idps.AppendEmpty() initMetricAttributes2(idp1.Attributes()) idp1.SetStartTimestamp(metricStartTimestamp) idp1.SetTimestamp(metricTimestamp) - idp1.SetIntVal(456) + idp1.SetIntValue(456) } func initSumDoubleMetric(dm pmetric.Metric) { - initMetric(dm, TestSumDoubleMetricName, pmetric.MetricDataTypeSum) + initMetric(dm, TestSumDoubleMetricName, pmetric.MetricTypeSum) ddps := dm.Sum().DataPoints() ddp0 := ddps.AppendEmpty() initMetricAttributes12(ddp0.Attributes()) ddp0.SetStartTimestamp(metricStartTimestamp) ddp0.SetTimestamp(metricTimestamp) - ddp0.SetDoubleVal(1.23) + ddp0.SetDoubleValue(1.23) ddp1 := ddps.AppendEmpty() initMetricAttributes13(ddp1.Attributes()) ddp1.SetStartTimestamp(metricStartTimestamp) ddp1.SetTimestamp(metricTimestamp) - ddp1.SetDoubleVal(4.56) + ddp1.SetDoubleValue(4.56) } func initHistogramMetric(hm pmetric.Metric) { - initMetric(hm, TestHistogramMetricName, pmetric.MetricDataTypeHistogram) + initMetric(hm, TestHistogramMetricName, pmetric.MetricTypeHistogram) hdps := hm.Histogram().DataPoints() hdp0 := hdps.AppendEmpty() @@ -200,13 +200,13 @@ func initHistogramMetric(hm pmetric.Metric) { hdp1.BucketCounts().FromRaw([]uint64{0, 1}) exemplar := hdp1.Exemplars().AppendEmpty() exemplar.SetTimestamp(metricExemplarTimestamp) - exemplar.SetDoubleVal(15) + exemplar.SetDoubleValue(15) initMetricExemplarAttributes(exemplar.FilteredAttributes()) hdp1.ExplicitBounds().FromRaw([]float64{1}) } func initExponentialHistogramMetric(hm pmetric.Metric) { - initMetric(hm, TestExponentialHistogramMetricName, pmetric.MetricDataTypeExponentialHistogram) + initMetric(hm, TestExponentialHistogramMetricName, pmetric.MetricTypeExponentialHistogram) hdps := hm.ExponentialHistogram().DataPoints() hdp0 := hdps.AppendEmpty() @@ -254,12 +254,12 @@ func initExponentialHistogramMetric(hm pmetric.Metric) { exemplar := hdp1.Exemplars().AppendEmpty() exemplar.SetTimestamp(metricExemplarTimestamp) - exemplar.SetDoubleVal(15) + exemplar.SetDoubleValue(15) initMetricExemplarAttributes(exemplar.FilteredAttributes()) } func initSummaryMetric(sm pmetric.Metric) { - initMetric(sm, TestSummaryMetricName, 
pmetric.MetricDataTypeSummary) + initMetric(sm, TestSummaryMetricName, pmetric.MetricTypeSummary) sdps := sm.Summary().DataPoints() sdp0 := sdps.AppendEmpty() @@ -281,24 +281,24 @@ func initSummaryMetric(sm pmetric.Metric) { quantile.SetValue(15) } -func initMetric(m pmetric.Metric, name string, ty pmetric.MetricDataType) { +func initMetric(m pmetric.Metric, name string, ty pmetric.MetricType) { m.SetName(name) m.SetDescription("") m.SetUnit("1") switch ty { - case pmetric.MetricDataTypeGauge: + case pmetric.MetricTypeGauge: m.SetEmptyGauge() - case pmetric.MetricDataTypeSum: + case pmetric.MetricTypeSum: sum := m.SetEmptySum() sum.SetIsMonotonic(true) sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) - case pmetric.MetricDataTypeHistogram: + case pmetric.MetricTypeHistogram: histo := m.SetEmptyHistogram() histo.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) - case pmetric.MetricDataTypeExponentialHistogram: + case pmetric.MetricTypeExponentialHistogram: histo := m.SetEmptyExponentialHistogram() histo.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) - case pmetric.MetricDataTypeSummary: + case pmetric.MetricTypeSummary: m.SetEmptySummary() } } diff --git a/sinker/otel/orbreceiver/internal/trace/otlp_test.go b/sinker/otel/orbreceiver/internal/trace/otlp_test.go deleted file mode 100644 index 4255d71c6..000000000 --- a/sinker/otel/orbreceiver/internal/trace/otlp_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package trace - -import ( - "context" - "errors" - "github.com/ns1labs/orb/sinker/otel/orbreceiver/internal/testdata" - "net" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" -) - -func TestExport(t *testing.T) { - td := testdata.GenerateTraces(1) - // Keep trace data to compare the test result against it - // Clone needed because OTLP proto XXX_ fields are altered in the GRPC downstream - traceData := td.Clone() - req := ptraceotlp.NewRequestFromTraces(td) - - traceSink := new(consumertest.TracesSink) - traceClient := makeTraceServiceClient(t, traceSink) - resp, err := traceClient.Export(context.Background(), req) - require.NoError(t, err, "Failed to export trace: %v", err) - require.NotNil(t, resp, "The response is missing") - - require.Len(t, traceSink.AllTraces(), 1) - assert.EqualValues(t, traceData, traceSink.AllTraces()[0]) -} - -func TestExport_EmptyRequest(t *testing.T) { - traceSink := new(consumertest.TracesSink) - traceClient := makeTraceServiceClient(t, traceSink) - resp, err := traceClient.Export(context.Background(), ptraceotlp.NewRequest()) - assert.NoError(t, err, "Failed to export trace: %v", err) - assert.NotNil(t, resp, "The response is missing") -} - -func TestExport_ErrorConsumer(t *testing.T) { - td := testdata.GenerateTraces(1) - req := ptraceotlp.NewRequestFromTraces(td) - - traceClient := makeTraceServiceClient(t, consumertest.NewErr(errors.New("my error"))) - resp, err := traceClient.Export(context.Background(), req) - assert.EqualError(t, err, "rpc error: code = Unknown desc = my error") - assert.Equal(t, ptraceotlp.Response{}, resp) -} - -func makeTraceServiceClient(t *testing.T, tc consumer.Traces) ptraceotlp.Client { - addr := otlpReceiverOnGRPCServer(t, tc) - cc, err := grpc.Dial(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) - require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) - t.Cleanup(func() { - require.NoError(t, cc.Close()) - }) - - return ptraceotlp.NewClient(cc) -} - -func otlpReceiverOnGRPCServer(t *testing.T, tc consumer.Traces) net.Addr { - ln, err := net.Listen("tcp", "localhost:") - require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) - - t.Cleanup(func() { - require.NoError(t, ln.Close()) - }) - - r := New(config.NewComponentIDWithName("otlp", "trace"), tc, componenttest.NewNopReceiverCreateSettings()) - // Now run it as a gRPC server - srv := grpc.NewServer() - ptraceotlp.RegisterServer(srv, r) - go func() { - _ = srv.Serve(ln) - }() - - return ln.Addr() -} diff --git a/sinker/otel/orbreceiver/otlp.go b/sinker/otel/orbreceiver/otlp.go index 54ab33888..d72813d24 100644 --- a/sinker/otel/orbreceiver/otlp.go +++ b/sinker/otel/orbreceiver/otlp.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "github.com/mainflux/mainflux/pkg/messaging" + "github.com/ns1labs/orb/sinker/otel/bridgeservice" "github.com/ns1labs/orb/sinker/otel/orbreceiver/internal/metrics" "go.opentelemetry.io/collector/config" "go.uber.org/zap" @@ -36,6 +37,7 @@ type OrbReceiver struct { cancelFunc context.CancelFunc metricsReceiver *metrics.Receiver encoder encoder + sinkerService 
*bridgeservice.SinkerOtelBridgeService shutdownWG sync.WaitGroup @@ -47,9 +49,10 @@ type OrbReceiver struct { // as the various Stop*Reception methods to end it. func NewOrbReceiver(ctx context.Context, cfg *Config, settings component.ReceiverCreateSettings) *OrbReceiver { r := &OrbReceiver{ - ctx: ctx, - cfg: cfg, - settings: settings, + ctx: ctx, + cfg: cfg, + settings: settings, + sinkerService: cfg.SinkerService, } return r @@ -106,11 +109,34 @@ func (r *OrbReceiver) MessageInbound(msg messaging.Message) error { r.cfg.Logger.Error("error during unmarshalling, skipping message", zap.Error(err)) return } - _, err = r.metricsReceiver.Export(r.ctx, mr) + // Add tags in Context + execCtx, execCancelF := context.WithCancel(r.ctx) + defer execCancelF() + agentPb, err := r.sinkerService.ExtractAgent(execCtx, msg.Channel) if err != nil { - r.cfg.Logger.Error("error during export, skipping message", zap.Error(err)) + execCancelF() + r.cfg.Logger.Error("error during extracting agent information from fleet", zap.Error(err)) return } + sinkIds, err := r.sinkerService.GetSinkIdsFromAgentGroups(execCtx, agentPb.OwnerID, agentPb.AgentGroupIDs) + if err != nil { + execCancelF() + r.cfg.Logger.Error("error during extracting sinks information from policies", zap.Error(err)) + return + } + attributeCtx := context.WithValue(r.ctx, "agent-name", agentPb.AgentName) + attributeCtx = context.WithValue(attributeCtx, "agent-tags", agentPb.AgentTags) + attributeCtx = context.WithValue(attributeCtx, "orb-tags", agentPb.OrbTags) + attributeCtx = context.WithValue(attributeCtx, "agent-groups", agentPb.AgentGroupIDs) + attributeCtx = context.WithValue(attributeCtx, "agent-ownerID", agentPb.OwnerID) + for sinkId, _ := range sinkIds { + attributeCtx = context.WithValue(attributeCtx, "sink-id", sinkId) + _, err = r.metricsReceiver.Export(attributeCtx, mr) + if err != nil { + r.cfg.Logger.Error("error during export, skipping message", zap.Error(err)) + return + } + } }() return nil } diff --git a/sinker/service.go b/sinker/service.go index b9740ec51..a9bd97c81 100644 --- a/sinker/service.go +++ b/sinker/service.go @@ -16,6 +16,7 @@ import ( "github.com/ns1labs/orb/sinker/backend/pktvisor" "github.com/ns1labs/orb/sinker/config" "github.com/ns1labs/orb/sinker/otel" + "github.com/ns1labs/orb/sinker/otel/bridgeservice" "github.com/ns1labs/orb/sinker/prometheus" sinkspb "github.com/ns1labs/orb/sinks/pb" "go.uber.org/zap" @@ -40,7 +41,7 @@ type Service interface { Stop() error } -type sinkerService struct { +type SinkerService struct { pubSub mfnats.PubSub otel bool otelCancelFunct context.CancelFunc @@ -67,7 +68,7 @@ type sinkerService struct { asyncContext context.Context } -func (svc sinkerService) Start() error { +func (svc SinkerService) Start() error { svc.asyncContext, svc.cancelAsyncContext = context.WithCancel(context.WithValue(context.Background(), "routine", "async")) if !svc.otel { topic := fmt.Sprintf("channels.*.%s", BackendMetricsTopic) @@ -90,10 +91,11 @@ func (svc sinkerService) Start() error { return nil } -func (svc sinkerService) startOtel(ctx context.Context) error { +func (svc SinkerService) startOtel(ctx context.Context) error { if svc.otel { var err error - svc.otelCancelFunct, err = otel.StartOtelComponents(ctx, svc.logger, svc.otelKafkaUrl, svc.pubSub) + bridgeService := bridgeservice.NewBridgeService(svc.logger, svc.sinkerCache, svc.policiesClient, svc.fleetClient) + svc.otelCancelFunct, err = otel.StartOtelComponents(ctx, &bridgeService, svc.logger, svc.otelKafkaUrl, svc.pubSub) if err != nil { 
svc.logger.Error("error during StartOtelComponents", zap.Error(err)) return err @@ -102,7 +104,7 @@ func (svc sinkerService) startOtel(ctx context.Context) error { return nil } -func (svc sinkerService) Stop() error { +func (svc SinkerService) Stop() error { if svc.otel { otelTopic := fmt.Sprintf("channels.*.%s", OtelMetricsTopic) if err := svc.pubSub.Unsubscribe(otelTopic); err != nil { @@ -140,7 +142,7 @@ func New(logger *zap.Logger, ) Service { pktvisor.Register(logger) - return &sinkerService{ + return &SinkerService{ logger: logger, pubSub: pubSub, esclient: esclient, From 5dafd6635d046a1967643452061fdcd72c898269 Mon Sep 17 00:00:00 2001 From: manrodrigues <78241475+manrodrigues@users.noreply.github.com> Date: Thu, 27 Oct 2022 08:13:45 -0300 Subject: [PATCH 12/94] enabling otel on agents used on integration tests (#1919) --- python-test/README.md | 8 ++++++++ python-test/features/steps/local_agent.py | 15 +++++++++++++-- python-test/features/steps/test_config.py | 10 ++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) diff --git a/python-test/README.md b/python-test/README.md index 136387d34..422e99897 100644 --- a/python-test/README.md +++ b/python-test/README.md @@ -80,6 +80,14 @@ Then fill in the correct values: - Bool - Referred to UI tests. If True, run chromedriver in headless mode - Default value: `true` +- **include_otel_env_var**: + - Bool + - If true, use the environmental variable "ORB_OTEL_ENABLE" on agent provisioning commands + - Default value: `false` +- **enable_otel**: + - Bool + - Value to be used in variable "ORB_OTEL_ENABLE". Note that `include_otel_env_var` parameter must be `true` if this variable is true. + - Default value: `false` ## Run behave Simply run `behave`, optionally passing the feature file as follows: diff --git a/python-test/features/steps/local_agent.py b/python-test/features/steps/local_agent.py index cd0f1044a..2e3becb38 100644 --- a/python-test/features/steps/local_agent.py +++ b/python-test/features/steps/local_agent.py @@ -24,6 +24,8 @@ def run_local_agent_container(context, status_port): agent_docker_image = configs.get('agent_docker_image', 'ns1labs/orb-agent') image_tag = ':' + configs.get('agent_docker_tag', 'latest') agent_image = agent_docker_image + image_tag + include_otel_env_var = configs.get("include_otel_env_var") + enable_otel = configs.get("enable_otel") env_vars = {"ORB_CLOUD_ADDRESS": orb_address, "ORB_CLOUD_MQTT_ID": context.agent['id'], "ORB_CLOUD_MQTT_CHANNEL_ID": context.agent['channel_id'], @@ -31,6 +33,8 @@ def run_local_agent_container(context, status_port): "PKTVISOR_PCAP_IFACE_DEFAULT": interface} if ignore_ssl_and_certificate_errors == 'true': env_vars["ORB_TLS_VERIFY"] = "false" + if include_otel_env_var == "true": + env_vars["ORB_OTEL_ENABLE"] = enable_otel context.port = return_port_to_run_docker_container(context, availability[status_port]) @@ -112,8 +116,11 @@ def run_container_using_ui_command(context, status_port): assert_that(status_port, any_of(equal_to("available"), equal_to("unavailable")), "Unexpected value for port") availability = {"available": True, "unavailable": False} context.port = return_port_to_run_docker_container(context, availability[status_port]) + include_otel_env_var = configs.get("include_otel_env_var") + enable_otel = configs.get("enable_otel") context.container_id = run_local_agent_from_terminal(context.agent_provisioning_command, - ignore_ssl_and_certificate_errors, str(context.port)) + ignore_ssl_and_certificate_errors, str(context.port), + include_otel_env_var, enable_otel) 
assert_that(context.container_id, is_not((none())), f"Agent container was not run") rename_container(context.container_id, LOCAL_AGENT_CONTAINER_NAME + context.agent['name'][-5:]) if context.container_id not in context.containers_id.keys(): @@ -241,11 +248,13 @@ def check_logs_contain_log(logs, expected_log, event, start_time=0): return event.is_set() -def run_local_agent_from_terminal(command, ignore_ssl_and_certificate_errors, pktvisor_port): +def run_local_agent_from_terminal(command, ignore_ssl_and_certificate_errors, pktvisor_port, + include_otel_env_var="false", enable_otel="false"): """ :param (str) command: docker command to provision an agent :param (bool) ignore_ssl_and_certificate_errors: True if orb address doesn't have a valid certificate. :param (str or int) pktvisor_port: Port on which pktvisor should run + :param (str) include_otel_env_var: if 'true', the ORB_OTEL_ENABLE env var is included in the agent provisioning command :return: agent container ID """ command = command.replace("\\\n", " ") @@ -253,6 +262,8 @@ def run_local_agent_from_terminal(command, ignore_ssl_and_certificate_errors, pk if ignore_ssl_and_certificate_errors == 'true': args.insert(-1, "-e") args.insert(-1, "ORB_TLS_VERIFY=false") + if include_otel_env_var == "true": + args.insert(-1, f"ORB_OTEL_ENABLE={enable_otel}") if pktvisor_port != 'default': args.insert(-1, "-e") args.insert(-1, f"ORB_BACKENDS_PKTVISOR_API_PORT={pktvisor_port}") diff --git a/python-test/features/steps/test_config.py b/python-test/features/steps/test_config.py index ced5e9521..a69318e45 100644 --- a/python-test/features/steps/test_config.py +++ b/python-test/features/steps/test_config.py @@ -61,4 +61,14 @@ def _read_configs(): assert_that(is_credentials_registered, any_of(equal_to('true'), equal_to('false')), 'Invalid value to is_credentials_registered parameter. A boolean value is expected.') configs['is_credentials_registered'] = is_credentials_registered + include_otel_env_var = configs.get("include_otel_env_var", "false").lower() + configs["include_otel_env_var"] = include_otel_env_var + enable_otel = configs.get('enable_otel', 'false').lower() + assert_that(enable_otel, any_of(equal_to('true'), equal_to('false')), + 'Invalid value to enable_otel parameter. A boolean value is expected.') + configs['enable_otel'] = enable_otel + if include_otel_env_var == "false" and enable_otel == "true": + raise ValueError("'enable_otel' is enabled, but the variable is not included in the commands because " + "'include_otel_env_var' is false. Check your parameters.") + return configs From 626563c9b21e00b5774515ccd69d8dc802fdfbda Mon Sep 17 00:00:00 2001 From: Luiz Henrique Pegoraro Date: Thu, 27 Oct 2022 14:40:19 -0300 Subject: [PATCH 13/94] Fixing events not deleting properly (#1915) * feat(sinks/sinker): wip add redis topic to handle config yaml. Signed-off-by: Luiz Pegoraro * feat(maestro): fix event not handling correctly delete, must send deploymentEntry Signed-off-by: Luiz Pegoraro * feat(maestro): fix event not handling correctly delete, must send deploymentEntry Signed-off-by: Luiz Pegoraro * feat(maestro): add docs Signed-off-by: Luiz Pegoraro * feat(sinks): add new proto function for sinks. Signed-off-by: Luiz Pegoraro * feat(sinks): add new proto function to fetch sinks from db and send to new internal. Signed-off-by: Luiz Pegoraro * feat(sinks): implementing metadata filtering and state filter. Signed-off-by: Luiz Pegoraro * feat(maestro): add sinks grpc in maestro call.
Signed-off-by: Luiz Pegoraro * feat(maestro): add start function and extracted kubeControl to external service and replaced maestro redis subscriber. Signed-off-by: Luiz Pegoraro * feat(sinks): implement missing method. Signed-off-by: Luiz Pegoraro * feat(maestro): fixing compilation errors. Signed-off-by: Luiz Pegoraro * feat(sinks): fixing missing impl Signed-off-by: Luiz Pegoraro * feat(maestro): fix dependency ciclying. Signed-off-by: Luiz Pegoraro * feat(sinks): fix missing test. Signed-off-by: Luiz Pegoraro * feat(sinks): decrypt the metadata password since we will build the openTelemetryEntry with that info. Signed-off-by: Luiz Pegoraro Signed-off-by: Luiz Pegoraro --- cmd/maestro/main.go | 136 ++++++++--- fleet/pb/fleet.pb.go | 4 +- fleet/pb/fleet_grpc.pb.go | 4 + go.mod | 15 +- go.sum | 2 - maestro/{ => config}/config_builder.go | 2 +- maestro/{ => config}/config_builder_test.go | 2 +- .../kubecontrol.go} | 58 +++-- maestro/maestro.go | 43 +--- maestro/redis/consumer/hashset.go | 31 ++- maestro/redis/consumer/streams.go | 34 +-- maestro/service.go | 136 ++++++++++- pkg/types/maps.go | 9 + policies/pb/policies.pb.go | 4 +- policies/pb/policies_grpc.pb.go | 4 + sinker/redis/consumer/streams.go | 1 + sinker/redis/producer/events.go | 18 ++ sinks/api/grpc/client.go | 68 +++++- sinks/api/grpc/endpoint.go | 46 ++++ sinks/api/grpc/request.go | 4 + sinks/api/grpc/response.go | 4 + sinks/api/grpc/server.go | 39 ++++ sinks/api/http/logging.go | 14 ++ sinks/api/http/metrics.go | 5 + sinks/api/http/openapi.yaml | 1 + sinks/mocks/client.go | 4 + sinks/mocks/sinks.go | 6 +- sinks/pb/sinks.pb.go | 219 ++++++++++++++---- sinks/pb/sinks.proto | 11 +- sinks/pb/sinks_grpc.pb.go | 40 ++++ sinks/postgres/sinks.go | 44 +++- sinks/postgres/sinks_test.go | 2 +- sinks/redis/producer/streams.go | 5 + sinks/service.go | 4 +- sinks/sinks.go | 23 +- sinks/sinks_service.go | 17 +- 36 files changed, 856 insertions(+), 203 deletions(-) rename maestro/{ => config}/config_builder.go (99%) rename maestro/{ => config}/config_builder_test.go (99%) rename maestro/{maestro_service.go => kubecontrol/kubecontrol.go} (54%) diff --git a/cmd/maestro/main.go b/cmd/maestro/main.go index 56e14576f..26377a10b 100644 --- a/cmd/maestro/main.go +++ b/cmd/maestro/main.go @@ -11,14 +11,21 @@ package main import ( "context" "fmt" + sinksgrpc "github.com/ns1labs/orb/sinks/api/grpc" + "github.com/opentracing/opentracing-go" + jconfig "github.com/uber/jaeger-client-go/config" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "io" "os" "os/signal" "strconv" "strings" "syscall" + "time" "github.com/ns1labs/orb/maestro" - rediscons1 "github.com/ns1labs/orb/maestro/redis/consumer" "github.com/ns1labs/orb/pkg/config" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -27,16 +34,17 @@ import ( ) const ( - svcName = "maestro" - mfEnvPrefix = "mf" - envPrefix = "orb_maestro" - httpPort = "8500" + svcName = "maestro" + envPrefix = "orb_maestro" + httpPort = "8500" ) func main() { esCfg := config.LoadEsConfig(envPrefix) svcCfg := config.LoadBaseServiceConfig(envPrefix, httpPort) + jCfg := config.LoadJaegerConfig(envPrefix) + sinksGRPCCfg := config.LoadGRPCConfig("orb", "sinks") // logger var logger *zap.Logger @@ -62,26 +70,111 @@ func main() { defer func(logger *zap.Logger) { _ = logger.Sync() }(logger) - + log := logger.Sugar() esClient := connectToRedis(esCfg.URL, esCfg.Pass, esCfg.DB, logger) - defer esClient.Close() + defer func(esClient *r.Client) { + err := esClient.Close() + if 
err != nil { + return + } + }(esClient) + + tracer, tracerCloser := initJaeger(svcName, jCfg.URL, logger) + defer func(tracerCloser io.Closer) { + err := tracerCloser.Close() + if err != nil { + logger.Fatal(err.Error()) + } + }(tracerCloser) + + sinksGRPCConn := connectToGRPC(sinksGRPCCfg, logger) + defer func(sinksGRPCConn *grpc.ClientConn) { + err := sinksGRPCConn.Close() + if err != nil { + logger.Fatal(err.Error()) + } + }(sinksGRPCConn) + + sinksGRPCTimeout, err := time.ParseDuration(sinksGRPCCfg.Timeout) + if err != nil { + log.Fatalf("Invalid %s value: %s", sinksGRPCCfg.Timeout, err.Error()) + } + sinksGRPCClient := sinksgrpc.NewClient(tracer, sinksGRPCConn, sinksGRPCTimeout) - svc := newMaestroService(logger, esClient) + svc := maestro.NewMaestroService(logger, esClient, sinksGRPCClient, esCfg) errs := make(chan error, 2) - go subscribeToSinkerES(svc, esClient, esCfg, logger) - go subscribeToSinksES(svc, esClient, esCfg, logger) + mainContext, mainCancelFunction := context.WithCancel(context.Background()) + err = svc.Start(mainContext, mainCancelFunction) + if err != nil { + mainCancelFunction() + log.Fatalf(fmt.Sprintf("Maestro service terminated: %s", err)) + } go func() { c := make(chan os.Signal) signal.Notify(c, syscall.SIGINT) errs <- fmt.Errorf("%s", <-c) + mainCancelFunction() }() - err := <-errs + err = <-errs logger.Error(fmt.Sprintf("Maestro service terminated: %s", err)) } +func connectToGRPC(cfg config.GRPCConfig, logger *zap.Logger) *grpc.ClientConn { + var opts []grpc.DialOption + tls, err := strconv.ParseBool(cfg.ClientTLS) + if err != nil { + tls = false + } + if tls { + if cfg.CaCerts != "" { + tpc, err := credentials.NewClientTLSFromFile(cfg.CaCerts, "") + if err != nil { + logger.Error(fmt.Sprintf("Failed to create tls credentials: %s", err)) + os.Exit(1) + } + opts = append(opts, grpc.WithTransportCredentials(tpc)) + } + } else { + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + + conn, err := grpc.Dial(cfg.URL, opts...) + if err != nil { + logger.Error(fmt.Sprintf("Failed to dial to gRPC service %s: %s", cfg.URL, err)) + os.Exit(1) + } + logger.Info(fmt.Sprintf("Dialed to gRPC service %s at %s, TLS? %t", cfg.Service, cfg.URL, tls)) + + return conn +} + +func initJaeger(svcName, url string, logger *zap.Logger) (opentracing.Tracer, io.Closer) { + if url == "" { + return opentracing.NoopTracer{}, io.NopCloser(nil) + } + + tracer, closer, err := jconfig.Configuration{ + ServiceName: svcName, + Sampler: &jconfig.SamplerConfig{ + Type: "const", + Param: 1, + }, + Reporter: &jconfig.ReporterConfig{ + LocalAgentHostPort: url, + LogSpans: true, + }, + }.NewTracer() + if err != nil { + logger.Error("Failed to init Jaeger client", zap.Error(err)) + os.Exit(1) + } + + return tracer, closer +} + func connectToRedis(redisURL, redisPass, redisDB string, logger *zap.Logger) *r.Client { db, err := strconv.Atoi(redisDB) if err != nil { @@ -95,24 +188,3 @@ func connectToRedis(redisURL, redisPass, redisDB string, logger *zap.Logger) *r. 
DB: db, }) } - -func newMaestroService(logger *zap.Logger, esClient *r.Client) maestro.MaestroService { - svc := maestro.NewMaestroService(logger, esClient) - return svc -} - -func subscribeToSinkerES(svc maestro.MaestroService, client *r.Client, cfg config.EsConfig, logger *zap.Logger) { - eventStore := rediscons1.NewEventStore(svc, client, cfg.Consumer, logger) - logger.Info("Subscribed to Redis Event Store for sinker") - if err := eventStore.SubscribeSinker(context.Background()); err != nil { - logger.Error("Bootstrap service failed to subscribe to event sourcing sinker", zap.Error(err)) - } -} - -func subscribeToSinksES(svc maestro.MaestroService, client *r.Client, cfg config.EsConfig, logger *zap.Logger) { - logger.Info("Subscribed to Redis Event Store for sinks") - eventStore := rediscons1.NewEventStore(svc, client, cfg.Consumer, logger) - if err := eventStore.SubscribeSinks(context.Background()); err != nil { - logger.Error("Bootstrap service failed to subscribe to event sourcing sinks", zap.Error(err)) - } -} diff --git a/fleet/pb/fleet.pb.go b/fleet/pb/fleet.pb.go index 43abdf1f4..16b3ddf29 100644 --- a/fleet/pb/fleet.pb.go +++ b/fleet/pb/fleet.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.14.0 +// protoc-gen-go v1.28.1 +// protoc v3.12.4 // source: fleet/pb/fleet.proto package pb diff --git a/fleet/pb/fleet_grpc.pb.go b/fleet/pb/fleet_grpc.pb.go index e1c1a83e8..7016bdb90 100644 --- a/fleet/pb/fleet_grpc.pb.go +++ b/fleet/pb/fleet_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.12.4 +// source: fleet/pb/fleet.proto package pb diff --git a/go.mod b/go.mod index a80251249..bcea10634 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/ns1labs/orb go 1.19 require ( + github.com/aws/aws-sdk-go v1.44.114 github.com/eclipse/paho.mqtt.golang v1.4.1 github.com/fatih/structs v1.1.0 github.com/ghodss/yaml v1.0.0 @@ -29,6 +30,7 @@ require ( github.com/spf13/viper v1.13.0 github.com/stretchr/testify v1.8.0 github.com/uber/jaeger-client-go v2.30.0+incompatible + go.uber.org/multierr v1.8.0 go.uber.org/zap v1.23.0 google.golang.org/grpc v1.50.0 google.golang.org/protobuf v1.28.1 @@ -40,7 +42,6 @@ require ( require ( github.com/gogo/protobuf v1.3.2 github.com/google/uuid v1.3.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.62.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.62.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.62.0 github.com/prometheus/common v0.37.0 @@ -67,7 +68,6 @@ require ( github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/armon/go-metrics v0.3.10 // indirect - github.com/aws/aws-sdk-go v1.44.114 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect @@ -175,7 +175,6 @@ require ( go.opentelemetry.io/otel v1.10.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/goleak v1.2.0 // indirect - go.uber.org/multierr v1.8.0 // indirect golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect golang.org/x/net v0.0.0-20220927171203-f486391704dc // indirect @@ -201,7 +200,7 @@ 
require ( //These libs are used to allow orb extend opentelemetry features require ( cloud.google.com/go/compute v1.10.0 // indirect - github.com/Shopify/sarama v1.37.2 // indirect + github.com/Shopify/sarama v1.37.2 github.com/apache/thrift v0.17.0 // indirect github.com/eapache/go-resiliency v1.3.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect @@ -216,7 +215,7 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/jaegertracing/jaeger v1.38.1 // indirect + github.com/jaegertracing/jaeger v1.38.1 github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect @@ -226,16 +225,16 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.62.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.62.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.62.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.62.0 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rs/cors v1.8.2 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect - github.com/xdg-go/scram v1.1.1 // indirect + github.com/xdg-go/scram v1.1.1 github.com/xdg-go/stringprep v1.0.3 // indirect - go.opentelemetry.io/collector/semconv v0.62.0 // indirect + go.opentelemetry.io/collector/semconv v0.62.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.1 // indirect k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect diff --git a/go.sum b/go.sum index 6b9d2d03c..1f7b4890a 100644 --- a/go.sum +++ b/go.sum @@ -742,8 +742,6 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.62.0 h1:/jMfx1V+TfLULWVo+nJbnUsbsC4OvDY5NCmwmIyAU5o= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.62.0/go.mod h1:w/y5lJ1Emnf2imDVqImSETnMP7X+FhTz+Ucxiq/Obi4= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.62.0 h1:EoidrEk6Dmap+Cw+lXipNL7IVGicS0N6V+oCvesIj/c= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.62.0/go.mod h1:4BwkK9Fb1xZDxmXt7gSm5nxCxtVWJf61/UaCt54gVjU= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.62.0 h1:PMUgwDspM+2DX2Ol8Tj/jUBQqzvykVwnFily/HjRDPA= diff --git a/maestro/config_builder.go b/maestro/config/config_builder.go similarity index 99% rename from maestro/config_builder.go rename to maestro/config/config_builder.go index 64396111e..2745b3933 100644 --- a/maestro/config_builder.go +++ b/maestro/config/config_builder.go @@ -1,4 +1,4 @@ -package 
maestro +package config import ( "context" diff --git a/maestro/config_builder_test.go b/maestro/config/config_builder_test.go similarity index 99% rename from maestro/config_builder_test.go rename to maestro/config/config_builder_test.go index 330de4250..7dda58ef1 100644 --- a/maestro/config_builder_test.go +++ b/maestro/config/config_builder_test.go @@ -1,4 +1,4 @@ -package maestro +package config import ( "context" diff --git a/maestro/maestro_service.go b/maestro/kubecontrol/kubecontrol.go similarity index 54% rename from maestro/maestro_service.go rename to maestro/kubecontrol/kubecontrol.go index 55d702b4b..9e0239d00 100644 --- a/maestro/maestro_service.go +++ b/maestro/kubecontrol/kubecontrol.go @@ -1,31 +1,38 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Adapted for Orb project, modifications licensed under MPL v. 2.0: -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ - -package maestro +package kubecontrol import ( "bufio" "context" "fmt" - "github.com/ns1labs/orb/pkg/errors" "go.uber.org/zap" "os" "os/exec" ) -var ( - ErrCreateMaestro = errors.New("failed to create Otel Collector") - ErrConflictMaestro = errors.New("Otel collector already exists") -) - const namespace = "otelcollectors" -func (svc maestroService) collectorDeploy(ctx context.Context, operation, sinkId, manifest string) error { +var _ Service = (*deployService)(nil) + +type deployService struct { + logger *zap.Logger +} + +func NewService(logger *zap.Logger) Service { + return &deployService{logger: logger} +} + +type Service interface { + // CreateOtelCollector - create an existing collector by id + CreateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error + + // DeleteOtelCollector - delete an existing collector by id + DeleteOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error + + // UpdateOtelCollector - update an existing collector by id + UpdateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error +} + +func (svc *deployService) collectorDeploy(_ context.Context, operation, sinkId, manifest string) error { fileContent := []byte(manifest) err := os.WriteFile("/tmp/otel-collector-"+sinkId+".json", fileContent, 0644) @@ -66,12 +73,7 @@ func (svc maestroService) collectorDeploy(ctx context.Context, operation, sinkId return nil } -func (svc maestroService) getConfigFromSinkId(config SinkConfig) (sinkID, sinkUrl, sinkUsername, sinkPassword string) { - - return config.Id, config.Url, config.Username, config.Password -} - -func (svc maestroService) CreateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error { +func (svc *deployService) CreateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error { err := svc.collectorDeploy(ctx, "apply", sinkID, deploymentEntry) if err != nil { @@ -80,20 +82,16 @@ func (svc maestroService) CreateOtelCollector(ctx context.Context, sinkID, deplo return nil } -func (svc maestroService) UpdateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error { - err := svc.DeleteOtelCollector(ctx, sinkID) - if err != nil { - return err - } - err = svc.CreateOtelCollector(ctx, sinkID, deploymentEntry) +func (svc *deployService) UpdateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error { + err := svc.CreateOtelCollector(ctx, sinkID, deploymentEntry) if err != nil { return err } 
return nil } -func (svc maestroService) DeleteOtelCollector(ctx context.Context, sinkID string) error { - err := svc.collectorDeploy(ctx, "delete", sinkID, "") +func (svc *deployService) DeleteOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error { + err := svc.collectorDeploy(ctx, "delete", sinkID, deploymentEntry) if err != nil { return err } diff --git a/maestro/maestro.go b/maestro/maestro.go index 4bf22cc76..3ac0f65e5 100644 --- a/maestro/maestro.go +++ b/maestro/maestro.go @@ -4,40 +4,9 @@ import ( "context" "time" - "github.com/ns1labs/orb/pkg/errors" "github.com/ns1labs/orb/pkg/types" ) -var ( - // ErrMalformedEntity indicates malformed entity specification (e.g. - // invalid username or password). - ErrMalformedEntity = errors.New("malformed entity specification") - - // ErrNotFound indicates a non-existent entity request. - ErrNotFound = errors.New("non-existent entity") - - // ErrConflict indicates that entity already exists. - ErrConflict = errors.New("entity already exists") - - // ErrScanMetadata indicates problem with metadata in db - ErrScanMetadata = errors.New("failed to scan metadata in db") - - // ErrSelectEntity indicates error while reading entity from database - ErrSelectEntity = errors.New("select entity from db error") - - // ErrEntityConnected indicates error while checking connection in database - ErrEntityConnected = errors.New("check connection in database error") - - // ErrUpdateEntity indicates error while updating a entity - ErrUpdateEntity = errors.New("failed to update entity") - - ErrUnauthorizedAccess = errors.New("missing or invalid credentials provided") - - ErrRemoveEntity = errors.New("failed to remove entity") - - ErrInvalidBackend = errors.New("No available backends") -) - type Maestro struct { ID string Name types.Identifier @@ -56,13 +25,7 @@ type SinkConfig struct { Password string } -type MaestroService interface { - // CreateOtelCollector - create an existing collector by id - CreateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error - - // DeleteOtelCollector - delete an existing collector by id - DeleteOtelCollector(ctx context.Context, sinkID string) error - - // UpdateOtelCollector - update an existing collector by id - UpdateOtelCollector(ctx context.Context, sinkID, deploymentEntry string) error +type Service interface { + // Start starts the service - load the configuration + Start(ctx context.Context, cancelFunction context.CancelFunc) error } diff --git a/maestro/redis/consumer/hashset.go b/maestro/redis/consumer/hashset.go index a5627601b..1a6eb6d78 100644 --- a/maestro/redis/consumer/hashset.go +++ b/maestro/redis/consumer/hashset.go @@ -2,7 +2,7 @@ package consumer import ( "context" - "github.com/ns1labs/orb/maestro" + "github.com/ns1labs/orb/maestro/config" "go.uber.org/zap" "time" ) @@ -21,11 +21,15 @@ func (es eventStore) GetDeploymentEntryFromSinkId(ctx context.Context, sinkId st // handleSinksDeleteCollector will delete Deployment Entry and force delete otel collector func (es eventStore) handleSinksDeleteCollector(ctx context.Context, event sinksUpdateEvent) error { es.logger.Info("Received maestro DELETE event from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) - es.client.HDel(ctx, deploymentKey, event.sinkID) - err := es.maestroService.DeleteOtelCollector(ctx, event.sinkID) + deployment, err := es.GetDeploymentEntryFromSinkId(ctx, event.sinkID) + if err != nil { + return err + } + err = es.kubecontrol.DeleteOtelCollector(ctx, event.sinkID, deployment) if err != nil { 
return err } + es.client.HDel(ctx, deploymentKey, event.sinkID) return nil } @@ -35,30 +39,37 @@ func (es eventStore) handleSinksCreateCollector(ctx context.Context, event sinks sinkUrl := event.config["sink_url"].(string) sinkUsername := event.config["username"].(string) sinkPassword := event.config["password"].(string) - deploy, err := maestro.GetDeploymentJson(event.sinkID, sinkUrl, sinkUsername, sinkPassword) + err2 := es.CreateDeploymentEntry(ctx, event.sinkID, sinkUrl, sinkUsername, sinkPassword) + if err2 != nil { + return err2 + } + + return nil +} + +func (es eventStore) CreateDeploymentEntry(ctx context.Context, sinkId, sinkUrl, sinkUsername, sinkPassword string) error { + deploy, err := config.GetDeploymentJson(sinkId, sinkUrl, sinkUsername, sinkPassword) if err != nil { - es.logger.Error("error trying to get deployment json for sink ID", zap.String("sinkId", event.sinkID)) + es.logger.Error("error trying to get deployment json for sink ID", zap.String("sinkId", sinkId)) return err } - es.client.HSet(ctx, deploymentKey, event.sinkID, deploy) - + es.client.HSet(ctx, deploymentKey, sinkId, deploy) return nil } // handleSinksUpdateCollector will update Deployment Entry in Redis and force update otel collector func (es eventStore) handleSinksUpdateCollector(ctx context.Context, event sinksUpdateEvent) error { es.logger.Info("Received event to Update DeploymentEntry from sinks ID=" + event.sinkID + ", Owner ID=" + event.ownerID) - sinkUrl := event.config["sink_url"].(string) sinkUsername := event.config["username"].(string) sinkPassword := event.config["password"].(string) - deploy, err := maestro.GetDeploymentJson(event.sinkID, sinkUrl, sinkUsername, sinkPassword) + deploy, err := config.GetDeploymentJson(event.sinkID, sinkUrl, sinkUsername, sinkPassword) if err != nil { es.logger.Error("error trying to get deployment json for sink ID", zap.String("sinkId", event.sinkID)) return err } es.client.HSet(ctx, deploymentKey, event.sinkID, deploy) - err = es.maestroService.UpdateOtelCollector(ctx, event.sinkID, deploy) + err = es.kubecontrol.UpdateOtelCollector(ctx, event.sinkID, deploy) if err != nil { return err } diff --git a/maestro/redis/consumer/streams.go b/maestro/redis/consumer/streams.go index 13e35626d..5b24795f8 100644 --- a/maestro/redis/consumer/streams.go +++ b/maestro/redis/consumer/streams.go @@ -2,11 +2,11 @@ package consumer import ( "context" + "github.com/ns1labs/orb/maestro/kubecontrol" "github.com/ns1labs/orb/pkg/types" "time" "github.com/go-redis/redis/v8" - "github.com/ns1labs/orb/maestro" "go.uber.org/zap" ) @@ -27,23 +27,25 @@ const ( ) type Subscriber interface { + CreateDeploymentEntry(ctx context.Context, sinkId, sinkUrl, sinkUsername, sinkPassword string) error + GetDeploymentEntryFromSinkId(ctx context.Context, sinkId string) (string, error) SubscribeSinks(context context.Context) error SubscribeSinker(context context.Context) error } type eventStore struct { - maestroService maestro.MaestroService - client *redis.Client - esconsumer string - logger *zap.Logger + kubecontrol kubecontrol.Service + client *redis.Client + esconsumer string + logger *zap.Logger } -func NewEventStore(maestroService maestro.MaestroService, client *redis.Client, esconsumer string, logger *zap.Logger) Subscriber { +func NewEventStore(client *redis.Client, kubecontrol kubecontrol.Service, esconsumer string, logger *zap.Logger) Subscriber { return eventStore{ - maestroService: maestroService, - client: client, - esconsumer: esconsumer, - logger: logger, + kubecontrol: kubecontrol, + 
client: client, + esconsumer: esconsumer, + logger: logger, } } @@ -130,24 +132,28 @@ func (es eventStore) SubscribeSinks(context context.Context) error { } } -// Delete collector +// handleSinkerDeleteCollector Delete collector func (es eventStore) handleSinkerDeleteCollector(ctx context.Context, event sinkerUpdateEvent) error { es.logger.Info("Received maestro DELETE event from sinker, sink state=" + event.state + ", , Sink ID=" + event.sinkID + ", Owner ID=" + event.ownerID) - err := es.maestroService.DeleteOtelCollector(ctx, event.sinkID) + deployment, err := es.GetDeploymentEntryFromSinkId(ctx, event.sinkID) + if err != nil { + return err + } + err = es.kubecontrol.DeleteOtelCollector(ctx, event.sinkID, deployment) if err != nil { return err } return nil } -// Create collector +// handleSinkerCreateCollector Create collector func (es eventStore) handleSinkerCreateCollector(ctx context.Context, event sinkerUpdateEvent) error { es.logger.Info("Received maestro CREATE event from sinker, sink state=" + event.state + ", Sink ID=" + event.sinkID + ", Owner ID=" + event.ownerID) deploymentEntry, err := es.GetDeploymentEntryFromSinkId(ctx, event.sinkID) if err != nil { return err } - err = es.maestroService.CreateOtelCollector(ctx, event.sinkID, deploymentEntry) + err = es.kubecontrol.CreateOtelCollector(ctx, event.sinkID, deploymentEntry) if err != nil { return err } diff --git a/maestro/service.go b/maestro/service.go index 5cc705e9e..e5a4b45dc 100644 --- a/maestro/service.go +++ b/maestro/service.go @@ -9,20 +9,152 @@ package maestro import ( + "context" + "database/sql/driver" + "encoding/json" "github.com/go-redis/redis/v8" + "github.com/ns1labs/orb/maestro/kubecontrol" + rediscons1 "github.com/ns1labs/orb/maestro/redis/consumer" + "github.com/ns1labs/orb/pkg/config" + sinkspb "github.com/ns1labs/orb/sinks/pb" "go.uber.org/zap" + "time" ) -var _ MaestroService = (*maestroService)(nil) +var _ Service = (*maestroService)(nil) type maestroService struct { + serviceContext context.Context + serviceCancelFunc context.CancelFunc + + kubecontrol kubecontrol.Service logger *zap.Logger redisClient *redis.Client + sinksClient sinkspb.SinkServiceClient + esCfg config.EsConfig + eventStore rediscons1.Subscriber } -func NewMaestroService(logger *zap.Logger, redisClient *redis.Client) MaestroService { +func NewMaestroService(logger *zap.Logger, redisClient *redis.Client, sinksGrpcClient sinkspb.SinkServiceClient, esCfg config.EsConfig) Service { + kubectr := kubecontrol.NewService(logger) + eventStore := rediscons1.NewEventStore(redisClient, kubectr, esCfg.Consumer, logger) return &maestroService{ logger: logger, redisClient: redisClient, + sinksClient: sinksGrpcClient, + kubecontrol: kubectr, + eventStore: eventStore, + } +} + +type SinkData struct { + SinkID string `json:"sink_id"` + OwnerID string `json:"owner_id"` + Url string `json:"remote_host"` + User string `json:"username"` + Password string `json:"password"` + State PrometheusState `json:"state,omitempty"` + Msg string `json:"msg,omitempty"` + LastRemoteWrite time.Time `json:"last_remote_write,omitempty"` +} + +const ( + Unknown PrometheusState = iota + Active + Error + Idle +) + +type PrometheusState int + +var promStateMap = [...]string{ + "unknown", + "active", + "error", + "idle", +} + +var promStateRevMap = map[string]PrometheusState{ + "unknown": Unknown, + "active": Active, + "error": Error, + "idle": Idle, +} + +func (p PrometheusState) String() string { + return promStateMap[p] +} + +func (p *PrometheusState) Scan(value interface{}) 
error { + *p = promStateRevMap[string(value.([]byte))] + return nil +} + +func (p PrometheusState) Value() (driver.Value, error) { + return p.String(), nil +} + +// Start will load all sinks from DB using SinksGRPC, +// +// then for each sink, will create DeploymentEntry in Redis +// And for each sink with active state, deploy OtelCollector +func (svc *maestroService) Start(ctx context.Context, cancelFunction context.CancelFunc) error { + + loadCtx, loadCancelFunction := context.WithCancel(ctx) + defer loadCancelFunction() + svc.serviceContext = ctx + svc.serviceCancelFunc = cancelFunction + + sinksRes, err := svc.sinksClient.RetrieveSinks(loadCtx, &sinkspb.SinksFilterReq{OtelEnabled: "enabled"}) + if err != nil { + loadCancelFunction() + return err + } + + for _, sinkRes := range sinksRes.Sinks { + sinkContext := context.WithValue(loadCtx, "sink-id", sinkRes.Id) + var data SinkData + if err := json.Unmarshal(sinkRes.Config, &data); err != nil { + svc.logger.Warn("failed to unmarshal sink, skipping", zap.String("sink-id", data.SinkID)) + continue + } + + err := svc.eventStore.CreateDeploymentEntry(sinkContext, sinkRes.Id, data.Url, data.User, data.Password) + if err != nil { + svc.logger.Warn("failed to create deploymentEntry for sink, skipping", zap.String("sink-id", data.SinkID)) + continue + } + + // if State is Active, deploy OtelCollector + if data.State == Active { + deploymentEntry, err := svc.eventStore.GetDeploymentEntryFromSinkId(sinkContext, data.SinkID) + if err != nil { + svc.logger.Warn("failed to fetch deploymentEntry for sink, skipping", zap.String("sink-id", data.SinkID)) + continue + } + err = svc.kubecontrol.CreateOtelCollector(sinkContext, data.SinkID, deploymentEntry) + if err != nil { + svc.logger.Warn("failed to deploy OtelCollector for sink, skipping", zap.String("sink-id", data.SinkID)) + continue + } + } + } + + go svc.subscribeToSinksES() + go svc.subscribeToSinkerES() + return nil +} + +func (svc *maestroService) subscribeToSinkerES() { + if err := svc.eventStore.SubscribeSinker(context.Background()); err != nil { + svc.logger.Error("Bootstrap service failed to subscribe to event sourcing sinker", zap.Error(err)) + } + svc.logger.Info("Subscribed to Redis Event Store for sinker") +} + +func (svc *maestroService) subscribeToSinksES() { + svc.logger.Info("Subscribed to Redis Event Store for sinks") + if err := svc.eventStore.SubscribeSinks(context.Background()); err != nil { + svc.logger.Error("Bootstrap service failed to subscribe to event sourcing sinks", zap.Error(err)) } } diff --git a/pkg/types/maps.go b/pkg/types/maps.go index 7b5c586f9..ee4aaaaa8 100644 --- a/pkg/types/maps.go +++ b/pkg/types/maps.go @@ -33,6 +33,15 @@ func (s *Metadata) RestrictKeys(predicate func(string) bool) { } } +func (s *Metadata) IsApplicable(filterFunc func(string, interface{}) bool) bool { + for key, value := range *s { + if filterFunc(key, value) { + return true + } + } + return false +} + func (s *Metadata) FilterMap(predicateFunc func(string) bool, mapFunc func(string, interface{}) (string, interface{})) { for key, value := range *s { if predicateFunc(key) { diff --git a/policies/pb/policies.pb.go b/policies/pb/policies.pb.go index e5421415f..767c80859 100644 --- a/policies/pb/policies.pb.go +++ b/policies/pb/policies.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
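Before the regenerated policies stubs below, a short usage sketch for the Metadata.IsApplicable helper added in pkg/types/maps.go above. The metadata keys here are illustrative; the sketch assumes types.Metadata is the package's map[string]interface{} type, as its range loop implies.

package main

import (
	"fmt"

	"github.com/ns1labs/orb/pkg/types"
)

func main() {
	cfg := types.Metadata{
		"opentelemetry": "enabled",
		"remote_host":   "https://prom.example.com",
	}
	// True as soon as any key/value pair satisfies the predicate.
	otel := cfg.IsApplicable(func(key string, value interface{}) bool {
		s, ok := value.(string)
		return key == "opentelemetry" && ok && s == "enabled"
	})
	fmt.Println(otel) // true
}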
// versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.14.0 +// protoc-gen-go v1.28.1 +// protoc v3.12.4 // source: policies/pb/policies.proto package pb diff --git a/policies/pb/policies_grpc.pb.go b/policies/pb/policies_grpc.pb.go index 980860d42..c6d44c377 100644 --- a/policies/pb/policies_grpc.pb.go +++ b/policies/pb/policies_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.12.4 +// source: policies/pb/policies.proto package pb diff --git a/sinker/redis/consumer/streams.go b/sinker/redis/consumer/streams.go index 6c93d58d2..99fec13a6 100644 --- a/sinker/redis/consumer/streams.go +++ b/sinker/redis/consumer/streams.go @@ -89,6 +89,7 @@ func decodeSinksUpdate(event map[string]interface{}) (updateSinkEvent, error) { owner: read(event, "owner", ""), timestamp: time.Time{}, } + var metadata types.Metadata if err := json.Unmarshal([]byte(read(event, "config", "")), &metadata); err != nil { return updateSinkEvent{}, err diff --git a/sinker/redis/producer/events.go b/sinker/redis/producer/events.go index 79ead9a3d..1363c6cab 100644 --- a/sinker/redis/producer/events.go +++ b/sinker/redis/producer/events.go @@ -25,6 +25,24 @@ type SinkerUpdateEvent struct { Timestamp time.Time } +type SinkerOtelConfigEvent struct { + SinkId string + Owner string + State string + ConfigYaml string + Timestamp time.Time +} + +func (e SinkerOtelConfigEvent) Encode() map[string]interface{} { + return map[string]interface{}{ + "sink_id": e.SinkId, + "owner": e.Owner, + "state": e.State, + "config": e.ConfigYaml, + "timestamp": e.Timestamp.Unix(), + } +} + func (cse SinkerUpdateEvent) Encode() map[string]interface{} { return map[string]interface{}{ "sink_id": cse.SinkID, diff --git a/sinks/api/grpc/client.go b/sinks/api/grpc/client.go index bf12b96fb..aa2f0f48c 100644 --- a/sinks/api/grpc/client.go +++ b/sinks/api/grpc/client.go @@ -22,11 +22,41 @@ import ( var _ pb.SinkServiceClient = (*grpcClient)(nil) type grpcClient struct { - timeout time.Duration - retrieveSink endpoint.Endpoint + timeout time.Duration + retrieveSink endpoint.Endpoint + retrieveSinks endpoint.Endpoint } -func (client grpcClient) RetrieveSink(ctx context.Context, in *pb.SinkByIDReq, opts ...grpc.CallOption) (*pb.SinkRes, error) { +func (client grpcClient) RetrieveSinks(ctx context.Context, in *pb.SinksFilterReq, _ ...grpc.CallOption) (*pb.SinksRes, error) { + ctx, cancel := context.WithTimeout(ctx, client.timeout) + defer cancel() + + sinksFilter := sinksFilter{ + isOtel: in.OtelEnabled, + } + + res, err := client.retrieveSinks(ctx, sinksFilter) + if err != nil { + return nil, err + } + var sinksResponse *pb.SinksRes + ir := res.(sinksRes) + for _, sinkResponse := range ir.sinks { + sinksResponse.Sinks = append(sinksResponse.Sinks, &pb.SinkRes{ + Id: sinkResponse.id, + Name: sinkResponse.name, + Description: sinkResponse.description, + Tags: sinkResponse.tags, + State: sinkResponse.state, + Error: sinkResponse.error, + Backend: sinkResponse.backend, + Config: sinkResponse.config, + }) + } + return sinksResponse, nil +} + +func (client grpcClient) RetrieveSink(ctx context.Context, in *pb.SinkByIDReq, _ ...grpc.CallOption) (*pb.SinkRes, error) { ctx, cancel := context.WithTimeout(ctx, client.timeout) defer cancel() @@ -66,7 +96,39 @@ func NewClient(tracer opentracing.Tracer, conn *grpc.ClientConn, timeout time.Du decodeSinkResponse, pb.SinkRes{}, ).Endpoint()), + retrieveSinks: kitot.TraceClient(tracer, "retrieve_sinks")(kitgrpc.NewClient( + conn, + 
svcName, + "RetrieveSinks", + encodeRetrieveSinksRequest, + decodeSinksResponse, + pb.SinkRes{}, + ).Endpoint()), + } +} + +func encodeRetrieveSinksRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(*sinksFilter) + return &pb.SinksFilterReq{OtelEnabled: req.isOtel}, nil +} + +func decodeSinksResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(*pb.SinksRes) + var sinksRes sinksRes + for _, sink := range res.Sinks { + sinkRs := sinkRes{ + id: sink.Id, + name: sink.Name, + description: sink.Description, + tags: sink.Tags, + state: sink.State, + error: sink.Error, + backend: sink.Backend, + config: sink.Config, + } + sinksRes.sinks = append(sinksRes.sinks, sinkRs) } + return &sinksRes, nil } func encodeRetrieveSinkRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { diff --git a/sinks/api/grpc/endpoint.go b/sinks/api/grpc/endpoint.go index a57cf792f..facf65f8d 100644 --- a/sinks/api/grpc/endpoint.go +++ b/sinks/api/grpc/endpoint.go @@ -44,3 +44,49 @@ func retrieveSinkEndpoint(svc sinks.SinkService) endpoint.Endpoint { return res, err } } + +func retrieveSinksEndpoint(svc sinks.SinkService) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + req := request.(sinksFilter) + filter := sinks.Filter{ + OpenTelemetry: req.isOtel, + } + sinksInternal, err := svc.ListSinksInternal(ctx, filter) + if err != nil { + return sinksInternal, err + } + responseStr := sinksRes{} + for _, sink := range sinksInternal { + sinkResponse, err := buildSinkResponse(sink) + if err != nil { + + } else { + responseStr.sinks = append(responseStr.sinks, sinkResponse) + } + } + + return responseStr, err + } +} + +func buildSinkResponse(sink sinks.Sink) (sinkRes, error) { + tagData, err := json.Marshal(sink.Tags) + if err != nil { + return sinkRes{}, err + } + configData, err := json.Marshal(sink.Config) + if err != nil { + return sinkRes{}, err + } + + return sinkRes{ + id: sink.ID, + name: sink.Name.String(), + description: sink.Description, + tags: tagData, + state: sink.State.String(), + error: sink.Error, + backend: sink.Backend, + config: configData, + }, nil +} diff --git a/sinks/api/grpc/request.go b/sinks/api/grpc/request.go index f54eeb226..38b031fe3 100644 --- a/sinks/api/grpc/request.go +++ b/sinks/api/grpc/request.go @@ -17,6 +17,10 @@ type accessByIDReq struct { OwnerID string } +type sinksFilter struct { + isOtel string +} + func (req accessByIDReq) validate() error { if req.SinkID == "" || req.OwnerID == "" { return sinks.ErrMalformedEntity diff --git a/sinks/api/grpc/response.go b/sinks/api/grpc/response.go index a9f5a3358..a3a8d01ea 100644 --- a/sinks/api/grpc/response.go +++ b/sinks/api/grpc/response.go @@ -18,3 +18,7 @@ type sinkRes struct { backend string config []byte } + +type sinksRes struct { + sinks []sinkRes +} diff --git a/sinks/api/grpc/server.go b/sinks/api/grpc/server.go index 7957df184..4beb4a85c 100644 --- a/sinks/api/grpc/server.go +++ b/sinks/api/grpc/server.go @@ -25,6 +25,7 @@ type grpcServer struct { pb.UnimplementedSinkServiceServer retrieveSink kitgrpc.Handler passwordService sinks.PasswordService + retrieveSinks kitgrpc.Handler } func NewServer(tracer opentracing.Tracer, svc sinks.SinkService) pb.SinkServiceServer { @@ -34,7 +35,21 @@ func NewServer(tracer opentracing.Tracer, svc sinks.SinkService) pb.SinkServiceS decodeRetrieveSinkRequest, encodeSinkResponse, ), + retrieveSinks: kitgrpc.NewServer( + kitot.TraceServer(tracer, 
"retrieve_sinks")(retrieveSinksEndpoint(svc)), + decodeRetrieveSinksRequest, + encodeSinksResponse, + ), + } +} + +func (gs *grpcServer) RetrieveSinks(ctx context.Context, req *pb.SinksFilterReq) (*pb.SinksRes, error) { + _, res, err := gs.retrieveSinks.ServeGRPC(ctx, req) + if err != nil { + return nil, encodeError(err) } + + return res.(*pb.SinksRes), nil } func (gs *grpcServer) RetrieveSink(ctx context.Context, req *pb.SinkByIDReq) (*pb.SinkRes, error) { @@ -46,6 +61,30 @@ func (gs *grpcServer) RetrieveSink(ctx context.Context, req *pb.SinkByIDReq) (*p return res.(*pb.SinkRes), nil } +func decodeRetrieveSinksRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(*pb.SinksFilterReq) + return sinksFilter{isOtel: req.OtelEnabled}, nil +} + +func encodeSinksResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(sinksRes) + var sinksRes *pb.SinksRes + for _, sink := range res.sinks { + sinkRes := &pb.SinkRes{ + Id: sink.id, + Name: sink.name, + Description: sink.description, + Tags: sink.tags, + State: sink.state, + Error: sink.error, + Backend: sink.backend, + Config: sink.config, + } + sinksRes.Sinks = append(sinksRes.Sinks, sinkRes) + } + return &sinksRes, nil +} + func decodeRetrieveSinkRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { req := grpcReq.(*pb.SinkByIDReq) return accessByIDReq{SinkID: req.SinkID, OwnerID: req.OwnerID}, nil diff --git a/sinks/api/http/logging.go b/sinks/api/http/logging.go index 3bba17426..d1a82ad06 100644 --- a/sinks/api/http/logging.go +++ b/sinks/api/http/logging.go @@ -19,6 +19,20 @@ type loggingMiddleware struct { svc sinks.SinkService } +func (l loggingMiddleware) ListSinksInternal(ctx context.Context, filter sinks.Filter) (sinks []sinks.Sink, err error) { + defer func(begin time.Time) { + if err != nil { + l.logger.Warn("method call: list_sinks_internal", + zap.Error(err), + zap.Duration("duration", time.Since(begin))) + } else { + l.logger.Info("method call: list_sinks_internal", + zap.Duration("duration", time.Since(begin))) + } + }(time.Now()) + return l.svc.ListSinksInternal(ctx, filter) +} + func (l loggingMiddleware) ChangeSinkStateInternal(ctx context.Context, sinkID string, msg string, ownerID string, state sinks.State) (err error) { defer func(begin time.Time) { if err != nil { diff --git a/sinks/api/http/metrics.go b/sinks/api/http/metrics.go index 842598a9d..ae51d1a77 100644 --- a/sinks/api/http/metrics.go +++ b/sinks/api/http/metrics.go @@ -23,6 +23,11 @@ type metricsMiddleware struct { svc sinks.SinkService } +// ListSinksInternal Will not count metrics since it is internal-service only rpc +func (m metricsMiddleware) ListSinksInternal(ctx context.Context, filter sinks.Filter) (sinks []sinks.Sink, err error) { + return m.svc.ListSinksInternal(ctx, filter) +} + func (m metricsMiddleware) ChangeSinkStateInternal(ctx context.Context, sinkID string, msg string, ownerID string, state sinks.State) error { defer func(begin time.Time) { labels := []string{ diff --git a/sinks/api/http/openapi.yaml b/sinks/api/http/openapi.yaml index b707d28ac..36ffa2f26 100644 --- a/sinks/api/http/openapi.yaml +++ b/sinks/api/http/openapi.yaml @@ -437,6 +437,7 @@ components: example: remote_host: my.prometheus-host.com username: dbuser + opentelemetry: enabled description: Object representing backend specific configuration information ts_created: type: string diff --git a/sinks/mocks/client.go b/sinks/mocks/client.go index c7e9100f4..caab5b91c 100644 --- a/sinks/mocks/client.go 
+++ b/sinks/mocks/client.go @@ -18,6 +18,10 @@ var _ pb.SinkServiceClient = (*grpcClient)(nil) type grpcClient struct{} +func (client grpcClient) RetrieveSinks(ctx context.Context, in *pb.SinksFilterReq, opts ...grpc.CallOption) (*pb.SinksRes, error) { + return &pb.SinksRes{}, nil +} + func (client grpcClient) RetrieveSink(ctx context.Context, in *pb.SinkByIDReq, opts ...grpc.CallOption) (*pb.SinkRes, error) { return &pb.SinkRes{}, nil } diff --git a/sinks/mocks/sinks.go b/sinks/mocks/sinks.go index 682473356..9844b8e45 100644 --- a/sinks/mocks/sinks.go +++ b/sinks/mocks/sinks.go @@ -26,6 +26,10 @@ type sinkRepositoryMock struct { sinksMock map[string]sinks.Sink } +func (s *sinkRepositoryMock) SearchAllSinks(ctx context.Context, filter sinks.Filter) ([]sinks.Sink, error) { + return nil, nil +} + func (s *sinkRepositoryMock) UpdateSinkState(ctx context.Context, sinkID string, msg string, ownerID string, state sinks.State) error { return nil } @@ -82,7 +86,7 @@ func (s *sinkRepositoryMock) Update(ctx context.Context, sink sinks.Sink) (err e return sinks.ErrNotFound } -func (s *sinkRepositoryMock) RetrieveAll(ctx context.Context, owner string, pm sinks.PageMetadata) (sinks.Page, error) { +func (s *sinkRepositoryMock) RetrieveAllByOwnerID(ctx context.Context, owner string, pm sinks.PageMetadata) (sinks.Page, error) { s.mu.Lock() defer s.mu.Unlock() diff --git a/sinks/pb/sinks.pb.go b/sinks/pb/sinks.pb.go index db191d5e5..1f3372309 100644 --- a/sinks/pb/sinks.pb.go +++ b/sinks/pb/sinks.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.17.3 +// protoc-gen-go v1.28.1 +// protoc v3.12.4 // source: sinks/pb/sinks.proto package pb @@ -20,6 +20,100 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type SinksRes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sinks []*SinkRes `protobuf:"bytes,1,rep,name=sinks,proto3" json:"sinks,omitempty"` +} + +func (x *SinksRes) Reset() { + *x = SinksRes{} + if protoimpl.UnsafeEnabled { + mi := &file_sinks_pb_sinks_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SinksRes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SinksRes) ProtoMessage() {} + +func (x *SinksRes) ProtoReflect() protoreflect.Message { + mi := &file_sinks_pb_sinks_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SinksRes.ProtoReflect.Descriptor instead. 
+func (*SinksRes) Descriptor() ([]byte, []int) { + return file_sinks_pb_sinks_proto_rawDescGZIP(), []int{0} +} + +func (x *SinksRes) GetSinks() []*SinkRes { + if x != nil { + return x.Sinks + } + return nil +} + +type SinksFilterReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OtelEnabled string `protobuf:"bytes,1,opt,name=otelEnabled,proto3" json:"otelEnabled,omitempty"` +} + +func (x *SinksFilterReq) Reset() { + *x = SinksFilterReq{} + if protoimpl.UnsafeEnabled { + mi := &file_sinks_pb_sinks_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SinksFilterReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SinksFilterReq) ProtoMessage() {} + +func (x *SinksFilterReq) ProtoReflect() protoreflect.Message { + mi := &file_sinks_pb_sinks_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SinksFilterReq.ProtoReflect.Descriptor instead. +func (*SinksFilterReq) Descriptor() ([]byte, []int) { + return file_sinks_pb_sinks_proto_rawDescGZIP(), []int{1} +} + +func (x *SinksFilterReq) GetOtelEnabled() string { + if x != nil { + return x.OtelEnabled + } + return "" +} + type SinkByIDReq struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -32,7 +126,7 @@ type SinkByIDReq struct { func (x *SinkByIDReq) Reset() { *x = SinkByIDReq{} if protoimpl.UnsafeEnabled { - mi := &file_sinks_pb_sinks_proto_msgTypes[0] + mi := &file_sinks_pb_sinks_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -45,7 +139,7 @@ func (x *SinkByIDReq) String() string { func (*SinkByIDReq) ProtoMessage() {} func (x *SinkByIDReq) ProtoReflect() protoreflect.Message { - mi := &file_sinks_pb_sinks_proto_msgTypes[0] + mi := &file_sinks_pb_sinks_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -58,7 +152,7 @@ func (x *SinkByIDReq) ProtoReflect() protoreflect.Message { // Deprecated: Use SinkByIDReq.ProtoReflect.Descriptor instead. func (*SinkByIDReq) Descriptor() ([]byte, []int) { - return file_sinks_pb_sinks_proto_rawDescGZIP(), []int{0} + return file_sinks_pb_sinks_proto_rawDescGZIP(), []int{2} } func (x *SinkByIDReq) GetSinkID() string { @@ -93,7 +187,7 @@ type SinkRes struct { func (x *SinkRes) Reset() { *x = SinkRes{} if protoimpl.UnsafeEnabled { - mi := &file_sinks_pb_sinks_proto_msgTypes[1] + mi := &file_sinks_pb_sinks_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -106,7 +200,7 @@ func (x *SinkRes) String() string { func (*SinkRes) ProtoMessage() {} func (x *SinkRes) ProtoReflect() protoreflect.Message { - mi := &file_sinks_pb_sinks_proto_msgTypes[1] + mi := &file_sinks_pb_sinks_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -119,7 +213,7 @@ func (x *SinkRes) ProtoReflect() protoreflect.Message { // Deprecated: Use SinkRes.ProtoReflect.Descriptor instead. 
func (*SinkRes) Descriptor() ([]byte, []int) { - return file_sinks_pb_sinks_proto_rawDescGZIP(), []int{1} + return file_sinks_pb_sinks_proto_rawDescGZIP(), []int{3} } func (x *SinkRes) GetId() string { @@ -182,29 +276,39 @@ var File_sinks_pb_sinks_proto protoreflect.FileDescriptor var file_sinks_pb_sinks_proto_rawDesc = []byte{ 0x0a, 0x14, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x69, 0x6e, 0x6b, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x22, 0x3f, 0x0a, - 0x0b, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, - 0x73, 0x69, 0x6e, 0x6b, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x69, - 0x6e, 0x6b, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, 0xc1, - 0x01, 0x0a, 0x07, 0x53, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, - 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x74, 0x61, 0x67, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x32, 0x43, 0x0a, 0x0b, 0x53, 0x69, 0x6e, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x34, 0x0a, 0x0c, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x53, 0x69, 0x6e, - 0x6b, 0x12, 0x12, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x2e, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x79, - 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x0e, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x2e, 0x53, 0x69, - 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x22, 0x00, 0x42, 0x0a, 0x5a, 0x08, 0x73, 0x69, 0x6e, 0x6b, 0x73, - 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x22, 0x30, 0x0a, + 0x08, 0x53, 0x69, 0x6e, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x05, 0x73, 0x69, 0x6e, + 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x73, + 0x2e, 0x53, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x22, + 0x32, 0x0a, 0x0e, 0x53, 0x69, 0x6e, 0x6b, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x12, 0x20, 0x0a, 0x0b, 0x6f, 0x74, 0x65, 0x6c, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x74, 0x65, 0x6c, 0x45, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x22, 0x3f, 0x0a, 0x0b, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x79, 0x49, 0x44, 0x52, + 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x6e, 0x6b, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 
0x06, 0x73, 0x69, 0x6e, 0x6b, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x49, 0x44, 0x22, 0xc1, 0x01, 0x0a, 0x07, 0x53, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x73, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, + 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x7e, 0x0a, 0x0b, 0x53, 0x69, 0x6e, 0x6b, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x0c, 0x52, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x76, 0x65, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x12, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x2e, + 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x0e, 0x2e, 0x73, 0x69, + 0x6e, 0x6b, 0x73, 0x2e, 0x53, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x39, 0x0a, + 0x0d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x53, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x15, + 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x2e, 0x53, 0x69, 0x6e, 0x6b, 0x73, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x2e, 0x53, 0x69, + 0x6e, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x22, 0x00, 0x42, 0x0a, 0x5a, 0x08, 0x73, 0x69, 0x6e, 0x6b, + 0x73, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -219,19 +323,24 @@ func file_sinks_pb_sinks_proto_rawDescGZIP() []byte { return file_sinks_pb_sinks_proto_rawDescData } -var file_sinks_pb_sinks_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_sinks_pb_sinks_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_sinks_pb_sinks_proto_goTypes = []interface{}{ - (*SinkByIDReq)(nil), // 0: sinks.SinkByIDReq - (*SinkRes)(nil), // 1: sinks.SinkRes + (*SinksRes)(nil), // 0: sinks.SinksRes + (*SinksFilterReq)(nil), // 1: sinks.SinksFilterReq + (*SinkByIDReq)(nil), // 2: sinks.SinkByIDReq + (*SinkRes)(nil), // 3: sinks.SinkRes } var file_sinks_pb_sinks_proto_depIdxs = []int32{ - 0, // 0: sinks.SinkService.RetrieveSink:input_type -> sinks.SinkByIDReq - 1, // 1: sinks.SinkService.RetrieveSink:output_type -> sinks.SinkRes - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 3, // 0: sinks.SinksRes.sinks:type_name -> sinks.SinkRes + 2, // 1: sinks.SinkService.RetrieveSink:input_type -> sinks.SinkByIDReq + 1, // 2: sinks.SinkService.RetrieveSinks:input_type 
-> sinks.SinksFilterReq + 3, // 3: sinks.SinkService.RetrieveSink:output_type -> sinks.SinkRes + 0, // 4: sinks.SinkService.RetrieveSinks:output_type -> sinks.SinksRes + 3, // [3:5] is the sub-list for method output_type + 1, // [1:3] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_sinks_pb_sinks_proto_init() } @@ -241,7 +350,7 @@ func file_sinks_pb_sinks_proto_init() { } if !protoimpl.UnsafeEnabled { file_sinks_pb_sinks_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SinkByIDReq); i { + switch v := v.(*SinksRes); i { case 0: return &v.state case 1: @@ -253,6 +362,30 @@ func file_sinks_pb_sinks_proto_init() { } } file_sinks_pb_sinks_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SinksFilterReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sinks_pb_sinks_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SinkByIDReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sinks_pb_sinks_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SinkRes); i { case 0: return &v.state @@ -271,7 +404,7 @@ func file_sinks_pb_sinks_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_sinks_pb_sinks_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 4, NumExtensions: 0, NumServices: 1, }, diff --git a/sinks/pb/sinks.proto b/sinks/pb/sinks.proto index ee3672cee..1ecc1ff66 100644 --- a/sinks/pb/sinks.proto +++ b/sinks/pb/sinks.proto @@ -5,6 +5,15 @@ option go_package = "sinks/pb"; service SinkService { rpc RetrieveSink(SinkByIDReq) returns (SinkRes) {} + rpc RetrieveSinks(SinksFilterReq) returns (SinksRes) {} +} + +message SinksRes { + repeated SinkRes sinks = 1; +} + +message SinksFilterReq { + string otelEnabled = 1; } message SinkByIDReq { @@ -21,4 +30,4 @@ message SinkRes { string error = 6; string backend = 7; bytes config = 8; -} \ No newline at end of file +} diff --git a/sinks/pb/sinks_grpc.pb.go b/sinks/pb/sinks_grpc.pb.go index f46f3c940..864a656f7 100644 --- a/sinks/pb/sinks_grpc.pb.go +++ b/sinks/pb/sinks_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.12.4 +// source: sinks/pb/sinks.proto package pb @@ -19,6 +23,7 @@ const _ = grpc.SupportPackageIsVersion7 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type SinkServiceClient interface { RetrieveSink(ctx context.Context, in *SinkByIDReq, opts ...grpc.CallOption) (*SinkRes, error) + RetrieveSinks(ctx context.Context, in *SinksFilterReq, opts ...grpc.CallOption) (*SinksRes, error) } type sinkServiceClient struct { @@ -38,11 +43,21 @@ func (c *sinkServiceClient) RetrieveSink(ctx context.Context, in *SinkByIDReq, o return out, nil } +func (c *sinkServiceClient) RetrieveSinks(ctx context.Context, in *SinksFilterReq, opts ...grpc.CallOption) (*SinksRes, error) { + out := new(SinksRes) + err := c.cc.Invoke(ctx, "/sinks.SinkService/RetrieveSinks", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // SinkServiceServer is the server API for SinkService service. // All implementations must embed UnimplementedSinkServiceServer // for forward compatibility type SinkServiceServer interface { RetrieveSink(context.Context, *SinkByIDReq) (*SinkRes, error) + RetrieveSinks(context.Context, *SinksFilterReq) (*SinksRes, error) mustEmbedUnimplementedSinkServiceServer() } @@ -53,6 +68,9 @@ type UnimplementedSinkServiceServer struct { func (UnimplementedSinkServiceServer) RetrieveSink(context.Context, *SinkByIDReq) (*SinkRes, error) { return nil, status.Errorf(codes.Unimplemented, "method RetrieveSink not implemented") } +func (UnimplementedSinkServiceServer) RetrieveSinks(context.Context, *SinksFilterReq) (*SinksRes, error) { + return nil, status.Errorf(codes.Unimplemented, "method RetrieveSinks not implemented") +} func (UnimplementedSinkServiceServer) mustEmbedUnimplementedSinkServiceServer() {} // UnsafeSinkServiceServer may be embedded to opt out of forward compatibility for this service. @@ -84,6 +102,24 @@ func _SinkService_RetrieveSink_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _SinkService_RetrieveSinks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SinksFilterReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SinkServiceServer).RetrieveSinks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/sinks.SinkService/RetrieveSinks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SinkServiceServer).RetrieveSinks(ctx, req.(*SinksFilterReq)) + } + return interceptor(ctx, in, info, handler) +} + // SinkService_ServiceDesc is the grpc.ServiceDesc for SinkService service. 
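With RetrieveSinks wired into both the client and server stubs, a minimal end-to-end call from another service looks like the sketch below. The plaintext credentials and address are illustrative; Maestro's Start issues the same request through its traced go-kit client.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	pb "github.com/ns1labs/orb/sinks/pb"
)

func main() {
	conn, err := grpc.Dial("localhost:8282", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %s", err)
	}
	defer conn.Close()

	client := pb.NewSinkServiceClient(conn)
	res, err := client.RetrieveSinks(context.Background(), &pb.SinksFilterReq{OtelEnabled: "enabled"})
	if err != nil {
		log.Fatalf("retrieve sinks: %s", err)
	}
	for _, s := range res.GetSinks() {
		log.Printf("sink=%s backend=%s state=%s", s.GetId(), s.GetBackend(), s.GetState())
	}
}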
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -95,6 +131,10 @@ var SinkService_ServiceDesc = grpc.ServiceDesc{ MethodName: "RetrieveSink", Handler: _SinkService_RetrieveSink_Handler, }, + { + MethodName: "RetrieveSinks", + Handler: _SinkService_RetrieveSinks_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "sinks/pb/sinks.proto", diff --git a/sinks/postgres/sinks.go b/sinks/postgres/sinks.go index fd50a0f7c..1dfeddd94 100644 --- a/sinks/postgres/sinks.go +++ b/sinks/postgres/sinks.go @@ -31,6 +31,48 @@ type sinksRepository struct { logger *zap.Logger } +func (s sinksRepository) SearchAllSinks(ctx context.Context, filter sinks.Filter) ([]sinks.Sink, error) { + q := `SELECT id, name, mf_owner_id, description, tags, state, coalesce(error, '') as error, backend, metadata, ts_created FROM sinks` + params := map[string]interface{}{} + if filter.StateFilter != "" { + q += `WHERE state == :state` + params["state"] = filter.StateFilter + } + + rows, err := s.db.NamedQueryContext(ctx, q, params) + if err != nil { + return nil, errors.Wrap(errors.ErrSelectEntity, err) + } + defer rows.Close() + + var items []sinks.Sink + for rows.Next() { + dbSink := dbSink{} + if err := rows.StructScan(&dbSink); err != nil { + return nil, errors.Wrap(errors.ErrSelectEntity, err) + } + + sink, err := toSink(dbSink) + if err != nil { + return nil, errors.Wrap(errors.ErrSelectEntity, err) + } + // metadataFilters will apply only after Fetching in metadata, due to struct + filterFunc := func(key string, value interface{}) bool { + if key == sinks.MetadataLabelOtel { + if value.(string) == filter.OpenTelemetry { + return true + } + } + return false + } + if sink.Config.IsApplicable(filterFunc) { + items = append(items, sink) + } + } + + return nil, err +} + func (s sinksRepository) Save(ctx context.Context, sink sinks.Sink) (string, error) { q := `INSERT INTO sinks (name, mf_owner_id, metadata, description, backend, tags, state, error) VALUES (:name, :mf_owner_id, :metadata, :description, :backend, :tags, :state, :error) RETURNING id` @@ -99,7 +141,7 @@ func (s sinksRepository) Update(ctx context.Context, sink sinks.Sink) error { return nil } -func (s sinksRepository) RetrieveAll(ctx context.Context, owner string, pm sinks.PageMetadata) (sinks.Page, error) { +func (s sinksRepository) RetrieveAllByOwnerID(ctx context.Context, owner string, pm sinks.PageMetadata) (sinks.Page, error) { name, nameQuery := getNameQuery(pm.Name) orderQuery := getOrderQuery(pm.Order) dirQuery := getDirQuery(pm.Dir) diff --git a/sinks/postgres/sinks_test.go b/sinks/postgres/sinks_test.go index 0f87431a3..80fa08c03 100644 --- a/sinks/postgres/sinks_test.go +++ b/sinks/postgres/sinks_test.go @@ -362,7 +362,7 @@ func TestMultiSinkRetrieval(t *testing.T) { for desc, tc := range cases { t.Run(desc, func(t *testing.T) { - page, err := sinkRepo.RetrieveAll(context.Background(), tc.owner, tc.pageMetadata) + page, err := sinkRepo.RetrieveAllByOwnerID(context.Background(), tc.owner, tc.pageMetadata) require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) size := uint64(len(page.Sinks)) assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d", desc, tc.size, size)) diff --git a/sinks/redis/producer/streams.go b/sinks/redis/producer/streams.go index 8dce77b9f..6216d8243 100644 --- a/sinks/redis/producer/streams.go +++ b/sinks/redis/producer/streams.go @@ -30,6 +30,11 @@ type eventStore struct { logger *zap.Logger } +// ListSinksInternal will only 
call following service +func (es eventStore) ListSinksInternal(ctx context.Context, filter sinks.Filter) ([]sinks.Sink, error) { + return es.svc.ListSinksInternal(ctx, filter) +} + func (es eventStore) ChangeSinkStateInternal(ctx context.Context, sinkID string, msg string, ownerID string, state sinks.State) error { return es.svc.ChangeSinkStateInternal(ctx, sinkID, msg, ownerID, state) } diff --git a/sinks/service.go b/sinks/service.go index 02d02c508..01bb365b8 100644 --- a/sinks/service.go +++ b/sinks/service.go @@ -44,11 +44,11 @@ type sinkService struct { passwordService PasswordService } -func (s sinkService) identify(token string) (string, error) { +func (svc sinkService) identify(token string) (string, error) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - res, err := s.auth.Identify(ctx, &mainflux.Token{Value: token}) + res, err := svc.auth.Identify(ctx, &mainflux.Token{Value: token}) if err != nil { return "", errors.Wrap(errors.ErrUnauthorizedAccess, err) } diff --git a/sinks/sinks.go b/sinks/sinks.go index c40bd765a..67760cb1b 100644 --- a/sinks/sinks.go +++ b/sinks/sinks.go @@ -59,6 +59,13 @@ var stateMap = [...]string{ "idle", } +const MetadataLabelOtel = "opentelemetry" + +type Filter struct { + StateFilter string + OpenTelemetry string +} + var stateRevMap = map[string]State{ "unknown": Unknown, "active": Active, @@ -112,6 +119,8 @@ type SinkService interface { UpdateSink(ctx context.Context, token string, s Sink) (Sink, error) // ListSinks retrieves data about sinks ListSinks(ctx context.Context, token string, pm PageMetadata) (Page, error) + // ListSinksInternal retrieves data from sinks filtered by SinksFilter for Services like Maestro, to build DeploymentEntries + ListSinksInternal(ctx context.Context, filter Filter) ([]Sink, error) // ListBackends retrieves a list of available backends ListBackends(ctx context.Context, token string) ([]string, error) // ViewBackend retrieves a backend by the name @@ -134,14 +143,16 @@ type SinkRepository interface { // Update performs an update to the existing sink, A non-nil error is // returned to indicate operation failure Update(ctx context.Context, sink Sink) error - // RetrieveAll retrieves Sinks - RetrieveAll(ctx context.Context, owner string, pm PageMetadata) (Page, error) - // RetrieveById retrieves a Sink by Id + // RetrieveAllByOwnerID retrieves Sinks by OwnerID + RetrieveAllByOwnerID(ctx context.Context, owner string, pm PageMetadata) (Page, error) + // SearchAllSinks search Sinks for internal usage like services + SearchAllSinks(ctx context.Context, filter Filter) ([]Sink, error) + // RetrieveById retrieves a Sink by ID RetrieveById(ctx context.Context, key string) (Sink, error) - // RetrieveById retrieves a Sink by Id + // RetrieveByOwnerAndId retrieves a By OwnerId And SinkId RetrieveByOwnerAndId(ctx context.Context, ownerID string, key string) (Sink, error) - // Remove a existing Sink by id + // Remove an existing Sink by id Remove(ctx context.Context, owner string, key string) error - // UpdateSinkState + // UpdateSinkState updates sink state like active, idle, new, unknown UpdateSinkState(ctx context.Context, sinkID string, msg string, ownerID string, state State) error } diff --git a/sinks/sinks_service.go b/sinks/sinks_service.go index dc2b9a9dc..55237639f 100644 --- a/sinks/sinks_service.go +++ b/sinks/sinks_service.go @@ -153,13 +153,28 @@ func (svc sinkService) ViewSinkInternal(ctx context.Context, ownerID string, key return res, nil } +func (svc sinkService) 
diff --git a/sinks/sinks_service.go b/sinks/sinks_service.go
index dc2b9a9dc..55237639f 100644
--- a/sinks/sinks_service.go
+++ b/sinks/sinks_service.go
@@ -153,13 +153,30 @@ func (svc sinkService) ViewSinkInternal(ctx context.Context, ownerID string, key
 	return res, nil
 }
 
+func (svc sinkService) ListSinksInternal(ctx context.Context, filter Filter) ([]Sink, error) {
+	sinkList, err := svc.sinkRepo.SearchAllSinks(ctx, filter)
+	if err != nil {
+		return nil, errors.Wrap(errors.ErrNotFound, err)
+	}
+	for i, sink := range sinkList {
+		// write the decrypted sink back through the index; assigning to
+		// the range variable alone would discard the result
+		sinkList[i], err = svc.decryptMetadata(sink)
+		if err != nil {
+			return nil, errors.Wrap(errors.ErrViewEntity, err)
+		}
+	}
+
+	return sinkList, nil
+}
+
 func (svc sinkService) ListSinks(ctx context.Context, token string, pm PageMetadata) (Page, error) {
 	res, err := svc.identify(token)
 	if err != nil {
 		return Page{}, err
 	}
 
-	return svc.sinkRepo.RetrieveAll(ctx, res, pm)
+	return svc.sinkRepo.RetrieveAllByOwnerID(ctx, res, pm)
 }
 
 func (svc sinkService) DeleteSink(ctx context.Context, token string, id string) error {
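Editor's note: as submitted, the decryption loop assigned to the range variable (`sink, err = svc.decryptMetadata(sink)`), so the decrypted value was thrown away and callers received encrypted metadata; the hunk above writes back through the index instead. A tiny runnable illustration of the pitfall:

package main

import "fmt"

type sink struct{ metadata string }

func decrypt(s sink) sink {
	s.metadata = "plain:" + s.metadata
	return s
}

func main() {
	items := []sink{{"aaa"}, {"bbb"}}

	// The range variable is a copy: this loop changes nothing in the slice.
	for _, s := range items {
		s = decrypt(s)
		_ = s
	}
	fmt.Println(items) // [{aaa} {bbb}], still encrypted

	// Writing back through the index persists the result, which is what
	// ListSinksInternal must do before handing sinks to callers.
	for i, s := range items {
		items[i] = decrypt(s)
	}
	fmt.Println(items) // [{plain:aaa} {plain:bbb}]
}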

From c4b8bbb6d6a9e3edce6d1c0305d195a0d3ba08d3 Mon Sep 17 00:00:00 2001
From: Guilhermo Pazuch <1490938+gpazuch@users.noreply.github.com>
Date: Thu, 27 Oct 2022 14:56:23 -0300
Subject: [PATCH 14/94] add otel toggle for sink creation (#1921)

---
 .../pages/sinks/add/sink.add.component.html   | 20 +++++++++++--------
 .../app/pages/sinks/add/sink.add.component.ts | 15 +++++++++++++-
 2 files changed, 26 insertions(+), 9 deletions(-)

diff --git a/ui/src/app/pages/sinks/add/sink.add.component.html b/ui/src/app/pages/sinks/add/sink.add.component.html
index b4ebd8086..fc9a219a7 100644
--- a/ui/src/app/pages/sinks/add/sink.add.component.html
+++ b/ui/src/app/pages/sinks/add/sink.add.component.html
@@ -125,12 +125,12 @@
[Angular template hunks garbled in extraction: the HTML tags were stripped, leaving only interpolation fragments such as {{strings.sink[isEdit ? 'edit' : 'add']['header']}}, [attr.data-orb-qa-id]="type" [value]="type">{{ type }}, and {{control.label}}. They indicate the form header and the backend-type select were reworked to add the OpenTelemetry toggle. The sink.add.component.ts hunk (15 insertions) is missing from this extract.]
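Editor's note: the toggle added by this patch presumably writes an opentelemetry entry into the sink config, which is what Filter.OpenTelemetry matches in SearchAllSinks via Config.IsApplicable. A self-contained sketch of that predicate walk; the map-based config type and the "enabled" value are assumptions, not taken from this diff:

package main

import "fmt"

const metadataLabelOtel = "opentelemetry" // mirrors sinks.MetadataLabelOtel

// config stands in for the sink's metadata type; the real Config type is
// not shown in this extract.
type config map[string]interface{}

// isApplicable mimics the Config.IsApplicable walk used by SearchAllSinks:
// it reports whether any key/value pair satisfies the predicate.
func (c config) isApplicable(f func(key string, value interface{}) bool) bool {
	for k, v := range c {
		if f(k, v) {
			return true
		}
	}
	return false
}

func main() {
	cfg := config{
		"remote_host":     "https://prom.example.com", // hypothetical endpoint
		metadataLabelOtel: "enabled",                   // assumed toggle value
	}
	want := "enabled"
	match := cfg.isApplicable(func(key string, value interface{}) bool {
		v, ok := value.(string)
		return key == metadataLabelOtel && ok && v == want
	})
	fmt.Println(match) // true
}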
[Garbled template hunk from an earlier patch in the series (tags stripped); only the {{ command2show }} interpolation survives.]

diff --git a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts
index f3cafa0a2..8499f2073 100644
--- a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts
+++ b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts
@@ -49,7 +49,7 @@ export class AgentProvisioningComponent implements OnInit {
 -e ORB_CLOUD_MQTT_ID=${ this.agent?.id } \\
 -e ORB_CLOUD_MQTT_CHANNEL_ID=${ this.agent?.channel_id } \\
 -e ORB_CLOUD_MQTT_KEY="AGENT_KEY" \\
--e PKTVISOR_PCAP_IFACE_DEFAULT=mock \\
+-e PKTVISOR_PCAP_IFACE_DEFAULT=auto \\
 ns1labs/orb-agent:develop`;
 
 		this.command2show = `docker run -d --restart=always --net=host \\
@@ -57,7 +57,7 @@ ns1labs/orb-agent:develop`;
 -e ORB_CLOUD_MQTT_ID=${ this.agent?.id } \\
 -e ORB_CLOUD_MQTT_CHANNEL_ID=${ this.agent?.channel_id } \\
 -e ORB_CLOUD_MQTT_KEY=AGENT_KEY \\
--e PKTVISOR_PCAP_IFACE_DEFAULT=mock \\
+-e PKTVISOR_PCAP_IFACE_DEFAULT=auto \\
 ns1labs/orb-agent:develop`;
 	}
 }

From 897b2e7392649454c1741d523a43a00bf703ad2d Mon Sep 17 00:00:00 2001
From: Guilhermo Pazuch <1490938+gpazuch@users.noreply.github.com>
Date: Mon, 31 Oct 2022 15:36:31 -0300
Subject: [PATCH 24/94] feat(ui): allow user to select policy backend. (#1938)

* pulled from 'hackathon' branch

---
 .../add/agent.policy.add.component.html      | 27 +++++++++++++++++--
 .../add/agent.policy.add.component.scss      |  4 +++
 .../add/agent.policy.add.component.ts        |  2 +-
 3 files changed, 30 insertions(+), 3 deletions(-)

diff --git a/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html b/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html
index 4fe87b9fc..d510079eb 100644
--- a/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html
+++ b/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html
@@ -11,7 +11,7 @@

[Angular template hunks garbled in extraction (HTML tags stripped); the surviving fragments, two occurrences of {{ isEdit ? 'Edit Agent Policy' : 'Create Agent Policy'}}, indicate the page header was reworked as part of letting the user select a policy backend, per the commit subject.]