From b7c3c18da9eec6f958a52c71cdc6116a981df954 Mon Sep 17 00:00:00 2001 From: dmytro_velychko3 Date: Thu, 30 Mar 2023 16:02:37 +0300 Subject: [PATCH 1/6] feat: refactor --- iam.tf | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++ main.tf | 47 ++++++++++++++++++++++------- outputs.tf | 3 +- secrets.tf | 5 ++-- variables.tf | 45 ++++++++++++++++++++++++++++ 5 files changed, 170 insertions(+), 14 deletions(-) create mode 100644 iam.tf diff --git a/iam.tf b/iam.tf new file mode 100644 index 0000000..2136a33 --- /dev/null +++ b/iam.tf @@ -0,0 +1,84 @@ +locals { + admin_user_map = var.workspace_admins.user == null ? {} : { + for user in var.workspace_admins.user : "user.${user}" => user if user != null + } + + admin_sp_map = var.workspace_admins.service_principal == null ? {} : { + for sp in var.workspace_admins.service_principal : "service_principal.${sp}" => sp if sp != null + } + + members_object_list = concat( + flatten([for group, params in var.iam : [ + for pair in setproduct([group], params.user) : { + type = "user", group = pair[0], member = pair[1] + }] if params.user != null + ]), + flatten([for group, params in var.iam : [ + for pair in setproduct([group], params.service_principal) : { + type = "service_principal", group = pair[0], member = pair[1] + }] if params.service_principal != null + ]) + ) +} + +data "databricks_group" "admin" { + display_name = "admins" +} + +resource "databricks_group" "this" { + for_each = toset(keys(var.iam)) + + display_name = each.key + lifecycle { ignore_changes = [external_id, allow_cluster_create, allow_instance_pool_create, databricks_sql_access, workspace_access] } +} + +resource "databricks_user" "this" { + for_each = toset(flatten(concat( + values({ for group, member in var.iam : group => member.user if member.user != null }), + values(local.admin_user_map) + ))) + + user_name = each.key + lifecycle { ignore_changes = [external_id, allow_cluster_create, allow_instance_pool_create, databricks_sql_access, workspace_access] } +} + +resource "databricks_service_principal" "this" { + for_each = toset(flatten(concat( + values({ for group, member in var.iam : group => member.service_principal if member.service_principal != null }), + values(local.admin_sp_map) + ))) + + display_name = each.key + application_id = lookup(var.user_object_ids, each.value) + lifecycle { ignore_changes = [external_id, allow_cluster_create, allow_instance_pool_create, databricks_sql_access, workspace_access] } +} + +resource "databricks_group_member" "admin" { + for_each = merge(local.admin_user_map, local.admin_sp_map) + + group_id = data.databricks_group.admin.id + member_id = startswith(each.key, "user") ? databricks_user.this[each.value].id : databricks_service_principal.this[each.value].id +} + +resource "databricks_group_member" "this" { + for_each = { + for entry in local.members_object_list : "${entry.type}.${entry.group}.${entry.member}" => entry + } + + group_id = databricks_group.this[each.value.group].id + member_id = startswith(each.key, "user") ? 
databricks_user.this[each.value.member].id : databricks_service_principal.this[each.value.member].id +} + +resource "databricks_entitlements" "this" { + for_each = { + for group, params in var.iam : group => params + } + + group_id = databricks_group.this[each.key].id + allow_cluster_create = contains(coalesce(each.value.entitlements, ["none"]), "allow_cluster_create") + allow_instance_pool_create = contains(coalesce(each.value.entitlements, ["none"]), "allow_instance_pool_create") + databricks_sql_access = contains(coalesce(each.value.entitlements, ["none"]), "databricks_sql_access") + workspace_access = true + + depends_on = [databricks_group_member.this] +} diff --git a/main.tf b/main.tf index db6bc80..54baab8 100644 --- a/main.tf +++ b/main.tf @@ -1,3 +1,9 @@ +/* Premium +locals { + ip_rules = var.ip_rules == null ? null : values(var.ip_rules) +} +*/ + data "azurerm_key_vault_secret" "sp_client_id" { name = var.sp_client_id_secret_name key_vault_id = var.key_vault_id @@ -13,18 +19,19 @@ data "azurerm_key_vault_secret" "tenant_id" { key_vault_id = var.key_vault_id } -resource "databricks_token" "pat" { +resource "databricks_token" "pat" { # comment = "Terraform Provisioning" lifetime_seconds = var.pat_token_lifetime_seconds } -resource "databricks_user" "this" { - for_each = var.sku == "premium" ? [] : toset(var.users) - user_name = each.value - lifecycle { ignore_changes = [external_id] } -} +#resource "databricks_user" "this" { # Only for 'Standard' SKU type +# #for_each = var.sku == "premium" ? [] : toset(var.users) +# for_each = toset(var.users) +# user_name = each.value +# lifecycle { ignore_changes = [external_id] } +#} -resource "azurerm_role_assignment" "this" { +resource "azurerm_role_assignment" "this" { ### for_each = { for permission in var.permissions : "${permission.object_id}-${permission.role}" => permission if permission.role != null @@ -35,10 +42,11 @@ resource "azurerm_role_assignment" "this" { } resource "databricks_cluster_policy" "this" { - for_each = var.sku == "premium" ? { + #for_each = var.sku == "premium" ? { + for_each = { for param in var.custom_cluster_policies : (param.name) => param.definition if param.definition != null - } : {} + } # : {} name = each.key definition = jsonencode(each.value) @@ -50,8 +58,6 @@ resource "databricks_cluster" "this" { spark_conf = var.spark_conf spark_env_vars = var.spark_env_vars - policy_id = var.sku == "premium" ? one([for policy in var.custom_cluster_policies : databricks_cluster_policy.this[policy.name].id if policy.assigned]) : null - data_security_mode = var.data_security_mode node_type_id = var.node_type autotermination_minutes = var.autotermination_minutes @@ -86,3 +92,22 @@ resource "databricks_cluster" "this" { } } } +/* Premium +resource "databricks_workspace_conf" "this" { + count = local.ip_rules == null ? 0 : 1 + + custom_config = { + "enableIpAccessLists" : true + } +} + +resource "databricks_ip_access_list" "this" { + count = local.ip_rules == null ? 
0 : 1 + + label = "allow_in" + list_type = "ALLOW" + ip_addresses = local.ip_rules + + depends_on = [databricks_workspace_conf.this] +} +*/ \ No newline at end of file diff --git a/outputs.tf b/outputs.tf index d391ff0..93c21b6 100644 --- a/outputs.tf +++ b/outputs.tf @@ -16,7 +16,7 @@ output "cluster_policies_object" { } if policy.definition != null && var.sku == "premium"] description = "Databricks Cluster Policies object map" } - +/* output "secret_scope_object" { value = [for param in var.secret_scope : { scope_name = databricks_secret_scope.this[param.scope_name].name @@ -24,3 +24,4 @@ output "secret_scope_object" { } if param.acl != null] description = "Databricks-managed Secret Scope object map to create ACLs" } +*/ \ No newline at end of file diff --git a/secrets.tf b/secrets.tf index e27a218..ca63322 100644 --- a/secrets.tf +++ b/secrets.tf @@ -1,3 +1,4 @@ + locals { sp_secrets = { (var.sp_client_id_secret_name) = { value = data.azurerm_key_vault_secret.sp_client_id.value } @@ -14,7 +15,7 @@ locals { # Secret Scope with SP secrets for mounting Azure Data Lake Storage resource "databricks_secret_scope" "main" { name = "main" - initial_manage_principal = var.sku == "premium" ? null : "users" + initial_manage_principal = "users" #var.sku == "premium" ? null : "users" } resource "databricks_secret" "main" { @@ -33,7 +34,7 @@ resource "databricks_secret_scope" "this" { } name = each.key - initial_manage_principal = var.sku == "premium" ? null : "users" + initial_manage_principal = "users" } resource "databricks_secret" "this" { diff --git a/variables.tf b/variables.tf index b9b7dba..82aba77 100644 --- a/variables.tf +++ b/variables.tf @@ -216,3 +216,48 @@ EOT # dns_name = null # } #} + +# Identity Access Management variables +variable "user_object_ids" { + type = map(string) + description = "Map of AD usernames and corresponding object IDs" + default = {} +} + +variable "workspace_admins" { + type = object({ + user = list(string) + service_principal = list(string) + }) + description = "Provide users or service principals to grant them Admin permissions in Workspace." + default = { + user = null + service_principal = null + } +} + +variable "iam" { + type = map(object({ + user = optional(list(string)) + service_principal = optional(list(string)) + entitlements = optional(list(string)) + default_cluster_permission = optional(string) + })) + description = "Used to create workspace group. Map of group name and its parameters, such as users and service principals added to the group. Also possible to configure group entitlements." + default = {} + + validation { + condition = length([for item in values(var.iam)[*] : item.entitlements if item.entitlements != null]) != 0 ? alltrue([ + for entry in flatten(values(var.iam)[*].entitlements) : contains(["allow_cluster_create", "allow_instance_pool_create", "databricks_sql_access"], entry) if entry != null + ]) : true + error_message = "Entitlements validation. 
The only suitable values are: databricks_sql_access, allow_instance_pool_create, allow_cluster_create" + } +} + +/* Premium +variable "ip_rules" { + type = map(string) + description = "Map of IP addresses permitted for access to DB" + default = {} +} +*/ \ No newline at end of file From 0027f1bb02e46a616011d0e46aa8bd80702b5f5b Mon Sep 17 00:00:00 2001 From: dmytro_velychko3 Date: Fri, 31 Mar 2023 16:42:26 +0300 Subject: [PATCH 2/6] feat: delete resource databricks_secret --- iam.tf | 84 ------------------------------------ main.tf | 51 +++------------------- outputs.tf | 18 -------- secrets.tf | 57 +----------------------- variables.tf | 120 --------------------------------------------------- 5 files changed, 8 insertions(+), 322 deletions(-) delete mode 100644 iam.tf diff --git a/iam.tf b/iam.tf deleted file mode 100644 index 2136a33..0000000 --- a/iam.tf +++ /dev/null @@ -1,84 +0,0 @@ -locals { - admin_user_map = var.workspace_admins.user == null ? {} : { - for user in var.workspace_admins.user : "user.${user}" => user if user != null - } - - admin_sp_map = var.workspace_admins.service_principal == null ? {} : { - for sp in var.workspace_admins.service_principal : "service_principal.${sp}" => sp if sp != null - } - - members_object_list = concat( - flatten([for group, params in var.iam : [ - for pair in setproduct([group], params.user) : { - type = "user", group = pair[0], member = pair[1] - }] if params.user != null - ]), - flatten([for group, params in var.iam : [ - for pair in setproduct([group], params.service_principal) : { - type = "service_principal", group = pair[0], member = pair[1] - }] if params.service_principal != null - ]) - ) -} - -data "databricks_group" "admin" { - display_name = "admins" -} - -resource "databricks_group" "this" { - for_each = toset(keys(var.iam)) - - display_name = each.key - lifecycle { ignore_changes = [external_id, allow_cluster_create, allow_instance_pool_create, databricks_sql_access, workspace_access] } -} - -resource "databricks_user" "this" { - for_each = toset(flatten(concat( - values({ for group, member in var.iam : group => member.user if member.user != null }), - values(local.admin_user_map) - ))) - - user_name = each.key - lifecycle { ignore_changes = [external_id, allow_cluster_create, allow_instance_pool_create, databricks_sql_access, workspace_access] } -} - -resource "databricks_service_principal" "this" { - for_each = toset(flatten(concat( - values({ for group, member in var.iam : group => member.service_principal if member.service_principal != null }), - values(local.admin_sp_map) - ))) - - display_name = each.key - application_id = lookup(var.user_object_ids, each.value) - lifecycle { ignore_changes = [external_id, allow_cluster_create, allow_instance_pool_create, databricks_sql_access, workspace_access] } -} - -resource "databricks_group_member" "admin" { - for_each = merge(local.admin_user_map, local.admin_sp_map) - - group_id = data.databricks_group.admin.id - member_id = startswith(each.key, "user") ? databricks_user.this[each.value].id : databricks_service_principal.this[each.value].id -} - -resource "databricks_group_member" "this" { - for_each = { - for entry in local.members_object_list : "${entry.type}.${entry.group}.${entry.member}" => entry - } - - group_id = databricks_group.this[each.value.group].id - member_id = startswith(each.key, "user") ? 
databricks_user.this[each.value.member].id : databricks_service_principal.this[each.value.member].id -} - -resource "databricks_entitlements" "this" { - for_each = { - for group, params in var.iam : group => params - } - - group_id = databricks_group.this[each.key].id - allow_cluster_create = contains(coalesce(each.value.entitlements, ["none"]), "allow_cluster_create") - allow_instance_pool_create = contains(coalesce(each.value.entitlements, ["none"]), "allow_instance_pool_create") - databricks_sql_access = contains(coalesce(each.value.entitlements, ["none"]), "databricks_sql_access") - workspace_access = true - - depends_on = [databricks_group_member.this] -} diff --git a/main.tf b/main.tf index 54baab8..9087fb4 100644 --- a/main.tf +++ b/main.tf @@ -1,9 +1,3 @@ -/* Premium -locals { - ip_rules = var.ip_rules == null ? null : values(var.ip_rules) -} -*/ - data "azurerm_key_vault_secret" "sp_client_id" { name = var.sp_client_id_secret_name key_vault_id = var.key_vault_id @@ -19,19 +13,18 @@ data "azurerm_key_vault_secret" "tenant_id" { key_vault_id = var.key_vault_id } -resource "databricks_token" "pat" { # +resource "databricks_token" "pat" { comment = "Terraform Provisioning" lifetime_seconds = var.pat_token_lifetime_seconds } -#resource "databricks_user" "this" { # Only for 'Standard' SKU type -# #for_each = var.sku == "premium" ? [] : toset(var.users) -# for_each = toset(var.users) -# user_name = each.value -# lifecycle { ignore_changes = [external_id] } -#} +resource "databricks_user" "this" { + for_each = toset(var.users) + user_name = each.value + lifecycle { ignore_changes = [external_id] } +} -resource "azurerm_role_assignment" "this" { ### +resource "azurerm_role_assignment" "this" { for_each = { for permission in var.permissions : "${permission.object_id}-${permission.role}" => permission if permission.role != null @@ -41,17 +34,6 @@ resource "azurerm_role_assignment" "this" { ### principal_id = each.value.object_id } -resource "databricks_cluster_policy" "this" { - #for_each = var.sku == "premium" ? { - for_each = { - for param in var.custom_cluster_policies : (param.name) => param.definition - if param.definition != null - } # : {} - - name = each.key - definition = jsonencode(each.value) -} - resource "databricks_cluster" "this" { cluster_name = var.custom_default_cluster_name == null ? "shared autoscaling" : var.custom_default_cluster_name spark_version = var.spark_version @@ -92,22 +74,3 @@ resource "databricks_cluster" "this" { } } } -/* Premium -resource "databricks_workspace_conf" "this" { - count = local.ip_rules == null ? 0 : 1 - - custom_config = { - "enableIpAccessLists" : true - } -} - -resource "databricks_ip_access_list" "this" { - count = local.ip_rules == null ? 
0 : 1 - - label = "allow_in" - list_type = "ALLOW" - ip_addresses = local.ip_rules - - depends_on = [databricks_workspace_conf.this] -} -*/ \ No newline at end of file diff --git a/outputs.tf b/outputs.tf index 93c21b6..ce5b1c5 100644 --- a/outputs.tf +++ b/outputs.tf @@ -7,21 +7,3 @@ output "cluster_id" { value = databricks_cluster.this.id description = "Databricks Cluster Id" } - -output "cluster_policies_object" { - value = [for policy in var.custom_cluster_policies : { - id = databricks_cluster_policy.this[policy.name].id - name = databricks_cluster_policy.this[policy.name].name - can_use = policy.can_use - } if policy.definition != null && var.sku == "premium"] - description = "Databricks Cluster Policies object map" -} -/* -output "secret_scope_object" { - value = [for param in var.secret_scope : { - scope_name = databricks_secret_scope.this[param.scope_name].name - acl = param.acl - } if param.acl != null] - description = "Databricks-managed Secret Scope object map to create ACLs" -} -*/ \ No newline at end of file diff --git a/secrets.tf b/secrets.tf index ca63322..be068a3 100644 --- a/secrets.tf +++ b/secrets.tf @@ -1,21 +1,14 @@ - locals { sp_secrets = { (var.sp_client_id_secret_name) = { value = data.azurerm_key_vault_secret.sp_client_id.value } (var.sp_key_secret_name) = { value = data.azurerm_key_vault_secret.sp_key.value } } - - secrets_objects_list = flatten([for param in var.secret_scope : [ - for secret in param.secrets : { - scope_name = param.scope_name, key = secret.key, string_value = secret.string_value - }] if param.secrets != null - ]) } # Secret Scope with SP secrets for mounting Azure Data Lake Storage resource "databricks_secret_scope" "main" { name = "main" - initial_manage_principal = "users" #var.sku == "premium" ? null : "users" + initial_manage_principal = "users" } resource "databricks_secret" "main" { @@ -25,51 +18,3 @@ resource "databricks_secret" "main" { string_value = each.value["value"] scope = databricks_secret_scope.main.id } - -# Custom additional Databricks Secret Scope -resource "databricks_secret_scope" "this" { - for_each = { - for param in var.secret_scope : (param.scope_name) => param - if param.scope_name != null - } - - name = each.key - initial_manage_principal = "users" -} - -resource "databricks_secret" "this" { - for_each = { for entry in local.secrets_objects_list : "${entry.scope_name}.${entry.key}" => entry } - - key = each.value.key - string_value = each.value.string_value - scope = databricks_secret_scope.this[each.value.scope_name].id -} - -# At the nearest future, Azure will allow acquiring AAD tokens by service principals, -# thus providing an ability to create Azure backed Key Vault with Terraform -# https://github.com/databricks/terraform-provider-databricks/pull/1965 - -## Azure Key Vault-backed Scope -#resource "azurerm_key_vault_access_policy" "databricks" { -# count = var.key_vault_secret_scope.key_vault_id != null ? 1 : 0 - -# key_vault_id = var.key_vault_secret_scope.key_vault_id -# object_id = "9b38785a-6e08-4087-a0c4-20634343f21f" # Global 'AzureDatabricks' SP object id -# tenant_id = data.azurerm_key_vault_secret.tenant_id.value -# -# secret_permissions = [ -# "Get", -# "List", -# ] -#} -# -#resource "databricks_secret_scope" "external" { -# count = var.key_vault_secret_scope.key_vault_id != null ? 
1 : 0 -# -# name = "external" -# keyvault_metadata { -# resource_id = var.key_vault_secret_scope.key_vault_id -# dns_name = var.key_vault_secret_scope.dns_name -# } -# depends_on = [azurerm_key_vault_access_policy.databricks] -#} diff --git a/variables.tf b/variables.tf index 82aba77..f11df95 100644 --- a/variables.tf +++ b/variables.tf @@ -23,12 +23,6 @@ variable "key_vault_id" { description = "ID of the Key Vault instance where the Secret resides" } -variable "sku" { - type = string - description = "The sku to use for the Databricks Workspace: [standard|premium|trial]" - default = "standard" -} - variable "pat_token_lifetime_seconds" { type = number description = "The lifetime of the token, in seconds. If no lifetime is specified, the token remains valid indefinitely" @@ -52,33 +46,6 @@ variable "permissions" { ] } -# Cluster policy variables -variable "custom_cluster_policies" { - type = list(object({ - name = string - can_use = list(string) - definition = any - assigned = bool - })) - description = <<-EOT -Provides an ability to create custom cluster policy, assign it to cluster and grant CAN_USE permissions on it to certain custom groups -name - name of custom cluster policy to create -can_use - list of string, where values are custom group names, there groups have to be created with Terraform; -definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; -assigned - boolean flag which assigns policy to default 'shared autoscaling' cluster, only single custom policy could be assigned; -EOT - default = [{ - name = null - can_use = null - definition = null - assigned = false - }] - validation { - condition = length([for policy in var.custom_cluster_policies : policy.assigned if policy.assigned]) <= 1 - error_message = "Only single cluster policy assignment allowed. 
Please set 'assigned' parameter to 'true' for exact one or none policy" - } -} - # Shared autoscaling cluster config variables variable "cluster_nodes_availability" { type = string @@ -174,90 +141,3 @@ variable "mountpoints" { description = "Mountpoints for databricks" default = {} } - -# Secret Scope variables -variable "secret_scope" { - type = list(object({ - scope_name = string - acl = optional(list(object({ - principal = string - permission = string - }))) - secrets = optional(list(object({ - key = string - string_value = string - }))) - })) - description = <<-EOT -Provides an ability to create custom Secret Scope, store secrets in it and assigning ACL for access management -scope_name - name of Secret Scope to create; -acl - list of objects, where 'principal' custom group name, this group is created in 'Premium' module; 'permission' is one of "READ", "WRITE", "MANAGE"; -secrets - list of objects, where object's 'key' param is created key name and 'string_value' is a value for it; -EOT - default = [{ - scope_name = null - acl = null - secrets = null - }] -} - -# At the nearest future, Azure will allow acquiring AAD tokens by service principals, -# thus providing an ability to create Azure backed Key Vault with Terraform -# https://github.com/databricks/terraform-provider-databricks/pull/1965 - -#variable "key_vault_secret_scope" { -# type = object({ -# key_vault_id = string -# dns_name = string -# }) -# description = "Object with Azure Key Vault parameters required for creation of Azure-backed Databricks Secret scope" -# default = { -# key_vault_id = null -# dns_name = null -# } -#} - -# Identity Access Management variables -variable "user_object_ids" { - type = map(string) - description = "Map of AD usernames and corresponding object IDs" - default = {} -} - -variable "workspace_admins" { - type = object({ - user = list(string) - service_principal = list(string) - }) - description = "Provide users or service principals to grant them Admin permissions in Workspace." - default = { - user = null - service_principal = null - } -} - -variable "iam" { - type = map(object({ - user = optional(list(string)) - service_principal = optional(list(string)) - entitlements = optional(list(string)) - default_cluster_permission = optional(string) - })) - description = "Used to create workspace group. Map of group name and its parameters, such as users and service principals added to the group. Also possible to configure group entitlements." - default = {} - - validation { - condition = length([for item in values(var.iam)[*] : item.entitlements if item.entitlements != null]) != 0 ? alltrue([ - for entry in flatten(values(var.iam)[*].entitlements) : contains(["allow_cluster_create", "allow_instance_pool_create", "databricks_sql_access"], entry) if entry != null - ]) : true - error_message = "Entitlements validation. 
The only suitable values are: databricks_sql_access, allow_instance_pool_create, allow_cluster_create" - } -} - -/* Premium -variable "ip_rules" { - type = map(string) - description = "Map of IP addresses permitted for access to DB" - default = {} -} -*/ \ No newline at end of file From 9859ab05cf5770dd2da04a38b65aab0576fa9d68 Mon Sep 17 00:00:00 2001 From: dmytro_velychko3 Date: Wed, 5 Apr 2023 02:29:17 +0300 Subject: [PATCH 3/6] feat: refactoring --- secrets.tf | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/secrets.tf b/secrets.tf index be068a3..330cceb 100644 --- a/secrets.tf +++ b/secrets.tf @@ -3,6 +3,12 @@ locals { (var.sp_client_id_secret_name) = { value = data.azurerm_key_vault_secret.sp_client_id.value } (var.sp_key_secret_name) = { value = data.azurerm_key_vault_secret.sp_key.value } } + + secrets_objects_list = flatten([for param in var.secret_scope : [ + for secret in param.secrets : { + scope_name = param.scope_name, key = secret.key, string_value = secret.string_value + }] if param.secrets != null + ]) } # Secret Scope with SP secrets for mounting Azure Data Lake Storage @@ -18,3 +24,51 @@ resource "databricks_secret" "main" { string_value = each.value["value"] scope = databricks_secret_scope.main.id } + +# Custom additional Databricks Secret Scope +resource "databricks_secret_scope" "this" { + for_each = { + for param in var.secret_scope : (param.scope_name) => param + if param.scope_name != null + } + + name = each.key + initial_manage_principal = "users" +} + +resource "databricks_secret" "this" { + for_each = { for entry in local.secrets_objects_list : "${entry.scope_name}.${entry.key}" => entry } + + key = each.value.key + string_value = each.value.string_value + scope = databricks_secret_scope.this[each.value.scope_name].id +} + +# At the nearest future, Azure will allow acquiring AAD tokens by service principals, +# thus providing an ability to create Azure backed Key Vault with Terraform +# https://github.com/databricks/terraform-provider-databricks/pull/1965 + +## Azure Key Vault-backed Scope +#resource "azurerm_key_vault_access_policy" "databricks" { +# count = var.key_vault_secret_scope.key_vault_id != null ? 1 : 0 + +# key_vault_id = var.key_vault_secret_scope.key_vault_id +# object_id = "9b38785a-6e08-4087-a0c4-20634343f21f" # Global 'AzureDatabricks' SP object id +# tenant_id = data.azurerm_key_vault_secret.tenant_id.value +# +# secret_permissions = [ +# "Get", +# "List", +# ] +#} +# +#resource "databricks_secret_scope" "external" { +# count = var.key_vault_secret_scope.key_vault_id != null ? 1 : 0 +# +# name = "external" +# keyvault_metadata { +# resource_id = var.key_vault_secret_scope.key_vault_id +# dns_name = var.key_vault_secret_scope.dns_name +# } +# depends_on = [azurerm_key_vault_access_policy.databricks] +#} \ No newline at end of file From f6096a04f5bba1f613bc9ca5c730b74b0a7c12bd Mon Sep 17 00:00:00 2001 From: dmytro_velychko3 Date: Wed, 5 Apr 2023 11:43:31 +0300 Subject: [PATCH 4/6] docs: changed readmi --- README.md | 7 +------ variables.tf | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index c8b5b84..e1f80f2 100644 --- a/README.md +++ b/README.md @@ -117,8 +117,7 @@ No modules. 
| [databricks_token.pat](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/token) | resource | | [databricks_user.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/user) | resource | | [azurerm_role_assignment.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [databricks_cluster_policy.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster_policy) | resource | -| [databricks_cluster.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster) | resource | +| [databricks_cluster.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster) | resource | | [databricks_mount.adls](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/mount) | resource | | [databricks_secret_scope.main](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/secret_scope) | resource | | [databricks_secret.main](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/secret) | resource | @@ -134,11 +133,9 @@ No modules. | [sp\_key\_secret\_name](#input\_sp\_key\_secret\_name) | The name of Azure Key Vault secret that contains client secret of Service Principal to access in Azure Key Vault | `string` | n/a | yes | | [tenant\_id\_secret\_name](#input\_tenant\_id\_secret\_name) | The name of Azure Key Vault secret that contains tenant ID secret of Service Principal to access in Azure Key Vault | `string` | n/a | yes | | [key\_vault\_id](#input\_key\_vault\_id) | ID of the Key Vault instance where the Secret resides | `string` | n/a | yes | -| [sku](#input\_sku) | The sku to use for the Databricks Workspace: [standard \ premium \ trial] | `string` | "standard" | no | | [pat\_token\_lifetime\_seconds](#input\_pat\_token\_lifetime\_seconds) | The lifetime of the token, in seconds. If no lifetime is specified, the token remains valid indefinitely | `number` | 315569520 | no | | [users](#input\_users) | List of users to access Databricks | `list(string)` | [] | no | | [permissions](#input\_permissions) | Databricks Workspace permission maps | `list(map(string))` |
<pre>[{<br>   object_id = null<br>   role = null<br>}]</pre> | no |
-| [custom\_cluster\_policies](#input\_custom\_cluster\_policies) | Provides an ability to create custom cluster policy, assign it to cluster and grant CAN_USE permissions on it to certain custom groups | <pre>list(object({<br>   name = string<br>   can_use = list(string)<br>   definition = any<br>   assigned = bool<br>}))</pre> | <pre>[{<br>   name = null<br>   can_use = null<br>   definition = null<br>   assigned = false<br>}]</pre>
| no | | [cluster\_nodes\_availability](#input\_cluster\_nodes\_availability) | Availability type used for all subsequent nodes past the first_on_demand ones: [SPOT_AZURE \ SPOT_WITH_FALLBACK_AZURE \ ON_DEMAND_AZURE] | `string` | null | no | | [first\_on\_demand](#input\_first\_on\_demand) | The first first_on_demand nodes of the cluster will be placed on on-demand instances: [[ \:number ]] | `number` | 0 | no | | [spot\_bid\_max\_price](#input\_spot\_bid\_max\_price) | The max price for Azure spot instances. Use -1 to specify lowest price | `number` | -1 | no | @@ -163,8 +160,6 @@ No modules. | ------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------- | | [token](#output\_token) | Databricks Personal Authorization Token | | [cluster\_id](#output\_cluster\_id) | Databricks Cluster Id | -| [cluster\_policies\_object](#output\_cluster\_policies\_object) | Databricks Cluster Policies object map | -| [secret_scope\_object](#output\_secret_scope\_object) | Databricks-managed Secret Scope object map to create ACLs | ## License diff --git a/variables.tf b/variables.tf index f11df95..387ede7 100644 --- a/variables.tf +++ b/variables.tf @@ -141,3 +141,45 @@ variable "mountpoints" { description = "Mountpoints for databricks" default = {} } + +# Secret Scope variables +variable "secret_scope" { + type = list(object({ + scope_name = string + acl = optional(list(object({ + principal = string + permission = string + }))) + secrets = optional(list(object({ + key = string + string_value = string + }))) + })) + description = <<-EOT +Provides an ability to create custom Secret Scope, store secrets in it and assigning ACL for access management +scope_name - name of Secret Scope to create; +acl - list of objects, where 'principal' custom group name, this group is created in 'Premium' module; 'permission' is one of "READ", "WRITE", "MANAGE"; +secrets - list of objects, where object's 'key' param is created key name and 'string_value' is a value for it; +EOT + default = [{ + scope_name = null + acl = null + secrets = null + }] +} + +# At the nearest future, Azure will allow acquiring AAD tokens by service principals, +# thus providing an ability to create Azure backed Key Vault with Terraform +# https://github.com/databricks/terraform-provider-databricks/pull/1965 + +#variable "key_vault_secret_scope" { +# type = object({ +# key_vault_id = string +# dns_name = string +# }) +# description = "Object with Azure Key Vault parameters required for creation of Azure-backed Databricks Secret scope" +# default = { +# key_vault_id = null +# dns_name = null +# } +#} \ No newline at end of file From e071f79b7f30405ce8f43eb66e056a7fa9402dcd Mon Sep 17 00:00:00 2001 From: dmytro_velychko3 Date: Wed, 5 Apr 2023 12:02:08 +0300 Subject: [PATCH 5/6] fix: add end line --- secrets.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/secrets.tf b/secrets.tf index 330cceb..dbccafa 100644 --- a/secrets.tf +++ b/secrets.tf @@ -71,4 +71,4 @@ resource "databricks_secret" "this" { # dns_name = var.key_vault_secret_scope.dns_name # } # depends_on = [azurerm_key_vault_access_policy.databricks] -#} \ No newline at end of file +#} From 63970cb4cc0c91411fef6d7b7af5d0a4708a6142 Mon Sep 17 00:00:00 2001 From: dmytro_velychko3 Date: Wed, 5 Apr 2023 12:03:25 +0300 Subject: [PATCH 6/6] fix: add new line --- variables.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/variables.tf b/variables.tf index 
387ede7..e894e16 100644 --- a/variables.tf +++ b/variables.tf @@ -182,4 +182,4 @@ EOT # key_vault_id = null # dns_name = null # } -#} \ No newline at end of file +#}
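
Note on the series as a whole: after PATCH 6/6 the module no longer exposes `sku`, `custom_cluster_policies`, or the `iam`/`workspace_admins` group management introduced in PATCH 1/6; what remains is the PAT token, workspace users, role assignments, the shared autoscaling cluster, mountpoints, and the custom secret scopes re-added in PATCH 3/6. Below is a minimal sketch of a caller against that final interface — the module source path, e-mail addresses, Key Vault secret names, and secret values are illustrative placeholders, not taken from the series:

# Hypothetical usage of the module as it stands after PATCH 6/6.
# Source path, user names, and values below are assumptions.
module "databricks_runtime" {
  source = "../terraform-databricks-runtime" # assumed checkout path

  # Names of existing Key Vault secrets holding the Service Principal
  # credentials, read by the data sources in main.tf
  sp_client_id_secret_name = "sp-client-id"
  sp_key_secret_name       = "sp-key"
  tenant_id_secret_name    = "tenant-id"
  key_vault_id             = azurerm_key_vault.example.id # assumed resource

  # Workspace users created by databricks_user.this
  users = ["user1@example.com", "user2@example.com"]

  # Custom Databricks-managed secret scope, re-introduced in PATCH 3/6;
  # 'acl' is optional and omitted here
  secret_scope = [{
    scope_name = "demo"
    secrets = [{
      key          = "storage-account-key"
      string_value = "placeholder-value"
    }]
  }]
}

All remaining inputs (permissions, cluster sizing, mountpoints, PAT token lifetime) fall back to the defaults declared in variables.tf.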