-
Notifications
You must be signed in to change notification settings - Fork 1
/
variables.tf
266 lines (243 loc) · 9.35 KB
/
variables.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
# Firewall allow-list: entries are added to the database's IP access rules.
# NOTE(review): presumably keys are rule names and values are IP/CIDR strings —
# confirm against the resource that consumes this map.
variable "ip_rules" {
type = map(string)
description = "Map of IP addresses permitted for access to DB"
default = {}
}
# Appended to resource names for uniqueness; the empty-string default means no suffix.
variable "suffix" {
type = string
description = "Optional suffix that would be added to the end of resources names."
default = ""
}
# Identity Access Management variables
# NOTE(review): presumably keys are AD usernames and values are the corresponding
# Azure AD object IDs — verify against the calling module.
variable "user_object_ids" {
type = map(string)
description = "Map of AD usernames and corresponding object IDs"
default = {}
}
# Principals to receive Admin permissions in the workspace. Both attributes are
# mandatory in the object type (no optional()), so the default supplies explicit
# nulls to let callers omit the variable entirely.
variable "workspace_admins" {
type = object({
user = list(string)
service_principal = list(string)
})
description = "Provide users or service principals to grant them Admin permissions in Workspace."
default = {
user = null
service_principal = null
}
}
# Account-level groups to reference, each with its entitlements. Unlike
# iam_workspace_groups below, entitlement values are not validated here.
variable "iam_account_groups" {
type = list(object({
group_name = optional(string)
entitlements = optional(list(string))
}))
description = "List of objects with group name and entitlements for this group"
default = []
}
# Workspace-level groups keyed by group name; each entry lists member users,
# member service principals, and optional entitlements for the group.
variable "iam_workspace_groups" {
type = map(object({
user = optional(list(string))
service_principal = optional(list(string))
entitlements = optional(list(string))
}))
description = "Used to create workspace group. Map of group name and its parameters, such as users and service principals added to the group. Also possible to configure group entitlements."
default = {}
validation {
# If at least one group declares a non-null entitlements list, every non-null
# entitlement value across all groups must be one of the three allowed strings;
# when no group declares entitlements the check short-circuits to true.
# The trailing `if entry != null` filters out nulls contributed by groups
# whose entitlements attribute was left unset.
condition = length([for item in values(var.iam_workspace_groups)[*] : item.entitlements if item.entitlements != null]) != 0 ? alltrue([
for entry in flatten(values(var.iam_workspace_groups)[*].entitlements) : contains(["allow_cluster_create", "allow_instance_pool_create", "databricks_sql_access"], entry) if entry != null
]) : true
error_message = "Entitlements validation. The only suitable values are: databricks_sql_access, allow_instance_pool_create, allow_cluster_create"
}
}
# SQL Endpoint variables
# Each element configures one Databricks SQL endpoint (warehouse) plus optional
# per-group access grants. All attributes except `name` carry provider-aligned
# defaults so callers only specify what they override.
variable "sql_endpoint" {
  type = set(object({
    name                      = string
    cluster_size              = optional(string, "2X-Small")
    min_num_clusters          = optional(number, 0)
    max_num_clusters          = optional(number, 1)
    # Minutes of inactivity before the endpoint auto-stops. Declared as number
    # to match the provider's auto_stop_mins argument (was string "30"; Terraform
    # still auto-converts callers that pass a numeric string, so this is
    # backward-compatible).
    auto_stop_mins            = optional(number, 30)
    enable_photon             = optional(bool, false)
    enable_serverless_compute = optional(bool, false)
    spot_instance_policy      = optional(string, "COST_OPTIMIZED")
    warehouse_type            = optional(string, "PRO")
    # Access grants: Databricks group name -> permission level for this endpoint.
    permissions = optional(set(object({
      group_name       = string
      permission_level = string
    })), [])
  }))
  description = "Set of objects with parameters to configure SQL Endpoint and assign permissions to it for certain custom groups"
  default     = []
}
# Secret Scope variables
# Databricks-backed secret scopes, each with optional ACLs and optional inline secrets.
variable "secret_scope" {
type = list(object({
scope_name = string
acl = optional(list(object({
principal = string
permission = string
})))
secrets = optional(list(object({
key = string
string_value = string
})))
}))
description = <<-EOT
Provides an ability to create custom Secret Scope, store secrets in it and assigning ACL for access management
scope_name - name of Secret Scope to create;
acl - list of objects, where 'principal' custom group name, this group is created in 'Premium' module; 'permission' is one of "READ", "WRITE", "MANAGE";
secrets - list of objects, where object's 'key' param is created key name and 'string_value' is a value for it;
EOT
# NOTE(review): the default is a single all-null placeholder element rather than
# an empty list — downstream code presumably filters on scope_name != null;
# confirm before changing this to [].
default = [{
scope_name = null
acl = null
secrets = null
}]
}
# Azure Key Vault-backed Secret Scope
# NOTE(review): the object id of the 'AzureDatabricks' first-party service
# principal is tenant-specific (unlike its application id) — confirm this
# hard-coded default matches the target tenant, or override it.
variable "global_databricks_sp_object_id" {
type = string
description = "Global 'AzureDatabricks' SP object id. Used to create Key Vault Access Policy for Secret Scope"
default = "9b38785a-6e08-4087-a0c4-20634343f21f"
}
# Toggle for granting the Databricks global SP read access to the Key Vault
# backing the secret scope(s) below.
variable "create_databricks_access_policy_to_key_vault" {
type = bool
description = "Boolean flag to enable creation of Key Vault Access Policy for Databricks Global Service Principal."
default = true
}
# One element per Azure Key Vault-backed scope: the scope name plus the vault's
# resource id, DNS name, and tenant.
variable "key_vault_secret_scope" {
type = list(object({
name = string
key_vault_id = string
dns_name = string
tenant_id = string
}))
description = "Object with Azure Key Vault parameters required for creation of Azure-backed Databricks Secret scope"
default = []
}
# Custom cluster policies plus the groups granted CAN_USE on each.
# `definition` is typed `any` because it is a raw Policy Definition Language
# document (the module, per the description, handles jsonencode itself).
variable "custom_cluster_policies" {
type = list(object({
name = string
can_use = list(string)
definition = any
}))
description = <<-EOT
Provides an ability to create custom cluster policy, assign it to cluster and grant CAN_USE permissions on it to certain custom groups
name - name of custom cluster policy to create
can_use - list of string, where values are custom group names, there groups have to be created with Terraform;
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value;
EOT
# NOTE(review): all-null placeholder default (same pattern as secret_scope) —
# downstream presumably filters on name != null; confirm before changing to [].
default = [{
name = null
can_use = null
definition = null
}]
}
# Databricks cluster definitions, one object per cluster, including autoscaling
# bounds, spot/on-demand mix, init scripts, libraries, and per-group permissions.
variable "clusters" {
type = set(object({
cluster_name = string
spark_version = optional(string, "13.3.x-scala2.12")
spark_conf = optional(map(any), {})
# NOTE(review): "passthrought" is a typo for "passthrough", but this attribute
# name is part of the module's public interface — renaming would break callers.
cluster_conf_passthrought = optional(bool, false)
spark_env_vars = optional(map(any), {})
data_security_mode = optional(string, "USER_ISOLATION")
node_type_id = optional(string, "Standard_D3_v2")
autotermination_minutes = optional(number, 30)
min_workers = optional(number, 1)
max_workers = optional(number, 2)
availability = optional(string, "ON_DEMAND_AZURE")
first_on_demand = optional(number, 0)
# NOTE(review): presumably the Azure spot max price; the meaning of the
# default value 1 (fraction vs. currency) should be confirmed against the
# provider's azure_attributes documentation.
spot_bid_max_price = optional(number, 1)
cluster_log_conf_destination = optional(string, null)
# Init scripts grouped by source: workspace files, Unity Catalog volumes,
# DBFS paths, and ABFSS URIs.
init_scripts_workspace = optional(set(string), [])
init_scripts_volumes = optional(set(string), [])
init_scripts_dbfs = optional(set(string), [])
init_scripts_abfss = optional(set(string), [])
single_user_name = optional(string, null)
single_node_enable = optional(bool, false)
custom_tags = optional(map(string), {})
# Access grants: Databricks group name -> permission level for this cluster.
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
# Libraries to install: PyPI package specs and Maven coordinates (with exclusions).
pypi_library_repository = optional(set(string), [])
maven_library_repository = optional(set(object({
coordinates = string
exclusions = set(string)
})), [])
}))
description = "Set of objects with parameters to configure Databricks clusters and assign permissions to it for certain custom groups"
default = []
}
# Lifetime of the personal access token created by this module.
variable "pat_token_lifetime_seconds" {
  type        = number
  # The previous description claimed "if no lifetime is specified, the token
  # remains valid indefinitely", which is misleading here: this variable always
  # supplies a concrete default, so a lifetime is always specified.
  description = "The lifetime of the token, in seconds. Defaults to 315569520 (approximately 10 years)."
  default     = 315569520
}
# Mount ADLS Gen2 Filesystem
# Master switch: when false, none of the mount_* variables below take effect.
variable "mount_enabled" {
type = bool
description = "Boolean flag that determines whether mount point for storage account filesystem is created"
default = false
}
variable "mount_service_principal_client_id" {
type = string
description = "Application(client) Id of Service Principal used to perform storage account mounting"
default = null
}
# Marked sensitive so the secret value is redacted from plan/apply output.
variable "mount_service_principal_secret" {
type = string
description = "Service Principal Secret used to perform storage account mounting"
default = null
sensitive = true
}
variable "mount_service_principal_tenant_id" {
type = string
description = "Service Principal tenant id used to perform storage account mounting"
default = null
}
# NOTE(review): presumably keyed by mount-point name; each entry identifies the
# storage account and container to mount — confirm against the mounting resource.
variable "mountpoints" {
type = map(object({
storage_account_name = string
container_name = string
}))
description = "Mountpoints for databricks"
default = {}
}
# When true, mounts use AAD credential passthrough instead of the service
# principal above; requires mount_cluster_name to point at a cluster created
# with cluster_conf_passthrought = true.
variable "mount_adls_passthrough" {
type = bool
description = "Boolean flag to use mount options for credentials passthrough. Should be used with mount_cluster_name, specified cluster should have option cluster_conf_passthrought == true"
default = false
}
variable "mount_cluster_name" {
type = string
description = "Name of the cluster that will be used during storage mounting. If mount_adls_passthrough == true, cluster should also have option cluster_conf_passthrought == true"
default = null
}
# Unity Catalog system schemas to enable when system_schemas_enabled is true.
variable "system_schemas" {
type = set(string)
description = "Set of strings with all possible System Schema names"
default = ["access", "billing", "compute", "marketplace", "storage"]
}
# Gate for the above; requires the workspace to have a Unity Catalog metastore
# assigned (per the description).
variable "system_schemas_enabled" {
type = bool
description = "System Schemas only works with assigned Unity Catalog Metastore. Boolean flag to enabled this feature"
default = false
}
# Overrides for Databricks' built-in (policy-family-based) cluster policies,
# matched by policy family id. Mirrors the custom_cluster_policies shape above.
variable "default_cluster_policies_override" {
type = list(object({
name = string
family_id = string
definition = any
}))
description = <<-EOT
Provides an ability to override default cluster policy
name - name of cluster policy to override
family_id - family id of corresponding policy
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value;
EOT
# NOTE(review): all-null placeholder default (same pattern as secret_scope and
# custom_cluster_policies) — downstream presumably filters on name != null.
default = [{
name = null
family_id = null
definition = null
}]
}