# Set default data_security_mode to "SINGLE_USER" in bundle templates (#2372)

## Changes

1. Change the **default-python** bundle template to set `data_security_mode` of a cluster to `SINGLE_USER` (sketched below).
2. Change the **experimental-jobs-as-code** bundle template to set `data_security_mode` of a cluster to `SINGLE_USER`.

## Why

Explicitly adding this field saves experienced users from confusion about which security mode is applied to the cluster.

## Tests

Changed existing unit and integration tests to pass with this change.
Commit: 428e730c9e
Parent: 9659f91c9f
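For orientation, here is a minimal sketch of a bundle job cluster with the field set explicitly, assuming the default-python template layout; the resource key `my_project_job` and the job cluster key are illustrative placeholders, and the remaining values mirror the defaults visible in the diffs below.

```yaml
# Minimal sketch only: resource key and job name are placeholders;
# spark_version, node_type_id, and autoscale sizes follow the template defaults.
resources:
  jobs:
    my_project_job:
      name: my_project_job
      job_clusters:
        - job_cluster_key: job_cluster
          new_cluster:
            spark_version: 15.4.x-scala2.12
            node_type_id: i3.xlarge
            data_security_mode: SINGLE_USER
            autoscale:
              min_workers: 1
              max_workers: 4
```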
```diff
@@ -44,6 +44,7 @@ resources:
           new_cluster:
             spark_version: 15.4.x-scala2.12
             node_type_id: i3.xlarge
+            data_security_mode: SINGLE_USER
             autoscale:
               min_workers: 1
               max_workers: 4
```
```diff
@@ -34,6 +34,7 @@ Warning: Ignoring Databricks CLI version constraint for development build. Requi
                     "max_workers": 4,
                     "min_workers": 1
                 },
+                "data_security_mode": "SINGLE_USER",
                 "node_type_id": "i3.xlarge",
                 "spark_version": "15.4.x-scala2.12"
             }
```
```diff
@@ -56,6 +56,7 @@ my_jobs_as_code_job = Job.from_dict(
             "new_cluster": {
                 "spark_version": "15.4.x-scala2.12",
                 "node_type_id": "i3.xlarge",
+                "data_security_mode": "SINGLE_USER",
                 "autoscale": {
                     "min_workers": 1,
                     "max_workers": 4,
```
```diff
@@ -82,6 +82,7 @@
                     "max_workers": 4,
                     "min_workers": 1
                 },
+                "data_security_mode": "SINGLE_USER",
                 "node_type_id": "i3.xlarge",
                 "spark_version": "15.4.x-scala2.12"
             }
```
```diff
@@ -69,6 +69,7 @@ resources:
           new_cluster:
             spark_version: {{template "latest_lts_dbr_version"}}
             node_type_id: {{smallest_node_type}}
+            data_security_mode: SINGLE_USER
             autoscale:
               min_workers: 1
               max_workers: 4
```
```diff
@@ -97,6 +97,7 @@ This job runs {{.project_name}}_pipeline on a schedule.
            "new_cluster": {
                "spark_version": "{{template "latest_lts_dbr_version"}}",
                "node_type_id": "{{smallest_node_type}}",
+               "data_security_mode": "SINGLE_USER",
                "autoscale": {
                    "min_workers": 1,
                    "max_workers": 4,
```