mirror of https://github.com/databricks/cli.git
Merge branch 'main' of github.com:databricks/cli into feat/config-reference-doc-autogen
commit 40c4b3a40a

@ -1 +1 @@
-7016dcbf2e011459416cf408ce21143bcc4b3a25
+a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d
@ -8,6 +8,7 @@ cmd/account/custom-app-integration/custom-app-integration.go linguist-generated=true
 cmd/account/disable-legacy-features/disable-legacy-features.go linguist-generated=true
 cmd/account/encryption-keys/encryption-keys.go linguist-generated=true
 cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true
+cmd/account/federation-policy/federation-policy.go linguist-generated=true
 cmd/account/groups/groups.go linguist-generated=true
 cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true
 cmd/account/log-delivery/log-delivery.go linguist-generated=true
@ -19,6 +20,7 @@ cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true
 cmd/account/personal-compute/personal-compute.go linguist-generated=true
 cmd/account/private-access/private-access.go linguist-generated=true
 cmd/account/published-app-integration/published-app-integration.go linguist-generated=true
+cmd/account/service-principal-federation-policy/service-principal-federation-policy.go linguist-generated=true
 cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true
 cmd/account/service-principals/service-principals.go linguist-generated=true
 cmd/account/settings/settings.go linguist-generated=true
@ -0,0 +1,32 @@
+name: integration-approve
+
+on:
+  merge_group:
+
+jobs:
+  # Trigger for merge groups.
+  #
+  # Statuses and checks apply to specific commits (by hash).
+  # Enforcement of required checks is done both at the PR level and the merge queue level.
+  # In case of multiple commits in a single PR, the hash of the squashed commit
+  # will not match the one for the latest (approved) commit in the PR.
+  #
+  # We auto approve the check for the merge queue for two reasons:
+  #
+  # * Queue times out due to duration of tests.
+  # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing.
+  #
+  trigger:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Auto-approve squashed commit
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        shell: bash
+        run: |
+          gh api -X POST -H "Accept: application/vnd.github+json" \
+            -H "X-GitHub-Api-Version: 2022-11-28" \
+            /repos/${{ github.repository }}/statuses/${{ github.sha }} \
+            -f 'state=success' \
+            -f 'context=Integration Tests Check'
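A posted commit status can be read back through the same API. A rough sketch of a verification step (not part of this change; it only lists the statuses created above, using the standard "list commit statuses" endpoint):

      - name: List statuses for this commit   # hypothetical verification step
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          gh api /repos/${{ github.repository }}/commits/${{ github.sha }}/statuses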
@ -0,0 +1,33 @@
+name: integration-main
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  # Trigger for pushes to the main branch.
+  #
+  # This workflow triggers the integration test workflow in a different repository.
+  # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users.
+  trigger:
+    runs-on: ubuntu-latest
+    environment: "test-trigger-is"
+
+    steps:
+      - name: Generate GitHub App Token
+        id: generate-token
+        uses: actions/create-github-app-token@v1
+        with:
+          app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}
+          private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }}
+          owner: ${{ secrets.ORG_NAME }}
+          repositories: ${{secrets.REPO_NAME}}
+
+      - name: Trigger Workflow in Another Repo
+        env:
+          GH_TOKEN: ${{ steps.generate-token.outputs.token }}
+        run: |
+          gh workflow run cli-isolated-nightly.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \
+            --ref main \
+            -f commit_sha=${{ github.event.after }}
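The dispatched run lives in the other repository, so its progress is not visible from this workflow. Assuming the generated token has access there, a hypothetical follow-up step (not part of this change) could list recent runs of the triggered workflow:

      - name: Show recent cli-isolated-nightly runs   # hypothetical follow-up step
        env:
          GH_TOKEN: ${{ steps.generate-token.outputs.token }}
        run: |
          gh run list -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} --workflow cli-isolated-nightly.yml --limit 5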
@ -0,0 +1,56 @@
+name: integration-pr
+
+on:
+  pull_request:
+    types: [opened, synchronize]
+
+jobs:
+  check-token:
+    runs-on: ubuntu-latest
+    environment: "test-trigger-is"
+
+    outputs:
+      has_token: ${{ steps.set-token-status.outputs.has_token }}
+
+    steps:
+      - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
+        id: set-token-status
+        run: |
+          if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
+            echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets."
+            echo "::set-output name=has_token::false"
+          else
+            echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets."
+            echo "::set-output name=has_token::true"
+          fi
+
+  # Trigger for pull requests.
+  #
+  # This workflow triggers the integration test workflow in a different repository.
+  # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users.
+  # It depends on the "check-token" workflow to confirm access to this environment to avoid failures.
+  trigger:
+    runs-on: ubuntu-latest
+    environment: "test-trigger-is"
+
+    if: needs.check-token.outputs.has_token == 'true'
+    needs: check-token
+
+    steps:
+      - name: Generate GitHub App Token
+        id: generate-token
+        uses: actions/create-github-app-token@v1
+        with:
+          app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}
+          private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }}
+          owner: ${{ secrets.ORG_NAME }}
+          repositories: ${{secrets.REPO_NAME}}
+
+      - name: Trigger Workflow in Another Repo
+        env:
+          GH_TOKEN: ${{ steps.generate-token.outputs.token }}
+        run: |
+          gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \
+            --ref main \
+            -f pull_request_number=${{ github.event.pull_request.number }} \
+            -f commit_sha=${{ github.event.pull_request.head.sha }}
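The check-token job above writes its output with the legacy ::set-output workflow command. A minimal sketch of the same gate using the newer GITHUB_OUTPUT file (an alternative form, not what this change uses) would be:

      - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
        id: set-token-status
        run: |
          # Write the job output via $GITHUB_OUTPUT instead of ::set-output
          if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
            echo "has_token=false" >> "$GITHUB_OUTPUT"
          else
            echo "has_token=true" >> "$GITHUB_OUTPUT"
          fi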
@ -1,78 +0,0 @@
-name: integration
-
-on:
-
-  pull_request:
-    types: [opened, synchronize]
-
-  merge_group:
-
-
-jobs:
-  check-token:
-    runs-on: ubuntu-latest
-    environment: "test-trigger-is"
-    outputs:
-      has_token: ${{ steps.set-token-status.outputs.has_token }}
-    steps:
-      - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
-        id: set-token-status
-        run: |
-          if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
-            echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets."
-            echo "::set-output name=has_token::false"
-          else
-            echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets."
-            echo "::set-output name=has_token::true"
-          fi
-
-  trigger-tests:
-    runs-on: ubuntu-latest
-    needs: check-token
-    if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true'
-    environment: "test-trigger-is"
-
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Generate GitHub App Token
-        id: generate-token
-        uses: actions/create-github-app-token@v1
-        with:
-          app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}
-          private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }}
-          owner: ${{ secrets.ORG_NAME }}
-          repositories: ${{secrets.REPO_NAME}}
-
-      - name: Trigger Workflow in Another Repo
-        env:
-          GH_TOKEN: ${{ steps.generate-token.outputs.token }}
-        run: |
-          gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \
-            --ref main \
-            -f pull_request_number=${{ github.event.pull_request.number }} \
-            -f commit_sha=${{ github.event.pull_request.head.sha }}
-
-
-
-  # Statuses and checks apply to specific commits (by hash).
-  # Enforcement of required checks is done both at the PR level and the merge queue level.
-  # In case of multiple commits in a single PR, the hash of the squashed commit
-  # will not match the one for the latest (approved) commit in the PR.
-  # We auto approve the check for the merge queue for two reasons:
-  # * Queue times out due to duration of tests.
-  # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing.
-  auto-approve:
-    if: github.event_name == 'merge_group'
-    runs-on: ubuntu-latest
-    steps:
-      - name: Mark Check
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        shell: bash
-        run: |
-          gh api -X POST -H "Accept: application/vnd.github+json" \
-            -H "X-GitHub-Api-Version: 2022-11-28" \
-            /repos/${{ github.repository }}/statuses/${{ github.sha }} \
-            -f 'state=success' \
-            -f 'context=Integration Tests Check'
@ -33,13 +33,16 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.23.2
+          go-version: 1.23.4

       - name: Setup Python
         uses: actions/setup-python@v5
         with:
           python-version: '3.9'

+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+
       - name: Set go env
         run: |
           echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV
@ -54,9 +57,6 @@ jobs:
       - name: Run tests
         run: make testonly
-
-      - name: Publish test coverage
-        uses: codecov/codecov-action@v4

   golangci:
     name: lint
     runs-on: ubuntu-latest
@ -64,7 +64,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
         with:
-          go-version: 1.23.2
+          go-version: 1.23.4
       - name: Run go mod tidy
         run: |
           go mod tidy
@ -88,7 +88,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.23.2
+          go-version: 1.23.4

       # Github repo: https://github.com/ajv-validator/ajv-cli
       - name: Install ajv-cli
@ -31,7 +31,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.23.2
+          go-version: 1.23.4

       # The default cache key for this action considers only the `go.sum` file.
       # We include .goreleaser.yaml here to differentiate from the cache used by the push action
@ -22,7 +22,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.23.2
+          go-version: 1.23.4

       # The default cache key for this action considers only the `go.sum` file.
       # We include .goreleaser.yaml here to differentiate from the cache used by the push action
CHANGELOG.md
@ -1,5 +1,28 @@
 # Version changelog

+## [Release] Release v0.237.0
+
+Bundles:
+* Allow overriding compute for non-development mode targets ([#1899](https://github.com/databricks/cli/pull/1899)).
+* Show an error when using a cluster override with 'mode: production' ([#1994](https://github.com/databricks/cli/pull/1994)).
+
+API Changes:
+* Added `databricks account federation-policy` command group.
+* Added `databricks account service-principal-federation-policy` command group.
+* Added `databricks aibi-dashboard-embedding-access-policy delete` command.
+* Added `databricks aibi-dashboard-embedding-approved-domains delete` command.
+
+OpenAPI commit a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d (2024-12-16)
+Dependency updates:
+* Upgrade TF provider to 1.62.0 ([#2030](https://github.com/databricks/cli/pull/2030)).
+* Upgrade Go SDK to 0.54.0 ([#2029](https://github.com/databricks/cli/pull/2029)).
+* Bump TF codegen dependencies to latest ([#1961](https://github.com/databricks/cli/pull/1961)).
+* Bump golang.org/x/term from 0.26.0 to 0.27.0 ([#1983](https://github.com/databricks/cli/pull/1983)).
+* Bump golang.org/x/sync from 0.9.0 to 0.10.0 ([#1984](https://github.com/databricks/cli/pull/1984)).
+* Bump github.com/databricks/databricks-sdk-go from 0.52.0 to 0.53.0 ([#1985](https://github.com/databricks/cli/pull/1985)).
+* Bump golang.org/x/crypto from 0.24.0 to 0.31.0 ([#2006](https://github.com/databricks/cli/pull/2006)).
+* Bump golang.org/x/crypto from 0.30.0 to 0.31.0 in /bundle/internal/tf/codegen ([#2005](https://github.com/databricks/cli/pull/2005)).
+
 ## [Release] Release v0.236.0

 **New features for Databricks Asset Bundles:**
NOTICE
@ -73,10 +73,6 @@ fatih/color - https://github.com/fatih/color
 Copyright (c) 2013 Fatih Arslan
 License - https://github.com/fatih/color/blob/main/LICENSE.md

-ghodss/yaml - https://github.com/ghodss/yaml
-Copyright (c) 2014 Sam Ghods
-License - https://github.com/ghodss/yaml/blob/master/LICENSE
-
 Masterminds/semver - https://github.com/Masterminds/semver
 Copyright (C) 2014-2019, Matt Butcher and Matt Farina
 License - https://github.com/Masterminds/semver/blob/master/LICENSE.txt
@ -101,3 +97,11 @@ License - https://github.com/stretchr/testify/blob/master/LICENSE
 whilp/git-urls - https://github.com/whilp/git-urls
 Copyright (c) 2020 Will Maier
 License - https://github.com/whilp/git-urls/blob/master/LICENSE
+
+github.com/wI2L/jsondiff v0.6.1
+Copyright (c) 2020-2024 William Poussier <william.poussier@gmail.com>
+License - https://github.com/wI2L/jsondiff/blob/master/LICENSE
+
+https://github.com/hexops/gotextdiff
+Copyright (c) 2009 The Go Authors. All rights reserved.
+License - https://github.com/hexops/gotextdiff/blob/main/LICENSE
@ -16,12 +16,6 @@ type infer struct {
 func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
     artifact := b.Config.Artifacts[m.name]

-    // TODO use python.DetectVEnvExecutable once bundle has a way to specify venv path
-    py, err := python.DetectExecutable(ctx)
-    if err != nil {
-        return diag.FromErr(err)
-    }
-
     // Note: using --build-number (build tag) flag does not help with re-installing
     // libraries on all-purpose clusters. The reason is that `pip` ignoring build tag
     // when upgrading the library and only look at wheel version.
@ -36,7 +30,9 @@ func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
     //   version=datetime.datetime.utcnow().strftime("%Y%m%d.%H%M%S"),
     //   ...
     //)
-    artifact.BuildCommand = fmt.Sprintf(`"%s" setup.py bdist_wheel`, py)
+
+    py := python.GetExecutable()
+    artifact.BuildCommand = fmt.Sprintf(`%s setup.py bdist_wheel`, py)

     return nil
 }
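The infer mutator above only fills in a build command when the bundle artifact does not define one. For comparison, a hypothetical databricks.yml fragment that sets the command explicitly (illustrative key and values, not taken from this change) might look like:

artifacts:
  my_wheel:            # hypothetical artifact key
    type: whl
    path: .
    build: python3 setup.py bdist_wheel   # explicit command; when omitted, the CLI infers one as above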
@ -541,7 +541,7 @@ func TestLoadDiagnosticsFile_nonExistent(t *testing.T) {

 func TestInterpreterPath(t *testing.T) {
     if runtime.GOOS == "windows" {
-        assert.Equal(t, "venv\\Scripts\\python3.exe", interpreterPath("venv"))
+        assert.Equal(t, "venv\\Scripts\\python.exe", interpreterPath("venv"))
     } else {
         assert.Equal(t, "venv/bin/python3", interpreterPath("venv"))
     }
@ -673,7 +673,7 @@ func withFakeVEnv(t *testing.T, venvPath string) {

 func interpreterPath(venvPath string) string {
     if runtime.GOOS == "windows" {
-        return filepath.Join(venvPath, "Scripts", "python3.exe")
+        return filepath.Join(venvPath, "Scripts", "python.exe")
     } else {
         return filepath.Join(venvPath, "bin", "python3")
     }
@ -70,6 +70,12 @@ github.com/databricks/cli/bundle/config/resources.Cluster:
       If `cluster_log_conf` is specified, init script logs are sent to `<destination>/<cluster-ID>/init_scripts`.
   instance_pool_id:
     description: The optional ID of the instance pool to which the cluster belongs.
+  is_single_node:
+    description: |
+      This field can only be used with `kind`.
+
+      When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`
+  kind: {}
   node_type_id:
     description: |
       This field encodes, through a single value, the resources available to each of
@ -119,6 +125,11 @@ github.com/databricks/cli/bundle/config/resources.Cluster:
       SSH public key contents that will be added to each Spark node in this cluster. The
       corresponding private keys can be used to login with the user name `ubuntu` on port `2200`.
       Up to 10 keys can be specified.
+  use_ml_runtime:
+    description: |
+      This field can only be used with `kind`.
+
+      `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
   workload_type: {}
 github.com/databricks/cli/bundle/config/resources.Dashboard:
   create_time:
@ -759,6 +770,12 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec:
       If `cluster_log_conf` is specified, init script logs are sent to `<destination>/<cluster-ID>/init_scripts`.
   instance_pool_id:
     description: The optional ID of the instance pool to which the cluster belongs.
+  is_single_node:
+    description: |
+      This field can only be used with `kind`.
+
+      When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`
+  kind: {}
   node_type_id:
     description: |
       This field encodes, through a single value, the resources available to each of
@ -808,6 +825,11 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec:
       SSH public key contents that will be added to each Spark node in this cluster. The
       corresponding private keys can be used to login with the user name `ubuntu` on port `2200`.
       Up to 10 keys can be specified.
+  use_ml_runtime:
+    description: |
+      This field can only be used with `kind`.
+
+      `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
   workload_type: {}
 github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode:
   _:
@ -815,6 +837,12 @@ github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode:
       Data security mode decides what data governance model to use when accessing data
       from a cluster.

+      The following modes can only be used with `kind`.
+      * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.
+      * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.
+      * `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
+
+      The following modes can be used regardless of `kind`.
       * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.
       * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.
       * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.
@ -827,6 +855,9 @@ github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode:
       * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.
       * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.
     enum:
+      - DATA_SECURITY_MODE_AUTO
+      - DATA_SECURITY_MODE_STANDARD
+      - DATA_SECURITY_MODE_DEDICATED
       - NONE
       - SINGLE_USER
       - USER_ISOLATION
@ -1068,6 +1099,17 @@ github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState:
     enum:
       - ACTIVE
       - TRASHED
+github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask:
+  clean_room_name:
+    description: The clean room that the notebook belongs to.
+  etag:
+    description: |-
+      Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version).
+      It can be fetched by calling the :method:cleanroomassets/get API.
+  notebook_base_parameters:
+    description: Base parameters to be used for the clean room notebook job.
+  notebook_name:
+    description: Name of the notebook being run.
 github.com/databricks/databricks-sdk-go/service/jobs.Condition:
   _:
     enum:
@ -1346,10 +1388,10 @@ github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthMetric:
       Specifies the health metric that is being evaluated for a particular health rule.

       * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.
-      * `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Private Preview.
-      * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Private Preview.
-      * `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Private Preview.
-      * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Private Preview.
+      * `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview.
+      * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview.
+      * `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview.
+      * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Public Preview.
     enum:
       - RUN_DURATION_SECONDS
       - STREAMING_BACKLOG_BYTES
@ -1651,6 +1693,10 @@ github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfiguration:
       and can be used to wait for a series of table updates before triggering a run. The
       minimum allowed value is 60 seconds.
 github.com/databricks/databricks-sdk-go/service/jobs.Task:
+  clean_rooms_notebook_task:
+    description: |-
+      The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook
+      when the `clean_rooms_notebook_task` field is present.
   condition_task:
     description: |-
       The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.
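The new cluster fields documented above surface in bundle configuration as well. A rough, hypothetical databricks.yml fragment exercising them (the `kind` value and the other settings are illustrative assumptions, not taken from this change):

resources:
  clusters:
    example_cluster:                      # hypothetical resource key
      spark_version: 15.4.x-scala2.12     # hypothetical value
      kind: CLASSIC_PREVIEW               # assumed enum value; not listed in this diff
      is_single_node: true
      use_ml_runtime: false
      data_security_mode: DATA_SECURITY_MODE_AUTO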
@ -5,6 +5,9 @@ github.com/databricks/cli/bundle/config/resources.Cluster:
   "docker_image":
     "description": |-
       PLACEHOLDER
+  "kind":
+    "description": |-
+      PLACEHOLDER
   "permissions":
     "description": |-
       PLACEHOLDER
@ -90,6 +93,9 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec:
   "docker_image":
     "description": |-
       PLACEHOLDER
+  "kind":
+    "description": |-
+      PLACEHOLDER
   "runtime_engine":
     "description": |-
       PLACEHOLDER
@ -15,8 +15,8 @@ import (
     "github.com/databricks/cli/libs/dyn/merge"
     "github.com/databricks/cli/libs/dyn/yamlloader"
     "github.com/databricks/cli/libs/jsonschema"
-    "github.com/ghodss/yaml"
     "github.com/stretchr/testify/assert"
+    "gopkg.in/yaml.v3"
 )

 func copyFile(src, dst string) error {
@ -9,10 +9,9 @@ import (
     "reflect"
     "strings"

-    "github.com/ghodss/yaml"
-
     "github.com/databricks/cli/libs/dyn/yamlloader"
     "github.com/databricks/cli/libs/jsonschema"
+    "gopkg.in/yaml.v3"
 )

 type Components struct {
@ -2,7 +2,7 @@ module github.com/databricks/cli/bundle/internal/tf/codegen

 go 1.23

-toolchain go1.23.2
+toolchain go1.23.4

 require (
     github.com/hashicorp/go-version v1.7.0
@ -1,3 +1,3 @@
 package schema

-const ProviderVersion = "1.61.0"
+const ProviderVersion = "1.62.0"
@ -317,6 +317,8 @@ type DataSourceClusterClusterInfoSpec struct {
     EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
     IdempotencyToken string `json:"idempotency_token,omitempty"`
     InstancePoolId string `json:"instance_pool_id,omitempty"`
+    IsSingleNode bool `json:"is_single_node,omitempty"`
+    Kind string `json:"kind,omitempty"`
     NodeTypeId string `json:"node_type_id,omitempty"`
     NumWorkers int `json:"num_workers,omitempty"`
     PolicyId string `json:"policy_id,omitempty"`
@ -326,6 +328,7 @@ type DataSourceClusterClusterInfoSpec struct {
     SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
     SparkVersion string `json:"spark_version"`
     SshPublicKeys []string `json:"ssh_public_keys,omitempty"`
+    UseMlRuntime bool `json:"use_ml_runtime,omitempty"`
     Autoscale *DataSourceClusterClusterInfoSpecAutoscale `json:"autoscale,omitempty"`
     AwsAttributes *DataSourceClusterClusterInfoSpecAwsAttributes `json:"aws_attributes,omitempty"`
     AzureAttributes *DataSourceClusterClusterInfoSpecAzureAttributes `json:"azure_attributes,omitempty"`
@ -369,7 +372,9 @@ type DataSourceClusterClusterInfo struct {
     EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
     EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
     InstancePoolId string `json:"instance_pool_id,omitempty"`
+    IsSingleNode bool `json:"is_single_node,omitempty"`
     JdbcPort int `json:"jdbc_port,omitempty"`
+    Kind string `json:"kind,omitempty"`
     LastRestartedTime int `json:"last_restarted_time,omitempty"`
     LastStateLossTime int `json:"last_state_loss_time,omitempty"`
     NodeTypeId string `json:"node_type_id,omitempty"`
@ -386,6 +391,7 @@ type DataSourceClusterClusterInfo struct {
     State string `json:"state,omitempty"`
     StateMessage string `json:"state_message,omitempty"`
     TerminatedTime int `json:"terminated_time,omitempty"`
+    UseMlRuntime bool `json:"use_ml_runtime,omitempty"`
     Autoscale *DataSourceClusterClusterInfoAutoscale `json:"autoscale,omitempty"`
     AwsAttributes *DataSourceClusterClusterInfoAwsAttributes `json:"aws_attributes,omitempty"`
     AzureAttributes *DataSourceClusterClusterInfoAzureAttributes `json:"azure_attributes,omitempty"`
@ -176,6 +176,8 @@ type ResourceCluster struct {
     IdempotencyToken string `json:"idempotency_token,omitempty"`
     InstancePoolId string `json:"instance_pool_id,omitempty"`
     IsPinned bool `json:"is_pinned,omitempty"`
+    IsSingleNode bool `json:"is_single_node,omitempty"`
+    Kind string `json:"kind,omitempty"`
     NoWait bool `json:"no_wait,omitempty"`
     NodeTypeId string `json:"node_type_id,omitempty"`
     NumWorkers int `json:"num_workers,omitempty"`
@ -188,6 +190,7 @@ type ResourceCluster struct {
     SshPublicKeys []string `json:"ssh_public_keys,omitempty"`
     State string `json:"state,omitempty"`
     Url string `json:"url,omitempty"`
+    UseMlRuntime bool `json:"use_ml_runtime,omitempty"`
     Autoscale *ResourceClusterAutoscale `json:"autoscale,omitempty"`
     AwsAttributes *ResourceClusterAwsAttributes `json:"aws_attributes,omitempty"`
     AzureAttributes *ResourceClusterAzureAttributes `json:"azure_attributes,omitempty"`
@ -240,6 +240,8 @@ type ResourceJobJobClusterNewCluster struct {
     EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
     IdempotencyToken string `json:"idempotency_token,omitempty"`
     InstancePoolId string `json:"instance_pool_id,omitempty"`
+    IsSingleNode bool `json:"is_single_node,omitempty"`
+    Kind string `json:"kind,omitempty"`
     NodeTypeId string `json:"node_type_id,omitempty"`
     NumWorkers int `json:"num_workers,omitempty"`
     PolicyId string `json:"policy_id,omitempty"`
@ -249,6 +251,7 @@ type ResourceJobJobClusterNewCluster struct {
     SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
     SparkVersion string `json:"spark_version"`
     SshPublicKeys []string `json:"ssh_public_keys,omitempty"`
+    UseMlRuntime bool `json:"use_ml_runtime,omitempty"`
     Autoscale *ResourceJobJobClusterNewClusterAutoscale `json:"autoscale,omitempty"`
     AwsAttributes *ResourceJobJobClusterNewClusterAwsAttributes `json:"aws_attributes,omitempty"`
     AzureAttributes *ResourceJobJobClusterNewClusterAzureAttributes `json:"azure_attributes,omitempty"`
@ -462,6 +465,8 @@ type ResourceJobNewCluster struct {
     EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
     IdempotencyToken string `json:"idempotency_token,omitempty"`
     InstancePoolId string `json:"instance_pool_id,omitempty"`
+    IsSingleNode bool `json:"is_single_node,omitempty"`
+    Kind string `json:"kind,omitempty"`
     NodeTypeId string `json:"node_type_id,omitempty"`
     NumWorkers int `json:"num_workers,omitempty"`
     PolicyId string `json:"policy_id,omitempty"`
@ -471,6 +476,7 @@ type ResourceJobNewCluster struct {
     SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
     SparkVersion string `json:"spark_version"`
     SshPublicKeys []string `json:"ssh_public_keys,omitempty"`
+    UseMlRuntime bool `json:"use_ml_runtime,omitempty"`
     Autoscale *ResourceJobNewClusterAutoscale `json:"autoscale,omitempty"`
     AwsAttributes *ResourceJobNewClusterAwsAttributes `json:"aws_attributes,omitempty"`
     AzureAttributes *ResourceJobNewClusterAzureAttributes `json:"azure_attributes,omitempty"`
@ -548,6 +554,13 @@ type ResourceJobSparkSubmitTask struct {
     Parameters []string `json:"parameters,omitempty"`
 }

+type ResourceJobTaskCleanRoomsNotebookTask struct {
+    CleanRoomName string `json:"clean_room_name"`
+    Etag string `json:"etag,omitempty"`
+    NotebookBaseParameters map[string]string `json:"notebook_base_parameters,omitempty"`
+    NotebookName string `json:"notebook_name"`
+}
+
 type ResourceJobTaskConditionTask struct {
     Left string `json:"left"`
     Op string `json:"op"`
@ -578,6 +591,13 @@ type ResourceJobTaskEmailNotifications struct {
     OnSuccess []string `json:"on_success,omitempty"`
 }

+type ResourceJobTaskForEachTaskTaskCleanRoomsNotebookTask struct {
+    CleanRoomName string `json:"clean_room_name"`
+    Etag string `json:"etag,omitempty"`
+    NotebookBaseParameters map[string]string `json:"notebook_base_parameters,omitempty"`
+    NotebookName string `json:"notebook_name"`
+}
+
 type ResourceJobTaskForEachTaskTaskConditionTask struct {
     Left string `json:"left"`
     Op string `json:"op"`
@ -814,6 +834,8 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct {
     EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
     IdempotencyToken string `json:"idempotency_token,omitempty"`
     InstancePoolId string `json:"instance_pool_id,omitempty"`
+    IsSingleNode bool `json:"is_single_node,omitempty"`
+    Kind string `json:"kind,omitempty"`
     NodeTypeId string `json:"node_type_id,omitempty"`
     NumWorkers int `json:"num_workers,omitempty"`
     PolicyId string `json:"policy_id,omitempty"`
@ -823,6 +845,7 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct {
     SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
     SparkVersion string `json:"spark_version"`
     SshPublicKeys []string `json:"ssh_public_keys,omitempty"`
+    UseMlRuntime bool `json:"use_ml_runtime,omitempty"`
     Autoscale *ResourceJobTaskForEachTaskTaskNewClusterAutoscale `json:"autoscale,omitempty"`
     AwsAttributes *ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"`
     AzureAttributes *ResourceJobTaskForEachTaskTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"`
@ -963,34 +986,35 @@ type ResourceJobTaskForEachTaskTaskWebhookNotifications struct {
 }

 type ResourceJobTaskForEachTaskTask struct {
     Description string `json:"description,omitempty"`
     DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"`
     EnvironmentKey string `json:"environment_key,omitempty"`
     ExistingClusterId string `json:"existing_cluster_id,omitempty"`
     JobClusterKey string `json:"job_cluster_key,omitempty"`
     MaxRetries int `json:"max_retries,omitempty"`
     MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
     RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
     RunIf string `json:"run_if,omitempty"`
     TaskKey string `json:"task_key"`
     TimeoutSeconds int `json:"timeout_seconds,omitempty"`
+    CleanRoomsNotebookTask *ResourceJobTaskForEachTaskTaskCleanRoomsNotebookTask `json:"clean_rooms_notebook_task,omitempty"`
     ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"`
     DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"`
     DependsOn []ResourceJobTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"`
     EmailNotifications *ResourceJobTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"`
     Health *ResourceJobTaskForEachTaskTaskHealth `json:"health,omitempty"`
     Library []ResourceJobTaskForEachTaskTaskLibrary `json:"library,omitempty"`
     NewCluster *ResourceJobTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"`
     NotebookTask *ResourceJobTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"`
     NotificationSettings *ResourceJobTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"`
     PipelineTask *ResourceJobTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"`
     PythonWheelTask *ResourceJobTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"`
     RunJobTask *ResourceJobTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"`
     SparkJarTask *ResourceJobTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"`
     SparkPythonTask *ResourceJobTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"`
     SparkSubmitTask *ResourceJobTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"`
     SqlTask *ResourceJobTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"`
     WebhookNotifications *ResourceJobTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"`
 }

 type ResourceJobTaskForEachTask struct {
@ -1205,6 +1229,8 @@ type ResourceJobTaskNewCluster struct {
     EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
     IdempotencyToken string `json:"idempotency_token,omitempty"`
     InstancePoolId string `json:"instance_pool_id,omitempty"`
+    IsSingleNode bool `json:"is_single_node,omitempty"`
+    Kind string `json:"kind,omitempty"`
     NodeTypeId string `json:"node_type_id,omitempty"`
     NumWorkers int `json:"num_workers,omitempty"`
     PolicyId string `json:"policy_id,omitempty"`
@ -1214,6 +1240,7 @@ type ResourceJobTaskNewCluster struct {
     SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
     SparkVersion string `json:"spark_version"`
     SshPublicKeys []string `json:"ssh_public_keys,omitempty"`
+    UseMlRuntime bool `json:"use_ml_runtime,omitempty"`
     Autoscale *ResourceJobTaskNewClusterAutoscale `json:"autoscale,omitempty"`
     AwsAttributes *ResourceJobTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"`
     AzureAttributes *ResourceJobTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"`
@ -1354,35 +1381,36 @@ type ResourceJobTaskWebhookNotifications struct {
 }

 type ResourceJobTask struct {
     Description string `json:"description,omitempty"`
     DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"`
     EnvironmentKey string `json:"environment_key,omitempty"`
     ExistingClusterId string `json:"existing_cluster_id,omitempty"`
     JobClusterKey string `json:"job_cluster_key,omitempty"`
     MaxRetries int `json:"max_retries,omitempty"`
     MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
     RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
     RunIf string `json:"run_if,omitempty"`
     TaskKey string `json:"task_key"`
     TimeoutSeconds int `json:"timeout_seconds,omitempty"`
+    CleanRoomsNotebookTask *ResourceJobTaskCleanRoomsNotebookTask `json:"clean_rooms_notebook_task,omitempty"`
     ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"`
     DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"`
     DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"`
     EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"`
     ForEachTask *ResourceJobTaskForEachTask `json:"for_each_task,omitempty"`
     Health *ResourceJobTaskHealth `json:"health,omitempty"`
     Library []ResourceJobTaskLibrary `json:"library,omitempty"`
     NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"`
     NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"`
     NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"`
     PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"`
     PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"`
     RunJobTask *ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"`
     SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"`
     SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"`
     SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"`
     SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"`
     WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"`
 }

 type ResourceJobTriggerFileArrival struct {
@ -244,9 +244,9 @@ type ResourcePipelineNotification struct {
 }

 type ResourcePipelineRestartWindow struct {
-    DaysOfWeek string `json:"days_of_week,omitempty"`
-    StartHour int `json:"start_hour"`
-    TimeZoneId string `json:"time_zone_id,omitempty"`
+    DaysOfWeek []string `json:"days_of_week,omitempty"`
+    StartHour int `json:"start_hour"`
+    TimeZoneId string `json:"time_zone_id,omitempty"`
 }

 type ResourcePipelineTriggerCron struct {
@ -21,7 +21,7 @@ type Root struct {

 const ProviderHost = "registry.terraform.io"
 const ProviderSource = "databricks/databricks"
-const ProviderVersion = "1.61.0"
+const ProviderVersion = "1.62.0"

 func NewRoot() *Root {
     return &Root{
|
|||
"description": "The optional ID of the instance pool to which the cluster belongs.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"is_single_node": {
|
||||
"description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n",
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"kind": {
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.Kind"
|
||||
},
|
||||
"node_type_id": {
|
||||
"description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n",
|
||||
"$ref": "#/$defs/string"
|
||||
|
@ -168,6 +175,10 @@
|
|||
"description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.",
|
||||
"$ref": "#/$defs/slice/string"
|
||||
},
|
||||
"use_ml_runtime": {
|
||||
"description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n",
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"workload_type": {
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType"
|
||||
}
|
||||
|
@ -1988,6 +1999,13 @@
|
|||
"description": "The optional ID of the instance pool to which the cluster belongs.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"is_single_node": {
|
||||
"description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n",
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"kind": {
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.Kind"
|
||||
},
|
||||
"node_type_id": {
|
||||
"description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n",
|
||||
"$ref": "#/$defs/string"
|
||||
|
@ -2023,6 +2041,10 @@
|
|||
"description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.",
|
||||
"$ref": "#/$defs/slice/string"
|
||||
},
|
||||
"use_ml_runtime": {
|
||||
"description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n",
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"workload_type": {
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType"
|
||||
}
|
||||
|
@ -2037,8 +2059,11 @@
|
|||
},
|
||||
"compute.DataSecurityMode": {
|
||||
"type": "string",
|
||||
"description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n",
|
||||
"description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used with `kind`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n",
|
||||
"enum": [
|
||||
"DATA_SECURITY_MODE_AUTO",
|
||||
"DATA_SECURITY_MODE_STANDARD",
|
||||
"DATA_SECURITY_MODE_DEDICATED",
|
||||
"NONE",
|
||||
"SINGLE_USER",
|
||||
"USER_ISOLATION",
|
||||
|
@ -2255,6 +2280,9 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"compute.Kind": {
|
||||
"type": "string"
|
||||
},
|
||||
"compute.Library": {
|
||||
"oneOf": [
|
||||
{
|
||||
|
@ -2543,6 +2571,40 @@
|
|||
"TRASHED"
|
||||
]
|
||||
},
|
||||
"jobs.CleanRoomsNotebookTask": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"clean_room_name": {
|
||||
"description": "The clean room that the notebook belongs to.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"etag": {
|
||||
"description": "Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version).\nIt can be fetched by calling the :method:cleanroomassets/get API.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"notebook_base_parameters": {
|
||||
"description": "Base parameters to be used for the clean room notebook job.",
|
||||
"$ref": "#/$defs/map/string"
|
||||
},
|
||||
"notebook_name": {
|
||||
"description": "Name of the notebook being run.",
|
||||
"$ref": "#/$defs/string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"clean_room_name",
|
||||
"notebook_name"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"jobs.Condition": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
|
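With the schema addition above, a clean rooms notebook task can be declared on a bundle job. A minimal, hypothetical databricks.yml sketch (names and parameter values are illustrative assumptions; only `clean_room_name` and `notebook_name` are required per the schema):

resources:
  jobs:
    example_job:                            # hypothetical job key
      tasks:
        - task_key: clean_room_example
          clean_rooms_notebook_task:
            clean_room_name: my_clean_room        # hypothetical value
            notebook_name: analysis_notebook      # hypothetical value
            notebook_base_parameters:
              run_date: "2024-12-16"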
@ -3063,7 +3125,7 @@
     },
     "jobs.JobsHealthMetric": {
       "type": "string",
-      "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Private Preview.",
+      "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Public Preview.",
       "enum": [
         "RUN_DURATION_SECONDS",
         "STREAMING_BACKLOG_BYTES",
@ -3653,6 +3715,10 @@
       {
         "type": "object",
         "properties": {
+          "clean_rooms_notebook_task": {
+            "description": "The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook\nwhen the `clean_rooms_notebook_task` field is present.",
+            "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask"
+          },
           "condition_task": {
             "description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.",
             "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask"
@ -4551,7 +4617,7 @@
         "properties": {
           "days_of_week": {
             "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.",
-            "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek"
+            "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek"
           },
           "start_hour": {
             "description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.",
@ -6162,6 +6228,20 @@
         }
       ]
     },
+    "pipelines.RestartWindowDaysOfWeek": {
+      "oneOf": [
+        {
+          "type": "array",
+          "items": {
+            "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek"
+          }
+        },
+        {
+          "type": "string",
+          "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+        }
+      ]
+    },
     "serving.AiGatewayRateLimit": {
       "oneOf": [
         {
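With `days_of_week` now referencing a slice, a pipeline restart window accepts a list of days in bundle configuration. A hypothetical sketch (the upper-case day names are an assumption about the pipelines enum, which this diff does not list):

resources:
  pipelines:
    example_pipeline:            # hypothetical resource key
      restart_window:
        days_of_week:
          - MONDAY
          - FRIDAY
        start_hour: 2
        time_zone_id: UTC        # assumed value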
@ -11,6 +11,7 @@ import (
     credentials "github.com/databricks/cli/cmd/account/credentials"
     custom_app_integration "github.com/databricks/cli/cmd/account/custom-app-integration"
     encryption_keys "github.com/databricks/cli/cmd/account/encryption-keys"
+    account_federation_policy "github.com/databricks/cli/cmd/account/federation-policy"
     account_groups "github.com/databricks/cli/cmd/account/groups"
     account_ip_access_lists "github.com/databricks/cli/cmd/account/ip-access-lists"
     log_delivery "github.com/databricks/cli/cmd/account/log-delivery"
@ -21,6 +22,7 @@ import (
     o_auth_published_apps "github.com/databricks/cli/cmd/account/o-auth-published-apps"
     private_access "github.com/databricks/cli/cmd/account/private-access"
     published_app_integration "github.com/databricks/cli/cmd/account/published-app-integration"
+    service_principal_federation_policy "github.com/databricks/cli/cmd/account/service-principal-federation-policy"
     service_principal_secrets "github.com/databricks/cli/cmd/account/service-principal-secrets"
     account_service_principals "github.com/databricks/cli/cmd/account/service-principals"
     account_settings "github.com/databricks/cli/cmd/account/settings"
@ -44,6 +46,7 @@ func New() *cobra.Command {
     cmd.AddCommand(credentials.New())
     cmd.AddCommand(custom_app_integration.New())
     cmd.AddCommand(encryption_keys.New())
+    cmd.AddCommand(account_federation_policy.New())
     cmd.AddCommand(account_groups.New())
     cmd.AddCommand(account_ip_access_lists.New())
     cmd.AddCommand(log_delivery.New())
@ -54,6 +57,7 @@ func New() *cobra.Command {
     cmd.AddCommand(o_auth_published_apps.New())
     cmd.AddCommand(private_access.New())
     cmd.AddCommand(published_app_integration.New())
+    cmd.AddCommand(service_principal_federation_policy.New())
     cmd.AddCommand(service_principal_secrets.New())
     cmd.AddCommand(account_service_principals.New())
     cmd.AddCommand(account_settings.New())
@ -0,0 +1,402 @@
|
|||
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
|
||||
|
||||
package federation_policy
|
||||
|
||||
import (
|
||||
"github.com/databricks/cli/cmd/root"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
"github.com/databricks/databricks-sdk-go/service/oauth2"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var cmdOverrides []func(*cobra.Command)
|
||||
|
||||
func New() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "federation-policy",
|
||||
Short: `These APIs manage account federation policies.`,
|
||||
Long: `These APIs manage account federation policies.
|
||||
|
||||
Account federation policies allow users and service principals in your
|
||||
Databricks account to securely access Databricks APIs using tokens from your
|
||||
trusted identity providers (IdPs).
|
||||
|
||||
With token federation, your users and service principals can exchange tokens
|
||||
from your IdP for Databricks OAuth tokens, which can be used to access
|
||||
Databricks APIs. Token federation eliminates the need to manage Databricks
|
||||
secrets, and allows you to centralize management of token issuance policies in
|
||||
your IdP. Databricks token federation is typically used in combination with
|
||||
[SCIM], so users in your IdP are synchronized into your Databricks account.
|
||||
|
||||
Token federation is configured in your Databricks account using an account
|
||||
federation policy. An account federation policy specifies: * which IdP, or
|
||||
issuer, your Databricks account should accept tokens from * how to determine
|
||||
which Databricks user, or subject, a token is issued for
|
||||
|
||||
To configure a federation policy, you provide the following: * The required
|
||||
token __issuer__, as specified in the “iss” claim of your tokens. The
|
||||
issuer is an https URL that identifies your IdP. * The allowed token
|
||||
__audiences__, as specified in the “aud” claim of your tokens. This
|
||||
identifier is intended to represent the recipient of the token. As long as the
|
||||
audience in the token matches at least one audience in the policy, the token
|
||||
is considered a match. If unspecified, the default value is your Databricks
|
||||
account id. * The __subject claim__, which indicates which token claim
|
||||
contains the Databricks username of the user the token was issued for. If
|
||||
unspecified, the default value is “sub”. * Optionally, the public keys
|
||||
used to validate the signature of your tokens, in JWKS format. If unspecified
|
||||
(recommended), Databricks automatically fetches the public keys from your
|
||||
issuer’s well known endpoint. Databricks strongly recommends relying on your
|
||||
issuer’s well known endpoint for discovering public keys.
|
||||
|
||||
An example federation policy is: issuer: "https://idp.mycompany.com/oidc"
|
||||
audiences: ["databricks"] subject_claim: "sub"
|
||||
|
||||
An example JWT token body that matches this policy and could be used to
|
||||
authenticate to Databricks as user username@mycompany.com is: { "iss":
|
||||
"https://idp.mycompany.com/oidc", "aud": "databricks", "sub":
|
||||
"username@mycompany.com" }
|
||||
|
||||
You may also need to configure your IdP to generate tokens for your users to
|
||||
exchange with Databricks, if your users do not already have the ability to
|
||||
generate tokens that are compatible with your federation policy.
|
||||
|
||||
You do not need to configure an OAuth application in Databricks to use token
|
||||
federation.
|
||||
|
||||
[SCIM]: https://docs.databricks.com/admin/users-groups/scim/index.html`,
|
||||
GroupID: "oauth2",
|
||||
Annotations: map[string]string{
|
||||
"package": "oauth2",
|
||||
},
|
||||
|
||||
// This service is being previewed; hide from help output.
|
||||
Hidden: true,
|
||||
}
|
||||
|
||||
// Add methods
|
||||
cmd.AddCommand(newCreate())
|
||||
cmd.AddCommand(newDelete())
|
||||
cmd.AddCommand(newGet())
|
||||
cmd.AddCommand(newList())
|
||||
cmd.AddCommand(newUpdate())
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range cmdOverrides {
|
||||
fn(cmd)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start create command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var createOverrides []func(
|
||||
*cobra.Command,
|
||||
*oauth2.CreateAccountFederationPolicyRequest,
|
||||
)
|
||||
|
||||
func newCreate() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var createReq oauth2.CreateAccountFederationPolicyRequest
|
||||
createReq.Policy = &oauth2.FederationPolicy{}
|
||||
var createJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&createReq.Policy.Description, "description", createReq.Policy.Description, `Description of the federation policy.`)
|
||||
cmd.Flags().StringVar(&createReq.Policy.Name, "name", createReq.Policy.Name, `Name of the federation policy.`)
|
||||
// TODO: complex arg: oidc_policy
|
||||
|
||||
cmd.Use = "create"
|
||||
cmd.Short = `Create account federation policy.`
|
||||
cmd.Long = `Create account federation policy.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustAccountClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := createJson.Unmarshal(&createReq.Policy)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
response, err := a.FederationPolicy.Create(ctx, createReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range createOverrides {
|
||||
fn(cmd, &createReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start delete command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var deleteOverrides []func(
|
||||
*cobra.Command,
|
||||
*oauth2.DeleteAccountFederationPolicyRequest,
|
||||
)
|
||||
|
||||
func newDelete() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var deleteReq oauth2.DeleteAccountFederationPolicyRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "delete POLICY_ID"
|
||||
cmd.Short = `Delete account federation policy.`
|
||||
cmd.Long = `Delete account federation policy.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustAccountClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
deleteReq.PolicyId = args[0]
|
||||
|
||||
err = a.FederationPolicy.Delete(ctx, deleteReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range deleteOverrides {
|
||||
fn(cmd, &deleteReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start get command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var getOverrides []func(
|
||||
*cobra.Command,
|
||||
*oauth2.GetAccountFederationPolicyRequest,
|
||||
)
|
||||
|
||||
func newGet() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var getReq oauth2.GetAccountFederationPolicyRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "get POLICY_ID"
|
||||
cmd.Short = `Get account federation policy.`
|
||||
cmd.Long = `Get account federation policy.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustAccountClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
getReq.PolicyId = args[0]
|
||||
|
||||
response, err := a.FederationPolicy.Get(ctx, getReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range getOverrides {
|
||||
fn(cmd, &getReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start list command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var listOverrides []func(
|
||||
*cobra.Command,
|
||||
*oauth2.ListAccountFederationPoliciesRequest,
|
||||
)
|
||||
|
||||
func newList() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var listReq oauth2.ListAccountFederationPoliciesRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``)
|
||||
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``)
|
||||
|
||||
cmd.Use = "list"
|
||||
cmd.Short = `List account federation policies.`
|
||||
cmd.Long = `List account federation policies.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustAccountClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
response := a.FederationPolicy.List(ctx, listReq)
|
||||
return cmdio.RenderIterator(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range listOverrides {
|
||||
fn(cmd, &listReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start update command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var updateOverrides []func(
|
||||
*cobra.Command,
|
||||
*oauth2.UpdateAccountFederationPolicyRequest,
|
||||
)
|
||||
|
||||
func newUpdate() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var updateReq oauth2.UpdateAccountFederationPolicyRequest
|
||||
updateReq.Policy = &oauth2.FederationPolicy{}
|
||||
var updateJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&updateReq.Policy.Description, "description", updateReq.Policy.Description, `Description of the federation policy.`)
|
||||
cmd.Flags().StringVar(&updateReq.Policy.Name, "name", updateReq.Policy.Name, `Name of the federation policy.`)
|
||||
// TODO: complex arg: oidc_policy
|
||||
|
||||
cmd.Use = "update POLICY_ID UPDATE_MASK"
|
||||
cmd.Short = `Update account federation policy.`
|
||||
cmd.Long = `Update account federation policy.
|
||||
|
||||
Arguments:
|
||||
POLICY_ID:
|
||||
UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask
|
||||
specifies which fields of the setting payload will be updated. The field
|
||||
mask needs to be supplied as single string. To specify multiple fields in
|
||||
the field mask, use comma as the separator (no space).`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(2)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustAccountClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := updateJson.Unmarshal(&updateReq.Policy)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
updateReq.PolicyId = args[0]
|
||||
updateReq.UpdateMask = args[1]
|
||||
|
||||
response, err := a.FederationPolicy.Update(ctx, updateReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range updateOverrides {
|
||||
fn(cmd, &updateReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// end service AccountFederationPolicy
|
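The help text in the file above describes what an account federation policy specifies (issuer, audiences, subject claim, optional JWKS). As a rough sketch, the same example policy could be created through the Go SDK rather than the CLI; note that the OidcPolicy field and the OidcFederationPolicy type and field names are assumptions inferred from the "oidc_policy" TODO in the generated command, and "my-idp-policy" is a hypothetical name.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/oauth2"
)

func main() {
	ctx := context.Background()

	// Account-level client; host, account ID and credentials come from the
	// environment (DATABRICKS_HOST, DATABRICKS_ACCOUNT_ID, ...) or a config profile.
	a, err := databricks.NewAccountClient()
	if err != nil {
		log.Fatal(err)
	}

	// Mirror the example policy from the help text:
	//   issuer: "https://idp.mycompany.com/oidc"
	//   audiences: ["databricks"]
	//   subject_claim: "sub"
	// NOTE: OidcPolicy / OidcFederationPolicy and their field names are assumptions
	// inferred from the `oidc_policy` TODO in the generated command.
	policy, err := a.FederationPolicy.Create(ctx, oauth2.CreateAccountFederationPolicyRequest{
		Policy: &oauth2.FederationPolicy{
			Name:        "my-idp-policy", // hypothetical name
			Description: "Token federation for idp.mycompany.com",
			OidcPolicy: &oauth2.OidcFederationPolicy{
				Issuer:       "https://idp.mycompany.com/oidc",
				Audiences:    []string{"databricks"},
				SubjectClaim: "sub",
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created policy: %+v\n", policy)
}

For updates, the same Policy value would be sent together with an update mask given as a single comma-separated list of field names (no spaces), per the UPDATE_MASK argument described above.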
445
cmd/account/service-principal-federation-policy/service-principal-federation-policy.go
generated
Executable file
|
@ -0,0 +1,445 @@
|
|||
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
|
||||
|
||||
package service_principal_federation_policy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/cmd/root"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
"github.com/databricks/databricks-sdk-go/service/oauth2"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var cmdOverrides []func(*cobra.Command)
|
||||
|
||||
func New() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "service-principal-federation-policy",
|
||||
Short: `These APIs manage service principal federation policies.`,
|
||||
Long: `These APIs manage service principal federation policies.
|
||||
|
||||
Service principal federation, also known as Workload Identity Federation,
|
||||
allows your automated workloads running outside of Databricks to securely
|
||||
access Databricks APIs without the need for Databricks secrets. With Workload
|
||||
Identity Federation, your application (or workload) authenticates to
|
||||
Databricks as a Databricks service principal, using tokens provided by the
|
||||
workload runtime.
|
||||
|
||||
Databricks strongly recommends using Workload Identity Federation to
|
||||
authenticate to Databricks from automated workloads, over alternatives such as
|
||||
OAuth client secrets or Personal Access Tokens, whenever possible. Workload
|
||||
Identity Federation is supported by many popular services, including Github
|
||||
Actions, Azure DevOps, GitLab, Terraform Cloud, and Kubernetes clusters, among
|
||||
others.
|
||||
|
||||
Workload identity federation is configured in your Databricks account using a
|
||||
service principal federation policy. A service principal federation policy
|
||||
specifies: * which IdP, or issuer, the service principal is allowed to
|
||||
authenticate from * which workload identity, or subject, is allowed to
|
||||
authenticate as the Databricks service principal
|
||||
|
||||
To configure a federation policy, you provide the following: * The required
|
||||
token __issuer__, as specified in the “iss” claim of workload identity
|
||||
tokens. The issuer is an https URL that identifies the workload identity
|
||||
provider. * The required token __subject__, as specified in the “sub”
|
||||
claim of workload identity tokens. The subject uniquely identifies the
|
||||
workload in the workload runtime environment. * The allowed token
|
||||
__audiences__, as specified in the “aud” claim of workload identity
|
||||
tokens. The audience is intended to represent the recipient of the token. As
|
||||
long as the audience in the token matches at least one audience in the policy,
|
||||
the token is considered a match. If unspecified, the default value is your
|
||||
Databricks account id. * Optionally, the public keys used to validate the
|
||||
signature of the workload identity tokens, in JWKS format. If unspecified
|
||||
(recommended), Databricks automatically fetches the public keys from the
|
||||
issuer’s well known endpoint. Databricks strongly recommends relying on the
|
||||
issuer’s well known endpoint for discovering public keys.
|
||||
|
||||
An example service principal federation policy, for a Github Actions workload,
|
||||
is: issuer: "https://token.actions.githubusercontent.com" audiences:
|
||||
["https://github.com/my-github-org"] subject:
|
||||
"repo:my-github-org/my-repo:environment:prod"
|
||||
|
||||
An example JWT token body that matches this policy and could be used to
|
||||
authenticate to Databricks is: { "iss":
|
||||
"https://token.actions.githubusercontent.com", "aud":
|
||||
"https://github.com/my-github-org", "sub":
|
||||
"repo:my-github-org/my-repo:environment:prod" }
|
||||
|
||||
You may also need to configure the workload runtime to generate tokens for
|
||||
your workloads.
|
||||
|
||||
You do not need to configure an OAuth application in Databricks to use token
|
||||
federation.`,
|
||||
GroupID: "oauth2",
|
||||
Annotations: map[string]string{
|
||||
"package": "oauth2",
|
||||
},
|
||||
|
||||
// This service is being previewed; hide from help output.
|
||||
Hidden: true,
|
||||
}
|
||||
|
||||
// Add methods
|
||||
cmd.AddCommand(newCreate())
|
||||
cmd.AddCommand(newDelete())
|
||||
cmd.AddCommand(newGet())
|
||||
cmd.AddCommand(newList())
|
||||
cmd.AddCommand(newUpdate())
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range cmdOverrides {
|
||||
fn(cmd)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start create command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var createOverrides []func(
|
||||
*cobra.Command,
|
||||
*oauth2.CreateServicePrincipalFederationPolicyRequest,
|
||||
)
|
||||
|
||||
func newCreate() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var createReq oauth2.CreateServicePrincipalFederationPolicyRequest
|
||||
createReq.Policy = &oauth2.FederationPolicy{}
|
||||
var createJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&createReq.Policy.Description, "description", createReq.Policy.Description, `Description of the federation policy.`)
|
||||
cmd.Flags().StringVar(&createReq.Policy.Name, "name", createReq.Policy.Name, `Name of the federation policy.`)
|
||||
// TODO: complex arg: oidc_policy
|
||||
|
||||
cmd.Use = "create SERVICE_PRINCIPAL_ID"
|
||||
cmd.Short = `Create service principal federation policy.`
|
||||
cmd.Long = `Create service principal federation policy.
|
||||
|
||||
Arguments:
|
||||
SERVICE_PRINCIPAL_ID: The service principal id for the federation policy.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustAccountClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := createJson.Unmarshal(&createReq.Policy)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
_, err = fmt.Sscan(args[0], &createReq.ServicePrincipalId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0])
|
||||
}
|
||||
|
||||
response, err := a.ServicePrincipalFederationPolicy.Create(ctx, createReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range createOverrides {
|
||||
fn(cmd, &createReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start delete command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var deleteOverrides []func(
|
||||
*cobra.Command,
|
||||
*oauth2.DeleteServicePrincipalFederationPolicyRequest,
|
||||
)
|
||||
|
||||
func newDelete() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var deleteReq oauth2.DeleteServicePrincipalFederationPolicyRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "delete SERVICE_PRINCIPAL_ID POLICY_ID"
|
||||
cmd.Short = `Delete service principal federation policy.`
|
||||
cmd.Long = `Delete service principal federation policy.
|
||||
|
||||
Arguments:
|
||||
SERVICE_PRINCIPAL_ID: The service principal id for the federation policy.
|
||||
POLICY_ID: `
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(2)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustAccountClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
_, err = fmt.Sscan(args[0], &deleteReq.ServicePrincipalId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0])
|
||||
}
|
||||
deleteReq.PolicyId = args[1]
|
||||
|
||||
err = a.ServicePrincipalFederationPolicy.Delete(ctx, deleteReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range deleteOverrides {
|
||||
fn(cmd, &deleteReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start get command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var getOverrides []func(
|
||||
*cobra.Command,
|
||||
*oauth2.GetServicePrincipalFederationPolicyRequest,
|
||||
)
|
||||
|
||||
func newGet() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var getReq oauth2.GetServicePrincipalFederationPolicyRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "get SERVICE_PRINCIPAL_ID POLICY_ID"
|
||||
cmd.Short = `Get service principal federation policy.`
|
||||
cmd.Long = `Get service principal federation policy.
|
||||
|
||||
Arguments:
|
||||
SERVICE_PRINCIPAL_ID: The service principal id for the federation policy.
|
||||
POLICY_ID: `
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(2)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustAccountClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
_, err = fmt.Sscan(args[0], &getReq.ServicePrincipalId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0])
|
||||
}
|
||||
getReq.PolicyId = args[1]
|
||||
|
||||
response, err := a.ServicePrincipalFederationPolicy.Get(ctx, getReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range getOverrides {
|
||||
fn(cmd, &getReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start list command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var listOverrides []func(
|
||||
*cobra.Command,
|
||||
*oauth2.ListServicePrincipalFederationPoliciesRequest,
|
||||
)
|
||||
|
||||
func newList() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var listReq oauth2.ListServicePrincipalFederationPoliciesRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``)
|
||||
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``)
|
||||
|
||||
cmd.Use = "list SERVICE_PRINCIPAL_ID"
|
||||
cmd.Short = `List service principal federation policies.`
|
||||
cmd.Long = `List service principal federation policies.
|
||||
|
||||
Arguments:
|
||||
SERVICE_PRINCIPAL_ID: The service principal id for the federation policy.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustAccountClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
_, err = fmt.Sscan(args[0], &listReq.ServicePrincipalId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0])
|
||||
}
|
||||
|
||||
response := a.ServicePrincipalFederationPolicy.List(ctx, listReq)
|
||||
return cmdio.RenderIterator(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range listOverrides {
|
||||
fn(cmd, &listReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start update command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var updateOverrides []func(
|
||||
*cobra.Command,
|
||||
*oauth2.UpdateServicePrincipalFederationPolicyRequest,
|
||||
)
|
||||
|
||||
func newUpdate() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var updateReq oauth2.UpdateServicePrincipalFederationPolicyRequest
|
||||
updateReq.Policy = &oauth2.FederationPolicy{}
|
||||
var updateJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&updateReq.Policy.Description, "description", updateReq.Policy.Description, `Description of the federation policy.`)
|
||||
cmd.Flags().StringVar(&updateReq.Policy.Name, "name", updateReq.Policy.Name, `Name of the federation policy.`)
|
||||
// TODO: complex arg: oidc_policy
|
||||
|
||||
cmd.Use = "update SERVICE_PRINCIPAL_ID POLICY_ID UPDATE_MASK"
|
||||
cmd.Short = `Update service principal federation policy.`
|
||||
cmd.Long = `Update service principal federation policy.
|
||||
|
||||
Arguments:
|
||||
SERVICE_PRINCIPAL_ID: The service principal id for the federation policy.
|
||||
POLICY_ID:
|
||||
UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask
|
||||
specifies which fields of the setting payload will be updated. The field
|
||||
mask needs to be supplied as single string. To specify multiple fields in
|
||||
the field mask, use comma as the separator (no space).`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(3)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustAccountClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := updateJson.Unmarshal(&updateReq.Policy)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
_, err = fmt.Sscan(args[0], &updateReq.ServicePrincipalId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0])
|
||||
}
|
||||
updateReq.PolicyId = args[1]
|
||||
updateReq.UpdateMask = args[2]
|
||||
|
||||
response, err := a.ServicePrincipalFederationPolicy.Update(ctx, updateReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range updateOverrides {
|
||||
fn(cmd, &updateReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// end service ServicePrincipalFederationPolicy
|
|
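The service principal variant above follows the same shape, keyed by a numeric service principal ID. A hedged Go sketch of the GitHub Actions example from its help text follows; the OidcFederationPolicy type and its Issuer/Audiences/Subject field names are again assumptions, and 12345 is a placeholder ID.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/oauth2"
)

func main() {
	ctx := context.Background()
	a, err := databricks.NewAccountClient()
	if err != nil {
		log.Fatal(err)
	}

	// Allow a GitHub Actions workflow in my-github-org/my-repo (environment "prod")
	// to authenticate as service principal 12345, mirroring the example in the help
	// text. 12345 is a placeholder ID; OidcPolicy and its field names are assumptions.
	created, err := a.ServicePrincipalFederationPolicy.Create(ctx, oauth2.CreateServicePrincipalFederationPolicyRequest{
		ServicePrincipalId: 12345,
		Policy: &oauth2.FederationPolicy{
			Description: "GitHub Actions workload identity federation",
			OidcPolicy: &oauth2.OidcFederationPolicy{
				Issuer:    "https://token.actions.githubusercontent.com",
				Audiences: []string{"https://github.com/my-github-org"},
				Subject:   "repo:my-github-org/my-repo:environment:prod",
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created policy: %+v\n", created)
}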
@ -204,6 +204,9 @@ func newCreate() *cobra.Command {
|
|||
cmd.Flags().StringVar(&createReq.ClusterName, "cluster-name", createReq.ClusterName, `Cluster name requested by the user.`)
|
||||
// TODO: map via StringToStringVar: custom_tags
|
||||
cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [
|
||||
DATA_SECURITY_MODE_AUTO,
|
||||
DATA_SECURITY_MODE_DEDICATED,
|
||||
DATA_SECURITY_MODE_STANDARD,
|
||||
LEGACY_PASSTHROUGH,
|
||||
LEGACY_SINGLE_USER,
|
||||
LEGACY_SINGLE_USER_STANDARD,
|
||||
|
@ -220,6 +223,8 @@ func newCreate() *cobra.Command {
|
|||
// TODO: complex arg: gcp_attributes
|
||||
// TODO: array: init_scripts
|
||||
cmd.Flags().StringVar(&createReq.InstancePoolId, "instance-pool-id", createReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`)
|
||||
cmd.Flags().BoolVar(&createReq.IsSingleNode, "is-single-node", createReq.IsSingleNode, `This field can only be used with kind.`)
|
||||
cmd.Flags().Var(&createReq.Kind, "kind", `The kind of compute described by this compute specification. Supported values: [CLASSIC_PREVIEW]`)
|
||||
cmd.Flags().StringVar(&createReq.NodeTypeId, "node-type-id", createReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`)
|
||||
cmd.Flags().IntVar(&createReq.NumWorkers, "num-workers", createReq.NumWorkers, `Number of worker nodes that this cluster should have.`)
|
||||
cmd.Flags().StringVar(&createReq.PolicyId, "policy-id", createReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`)
|
||||
|
@ -228,6 +233,7 @@ func newCreate() *cobra.Command {
|
|||
// TODO: map via StringToStringVar: spark_conf
|
||||
// TODO: map via StringToStringVar: spark_env_vars
|
||||
// TODO: array: ssh_public_keys
|
||||
cmd.Flags().BoolVar(&createReq.UseMlRuntime, "use-ml-runtime", createReq.UseMlRuntime, `This field can only be used with kind.`)
|
||||
// TODO: complex arg: workload_type
|
||||
|
||||
cmd.Use = "create SPARK_VERSION"
|
||||
|
@ -468,6 +474,9 @@ func newEdit() *cobra.Command {
|
|||
cmd.Flags().StringVar(&editReq.ClusterName, "cluster-name", editReq.ClusterName, `Cluster name requested by the user.`)
|
||||
// TODO: map via StringToStringVar: custom_tags
|
||||
cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [
|
||||
DATA_SECURITY_MODE_AUTO,
|
||||
DATA_SECURITY_MODE_DEDICATED,
|
||||
DATA_SECURITY_MODE_STANDARD,
|
||||
LEGACY_PASSTHROUGH,
|
||||
LEGACY_SINGLE_USER,
|
||||
LEGACY_SINGLE_USER_STANDARD,
|
||||
|
@ -484,6 +493,8 @@ func newEdit() *cobra.Command {
|
|||
// TODO: complex arg: gcp_attributes
|
||||
// TODO: array: init_scripts
|
||||
cmd.Flags().StringVar(&editReq.InstancePoolId, "instance-pool-id", editReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`)
|
||||
cmd.Flags().BoolVar(&editReq.IsSingleNode, "is-single-node", editReq.IsSingleNode, `This field can only be used with kind.`)
|
||||
cmd.Flags().Var(&editReq.Kind, "kind", `The kind of compute described by this compute specification. Supported values: [CLASSIC_PREVIEW]`)
|
||||
cmd.Flags().StringVar(&editReq.NodeTypeId, "node-type-id", editReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`)
|
||||
cmd.Flags().IntVar(&editReq.NumWorkers, "num-workers", editReq.NumWorkers, `Number of worker nodes that this cluster should have.`)
|
||||
cmd.Flags().StringVar(&editReq.PolicyId, "policy-id", editReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`)
|
||||
|
@ -492,6 +503,7 @@ func newEdit() *cobra.Command {
|
|||
// TODO: map via StringToStringVar: spark_conf
|
||||
// TODO: map via StringToStringVar: spark_env_vars
|
||||
// TODO: array: ssh_public_keys
|
||||
cmd.Flags().BoolVar(&editReq.UseMlRuntime, "use-ml-runtime", editReq.UseMlRuntime, `This field can only be used with kind.`)
|
||||
// TODO: complex arg: workload_type
|
||||
|
||||
cmd.Use = "edit CLUSTER_ID SPARK_VERSION"
|
||||
|
|
|
@ -828,6 +828,7 @@ func newMigrate() *cobra.Command {
|
|||
|
||||
cmd.Flags().StringVar(&migrateReq.DisplayName, "display-name", migrateReq.DisplayName, `Display name for the new Lakeview dashboard.`)
|
||||
cmd.Flags().StringVar(&migrateReq.ParentPath, "parent-path", migrateReq.ParentPath, `The workspace path of the folder to contain the migrated Lakeview dashboard.`)
|
||||
cmd.Flags().BoolVar(&migrateReq.UpdateParameterSyntax, "update-parameter-syntax", migrateReq.UpdateParameterSyntax, `Flag to indicate if mustache parameter syntax ({{ param }}) should be auto-updated to named syntax (:param) when converting datasets in the dashboard.`)
|
||||
|
||||
cmd.Use = "migrate SOURCE_DASHBOARD_ID"
|
||||
cmd.Short = `Migrate dashboard.`
|
||||
|
|
12
go.mod
|
@ -2,19 +2,19 @@ module github.com/databricks/cli
|
|||
|
||||
go 1.23
|
||||
|
||||
toolchain go1.23.2
|
||||
toolchain go1.23.4
|
||||
|
||||
require (
|
||||
github.com/Masterminds/semver/v3 v3.3.1 // MIT
|
||||
github.com/briandowns/spinner v1.23.1 // Apache 2.0
|
||||
github.com/databricks/databricks-sdk-go v0.53.0 // Apache 2.0
|
||||
github.com/databricks/databricks-sdk-go v0.54.0 // Apache 2.0
|
||||
github.com/fatih/color v1.18.0 // MIT
|
||||
github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
|
||||
github.com/google/uuid v1.6.0 // BSD-3-Clause
|
||||
github.com/hashicorp/go-version v1.7.0 // MPL 2.0
|
||||
github.com/hashicorp/hc-install v0.9.0 // MPL 2.0
|
||||
github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0
|
||||
github.com/hashicorp/terraform-json v0.23.0 // MPL 2.0
|
||||
github.com/hexops/gotextdiff v1.0.3 // BSD 3-Clause "New" or "Revised" License
|
||||
github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause
|
||||
github.com/mattn/go-isatty v0.0.20 // MIT
|
||||
github.com/nwidger/jsoncolor v0.3.2 // MIT
|
||||
|
@ -23,6 +23,7 @@ require (
|
|||
github.com/spf13/cobra v1.8.1 // Apache 2.0
|
||||
github.com/spf13/pflag v1.0.5 // BSD-3-Clause
|
||||
github.com/stretchr/testify v1.10.0 // MIT
|
||||
github.com/wI2L/jsondiff v0.6.1 // MIT
|
||||
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
|
||||
golang.org/x/mod v0.22.0
|
||||
golang.org/x/oauth2 v0.24.0
|
||||
|
@ -61,6 +62,10 @@ require (
|
|||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/tidwall/gjson v1.18.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.1 // indirect
|
||||
github.com/tidwall/sjson v1.2.5 // indirect
|
||||
github.com/zclconf/go-cty v1.15.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
|
||||
|
@ -75,5 +80,4 @@ require (
|
|||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect
|
||||
google.golang.org/grpc v1.64.1 // indirect
|
||||
google.golang.org/protobuf v1.34.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
)
|
||||
|
|
|
@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
|
|||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
github.com/databricks/databricks-sdk-go v0.53.0 h1:rZMXaTC3HNKZt+m4C4I/dY3EdZj+kl/sVd/Kdq55Qfo=
|
||||
github.com/databricks/databricks-sdk-go v0.53.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
|
||||
github.com/databricks/databricks-sdk-go v0.54.0 h1:L8gsA3NXs+uYU3QtW/OUgjxMQxOH24k0MT9JhB3zLlM=
|
||||
github.com/databricks/databricks-sdk-go v0.54.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
@ -48,8 +48,6 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
|||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
|
||||
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
|
||||
|
@ -111,6 +109,8 @@ github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVW
|
|||
github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg=
|
||||
github.com/hashicorp/terraform-json v0.23.0 h1:sniCkExU4iKtTADReHzACkk8fnpQXrdD2xoR+lppBkI=
|
||||
github.com/hashicorp/terraform-json v0.23.0/go.mod h1:MHdXbBAbSg0GvzuWazEGKAn/cyNfIB7mN6y7KJN6y2c=
|
||||
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
||||
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
|
||||
|
@ -166,6 +166,18 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
|||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
||||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
||||
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
|
||||
github.com/wI2L/jsondiff v0.6.1 h1:ISZb9oNWbP64LHnu4AUhsMF5W0FIj5Ok3Krip9Shqpw=
|
||||
github.com/wI2L/jsondiff v0.6.1/go.mod h1:KAEIojdQq66oJiHhDyQez2x+sRit0vIzC9KeK0yizxM=
|
||||
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
|
||||
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
|
||||
github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ=
|
||||
|
@ -271,8 +283,6 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
|||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
|
|
@ -0,0 +1,132 @@
|
|||
package bundle_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/integration/internal/acc"
|
||||
"github.com/databricks/cli/internal/testcli"
|
||||
"github.com/databricks/cli/internal/testutil"
|
||||
"github.com/databricks/cli/libs/python/pythontest"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var pythonVersions = []string{
|
||||
"3.8",
|
||||
"3.9",
|
||||
"3.10",
|
||||
"3.11",
|
||||
"3.12",
|
||||
"3.13",
|
||||
}
|
||||
|
||||
var pythonVersionsShort = []string{
|
||||
"3.9",
|
||||
"3.12",
|
||||
}
|
||||
|
||||
var extraInstalls = map[string][]string{
|
||||
"3.12": {"setuptools"},
|
||||
"3.13": {"setuptools"},
|
||||
}
|
||||
|
||||
func TestDefaultPython(t *testing.T) {
|
||||
versions := pythonVersions
|
||||
if testing.Short() {
|
||||
versions = pythonVersionsShort
|
||||
}
|
||||
|
||||
for _, pythonVersion := range versions {
|
||||
t.Run(pythonVersion, func(t *testing.T) {
|
||||
testDefaultPython(t, pythonVersion)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testDefaultPython(t *testing.T, pythonVersion string) {
|
||||
ctx, wt := acc.WorkspaceTest(t)
|
||||
|
||||
uniqueProjectId := testutil.RandomName("")
|
||||
ctx, replacements := testcli.WithReplacementsMap(ctx)
|
||||
replacements.Set(uniqueProjectId, "$UNIQUE_PRJ")
|
||||
|
||||
user, err := wt.W.CurrentUser.Me(ctx)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, user)
|
||||
testcli.PrepareReplacementsUser(t, replacements, *user)
|
||||
testcli.PrepareReplacements(t, replacements, wt.W)
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
testutil.Chdir(t, tmpDir)
|
||||
|
||||
opts := pythontest.VenvOpts{
|
||||
PythonVersion: pythonVersion,
|
||||
Dir: tmpDir,
|
||||
}
|
||||
|
||||
pythontest.RequireActivatedPythonEnv(t, ctx, &opts)
|
||||
extras, ok := extraInstalls[pythonVersion]
|
||||
if ok {
|
||||
args := append([]string{"pip", "install", "--python", opts.PythonExe}, extras...)
|
||||
cmd := exec.Command("uv", args...)
|
||||
require.NoError(t, cmd.Run())
|
||||
}
|
||||
|
||||
projectName := "project_name_" + uniqueProjectId
|
||||
|
||||
initConfig := map[string]string{
|
||||
"project_name": projectName,
|
||||
"include_notebook": "yes",
|
||||
"include_python": "yes",
|
||||
"include_dlt": "yes",
|
||||
}
|
||||
b, err := json.Marshal(initConfig)
|
||||
require.NoError(t, err)
|
||||
err = os.WriteFile(filepath.Join(tmpDir, "config.json"), b, 0o644)
|
||||
require.NoError(t, err)
|
||||
|
||||
testcli.AssertOutput(
|
||||
t,
|
||||
ctx,
|
||||
[]string{"bundle", "init", "default-python", "--config-file", "config.json"},
|
||||
testutil.TestData("testdata/default_python/bundle_init.txt"),
|
||||
)
|
||||
testutil.Chdir(t, projectName)
|
||||
|
||||
t.Cleanup(func() {
|
||||
// Delete the stack
|
||||
testcli.RequireSuccessfulRun(t, ctx, "bundle", "destroy", "--auto-approve")
|
||||
})
|
||||
|
||||
testcli.AssertOutput(
|
||||
t,
|
||||
ctx,
|
||||
[]string{"bundle", "validate"},
|
||||
testutil.TestData("testdata/default_python/bundle_validate.txt"),
|
||||
)
|
||||
testcli.AssertOutput(
|
||||
t,
|
||||
ctx,
|
||||
[]string{"bundle", "deploy"},
|
||||
testutil.TestData("testdata/default_python/bundle_deploy.txt"),
|
||||
)
|
||||
|
||||
testcli.AssertOutputJQ(
|
||||
t,
|
||||
ctx,
|
||||
[]string{"bundle", "summary", "--output", "json"},
|
||||
testutil.TestData("testdata/default_python/bundle_summary.txt"),
|
||||
[]string{
|
||||
"/bundle/terraform/exec_path",
|
||||
"/resources/jobs/project_name_$UNIQUE_PRJ_job/email_notifications",
|
||||
"/resources/jobs/project_name_$UNIQUE_PRJ_job/job_clusters/0/new_cluster/node_type_id",
|
||||
"/resources/jobs/project_name_$UNIQUE_PRJ_job/url",
|
||||
"/resources/pipelines/project_name_$UNIQUE_PRJ_pipeline/catalog",
|
||||
"/resources/pipelines/project_name_$UNIQUE_PRJ_pipeline/url",
|
||||
"/workspace/current_user",
|
||||
},
|
||||
)
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
Building project_name_$UNIQUE_PRJ...
|
||||
Uploading project_name_$UNIQUE_PRJ-0.0.1+<NUMID>.<NUMID>-py3-none-any.whl...
|
||||
Uploading bundle files to /Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files...
|
||||
Deploying resources...
|
||||
Updating deployment state...
|
||||
Deployment complete!
|
|
@ -0,0 +1,8 @@
|
|||
|
||||
Welcome to the default Python template for Databricks Asset Bundles!
|
||||
Workspace to use (auto-detected, edit in 'project_name_$UNIQUE_PRJ/databricks.yml'): https://$DATABRICKS_HOST
|
||||
|
||||
✨ Your new project has been created in the 'project_name_$UNIQUE_PRJ' directory!
|
||||
|
||||
Please refer to the README.md file for "getting started" instructions.
|
||||
See also the documentation at https://docs.databricks.com/dev-tools/bundles/index.html.
|
|
@ -0,0 +1,185 @@
|
|||
{
|
||||
"bundle": {
|
||||
"name": "project_name_$UNIQUE_PRJ",
|
||||
"target": "dev",
|
||||
"environment": "dev",
|
||||
"terraform": {
|
||||
"exec_path": "/tmp/.../terraform"
|
||||
},
|
||||
"git": {
|
||||
"bundle_root_path": ".",
|
||||
"inferred": true
|
||||
},
|
||||
"mode": "development",
|
||||
"deployment": {
|
||||
"lock": {
|
||||
"enabled": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"include": [
|
||||
"resources/project_name_$UNIQUE_PRJ.job.yml",
|
||||
"resources/project_name_$UNIQUE_PRJ.pipeline.yml"
|
||||
],
|
||||
"workspace": {
|
||||
"host": "https://$DATABRICKS_HOST",
|
||||
"current_user": {
|
||||
"active": true,
|
||||
"displayName": "$USERNAME",
|
||||
"emails": [
|
||||
{
|
||||
"primary": true,
|
||||
"type": "work",
|
||||
"value": "$USERNAME"
|
||||
}
|
||||
],
|
||||
"groups": [
|
||||
{
|
||||
"$ref": "Groups/$USER.Groups[0]",
|
||||
"display": "team.engineering",
|
||||
"type": "direct",
|
||||
"value": "$USER.Groups[0]"
|
||||
}
|
||||
],
|
||||
"id": "$USER.Id",
|
||||
"name": {
|
||||
"familyName": "$USERNAME",
|
||||
"givenName": "$USERNAME"
|
||||
},
|
||||
"schemas": [
|
||||
"urn:ietf:params:scim:schemas:core:2.0:User",
|
||||
"urn:ietf:params:scim:schemas:extension:workspace:2.0:User"
|
||||
],
|
||||
"short_name": "$USERNAME",
|
||||
"userName": "$USERNAME"
|
||||
},
|
||||
"root_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev",
|
||||
"file_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files",
|
||||
"resource_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/resources",
|
||||
"artifact_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/artifacts",
|
||||
"state_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/state"
|
||||
},
|
||||
"resources": {
|
||||
"jobs": {
|
||||
"project_name_$UNIQUE_PRJ_job": {
|
||||
"deployment": {
|
||||
"kind": "BUNDLE",
|
||||
"metadata_file_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/state/metadata.json"
|
||||
},
|
||||
"edit_mode": "UI_LOCKED",
|
||||
"email_notifications": {
|
||||
"on_failure": [
|
||||
"$USERNAME"
|
||||
]
|
||||
},
|
||||
"format": "MULTI_TASK",
|
||||
"id": "<NUMID>",
|
||||
"job_clusters": [
|
||||
{
|
||||
"job_cluster_key": "job_cluster",
|
||||
"new_cluster": {
|
||||
"autoscale": {
|
||||
"max_workers": 4,
|
||||
"min_workers": 1
|
||||
},
|
||||
"node_type_id": "i3.xlarge",
|
||||
"spark_version": "15.4.x-scala2.12"
|
||||
}
|
||||
}
|
||||
],
|
||||
"max_concurrent_runs": 4,
|
||||
"name": "[dev $USERNAME] project_name_$UNIQUE_PRJ_job",
|
||||
"queue": {
|
||||
"enabled": true
|
||||
},
|
||||
"tags": {
|
||||
"dev": "$USERNAME"
|
||||
},
|
||||
"tasks": [
|
||||
{
|
||||
"job_cluster_key": "job_cluster",
|
||||
"notebook_task": {
|
||||
"notebook_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files/src/notebook"
|
||||
},
|
||||
"task_key": "notebook_task"
|
||||
},
|
||||
{
|
||||
"depends_on": [
|
||||
{
|
||||
"task_key": "notebook_task"
|
||||
}
|
||||
],
|
||||
"pipeline_task": {
|
||||
"pipeline_id": "${resources.pipelines.project_name_$UNIQUE_PRJ_pipeline.id}"
|
||||
},
|
||||
"task_key": "refresh_pipeline"
|
||||
},
|
||||
{
|
||||
"depends_on": [
|
||||
{
|
||||
"task_key": "refresh_pipeline"
|
||||
}
|
||||
],
|
||||
"job_cluster_key": "job_cluster",
|
||||
"libraries": [
|
||||
{
|
||||
"whl": "dist/*.whl"
|
||||
}
|
||||
],
|
||||
"python_wheel_task": {
|
||||
"entry_point": "main",
|
||||
"package_name": "project_name_$UNIQUE_PRJ"
|
||||
},
|
||||
"task_key": "main_task"
|
||||
}
|
||||
],
|
||||
"trigger": {
|
||||
"pause_status": "PAUSED",
|
||||
"periodic": {
|
||||
"interval": 1,
|
||||
"unit": "DAYS"
|
||||
}
|
||||
},
|
||||
"url": "https://$DATABRICKS_HOST/jobs/<NUMID>?o=<NUMID>"
|
||||
}
|
||||
},
|
||||
"pipelines": {
|
||||
"project_name_$UNIQUE_PRJ_pipeline": {
|
||||
"catalog": "main",
|
||||
"configuration": {
|
||||
"bundle.sourcePath": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files/src"
|
||||
},
|
||||
"deployment": {
|
||||
"kind": "BUNDLE",
|
||||
"metadata_file_path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/state/metadata.json"
|
||||
},
|
||||
"development": true,
|
||||
"id": "<UUID>",
|
||||
"libraries": [
|
||||
{
|
||||
"notebook": {
|
||||
"path": "/Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev/files/src/dlt_pipeline"
|
||||
}
|
||||
}
|
||||
],
|
||||
"name": "[dev $USERNAME] project_name_$UNIQUE_PRJ_pipeline",
|
||||
"target": "project_name_$UNIQUE_PRJ_dev",
|
||||
"url": "https://$DATABRICKS_HOST/pipelines/<UUID>?o=<NUMID>"
|
||||
}
|
||||
}
|
||||
},
|
||||
"sync": {
|
||||
"paths": [
|
||||
"."
|
||||
]
|
||||
},
|
||||
"presets": {
|
||||
"name_prefix": "[dev $USERNAME] ",
|
||||
"pipelines_development": true,
|
||||
"trigger_pause_status": "PAUSED",
|
||||
"jobs_max_concurrent_runs": 4,
|
||||
"tags": {
|
||||
"dev": "$USERNAME"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
Name: project_name_$UNIQUE_PRJ
|
||||
Target: dev
|
||||
Workspace:
|
||||
Host: https://$DATABRICKS_HOST
|
||||
User: $USERNAME
|
||||
Path: /Workspace/Users/$USERNAME/.bundle/project_name_$UNIQUE_PRJ/dev
|
||||
|
||||
Validation OK!
|
|
@@ -0,0 +1,224 @@
package testcli

import (
    "context"
    "fmt"
    "os"
    "regexp"
    "slices"
    "strings"
    "testing"

    "github.com/databricks/cli/internal/testutil"
    "github.com/databricks/cli/libs/iamutil"
    "github.com/databricks/cli/libs/testdiff"
    "github.com/databricks/databricks-sdk-go"
    "github.com/databricks/databricks-sdk-go/service/iam"
    "github.com/stretchr/testify/assert"
)

var OverwriteMode = os.Getenv("TESTS_OUTPUT") == "OVERWRITE"

func ReadFile(t testutil.TestingT, ctx context.Context, filename string) string {
    data, err := os.ReadFile(filename)
    if os.IsNotExist(err) {
        return ""
    }
    assert.NoError(t, err)
    // On CI, on Windows \n in the file somehow end up as \r\n
    return NormalizeNewlines(string(data))
}

func captureOutput(t testutil.TestingT, ctx context.Context, args []string) string {
    t.Logf("run args: [%s]", strings.Join(args, ", "))
    r := NewRunner(t, ctx, args...)
    stdout, stderr, err := r.Run()
    assert.NoError(t, err)
    out := stderr.String() + stdout.String()
    return ReplaceOutput(t, ctx, out)
}

func WriteFile(t testutil.TestingT, filename, data string) {
    t.Logf("Overwriting %s", filename)
    err := os.WriteFile(filename, []byte(data), 0o644)
    assert.NoError(t, err)
}

func AssertOutput(t testutil.TestingT, ctx context.Context, args []string, expectedPath string) {
    expected := ReadFile(t, ctx, expectedPath)

    out := captureOutput(t, ctx, args)

    if out != expected {
        actual := fmt.Sprintf("Output from %v", args)
        testdiff.AssertEqualTexts(t, expectedPath, actual, expected, out)

        if OverwriteMode {
            WriteFile(t, expectedPath, out)
        }
    }
}

func AssertOutputJQ(t testutil.TestingT, ctx context.Context, args []string, expectedPath string, ignorePaths []string) {
    expected := ReadFile(t, ctx, expectedPath)

    out := captureOutput(t, ctx, args)

    if out != expected {
        actual := fmt.Sprintf("Output from %v", args)
        testdiff.AssertEqualJQ(t.(*testing.T), expectedPath, actual, expected, out, ignorePaths)

        if OverwriteMode {
            WriteFile(t, expectedPath, out)
        }
    }
}

var (
    uuidRegex        = regexp.MustCompile(`[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}`)
    numIdRegex       = regexp.MustCompile(`[0-9]{3,}`)
    privatePathRegex = regexp.MustCompile(`(/tmp|/private)(/.*)/([a-zA-Z0-9]+)`)
)

func ReplaceOutput(t testutil.TestingT, ctx context.Context, out string) string {
    out = NormalizeNewlines(out)
    replacements := GetReplacementsMap(ctx)
    if replacements == nil {
        t.Fatal("WithReplacementsMap was not called")
    }
    out = replacements.Replace(out)
    out = uuidRegex.ReplaceAllString(out, "<UUID>")
    out = numIdRegex.ReplaceAllString(out, "<NUMID>")
    out = privatePathRegex.ReplaceAllString(out, "/tmp/.../$3")

    return out
}

type key int

const (
    replacementsMapKey = key(1)
)

type Replacement struct {
    Old string
    New string
}

type ReplacementsContext struct {
    Repls []Replacement
}

func (r *ReplacementsContext) Replace(s string) string {
    // QQQ Should probably only replace whole words
    for _, repl := range r.Repls {
        s = strings.ReplaceAll(s, repl.Old, repl.New)
    }
    return s
}

func (r *ReplacementsContext) Set(old, new string) {
    if old == "" || new == "" {
        return
    }
    r.Repls = append(r.Repls, Replacement{Old: old, New: new})
}

func WithReplacementsMap(ctx context.Context) (context.Context, *ReplacementsContext) {
    value := ctx.Value(replacementsMapKey)
    if value != nil {
        if existingMap, ok := value.(*ReplacementsContext); ok {
            return ctx, existingMap
        }
    }

    newMap := &ReplacementsContext{}
    ctx = context.WithValue(ctx, replacementsMapKey, newMap)
    return ctx, newMap
}

func GetReplacementsMap(ctx context.Context) *ReplacementsContext {
    value := ctx.Value(replacementsMapKey)
    if value != nil {
        if existingMap, ok := value.(*ReplacementsContext); ok {
            return existingMap
        }
    }
    return nil
}

func PrepareReplacements(t testutil.TestingT, r *ReplacementsContext, w *databricks.WorkspaceClient) {
    // in some clouds (gcp) w.Config.Host includes "https://" prefix in others it's really just a host (azure)
    host := strings.TrimPrefix(strings.TrimPrefix(w.Config.Host, "http://"), "https://")
    r.Set(host, "$DATABRICKS_HOST")
    r.Set(w.Config.ClusterID, "$DATABRICKS_CLUSTER_ID")
    r.Set(w.Config.WarehouseID, "$DATABRICKS_WAREHOUSE_ID")
    r.Set(w.Config.ServerlessComputeID, "$DATABRICKS_SERVERLESS_COMPUTE_ID")
    r.Set(w.Config.MetadataServiceURL, "$DATABRICKS_METADATA_SERVICE_URL")
    r.Set(w.Config.AccountID, "$DATABRICKS_ACCOUNT_ID")
    r.Set(w.Config.Token, "$DATABRICKS_TOKEN")
    r.Set(w.Config.Username, "$DATABRICKS_USERNAME")
    r.Set(w.Config.Password, "$DATABRICKS_PASSWORD")
    r.Set(w.Config.Profile, "$DATABRICKS_CONFIG_PROFILE")
    r.Set(w.Config.ConfigFile, "$DATABRICKS_CONFIG_FILE")
    r.Set(w.Config.GoogleServiceAccount, "$DATABRICKS_GOOGLE_SERVICE_ACCOUNT")
    r.Set(w.Config.GoogleCredentials, "$GOOGLE_CREDENTIALS")
    r.Set(w.Config.AzureResourceID, "$DATABRICKS_AZURE_RESOURCE_ID")
    r.Set(w.Config.AzureClientSecret, "$ARM_CLIENT_SECRET")
    // r.Set(w.Config.AzureClientID, "$ARM_CLIENT_ID")
    r.Set(w.Config.AzureClientID, "$USERNAME")
    r.Set(w.Config.AzureTenantID, "$ARM_TENANT_ID")
    r.Set(w.Config.ActionsIDTokenRequestURL, "$ACTIONS_ID_TOKEN_REQUEST_URL")
    r.Set(w.Config.ActionsIDTokenRequestToken, "$ACTIONS_ID_TOKEN_REQUEST_TOKEN")
    r.Set(w.Config.AzureEnvironment, "$ARM_ENVIRONMENT")
    r.Set(w.Config.ClientID, "$DATABRICKS_CLIENT_ID")
    r.Set(w.Config.ClientSecret, "$DATABRICKS_CLIENT_SECRET")
    r.Set(w.Config.DatabricksCliPath, "$DATABRICKS_CLI_PATH")
    // This is set to words like "path" that happen too frequently
    // r.Set(w.Config.AuthType, "$DATABRICKS_AUTH_TYPE")
}

func PrepareReplacementsUser(t testutil.TestingT, r *ReplacementsContext, u iam.User) {
    // There could be exact matches or overlap between different name fields, so sort them by length
    // to ensure we match the largest one first and map them all to the same token
    names := []string{
        u.DisplayName,
        u.UserName,
        iamutil.GetShortUserName(&u),
    }
    // u.Name may be nil, so only dereference it behind the check below.
    if u.Name != nil {
        names = append(names, u.Name.FamilyName)
        names = append(names, u.Name.GivenName)
    }
    for _, val := range u.Emails {
        names = append(names, val.Value)
    }
    stableSortReverseLength(names)

    for _, name := range names {
        r.Set(name, "$USERNAME")
    }

    for ind, val := range u.Groups {
        r.Set(val.Value, fmt.Sprintf("$USER.Groups[%d]", ind))
    }

    r.Set(u.Id, "$USER.Id")

    for ind, val := range u.Roles {
        r.Set(val.Value, fmt.Sprintf("$USER.Roles[%d]", ind))
    }
}

func stableSortReverseLength(strs []string) {
    slices.SortStableFunc(strs, func(a, b string) int {
        return len(b) - len(a)
    })
}

func NormalizeNewlines(input string) string {
    output := strings.ReplaceAll(input, "\r\n", "\n")
    return strings.ReplaceAll(output, "\r", "\n")
}

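The helpers above are easiest to read from the calling side. Below is a minimal usage sketch, not part of this change: the test name, the golden-file path, and the assumption that the package is importable as internal/testcli are all illustrative.

package example // hypothetical usage sketch, not part of the diff

import (
    "context"
    "testing"

    "github.com/databricks/cli/internal/testcli"
    "github.com/databricks/databricks-sdk-go"
    "github.com/stretchr/testify/require"
)

func TestBundleValidateGolden(t *testing.T) {
    // Register a replacements map on the context; ReplaceOutput fails loudly if this is skipped.
    ctx, repls := testcli.WithReplacementsMap(context.Background())

    w, err := databricks.NewWorkspaceClient()
    require.NoError(t, err)

    // Non-deterministic values (host, IDs, credentials) are rewritten to
    // placeholders such as $DATABRICKS_HOST before the comparison.
    testcli.PrepareReplacements(t, repls, w)

    // Compare CLI output against a hypothetical golden file.
    testcli.AssertOutput(t, ctx, []string{"bundle", "validate"}, "testdata/validate_output.txt")
}

When the comparison fails and TESTS_OUTPUT=OVERWRITE is set, the helper rewrites the golden file instead of only reporting the diff, which is what OverwriteMode is for.
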
@@ -0,0 +1,13 @@
package testcli

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestSort(t *testing.T) {
    input := []string{"a", "bc", "cd"}
    stableSortReverseLength(input)
    assert.Equal(t, []string{"bc", "cd", "a"}, input)
}

@@ -47,6 +47,9 @@ func Chdir(t TestingT, dir string) string {

    wd, err := os.Getwd()
    require.NoError(t, err)
    if os.Getenv("TESTS_ORIG_WD") == "" {
        t.Setenv("TESTS_ORIG_WD", wd)
    }

    abs, err := filepath.Abs(dir)
    require.NoError(t, err)

@@ -61,3 +64,10 @@ func Chdir(t TestingT, dir string) string {

    return wd
}

// Return filename if testutil.Chdir was not called.
// Return absolute path to filename if testutil.Chdir() was called.
func TestData(filename string) string {
    // Note, if TESTS_ORIG_WD is not set, Getenv returns "" and Join returns filename
    return filepath.Join(os.Getenv("TESTS_ORIG_WD"), filename)
}

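A short sketch of how Chdir and TestData interact; the test name and fixture path below are hypothetical, not part of this change.

package example // hypothetical usage sketch

import (
    "os"
    "testing"

    "github.com/databricks/cli/internal/testutil"
)

func TestReadsFixtureAfterChdir(t *testing.T) {
    // The first Chdir call records the original working directory in TESTS_ORIG_WD,
    // then moves the test into the requested directory.
    testutil.Chdir(t, t.TempDir())

    // TestData resolves fixture paths relative to the original working directory,
    // so fixtures remain reachable even after the test has changed directories.
    path := testutil.TestData("testdata/some_fixture.json") // hypothetical file
    if _, err := os.Stat(path); err != nil {
        t.Skipf("fixture not present: %v", err)
    }
}
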
@@ -1,42 +0,0 @@
package flags

import (
    "fmt"
    "os"

    "github.com/ghodss/yaml"
)

type YamlFlag struct {
    raw []byte
}

func (y *YamlFlag) String() string {
    return fmt.Sprintf("YAML (%d bytes)", len(y.raw))
}

// TODO: Command.MarkFlagFilename()
func (y *YamlFlag) Set(v string) error {
    // Load request from file if it starts with '@' (like curl).
    if v[0] != '@' {
        y.raw = []byte(v)
        return nil
    }
    buf, err := os.ReadFile(v[1:])
    if err != nil {
        return fmt.Errorf("read %s: %w", v, err)
    }
    y.raw = buf
    return nil
}

func (y *YamlFlag) Unmarshal(v any) error {
    if y.raw == nil {
        return nil
    }
    return yaml.Unmarshal(y.raw, v)
}

func (y *YamlFlag) Type() string {
    return "YAML"
}

@@ -11,6 +11,19 @@ import (
    "runtime"
)

// GetExecutable gets the appropriate python binary name for the platform
func GetExecutable() string {
    // On Windows when virtualenv is created, the <env>/Scripts directory
    // contains python.exe but no python3.exe.
    // Most installers (e.g. the ones from python.org) only install python.exe and not python3.exe

    if runtime.GOOS == "windows" {
        return "python"
    } else {
        return "python3"
    }
}

// DetectExecutable looks up the path to the python3 executable from the PATH
// environment variable.
//

@@ -25,7 +38,9 @@ func DetectExecutable(ctx context.Context) (string, error) {
    // the parent directory tree.
    //
    // See https://github.com/pyenv/pyenv#understanding-python-version-selection
    out, err := exec.LookPath("python3")

    out, err := exec.LookPath(GetExecutable())

    // most of the OS'es have python3 in $PATH, but for those which don't,
    // we perform the latest version lookup
    if err != nil && !errors.Is(err, exec.ErrNotFound) {

@@ -54,7 +69,7 @@ func DetectExecutable(ctx context.Context) (string, error) {
func DetectVEnvExecutable(venvPath string) (string, error) {
    interpreterPath := filepath.Join(venvPath, "bin", "python3")
    if runtime.GOOS == "windows" {
        interpreterPath = filepath.Join(venvPath, "Scripts", "python3.exe")
        interpreterPath = filepath.Join(venvPath, "Scripts", "python.exe")
    }

    if _, err := os.Stat(interpreterPath); err != nil {

@@ -39,7 +39,7 @@ func TestDetectVEnvExecutable_badLayout(t *testing.T) {

func interpreterPath(venvPath string) string {
    if runtime.GOOS == "windows" {
        return filepath.Join(venvPath, "Scripts", "python3.exe")
        return filepath.Join(venvPath, "Scripts", "python.exe")
    } else {
        return filepath.Join(venvPath, "bin", "python3")
    }

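An illustrative sketch (not part of the diff) of how a caller might rely on the platform-dependent executable names touched above; the function name here is hypothetical.

package example // hypothetical usage sketch

import (
    "fmt"

    "github.com/databricks/cli/libs/python"
)

func printInterpreters(venvPath string) {
    // On Windows this returns "python"; elsewhere "python3".
    fmt.Println("system executable name:", python.GetExecutable())

    // DetectVEnvExecutable expects Scripts\python.exe on Windows and bin/python3
    // elsewhere, and returns an error if the venv layout does not match.
    if exe, err := python.DetectVEnvExecutable(venvPath); err == nil {
        fmt.Println("venv interpreter:", exe)
    } else {
        fmt.Println("no usable venv interpreter:", err)
    }
}
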
@ -0,0 +1,107 @@
|
|||
package pythontest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/internal/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type VenvOpts struct {
|
||||
// input
|
||||
PythonVersion string
|
||||
skipVersionCheck bool
|
||||
|
||||
// input/output
|
||||
Dir string
|
||||
Name string
|
||||
|
||||
// output:
|
||||
// Absolute path to venv
|
||||
EnvPath string
|
||||
|
||||
// Absolute path to venv/bin or venv/Scripts, depending on OS
|
||||
BinPath string
|
||||
|
||||
// Absolute path to python binary
|
||||
PythonExe string
|
||||
}
|
||||
|
||||
func CreatePythonEnv(opts *VenvOpts) error {
|
||||
if opts == nil || opts.PythonVersion == "" {
|
||||
return errors.New("PythonVersion must be provided")
|
||||
}
|
||||
if opts.Name == "" {
|
||||
opts.Name = testutil.RandomName("test-venv-")
|
||||
}
|
||||
|
||||
cmd := exec.Command("uv", "venv", opts.Name, "--python", opts.PythonVersion, "--seed", "-q")
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Dir = opts.Dir
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts.EnvPath, err = filepath.Abs(filepath.Join(opts.Dir, opts.Name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = os.Stat(opts.EnvPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot stat EnvPath %s: %s", opts.EnvPath, err)
|
||||
}
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// https://github.com/pypa/virtualenv/commit/993ba1316a83b760370f5a3872b3f5ef4dd904c1
|
||||
opts.BinPath = filepath.Join(opts.EnvPath, "Scripts")
|
||||
opts.PythonExe = filepath.Join(opts.BinPath, "python.exe")
|
||||
} else {
|
||||
opts.BinPath = filepath.Join(opts.EnvPath, "bin")
|
||||
opts.PythonExe = filepath.Join(opts.BinPath, "python3")
|
||||
}
|
||||
|
||||
_, err = os.Stat(opts.BinPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot stat BinPath %s: %s", opts.BinPath, err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(opts.PythonExe)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot stat PythonExe %s: %s", opts.PythonExe, err)
|
||||
}
|
||||
|
||||
if !opts.skipVersionCheck {
|
||||
cmd := exec.Command(opts.PythonExe, "--version")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to run %s --version: %s", opts.PythonExe, err)
|
||||
}
|
||||
outString := string(out)
|
||||
expectVersion := "Python " + opts.PythonVersion
|
||||
if !strings.HasPrefix(outString, expectVersion) {
|
||||
return fmt.Errorf("Unexpected output from %s --version: %v (expected %v)", opts.PythonExe, outString, expectVersion)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func RequireActivatedPythonEnv(t *testing.T, ctx context.Context, opts *VenvOpts) {
|
||||
err := CreatePythonEnv(opts)
|
||||
require.NoError(t, err)
|
||||
require.DirExists(t, opts.BinPath)
|
||||
|
||||
newPath := fmt.Sprintf("%s%c%s", opts.BinPath, os.PathListSeparator, os.Getenv("PATH"))
|
||||
t.Setenv("PATH", newPath)
|
||||
}
|
|
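A hypothetical in-package helper, sketched here only to show how the populated VenvOpts fields might be used once CreatePythonEnv returns; it assumes the package's existing fmt and os/exec imports, that uv is on PATH, and that --seed leaves pip available inside the venv.

// installBuildTooling is a hypothetical helper (not part of the change) that
// creates a fresh venv and installs a package into it using the venv interpreter.
func installBuildTooling(dir string) error {
    opts := VenvOpts{PythonVersion: "3.12", Dir: dir}
    if err := CreatePythonEnv(&opts); err != nil {
        return fmt.Errorf("create venv: %w", err)
    }

    // PythonExe points at the interpreter inside the freshly created venv
    // (Scripts\python.exe on Windows, bin/python3 elsewhere).
    cmd := exec.Command(opts.PythonExe, "-m", "pip", "install", "--quiet", "build")
    cmd.Dir = dir
    return cmd.Run()
}
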
@@ -0,0 +1,43 @@
package pythontest

import (
    "context"
    "os/exec"
    "path/filepath"
    "testing"

    "github.com/databricks/cli/libs/python"
    "github.com/stretchr/testify/require"
)

func TestVenvSuccess(t *testing.T) {
    // Test at least two versions to ensure we capture a case where the venv version does not match the system one
    for _, pythonVersion := range []string{"3.11", "3.12"} {
        t.Run(pythonVersion, func(t *testing.T) {
            ctx := context.Background()
            dir := t.TempDir()
            opts := VenvOpts{
                PythonVersion: pythonVersion,
                Dir:           dir,
            }
            RequireActivatedPythonEnv(t, ctx, &opts)
            require.DirExists(t, opts.EnvPath)
            require.DirExists(t, opts.BinPath)
            require.FileExists(t, opts.PythonExe)

            pythonExe, err := exec.LookPath(python.GetExecutable())
            require.NoError(t, err)
            require.Equal(t, filepath.Dir(pythonExe), filepath.Dir(opts.PythonExe))
            require.FileExists(t, pythonExe)
        })
    }
}

func TestWrongVersion(t *testing.T) {
    require.Error(t, CreatePythonEnv(&VenvOpts{PythonVersion: "4.0"}))
}

func TestMissingVersion(t *testing.T) {
    require.Error(t, CreatePythonEnv(nil))
    require.Error(t, CreatePythonEnv(&VenvOpts{}))
}

@@ -0,0 +1,90 @@
package testdiff

import (
    "fmt"
    "strings"

    "github.com/databricks/cli/internal/testutil"
    "github.com/hexops/gotextdiff"
    "github.com/hexops/gotextdiff/myers"
    "github.com/hexops/gotextdiff/span"
    "github.com/stretchr/testify/assert"
    "github.com/wI2L/jsondiff"
)

func UnifiedDiff(filename1, filename2, s1, s2 string) string {
    edits := myers.ComputeEdits(span.URIFromPath(filename1), s1, s2)
    return fmt.Sprint(gotextdiff.ToUnified(filename1, filename2, s1, edits))
}

func AssertEqualTexts(t testutil.TestingT, filename1, filename2, expected, out string) {
    if len(out) < 1000 && len(expected) < 1000 {
        // This shows full strings + diff which could be useful when debugging newlines
        assert.Equal(t, expected, out)
    } else {
        // only show diff for large texts
        diff := UnifiedDiff(filename1, filename2, expected, out)
        t.Errorf("Diff:\n" + diff)
    }
}

func AssertEqualJQ(t testutil.TestingT, expectedName, outName, expected, out string, ignorePaths []string) {
    patch, err := jsondiff.CompareJSON([]byte(expected), []byte(out))
    if err != nil {
        t.Logf("CompareJSON error for %s vs %s: %s (fallback to textual comparison)", outName, expectedName, err)
        AssertEqualTexts(t, expectedName, outName, expected, out)
    } else {
        diff := UnifiedDiff(expectedName, outName, expected, out)
        t.Logf("Diff:\n%s", diff)
        allowedDiffs := []string{}
        erroredDiffs := []string{}
        for _, op := range patch {
            if allowDifference(ignorePaths, op) {
                allowedDiffs = append(allowedDiffs, fmt.Sprintf("%7s %s %v old=%v", op.Type, op.Path, op.Value, op.OldValue))
            } else {
                erroredDiffs = append(erroredDiffs, fmt.Sprintf("%7s %s %v old=%v", op.Type, op.Path, op.Value, op.OldValue))
            }
        }
        if len(allowedDiffs) > 0 {
            t.Logf("Allowed differences between %s and %s:\n ==> %s", expectedName, outName, strings.Join(allowedDiffs, "\n ==> "))
        }
        if len(erroredDiffs) > 0 {
            t.Errorf("Unexpected differences between %s and %s:\n ==> %s", expectedName, outName, strings.Join(erroredDiffs, "\n ==> "))
        }
    }
}

func allowDifference(ignorePaths []string, op jsondiff.Operation) bool {
    if matchesPrefixes(ignorePaths, op.Path) {
        return true
    }
    if op.Type == "replace" && almostSameStrings(op.OldValue, op.Value) {
        return true
    }
    return false
}

// compare strings and ignore forward vs backward slashes
func almostSameStrings(v1, v2 any) bool {
    s1, ok := v1.(string)
    if !ok {
        return false
    }
    s2, ok := v2.(string)
    if !ok {
        return false
    }
    return strings.ReplaceAll(s1, "\\", "/") == strings.ReplaceAll(s2, "\\", "/")
}

func matchesPrefixes(prefixes []string, path string) bool {
    for _, p := range prefixes {
        if p == path {
            return true
        }
        if strings.HasPrefix(path, p+"/") {
            return true
        }
    }
    return false
}

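A minimal sketch of how a caller might use AssertEqualJQ with ignorePaths; the file names and JSON payloads are hypothetical. Differences under the listed JSON pointer prefixes are only logged, anything else fails the test.

package example // hypothetical usage sketch

import (
    "testing"

    "github.com/databricks/cli/libs/testdiff"
)

func TestJobConfigMatchesGolden(t *testing.T) {
    expected := `{"name": "my job", "id": "123", "run_as": {"user_name": "someone@example.com"}}`
    actual := `{"name": "my job", "id": "456", "run_as": {"user_name": "someone-else@example.com"}}`

    // "/id" ignores the exact path; "/run_as" also covers nested paths such as "/run_as/user_name",
    // because matchesPrefixes treats entries as path prefixes.
    testdiff.AssertEqualJQ(t, "expected.json", "actual.json", expected, actual,
        []string{"/id", "/run_as"})
}
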
@@ -0,0 +1,20 @@
package testdiff

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestDiff(t *testing.T) {
    assert.Equal(t, "", UnifiedDiff("a", "b", "", ""))
    assert.Equal(t, "", UnifiedDiff("a", "b", "abc", "abc"))
    assert.Equal(t, "--- a\n+++ b\n@@ -1 +1,2 @@\n abc\n+123\n", UnifiedDiff("a", "b", "abc\n", "abc\n123\n"))
}

func TestMatchesPrefixes(t *testing.T) {
    assert.False(t, matchesPrefixes([]string{}, ""))
    assert.False(t, matchesPrefixes([]string{"/hello", "/hello/world"}, ""))
    assert.True(t, matchesPrefixes([]string{"/hello", "/a/b"}, "/hello"))
    assert.True(t, matchesPrefixes([]string{"/hello", "/a/b"}, "/a/b/c"))
}