mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'origin' into telemetry/logger-2
commit a4f5d89298

@@ -11,7 +11,7 @@
"required": ["go"],
"post_generate": [
"go test -timeout 240s -run TestConsistentDatabricksSdkVersion github.com/databricks/cli/internal/build",
"go run ./bundle/internal/schema/*.go ./bundle/schema/jsonschema.json",
"make schema",
"echo 'bundle/internal/tf/schema/\\*.go linguist-generated=true' >> ./.gitattributes",
"echo 'go.sum linguist-generated=true' >> ./.gitattributes",
"echo 'bundle/schema/jsonschema.json linguist-generated=true' >> ./.gitattributes"

@@ -1 +1 @@
d25296d2f4aa7bd6195c816fdf82e0f960f775da
a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d

@@ -411,5 +411,5 @@ func new{{.PascalName}}() *cobra.Command {
{{- define "request-body-obj" -}}
{{- $method := .Method -}}
{{- $field := .Field -}}
{{$method.CamelName}}Req{{ if (and $method.RequestBodyField (not $field.IsPath)) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}}
{{$method.CamelName}}Req{{ if (and $method.RequestBodyField (and (not $field.IsPath) (not $field.IsQuery))) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}}
{{- end -}}

@@ -0,0 +1,8 @@
# Enable gofumpt and goimports in golangci-lint (#1999)
2e018cfaec200a02ee2bd5b389e7da3c6f15f460

# Enable errcheck everywhere and fix or silent remaining issues (#1987)
8d5351c1c3d7befda4baae5d6adb99367aa50b3c

# Add error checking in tests and enable errcheck there (#1980)
1b2be1b2cb4b7909df2a8ad4cb6a0f43e8fcf0c6

@@ -8,6 +8,7 @@ cmd/account/custom-app-integration/custom-app-integration.go linguist-generated=true
cmd/account/disable-legacy-features/disable-legacy-features.go linguist-generated=true
cmd/account/encryption-keys/encryption-keys.go linguist-generated=true
cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true
cmd/account/federation-policy/federation-policy.go linguist-generated=true
cmd/account/groups/groups.go linguist-generated=true
cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true
cmd/account/log-delivery/log-delivery.go linguist-generated=true

@@ -19,6 +20,7 @@ cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true
cmd/account/personal-compute/personal-compute.go linguist-generated=true
cmd/account/private-access/private-access.go linguist-generated=true
cmd/account/published-app-integration/published-app-integration.go linguist-generated=true
cmd/account/service-principal-federation-policy/service-principal-federation-policy.go linguist-generated=true
cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true
cmd/account/service-principals/service-principals.go linguist-generated=true
cmd/account/settings/settings.go linguist-generated=true

@@ -37,6 +39,9 @@ cmd/workspace/apps/apps.go linguist-generated=true
cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true
cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true
cmd/workspace/catalogs/catalogs.go linguist-generated=true
cmd/workspace/clean-room-assets/clean-room-assets.go linguist-generated=true
cmd/workspace/clean-room-task-runs/clean-room-task-runs.go linguist-generated=true
cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true
cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true
cmd/workspace/clusters/clusters.go linguist-generated=true
cmd/workspace/cmd.go linguist-generated=true

@@ -0,0 +1,32 @@
name: integration-approve

on:
merge_group:

jobs:
# Trigger for merge groups.
#
# Statuses and checks apply to specific commits (by hash).
# Enforcement of required checks is done both at the PR level and the merge queue level.
# In case of multiple commits in a single PR, the hash of the squashed commit
# will not match the one for the latest (approved) commit in the PR.
#
# We auto approve the check for the merge queue for two reasons:
#
# * Queue times out due to duration of tests.
# * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing.
#
trigger:
runs-on: ubuntu-latest

steps:
- name: Auto-approve squashed commit
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
shell: bash
run: |
gh api -X POST -H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
/repos/${{ github.repository }}/statuses/${{ github.sha }} \
-f 'state=success' \
-f 'context=Integration Tests Check'

@@ -0,0 +1,33 @@
name: integration-main

on:
push:
branches:
- main

jobs:
# Trigger for pushes to the main branch.
#
# This workflow triggers the integration test workflow in a different repository.
# It requires secrets from the "test-trigger-is" environment, which are only available to authorized users.
trigger:
runs-on: ubuntu-latest
environment: "test-trigger-is"

steps:
- name: Generate GitHub App Token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}
private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }}
owner: ${{ secrets.ORG_NAME }}
repositories: ${{secrets.REPO_NAME}}

- name: Trigger Workflow in Another Repo
env:
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
run: |
gh workflow run cli-isolated-nightly.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \
--ref main \
-f commit_sha=${{ github.event.after }}

@@ -0,0 +1,56 @@
name: integration-pr

on:
pull_request:
types: [opened, synchronize]

jobs:
check-token:
runs-on: ubuntu-latest
environment: "test-trigger-is"

outputs:
has_token: ${{ steps.set-token-status.outputs.has_token }}

steps:
- name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
id: set-token-status
run: |
if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets."
echo "::set-output name=has_token::false"
else
echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets."
echo "::set-output name=has_token::true"
fi

# Trigger for pull requests.
#
# This workflow triggers the integration test workflow in a different repository.
# It requires secrets from the "test-trigger-is" environment, which are only available to authorized users.
# It depends on the "check-token" workflow to confirm access to this environment to avoid failures.
trigger:
runs-on: ubuntu-latest
environment: "test-trigger-is"

if: needs.check-token.outputs.has_token == 'true'
needs: check-token

steps:
- name: Generate GitHub App Token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}
private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }}
owner: ${{ secrets.ORG_NAME }}
repositories: ${{secrets.REPO_NAME}}

- name: Trigger Workflow in Another Repo
env:
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
run: |
gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \
--ref main \
-f pull_request_number=${{ github.event.pull_request.number }} \
-f commit_sha=${{ github.event.pull_request.head.sha }}

@@ -1,78 +0,0 @@
name: integration

on:

pull_request:
types: [opened, synchronize]

merge_group:


jobs:
check-token:
runs-on: ubuntu-latest
environment: "test-trigger-is"
outputs:
has_token: ${{ steps.set-token-status.outputs.has_token }}
steps:
- name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
id: set-token-status
run: |
if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets."
echo "::set-output name=has_token::false"
else
echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets."
echo "::set-output name=has_token::true"
fi

trigger-tests:
runs-on: ubuntu-latest
needs: check-token
if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true'
environment: "test-trigger-is"

steps:
- uses: actions/checkout@v4

- name: Generate GitHub App Token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}
private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }}
owner: ${{ secrets.ORG_NAME }}
repositories: ${{secrets.REPO_NAME}}

- name: Trigger Workflow in Another Repo
env:
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
run: |
gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \
--ref main \
-f pull_request_number=${{ github.event.pull_request.number }} \
-f commit_sha=${{ github.event.pull_request.head.sha }}



# Statuses and checks apply to specific commits (by hash).
# Enforcement of required checks is done both at the PR level and the merge queue level.
# In case of multiple commits in a single PR, the hash of the squashed commit
# will not match the one for the latest (approved) commit in the PR.
# We auto approve the check for the merge queue for two reasons:
# * Queue times out due to duration of tests.
# * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing.
auto-approve:
if: github.event_name == 'merge_group'
runs-on: ubuntu-latest
steps:
- name: Mark Check
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
shell: bash
run: |
gh api -X POST -H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
/repos/${{ github.repository }}/statuses/${{ github.sha }} \
-f 'state=success' \
-f 'context=Integration Tests Check'

@@ -44,8 +44,7 @@ jobs:
run: |
echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV
echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
go install gotest.tools/gotestsum@latest
go install honnef.co/go/tools/cmd/staticcheck@latest
go install gotest.tools/gotestsum@v1.12.0

- name: Pull external libraries
run: |

@@ -53,42 +52,28 @@ jobs:
pip3 install wheel

- name: Run tests
run: make test
run: make testonly

- name: Publish test coverage
uses: codecov/codecov-action@v4

fmt:
golangci:
name: lint
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v4

- name: Setup Go
uses: actions/setup-go@v5
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: 1.23.2

# No need to download cached dependencies when running gofmt.
cache: false

- name: Install goimports
run: |
go install golang.org/x/tools/cmd/goimports@latest

- name: Run make fmt
run: |
make fmt

- name: Run go mod tidy
run: |
go mod tidy

- name: Fail on differences
run: |
# Exit with status code 1 if there are differences (i.e. unformatted files)
git diff --exit-code
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: v1.62.2
args: --timeout=15m

validate-bundle-schema:
runs-on: ubuntu-latest

@@ -111,14 +96,19 @@ jobs:
# By default the ajv-cli runs in strict mode which will fail if the schema
# itself is not valid. Strict mode is more strict than the JSON schema
# specification. See for details: https://ajv.js.org/options.html#strict-mode-options
# The ajv-cli is configured to use the markdownDescription keyword which is not part of the JSON schema specification,
# but is used in editors like VSCode to render markdown in the description field
- name: Validate bundle schema
run: |
go run main.go bundle schema > schema.json

# Add markdownDescription keyword to ajv
echo "module.exports=function(a){a.addKeyword('markdownDescription')}" >> keywords.js

for file in ./bundle/internal/schema/testdata/pass/*.yml; do
ajv test -s schema.json -d $file --valid
ajv test -s schema.json -d $file --valid -c=./keywords.js
done

for file in ./bundle/internal/schema/testdata/fail/*.yml; do
ajv test -s schema.json -d $file --invalid
ajv test -s schema.json -d $file --invalid -c=./keywords.js
done

@@ -5,6 +5,7 @@ on:
branches:
- "main"
- "demo-*"
- "bugbash-*"

# Confirm that snapshot builds work if this file is modified.
pull_request:

@@ -0,0 +1,38 @@
linters:
disable-all: true
enable:
- bodyclose
- errcheck
- gosimple
- govet
- ineffassign
- staticcheck
- unused
- gofmt
- gofumpt
- goimports
linters-settings:
govet:
enable-all: true
disable:
- fieldalignment
- shadow
gofmt:
rewrite-rules:
- pattern: 'a[b:len(a)]'
replacement: 'a[b:]'
- pattern: 'interface{}'
replacement: 'any'
errcheck:
exclude-functions:
- (*github.com/spf13/cobra.Command).RegisterFlagCompletionFunc
- (*github.com/spf13/cobra.Command).MarkFlagRequired
- (*github.com/spf13/pflag.FlagSet).MarkDeprecated
- (*github.com/spf13/pflag.FlagSet).MarkHidden
gofumpt:
module-path: github.com/databricks/cli
extra-rules: true
#goimports:
# local-prefixes: github.com/databricks/cli
issues:
exclude-dirs-use-default: false # recommended by docs https://golangci-lint.run/usage/false-positives/

@@ -3,11 +3,18 @@
"editor.insertSpaces": false,
"editor.formatOnSave": true
},
"go.lintTool": "golangci-lint",
"go.lintFlags": [
"--fast"
],
"go.useLanguageServer": true,
"gopls": {
"formatting.gofumpt": true
},
"files.trimTrailingWhitespace": true,
"files.insertFinalNewline": true,
"files.trimFinalNewlines": true,
"python.envFile": "${workspaceRoot}/.env",
"databricks.python.envFile": "${workspaceFolder}/.env",
"python.analysis.stubPath": ".vscode",
"jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\<codecell\\>|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])",
"jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------"

CHANGELOG.md

@@ -1,5 +1,55 @@
# Version changelog

## [Release] Release v0.237.0

Bundles:
* Allow overriding compute for non-development mode targets ([#1899](https://github.com/databricks/cli/pull/1899)).
* Show an error when using a cluster override with 'mode: production' ([#1994](https://github.com/databricks/cli/pull/1994)).

API Changes:
* Added `databricks account federation-policy` command group.
* Added `databricks account service-principal-federation-policy` command group.
* Added `databricks aibi-dashboard-embedding-access-policy delete` command.
* Added `databricks aibi-dashboard-embedding-approved-domains delete` command.

OpenAPI commit a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d (2024-12-16)
Dependency updates:
* Upgrade TF provider to 1.62.0 ([#2030](https://github.com/databricks/cli/pull/2030)).
* Upgrade Go SDK to 0.54.0 ([#2029](https://github.com/databricks/cli/pull/2029)).
* Bump TF codegen dependencies to latest ([#1961](https://github.com/databricks/cli/pull/1961)).
* Bump golang.org/x/term from 0.26.0 to 0.27.0 ([#1983](https://github.com/databricks/cli/pull/1983)).
* Bump golang.org/x/sync from 0.9.0 to 0.10.0 ([#1984](https://github.com/databricks/cli/pull/1984)).
* Bump github.com/databricks/databricks-sdk-go from 0.52.0 to 0.53.0 ([#1985](https://github.com/databricks/cli/pull/1985)).
* Bump golang.org/x/crypto from 0.24.0 to 0.31.0 ([#2006](https://github.com/databricks/cli/pull/2006)).
* Bump golang.org/x/crypto from 0.30.0 to 0.31.0 in /bundle/internal/tf/codegen ([#2005](https://github.com/databricks/cli/pull/2005)).

## [Release] Release v0.236.0

**New features for Databricks Asset Bundles:**

This release adds support for managing Unity Catalog volumes as part of your bundle configuration.

Bundles:
* Add DABs support for Unity Catalog volumes ([#1762](https://github.com/databricks/cli/pull/1762)).
* Support lookup by name of notification destinations ([#1922](https://github.com/databricks/cli/pull/1922)).
* Extend "notebook not found" error to warn about missing extension ([#1920](https://github.com/databricks/cli/pull/1920)).
* Skip sync warning if no sync paths are defined ([#1926](https://github.com/databricks/cli/pull/1926)).
* Add validation for single node clusters ([#1909](https://github.com/databricks/cli/pull/1909)).
* Fix segfault in bundle summary command ([#1937](https://github.com/databricks/cli/pull/1937)).
* Add the `bundle_uuid` helper function for templates ([#1947](https://github.com/databricks/cli/pull/1947)).
* Add default value for `volume_type` for DABs ([#1952](https://github.com/databricks/cli/pull/1952)).
* Properly read Git metadata when running inside workspace ([#1945](https://github.com/databricks/cli/pull/1945)).
* Upgrade TF provider to 1.59.0 ([#1960](https://github.com/databricks/cli/pull/1960)).

Internal:
* Breakout variable lookup into separate files and tests ([#1921](https://github.com/databricks/cli/pull/1921)).
* Add golangci-lint v1.62.2 ([#1953](https://github.com/databricks/cli/pull/1953)).

Dependency updates:
* Bump golang.org/x/term from 0.25.0 to 0.26.0 ([#1907](https://github.com/databricks/cli/pull/1907)).
* Bump github.com/Masterminds/semver/v3 from 3.3.0 to 3.3.1 ([#1930](https://github.com/databricks/cli/pull/1930)).
* Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 ([#1932](https://github.com/databricks/cli/pull/1932)).
* Bump github.com/databricks/databricks-sdk-go from 0.51.0 to 0.52.0 ([#1931](https://github.com/databricks/cli/pull/1931)).
## [Release] Release v0.235.0

**Note:** the `bundle generate` command now uses the `.<resource-type>.yml`

Makefile

@@ -1,16 +1,16 @@
default: build

fmt:
@echo "✓ Formatting source code with goimports ..."
@goimports -w $(shell find . -type f -name '*.go' -not -path "./vendor/*")
@echo "✓ Formatting source code with gofmt ..."
@gofmt -w $(shell find . -type f -name '*.go' -not -path "./vendor/*")

lint: vendor
@echo "✓ Linting source code with https://staticcheck.io/ ..."
@staticcheck ./...
@echo "✓ Linting source code with https://golangci-lint.run/ (with --fix)..."
@golangci-lint run --fix ./...

test: lint
lintcheck: vendor
@echo "✓ Linting source code with https://golangci-lint.run/ ..."
@golangci-lint run ./...

test: lint testonly

testonly:
@echo "✓ Running tests ..."
@gotestsum --format pkgname-and-test-fails --no-summary=skipped --raw-command go test -v -json -short -coverprofile=coverage.txt ./...

@@ -29,6 +29,17 @@ snapshot:
vendor:
@echo "✓ Filling vendor folder with library code ..."
@go mod vendor

schema:
@echo "✓ Generating json-schema ..."
@go run ./bundle/internal/schema ./bundle/internal/schema ./bundle/schema/jsonschema.json

.PHONY: build vendor coverage test lint fmt
INTEGRATION = gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./integration/..." -- -parallel 4 -timeout=2h

integration:
$(INTEGRATION)

integration-short:
$(INTEGRATION) -short

.PHONY: lint lintcheck test testonly coverage build snapshot vendor schema integration integration-short

NOTICE

@@ -73,10 +73,6 @@ fatih/color - https://github.com/fatih/color
Copyright (c) 2013 Fatih Arslan
License - https://github.com/fatih/color/blob/main/LICENSE.md

ghodss/yaml - https://github.com/ghodss/yaml
Copyright (c) 2014 Sam Ghods
License - https://github.com/ghodss/yaml/blob/master/LICENSE

Masterminds/semver - https://github.com/Masterminds/semver
Copyright (C) 2014-2019, Matt Butcher and Matt Farina
License - https://github.com/Masterminds/semver/blob/master/LICENSE.txt

@@ -3,7 +3,6 @@ package artifacts
import (
"context"
"fmt"

"slices"

"github.com/databricks/cli/bundle"

@@ -13,8 +13,7 @@ func DetectPackages() bundle.Mutator {
return &autodetect{}
}

type autodetect struct {
}
type autodetect struct{}

func (m *autodetect) Name() string {
return "artifacts.DetectPackages"

@@ -96,7 +96,6 @@ func (m *expandGlobs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost
// Set the expanded globs back into the configuration.
return dyn.SetByPath(v, base, dyn.V(output))
})

if err != nil {
return diag.FromErr(err)
}

@@ -21,18 +21,13 @@ func (m *cleanUp) Name() string {
}

func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
uploadPath, err := libraries.GetUploadBasePath(b)
if err != nil {
return diag.FromErr(err)
}

client, err := libraries.GetFilerForLibraries(b.WorkspaceClient(), uploadPath)
if err != nil {
return diag.FromErr(err)
client, uploadPath, diags := libraries.GetFilerForLibraries(ctx, b)
if diags.HasError() {
return diags
}

// We intentionally ignore the error because it is not critical to the deployment
err = client.Delete(ctx, ".", filer.DeleteRecursively)
err := client.Delete(ctx, ".", filer.DeleteRecursively)
if err != nil {
log.Errorf(ctx, "failed to delete %s: %v", uploadPath, err)
}

@@ -15,8 +15,7 @@ import (
"github.com/databricks/cli/libs/log"
)

type detectPkg struct {
}
type detectPkg struct{}

func DetectPackage() bundle.Mutator {
return &detectPkg{}

@@ -42,7 +41,7 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
return nil
}

log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.BundleRootPath))
log.Infof(ctx, "Found Python wheel project at %s", b.BundleRootPath)
module := extractModuleName(setupPy)

if b.Config.Artifacts == nil {

@@ -48,6 +48,10 @@ type Bundle struct {
// Exclusively use this field for filesystem operations.
SyncRoot vfs.Path

// Path to the root of git worktree containing the bundle.
// https://git-scm.com/docs/git-worktree
WorktreeRoot vfs.Path

// Config contains the bundle configuration.
// It is loaded from the bundle configuration files and mutators may update it.
Config config.Root

@@ -182,7 +186,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error)
// Make directory if it doesn't exist yet.
dir := filepath.Join(parts...)
err := os.MkdirAll(dir, 0700)
err := os.MkdirAll(dir, 0o700)
if err != nil {
return "", err
}

@@ -199,7 +203,7 @@ func (b *Bundle) InternalDir(ctx context.Context) (string, error) {
}

dir := filepath.Join(cacheDir, internalFolder)
err = os.MkdirAll(dir, 0700)
err = os.MkdirAll(dir, 0o700)
if err != nil {
return dir, err
}

@@ -32,6 +32,10 @@ func (r ReadOnlyBundle) SyncRoot() vfs.Path {
return r.b.SyncRoot
}

func (r ReadOnlyBundle) WorktreeRoot() vfs.Path {
return r.b.WorktreeRoot
}

func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient {
return r.b.WorkspaceClient()
}

@@ -49,4 +49,8 @@ type Bundle struct {
// Databricks CLI version constraints required to run the bundle.
DatabricksCliVersion string `json:"databricks_cli_version,omitempty"`

// A stable generated UUID for the bundle. This is normally serialized by
// Databricks first party template when a user runs bundle init.
Uuid string `json:"uuid,omitempty"`
}

@@ -47,8 +47,10 @@ type PyDABs struct {
Import []string `json:"import,omitempty"`
}

type Command string
type ScriptHook string
type (
Command string
ScriptHook string
)

// These hook names are subject to change and currently experimental
const (

@@ -6,8 +6,10 @@ import (
"github.com/databricks/databricks-sdk-go/service/jobs"
)

var jobOrder = yamlsaver.NewOrder([]string{"name", "job_clusters", "compute", "tasks"})
var taskOrder = yamlsaver.NewOrder([]string{"task_key", "depends_on", "existing_cluster_id", "new_cluster", "job_cluster_key"})
var (
jobOrder = yamlsaver.NewOrder([]string{"name", "job_clusters", "compute", "tasks"})
taskOrder = yamlsaver.NewOrder([]string{"task_key", "depends_on", "existing_cluster_id", "new_cluster", "job_cluster_key"})
)

func ConvertJobToValue(job *jobs.Job) (dyn.Value, error) {
value := make(map[string]dyn.Value)

@@ -27,7 +27,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
var out []bundle.Mutator

// Map with files we've already seen to avoid loading them twice.
var seen = map[string]bool{}
seen := map[string]bool{}

for _, file := range config.FileNames {
seen[file] = true

@@ -73,7 +73,7 @@ func TestApplyPresetsPrefix(t *testing.T) {
}
}

func TestApplyPresetsPrefixForUcSchema(t *testing.T) {
func TestApplyPresetsPrefixForSchema(t *testing.T) {
tests := []struct {
name string
prefix string

@@ -129,6 +129,36 @@ func TestApplyPresetsPrefixForUcSchema(t *testing.T) {
}
}

func TestApplyPresetsVolumesShouldNotBePrefixed(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Volumes: map[string]*resources.Volume{
"volume1": {
CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
Name: "volume1",
CatalogName: "catalog1",
SchemaName: "schema1",
},
},
},
},
Presets: config.Presets{
NamePrefix: "[prefix]",
},
},
}

ctx := context.Background()
diag := bundle.Apply(ctx, b, mutator.ApplyPresets())

if diag.HasError() {
t.Fatalf("unexpected error: %v", diag)
}

require.Equal(t, "volume1", b.Config.Resources.Volumes["volume1"].Name)
}

func TestApplyPresetsTags(t *testing.T) {
tests := []struct {
name string

@@ -451,5 +481,4 @@ func TestApplyPresetsSourceLinkedDeployment(t *testing.T) {
require.Equal(t, tt.expectedValue, b.Config.Presets.SourceLinkedDeployment)
})
}

}

@@ -42,7 +42,6 @@ func rewriteComputeIdToClusterId(v dyn.Value, p dyn.Path) (dyn.Value, diag.Diagn
var diags diag.Diagnostics
computeIdPath := p.Append(dyn.Key("compute_id"))
computeId, err := dyn.GetByPath(v, computeIdPath)

// If the "compute_id" key is not set, we don't need to do anything.
if err != nil {
return v, nil

@@ -0,0 +1,44 @@
package mutator

import (
"context"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
)

type configureVolumeDefaults struct{}

func ConfigureVolumeDefaults() bundle.Mutator {
return &configureVolumeDefaults{}
}

func (m *configureVolumeDefaults) Name() string {
return "ConfigureVolumeDefaults"
}

func (m *configureVolumeDefaults) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
var diags diag.Diagnostics

pattern := dyn.NewPattern(
dyn.Key("resources"),
dyn.Key("volumes"),
dyn.AnyKey(),
)

// Configure defaults for all volumes.
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
var err error
v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("volume_type")), dyn.V("MANAGED"))
if err != nil {
return dyn.InvalidValue, err
}
return v, nil
})
})

diags = diags.Extend(diag.FromErr(err))
return diags
}

@@ -0,0 +1,75 @@
package mutator_test

import (
"context"
"testing"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestConfigureVolumeDefaultsVolumeType(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Volumes: map[string]*resources.Volume{
"v1": {
// Empty string is skipped.
// See below for how it is set.
CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
VolumeType: "",
},
},
"v2": {
// Non-empty string is skipped.
CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
VolumeType: "already-set",
},
},
"v3": {
// No volume type set.
},
"v4": nil,
},
},
},
}

// We can't set an empty string in the typed configuration.
// Do it on the dyn.Value directly.
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.volumes.v1.volume_type", dyn.V(""))
})

diags := bundle.Apply(context.Background(), b, mutator.ConfigureVolumeDefaults())
require.NoError(t, diags.Error())

var v dyn.Value
var err error

// Set to empty string; unchanged.
v, err = dyn.Get(b.Config.Value(), "resources.volumes.v1.volume_type")
require.NoError(t, err)
assert.Equal(t, "", v.MustString())

// Set to non-empty string; unchanged.
v, err = dyn.Get(b.Config.Value(), "resources.volumes.v2.volume_type")
require.NoError(t, err)
assert.Equal(t, "already-set", v.MustString())

// Not set; set to default.
v, err = dyn.Get(b.Config.Value(), "resources.volumes.v3.volume_type")
require.NoError(t, err)
assert.Equal(t, "MANAGED", v.MustString())

// No valid volume; No change.
_, err = dyn.Get(b.Config.Value(), "resources.volumes.v4.volume_type")
assert.True(t, dyn.IsCannotTraverseNilError(err))
}

@@ -17,7 +17,7 @@ import (
)

func touchEmptyFile(t *testing.T, path string) {
err := os.MkdirAll(filepath.Dir(path), 0700)
err := os.MkdirAll(filepath.Dir(path), 0o700)
require.NoError(t, err)
f, err := os.Create(path)
require.NoError(t, err)

@@ -28,7 +28,7 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.
}

currentUser := b.Config.Workspace.CurrentUser
if currentUser == nil || currentUser.UserName == "" {
if currentUser == nil || currentUser.User == nil || currentUser.UserName == "" {
return diag.Errorf("unable to expand workspace root: current user not set")
}

@@ -10,8 +10,7 @@ import (
"github.com/databricks/cli/libs/diag"
)

type initializeURLs struct {
}
type initializeURLs struct{}

// InitializeURLs makes sure the URL field of each resource is configured.
// NOTE: since this depends on an extra API call, this mutator adds some extra

@@ -32,11 +31,14 @@ func (m *initializeURLs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
}
orgId := strconv.FormatInt(workspaceId, 10)
host := b.WorkspaceClient().Config.CanonicalHostName()
initializeForWorkspace(b, orgId, host)
err = initializeForWorkspace(b, orgId, host)
if err != nil {
return diag.FromErr(err)
}
return nil
}

func initializeForWorkspace(b *bundle.Bundle, orgId string, host string) error {
func initializeForWorkspace(b *bundle.Bundle, orgId, host string) error {
baseURL, err := url.Parse(host)
if err != nil {
return err

@@ -110,7 +110,8 @@ func TestInitializeURLs(t *testing.T) {
"dashboard1": "https://mycompany.databricks.com/dashboardsv3/01ef8d56871e1d50ae30ce7375e42478/published?o=123456",
}

initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/")
err := initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/")
require.NoError(t, err)

for _, group := range b.Config.Resources.AllResources() {
for key, r := range group.Resources {

@@ -133,7 +134,8 @@ func TestInitializeURLsWithoutOrgId(t *testing.T) {
},
}

initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/")
err := initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/")
require.NoError(t, err)

require.Equal(t, "https://adb-123456.azuredatabricks.net/jobs/1", b.Config.Resources.Jobs["job1"].URL)
}

@@ -2,12 +2,14 @@ package mutator

import (
"context"
"errors"
"os"
"path/filepath"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/git"
"github.com/databricks/cli/libs/log"
"github.com/databricks/cli/libs/vfs"
)

type loadGitDetails struct{}

@@ -21,50 +23,42 @@ func (m *loadGitDetails) Name() string {
}

func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
// Load relevant git repository
repo, err := git.NewRepository(b.BundleRoot)
var diags diag.Diagnostics
info, err := git.FetchRepositoryInfo(ctx, b.BundleRoot.Native(), b.WorkspaceClient())
if err != nil {
return diag.FromErr(err)
if !errors.Is(err, os.ErrNotExist) {
diags = append(diags, diag.WarningFromErr(err)...)
}
}

// Read branch name of current checkout
branch, err := repo.CurrentBranch()
if err == nil {
b.Config.Bundle.Git.ActualBranch = branch
if b.Config.Bundle.Git.Branch == "" {
// Only load branch if there's no user defined value
b.Config.Bundle.Git.Inferred = true
b.Config.Bundle.Git.Branch = branch
}
if info.WorktreeRoot == "" {
b.WorktreeRoot = b.BundleRoot
} else {
log.Warnf(ctx, "failed to load current branch: %s", err)
b.WorktreeRoot = vfs.MustNew(info.WorktreeRoot)
}

b.Config.Bundle.Git.ActualBranch = info.CurrentBranch
if b.Config.Bundle.Git.Branch == "" {
// Only load branch if there's no user defined value
b.Config.Bundle.Git.Inferred = true
b.Config.Bundle.Git.Branch = info.CurrentBranch
}

// load commit hash if undefined
if b.Config.Bundle.Git.Commit == "" {
commit, err := repo.LatestCommit()
if err != nil {
log.Warnf(ctx, "failed to load latest commit: %s", err)
} else {
b.Config.Bundle.Git.Commit = commit
}
}
// load origin url if undefined
if b.Config.Bundle.Git.OriginURL == "" {
remoteUrl := repo.OriginUrl()
b.Config.Bundle.Git.OriginURL = remoteUrl
b.Config.Bundle.Git.Commit = info.LatestCommit
}

// Compute relative path of the bundle root from the Git repo root.
absBundlePath, err := filepath.Abs(b.BundleRootPath)
if err != nil {
return diag.FromErr(err)
// load origin url if undefined
if b.Config.Bundle.Git.OriginURL == "" {
b.Config.Bundle.Git.OriginURL = info.OriginURL
}
// repo.Root() returns the absolute path of the repo
relBundlePath, err := filepath.Rel(repo.Root(), absBundlePath)

relBundlePath, err := filepath.Rel(b.WorktreeRoot.Native(), b.BundleRoot.Native())
if err != nil {
return diag.FromErr(err)
diags = append(diags, diag.FromErr(err)...)
} else {
b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath)
}
b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath)
return nil
return diags
}

@@ -26,7 +26,6 @@ func DefaultMutators() []bundle.Mutator {
ComputeIdToClusterId(),
InitializeVariables(),
DefineDefaultTarget(),
LoadGitDetails(),
pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseLoad),

// Note: This mutator must run before the target overrides are merged.

@@ -6,6 +6,7 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/env"
)

@@ -22,7 +23,7 @@ func (m *overrideCompute) Name() string {

func overrideJobCompute(j *resources.Job, compute string) {
for i := range j.Tasks {
var task = &j.Tasks[i]
task := &j.Tasks[i]

if task.ForEachTask != nil {
task = &task.ForEachTask.Task

@@ -38,18 +39,32 @@ func overrideJobCompute(j *resources.Job, compute string) {
}

func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
if b.Config.Bundle.Mode != config.Development {
var diags diag.Diagnostics

if b.Config.Bundle.Mode == config.Production {
if b.Config.Bundle.ClusterId != "" {
return diag.Errorf("cannot override compute for an target that does not use 'mode: development'")
// Overriding compute via a command-line flag for production works, but is not recommended.
diags = diags.Extend(diag.Diagnostics{{
Summary: "Setting a cluster override for a target that uses 'mode: production' is not recommended",
Detail: "It is recommended to always use the same compute for production target for consistency.",
Severity: diag.Warning,
}})
}
return nil
}
if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" {
// For historical reasons, we allow setting the cluster ID via the DATABRICKS_CLUSTER_ID
// when development mode is used. Sometimes, this is done by accident, so we log an info message.
if b.Config.Bundle.Mode == config.Development {
cmdio.LogString(ctx, "Setting a cluster override because DATABRICKS_CLUSTER_ID is set. It is recommended to use --cluster-id instead, which works in any target mode.")
} else {
// We don't allow using DATABRICKS_CLUSTER_ID in any other mode, it's too error-prone.
return diag.Warningf("The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'")
}
b.Config.Bundle.ClusterId = v
}

if b.Config.Bundle.ClusterId == "" {
return nil
return diags
}

r := b.Config.Resources

@@ -57,5 +72,5 @@ func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diag
overrideJobCompute(r.Jobs[i], b.Config.Bundle.ClusterId)
}

return nil
return diags
}

@@ -8,13 +8,14 @@ import (
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestOverrideDevelopment(t *testing.T) {
func TestOverrideComputeModeDevelopment(t *testing.T) {
t.Setenv("DATABRICKS_CLUSTER_ID", "")
b := &bundle.Bundle{
Config: config.Root{

@@ -62,10 +63,13 @@ func TestOverrideDevelopment(t *testing.T) {
assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey)
}

func TestOverrideDevelopmentEnv(t *testing.T) {
func TestOverrideComputeModeDefaultIgnoresVariable(t *testing.T) {
t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
b := &bundle.Bundle{
Config: config.Root{
Bundle: config.Bundle{
Mode: "",
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {JobSettings: &jobs.JobSettings{

@@ -86,11 +90,12 @@ func TestOverrideDevelopmentEnv(t *testing.T) {
m := mutator.OverrideCompute()
diags := bundle.Apply(context.Background(), b, m)
require.NoError(t, diags.Error())
require.Len(t, diags, 1)
assert.Equal(t, "The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'", diags[0].Summary)
assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
}

func TestOverridePipelineTask(t *testing.T) {
func TestOverrideComputePipelineTask(t *testing.T) {
t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
b := &bundle.Bundle{
Config: config.Root{

@@ -115,7 +120,7 @@ func TestOverridePipelineTask(t *testing.T) {
assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
}

func TestOverrideForEachTask(t *testing.T) {
func TestOverrideComputeForEachTask(t *testing.T) {
t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
b := &bundle.Bundle{
Config: config.Root{

@@ -140,10 +145,11 @@ func TestOverrideForEachTask(t *testing.T) {
assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ForEachTask.Task)
}

func TestOverrideProduction(t *testing.T) {
func TestOverrideComputeModeProduction(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Bundle: config.Bundle{
Mode: config.Production,
ClusterId: "newClusterID",
},
Resources: config.Resources{

@@ -166,13 +172,19 @@ func TestOverrideProduction(t *testing.T) {
m := mutator.OverrideCompute()
diags := bundle.Apply(context.Background(), b, m)
require.True(t, diags.HasError())
require.Len(t, diags, 1)
assert.Equal(t, "Setting a cluster override for a target that uses 'mode: production' is not recommended", diags[0].Summary)
assert.Equal(t, diag.Warning, diags[0].Severity)
assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
}

func TestOverrideProductionEnv(t *testing.T) {
func TestOverrideComputeModeProductionIgnoresVariable(t *testing.T) {
t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
b := &bundle.Bundle{
Config: config.Root{
Bundle: config.Bundle{
Mode: config.Production,
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {JobSettings: &jobs.JobSettings{

@@ -193,5 +205,7 @@ func TestOverrideProductionEnv(t *testing.T) {
m := mutator.OverrideCompute()
diags := bundle.Apply(context.Background(), b, m)
require.NoError(t, diags.Error())
require.Len(t, diags, 1)
assert.Equal(t, "The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'", diags[0].Summary)
assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
}

@@ -95,7 +95,7 @@ func jobRewritePatterns() []jobRewritePattern {
// VisitJobPaths visits all paths in job resources and applies a function to each path.
func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) {
var err error
var newValue = value
newValue := value

for _, rewritePattern := range jobRewritePatterns() {
newValue, err = dyn.MapByPattern(newValue, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {

@@ -105,7 +105,6 @@ func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) {
return fn(p, rewritePattern.kind, v)
})

if err != nil {
return dyn.InvalidValue, err
}

@@ -57,14 +57,12 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di
return dyn.NewValue(fmt.Sprintf("/Workspace%s", path), v.Locations()), nil
})

if err != nil {
return dyn.InvalidValue, err
}
}
return v, nil
})

if err != nil {
return diag.FromErr(err)
}

@@ -4,7 +4,7 @@ import (
"context"
"reflect"
"runtime"
"strings"
"slices"
"testing"

"github.com/databricks/cli/bundle"

@@ -131,6 +131,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
Schemas: map[string]*resources.Schema{
"schema1": {CreateSchema: &catalog.CreateSchema{Name: "schema1"}},
},
Volumes: map[string]*resources.Volume{
"volume1": {CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{Name: "volume1"}},
},
Clusters: map[string]*resources.Cluster{
"cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}},
},

@@ -311,6 +314,8 @@ func TestProcessTargetModeDefault(t *testing.T) {
assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
assert.Equal(t, "schema1", b.Config.Resources.Schemas["schema1"].Name)
assert.Equal(t, "volume1", b.Config.Resources.Volumes["volume1"].Name)
assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
}

@@ -355,6 +360,8 @@ func TestProcessTargetModeProduction(t *testing.T) {
assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
assert.Equal(t, "schema1", b.Config.Resources.Schemas["schema1"].Name)
assert.Equal(t, "volume1", b.Config.Resources.Volumes["volume1"].Name)
assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
}

@@ -388,10 +395,17 @@ func TestAllResourcesMocked(t *testing.T) {
}
}

// Make sure that we at least rename all resources
func TestAllResourcesRenamed(t *testing.T) {
// Make sure that we at rename all non UC resources
func TestAllNonUcResourcesAreRenamed(t *testing.T) {
b := mockBundle(config.Development)

// UC resources should not have a prefix added to their name. Right now
// this list only contains the Volume resource since we have yet to remove
// prefixing support for UC schemas and registered models.
ucFields := []reflect.Type{
reflect.TypeOf(&resources.Volume{}),
}

m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
diags := bundle.Apply(context.Background(), b, m)
require.NoError(t, diags.Error())

@@ -404,14 +418,14 @@ func TestAllResourcesRenamed(t *testing.T) {
for _, key := range field.MapKeys() {
resource := field.MapIndex(key)
nameField := resource.Elem().FieldByName("Name")
if nameField.IsValid() && nameField.Kind() == reflect.String {
assert.True(
t,
strings.Contains(nameField.String(), "dev"),
"process_target_mode should rename '%s' in '%s'",
key,
resources.Type().Field(i).Name,
)
if !nameField.IsValid() || nameField.Kind() != reflect.String {
continue
}

if slices.Contains(ucFields, resource.Type()) {
assert.NotContains(t, nameField.String(), "dev", "process_target_mode should not rename '%s' in '%s'", key, resources.Type().Field(i).Name)
} else {
assert.Contains(t, nameField.String(), "dev", "process_target_mode should rename '%s' in '%s'", key, resources.Type().Field(i).Name)
}
}
}

@@ -30,7 +30,6 @@ type parsePythonDiagnosticsTest struct {
}

func TestParsePythonDiagnostics(t *testing.T) {

testCases := []parsePythonDiagnosticsTest{
{
name: "short error with location",

@@ -9,12 +9,11 @@ import (
"io"
"os"
"path/filepath"
"strings"

"github.com/databricks/databricks-sdk-go/logger"
"github.com/fatih/color"

"strings"

"github.com/databricks/cli/libs/python"

"github.com/databricks/cli/bundle/env"

@@ -94,11 +93,10 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
// mutateDiags is used because Mutate returns 'error' instead of 'diag.Diagnostics'
var mutateDiags diag.Diagnostics
var mutateDiagsHasError = errors.New("unexpected error")
mutateDiagsHasError := errors.New("unexpected error")

err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) {
pythonPath, err := detectExecutable(ctx, experimental.PyDABs.VEnvPath)

if err != nil {
return dyn.InvalidValue, fmt.Errorf("failed to get Python interpreter path: %w", err)
}

@@ -141,7 +139,7 @@ func createCacheDir(ctx context.Context) (string, error) {
// use 'default' as target name
cacheDir := filepath.Join(tempDir, "default", "pydabs")

err := os.MkdirAll(cacheDir, 0700)
err := os.MkdirAll(cacheDir, 0o700)
if err != nil {
return "", err
}

@@ -152,7 +150,7 @@ func createCacheDir(ctx context.Context) (string, error) {
return os.MkdirTemp("", "-pydabs")
}

func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) {
func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) {
inputPath := filepath.Join(cacheDir, "input.json")
outputPath := filepath.Join(cacheDir, "output.json")
diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json")

@@ -263,10 +261,10 @@ func writeInputFile(inputPath string, input dyn.Value) error {
return fmt.Errorf("failed to marshal input: %w", err)
}

return os.WriteFile(inputPath, rootConfigJson, 0600)
return os.WriteFile(inputPath, rootConfigJson, 0o600)
}

func loadOutputFile(rootPath string, outputPath string) (dyn.Value, diag.Diagnostics) {
func loadOutputFile(rootPath, outputPath string) (dyn.Value, diag.Diagnostics) {
outputFile, err := os.Open(outputPath)
if err != nil {
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to open output file: %w", err))

@@ -381,7 +379,7 @@ func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor {
return right, nil
},
VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) {
VisitUpdate: func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) {
return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String())
},
}

@@ -430,7 +428,7 @@ func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor {
return right, nil
},
VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) {
VisitUpdate: func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) {
if !valuePath.HasPrefix(jobsPath) {
return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String())
}

@@ -106,7 +106,6 @@ func TestPythonMutator_load(t *testing.T) {
Column: 5,
},
}, diags[0].Locations)

}

func TestPythonMutator_load_disallowed(t *testing.T) {

@@ -588,7 +587,7 @@ or activate the environment before running CLI commands:
assert.Equal(t, expected, out)
}

func withProcessStub(t *testing.T, args []string, output string, diagnostics string) context.Context {
func withProcessStub(t *testing.T, args []string, output, diagnostics string) context.Context {
ctx := context.Background()
ctx, stub := process.WithStub(ctx)

@@ -611,10 +610,10 @@ func withProcessStub(t *testing.T, args []string, output string, diagnostics str
assert.NoError(t, err)

if reflect.DeepEqual(actual.Args, args) {
err := os.WriteFile(outputPath, []byte(output), 0600)
err := os.WriteFile(outputPath, []byte(output), 0o600)
require.NoError(t, err)

err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0600)
err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0o600)
require.NoError(t, err)

return nil

@@ -626,7 +625,7 @@ func withProcessStub(t *testing.T, args []string, output string, diagnostics str
return ctx
}

func loadYaml(name string, content string) *bundle.Bundle {
func loadYaml(name, content string) *bundle.Bundle {
v, diag := config.LoadFromBytes(name, []byte(content))

if diag.Error() != nil {

@@ -650,17 +649,17 @@ func withFakeVEnv(t *testing.T, venvPath string) {
interpreterPath := interpreterPath(venvPath)

err = os.MkdirAll(filepath.Dir(interpreterPath), 0755)
err = os.MkdirAll(filepath.Dir(interpreterPath), 0o755)
if err != nil {
panic(err)
}

err = os.WriteFile(interpreterPath, []byte(""), 0755)
err = os.WriteFile(interpreterPath, []byte(""), 0o755)
if err != nil {
panic(err)
}

err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0755)
err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0o755)
if err != nil {
panic(err)
}

@@ -36,8 +36,7 @@ func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle)
			return fmt.Errorf("failed to resolve %s, err: %w", v.Lookup, err)
		}

		v.Set(id)
		return nil
		return v.Set(id)
	})
}

@@ -108,7 +108,8 @@ func TestNoLookupIfVariableIsSet(t *testing.T) {
	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	b.Config.Variables["my-cluster-id"].Set("random value")
	err := b.Config.Variables["my-cluster-id"].Set("random value")
	require.NoError(t, err)

	diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
	require.NoError(t, diags.Error())
|
|
@ -32,11 +32,12 @@ func ResolveVariableReferencesInLookup() bundle.Mutator {
|
|||
}
|
||||
|
||||
func ResolveVariableReferencesInComplexVariables() bundle.Mutator {
|
||||
return &resolveVariableReferences{prefixes: []string{
|
||||
"bundle",
|
||||
"workspace",
|
||||
"variables",
|
||||
},
|
||||
return &resolveVariableReferences{
|
||||
prefixes: []string{
|
||||
"bundle",
|
||||
"workspace",
|
||||
"variables",
|
||||
},
|
||||
pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("value")),
|
||||
lookupFn: lookupForComplexVariables,
|
||||
skipFn: skipResolvingInNonComplexVariables,
|
||||
|
@ -173,7 +174,6 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle)
|
|||
return dyn.InvalidValue, dynvar.ErrSkipResolution
|
||||
})
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, err
|
||||
}
|
||||
|
@ -184,7 +184,6 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle)
|
|||
diags = diags.Extend(normaliseDiags)
|
||||
return root, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
diags = diags.Extend(diag.FromErr(err))
|
||||
}
|
||||
|
|
|
@ -63,7 +63,6 @@ func (m *rewriteWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di
|
|||
return v, nil
|
||||
})
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
|
|
@ -81,5 +81,4 @@ func TestNoWorkspacePrefixUsed(t *testing.T) {
|
|||
require.Equal(t, "${workspace.artifact_path}/jar1.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].Libraries[0].Jar)
|
||||
require.Equal(t, "${workspace.file_path}/notebook2", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].NotebookTask.NotebookPath)
|
||||
require.Equal(t, "${workspace.artifact_path}/jar2.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].Libraries[0].Jar)
|
||||
|
||||
}
|
||||
|
|
|
@ -12,8 +12,7 @@ import (
|
|||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
)
|
||||
|
||||
type setRunAs struct {
|
||||
}
|
||||
type setRunAs struct{}
|
||||
|
||||
// This mutator does two things:
|
||||
//
|
||||
|
@ -30,7 +29,7 @@ func (m *setRunAs) Name() string {
|
|||
return "SetRunAs"
|
||||
}
|
||||
|
||||
func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser string, runAsUser string) diag.Diagnostics {
|
||||
func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser, runAsUser string) diag.Diagnostics {
|
||||
return diag.Diagnostics{{
|
||||
Summary: fmt.Sprintf("%s do not support a setting a run_as user that is different from the owner.\n"+
|
||||
"Current identity: %s. Run as identity: %s.\n"+
|
||||
|
|
|
@ -42,6 +42,7 @@ func allResourceTypes(t *testing.T) []string {
|
|||
"quality_monitors",
|
||||
"registered_models",
|
||||
"schemas",
|
||||
"volumes",
|
||||
},
|
||||
resourceTypes,
|
||||
)
|
||||
|
@ -141,6 +142,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
|
|||
"registered_models",
|
||||
"experiments",
|
||||
"schemas",
|
||||
"volumes",
|
||||
}
|
||||
|
||||
base := config.Root{
|
||||
|
|
|
@ -65,7 +65,6 @@ func setVariable(ctx context.Context, v dyn.Value, variable *variable.Variable,
|
|||
|
||||
// We should have had a value to set for the variable at this point.
|
||||
return dyn.InvalidValue, fmt.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name)
|
||||
|
||||
}
|
||||
|
||||
func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
|
|
|
@ -35,7 +35,7 @@ func (m *syncInferRoot) Name() string {
|
|||
// If the path does not exist, it returns an empty string.
|
||||
//
|
||||
// See "sync_infer_root_internal_test.go" for examples.
|
||||
func (m *syncInferRoot) computeRoot(path string, root string) string {
|
||||
func (m *syncInferRoot) computeRoot(path, root string) string {
|
||||
for !filepath.IsLocal(path) {
|
||||
// Break if we have reached the root of the filesystem.
|
||||
dir := filepath.Dir(root)
|
||||
|
|
|
@ -275,8 +275,8 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnos
|
|||
}
|
||||
|
||||
func gatherFallbackPaths(v dyn.Value, typ string) (map[string]string, error) {
|
||||
var fallback = make(map[string]string)
|
||||
var pattern = dyn.NewPattern(dyn.Key("resources"), dyn.Key(typ), dyn.AnyKey())
|
||||
fallback := make(map[string]string)
|
||||
pattern := dyn.NewPattern(dyn.Key("resources"), dyn.Key(typ), dyn.AnyKey())
|
||||
|
||||
// Previous behavior was to use a resource's location as the base path to resolve
|
||||
// relative paths in its definition. With the introduction of [dyn.Value] throughout,
|
||||
|
|
|
@ -28,12 +28,13 @@ import (
|
|||
func touchNotebookFile(t *testing.T, path string) {
|
||||
f, err := os.Create(path)
|
||||
require.NoError(t, err)
|
||||
f.WriteString("# Databricks notebook source\n")
|
||||
_, err = f.WriteString("# Databricks notebook source\n")
|
||||
require.NoError(t, err)
|
||||
f.Close()
|
||||
}
|
||||
|
||||
func touchEmptyFile(t *testing.T, path string) {
|
||||
err := os.MkdirAll(filepath.Dir(path), 0700)
|
||||
err := os.MkdirAll(filepath.Dir(path), 0o700)
|
||||
require.NoError(t, err)
|
||||
f, err := os.Create(path)
|
||||
require.NoError(t, err)
|
||||
|
|
|
@ -15,8 +15,7 @@ func VerifyCliVersion() bundle.Mutator {
|
|||
return &verifyCliVersion{}
|
||||
}
|
||||
|
||||
type verifyCliVersion struct {
|
||||
}
|
||||
type verifyCliVersion struct{}
|
||||
|
||||
func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
// No constraints specified, skip the check.
|
||||
|
|
|
@@ -1,7 +1,9 @@
package config

const Paused = "PAUSED"
const Unpaused = "UNPAUSED"
const (
	Paused   = "PAUSED"
	Unpaused = "UNPAUSED"
)

type Presets struct {
	// NamePrefix to prepend to all resource names.
|
|
@ -20,6 +20,7 @@ type Resources struct {
|
|||
RegisteredModels map[string]*resources.RegisteredModel `json:"registered_models,omitempty"`
|
||||
QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"`
|
||||
Schemas map[string]*resources.Schema `json:"schemas,omitempty"`
|
||||
Volumes map[string]*resources.Volume `json:"volumes,omitempty"`
|
||||
Clusters map[string]*resources.Cluster `json:"clusters,omitempty"`
|
||||
Dashboards map[string]*resources.Dashboard `json:"dashboards,omitempty"`
|
||||
}
|
||||
|
@ -85,6 +86,7 @@ func (r *Resources) AllResources() []ResourceGroup {
|
|||
collectResourceMap(descriptions["schemas"], r.Schemas),
|
||||
collectResourceMap(descriptions["clusters"], r.Clusters),
|
||||
collectResourceMap(descriptions["dashboards"], r.Dashboards),
|
||||
collectResourceMap(descriptions["volumes"], r.Volumes),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -189,5 +191,11 @@ func SupportedResources() map[string]ResourceDescription {
|
|||
SingularTitle: "Dashboard",
|
||||
PluralTitle: "Dashboards",
|
||||
},
|
||||
"volumes": {
|
||||
SingularName: "volume",
|
||||
PluralName: "volumes",
|
||||
SingularTitle: "Volume",
|
||||
PluralTitle: "Volumes",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,62 @@
package resources

import (
	"context"
	"fmt"
	"net/url"
	"strings"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/marshal"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

type Volume struct {
	// List of grants to apply on this volume.
	Grants []Grant `json:"grants,omitempty"`

	// Full name of the volume (catalog_name.schema_name.volume_name). This value is read from
	// the terraform state after deployment succeeds.
	ID string `json:"id,omitempty" bundle:"readonly"`

	*catalog.CreateVolumeRequestContent

	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
	URL            string         `json:"url,omitempty" bundle:"internal"`
}

func (v *Volume) UnmarshalJSON(b []byte) error {
	return marshal.Unmarshal(b, v)
}

func (v Volume) MarshalJSON() ([]byte, error) {
	return marshal.Marshal(v)
}

func (v *Volume) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
	return false, fmt.Errorf("volume.Exists() is not supported")
}

func (v *Volume) TerraformResourceName() string {
	return "databricks_volume"
}

func (v *Volume) InitializeURL(baseURL url.URL) {
	if v.ID == "" {
		return
	}
	baseURL.Path = fmt.Sprintf("explore/data/volumes/%s", strings.ReplaceAll(v.ID, ".", "/"))
	v.URL = baseURL.String()
}

func (v *Volume) GetURL() string {
	return v.URL
}

func (v *Volume) GetName() string {
	return v.Name
}

func (v *Volume) IsNil() bool {
	return v.CreateVolumeRequestContent == nil
}

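As a hedged aside (not taken from the diff), the snippet below exercises the same URL construction that Volume.InitializeURL performs above; the workspace host and volume ID are made-up placeholders.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// Dots in the volume's full name (catalog.schema.volume) become path
	// segments under explore/data/volumes, mirroring InitializeURL above.
	base := url.URL{Scheme: "https", Host: "example.cloud.databricks.com"} // hypothetical host
	id := "main.default.my_volume"                                         // hypothetical volume ID
	base.Path = fmt.Sprintf("explore/data/volumes/%s", strings.ReplaceAll(id, ".", "/"))
	fmt.Println(base.String())
}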
@ -49,7 +49,8 @@ func TestCustomMarshallerIsImplemented(t *testing.T) {
|
|||
// Eg: resource.Job implements MarshalJSON
|
||||
v := reflect.Zero(vt.Elem()).Interface()
|
||||
assert.NotPanics(t, func() {
|
||||
json.Marshal(v)
|
||||
_, err := json.Marshal(v)
|
||||
assert.NoError(t, err)
|
||||
}, "Resource %s does not have a custom marshaller", field.Name)
|
||||
|
||||
// Unmarshalling a *resourceStruct will panic if the resource does not have a custom unmarshaller
|
||||
|
@ -58,7 +59,8 @@ func TestCustomMarshallerIsImplemented(t *testing.T) {
|
|||
// Eg: *resource.Job implements UnmarshalJSON
|
||||
v = reflect.New(vt.Elem()).Interface()
|
||||
assert.NotPanics(t, func() {
|
||||
json.Unmarshal([]byte("{}"), v)
|
||||
err := json.Unmarshal([]byte("{}"), v)
|
||||
assert.NoError(t, err)
|
||||
}, "Resource %s does not have a custom unmarshaller", field.Name)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -100,7 +100,7 @@ func TestRootMergeTargetOverridesWithMode(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
root.initializeDynamicValue()
|
||||
require.NoError(t, root.initializeDynamicValue())
|
||||
require.NoError(t, root.MergeTargetOverrides("development"))
|
||||
assert.Equal(t, Development, root.Bundle.Mode)
|
||||
}
|
||||
|
@ -133,7 +133,7 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) {
|
|||
"complex": {
|
||||
Type: variable.VariableTypeComplex,
|
||||
Description: "complex var",
|
||||
Default: map[string]interface{}{
|
||||
Default: map[string]any{
|
||||
"key": "value",
|
||||
},
|
||||
},
|
||||
|
@ -148,7 +148,7 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) {
|
|||
"complex": {
|
||||
Type: "wrong",
|
||||
Description: "wrong",
|
||||
Default: map[string]interface{}{
|
||||
Default: map[string]any{
|
||||
"key1": "value1",
|
||||
},
|
||||
},
|
||||
|
@ -156,7 +156,7 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
root.initializeDynamicValue()
|
||||
require.NoError(t, root.initializeDynamicValue())
|
||||
require.NoError(t, root.MergeTargetOverrides("development"))
|
||||
assert.Equal(t, "bar", root.Variables["foo"].Default)
|
||||
assert.Equal(t, "foo var", root.Variables["foo"].Description)
|
||||
|
@ -164,11 +164,10 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) {
|
|||
assert.Equal(t, "foo2", root.Variables["foo2"].Default)
|
||||
assert.Equal(t, "foo2 var", root.Variables["foo2"].Description)
|
||||
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
assert.Equal(t, map[string]any{
|
||||
"key1": "value1",
|
||||
}, root.Variables["complex"].Default)
|
||||
assert.Equal(t, "complex var", root.Variables["complex"].Description)
|
||||
|
||||
}
|
||||
|
||||
func TestIsFullVariableOverrideDef(t *testing.T) {
|
||||
|
@ -252,5 +251,4 @@ func TestIsFullVariableOverrideDef(t *testing.T) {
|
|||
for i, tc := range testCases {
|
||||
assert.Equal(t, tc.expected, isFullVariableOverrideDef(tc.value), "test case %d", i)
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -13,8 +13,7 @@ func FilesToSync() bundle.ReadOnlyMutator {
|
|||
return &filesToSync{}
|
||||
}
|
||||
|
||||
type filesToSync struct {
|
||||
}
|
||||
type filesToSync struct{}
|
||||
|
||||
func (v *filesToSync) Name() string {
|
||||
return "validate:files_to_sync"
|
||||
|
|
|
@ -2,6 +2,7 @@ package validate
|
|||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
|
@ -44,6 +45,7 @@ func setupBundleForFilesToSyncTest(t *testing.T) *bundle.Bundle {
|
|||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
WorktreeRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Bundle: config.Bundle{
|
||||
Target: "default",
|
||||
|
@ -80,7 +82,7 @@ func TestFilesToSync_EverythingIgnored(t *testing.T) {
|
|||
b := setupBundleForFilesToSyncTest(t)
|
||||
|
||||
// Ignore all files.
|
||||
testutil.WriteFile(t, "*\n.*\n", b.BundleRootPath, ".gitignore")
|
||||
testutil.WriteFile(t, filepath.Join(b.BundleRootPath, ".gitignore"), "*\n.*\n")
|
||||
|
||||
ctx := context.Background()
|
||||
rb := bundle.ReadOnly(b)
|
||||
|
|
|
@ -15,8 +15,7 @@ import (
|
|||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type folderPermissions struct {
|
||||
}
|
||||
type folderPermissions struct{}
|
||||
|
||||
// Apply implements bundle.ReadOnlyMutator.
|
||||
func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) diag.Diagnostics {
|
||||
|
|
|
@ -13,8 +13,7 @@ func JobClusterKeyDefined() bundle.ReadOnlyMutator {
|
|||
return &jobClusterKeyDefined{}
|
||||
}
|
||||
|
||||
type jobClusterKeyDefined struct {
|
||||
}
|
||||
type jobClusterKeyDefined struct{}
|
||||
|
||||
func (v *jobClusterKeyDefined) Name() string {
|
||||
return "validate:job_cluster_key_defined"
|
||||
|
|
|
@ -17,8 +17,7 @@ func JobTaskClusterSpec() bundle.ReadOnlyMutator {
|
|||
return &jobTaskClusterSpec{}
|
||||
}
|
||||
|
||||
type jobTaskClusterSpec struct {
|
||||
}
|
||||
type jobTaskClusterSpec struct{}
|
||||
|
||||
func (v *jobTaskClusterSpec) Name() string {
|
||||
return "validate:job_task_cluster_spec"
|
||||
|
|
|
@ -175,7 +175,6 @@ func TestValidateSingleNodeClusterFailForJobClusters(t *testing.T) {
|
|||
Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.job_clusters[0].new_cluster")},
|
||||
},
|
||||
}, diags)
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -8,8 +8,7 @@ import (
|
|||
"github.com/databricks/cli/libs/dyn"
|
||||
)
|
||||
|
||||
type validate struct {
|
||||
}
|
||||
type validate struct{}
|
||||
|
||||
type location struct {
|
||||
path string
|
||||
|
|
|
@ -17,8 +17,7 @@ func ValidateSyncPatterns() bundle.ReadOnlyMutator {
|
|||
return &validateSyncPatterns{}
|
||||
}
|
||||
|
||||
type validateSyncPatterns struct {
|
||||
}
|
||||
type validateSyncPatterns struct{}
|
||||
|
||||
func (v *validateSyncPatterns) Name() string {
|
||||
return "validate:validate_sync_patterns"
|
||||
|
|
|
@ -42,7 +42,6 @@ func TestLookup_Empty(t *testing.T) {
|
|||
|
||||
// No string representation for an invalid lookup
|
||||
assert.Empty(t, lookup.String())
|
||||
|
||||
}
|
||||
|
||||
func TestLookup_Multiple(t *testing.T) {
|
||||
|
|
|
@ -20,7 +20,6 @@ func (l resolveCluster) Resolve(ctx context.Context, w *databricks.WorkspaceClie
|
|||
ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
"github.com/databricks/cli/libs/databrickscfg"
|
||||
"github.com/databricks/databricks-sdk-go/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func setupWorkspaceTest(t *testing.T) string {
|
||||
|
@ -42,11 +43,12 @@ func TestWorkspaceResolveProfileFromHost(t *testing.T) {
|
|||
setupWorkspaceTest(t)
|
||||
|
||||
// This works if there is a config file with a matching profile.
|
||||
databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
err := databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
Profile: "default",
|
||||
Host: "https://abc.cloud.databricks.com",
|
||||
Token: "123",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
client, err := w.Client()
|
||||
assert.NoError(t, err)
|
||||
|
@ -57,12 +59,13 @@ func TestWorkspaceResolveProfileFromHost(t *testing.T) {
|
|||
home := setupWorkspaceTest(t)
|
||||
|
||||
// This works if there is a config file with a matching profile.
|
||||
databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
err := databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
ConfigFile: filepath.Join(home, "customcfg"),
|
||||
Profile: "custom",
|
||||
Host: "https://abc.cloud.databricks.com",
|
||||
Token: "123",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg"))
|
||||
client, err := w.Client()
|
||||
|
@ -90,12 +93,13 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) {
|
|||
setupWorkspaceTest(t)
|
||||
|
||||
// This works if there is a config file with a matching profile.
|
||||
databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
err := databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
Profile: "abc",
|
||||
Host: "https://abc.cloud.databricks.com",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err := w.Client()
|
||||
_, err = w.Client()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
|
@ -103,12 +107,13 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) {
|
|||
setupWorkspaceTest(t)
|
||||
|
||||
// This works if there is a config file with a matching profile.
|
||||
databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
err := databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
Profile: "abc",
|
||||
Host: "https://def.cloud.databricks.com",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err := w.Client()
|
||||
_, err = w.Client()
|
||||
assert.ErrorContains(t, err, "config host mismatch")
|
||||
})
|
||||
|
||||
|
@ -116,14 +121,15 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) {
|
|||
home := setupWorkspaceTest(t)
|
||||
|
||||
// This works if there is a config file with a matching profile.
|
||||
databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
err := databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
ConfigFile: filepath.Join(home, "customcfg"),
|
||||
Profile: "abc",
|
||||
Host: "https://abc.cloud.databricks.com",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg"))
|
||||
_, err := w.Client()
|
||||
_, err = w.Client()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
|
@ -131,14 +137,15 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) {
|
|||
home := setupWorkspaceTest(t)
|
||||
|
||||
// This works if there is a config file with a matching profile.
|
||||
databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
err := databrickscfg.SaveToProfile(context.Background(), &config.Config{
|
||||
ConfigFile: filepath.Join(home, "customcfg"),
|
||||
Profile: "abc",
|
||||
Host: "https://def.cloud.databricks.com",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg"))
|
||||
_, err := w.Client()
|
||||
_, err = w.Client()
|
||||
assert.ErrorContains(t, err, "config host mismatch")
|
||||
})
|
||||
}
|
||||
|
|
|
@@ -15,7 +15,7 @@ func (d *DeferredMutator) Name() string {
	return "deferred"
}

func Defer(mutator Mutator, finally Mutator) Mutator {
func Defer(mutator, finally Mutator) Mutator {
	return &DeferredMutator{
		mutator: mutator,
		finally: finally,
|
||||
|
|
|
@ -19,7 +19,7 @@ func (t *mutatorWithError) Name() string {
|
|||
|
||||
func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) diag.Diagnostics {
|
||||
t.applyCalled++
|
||||
return diag.Errorf(t.errorMsg)
|
||||
return diag.Errorf(t.errorMsg) // nolint:govet
|
||||
}
|
||||
|
||||
func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) {
|
||||
|
|
|
@ -28,10 +28,11 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp
|
|||
}
|
||||
|
||||
opts := &sync.SyncOptions{
|
||||
LocalRoot: rb.SyncRoot(),
|
||||
Paths: rb.Config().Sync.Paths,
|
||||
Include: includes,
|
||||
Exclude: rb.Config().Sync.Exclude,
|
||||
WorktreeRoot: rb.WorktreeRoot(),
|
||||
LocalRoot: rb.SyncRoot(),
|
||||
Paths: rb.Config().Sync.Paths,
|
||||
Include: includes,
|
||||
Exclude: rb.Config().Sync.Exclude,
|
||||
|
||||
RemotePath: rb.Config().Workspace.FilePath,
|
||||
Host: rb.WorkspaceClient().Config.Host,
|
||||
|
|
|
@@ -15,8 +15,10 @@ import (
	"github.com/google/uuid"
)

const DeploymentStateFileName = "deployment.json"
const DeploymentStateVersion = 1
const (
	DeploymentStateFileName = "deployment.json"
	DeploymentStateVersion  = 1
)

type File struct {
	LocalPath string `json:"local_path"`

@@ -132,7 +134,7 @@ func (f Filelist) ToSlice(root vfs.Path) []fileset.File {
	return files
}

func isLocalStateStale(local io.Reader, remote io.Reader) bool {
func isLocalStateStale(local, remote io.Reader) bool {
	localState, err := loadState(local)
	if err != nil {
		return true

@@ -44,7 +44,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
		return diag.FromErr(err)
	}

	local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0600)
	local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0o600)
	if err != nil {
		return diag.FromErr(err)
	}

@@ -62,8 +62,14 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
	}

	// Truncating the file before writing
	local.Truncate(0)
	local.Seek(0, 0)
	err = local.Truncate(0)
	if err != nil {
		return diag.FromErr(err)
	}
	_, err = local.Seek(0, 0)
	if err != nil {
		return diag.FromErr(err)
	}

	// Write file to disk.
	log.Infof(ctx, "Writing remote deployment state file to local cache directory")
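Not taken from the commit: a minimal, runnable sketch of the truncate-then-seek pattern that the statePull change above adopts before rewriting an already-open file, shown here against a throwaway temp file.

package main

import (
	"log"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "state-*.json")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())

	if _, err := f.WriteString(`{"serial": 1}`); err != nil {
		log.Fatal(err)
	}

	// Discard the previous contents and rewind before writing the new payload,
	// checking both errors instead of ignoring them.
	if err := f.Truncate(0); err != nil {
		log.Fatal(err)
	}
	if _, err := f.Seek(0, 0); err != nil {
		log.Fatal(err)
	}
	if _, err := f.WriteString(`{"serial": 2}`); err != nil {
		log.Fatal(err)
	}
}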
|
|
@ -99,7 +99,7 @@ func testStatePull(t *testing.T, opts statePullOpts) {
|
|||
snapshotPath, err := sync.SnapshotPath(opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.WriteFile(snapshotPath, []byte("snapshot"), 0644)
|
||||
err = os.WriteFile(snapshotPath, []byte("snapshot"), 0o644)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
|
@ -110,7 +110,7 @@ func testStatePull(t *testing.T, opts statePullOpts) {
|
|||
data, err := json.Marshal(opts.localState)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.WriteFile(statePath, data, 0644)
|
||||
err = os.WriteFile(statePath, data, 0o644)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
|
|
|
@ -74,7 +74,7 @@ func TestStatePush(t *testing.T) {
|
|||
data, err := json.Marshal(state)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.WriteFile(statePath, data, 0644)
|
||||
err = os.WriteFile(statePath, data, 0o644)
|
||||
require.NoError(t, err)
|
||||
|
||||
diags := bundle.Apply(ctx, b, s)
|
||||
|
|
|
@ -17,8 +17,7 @@ import (
|
|||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type stateUpdate struct {
|
||||
}
|
||||
type stateUpdate struct{}
|
||||
|
||||
func (s *stateUpdate) Name() string {
|
||||
return "deploy:state-update"
|
||||
|
@ -57,7 +56,7 @@ func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost
|
|||
return diag.FromErr(err)
|
||||
}
|
||||
// Write the state back to the file.
|
||||
f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0600)
|
||||
f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o600)
|
||||
if err != nil {
|
||||
log.Infof(ctx, "Unable to open deployment state file: %s", err)
|
||||
return diag.FromErr(err)
|
||||
|
|
|
@ -119,7 +119,7 @@ func TestStateUpdateWithExistingState(t *testing.T) {
|
|||
data, err := json.Marshal(state)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.WriteFile(statePath, data, 0644)
|
||||
err = os.WriteFile(statePath, data, 0o644)
|
||||
require.NoError(t, err)
|
||||
|
||||
diags := bundle.Apply(ctx, b, s)
|
||||
|
|
|
@ -42,8 +42,7 @@ func collectDashboardsFromState(ctx context.Context, b *bundle.Bundle) ([]dashbo
|
|||
return dashboards, nil
|
||||
}
|
||||
|
||||
type checkDashboardsModifiedRemotely struct {
|
||||
}
|
||||
type checkDashboardsModifiedRemotely struct{}
|
||||
|
||||
func (l *checkDashboardsModifiedRemotely) Name() string {
|
||||
return "CheckDashboardsModifiedRemotely"
|
||||
|
|
|
@ -139,7 +139,7 @@ func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle
|
|||
require.NoError(t, err)
|
||||
|
||||
// Write fake state file.
|
||||
testutil.WriteFile(t, `
|
||||
testutil.WriteFile(t, filepath.Join(tfDir, TerraformStateFileName), `
|
||||
{
|
||||
"version": 4,
|
||||
"terraform_version": "1.5.5",
|
||||
|
@ -187,5 +187,5 @@ func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle
|
|||
}
|
||||
]
|
||||
}
|
||||
`, filepath.Join(tfDir, TerraformStateFileName))
|
||||
`)
|
||||
}
|
||||
|
|
|
@ -23,8 +23,7 @@ func (e ErrResourceIsRunning) Error() string {
|
|||
return fmt.Sprintf("%s %s is running", e.resourceType, e.resourceId)
|
||||
}
|
||||
|
||||
type checkRunningResources struct {
|
||||
}
|
||||
type checkRunningResources struct{}
|
||||
|
||||
func (l *checkRunningResources) Name() string {
|
||||
return "check-running-resources"
|
||||
|
|
|
@ -166,6 +166,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
|
|||
}
|
||||
cur.ID = instance.Attributes.ID
|
||||
config.Resources.Schemas[resource.Name] = cur
|
||||
case "databricks_volume":
|
||||
if config.Resources.Volumes == nil {
|
||||
config.Resources.Volumes = make(map[string]*resources.Volume)
|
||||
}
|
||||
cur := config.Resources.Volumes[resource.Name]
|
||||
if cur == nil {
|
||||
cur = &resources.Volume{ModifiedStatus: resources.ModifiedStatusDeleted}
|
||||
}
|
||||
cur.ID = instance.Attributes.ID
|
||||
config.Resources.Volumes[resource.Name] = cur
|
||||
case "databricks_cluster":
|
||||
if config.Resources.Clusters == nil {
|
||||
config.Resources.Clusters = make(map[string]*resources.Cluster)
|
||||
|
@ -235,6 +245,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
|
|||
src.ModifiedStatus = resources.ModifiedStatusCreated
|
||||
}
|
||||
}
|
||||
for _, src := range config.Resources.Volumes {
|
||||
if src.ModifiedStatus == "" && src.ID == "" {
|
||||
src.ModifiedStatus = resources.ModifiedStatusCreated
|
||||
}
|
||||
}
|
||||
for _, src := range config.Resources.Clusters {
|
||||
if src.ModifiedStatus == "" && src.ID == "" {
|
||||
src.ModifiedStatus = resources.ModifiedStatusCreated
|
||||
|
|
|
@ -43,7 +43,7 @@ func convertToResourceStruct[T any](t *testing.T, resource *T, data any) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformJob(t *testing.T) {
|
||||
var src = resources.Job{
|
||||
src := resources.Job{
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "my job",
|
||||
JobClusters: []jobs.JobCluster{
|
||||
|
@ -71,7 +71,7 @@ func TestBundleToTerraformJob(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"my_job": &src,
|
||||
|
@ -93,7 +93,7 @@ func TestBundleToTerraformJob(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformJobPermissions(t *testing.T) {
|
||||
var src = resources.Job{
|
||||
src := resources.Job{
|
||||
Permissions: []resources.Permission{
|
||||
{
|
||||
Level: "CAN_VIEW",
|
||||
|
@ -102,7 +102,7 @@ func TestBundleToTerraformJobPermissions(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"my_job": &src,
|
||||
|
@ -121,7 +121,7 @@ func TestBundleToTerraformJobPermissions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformJobTaskLibraries(t *testing.T) {
|
||||
var src = resources.Job{
|
||||
src := resources.Job{
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "my job",
|
||||
Tasks: []jobs.Task{
|
||||
|
@ -139,7 +139,7 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"my_job": &src,
|
||||
|
@ -158,7 +158,7 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformForEachTaskLibraries(t *testing.T) {
|
||||
var src = resources.Job{
|
||||
src := resources.Job{
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "my job",
|
||||
Tasks: []jobs.Task{
|
||||
|
@ -182,7 +182,7 @@ func TestBundleToTerraformForEachTaskLibraries(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"my_job": &src,
|
||||
|
@ -201,7 +201,7 @@ func TestBundleToTerraformForEachTaskLibraries(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformPipeline(t *testing.T) {
|
||||
var src = resources.Pipeline{
|
||||
src := resources.Pipeline{
|
||||
PipelineSpec: &pipelines.PipelineSpec{
|
||||
Name: "my pipeline",
|
||||
Libraries: []pipelines.PipelineLibrary{
|
||||
|
@ -239,7 +239,7 @@ func TestBundleToTerraformPipeline(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"my_pipeline": &src,
|
||||
|
@ -262,7 +262,7 @@ func TestBundleToTerraformPipeline(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformPipelinePermissions(t *testing.T) {
|
||||
var src = resources.Pipeline{
|
||||
src := resources.Pipeline{
|
||||
Permissions: []resources.Permission{
|
||||
{
|
||||
Level: "CAN_VIEW",
|
||||
|
@ -271,7 +271,7 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"my_pipeline": &src,
|
||||
|
@ -290,7 +290,7 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformModel(t *testing.T) {
|
||||
var src = resources.MlflowModel{
|
||||
src := resources.MlflowModel{
|
||||
Model: &ml.Model{
|
||||
Name: "name",
|
||||
Description: "description",
|
||||
|
@ -307,7 +307,7 @@ func TestBundleToTerraformModel(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Models: map[string]*resources.MlflowModel{
|
||||
"my_model": &src,
|
||||
|
@ -330,7 +330,7 @@ func TestBundleToTerraformModel(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformModelPermissions(t *testing.T) {
|
||||
var src = resources.MlflowModel{
|
||||
src := resources.MlflowModel{
|
||||
Model: &ml.Model{
|
||||
Name: "name",
|
||||
},
|
||||
|
@ -342,7 +342,7 @@ func TestBundleToTerraformModelPermissions(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Models: map[string]*resources.MlflowModel{
|
||||
"my_model": &src,
|
||||
|
@ -361,13 +361,13 @@ func TestBundleToTerraformModelPermissions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformExperiment(t *testing.T) {
|
||||
var src = resources.MlflowExperiment{
|
||||
src := resources.MlflowExperiment{
|
||||
Experiment: &ml.Experiment{
|
||||
Name: "name",
|
||||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Experiments: map[string]*resources.MlflowExperiment{
|
||||
"my_experiment": &src,
|
||||
|
@ -384,7 +384,7 @@ func TestBundleToTerraformExperiment(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformExperimentPermissions(t *testing.T) {
|
||||
var src = resources.MlflowExperiment{
|
||||
src := resources.MlflowExperiment{
|
||||
Experiment: &ml.Experiment{
|
||||
Name: "name",
|
||||
},
|
||||
|
@ -396,7 +396,7 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Experiments: map[string]*resources.MlflowExperiment{
|
||||
"my_experiment": &src,
|
||||
|
@ -415,7 +415,7 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformModelServing(t *testing.T) {
|
||||
var src = resources.ModelServingEndpoint{
|
||||
src := resources.ModelServingEndpoint{
|
||||
CreateServingEndpoint: &serving.CreateServingEndpoint{
|
||||
Name: "name",
|
||||
Config: serving.EndpointCoreConfigInput{
|
||||
|
@ -439,7 +439,7 @@ func TestBundleToTerraformModelServing(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
|
||||
"my_model_serving_endpoint": &src,
|
||||
|
@ -462,7 +462,7 @@ func TestBundleToTerraformModelServing(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformModelServingPermissions(t *testing.T) {
|
||||
var src = resources.ModelServingEndpoint{
|
||||
src := resources.ModelServingEndpoint{
|
||||
CreateServingEndpoint: &serving.CreateServingEndpoint{
|
||||
Name: "name",
|
||||
|
||||
|
@ -492,7 +492,7 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
|
||||
"my_model_serving_endpoint": &src,
|
||||
|
@ -511,7 +511,7 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformRegisteredModel(t *testing.T) {
|
||||
var src = resources.RegisteredModel{
|
||||
src := resources.RegisteredModel{
|
||||
CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{
|
||||
Name: "name",
|
||||
CatalogName: "catalog",
|
||||
|
@ -520,7 +520,7 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
RegisteredModels: map[string]*resources.RegisteredModel{
|
||||
"my_registered_model": &src,
|
||||
|
@ -540,7 +540,7 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformRegisteredModelGrants(t *testing.T) {
|
||||
var src = resources.RegisteredModel{
|
||||
src := resources.RegisteredModel{
|
||||
CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{
|
||||
Name: "name",
|
||||
CatalogName: "catalog",
|
||||
|
@ -554,7 +554,7 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
RegisteredModels: map[string]*resources.RegisteredModel{
|
||||
"my_registered_model": &src,
|
||||
|
@ -573,14 +573,14 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBundleToTerraformDeletedResources(t *testing.T) {
|
||||
var job1 = resources.Job{
|
||||
job1 := resources.Job{
|
||||
JobSettings: &jobs.JobSettings{},
|
||||
}
|
||||
var job2 = resources.Job{
|
||||
job2 := resources.Job{
|
||||
ModifiedStatus: resources.ModifiedStatusDeleted,
|
||||
JobSettings: &jobs.JobSettings{},
|
||||
}
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"my_job1": &job1,
|
||||
|
@ -601,10 +601,10 @@ func TestBundleToTerraformDeletedResources(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{},
|
||||
}
|
||||
var tfState = resourcesState{
|
||||
tfState := resourcesState{
|
||||
Resources: []stateResource{
|
||||
{
|
||||
Type: "databricks_job",
|
||||
|
@ -670,6 +670,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
|
|||
{Attributes: stateInstanceAttributes{ID: "1"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "databricks_volume",
|
||||
Mode: "managed",
|
||||
Name: "test_volume",
|
||||
Instances: []stateResourceInstance{
|
||||
{Attributes: stateInstanceAttributes{ID: "1"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "databricks_cluster",
|
||||
Mode: "managed",
|
||||
|
@ -715,6 +723,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
|
|||
assert.Equal(t, "1", config.Resources.Schemas["test_schema"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Schemas["test_schema"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "1", config.Resources.Volumes["test_volume"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Volumes["test_volume"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster"].ModifiedStatus)
|
||||
|
||||
|
@ -725,7 +736,7 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"test_job": {
|
||||
|
@ -783,6 +794,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
Volumes: map[string]*resources.Volume{
|
||||
"test_volume": {
|
||||
CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
|
||||
Name: "test_volume",
|
||||
},
|
||||
},
|
||||
},
|
||||
Clusters: map[string]*resources.Cluster{
|
||||
"test_cluster": {
|
||||
ClusterSpec: &compute.ClusterSpec{
|
||||
|
@ -799,7 +817,7 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
var tfState = resourcesState{
|
||||
tfState := resourcesState{
|
||||
Resources: nil,
|
||||
}
|
||||
err := TerraformToBundle(&tfState, &config)
|
||||
|
@ -829,6 +847,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
|
|||
assert.Equal(t, "", config.Resources.Schemas["test_schema"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "", config.Resources.Volumes["test_volume"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Volumes["test_volume"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster"].ModifiedStatus)
|
||||
|
||||
|
@ -839,7 +860,7 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTerraformToBundleModifiedResources(t *testing.T) {
|
||||
var config = config.Root{
|
||||
config := config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"test_job": {
|
||||
|
@ -937,6 +958,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
Volumes: map[string]*resources.Volume{
|
||||
"test_volume": {
|
||||
CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
|
||||
Name: "test_volume",
|
||||
},
|
||||
},
|
||||
"test_volume_new": {
|
||||
CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
|
||||
Name: "test_volume_new",
|
||||
},
|
||||
},
|
||||
},
|
||||
Clusters: map[string]*resources.Cluster{
|
||||
"test_cluster": {
|
||||
ClusterSpec: &compute.ClusterSpec{
|
||||
|
@ -963,7 +996,7 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
var tfState = resourcesState{
|
||||
tfState := resourcesState{
|
||||
Resources: []stateResource{
|
||||
{
|
||||
Type: "databricks_job",
|
||||
|
@ -1093,6 +1126,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
|
|||
{Attributes: stateInstanceAttributes{ID: "2"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "databricks_volume",
|
||||
Mode: "managed",
|
||||
Name: "test_volume",
|
||||
Instances: []stateResourceInstance{
|
||||
{Attributes: stateInstanceAttributes{ID: "1"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "databricks_volume",
|
||||
Mode: "managed",
|
||||
Name: "test_volume_old",
|
||||
Instances: []stateResourceInstance{
|
||||
{Attributes: stateInstanceAttributes{ID: "2"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "databricks_cluster",
|
||||
Mode: "managed",
|
||||
|
@ -1186,6 +1235,13 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
|
|||
assert.Equal(t, "", config.Resources.Schemas["test_schema_new"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema_new"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "1", config.Resources.Volumes["test_volume"].ID)
|
||||
assert.Equal(t, "", config.Resources.Volumes["test_volume"].ModifiedStatus)
|
||||
assert.Equal(t, "2", config.Resources.Volumes["test_volume_old"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Volumes["test_volume_old"].ModifiedStatus)
|
||||
assert.Equal(t, "", config.Resources.Volumes["test_volume_new"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Volumes["test_volume_new"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
|
||||
assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ModifiedStatus)
|
||||
assert.Equal(t, "2", config.Resources.Clusters["test_cluster_old"].ID)
|
||||
|
|
|
@ -56,7 +56,7 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
|
|||
buf := bytes.NewBuffer(nil)
|
||||
tf.SetStdout(buf)
|
||||
|
||||
//lint:ignore SA1019 We use legacy -state flag for now to plan the import changes based on temporary state file
|
||||
//nolint:staticcheck // SA1019 We use legacy -state flag for now to plan the import changes based on temporary state file
|
||||
changed, err := tf.Plan(ctx, tfexec.State(tmpState), tfexec.Target(importAddress))
|
||||
if err != nil {
|
||||
return diag.Errorf("terraform plan: %v", err)
|
||||
|
|
|
@ -145,7 +145,7 @@ func inheritEnvVars(ctx context.Context, environ map[string]string) error {
|
|||
// This function is used for env vars set by the Databricks VSCode extension. The variables are intended to be used by the CLI
|
||||
// bundled with the Databricks VSCode extension, but users can use different CLI versions in the VSCode terminals, in which case we want to ignore
|
||||
// the variables if that CLI uses different versions of the dependencies.
|
||||
func getEnvVarWithMatchingVersion(ctx context.Context, envVarName string, versionVarName string, currentVersion string) (string, error) {
|
||||
func getEnvVarWithMatchingVersion(ctx context.Context, envVarName, versionVarName, currentVersion string) (string, error) {
|
||||
envValue := env.Get(ctx, envVarName)
|
||||
versionValue := env.Get(ctx, versionVarName)
|
||||
|
||||
|
|
|
@ -400,7 +400,7 @@ func TestFindExecPathFromEnvironmentWithCorrectVersionAndBinary(t *testing.T) {
|
|||
require.Equal(t, tmpBinPath, b.Config.Bundle.Terraform.ExecPath)
|
||||
}
|
||||
|
||||
func createTempFile(t *testing.T, dest string, name string, executable bool) string {
|
||||
func createTempFile(t *testing.T, dest, name string, executable bool) string {
|
||||
binPath := filepath.Join(dest, name)
|
||||
f, err := os.Create(binPath)
|
||||
require.NoError(t, err)
|
||||
|
@ -409,7 +409,7 @@ func createTempFile(t *testing.T, dest string, name string, executable bool) str
|
|||
require.NoError(t, err)
|
||||
}()
|
||||
if executable {
|
||||
err = f.Chmod(0777)
|
||||
err = f.Chmod(0o777)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
return binPath
|
||||
|
@ -422,7 +422,7 @@ func TestGetEnvVarWithMatchingVersion(t *testing.T) {
|
|||
tmp := t.TempDir()
|
||||
file := testutil.Touch(t, tmp, "bar")
|
||||
|
||||
var tc = []struct {
|
||||
tc := []struct {
|
||||
envValue string
|
||||
versionValue string
|
||||
currentVersion string
|
||||
|
|
|
@ -10,8 +10,7 @@ import (
|
|||
"github.com/databricks/cli/libs/dyn/dynvar"
|
||||
)
|
||||
|
||||
type interpolateMutator struct {
|
||||
}
|
||||
type interpolateMutator struct{}
|
||||
|
||||
func Interpolate() bundle.Mutator {
|
||||
return &interpolateMutator{}
|
||||
|
@ -58,6 +57,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D
|
|||
path = dyn.NewPath(dyn.Key("databricks_quality_monitor")).Append(path[2:]...)
|
||||
case dyn.Key("schemas"):
|
||||
path = dyn.NewPath(dyn.Key("databricks_schema")).Append(path[2:]...)
|
||||
case dyn.Key("volumes"):
|
||||
path = dyn.NewPath(dyn.Key("databricks_volume")).Append(path[2:]...)
|
||||
case dyn.Key("clusters"):
|
||||
path = dyn.NewPath(dyn.Key("databricks_cluster")).Append(path[2:]...)
|
||||
case dyn.Key("dashboards"):
|
||||
|
|
|
@ -31,6 +31,7 @@ func TestInterpolate(t *testing.T) {
|
|||
"other_model_serving": "${resources.model_serving_endpoints.other_model_serving.id}",
|
||||
"other_registered_model": "${resources.registered_models.other_registered_model.id}",
|
||||
"other_schema": "${resources.schemas.other_schema.id}",
|
||||
"other_volume": "${resources.volumes.other_volume.id}",
|
||||
"other_cluster": "${resources.clusters.other_cluster.id}",
|
||||
"other_dashboard": "${resources.dashboards.other_dashboard.id}",
|
||||
},
|
||||
|
@ -69,6 +70,7 @@ func TestInterpolate(t *testing.T) {
|
|||
assert.Equal(t, "${databricks_model_serving.other_model_serving.id}", j.Tags["other_model_serving"])
|
||||
assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"])
|
||||
assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"])
|
||||
assert.Equal(t, "${databricks_volume.other_volume.id}", j.Tags["other_volume"])
|
||||
assert.Equal(t, "${databricks_cluster.other_cluster.id}", j.Tags["other_cluster"])
|
||||
assert.Equal(t, "${databricks_dashboard.other_dashboard.id}", j.Tags["other_dashboard"])
|
||||
|
||||
|
|
|
@@ -5,15 +5,19 @@ import (
	"github.com/hashicorp/go-version"
)

const TerraformStateFileName = "terraform.tfstate"
const TerraformConfigFileName = "bundle.tf.json"
const (
	TerraformStateFileName  = "terraform.tfstate"
	TerraformConfigFileName = "bundle.tf.json"
)

// Users can provide their own terraform binary and databricks terraform provider by setting the following environment variables.
// This allows users to use the CLI in an air-gapped environments. See the `debug terraform` command.
const TerraformExecPathEnv = "DATABRICKS_TF_EXEC_PATH"
const TerraformVersionEnv = "DATABRICKS_TF_VERSION"
const TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE"
const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION"
const (
	TerraformExecPathEnv        = "DATABRICKS_TF_EXEC_PATH"
	TerraformVersionEnv         = "DATABRICKS_TF_VERSION"
	TerraformCliConfigPathEnv   = "DATABRICKS_TF_CLI_CONFIG_FILE"
	TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION"
)

// Terraform CLI version to use and the corresponding checksums for it. The
// checksums are used to verify the integrity of the downloaded binary. Please

@@ -26,8 +30,10 @@ const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION"
// downloaded Terraform archive.
var TerraformVersion = version.Must(version.NewVersion("1.5.5"))

const checksumLinuxArm64 = "b055aefe343d0b710d8a7afd31aeb702b37bbf4493bb9385a709991e48dfbcd2"
const checksumLinuxAmd64 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a"
const (
	checksumLinuxArm64 = "b055aefe343d0b710d8a7afd31aeb702b37bbf4493bb9385a709991e48dfbcd2"
	checksumLinuxAmd64 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a"
)

type Checksum struct {
	LinuxArm64 string `json:"linux_arm64"`
|
|
@ -14,7 +14,7 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func downloadAndChecksum(t *testing.T, url string, expectedChecksum string) {
|
||||
func downloadAndChecksum(t *testing.T, url, expectedChecksum string) {
|
||||
resp, err := http.Get(url)
|
||||
require.NoError(t, err)
|
||||
defer resp.Body.Close()
|
||||
|
|
|
@ -2,7 +2,6 @@ package terraform
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
|
@ -57,7 +56,7 @@ func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
|||
IsEmpty: !notEmpty,
|
||||
}
|
||||
|
||||
log.Debugf(ctx, fmt.Sprintf("Planning complete and persisted at %s\n", planPath))
|
||||
log.Debugf(ctx, "Planning complete and persisted at %s\n", planPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -104,7 +104,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
|
|||
localState, err := l.localState(ctx, b)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
log.Infof(ctx, "Local state file does not exist. Using remote Terraform state.")
|
||||
err := os.WriteFile(localStatePath, remoteContent, 0600)
|
||||
err := os.WriteFile(localStatePath, remoteContent, 0o600)
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
if err != nil {
|
||||
|
@ -114,14 +114,14 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
|
|||
// If the lineage does not match, the Terraform state files do not correspond to the same deployment.
|
||||
if localState.Lineage != remoteState.Lineage {
|
||||
log.Infof(ctx, "Remote and local state lineages do not match. Using remote Terraform state. Invalidating local Terraform state.")
|
||||
err := os.WriteFile(localStatePath, remoteContent, 0600)
|
||||
err := os.WriteFile(localStatePath, remoteContent, 0o600)
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
// If the remote state is newer than the local state, we should use the remote state.
|
||||
if remoteState.Serial > localState.Serial {
|
||||
log.Infof(ctx, "Remote state is newer than local state. Using remote Terraform state.")
|
||||
err := os.WriteFile(localStatePath, remoteContent, 0600)
|
||||
err := os.WriteFile(localStatePath, remoteContent, 0o600)
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ import (
|
|||
)
|
||||
|
||||
func TestConvertCluster(t *testing.T) {
|
||||
var src = resources.Cluster{
|
||||
src := resources.Cluster{
|
||||
ClusterSpec: &compute.ClusterSpec{
|
||||
NumWorkers: 3,
|
||||
SparkVersion: "13.3.x-scala2.12",
|
||||
|
@ -93,5 +93,4 @@ func TestConvertCluster(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}, out.Permissions["cluster_my_cluster"])
|
||||
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ const (
|
|||
)
|
||||
|
||||
// Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output.
|
||||
func marshalSerializedDashboard(vin dyn.Value, vout dyn.Value) (dyn.Value, error) {
|
||||
func marshalSerializedDashboard(vin, vout dyn.Value) (dyn.Value, error) {
|
||||
// Skip if the "serialized_dashboard" field is already set.
|
||||
if v := vout.Get(serializedDashboardFieldName); v.IsValid() {
|
||||
return vout, nil
|
||||
|
|
|
@ -14,7 +14,7 @@ import (
|
|||
)
|
||||
|
||||
func TestConvertDashboard(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
src := resources.Dashboard{
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
DisplayName: "my dashboard",
|
||||
WarehouseId: "f00dcafe",
|
||||
|
@ -60,7 +60,7 @@ func TestConvertDashboard(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConvertDashboardFilePath(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
src := resources.Dashboard{
|
||||
FilePath: "some/path",
|
||||
}
|
||||
|
||||
|
@ -84,7 +84,7 @@ func TestConvertDashboardFilePath(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConvertDashboardFilePathQuoted(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
src := resources.Dashboard{
|
||||
FilePath: `C:\foo\bar\baz\dashboard.lvdash.json`,
|
||||
}
|
||||
|
||||
|
@ -108,7 +108,7 @@ func TestConvertDashboardFilePathQuoted(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConvertDashboardSerializedDashboardString(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
src := resources.Dashboard{
|
||||
SerializedDashboard: `{ "json": true }`,
|
||||
}
|
||||
|
||||
|
@ -127,7 +127,7 @@ func TestConvertDashboardSerializedDashboardString(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConvertDashboardSerializedDashboardAny(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
src := resources.Dashboard{
|
||||
SerializedDashboard: map[string]any{
|
||||
"pages": []map[string]any{
|
||||
{
|
||||
|
|
|
@ -14,7 +14,7 @@ import (
|
|||
)
|
||||
|
||||
func TestConvertExperiment(t *testing.T) {
|
||||
var src = resources.MlflowExperiment{
|
||||
src := resources.MlflowExperiment{
|
||||
Experiment: &ml.Experiment{
|
||||
Name: "name",
|
||||
},
|
||||
|
|