mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'origin' into detect/schema-dep
commit f7d926e428
@@ -0,0 +1 @@
+* @pietern @andrewnester @shreyas-goenka @denik
@@ -0,0 +1,40 @@
+name: "Close Stale Issues"
+
+on:
+  workflow_dispatch:
+  schedule:
+    - cron: "0 0 * * *" # Run at midnight every day
+
+jobs:
+  cleanup:
+    name: Stale issue job
+    runs-on:
+      group: databricks-deco-testing-runner-group
+      labels: ubuntu-latest-deco
+
+    permissions:
+      issues: write
+      contents: read
+      pull-requests: write
+
+    steps:
+      - uses: actions/stale@v9
+        with:
+          stale-issue-message: This issue has not received a response in a while. If you want to keep this issue open, please leave a comment below and auto-close will be canceled.
+          stale-pr-message: This PR has not received an update in a while. If you want to keep this PR open, please leave a comment below or push a new commit and auto-close will be canceled.
+
+          # These labels are required
+          stale-issue-label: Stale
+          stale-pr-label: Stale
+
+          exempt-issue-labels: No Autoclose
+          exempt-pr-labels: No Autoclose
+
+          # Issue timing
+          days-before-stale: 30
+          days-before-close: 7
+
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          loglevel: DEBUG
+          # TODO: Remove dry-run after merge when confirmed it works correctly
+          dry-run: true
@@ -13,10 +13,17 @@ on:
 jobs:
   comment-on-pr:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: databricks-deco-testing-runner-group
+      labels: ubuntu-latest-deco
+
+    permissions:
+      pull-requests: write

     # Only run this job for PRs from forks.
     # Integration tests are not run automatically for PRs from forks.
     if: "${{ github.event.pull_request.head.repo.fork }}"

     steps:
       - uses: actions/checkout@v4
@@ -43,7 +50,7 @@ jobs:
         run: |
           gh pr comment ${{ github.event.pull_request.number }} --body \
           "<!-- INTEGRATION_TESTS_MANUAL -->
-          If integration tests don't run automatically, an authorized user can run them manually by following the instructions below:
+          An authorized user can trigger integration tests manually by following the instructions below:

           Trigger:
           [go/deco-tests-run/cli](https://go/deco-tests-run/cli)
@@ -17,7 +17,9 @@ jobs:
   # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing.
   #
   trigger:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: databricks-deco-testing-runner-group
+      labels: ubuntu-latest-deco

     steps:
       - name: Auto-approve squashed commit
@@ -11,7 +11,10 @@ jobs:
   # This workflow triggers the integration test workflow in a different repository.
   # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users.
   trigger:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: databricks-deco-testing-runner-group
+      labels: ubuntu-latest-deco
+
     environment: "test-trigger-is"

     steps:
@@ -5,36 +5,20 @@ on:
     types: [opened, synchronize]

 jobs:
-  check-token:
-    runs-on: ubuntu-latest
-    environment: "test-trigger-is"
-
-    outputs:
-      has_token: ${{ steps.set-token-status.outputs.has_token }}
-
-    steps:
-      - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
-        id: set-token-status
-        run: |
-          if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
-            echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets."
-            echo "::set-output name=has_token::false"
-          else
-            echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets."
-            echo "::set-output name=has_token::true"
-          fi
-
   # Trigger for pull requests.
   #
   # This workflow triggers the integration test workflow in a different repository.
   # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users.
-  # It depends on the "check-token" workflow to confirm access to this environment to avoid failures.
   trigger:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: databricks-deco-testing-runner-group
+      labels: ubuntu-latest-deco

     environment: "test-trigger-is"

-    if: needs.check-token.outputs.has_token == 'true'
-    needs: check-token
+    # Only run this job for PRs from branches on the main repository and not from forks.
+    # Workflows triggered by PRs from forks don't have access to the "test-trigger-is" environment.
+    if: "${{ !github.event.pull_request.head.repo.fork }}"

     steps:
       - name: Generate GitHub App Token
@@ -14,6 +14,9 @@ on:
   branches:
     - main

+env:
+  GOTESTSUM_FORMAT: github-actions
+
 jobs:
   tests:
     runs-on: ${{ matrix.os }}
@@ -55,7 +58,7 @@ jobs:
           pip3 install wheel

       - name: Run tests
-        run: make testonly
+        run: make test

   golangci:
     name: lint
@@ -75,7 +78,7 @@ jobs:
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v6
         with:
-          version: v1.62.2
+          version: v1.63.1
           args: --timeout=15m

   validate-bundle-schema:
@@ -90,6 +93,13 @@ jobs:
         with:
           go-version: 1.23.4

+      - name: Verify that the schema is up to date
+        run: |
+          if ! ( make schema && git diff --exit-code ); then
+            echo "The schema is not up to date. Please run 'make schema' and commit the changes."
+            exit 1
+          fi
+
       # Github repo: https://github.com/ajv-validator/ajv-cli
       - name: Install ajv-cli
         run: npm install -g ajv-cli@5.0.0
@@ -20,7 +20,10 @@ on:
 jobs:
   goreleaser:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: databricks-deco-testing-runner-group
+      labels: ubuntu-latest-deco

     steps:
       - name: Checkout repository and submodules
         uses: actions/checkout@v4
@@ -9,9 +9,13 @@ on:
 jobs:
   goreleaser:
+    runs-on:
+      group: databricks-deco-testing-runner-group
+      labels: ubuntu-latest-deco
+
     outputs:
       artifacts: ${{ steps.releaser.outputs.artifacts }}
-    runs-on: ubuntu-latest

     steps:
       - name: Checkout repository and submodules
         uses: actions/checkout@v4
@@ -11,6 +11,9 @@ linters:
   - gofmt
   - gofumpt
   - goimports
+  - testifylint
+  - intrange
+  - mirror
 linters-settings:
   govet:
     enable-all: true
@@ -32,7 +35,12 @@ linters-settings:
   gofumpt:
     module-path: github.com/databricks/cli
     extra-rules: true
-  #goimports:
-  #  local-prefixes: github.com/databricks/cli
+  testifylint:
+    enable-all: true
+    disable:
+      # good check, but we have too many assert.(No)?Errorf? so excluding for now
+      - require-error
 issues:
   exclude-dirs-use-default: false # recommended by docs https://golangci-lint.run/usage/false-positives/
+  max-issues-per-linter: 1000
+  max-same-issues: 1000
Makefile (41 changed lines)
@@ -1,38 +1,35 @@
 default: build

-lint: vendor
-    @echo "✓ Linting source code with https://golangci-lint.run/ (with --fix)..."
-    @golangci-lint run --fix ./...
+PACKAGES=./libs/... ./internal/... ./cmd/... ./bundle/... .

-lintcheck: vendor
-    @echo "✓ Linting source code with https://golangci-lint.run/ ..."
-    @golangci-lint run ./...
+GOTESTSUM_FORMAT ?= pkgname-and-test-fails

-test: lint testonly
+lint:
+    ./lint.sh ./...

-testonly:
-    @echo "✓ Running tests ..."
-    @gotestsum --format pkgname-and-test-fails --no-summary=skipped --raw-command go test -v -json -short -coverprofile=coverage.txt ./...
+lintcheck:
+    golangci-lint run ./...

-coverage: test
-    @echo "✓ Opening coverage for unit tests ..."
-    @go tool cover -html=coverage.txt
+test:
+    gotestsum --format ${GOTESTSUM_FORMAT} --no-summary=skipped -- ${PACKAGES}
+
+cover:
+    gotestsum --format ${GOTESTSUM_FORMAT} --no-summary=skipped -- -coverprofile=coverage.txt ${PACKAGES}
+
+showcover:
+    go tool cover -html=coverage.txt

 build: vendor
-    @echo "✓ Building source code with go build ..."
-    @go build -mod vendor
+    go build -mod vendor

 snapshot:
-    @echo "✓ Building dev snapshot"
-    @go build -o .databricks/databricks
+    go build -o .databricks/databricks

 vendor:
-    @echo "✓ Filling vendor folder with library code ..."
-    @go mod vendor
+    go mod vendor

 schema:
-    @echo "✓ Generating json-schema ..."
-    @go run ./bundle/internal/schema ./bundle/internal/schema ./bundle/schema/jsonschema.json
+    go run ./bundle/internal/schema ./bundle/internal/schema ./bundle/schema/jsonschema.json

 INTEGRATION = gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./integration/..." -- -parallel 4 -timeout=2h
@@ -42,4 +39,4 @@ integration:
 integration-short:
     $(INTEGRATION) -short

-.PHONY: lint lintcheck test testonly coverage build snapshot vendor schema integration integration-short
+.PHONY: lint lintcheck test cover showcover build snapshot vendor schema integration integration-short
@@ -97,7 +97,7 @@ func (m *expandGlobs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
        return dyn.SetByPath(v, base, dyn.V(output))
    })
    if err != nil {
-       return diag.FromErr(err)
+       diags = diags.Extend(diag.FromErr(err))
    }

    return diags
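For context, the pattern this hunk adopts — extending a diag.Diagnostics accumulator instead of returning on the first error — looks like this in isolation. A minimal sketch using the repo's diag package; applyAll is a hypothetical helper, not code from this commit:

package example

import "github.com/databricks/cli/libs/diag"

// applyAll illustrates the accumulation pattern: every step runs, and all
// failures are reported together instead of only the first one.
func applyAll(steps []func() error) diag.Diagnostics {
    var diags diag.Diagnostics
    for _, step := range steps {
        if err := step(); err != nil {
            // Collect the error as a diagnostic and keep going.
            diags = diags.Extend(diag.FromErr(err))
        }
    }
    return diags
}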
@@ -2,7 +2,6 @@ package bundle

 import (
    "context"
-   "errors"
    "io/fs"
    "os"
    "path/filepath"
@@ -16,7 +15,7 @@ import (

 func TestLoadNotExists(t *testing.T) {
    b, err := Load(context.Background(), "/doesntexist")
-   assert.True(t, errors.Is(err, fs.ErrNotExist))
+   assert.ErrorIs(t, err, fs.ErrNotExist)
    assert.Nil(t, b)
 }
@@ -109,19 +109,19 @@ func TestConfigureDashboardDefaultsEmbedCredentials(t *testing.T) {
    // Set to true; still true.
    v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.embed_credentials")
    if assert.NoError(t, err) {
-       assert.Equal(t, true, v.MustBool())
+       assert.True(t, v.MustBool())
    }

    // Set to false; still false.
    v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.embed_credentials")
    if assert.NoError(t, err) {
-       assert.Equal(t, false, v.MustBool())
+       assert.False(t, v.MustBool())
    }

    // Not set; now false.
    v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.embed_credentials")
    if assert.NoError(t, err) {
-       assert.Equal(t, false, v.MustBool())
+       assert.False(t, v.MustBool())
    }

    // No valid dashboard; no change.
@@ -28,8 +28,8 @@ func TestDefaultQueueingApplyNoJobs(t *testing.T) {
        },
    }
    d := bundle.Apply(context.Background(), b, DefaultQueueing())
-   assert.Len(t, d, 0)
-   assert.Len(t, b.Config.Resources.Jobs, 0)
+   assert.Empty(t, d)
+   assert.Empty(t, b.Config.Resources.Jobs)
 }

 func TestDefaultQueueingApplyJobsAlreadyEnabled(t *testing.T) {
@@ -47,7 +47,7 @@ func TestDefaultQueueingApplyJobsAlreadyEnabled(t *testing.T) {
        },
    }
    d := bundle.Apply(context.Background(), b, DefaultQueueing())
-   assert.Len(t, d, 0)
+   assert.Empty(t, d)
    assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled)
 }
@@ -66,7 +66,7 @@ func TestDefaultQueueingApplyEnableQueueing(t *testing.T) {
        },
    }
    d := bundle.Apply(context.Background(), b, DefaultQueueing())
-   assert.Len(t, d, 0)
+   assert.Empty(t, d)
    assert.NotNil(t, b.Config.Resources.Jobs["job"].Queue)
    assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled)
 }
@@ -96,7 +96,7 @@ func TestDefaultQueueingApplyWithMultipleJobs(t *testing.T) {
        },
    }
    d := bundle.Apply(context.Background(), b, DefaultQueueing())
-   assert.Len(t, d, 0)
+   assert.Empty(t, d)
    assert.False(t, b.Config.Resources.Jobs["job1"].Queue.Enabled)
    assert.True(t, b.Config.Resources.Jobs["job2"].Queue.Enabled)
    assert.True(t, b.Config.Resources.Jobs["job3"].Queue.Enabled)
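The assert.Len(t, x, 0) to assert.Empty(t, x) rewrites in these hunks follow from the newly enabled testifylint linter (see the .golangci.yml hunk above). A minimal sketch of the difference, not code from this commit:

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestEmptyVersusLenZero(t *testing.T) {
    var xs []string

    // Both assertions pass on an empty slice, but Empty states the intent
    // directly and produces a clearer failure message.
    assert.Len(t, xs, 0) // flagged by testifylint
    assert.Empty(t, xs)  // preferred form
}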
@@ -44,7 +44,7 @@ func TestEnvironmentsToTargetsWithEnvironmentsDefined(t *testing.T) {

    diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets())
    require.NoError(t, diags.Error())
-   assert.Len(t, b.Config.Environments, 0)
+   assert.Empty(t, b.Config.Environments)
    assert.Len(t, b.Config.Targets, 1)
 }
@@ -61,6 +61,6 @@ func TestEnvironmentsToTargetsWithTargetsDefined(t *testing.T) {

    diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets())
    require.NoError(t, diags.Error())
-   assert.Len(t, b.Config.Environments, 0)
+   assert.Empty(t, b.Config.Environments)
    assert.Len(t, b.Config.Targets, 1)
 }
@@ -74,8 +74,8 @@ func TestMergeJobTasks(t *testing.T) {
    assert.Equal(t, "i3.2xlarge", cluster.NodeTypeId)
    assert.Equal(t, 4, cluster.NumWorkers)
    assert.Len(t, task0.Libraries, 2)
-   assert.Equal(t, task0.Libraries[0].Whl, "package1")
-   assert.Equal(t, task0.Libraries[1].Pypi.Package, "package2")
+   assert.Equal(t, "package1", task0.Libraries[0].Whl)
+   assert.Equal(t, "package2", task0.Libraries[1].Pypi.Package)

    // This task was left untouched.
    task1 := j.Tasks[1].NewCluster
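Many hunks in this commit make the same mechanical fix: testify's assert.Equal takes the expected value before the actual one, so swapped arguments produce misleading "expected X, got Y" failure output. A minimal illustration with a hypothetical function under test, not code from this commit:

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func computeLabel() string { return "package1" } // hypothetical

func TestArgumentOrder(t *testing.T) {
    got := computeLabel()

    // Wrong: on failure this reports the hardcoded literal as the "actual" value.
    assert.Equal(t, got, "package1")

    // Right: expected first, actual second.
    assert.Equal(t, "package1", got)
}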
@@ -163,18 +163,18 @@ func TestProcessTargetModeDevelopment(t *testing.T) {

    // Job 1
    assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name)
-   assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["existing"], "tag")
-   assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["dev"], "lennart")
-   assert.Equal(t, b.Config.Resources.Jobs["job1"].Schedule.PauseStatus, jobs.PauseStatusPaused)
+   assert.Equal(t, "tag", b.Config.Resources.Jobs["job1"].Tags["existing"])
+   assert.Equal(t, "lennart", b.Config.Resources.Jobs["job1"].Tags["dev"])
+   assert.Equal(t, jobs.PauseStatusPaused, b.Config.Resources.Jobs["job1"].Schedule.PauseStatus)

    // Job 2
    assert.Equal(t, "[dev lennart] job2", b.Config.Resources.Jobs["job2"].Name)
-   assert.Equal(t, b.Config.Resources.Jobs["job2"].Tags["dev"], "lennart")
-   assert.Equal(t, b.Config.Resources.Jobs["job2"].Schedule.PauseStatus, jobs.PauseStatusUnpaused)
+   assert.Equal(t, "lennart", b.Config.Resources.Jobs["job2"].Tags["dev"])
+   assert.Equal(t, jobs.PauseStatusUnpaused, b.Config.Resources.Jobs["job2"].Schedule.PauseStatus)

    // Pipeline 1
    assert.Equal(t, "[dev lennart] pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
-   assert.Equal(t, false, b.Config.Resources.Pipelines["pipeline1"].Continuous)
+   assert.False(t, b.Config.Resources.Pipelines["pipeline1"].Continuous)
    assert.True(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)

    // Experiment 1
@@ -382,7 +382,7 @@ func TestAllResourcesMocked(t *testing.T) {
    b := mockBundle(config.Development)
    resources := reflect.ValueOf(b.Config.Resources)

-   for i := 0; i < resources.NumField(); i++ {
+   for i := range resources.NumField() {
        field := resources.Field(i)
        if field.Kind() == reflect.Map {
            assert.True(
@@ -411,7 +411,7 @@ func TestAllNonUcResourcesAreRenamed(t *testing.T) {
    require.NoError(t, diags.Error())

    resources := reflect.ValueOf(b.Config.Resources)
-   for i := 0; i < resources.NumField(); i++ {
+   for i := range resources.NumField() {
        field := resources.Field(i)

        if field.Kind() == reflect.Map {
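The `for i := 0; i < n; i++` to `for i := range n` rewrites throughout this commit use Go 1.22's range-over-int form, enforced here by the newly enabled intrange linter. A minimal sketch, not code from this commit:

package main

import "fmt"

func main() {
    // Classic counted loop.
    for i := 0; i < 3; i++ {
        fmt.Println(i)
    }

    // Equivalent Go 1.22+ form: iterates i = 0, 1, 2.
    for i := range 3 {
        fmt.Println(i)
    }
}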
@@ -40,6 +40,7 @@ func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
        })
    }

+   // Note, diags are lost from all goroutines except the first one to return diag
    return diag.FromErr(errs.Wait())
 }
@@ -185,11 +185,11 @@ func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) {
    // Apply for the variable prefix. This should resolve the variables to their values.
    diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("variables"))
    require.NoError(t, diags.Error())
-   assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForCanceledRuns)
-   assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForSkippedRuns)
+   assert.True(t, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForCanceledRuns)
+   assert.True(t, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForSkippedRuns)
    assert.Equal(t, 1, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MinWorkers)
    assert.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MaxWorkers)
-   assert.Equal(t, 0.5, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.AzureAttributes.SpotBidMaxPrice)
+   assert.InDelta(t, 0.5, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.AzureAttributes.SpotBidMaxPrice, 0.0001)
 }

 func TestResolveComplexVariable(t *testing.T) {
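The last change in this hunk swaps assert.Equal for assert.InDelta on a float field: exact equality on floating-point values is brittle, so a tolerance-based comparison is preferred. A minimal sketch, not code from this commit:

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestFloatComparison(t *testing.T) {
    price := 0.1 + 0.2 + 0.2 // equals 0.5 only up to rounding error

    // Passes as long as |0.5 - price| <= 0.0001.
    assert.InDelta(t, 0.5, price, 0.0001)
}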
@@ -71,7 +71,7 @@ func TestNoWorkspacePrefixUsed(t *testing.T) {
    }

    for _, d := range diags {
-       require.Equal(t, d.Severity, diag.Warning)
+       require.Equal(t, diag.Warning, d.Severity)
        require.Contains(t, expectedErrors, d.Summary)
        delete(expectedErrors, d.Summary)
    }
@@ -30,7 +30,7 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) {

    err = convert.ToTyped(&variable, v)
    require.NoError(t, err)
-   assert.Equal(t, variable.Value, "process-env")
+   assert.Equal(t, "process-env", variable.Value)
 }

 func TestSetVariableUsingDefaultValue(t *testing.T) {
@@ -48,7 +48,7 @@ func TestSetVariableUsingDefaultValue(t *testing.T) {

    err = convert.ToTyped(&variable, v)
    require.NoError(t, err)
-   assert.Equal(t, variable.Value, "default")
+   assert.Equal(t, "default", variable.Value)
 }

 func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) {
@@ -70,7 +70,7 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) {

    err = convert.ToTyped(&variable, v)
    require.NoError(t, err)
-   assert.Equal(t, variable.Value, "assigned-value")
+   assert.Equal(t, "assigned-value", variable.Value)
 }

 func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) {
@@ -95,7 +95,7 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) {

    err = convert.ToTyped(&variable, v)
    require.NoError(t, err)
-   assert.Equal(t, variable.Value, "assigned-value")
+   assert.Equal(t, "assigned-value", variable.Value)
 }

 func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) {
@@ -33,15 +33,15 @@ func TestCustomMarshallerIsImplemented(t *testing.T) {
    r := Resources{}
    rt := reflect.TypeOf(r)

-   for i := 0; i < rt.NumField(); i++ {
+   for i := range rt.NumField() {
        field := rt.Field(i)

        // Fields in Resources are expected be of the form map[string]*resourceStruct
-       assert.Equal(t, field.Type.Kind(), reflect.Map, "Resource %s is not a map", field.Name)
+       assert.Equal(t, reflect.Map, field.Type.Kind(), "Resource %s is not a map", field.Name)
        kt := field.Type.Key()
-       assert.Equal(t, kt.Kind(), reflect.String, "Resource %s is not a map with string keys", field.Name)
+       assert.Equal(t, reflect.String, kt.Kind(), "Resource %s is not a map with string keys", field.Name)
        vt := field.Type.Elem()
-       assert.Equal(t, vt.Kind(), reflect.Ptr, "Resource %s is not a map with pointer values", field.Name)
+       assert.Equal(t, reflect.Ptr, vt.Kind(), "Resource %s is not a map with pointer values", field.Name)

        // Marshalling a resourceStruct will panic if resourceStruct does not have a custom marshaller
        // This is because resourceStruct embeds a Go SDK struct that implements
@@ -75,7 +75,7 @@ func TestResourcesAllResourcesCompleteness(t *testing.T) {
        types = append(types, group.Description.PluralName)
    }

-   for i := 0; i < rt.NumField(); i++ {
+   for i := range rt.NumField() {
        field := rt.Field(i)
        jsonTag := field.Tag.Get("json")
@@ -92,7 +92,7 @@ func TestSupportedResources(t *testing.T) {
    actual := SupportedResources()

    typ := reflect.TypeOf(Resources{})
-   for i := 0; i < typ.NumField(); i++ {
+   for i := range typ.NumField() {
        field := typ.Field(i)
        jsonTags := strings.Split(field.Tag.Get("json"), ",")
        pluralName := jsonTags[0]
@@ -102,7 +102,8 @@ func LoadFromBytes(path string, raw []byte) (*Root, diag.Diagnostics) {
    // Convert normalized configuration tree to typed configuration.
    err = r.updateWithDynamicValue(v)
    if err != nil {
-       return nil, diag.Errorf("failed to load %s: %v", path, err)
+       diags = diags.Extend(diag.Errorf("failed to load %s: %v", path, err))
+       return nil, diags
    }
    return &r, diags
 }
@@ -0,0 +1,51 @@
+package validate
+
+import (
+    "context"
+
+    "github.com/databricks/cli/bundle"
+    "github.com/databricks/cli/libs/diag"
+)
+
+// FastValidate runs a subset of fast validation checks. This is a subset of the full
+// suite of validation mutators that satisfy ANY ONE of the following criteria:
+//
+// 1. No file i/o or network requests are made in the mutator.
+// 2. The validation is blocking for bundle deployments.
+//
+// The full suite of validation mutators is available in the [Validate] mutator.
+type fastValidateReadonly struct{}
+
+func FastValidateReadonly() bundle.ReadOnlyMutator {
+    return &fastValidateReadonly{}
+}
+
+func (f *fastValidateReadonly) Name() string {
+    return "fast_validate(readonly)"
+}
+
+func (f *fastValidateReadonly) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
+    return bundle.ApplyReadOnly(ctx, rb, bundle.Parallel(
+        // Fast mutators with only in-memory checks
+        JobClusterKeyDefined(),
+        JobTaskClusterSpec(),
+        SingleNodeCluster(),
+
+        // Blocking mutators. Deployments will fail if these checks fail.
+        ValidateArtifactPath(),
+    ))
+}
+
+type fastValidate struct{}
+
+func FastValidate() bundle.Mutator {
+    return &fastValidate{}
+}
+
+func (f *fastValidate) Name() string {
+    return "fast_validate"
+}
+
+func (f *fastValidate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+    return bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), FastValidateReadonly())
+}
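A sketch of how the new mutator might compose with bundle.Apply at a call site. This is a hypothetical usage example, not code from this commit, and it assumes the package's import path is bundle/config/validate:

package example

import (
    "context"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config/validate"
)

// deployTimeChecks runs only the fast, blocking subset of validations,
// leaving the slow network and file-i/o checks to `bundle validate`.
func deployTimeChecks(ctx context.Context, b *bundle.Bundle) error {
    diags := bundle.Apply(ctx, b, validate.FastValidate())
    return diags.Error()
}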
@@ -87,7 +87,7 @@ func TestFilesToSync_EverythingIgnored(t *testing.T) {
    ctx := context.Background()
    rb := bundle.ReadOnly(b)
    diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync())
-   require.Equal(t, 1, len(diags))
+   require.Len(t, diags, 1)
    assert.Equal(t, diag.Warning, diags[0].Severity)
    assert.Equal(t, "There are no files to sync, please check your .gitignore", diags[0].Summary)
 }
@@ -101,7 +101,7 @@ func TestFilesToSync_EverythingExcluded(t *testing.T) {
    ctx := context.Background()
    rb := bundle.ReadOnly(b)
    diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync())
-   require.Equal(t, 1, len(diags))
+   require.Len(t, diags, 1)
    assert.Equal(t, diag.Warning, diags[0].Severity)
    assert.Equal(t, "There are no files to sync, please check your .gitignore and sync.exclude configuration", diags[0].Summary)
 }
@@ -36,7 +36,8 @@ func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) diag.Diagnostics {
    }

    if err := g.Wait(); err != nil {
-       return diag.FromErr(err)
+       // Note, only diag from first coroutine is captured, others are lost
+       diags = diags.Extend(diag.FromErr(err))
    }

    for _, r := range results {
@@ -34,7 +34,7 @@ func TestJobClusterKeyDefined(t *testing.T) {
    }

    diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined())
-   require.Len(t, diags, 0)
+   require.Empty(t, diags)
    require.NoError(t, diags.Error())
 }
@@ -59,8 +59,8 @@ func TestJobClusterKeyNotDefined(t *testing.T) {
    diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined())
    require.Len(t, diags, 1)
    require.NoError(t, diags.Error())
-   require.Equal(t, diags[0].Severity, diag.Warning)
-   require.Equal(t, diags[0].Summary, "job_cluster_key do-not-exist is not defined")
+   require.Equal(t, diag.Warning, diags[0].Severity)
+   require.Equal(t, "job_cluster_key do-not-exist is not defined", diags[0].Summary)
 }

 func TestJobClusterKeyDefinedInDifferentJob(t *testing.T) {
@@ -92,6 +92,6 @@ func TestJobClusterKeyDefinedInDifferentJob(t *testing.T) {
    diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined())
    require.Len(t, diags, 1)
    require.NoError(t, diags.Error())
-   require.Equal(t, diags[0].Severity, diag.Warning)
-   require.Equal(t, diags[0].Summary, "job_cluster_key do-not-exist is not defined")
+   require.Equal(t, diag.Warning, diags[0].Severity)
+   require.Equal(t, "job_cluster_key do-not-exist is not defined", diags[0].Summary)
 }
@@ -30,12 +30,13 @@ func (l location) Path() dyn.Path {
 // Apply implements bundle.Mutator.
 func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    return bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), bundle.Parallel(
-       JobClusterKeyDefined(),
+       FastValidateReadonly(),
+
+       // Slow mutators that require network or file i/o. These are only
+       // run in the `bundle validate` command.
        FilesToSync(),
-       ValidateSyncPatterns(),
-       JobTaskClusterSpec(),
        ValidateFolderPermissions(),
-       SingleNodeCluster(),
+       ValidateSyncPatterns(),
    ))
 }
@@ -0,0 +1,129 @@
+package validate
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "slices"
+    "strings"
+
+    "github.com/databricks/cli/bundle"
+    "github.com/databricks/cli/bundle/config"
+    "github.com/databricks/cli/bundle/libraries"
+    "github.com/databricks/cli/libs/diag"
+    "github.com/databricks/cli/libs/dyn"
+    "github.com/databricks/cli/libs/dyn/dynvar"
+    "github.com/databricks/databricks-sdk-go/apierr"
+)
+
+type validateArtifactPath struct{}
+
+func ValidateArtifactPath() bundle.ReadOnlyMutator {
+    return &validateArtifactPath{}
+}
+
+func (v *validateArtifactPath) Name() string {
+    return "validate:artifact_paths"
+}
+
+func extractVolumeFromPath(artifactPath string) (string, string, string, error) {
+    if !libraries.IsVolumesPath(artifactPath) {
+        return "", "", "", fmt.Errorf("expected artifact_path to start with /Volumes/, got %s", artifactPath)
+    }
+
+    parts := strings.Split(artifactPath, "/")
+    volumeFormatErr := fmt.Errorf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", artifactPath)
+
+    // Incorrect format.
+    if len(parts) < 5 {
+        return "", "", "", volumeFormatErr
+    }
+
+    catalogName := parts[2]
+    schemaName := parts[3]
+    volumeName := parts[4]
+
+    // Incorrect format.
+    if catalogName == "" || schemaName == "" || volumeName == "" {
+        return "", "", "", volumeFormatErr
+    }
+
+    return catalogName, schemaName, volumeName, nil
+}
+
+func findVolumeInBundle(r config.Root, catalogName, schemaName, volumeName string) (dyn.Path, []dyn.Location, bool) {
+    volumes := r.Resources.Volumes
+    for k, v := range volumes {
+        if v.CatalogName != catalogName || v.Name != volumeName {
+            continue
+        }
+        // UC schemas can be defined in the bundle itself, and thus might be interpolated
+        // at runtime via the ${resources.schemas.<name>} syntax. Thus we match the volume
+        // definition if the schema name is the same as the one in the bundle, or if the
+        // schema name is interpolated.
+        // We only have to check for ${resources.schemas...} references because any
+        // other valid reference (like ${var.foo}) would have been interpolated by this point.
+        p, ok := dynvar.PureReferenceToPath(v.SchemaName)
+        isSchemaDefinedInBundle := ok && p.HasPrefix(dyn.Path{dyn.Key("resources"), dyn.Key("schemas")})
+        if v.SchemaName != schemaName && !isSchemaDefinedInBundle {
+            continue
+        }
+        pathString := fmt.Sprintf("resources.volumes.%s", k)
+        return dyn.MustPathFromString(pathString), r.GetLocations(pathString), true
+    }
+    return nil, nil, false
+}
+
+func (v *validateArtifactPath) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
+    // We only validate UC Volumes paths right now.
+    if !libraries.IsVolumesPath(rb.Config().Workspace.ArtifactPath) {
+        return nil
+    }
+
+    wrapErrorMsg := func(s string) diag.Diagnostics {
+        return diag.Diagnostics{
+            {
+                Summary:   s,
+                Severity:  diag.Error,
+                Locations: rb.Config().GetLocations("workspace.artifact_path"),
+                Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
+            },
+        }
+    }
+
+    catalogName, schemaName, volumeName, err := extractVolumeFromPath(rb.Config().Workspace.ArtifactPath)
+    if err != nil {
+        return wrapErrorMsg(err.Error())
+    }
+    volumeFullName := fmt.Sprintf("%s.%s.%s", catalogName, schemaName, volumeName)
+    w := rb.WorkspaceClient()
+    _, err = w.Volumes.ReadByName(ctx, volumeFullName)
+
+    if errors.Is(err, apierr.ErrPermissionDenied) {
+        return wrapErrorMsg(fmt.Sprintf("cannot access volume %s: %s", volumeFullName, err))
+    }
+    if errors.Is(err, apierr.ErrNotFound) {
+        path, locations, ok := findVolumeInBundle(rb.Config(), catalogName, schemaName, volumeName)
+        if !ok {
+            return wrapErrorMsg(fmt.Sprintf("volume %s does not exist", volumeFullName))
+        }
+
+        // If the volume is defined in the bundle, provide a more helpful error diagnostic,
+        // with more details and location information.
+        return diag.Diagnostics{{
+            Summary:  fmt.Sprintf("volume %s does not exist", volumeFullName),
+            Severity: diag.Error,
+            Detail: `You are using a volume in your artifact_path that is managed by
+this bundle but which has not been deployed yet. Please first deploy
+the volume using 'bundle deploy' and then switch over to using it in
+the artifact_path.`,
+            Locations: slices.Concat(rb.Config().GetLocations("workspace.artifact_path"), locations),
+            Paths:     append([]dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, path),
+        }}
+    }
+    if err != nil {
+        return wrapErrorMsg(fmt.Sprintf("cannot read volume %s: %s", volumeFullName, err))
+    }
+    return nil
+}
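The index arithmetic in extractVolumeFromPath follows from how strings.Split handles a leading slash; a worked example, not code from this commit:

package main

import (
    "fmt"
    "strings"
)

func main() {
    parts := strings.Split("/Volumes/main/my_schema/my_volume/abc", "/")
    // parts == ["" "Volumes" "main" "my_schema" "my_volume" "abc"]
    // The leading "/" yields an empty first element, so the catalog,
    // schema, and volume land at indices 2, 3, and 4 — which is why a
    // valid path needs at least 5 elements.
    fmt.Println(parts[2], parts[3], parts[4]) // main my_schema my_volume
}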
@@ -0,0 +1,244 @@
+package validate
+
+import (
+    "context"
+    "fmt"
+    "testing"
+
+    "github.com/databricks/cli/bundle"
+    "github.com/databricks/cli/bundle/config"
+    "github.com/databricks/cli/bundle/config/resources"
+    "github.com/databricks/cli/bundle/internal/bundletest"
+    "github.com/databricks/cli/libs/diag"
+    "github.com/databricks/cli/libs/dyn"
+    "github.com/databricks/databricks-sdk-go/apierr"
+    "github.com/databricks/databricks-sdk-go/experimental/mocks"
+    "github.com/databricks/databricks-sdk-go/service/catalog"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/mock"
+    "github.com/stretchr/testify/require"
+)
+
+func TestValidateArtifactPathWithVolumeInBundle(t *testing.T) {
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Workspace: config.Workspace{
+                ArtifactPath: "/Volumes/catalogN/schemaN/volumeN/abc",
+            },
+            Resources: config.Resources{
+                Volumes: map[string]*resources.Volume{
+                    "foo": {
+                        CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
+                            CatalogName: "catalogN",
+                            Name:        "volumeN",
+                            SchemaName:  "schemaN",
+                        },
+                    },
+                },
+            },
+        },
+    }
+
+    bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "file", Line: 1, Column: 1}})
+    bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{{File: "file", Line: 2, Column: 2}})
+
+    ctx := context.Background()
+    m := mocks.NewMockWorkspaceClient(t)
+    api := m.GetMockVolumesAPI()
+    api.EXPECT().ReadByName(mock.Anything, "catalogN.schemaN.volumeN").Return(nil, &apierr.APIError{
+        StatusCode: 404,
+    })
+    b.SetWorkpaceClient(m.WorkspaceClient)
+
+    diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), ValidateArtifactPath())
+    assert.Equal(t, diag.Diagnostics{{
+        Severity: diag.Error,
+        Summary:  "volume catalogN.schemaN.volumeN does not exist",
+        Locations: []dyn.Location{
+            {File: "file", Line: 1, Column: 1},
+            {File: "file", Line: 2, Column: 2},
+        },
+        Paths: []dyn.Path{
+            dyn.MustPathFromString("workspace.artifact_path"),
+            dyn.MustPathFromString("resources.volumes.foo"),
+        },
+        Detail: `You are using a volume in your artifact_path that is managed by
+this bundle but which has not been deployed yet. Please first deploy
+the volume using 'bundle deploy' and then switch over to using it in
+the artifact_path.`,
+    }}, diags)
+}
+
+func TestValidateArtifactPath(t *testing.T) {
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Workspace: config.Workspace{
+                ArtifactPath: "/Volumes/catalogN/schemaN/volumeN/abc",
+            },
+        },
+    }
+
+    bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "file", Line: 1, Column: 1}})
+    assertDiags := func(t *testing.T, diags diag.Diagnostics, expected string) {
+        assert.Len(t, diags, 1)
+        assert.Equal(t, diag.Diagnostics{{
+            Severity:  diag.Error,
+            Summary:   expected,
+            Locations: []dyn.Location{{File: "file", Line: 1, Column: 1}},
+            Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
+        }}, diags)
+    }
+
+    rb := bundle.ReadOnly(b)
+    ctx := context.Background()
+
+    tcases := []struct {
+        err             error
+        expectedSummary string
+    }{
+        {
+            err: &apierr.APIError{
+                StatusCode: 403,
+                Message:    "User does not have USE SCHEMA on Schema 'catalogN.schemaN'",
+            },
+            expectedSummary: "cannot access volume catalogN.schemaN.volumeN: User does not have USE SCHEMA on Schema 'catalogN.schemaN'",
+        },
+        {
+            err: &apierr.APIError{
+                StatusCode: 404,
+            },
+            expectedSummary: "volume catalogN.schemaN.volumeN does not exist",
+        },
+        {
+            err: &apierr.APIError{
+                StatusCode: 500,
+                Message:    "Internal Server Error",
+            },
+            expectedSummary: "cannot read volume catalogN.schemaN.volumeN: Internal Server Error",
+        },
+    }
+
+    for _, tc := range tcases {
+        m := mocks.NewMockWorkspaceClient(t)
+        api := m.GetMockVolumesAPI()
+        api.EXPECT().ReadByName(mock.Anything, "catalogN.schemaN.volumeN").Return(nil, tc.err)
+        b.SetWorkpaceClient(m.WorkspaceClient)
+
+        diags := bundle.ApplyReadOnly(ctx, rb, ValidateArtifactPath())
+        assertDiags(t, diags, tc.expectedSummary)
+    }
+}
+
+func invalidVolumePaths() []string {
+    return []string{
+        "/Volumes/",
+        "/Volumes/main",
+        "/Volumes/main/",
+        "/Volumes/main//",
+        "/Volumes/main//my_schema",
+        "/Volumes/main/my_schema",
+        "/Volumes/main/my_schema/",
+        "/Volumes/main/my_schema//",
+        "/Volumes//my_schema/my_volume",
+    }
+}
+
+func TestExtractVolumeFromPath(t *testing.T) {
+    catalogName, schemaName, volumeName, err := extractVolumeFromPath("/Volumes/main/my_schema/my_volume")
+    require.NoError(t, err)
+    assert.Equal(t, "main", catalogName)
+    assert.Equal(t, "my_schema", schemaName)
+    assert.Equal(t, "my_volume", volumeName)
+
+    for _, p := range invalidVolumePaths() {
+        _, _, _, err := extractVolumeFromPath(p)
+        assert.EqualError(t, err, fmt.Sprintf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p))
+    }
+}
+
+func TestValidateArtifactPathWithInvalidPaths(t *testing.T) {
+    for _, p := range invalidVolumePaths() {
+        b := &bundle.Bundle{
+            Config: config.Root{
+                Workspace: config.Workspace{
+                    ArtifactPath: p,
+                },
+            },
+        }
+
+        bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}})
+
+        diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), ValidateArtifactPath())
+        require.Equal(t, diag.Diagnostics{{
+            Severity:  diag.Error,
+            Summary:   fmt.Sprintf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p),
+            Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}},
+            Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
+        }}, diags)
+    }
+}
+
+func TestFindVolumeInBundle(t *testing.T) {
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Resources: config.Resources{
+                Volumes: map[string]*resources.Volume{
+                    "foo": {
+                        CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
+                            CatalogName: "main",
+                            Name:        "my_volume",
+                            SchemaName:  "my_schema",
+                        },
+                    },
+                },
+            },
+        },
+    }
+
+    bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{
+        {
+            File:   "volume.yml",
+            Line:   1,
+            Column: 2,
+        },
+    })
+
+    // volume is in DAB.
+    path, locations, ok := findVolumeInBundle(b.Config, "main", "my_schema", "my_volume")
+    assert.True(t, ok)
+    assert.Equal(t, []dyn.Location{{
+        File:   "volume.yml",
+        Line:   1,
+        Column: 2,
+    }}, locations)
+    assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path)
+
+    // wrong volume name
+    _, _, ok = findVolumeInBundle(b.Config, "main", "my_schema", "doesnotexist")
+    assert.False(t, ok)
+
+    // wrong schema name
+    _, _, ok = findVolumeInBundle(b.Config, "main", "doesnotexist", "my_volume")
+    assert.False(t, ok)
+
+    // wrong catalog name
+    _, _, ok = findVolumeInBundle(b.Config, "doesnotexist", "my_schema", "my_volume")
+    assert.False(t, ok)
+
+    // schema name is interpolated but does not have the right prefix. In this case
+    // we should not match the volume.
+    b.Config.Resources.Volumes["foo"].SchemaName = "${foo.bar.baz}"
+    _, _, ok = findVolumeInBundle(b.Config, "main", "my_schema", "my_volume")
+    assert.False(t, ok)
+
+    // schema name is interpolated.
+    b.Config.Resources.Volumes["foo"].SchemaName = "${resources.schemas.my_schema.name}"
+    path, locations, ok = findVolumeInBundle(b.Config, "main", "valuedoesnotmatter", "my_volume")
+    assert.True(t, ok)
+    assert.Equal(t, []dyn.Location{{
+        File:   "volume.yml",
+        Line:   1,
+        Column: 2,
+    }}, locations)
+    assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path)
+}
@@ -13,7 +13,7 @@ func TestLookup_Coverage(t *testing.T) {
    val := reflect.ValueOf(lookup)
    typ := val.Type()

-   for i := 0; i < val.NumField(); i++ {
+   for i := range val.NumField() {
        field := val.Field(i)
        if field.Kind() != reflect.String {
            t.Fatalf("Field %s is not a string", typ.Field(i).Name)
@@ -12,7 +12,7 @@ func TestGetPanics(t *testing.T) {
    defer func() {
        r := recover()
        require.NotNil(t, r, "The function did not panic")
-       assert.Equal(t, r, "context not configured with bundle")
+       assert.Equal(t, "context not configured with bundle", r)
    }()

    Get(context.Background())
@@ -4,7 +4,6 @@ import (
    "bytes"
    "context"
    "encoding/json"
-   "errors"
    "io"
    "io/fs"
    "os"
@@ -279,7 +278,7 @@ func TestStatePullNoState(t *testing.T) {
    require.NoError(t, err)

    _, err = os.Stat(statePath)
-   require.True(t, errors.Is(err, fs.ErrNotExist))
+   require.ErrorIs(t, err, fs.ErrNotExist)
 }

 func TestStatePullOlderState(t *testing.T) {
@@ -60,7 +60,7 @@ func TestStateUpdate(t *testing.T) {
    require.NoError(t, err)

    require.Equal(t, int64(1), state.Seq)
-   require.Equal(t, state.Files, Filelist{
+   require.Equal(t, Filelist{
        {
            LocalPath: "test1.py",
        },
@@ -68,7 +68,7 @@ func TestStateUpdate(t *testing.T) {
            LocalPath:  "test2.py",
            IsNotebook: true,
        },
-   })
+   }, state.Files)
    require.Equal(t, build.GetInfo().Version, state.CliVersion)

    diags = bundle.Apply(ctx, b, s)
@@ -79,7 +79,7 @@ func TestStateUpdate(t *testing.T) {
    require.NoError(t, err)

    require.Equal(t, int64(2), state.Seq)
-   require.Equal(t, state.Files, Filelist{
+   require.Equal(t, Filelist{
        {
            LocalPath: "test1.py",
        },
@@ -87,7 +87,7 @@ func TestStateUpdate(t *testing.T) {
            LocalPath:  "test2.py",
            IsNotebook: true,
        },
-   })
+   }, state.Files)
    require.Equal(t, build.GetInfo().Version, state.CliVersion)

    // Valid non-empty UUID is generated.
@@ -130,7 +130,7 @@ func TestStateUpdateWithExistingState(t *testing.T) {
    require.NoError(t, err)

    require.Equal(t, int64(11), state.Seq)
-   require.Equal(t, state.Files, Filelist{
+   require.Equal(t, Filelist{
        {
            LocalPath: "test1.py",
        },
@@ -138,7 +138,7 @@ func TestStateUpdateWithExistingState(t *testing.T) {
            LocalPath:  "test2.py",
            IsNotebook: true,
        },
-   })
+   }, state.Files)
    require.Equal(t, build.GetInfo().Version, state.CliVersion)

    // Existing UUID is not overwritten.
@@ -254,10 +254,10 @@ func TestBundleToTerraformPipeline(t *testing.T) {
    assert.Equal(t, "my pipeline", resource.Name)
    assert.Len(t, resource.Library, 2)
    assert.Len(t, resource.Notification, 2)
-   assert.Equal(t, resource.Notification[0].Alerts, []string{"on-update-fatal-failure"})
-   assert.Equal(t, resource.Notification[0].EmailRecipients, []string{"jane@doe.com"})
-   assert.Equal(t, resource.Notification[1].Alerts, []string{"on-update-failure", "on-flow-failure"})
-   assert.Equal(t, resource.Notification[1].EmailRecipients, []string{"jane@doe.com", "john@doe.com"})
+   assert.Equal(t, []string{"on-update-fatal-failure"}, resource.Notification[0].Alerts)
+   assert.Equal(t, []string{"jane@doe.com"}, resource.Notification[0].EmailRecipients)
+   assert.Equal(t, []string{"on-update-failure", "on-flow-failure"}, resource.Notification[1].Alerts)
+   assert.Equal(t, []string{"jane@doe.com", "john@doe.com"}, resource.Notification[1].EmailRecipients)
    assert.Nil(t, out.Data)
 }
@@ -454,7 +454,7 @@ func TestBundleToTerraformModelServing(t *testing.T) {
    assert.Equal(t, "name", resource.Name)
    assert.Equal(t, "model_name", resource.Config.ServedModels[0].ModelName)
    assert.Equal(t, "1", resource.Config.ServedModels[0].ModelVersion)
-   assert.Equal(t, true, resource.Config.ServedModels[0].ScaleToZeroEnabled)
+   assert.True(t, resource.Config.ServedModels[0].ScaleToZeroEnabled)
    assert.Equal(t, "Small", resource.Config.ServedModels[0].WorkloadSize)
    assert.Equal(t, "model_name-1", resource.Config.TrafficConfig.Routes[0].ServedModelName)
    assert.Equal(t, 100, resource.Config.TrafficConfig.Routes[0].TrafficPercentage)
@@ -1261,7 +1261,7 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {

 func AssertFullResourceCoverage(t *testing.T, config *config.Root) {
    resources := reflect.ValueOf(config.Resources)
-   for i := 0; i < resources.NumField(); i++ {
+   for i := range resources.NumField() {
        field := resources.Field(i)
        if field.Kind() == reflect.Map {
            assert.True(
@@ -7,6 +7,7 @@ import (
    "io"
    "os"
    "path/filepath"
+   "strings"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/cmdio"
@@ -67,7 +68,7 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    if changed && !m.opts.AutoApprove {
        output := buf.String()
        // Remove output starting from Warning until end of output
-       output = output[:bytes.Index([]byte(output), []byte("Warning:"))]
+       output = output[:strings.Index(output, "Warning:")]
        cmdio.LogString(ctx, output)

        if !cmdio.IsPromptSupported(ctx) {
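One caveat with this simplification (present in the earlier bytes.Index version as well): strings.Index returns -1 when the substring is absent, and slicing with a negative index panics. A defensive variant under the assumption that the marker may be missing — a sketch, not the commit's code:

package example

import "strings"

// truncateAt cuts output at the first occurrence of marker, and returns it
// unchanged when the marker is absent (strings.Index returns -1 in that case).
func truncateAt(output, marker string) string {
    if i := strings.Index(output, marker); i >= 0 {
        return output[:i]
    }
    return output
}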
@@ -225,7 +225,7 @@ func TestSetProxyEnvVars(t *testing.T) {
    env := make(map[string]string, 0)
    err := setProxyEnvVars(context.Background(), env, b)
    require.NoError(t, err)
-   assert.Len(t, env, 0)
+   assert.Empty(t, env)

    // Lower case set.
    clearEnv()
@@ -293,7 +293,7 @@ func TestSetUserProfileFromInheritEnvVars(t *testing.T) {
    require.NoError(t, err)

    assert.Contains(t, env, "USERPROFILE")
-   assert.Equal(t, env["USERPROFILE"], "c:\\foo\\c")
+   assert.Equal(t, "c:\\foo\\c", env["USERPROFILE"])
 }

 func TestInheritEnvVarsWithAbsentTFConfigFile(t *testing.T) {
@@ -71,7 +71,7 @@ func TestStatePushLargeState(t *testing.T) {
    b := statePushTestBundle(t)

    largeState := map[string]any{}
-   for i := 0; i < 1000000; i++ {
+   for i := range 1000000 {
        largeState[fmt.Sprintf("field_%d", i)] = i
    }
@@ -2,7 +2,6 @@ package main

 import (
    "bytes"
-   "fmt"
    "io"
    "os"
    "path"
@@ -80,7 +79,7 @@ func TestRequiredAnnotationsForNewFields(t *testing.T) {
        },
    })
    assert.NoError(t, err)
-   assert.Empty(t, updatedFieldPaths, fmt.Sprintf("Missing JSON-schema descriptions for new config fields in bundle/internal/schema/annotations.yml:\n%s", strings.Join(updatedFieldPaths, "\n")))
+   assert.Empty(t, updatedFieldPaths, "Missing JSON-schema descriptions for new config fields in bundle/internal/schema/annotations.yml:\n%s", strings.Join(updatedFieldPaths, "\n"))
 }

 // Checks whether types in annotation files are still present in Config type
@@ -54,7 +54,7 @@ func (p *openapiParser) findRef(typ reflect.Type) (jsonschema.Schema, bool) {

    // Check for embedded Databricks Go SDK types.
    if typ.Kind() == reflect.Struct {
-       for i := 0; i < typ.NumField(); i++ {
+       for i := range typ.NumField() {
            if !typ.Field(i).Anonymous {
                continue
            }
@@ -24,7 +24,7 @@ func GetFilerForLibraries(ctx context.Context, b *bundle.Bundle) (filer.Filer, string, diag.Diagnostics) {

    switch {
    case IsVolumesPath(artifactPath):
-       return filerForVolume(ctx, b)
+       return filerForVolume(b)

    default:
        return filerForWorkspace(b)
@@ -7,10 +7,7 @@ import (
    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/libs/filer"
-   sdkconfig "github.com/databricks/databricks-sdk-go/config"
-   "github.com/databricks/databricks-sdk-go/experimental/mocks"
    "github.com/stretchr/testify/assert"
-   "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
 )
@@ -39,11 +36,6 @@ func TestGetFilerForLibrariesValidUcVolume(t *testing.T) {
        },
    }

-   m := mocks.NewMockWorkspaceClient(t)
-   m.WorkspaceClient.Config = &sdkconfig.Config{}
-   m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(nil)
-   b.SetWorkpaceClient(m.WorkspaceClient)
-
    client, uploadPath, diags := GetFilerForLibraries(context.Background(), b)
    require.NoError(t, diags.Error())
    assert.Equal(t, "/Volumes/main/my_schema/my_volume/.internal", uploadPath)
@@ -1,132 +1,16 @@
 package libraries

 import (
-   "context"
-   "errors"
-   "fmt"
    "path"
-   "strings"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
-   "github.com/databricks/cli/libs/dyn"
-   "github.com/databricks/cli/libs/dyn/dynvar"
    "github.com/databricks/cli/libs/filer"
-   "github.com/databricks/databricks-sdk-go/apierr"
 )

-func extractVolumeFromPath(artifactPath string) (string, string, string, error) {
-   if !IsVolumesPath(artifactPath) {
-       return "", "", "", fmt.Errorf("expected artifact_path to start with /Volumes/, got %s", artifactPath)
-   }
-
-   parts := strings.Split(artifactPath, "/")
-   volumeFormatErr := fmt.Errorf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", artifactPath)
-
-   // Incorrect format.
-   if len(parts) < 5 {
-       return "", "", "", volumeFormatErr
-   }
-
-   catalogName := parts[2]
-   schemaName := parts[3]
-   volumeName := parts[4]
-
-   // Incorrect format.
-   if catalogName == "" || schemaName == "" || volumeName == "" {
-       return "", "", "", volumeFormatErr
-   }
-
-   return catalogName, schemaName, volumeName, nil
-}
-
-// This function returns a filer for ".internal" folder inside the directory configured
-// at `workspace.artifact_path`.
-// This function also checks if the UC volume exists in the workspace and then:
-// 1. If the UC volume exists in the workspace:
-//    Returns a filer for the UC volume.
-// 2. If the UC volume does not exist in the workspace but is (with high confidence) defined in
-//    the bundle configuration:
-//    Returns an error and a warning that instructs the user to deploy the
-//    UC volume before using it in the artifact path.
-// 3. If the UC volume does not exist in the workspace and is not defined in the bundle configuration:
-//    Returns an error.
-func filerForVolume(ctx context.Context, b *bundle.Bundle) (filer.Filer, string, diag.Diagnostics) {
-   artifactPath := b.Config.Workspace.ArtifactPath
+func filerForVolume(b *bundle.Bundle) (filer.Filer, string, diag.Diagnostics) {
    w := b.WorkspaceClient()

-   catalogName, schemaName, volumeName, err := extractVolumeFromPath(artifactPath)
-   if err != nil {
-       return nil, "", diag.Diagnostics{
-           {
-               Severity:  diag.Error,
-               Summary:   err.Error(),
-               Locations: b.Config.GetLocations("workspace.artifact_path"),
-               Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
-           },
-       }
-   }
-
-   // Check if the UC volume exists in the workspace.
-   volumePath := fmt.Sprintf("/Volumes/%s/%s/%s", catalogName, schemaName, volumeName)
-   err = w.Files.GetDirectoryMetadataByDirectoryPath(ctx, volumePath)
-
-   // If the volume exists already, directly return the filer for the path to
-   // upload the artifacts to.
-   if err == nil {
-       uploadPath := path.Join(artifactPath, InternalDirName)
-       f, err := filer.NewFilesClient(w, uploadPath)
-       return f, uploadPath, diag.FromErr(err)
-   }
-
-   baseErr := diag.Diagnostic{
-       Severity:  diag.Error,
-       Summary:   fmt.Sprintf("unable to determine if volume at %s exists: %s", volumePath, err),
-       Locations: b.Config.GetLocations("workspace.artifact_path"),
-       Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
-   }
-
-   if errors.Is(err, apierr.ErrNotFound) {
-       // Since the API returned a 404, the volume does not exist.
-       // Modify the error message to provide more context.
-       baseErr.Summary = fmt.Sprintf("volume %s does not exist: %s", volumePath, err)
-
-       // If the volume is defined in the bundle, provide a more helpful error diagnostic,
-       // with more details and location information.
-       path, locations, ok := findVolumeInBundle(b, catalogName, schemaName, volumeName)
-       if !ok {
-           return nil, "", diag.Diagnostics{baseErr}
-       }
-       baseErr.Detail = `You are using a volume in your artifact_path that is managed by
-this bundle but which has not been deployed yet. Please first deploy
-the volume using 'bundle deploy' and then switch over to using it in
-the artifact_path.`
-       baseErr.Paths = append(baseErr.Paths, path)
-       baseErr.Locations = append(baseErr.Locations, locations...)
-   }
-
-   return nil, "", diag.Diagnostics{baseErr}
-}
-
-func findVolumeInBundle(b *bundle.Bundle, catalogName, schemaName, volumeName string) (dyn.Path, []dyn.Location, bool) {
-   volumes := b.Config.Resources.Volumes
-   for k, v := range volumes {
-       if v.CatalogName != catalogName || v.Name != volumeName {
-           continue
-       }
-       // UC schemas can be defined in the bundle itself, and thus might be interpolated
-       // at runtime via the ${resources.schemas.<name>} syntax. Thus we match the volume
-       // definition if the schema name is the same as the one in the bundle, or if the
-       // schema name is interpolated.
-       // We only have to check for ${resources.schemas...} references because any
-       // other valid reference (like ${var.foo}) would have been interpolated by this point.
-       p, ok := dynvar.PureReferenceToPath(v.SchemaName)
-       isSchemaDefinedInBundle := ok && p.HasPrefix(dyn.Path{dyn.Key("resources"), dyn.Key("schemas")})
-       if v.SchemaName != schemaName && !isSchemaDefinedInBundle {
-           continue
-       }
-       pathString := fmt.Sprintf("resources.volumes.%s", k)
-       return dyn.MustPathFromString(pathString), b.Config.GetLocations(pathString), true
-   }
-   return nil, nil, false
+   uploadPath := path.Join(b.Config.Workspace.ArtifactPath, InternalDirName)
+   f, err := filer.NewFilesClient(w, uploadPath)
+   return f, uploadPath, diag.FromErr(err)
 }
@@ -1,277 +1,27 @@
package libraries

import (
    "context"
    "fmt"
    "path"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/cli/bundle/internal/bundletest"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/cli/libs/filer"
    "github.com/databricks/databricks-sdk-go/apierr"
    sdkconfig "github.com/databricks/databricks-sdk-go/config"
    "github.com/databricks/databricks-sdk-go/experimental/mocks"
    "github.com/databricks/databricks-sdk-go/service/catalog"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
)

func TestFindVolumeInBundle(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Resources: config.Resources{
                Volumes: map[string]*resources.Volume{
                    "foo": {
                        CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
                            CatalogName: "main",
                            Name:        "my_volume",
                            SchemaName:  "my_schema",
                        },
                    },
                },
            },
        },
    }

    bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{
        {
            File:   "volume.yml",
            Line:   1,
            Column: 2,
        },
    })

    // volume is in DAB.
    path, locations, ok := findVolumeInBundle(b, "main", "my_schema", "my_volume")
    assert.True(t, ok)
    assert.Equal(t, []dyn.Location{{
        File:   "volume.yml",
        Line:   1,
        Column: 2,
    }}, locations)
    assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path)

    // wrong volume name
    _, _, ok = findVolumeInBundle(b, "main", "my_schema", "doesnotexist")
    assert.False(t, ok)

    // wrong schema name
    _, _, ok = findVolumeInBundle(b, "main", "doesnotexist", "my_volume")
    assert.False(t, ok)

    // wrong catalog name
    _, _, ok = findVolumeInBundle(b, "doesnotexist", "my_schema", "my_volume")
    assert.False(t, ok)

    // schema name is interpolated but does not have the right prefix. In this case
    // we should not match the volume.
    b.Config.Resources.Volumes["foo"].SchemaName = "${foo.bar.baz}"
    _, _, ok = findVolumeInBundle(b, "main", "my_schema", "my_volume")
    assert.False(t, ok)

    // schema name is interpolated.
    b.Config.Resources.Volumes["foo"].SchemaName = "${resources.schemas.my_schema.name}"
    path, locations, ok = findVolumeInBundle(b, "main", "valuedoesnotmatter", "my_volume")
    assert.True(t, ok)
    assert.Equal(t, []dyn.Location{{
        File:   "volume.yml",
        Line:   1,
        Column: 2,
    }}, locations)
    assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path)
}

func TestFilerForVolumeForErrorFromAPI(t *testing.T) {
func TestFilerForVolume(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ArtifactPath: "/Volumes/main/my_schema/my_volume",
                ArtifactPath: "/Volumes/main/my_schema/my_volume/abc",
            },
        },
    }

    bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}})

    m := mocks.NewMockWorkspaceClient(t)
    m.WorkspaceClient.Config = &sdkconfig.Config{}
    m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(fmt.Errorf("error from API"))
    b.SetWorkpaceClient(m.WorkspaceClient)

    _, _, diags := filerForVolume(context.Background(), b)
    assert.Equal(t, diag.Diagnostics{
        {
            Severity:  diag.Error,
            Summary:   "unable to determine if volume at /Volumes/main/my_schema/my_volume exists: error from API",
            Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}},
            Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
        },
    }, diags)
}

func TestFilerForVolumeWithVolumeNotFound(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ArtifactPath: "/Volumes/main/my_schema/doesnotexist",
            },
        },
    }

    bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}})

    m := mocks.NewMockWorkspaceClient(t)
    m.WorkspaceClient.Config = &sdkconfig.Config{}
    m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/doesnotexist").Return(apierr.NotFound("some error message"))
    b.SetWorkpaceClient(m.WorkspaceClient)

    _, _, diags := filerForVolume(context.Background(), b)
    assert.Equal(t, diag.Diagnostics{
        {
            Severity:  diag.Error,
            Summary:   "volume /Volumes/main/my_schema/doesnotexist does not exist: some error message",
            Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}},
            Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
        },
    }, diags)
}

func TestFilerForVolumeNotFoundAndInBundle(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ArtifactPath: "/Volumes/main/my_schema/my_volume",
            },
            Resources: config.Resources{
                Volumes: map[string]*resources.Volume{
                    "foo": {
                        CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
                            CatalogName: "main",
                            Name:        "my_volume",
                            VolumeType:  "MANAGED",
                            SchemaName:  "my_schema",
                        },
                    },
                },
            },
        },
    }

    bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}})
    bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{{File: "volume.yml", Line: 1, Column: 2}})

    m := mocks.NewMockWorkspaceClient(t)
    m.WorkspaceClient.Config = &sdkconfig.Config{}
    m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(apierr.NotFound("error from API"))
    b.SetWorkpaceClient(m.WorkspaceClient)

    _, _, diags := GetFilerForLibraries(context.Background(), b)
    assert.Equal(t, diag.Diagnostics{
        {
            Severity:  diag.Error,
            Summary:   "volume /Volumes/main/my_schema/my_volume does not exist: error from API",
            Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}, {File: "volume.yml", Line: 1, Column: 2}},
            Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path"), dyn.MustPathFromString("resources.volumes.foo")},
            Detail: `You are using a volume in your artifact_path that is managed by
this bundle but which has not been deployed yet. Please first deploy
the volume using 'bundle deploy' and then switch over to using it in
the artifact_path.`,
        },
    }, diags)
}

func invalidVolumePaths() []string {
    return []string{
        "/Volumes/",
        "/Volumes/main",
        "/Volumes/main/",
        "/Volumes/main//",
        "/Volumes/main//my_schema",
        "/Volumes/main/my_schema",
        "/Volumes/main/my_schema/",
        "/Volumes/main/my_schema//",
        "/Volumes//my_schema/my_volume",
    }
}

func TestFilerForVolumeWithInvalidVolumePaths(t *testing.T) {
    for _, p := range invalidVolumePaths() {
        b := &bundle.Bundle{
            Config: config.Root{
                Workspace: config.Workspace{
                    ArtifactPath: p,
                },
            },
        }

        bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}})

        _, _, diags := GetFilerForLibraries(context.Background(), b)
        require.Equal(t, diags, diag.Diagnostics{{
            Severity:  diag.Error,
            Summary:   fmt.Sprintf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p),
            Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}},
            Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
        }})
    }
}

func TestFilerForVolumeWithInvalidPrefix(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ArtifactPath: "/Volume/main/my_schema/my_volume",
            },
        },
    }

    _, _, diags := filerForVolume(context.Background(), b)
    require.EqualError(t, diags.Error(), "expected artifact_path to start with /Volumes/, got /Volume/main/my_schema/my_volume")
}

func TestFilerForVolumeWithValidVolumePaths(t *testing.T) {
    validPaths := []string{
        "/Volumes/main/my_schema/my_volume",
        "/Volumes/main/my_schema/my_volume/",
        "/Volumes/main/my_schema/my_volume/a/b/c",
        "/Volumes/main/my_schema/my_volume/a/a/a",
    }

    for _, p := range validPaths {
        b := &bundle.Bundle{
            Config: config.Root{
                Workspace: config.Workspace{
                    ArtifactPath: p,
                },
            },
        }

        m := mocks.NewMockWorkspaceClient(t)
        m.WorkspaceClient.Config = &sdkconfig.Config{}
        m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(nil)
        b.SetWorkpaceClient(m.WorkspaceClient)

        client, uploadPath, diags := filerForVolume(context.Background(), b)
        require.NoError(t, diags.Error())
        assert.Equal(t, path.Join(p, ".internal"), uploadPath)
        assert.IsType(t, &filer.FilesClient{}, client)
    }
}

func TestExtractVolumeFromPath(t *testing.T) {
    catalogName, schemaName, volumeName, err := extractVolumeFromPath("/Volumes/main/my_schema/my_volume")
    require.NoError(t, err)
    assert.Equal(t, "main", catalogName)
    assert.Equal(t, "my_schema", schemaName)
    assert.Equal(t, "my_volume", volumeName)

    for _, p := range invalidVolumePaths() {
        _, _, _, err := extractVolumeFromPath(p)
        assert.EqualError(t, err, fmt.Sprintf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p))
    }
    client, uploadPath, diags := filerForVolume(b)
    require.NoError(t, diags.Error())
    assert.Equal(t, path.Join("/Volumes/main/my_schema/my_volume/abc/.internal"), uploadPath)
    assert.IsType(t, &filer.FilesClient{}, client)
}

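The filer tests above share one wiring pattern: build a mock workspace client, register an expectation on the Files API, and inject the client into the bundle. A condensed sketch of that skeleton, assuming the mock and bundle APIs behave exactly as they are used in the tests above (newVolumeTestBundle is a hypothetical helper name):

package libraries

import (
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    sdkconfig "github.com/databricks/databricks-sdk-go/config"
    "github.com/databricks/databricks-sdk-go/experimental/mocks"
    "github.com/stretchr/testify/mock"
)

// newVolumeTestBundle illustrates the setup shared by the tests above: a
// bundle whose artifact_path points at a UC volume, plus a mocked Files API
// that reports the volume directory as present (nil error).
func newVolumeTestBundle(t *testing.T, artifactPath string) *bundle.Bundle {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{ArtifactPath: artifactPath},
        },
    }
    m := mocks.NewMockWorkspaceClient(t)
    m.WorkspaceClient.Config = &sdkconfig.Config{}
    m.GetMockFilesAPI().EXPECT().
        GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").
        Return(nil)
    b.SetWorkpaceClient(m.WorkspaceClient) // method name (including the typo) as it exists in the codebase
    return b
}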
@@ -12,25 +12,25 @@ func TestLibraryPath(t *testing.T) {

    p, err := libraryPath(&compute.Library{Whl: path})
    assert.Equal(t, path, p)
    assert.Nil(t, err)
    assert.NoError(t, err)

    p, err = libraryPath(&compute.Library{Jar: path})
    assert.Equal(t, path, p)
    assert.Nil(t, err)
    assert.NoError(t, err)

    p, err = libraryPath(&compute.Library{Egg: path})
    assert.Equal(t, path, p)
    assert.Nil(t, err)
    assert.NoError(t, err)

    p, err = libraryPath(&compute.Library{Requirements: path})
    assert.Equal(t, path, p)
    assert.Nil(t, err)
    assert.NoError(t, err)

    p, err = libraryPath(&compute.Library{})
    assert.Equal(t, "", p)
    assert.NotNil(t, err)
    assert.Error(t, err)

    p, err = libraryPath(&compute.Library{Pypi: &compute.PythonPyPiLibrary{Package: "pypipackage"}})
    assert.Equal(t, "", p)
    assert.NotNil(t, err)
    assert.Error(t, err)
}

@@ -11,8 +11,6 @@ import (
    mockfiler "github.com/databricks/cli/internal/mocks/libs/filer"
    "github.com/databricks/cli/internal/testutil"
    "github.com/databricks/cli/libs/filer"
    sdkconfig "github.com/databricks/databricks-sdk-go/config"
    "github.com/databricks/databricks-sdk-go/experimental/mocks"
    "github.com/databricks/databricks-sdk-go/service/compute"
    "github.com/databricks/databricks-sdk-go/service/jobs"
    "github.com/stretchr/testify/mock"

@@ -183,11 +181,6 @@ func TestArtifactUploadForVolumes(t *testing.T) {
        filer.CreateParentDirectories,
    ).Return(nil)

    m := mocks.NewMockWorkspaceClient(t)
    m.WorkspaceClient.Config = &sdkconfig.Config{}
    m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/foo/bar/artifacts").Return(nil)
    b.SetWorkpaceClient(m.WorkspaceClient)

    diags := bundle.Apply(context.Background(), b, bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler)))
    require.NoError(t, diags.Error())

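The swap from assert.Nil(t, err) to assert.NoError(t, err) is more than cosmetic: on failure, NoError includes the error's own message in the output, whereas Nil only reports that the value was non-nil. A minimal sketch of the difference, assuming only the testify API (doWork is a hypothetical stand-in for the code under test):

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func doWork() error { return nil } // hypothetical helper

func TestErrorAssertions(t *testing.T) {
    err := doWork()

    // Preferred: if err were non-nil, the failure output would include the
    // error's message ("Received unexpected error: ...").
    assert.NoError(t, err)

    // Equivalent check, but a failure would only say the value was not nil.
    assert.Nil(t, err)
}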
@@ -99,32 +99,32 @@ func TestFilterCurrentUser(t *testing.T) {
    assert.NoError(t, diags.Error())

    // Assert current user is filtered out.
    assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions))
    assert.Len(t, b.Config.Resources.Jobs["job1"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, robot)
    assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob)

    assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions))
    assert.Len(t, b.Config.Resources.Jobs["job2"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, robot)
    assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob)

    assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions))
    assert.Len(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, robot)
    assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob)

    assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions))
    assert.Len(t, b.Config.Resources.Experiments["experiment1"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, robot)
    assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob)

    assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions))
    assert.Len(t, b.Config.Resources.Models["model1"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, robot)
    assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob)

    assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions))
    assert.Len(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, robot)
    assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob)

    // Assert there's no change to the grant.
    assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants))
    assert.Len(t, b.Config.Resources.RegisteredModels["registered_model1"].Grants, 1)
}

func TestFilterCurrentServicePrincipal(t *testing.T) {

@@ -134,32 +134,32 @@ func TestFilterCurrentServicePrincipal(t *testing.T) {
    assert.NoError(t, diags.Error())

    // Assert current user is filtered out.
    assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions))
    assert.Len(t, b.Config.Resources.Jobs["job1"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, alice)
    assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob)

    assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions))
    assert.Len(t, b.Config.Resources.Jobs["job2"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, alice)
    assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob)

    assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions))
    assert.Len(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, alice)
    assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob)

    assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions))
    assert.Len(t, b.Config.Resources.Experiments["experiment1"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, alice)
    assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob)

    assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions))
    assert.Len(t, b.Config.Resources.Models["model1"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, alice)
    assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob)

    assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions))
    assert.Len(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, 2)
    assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, alice)
    assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob)

    // Assert there's no change to the grant.
    assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants))
    assert.Len(t, b.Config.Resources.RegisteredModels["registered_model1"].Grants, 1)
}

func TestFilterCurrentUserDoesNotErrorWhenNoResources(t *testing.T) {

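The assert.Len over assert.Equal(t, n, len(x)) rewrite follows the same logic: on failure, Len prints the collection's actual contents alongside the expected length, not just two numbers. A minimal sketch, assuming only the testify API:

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestLenAssertion(t *testing.T) {
    permissions := []string{"CAN_VIEW", "CAN_MANAGE"}

    // On failure, prints the slice itself, e.g.
    // `"[CAN_VIEW]" should have 2 item(s), but has 1`.
    assert.Len(t, permissions, 2)

    // On failure, prints only `expected: 2, actual: 1`.
    assert.Equal(t, 2, len(permissions))
}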
@@ -164,7 +164,7 @@ func TestAllResourcesExplicitlyDefinedForPermissionsSupport(t *testing.T) {

    for _, resource := range unsupportedResources {
        _, ok := levelsMap[resource]
        assert.False(t, ok, fmt.Sprintf("Resource %s is defined in both levelsMap and unsupportedResources", resource))
        assert.False(t, ok, "Resource %s is defined in both levelsMap and unsupportedResources", resource)
    }

    for _, resource := range r.AllResources() {

@@ -28,7 +28,7 @@ func TestPermissionDiagnosticsApplyFail(t *testing.T) {
    })

    diags := permissions.PermissionDiagnostics().Apply(context.Background(), b)
    require.Equal(t, diags[0].Severity, diag.Warning)
    require.Equal(t, diag.Warning, diags[0].Severity)
    require.Contains(t, diags[0].Summary, "permissions section should include testuser@databricks.com or one of their groups with CAN_MANAGE permissions")
}

@@ -54,7 +54,7 @@ func TestConvertPythonParams(t *testing.T) {
    err = runner.convertPythonParams(opts)
    require.NoError(t, err)
    require.Contains(t, opts.Job.notebookParams, "__python_params")
    require.Equal(t, opts.Job.notebookParams["__python_params"], `["param1","param2","param3"]`)
    require.Equal(t, `["param1","param2","param3"]`, opts.Job.notebookParams["__python_params"])
}

func TestJobRunnerCancel(t *testing.T) {

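Dropping the fmt.Sprintf wrapper relies on testify's trailing msgAndArgs parameters: the assertion formats the message itself, and only when it fails, so passing assertions no longer pay for string formatting. A minimal sketch, assuming only the testify API (the resource value is hypothetical):

package example

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestMessageFormatting(t *testing.T) {
    resource := "volumes" // hypothetical value for illustration
    ok := true

    // Lazy: testify formats the message only if the assertion fails.
    assert.True(t, ok, "Resource %s is defined in both levelsMap and unsupportedResources", resource)

    // Eager: fmt.Sprintf runs on every test run, pass or fail.
    assert.True(t, ok, fmt.Sprintf("Resource %s is defined in both levelsMap and unsupportedResources", resource))
}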
@@ -17,7 +17,7 @@ import (

func filterEventsByUpdateId(events []pipelines.PipelineEvent, updateId string) []pipelines.PipelineEvent {
    result := []pipelines.PipelineEvent{}
    for i := 0; i < len(events); i++ {
    for i := range events {
        if events[i].Origin.UpdateId == updateId {
            result = append(result, events[i])
        }

@@ -32,7 +32,7 @@ func (r *pipelineRunner) logEvent(ctx context.Context, event pipelines.PipelineE
    }
    if event.Error != nil && len(event.Error.Exceptions) > 0 {
        logString += "trace for most recent exception: \n"
        for i := 0; i < len(event.Error.Exceptions); i++ {
        for i := range len(event.Error.Exceptions) {
            logString += fmt.Sprintf("%s\n", event.Error.Exceptions[i].Message)
        }
    }

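The for i := range len(...) form uses Go 1.22's range-over-integer: range n counts i from 0 up to, but excluding, n. A minimal sketch, assuming the module's go directive is at 1.22 or newer:

package main

import "fmt"

func main() {
    messages := []string{"first exception", "second exception"}

    // Go 1.22+: ranging over an int yields 0, 1, ..., n-1.
    for i := range len(messages) {
        fmt.Printf("%d: %s\n", i, messages[i])
    }

    // Ranging over the slice itself is the more common equivalent when only
    // the index is needed, as in filterEventsByUpdateId above.
    for i := range messages {
        fmt.Printf("%d: %s\n", i, messages[i])
    }
}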
@@ -90,11 +90,6 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp
    // Include resource key in logger.
    ctx = log.NewContext(ctx, log.GetLogger(ctx).With("resource", r.Key()))
    w := r.bundle.WorkspaceClient()
    _, err := w.Pipelines.GetByPipelineId(ctx, pipelineID)
    if err != nil {
        log.Warnf(ctx, "Cannot get pipeline: %s", err)
        return nil, err
    }

    req, err := opts.Pipeline.toPayload(r.pipeline, pipelineID)
    if err != nil {

@@ -90,8 +90,6 @@ func TestPipelineRunnerRestart(t *testing.T) {
        PipelineId: "123",
    }).Return(mockWait, nil)

    pipelineApi.EXPECT().GetByPipelineId(mock.Anything, "123").Return(&pipelines.GetPipelineResponse{}, nil)

    // Mock runner starting a new update
    pipelineApi.EXPECT().StartUpdate(mock.Anything, pipelines.StartUpdate{
        PipelineId: "123",

@@ -30,7 +30,7 @@ func TestComplexVariables(t *testing.T) {
    require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"])
    require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"])

    require.Equal(t, 3, len(b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries))
    require.Len(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, 3)
    require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{
        Jar: "/path/to/jar",
    })

@@ -2,7 +2,6 @@ package config_tests

import (
    "context"
    "fmt"
    "strings"
    "testing"

@@ -16,7 +15,7 @@ func TestGitAutoLoadWithEnvironment(t *testing.T) {
    bundle.Apply(context.Background(), b, mutator.LoadGitDetails())
    assert.True(t, b.Config.Bundle.Git.Inferred)
    validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")
    assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL))
    assert.True(t, validUrl, "Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)
}

func TestGitManuallySetBranchWithEnvironment(t *testing.T) {

@@ -25,5 +24,5 @@ func TestGitManuallySetBranchWithEnvironment(t *testing.T) {
    assert.False(t, b.Config.Bundle.Git.Inferred)
    assert.Equal(t, "main", b.Config.Bundle.Git.Branch)
    validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")
    assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL))
    assert.True(t, validUrl, "Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)
}

@@ -21,8 +21,8 @@ func TestEnvironmentOverridesResourcesDev(t *testing.T) {
    assert.Equal(t, "base job", b.Config.Resources.Jobs["job1"].Name)

    // Base values are preserved in the development environment.
    assert.Equal(t, true, b.Config.Resources.Pipelines["boolean1"].Photon)
    assert.Equal(t, false, b.Config.Resources.Pipelines["boolean2"].Photon)
    assert.True(t, b.Config.Resources.Pipelines["boolean1"].Photon)
    assert.False(t, b.Config.Resources.Pipelines["boolean2"].Photon)
}

func TestEnvironmentOverridesResourcesStaging(t *testing.T) {

@@ -30,6 +30,6 @@ func TestEnvironmentOverridesResourcesStaging(t *testing.T) {
    assert.Equal(t, "staging job", b.Config.Resources.Jobs["job1"].Name)

    // Override values are applied in the staging environment.
    assert.Equal(t, false, b.Config.Resources.Pipelines["boolean1"].Photon)
    assert.Equal(t, true, b.Config.Resources.Pipelines["boolean2"].Photon)
    assert.False(t, b.Config.Resources.Pipelines["boolean1"].Photon)
    assert.True(t, b.Config.Resources.Pipelines["boolean2"].Photon)
}

@@ -10,11 +10,11 @@ import (

func TestJobAndPipelineDevelopmentWithEnvironment(t *testing.T) {
    b := loadTarget(t, "./environments_job_and_pipeline", "development")
    assert.Len(t, b.Config.Resources.Jobs, 0)
    assert.Empty(t, b.Config.Resources.Jobs)
    assert.Len(t, b.Config.Resources.Pipelines, 1)

    p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"]
    assert.Equal(t, b.Config.Bundle.Mode, config.Development)
    assert.Equal(t, config.Development, b.Config.Bundle.Mode)
    assert.True(t, p.Development)
    require.Len(t, p.Libraries, 1)
    assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path)

@@ -23,7 +23,7 @@ func TestJobAndPipelineDevelopmentWithEnvironment(t *testing.T) {

func TestJobAndPipelineStagingWithEnvironment(t *testing.T) {
    b := loadTarget(t, "./environments_job_and_pipeline", "staging")
    assert.Len(t, b.Config.Resources.Jobs, 0)
    assert.Empty(t, b.Config.Resources.Jobs)
    assert.Len(t, b.Config.Resources.Pipelines, 1)

    p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"]

@@ -2,7 +2,6 @@ package config_tests

import (
    "context"
    "fmt"
    "strings"
    "testing"

@@ -17,7 +16,7 @@ func TestGitAutoLoad(t *testing.T) {
    bundle.Apply(context.Background(), b, mutator.LoadGitDetails())
    assert.True(t, b.Config.Bundle.Git.Inferred)
    validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")
    assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL))
    assert.True(t, validUrl, "Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)
}

func TestGitManuallySetBranch(t *testing.T) {

@@ -26,7 +25,7 @@ func TestGitManuallySetBranch(t *testing.T) {
    assert.False(t, b.Config.Bundle.Git.Inferred)
    assert.Equal(t, "main", b.Config.Bundle.Git.Branch)
    validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")
    assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL))
    assert.True(t, validUrl, "Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)
}

func TestGitBundleBranchValidation(t *testing.T) {

@@ -35,7 +35,7 @@ func TestIssue1828(t *testing.T) {
    }

    if assert.Contains(t, b.Config.Variables, "float") {
        assert.Equal(t, 3.14, b.Config.Variables["float"].Default)
        assert.InDelta(t, 3.14, b.Config.Variables["float"].Default, 0.0001)
    }

    if assert.Contains(t, b.Config.Variables, "time") {

@@ -43,6 +43,6 @@ func TestIssue1828(t *testing.T) {
    }

    if assert.Contains(t, b.Config.Variables, "nil") {
        assert.Equal(t, nil, b.Config.Variables["nil"].Default)
        assert.Nil(t, b.Config.Variables["nil"].Default)
    }
}

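assert.InDelta replaces exact equality for the float default: the two values may differ by up to the given delta, which sidesteps spurious failures when a number round-trips through YAML or JSON as a float64. A minimal sketch, assuming only the standard library and the testify API:

package example

import (
    "encoding/json"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestFloatComparison(t *testing.T) {
    // A numeric default decoded from config arrives as float64.
    var v float64
    assert.NoError(t, json.Unmarshal([]byte("3.14"), &v))

    // Passes as long as |3.14 - v| <= 0.0001, tolerating representation noise.
    assert.InDelta(t, 3.14, v, 0.0001)
}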
@@ -10,11 +10,11 @@ import (

func TestJobAndPipelineDevelopment(t *testing.T) {
    b := loadTarget(t, "./job_and_pipeline", "development")
    assert.Len(t, b.Config.Resources.Jobs, 0)
    assert.Empty(t, b.Config.Resources.Jobs)
    assert.Len(t, b.Config.Resources.Pipelines, 1)

    p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"]
    assert.Equal(t, b.Config.Bundle.Mode, config.Development)
    assert.Equal(t, config.Development, b.Config.Bundle.Mode)
    assert.True(t, p.Development)
    require.Len(t, p.Libraries, 1)
    assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path)

@@ -23,7 +23,7 @@ func TestJobAndPipelineDevelopment(t *testing.T) {

func TestJobAndPipelineStaging(t *testing.T) {
    b := loadTarget(t, "./job_and_pipeline", "staging")
    assert.Len(t, b.Config.Resources.Jobs, 0)
    assert.Empty(t, b.Config.Resources.Jobs)
    assert.Len(t, b.Config.Resources.Pipelines, 1)

    p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"]

@@ -16,13 +16,13 @@ func TestJobClusterKeyNotDefinedTest(t *testing.T) {
    diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobClusterKeyDefined())
    require.Len(t, diags, 1)
    require.NoError(t, diags.Error())
    require.Equal(t, diags[0].Severity, diag.Warning)
    require.Equal(t, diags[0].Summary, "job_cluster_key key is not defined")
    require.Equal(t, diag.Warning, diags[0].Severity)
    require.Equal(t, "job_cluster_key key is not defined", diags[0].Summary)
}

func TestJobClusterKeyDefinedTest(t *testing.T) {
    b := loadTarget(t, "./job_cluster_key", "development")

    diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobClusterKeyDefined())
    require.Len(t, diags, 0)
    require.Empty(t, diags)
}

@@ -20,7 +20,7 @@ func assertExpected(t *testing.T, p *resources.ModelServingEndpoint) {
func TestModelServingEndpointDevelopment(t *testing.T) {
    b := loadTarget(t, "./model_serving_endpoint", "development")
    assert.Len(t, b.Config.Resources.ModelServingEndpoints, 1)
    assert.Equal(t, b.Config.Bundle.Mode, config.Development)
    assert.Equal(t, config.Development, b.Config.Bundle.Mode)

    p := b.Config.Resources.ModelServingEndpoints["my_model_serving_endpoint"]
    assert.Equal(t, "my-dev-endpoint", p.Name)

@@ -12,14 +12,14 @@ func TestOverrideTasksDev(t *testing.T) {
    assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 2)

    tasks := b.Config.Resources.Jobs["foo"].Tasks
    assert.Equal(t, tasks[0].TaskKey, "key1")
    assert.Equal(t, tasks[0].NewCluster.NodeTypeId, "i3.xlarge")
    assert.Equal(t, tasks[0].NewCluster.NumWorkers, 1)
    assert.Equal(t, tasks[0].SparkPythonTask.PythonFile, "./test1.py")
    assert.Equal(t, "key1", tasks[0].TaskKey)
    assert.Equal(t, "i3.xlarge", tasks[0].NewCluster.NodeTypeId)
    assert.Equal(t, 1, tasks[0].NewCluster.NumWorkers)
    assert.Equal(t, "./test1.py", tasks[0].SparkPythonTask.PythonFile)

    assert.Equal(t, tasks[1].TaskKey, "key2")
    assert.Equal(t, tasks[1].NewCluster.SparkVersion, "13.3.x-scala2.12")
    assert.Equal(t, tasks[1].SparkPythonTask.PythonFile, "./test2.py")
    assert.Equal(t, "key2", tasks[1].TaskKey)
    assert.Equal(t, "13.3.x-scala2.12", tasks[1].NewCluster.SparkVersion)
    assert.Equal(t, "./test2.py", tasks[1].SparkPythonTask.PythonFile)
}

func TestOverrideTasksStaging(t *testing.T) {

@@ -28,12 +28,12 @@ func TestOverrideTasksStaging(t *testing.T) {
    assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 2)

    tasks := b.Config.Resources.Jobs["foo"].Tasks
    assert.Equal(t, tasks[0].TaskKey, "key1")
    assert.Equal(t, tasks[0].NewCluster.SparkVersion, "13.3.x-scala2.12")
    assert.Equal(t, tasks[0].SparkPythonTask.PythonFile, "./test1.py")
    assert.Equal(t, "key1", tasks[0].TaskKey)
    assert.Equal(t, "13.3.x-scala2.12", tasks[0].NewCluster.SparkVersion)
    assert.Equal(t, "./test1.py", tasks[0].SparkPythonTask.PythonFile)

    assert.Equal(t, tasks[1].TaskKey, "key2")
    assert.Equal(t, tasks[1].NewCluster.NodeTypeId, "i3.2xlarge")
    assert.Equal(t, tasks[1].NewCluster.NumWorkers, 4)
    assert.Equal(t, tasks[1].SparkPythonTask.PythonFile, "./test3.py")
    assert.Equal(t, "key2", tasks[1].TaskKey)
    assert.Equal(t, "i3.2xlarge", tasks[1].NewCluster.NodeTypeId)
    assert.Equal(t, 4, tasks[1].NewCluster.NumWorkers)
    assert.Equal(t, "./test3.py", tasks[1].SparkPythonTask.PythonFile)
}

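The argument reordering above is about failure output, not behavior: testify labels the first argument "expected" and the second "actual". A minimal sketch, assuming only the testify API (taskKey is a hypothetical stand-in for the code under test):

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func taskKey() string { return "key1" } // hypothetical helper

func TestArgumentOrder(t *testing.T) {
    // With the order right, a failure reads: expected "key1", actual "key2".
    assert.Equal(t, "key1", taskKey())

    // Reversed arguments pass or fail identically, but a failure would print
    // the two values under swapped labels, misleading whoever debugs it.
    assert.Equal(t, taskKey(), "key1")
}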
@@ -13,7 +13,7 @@ func TestPresetsDev(t *testing.T) {
    assert.Equal(t, "myprefix", b.Config.Presets.NamePrefix)
    assert.Equal(t, config.Paused, b.Config.Presets.TriggerPauseStatus)
    assert.Equal(t, 10, b.Config.Presets.JobsMaxConcurrentRuns)
    assert.Equal(t, true, *b.Config.Presets.PipelinesDevelopment)
    assert.True(t, *b.Config.Presets.PipelinesDevelopment)
    assert.Equal(t, "true", b.Config.Presets.Tags["dev"])
    assert.Equal(t, "finance", b.Config.Presets.Tags["team"])
    assert.Equal(t, "false", b.Config.Presets.Tags["prod"])

@@ -22,7 +22,7 @@ func TestPresetsDev(t *testing.T) {
func TestPresetsProd(t *testing.T) {
    b := loadTarget(t, "./presets", "prod")

    assert.Equal(t, false, *b.Config.Presets.PipelinesDevelopment)
    assert.False(t, *b.Config.Presets.PipelinesDevelopment)
    assert.Equal(t, "finance", b.Config.Presets.Tags["team"])
    assert.Equal(t, "true", b.Config.Presets.Tags["prod"])
}

@@ -23,7 +23,7 @@ func TestPythonWheelBuild(t *testing.T) {

    matches, err := filepath.Glob("./python_wheel/python_wheel/my_test_code/dist/my_test_code-*.whl")
    require.NoError(t, err)
    require.Equal(t, 1, len(matches))
    require.Len(t, matches, 1)

    match := libraries.ExpandGlobReferences()
    diags = bundle.Apply(ctx, b, match)

@@ -39,7 +39,7 @@ func TestPythonWheelBuildAutoDetect(t *testing.T) {

    matches, err := filepath.Glob("./python_wheel/python_wheel_no_artifact/dist/my_test_code-*.whl")
    require.NoError(t, err)
    require.Equal(t, 1, len(matches))
    require.Len(t, matches, 1)

    match := libraries.ExpandGlobReferences()
    diags = bundle.Apply(ctx, b, match)

@@ -55,7 +55,7 @@ func TestPythonWheelBuildAutoDetectWithNotebookTask(t *testing.T) {

    matches, err := filepath.Glob("./python_wheel/python_wheel_no_artifact_notebook/dist/my_test_code-*.whl")
    require.NoError(t, err)
    require.Equal(t, 1, len(matches))
    require.Len(t, matches, 1)

    match := libraries.ExpandGlobReferences()
    diags = bundle.Apply(ctx, b, match)

@@ -108,7 +108,7 @@ func TestPythonWheelBuildWithEnvironmentKey(t *testing.T) {

    matches, err := filepath.Glob("./python_wheel/environment_key/my_test_code/dist/my_test_code-*.whl")
    require.NoError(t, err)
    require.Equal(t, 1, len(matches))
    require.Len(t, matches, 1)

    match := libraries.ExpandGlobReferences()
    diags = bundle.Apply(ctx, b, match)

@@ -124,7 +124,7 @@ func TestPythonWheelBuildMultiple(t *testing.T) {

    matches, err := filepath.Glob("./python_wheel/python_wheel_multiple/my_test_code/dist/my_test_code*.whl")
    require.NoError(t, err)
    require.Equal(t, 2, len(matches))
    require.Len(t, matches, 2)

    match := libraries.ExpandGlobReferences()
    diags = bundle.Apply(ctx, b, match)

@@ -19,7 +19,7 @@ func assertExpectedMonitor(t *testing.T, p *resources.QualityMonitor) {
func TestMonitorTableNames(t *testing.T) {
    b := loadTarget(t, "./quality_monitor", "development")
    assert.Len(t, b.Config.Resources.QualityMonitors, 1)
    assert.Equal(t, b.Config.Bundle.Mode, config.Development)
    assert.Equal(t, config.Development, b.Config.Bundle.Mode)

    p := b.Config.Resources.QualityMonitors["my_monitor"]
    assert.Equal(t, "main.test.dev", p.TableName)

@@ -19,7 +19,7 @@ func assertExpectedModel(t *testing.T, p *resources.RegisteredModel) {
func TestRegisteredModelDevelopment(t *testing.T) {
    b := loadTarget(t, "./registered_model", "development")
    assert.Len(t, b.Config.Resources.RegisteredModels, 1)
    assert.Equal(t, b.Config.Bundle.Mode, config.Development)
    assert.Equal(t, config.Development, b.Config.Bundle.Mode)

    p := b.Config.Resources.RegisteredModels["my_registered_model"]
    assert.Equal(t, "my-dev-model", p.Name)

@@ -20,26 +20,26 @@ func TestSyncIncludeExcludeNoMatchesTest(t *testing.T) {
    require.Len(t, diags, 3)
    require.NoError(t, diags.Error())

    require.Equal(t, diags[0].Severity, diag.Warning)
    require.Equal(t, diags[0].Summary, "Pattern dist does not match any files")
    require.Equal(t, diag.Warning, diags[0].Severity)
    require.Equal(t, "Pattern dist does not match any files", diags[0].Summary)

    require.Len(t, diags[0].Paths, 1)
    require.Equal(t, diags[0].Paths[0].String(), "sync.exclude[0]")
    require.Equal(t, "sync.exclude[0]", diags[0].Paths[0].String())

    assert.Len(t, diags[0].Locations, 1)
    require.Equal(t, diags[0].Locations[0].File, filepath.Join("sync", "override", "databricks.yml"))
    require.Equal(t, diags[0].Locations[0].Line, 17)
    require.Equal(t, diags[0].Locations[0].Column, 11)
    require.Equal(t, 17, diags[0].Locations[0].Line)
    require.Equal(t, 11, diags[0].Locations[0].Column)

    summaries := []string{
        fmt.Sprintf("Pattern %s does not match any files", filepath.Join("src", "*")),
        fmt.Sprintf("Pattern %s does not match any files", filepath.Join("tests", "*")),
    }

    require.Equal(t, diags[1].Severity, diag.Warning)
    require.Equal(t, diag.Warning, diags[1].Severity)
    require.Contains(t, summaries, diags[1].Summary)

    require.Equal(t, diags[2].Severity, diag.Warning)
    require.Equal(t, diag.Warning, diags[2].Severity)
    require.Contains(t, summaries, diags[2].Summary)
}

@@ -47,7 +47,7 @@ func TestSyncIncludeWithNegate(t *testing.T) {
    b := loadTarget(t, "./sync/negate", "default")

    diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.ValidateSyncPatterns())
    require.Len(t, diags, 0)
    require.Empty(t, diags)
    require.NoError(t, diags.Error())
}

@@ -58,6 +58,6 @@ func TestSyncIncludeWithNegateNoMatches(t *testing.T) {
    require.Len(t, diags, 1)
    require.NoError(t, diags.Error())

    require.Equal(t, diags[0].Severity, diag.Warning)
    require.Equal(t, diags[0].Summary, "Pattern !*.txt2 does not match any files")
    require.Equal(t, diag.Warning, diags[0].Severity)
    require.Equal(t, "Pattern !*.txt2 does not match any files", diags[0].Summary)
}

@@ -115,12 +115,12 @@ func TestSyncPathsNoRoot(t *testing.T) {
    // If set to nil, it won't sync anything.
    b = loadTarget(t, "./sync/paths_no_root", "nil")
    assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath)
    assert.Len(t, b.Config.Sync.Paths, 0)
    assert.Empty(t, b.Config.Sync.Paths)

    // If set to an empty sequence, it won't sync anything.
    b = loadTarget(t, "./sync/paths_no_root", "empty")
    assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath)
    assert.Len(t, b.Config.Sync.Paths, 0)
    assert.Empty(t, b.Config.Sync.Paths)
}

func TestSyncSharedCode(t *testing.T) {

@@ -5,6 +5,7 @@ import (
    "fmt"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config/validate"
    "github.com/databricks/cli/bundle/phases"
    "github.com/databricks/cli/bundle/render"
    "github.com/databricks/cli/cmd/bundle/utils"

@@ -71,6 +72,7 @@ func newDeployCommand() *cobra.Command {
            diags = diags.Extend(
                bundle.Apply(ctx, b, bundle.Seq(
                    phases.Initialize(),
                    validate.FastValidate(),
                    phases.Build(),
                    phases.Deploy(outputHandler),
                )),

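FastValidate slots into the deploy sequence as just another mutator between Initialize and Build. A bundle mutator is anything with a Name and an Apply that returns diagnostics; a minimal sketch of a custom one under that assumption about the interface (the validateArtifactPathSet type and its check are hypothetical, shown only to illustrate the shape):

package example

import (
    "context"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
)

// validateArtifactPathSet is a hypothetical mutator: it fails fast when the
// workspace artifact path is empty, mirroring the shape of the checks that
// validate.FastValidate() sequences ahead of the build phase.
type validateArtifactPathSet struct{}

func (v *validateArtifactPathSet) Name() string {
    return "validate:artifact_path_set"
}

func (v *validateArtifactPathSet) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    if b.Config.Workspace.ArtifactPath == "" {
        return diag.Errorf("workspace.artifact_path is not set")
    }
    return nil
}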
@@ -44,7 +44,7 @@ func TestDashboard_ErrorOnLegacyDashboard(t *testing.T) {

    _, diags := d.resolveID(ctx, b)
    require.Len(t, diags, 1)
    assert.Equal(t, diags[0].Summary, "dashboard \"legacy dashboard\" is a legacy dashboard")
    assert.Equal(t, "dashboard \"legacy dashboard\" is a legacy dashboard", diags[0].Summary)
}

func TestDashboard_ExistingID_Nominal(t *testing.T) {

@@ -3,7 +3,6 @@ package generate
import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "io"
    "io/fs"

@@ -302,7 +301,7 @@ func TestGenerateJobCommandOldFileRename(t *testing.T) {

    // Make sure the file does not exist after the run
    _, err = os.Stat(oldFilename)
    require.True(t, errors.Is(err, fs.ErrNotExist))
    require.ErrorIs(t, err, fs.ErrNotExist)

    data, err := os.ReadFile(filepath.Join(configDir, "test_job.job.yml"))
    require.NoError(t, err)

@@ -148,9 +148,9 @@ func TestEnvVarsConfigureNoInteractive(t *testing.T) {

    // We should only save host and token for a profile; other env variables should not be saved.
    _, err = defaultSection.GetKey("auth_type")
    assert.NotNil(t, err)
    assert.Error(t, err)
    _, err = defaultSection.GetKey("metadata_service_url")
    assert.NotNil(t, err)
    assert.Error(t, err)
}

func TestEnvVarsConfigureNoArgsNoInteractive(t *testing.T) {

@@ -7,14 +7,15 @@ import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestFileFromRef(t *testing.T) {
    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/databrickslabs/ucx/main/README.md" {
            _, err := w.Write([]byte(`abc`))
            require.NoError(t, err)
            if !assert.NoError(t, err) {
                return
            }
            return
        }
        t.Logf("Requested: %s", r.URL.Path)

@@ -34,7 +35,9 @@ func TestDownloadZipball(t *testing.T) {
    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/repos/databrickslabs/ucx/zipball/main" {
            _, err := w.Write([]byte(`abc`))
            require.NoError(t, err)
            if !assert.NoError(t, err) {
                return
            }
            return
        }
        t.Logf("Requested: %s", r.URL.Path)

@@ -7,14 +7,15 @@ import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestLoadsReleasesForCLI(t *testing.T) {
    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/repos/databricks/cli/releases" {
            _, err := w.Write([]byte(`[{"tag_name": "v1.2.3"}, {"tag_name": "v1.2.2"}]`))
            require.NoError(t, err)
            if !assert.NoError(t, err) {
                return
            }
            return
        }
        t.Logf("Requested: %s", r.URL.Path)

@@ -7,14 +7,13 @@ import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestRepositories(t *testing.T) {
    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/users/databrickslabs/repos" {
            _, err := w.Write([]byte(`[{"name": "x"}]`))
            require.NoError(t, err)
            assert.NoError(t, err)
            return
        }
        t.Logf("Requested: %s", r.URL.Path)

@@ -28,5 +27,5 @@ func TestRepositories(t *testing.T) {
    r := NewRepositoryCache("databrickslabs", t.TempDir())
    all, err := r.Load(ctx)
    assert.NoError(t, err)
    assert.True(t, len(all) > 0)
    assert.NotEmpty(t, all)
}

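The handler changes above swap require.NoError for assert.NoError plus an explicit return. That matters because the httptest handler runs on a different goroutine than the test: require calls t.FailNow, which must only run on the goroutine running the test, whereas assert records the failure and lets the handler return normally. A minimal sketch of the safe pattern, assuming only net/http/httptest and the testify API:

package example

import (
    "net/http"
    "net/http/httptest"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestHandlerAssertions(t *testing.T) {
    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        _, err := w.Write([]byte(`ok`))
        // Safe: assert marks the test failed without calling t.FailNow, which
        // is not allowed from a goroutine other than the one running the test.
        if !assert.NoError(t, err) {
            return
        }
    }))
    defer server.Close()

    resp, err := http.Get(server.URL)
    require.NoError(t, err) // fine here: this runs on the test goroutine
    defer resp.Body.Close()
    assert.Equal(t, http.StatusOK, resp.StatusCode)
}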
@@ -22,7 +22,7 @@ func TestCreatesDirectoryIfNeeded(t *testing.T) {
    }
    first, err := c.Load(ctx, tick)
    assert.NoError(t, err)
    assert.Equal(t, first, int64(1))
    assert.Equal(t, int64(1), first)
}

func TestImpossibleToCreateDir(t *testing.T) {

@@ -26,6 +26,7 @@ import (
    "github.com/databricks/databricks-sdk-go/service/compute"
    "github.com/databricks/databricks-sdk-go/service/iam"
    "github.com/databricks/databricks-sdk-go/service/sql"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

@@ -169,17 +170,17 @@ func TestInstallerWorksForReleases(t *testing.T) {
    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/databrickslabs/blueprint/v0.3.15/labs.yml" {
            raw, err := os.ReadFile("testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml")
            require.NoError(t, err)
            assert.NoError(t, err)
            _, err = w.Write(raw)
            require.NoError(t, err)
            assert.NoError(t, err)
            return
        }
        if r.URL.Path == "/repos/databrickslabs/blueprint/zipball/v0.3.15" {
            raw, err := zipballFromFolder("testdata/installed-in-home/.databricks/labs/blueprint/lib")
            require.NoError(t, err)
            assert.NoError(t, err)
            w.Header().Add("Content-Type", "application/octet-stream")
            _, err = w.Write(raw)
            require.NoError(t, err)
            assert.NoError(t, err)
            return
        }
        if r.URL.Path == "/api/2.1/clusters/get" {

@@ -376,17 +377,17 @@ func TestUpgraderWorksForReleases(t *testing.T) {
    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/databrickslabs/blueprint/v0.4.0/labs.yml" {
            raw, err := os.ReadFile("testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml")
            require.NoError(t, err)
            assert.NoError(t, err)
            _, err = w.Write(raw)
            require.NoError(t, err)
            assert.NoError(t, err)
            return
        }
        if r.URL.Path == "/repos/databrickslabs/blueprint/zipball/v0.4.0" {
            raw, err := zipballFromFolder("testdata/installed-in-home/.databricks/labs/blueprint/lib")
            require.NoError(t, err)
            assert.NoError(t, err)
            w.Header().Add("Content-Type", "application/octet-stream")
            _, err = w.Write(raw)
            require.NoError(t, err)
            assert.NoError(t, err)
            return
        }
        if r.URL.Path == "/api/2.1/clusters/get" {

go.mod

@@ -68,7 +68,7 @@ require (
    go.opentelemetry.io/otel/metric v1.24.0 // indirect
    go.opentelemetry.io/otel/trace v1.24.0 // indirect
    golang.org/x/crypto v0.31.0 // indirect
    golang.org/x/net v0.26.0 // indirect
    golang.org/x/net v0.33.0 // indirect
    golang.org/x/sys v0.28.0 // indirect
    golang.org/x/time v0.5.0 // indirect
    google.golang.org/api v0.182.0 // indirect

@@ -204,8 +204,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=

@@ -85,13 +85,13 @@ func TestUploadArtifactFileToCorrectRemotePath(t *testing.T) {

    // The remote path attribute on the artifact file should have been set.
    require.Regexp(t,
        regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)),
        path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`),
        b.Config.Artifacts["test"].Files[0].RemotePath,
    )

    // The task library path should have been updated to the remote path.
    require.Regexp(t,
        regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)),
        path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`),
        b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0].Whl,
    )
}

@@ -149,13 +149,13 @@ func TestUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) {

    // The remote path attribute on the artifact file should have been set.
    require.Regexp(t,
        regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)),
        path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`),
        b.Config.Artifacts["test"].Files[0].RemotePath,
    )

    // The job environment deps path should have been updated to the remote path.
    require.Regexp(t,
        regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)),
        path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`),
        b.Config.Resources.Jobs["test"].JobSettings.Environments[0].Spec.Dependencies[0],
    )
}

@@ -218,13 +218,13 @@ func TestUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) {

    // The remote path attribute on the artifact file should have been set.
    require.Regexp(t,
        regexp.MustCompile(path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`)),
        path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`),
        b.Config.Artifacts["test"].Files[0].RemotePath,
    )

    // The task library path should have been updated to the remote path.
    require.Regexp(t,
        regexp.MustCompile(path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`)),
        path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`),
        b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0].Whl,
    )
}

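Dropping regexp.MustCompile works because testify's Regexp assertions accept either a compiled *regexp.Regexp or a plain string, which they compile internally. A minimal sketch, assuming only the testify API (the remotePath value is hypothetical):

package example

import (
    "regexp"
    "testing"

    "github.com/stretchr/testify/require"
)

func TestRegexpAssertion(t *testing.T) {
    remotePath := "/Workspace/Users/someone/.internal/test.whl" // hypothetical value

    // Equivalent assertions: testify compiles the string pattern itself.
    require.Regexp(t, `\.internal/test\.whl`, remotePath)
    require.Regexp(t, regexp.MustCompile(`\.internal/test\.whl`), remotePath)
}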
@@ -257,7 +257,7 @@ func TestUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) {
    stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy")

    assert.Error(t, err)
    assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/doesnotexist does not exist: Not Found
    assert.Equal(t, fmt.Sprintf(`Error: volume main.%s.doesnotexist does not exist
  at workspace.artifact_path
  in databricks.yml:6:18

@@ -293,7 +293,7 @@ func TestUploadArtifactToVolumeNotYetDeployed(t *testing.T) {
    stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy")

    assert.Error(t, err)
    assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/my_volume does not exist: Not Found
    assert.Equal(t, fmt.Sprintf(`Error: volume main.%s.my_volume does not exist
  at workspace.artifact_path
     resources.volumes.foo
  in databricks.yml:6:18

@@ -2,7 +2,6 @@ package bundle_test

import (
    "context"
    "errors"
    "fmt"
    "io"
    "os"

@@ -99,7 +98,7 @@ func TestBundleDeployUcSchema(t *testing.T) {
    // Assert the schema is deleted
    _, err = w.Schemas.GetByFullName(ctx, strings.Join([]string{catalogName, schemaName}, "."))
    apiErr := &apierr.APIError{}
    assert.True(t, errors.As(err, &apiErr))
    assert.ErrorAs(t, err, &apiErr)
    assert.Equal(t, "SCHEMA_DOES_NOT_EXIST", apiErr.ErrorCode)
}

@@ -1,7 +1,6 @@
package bundle_test

import (
    "errors"
    "os"
    "path/filepath"
    "testing"

@@ -71,11 +70,11 @@ func TestBundleDestroy(t *testing.T) {
    // Assert snapshot file is deleted
    entries, err = os.ReadDir(snapshotsDir)
    require.NoError(t, err)
    assert.Len(t, entries, 0)
    assert.Empty(t, entries)

    // Assert bundle deployment path is deleted
    _, err = w.Workspace.GetStatusByPath(ctx, remoteRoot)
    apiErr := &apierr.APIError{}
    assert.True(t, errors.As(err, &apiErr))
    assert.ErrorAs(t, err, &apiErr)
    assert.Equal(t, "RESOURCE_DOES_NOT_EXIST", apiErr.ErrorCode)
}

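assert.ErrorAs wraps errors.As the same way assert.ErrorIs wraps errors.Is: it walks the wrapped-error chain and, on failure, prints the chain instead of a bare false. A minimal sketch, assuming only the standard library and the testify API (apiError is a hypothetical error type standing in for apierr.APIError):

package example

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
)

// apiError is a hypothetical error type standing in for apierr.APIError.
type apiError struct{ ErrorCode string }

func (e *apiError) Error() string { return e.ErrorCode }

func TestErrorAs(t *testing.T) {
    err := fmt.Errorf("get schema: %w", &apiError{ErrorCode: "SCHEMA_DOES_NOT_EXIST"})

    apiErr := &apiError{}
    // Equivalent to assert.True(t, errors.As(err, &apiErr)), but a failure
    // prints the error chain rather than a bare "false".
    if assert.ErrorAs(t, err, &apiErr) {
        assert.Equal(t, "SCHEMA_DOES_NOT_EXIST", apiErr.ErrorCode)
    }
}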
@@ -11,6 +11,7 @@ import (
    "github.com/databricks/cli/internal/testcli"
    "github.com/databricks/cli/internal/testutil"
    "github.com/databricks/cli/libs/python/pythontest"
    "github.com/databricks/cli/libs/testdiff"
    "github.com/stretchr/testify/require"
)

@@ -50,14 +51,14 @@ func testDefaultPython(t *testing.T, pythonVersion string) {
    ctx, wt := acc.WorkspaceTest(t)

    uniqueProjectId := testutil.RandomName("")
    ctx, replacements := testcli.WithReplacementsMap(ctx)
    ctx, replacements := testdiff.WithReplacementsMap(ctx)
    replacements.Set(uniqueProjectId, "$UNIQUE_PRJ")

    user, err := wt.W.CurrentUser.Me(ctx)
    require.NoError(t, err)
    require.NotNil(t, user)
    testcli.PrepareReplacementsUser(t, replacements, *user)
    testcli.PrepareReplacements(t, replacements, wt.W)
    testdiff.PrepareReplacementsUser(t, replacements, *user)
    testdiff.PrepareReplacements(t, replacements, wt.W)

    tmpDir := t.TempDir()
    testutil.Chdir(t, tmpDir)

@@ -5,7 +5,6 @@ import (
    "encoding/json"
    "io/fs"
    "path"
    "regexp"
    "strings"
    "testing"

@@ -65,11 +64,11 @@ func TestFsLs(t *testing.T) {

    assert.Equal(t, "a", parsedStdout[0]["name"])
    assert.Equal(t, true, parsedStdout[0]["is_directory"])
    assert.Equal(t, float64(0), parsedStdout[0]["size"])
    assert.InDelta(t, float64(0), parsedStdout[0]["size"], 0.0001)

    assert.Equal(t, "bye.txt", parsedStdout[1]["name"])
    assert.Equal(t, false, parsedStdout[1]["is_directory"])
    assert.Equal(t, float64(3), parsedStdout[1]["size"])
    assert.InDelta(t, float64(3), parsedStdout[1]["size"], 0.0001)
    })
}
}

@@ -99,11 +98,11 @@ func TestFsLsWithAbsolutePaths(t *testing.T) {

    assert.Equal(t, path.Join(tmpDir, "a"), parsedStdout[0]["name"])
    assert.Equal(t, true, parsedStdout[0]["is_directory"])
    assert.Equal(t, float64(0), parsedStdout[0]["size"])
    assert.InDelta(t, float64(0), parsedStdout[0]["size"], 0.0001)

    assert.Equal(t, path.Join(tmpDir, "bye.txt"), parsedStdout[1]["name"])
    assert.Equal(t, false, parsedStdout[1]["is_directory"])
    assert.Equal(t, float64(3), parsedStdout[1]["size"])
    assert.InDelta(t, float64(3), parsedStdout[1]["size"].(float64), 0.0001)
    })
}
}

@@ -122,7 +121,7 @@ func TestFsLsOnFile(t *testing.T) {
    setupLsFiles(t, f)

    _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "ls", path.Join(tmpDir, "a", "hello.txt"), "--output=json")
    assert.Regexp(t, regexp.MustCompile("not a directory: .*/a/hello.txt"), err.Error())
    assert.Regexp(t, "not a directory: .*/a/hello.txt", err.Error())
    assert.ErrorAs(t, err, &filer.NotADirectory{})
    })
}

@@ -147,7 +146,7 @@ func TestFsLsOnEmptyDir(t *testing.T) {
    require.NoError(t, err)

    // assert on ls output
    assert.Equal(t, 0, len(parsedStdout))
    assert.Empty(t, parsedStdout)
    })
}
}

@@ -166,7 +165,7 @@ func TestFsLsForNonexistingDir(t *testing.T) {

    _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "ls", path.Join(tmpDir, "nonexistent"), "--output=json")
    assert.ErrorIs(t, err, fs.ErrNotExist)
    assert.Regexp(t, regexp.MustCompile("no such directory: .*/nonexistent"), err.Error())
    assert.Regexp(t, "no such directory: .*/nonexistent", err.Error())
    })
}
}

@@ -34,7 +34,7 @@ func TestFsMkdir(t *testing.T) {
 info, err := f.Stat(context.Background(), "a")
 require.NoError(t, err)
 assert.Equal(t, "a", info.Name())
-assert.Equal(t, true, info.IsDir())
+assert.True(t, info.IsDir())
 })
 }
 }

@@ -60,19 +60,19 @@ func TestFsMkdirCreatesIntermediateDirectories(t *testing.T) {
 infoA, err := f.Stat(context.Background(), "a")
 require.NoError(t, err)
 assert.Equal(t, "a", infoA.Name())
-assert.Equal(t, true, infoA.IsDir())
+assert.True(t, infoA.IsDir())

 // assert directory "b" is created
 infoB, err := f.Stat(context.Background(), "a/b")
 require.NoError(t, err)
 assert.Equal(t, "b", infoB.Name())
-assert.Equal(t, true, infoB.IsDir())
+assert.True(t, infoB.IsDir())

 // assert directory "c" is created
 infoC, err := f.Stat(context.Background(), "a/b/c")
 require.NoError(t, err)
 assert.Equal(t, "c", infoC.Name())
-assert.Equal(t, true, infoC.IsDir())
+assert.True(t, infoC.IsDir())
 })
 }
 }
@@ -225,7 +225,7 @@ func (a *syncTest) snapshotContains(files []string) {
 assert.Equal(a.t, s.RemotePath, a.remoteRoot)
 for _, filePath := range files {
 _, ok := s.LastModifiedTimes[filePath]
-assert.True(a.t, ok, fmt.Sprintf("%s not in snapshot file: %v", filePath, s.LastModifiedTimes))
+assert.True(a.t, ok, "%s not in snapshot file: %v", filePath, s.LastModifiedTimes)
 }
 assert.Equal(a.t, len(files), len(s.LastModifiedTimes))
 }
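The change above drops the fmt.Sprintf wrapper because testify assertions take a format string plus arguments directly as their trailing msgAndArgs parameters, and only render the message when the assertion actually fails. A small standalone illustration (not repo code):

package snapshot_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestMsgAndArgs(t *testing.T) {
	times := map[string]int64{"a.txt": 1700000000}
	_, ok := times["a.txt"]

	// Before: the message string is built on every call, pass or fail:
	//   assert.True(t, ok, fmt.Sprintf("%s not in snapshot file: %v", "a.txt", times))

	// After: testify formats msgAndArgs itself, and only on failure.
	assert.True(t, ok, "%s not in snapshot file: %v", "a.txt", times)
}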
@@ -11,14 +11,14 @@ import (
 )

 // Detects if test is run from "debug test" feature in VS Code.
-func isInDebug() bool {
+func IsInDebug() bool {
 ex, _ := os.Executable()
 return strings.HasPrefix(path.Base(ex), "__debug_bin")
 }

 // Loads debug environment from ~/.databricks/debug-env.json.
 func loadDebugEnvIfRunFromIDE(t testutil.TestingT, key string) {
-if !isInDebug() {
+if !IsInDebug() {
 return
 }
 home, err := os.UserHomeDir()
@@ -21,9 +21,10 @@ type WorkspaceT struct {
 }

 func WorkspaceTest(t testutil.TestingT) (context.Context, *WorkspaceT) {
+t.Helper()
 loadDebugEnvIfRunFromIDE(t, "workspace")

-t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV"))
+t.Logf("CLOUD_ENV=%s", testutil.GetEnvOrSkipTest(t, "CLOUD_ENV"))

 w, err := databricks.NewWorkspaceClient()
 require.NoError(t, err)

@@ -41,9 +42,10 @@ func WorkspaceTest(t testutil.TestingT) (context.Context, *WorkspaceT) {

 // Run the workspace test only on UC workspaces.
 func UcWorkspaceTest(t testutil.TestingT) (context.Context, *WorkspaceT) {
+t.Helper()
 loadDebugEnvIfRunFromIDE(t, "workspace")

-t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV"))
+t.Logf("CLOUD_ENV=%s", testutil.GetEnvOrSkipTest(t, "CLOUD_ENV"))

 if os.Getenv("TEST_METASTORE_ID") == "" {
 t.Skipf("Skipping on non-UC workspaces")

@@ -67,19 +69,21 @@ func UcWorkspaceTest(t testutil.TestingT) (context.Context, *WorkspaceT) {
 }

 func (t *WorkspaceT) TestClusterID() string {
+t.Helper()
 clusterID := testutil.GetEnvOrSkipTest(t, "TEST_BRICKS_CLUSTER_ID")
 err := t.W.Clusters.EnsureClusterIsRunning(t.ctx, clusterID)
-require.NoError(t, err)
+require.NoError(t, err, "Unexpected error from EnsureClusterIsRunning for clusterID=%s", clusterID)
 return clusterID
 }

 func (t *WorkspaceT) RunPython(code string) (string, error) {
+t.Helper()
 var err error

 // Create command executor only once per test.
 if t.exec == nil {
 t.exec, err = t.W.CommandExecution.Start(t.ctx, t.TestClusterID(), compute.LanguagePython)
-require.NoError(t, err)
+require.NoError(t, err, "Unexpected error from CommandExecution.Start(clusterID=%v)", t.TestClusterID())

 t.Cleanup(func() {
 err := t.exec.Destroy(t.ctx)

@@ -88,7 +92,7 @@ func (t *WorkspaceT) RunPython(code string) (string, error) {
 }

 results, err := t.exec.Execute(t.ctx, code)
-require.NoError(t, err)
+require.NoError(t, err, "Unexpected error from Execute(%v)", code)
 require.NotEqual(t, compute.ResultTypeError, results.ResultType, results.Cause)
 output, ok := results.Data.(string)
 require.True(t, ok, "unexpected type %T", results.Data)
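Two patterns recur in the hunks above: t.Helper() marks shared helpers so that test failures are attributed to the calling test rather than to the helper itself, and require.NoError gains context arguments so a failure says which cluster or command was involved. A generic sketch of both (names are illustrative, not from this repo):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// ensureRunning is a shared helper. t.Helper() makes the testing framework
// report any failure inside it at the caller's line number.
func ensureRunning(t *testing.T, clusterID string, running bool) {
	t.Helper()
	require.True(t, running, "Unexpected state for clusterID=%s", clusterID)
}

func TestClusterHelper(t *testing.T) {
	// A failure in ensureRunning would be reported against this line.
	ensureRunning(t, "0123-456789-abcdef", true)
}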
@@ -4,6 +4,8 @@ import (
 "fmt"
 "os"
 "testing"
+
+"github.com/databricks/cli/integration/internal/acc"
 )

 // Main is the entry point for integration tests.

@@ -11,7 +13,7 @@ import (
 // they are not inadvertently executed when calling `go test ./...`.
 func Main(m *testing.M) {
 value := os.Getenv("CLOUD_ENV")
-if value == "" {
+if value == "" && !acc.IsInDebug() {
 fmt.Println("CLOUD_ENV is not set, skipping integration tests")
 return
 }
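After this change the integration entry point still skips everything when CLOUD_ENV is unset, except when the tests were launched from the VS Code debugger, which the newly exported acc.IsInDebug detects from the name of the delve-built binary. A condensed standalone sketch of the same gating (isInDebug is inlined here for illustration; package and wiring are assumptions):

package example_test

import (
	"fmt"
	"os"
	"path"
	"strings"
	"testing"
)

// Mirrors acc.IsInDebug: VS Code's "debug test" feature compiles the test
// binary with delve under a name starting with "__debug_bin".
func isInDebug() bool {
	ex, _ := os.Executable()
	return strings.HasPrefix(path.Base(ex), "__debug_bin")
}

func TestMain(m *testing.M) {
	// Skip the whole package unless CLOUD_ENV is set, except under the IDE debugger.
	if os.Getenv("CLOUD_ENV") == "" && !isInDebug() {
		fmt.Println("CLOUD_ENV is not set, skipping integration tests")
		return
	}
	os.Exit(m.Run())
}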
@@ -4,11 +4,9 @@ import (
 "bytes"
 "context"
 "encoding/json"
-"errors"
 "io"
 "io/fs"
 "path"
-"regexp"
 "strings"
 "testing"

@@ -106,7 +104,7 @@ func commonFilerRecursiveDeleteTest(t *testing.T, ctx context.Context, f filer.Filer) {
 for _, e := range entriesBeforeDelete {
 names = append(names, e.Name())
 }
-assert.Equal(t, names, []string{"file1", "file2", "subdir1", "subdir2"})
+assert.Equal(t, []string{"file1", "file2", "subdir1", "subdir2"}, names)

 err = f.Delete(ctx, "dir")
 assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{})

@@ -149,13 +147,13 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) {

 // Write should fail because the intermediate directory doesn't exist.
 err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello world`))
-assert.True(t, errors.As(err, &filer.NoSuchDirectoryError{}))
-assert.True(t, errors.Is(err, fs.ErrNotExist))
+assert.ErrorAs(t, err, &filer.NoSuchDirectoryError{})
+assert.ErrorIs(t, err, fs.ErrNotExist)

 // Read should fail because the intermediate directory doesn't yet exist.
 _, err = f.Read(ctx, "/foo/bar")
-assert.True(t, errors.As(err, &filer.FileDoesNotExistError{}))
-assert.True(t, errors.Is(err, fs.ErrNotExist))
+assert.ErrorAs(t, err, &filer.FileDoesNotExistError{})
+assert.ErrorIs(t, err, fs.ErrNotExist)

 // Read should fail because the path points to a directory
 err = f.Mkdir(ctx, "/dir")

@@ -170,8 +168,8 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) {

 // Write should fail because there is an existing file at the specified path.
 err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello universe`))
-assert.True(t, errors.As(err, &filer.FileAlreadyExistsError{}))
-assert.True(t, errors.Is(err, fs.ErrExist))
+assert.ErrorAs(t, err, &filer.FileAlreadyExistsError{})
+assert.ErrorIs(t, err, fs.ErrExist)

 // Write with OverwriteIfExists should succeed.
 err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello universe`), filer.OverwriteIfExists)

@@ -188,7 +186,7 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) {
 require.NoError(t, err)
 assert.Equal(t, "foo", info.Name())
 assert.True(t, info.Mode().IsDir())
-assert.Equal(t, true, info.IsDir())
+assert.True(t, info.IsDir())

 // Stat on a file should succeed.
 // Note: size and modification time behave differently between backends.

@@ -196,17 +194,17 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) {
 require.NoError(t, err)
 assert.Equal(t, "bar", info.Name())
 assert.True(t, info.Mode().IsRegular())
-assert.Equal(t, false, info.IsDir())
+assert.False(t, info.IsDir())

 // Delete should fail if the file doesn't exist.
 err = f.Delete(ctx, "/doesnt_exist")
 assert.ErrorAs(t, err, &filer.FileDoesNotExistError{})
-assert.True(t, errors.Is(err, fs.ErrNotExist))
+assert.ErrorIs(t, err, fs.ErrNotExist)

 // Stat should fail if the file doesn't exist.
 _, err = f.Stat(ctx, "/doesnt_exist")
 assert.ErrorAs(t, err, &filer.FileDoesNotExistError{})
-assert.True(t, errors.Is(err, fs.ErrNotExist))
+assert.ErrorIs(t, err, fs.ErrNotExist)

 // Delete should succeed for file that does exist.
 err = f.Delete(ctx, "/foo/bar")

@@ -215,7 +213,7 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) {
 // Delete should fail for a non-empty directory.
 err = f.Delete(ctx, "/foo")
 assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{})
-assert.True(t, errors.Is(err, fs.ErrInvalid))
+assert.ErrorIs(t, err, fs.ErrInvalid)

 // Delete should succeed for a non-empty directory if the DeleteRecursively flag is set.
 err = f.Delete(ctx, "/foo", filer.DeleteRecursively)

@@ -224,8 +222,8 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) {
 // Delete of the filer root should ALWAYS fail, otherwise subsequent writes would fail.
 // It is not in the filer's purview to delete its root directory.
 err = f.Delete(ctx, "/")
-assert.True(t, errors.As(err, &filer.CannotDeleteRootError{}))
-assert.True(t, errors.Is(err, fs.ErrInvalid))
+assert.ErrorAs(t, err, &filer.CannotDeleteRootError{})
+assert.ErrorIs(t, err, fs.ErrInvalid)
 }

 func TestFilerReadWrite(t *testing.T) {

@@ -262,7 +260,7 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) {
 // We start with an empty directory.
 entries, err := f.ReadDir(ctx, ".")
 require.NoError(t, err)
-assert.Len(t, entries, 0)
+assert.Empty(t, entries)

 // Write a file.
 err = f.Write(ctx, "/hello.txt", strings.NewReader(`hello world`))

@@ -282,8 +280,8 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) {

 // Expect an error if the path doesn't exist.
 _, err = f.ReadDir(ctx, "/dir/a/b/c/d/e")
-assert.True(t, errors.As(err, &filer.NoSuchDirectoryError{}), err)
-assert.True(t, errors.Is(err, fs.ErrNotExist))
+assert.ErrorAs(t, err, &filer.NoSuchDirectoryError{}, err)
+assert.ErrorIs(t, err, fs.ErrNotExist)

 // Expect two entries in the root.
 entries, err = f.ReadDir(ctx, ".")

@@ -295,7 +293,7 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) {
 assert.False(t, entries[1].IsDir())
 info, err = entries[1].Info()
 require.NoError(t, err)
-assert.Greater(t, info.ModTime().Unix(), int64(0))
+assert.Positive(t, info.ModTime().Unix())

 // Expect two entries in the directory.
 entries, err = f.ReadDir(ctx, "/dir")

@@ -307,7 +305,7 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) {
 assert.False(t, entries[1].IsDir())
 info, err = entries[1].Info()
 require.NoError(t, err)
-assert.Greater(t, info.ModTime().Unix(), int64(0))
+assert.Positive(t, info.ModTime().Unix())

 // Expect a single entry in the nested path.
 entries, err = f.ReadDir(ctx, "/dir/a/b")

@@ -325,7 +323,7 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) {
 require.NoError(t, err)
 entries, err = f.ReadDir(ctx, "empty-dir")
 assert.NoError(t, err)
-assert.Len(t, entries, 0)
+assert.Empty(t, entries)

 // Expect one entry for a directory with a file in it
 err = f.Write(ctx, "dir-with-one-file/my-file.txt", strings.NewReader("abc"), filer.CreateParentDirectories)

@@ -333,7 +331,7 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) {
 entries, err = f.ReadDir(ctx, "dir-with-one-file")
 assert.NoError(t, err)
 assert.Len(t, entries, 1)
-assert.Equal(t, entries[0].Name(), "my-file.txt")
+assert.Equal(t, "my-file.txt", entries[0].Name())
 assert.False(t, entries[0].IsDir())
 }

@@ -459,7 +457,7 @@ func TestFilerWorkspaceNotebook(t *testing.T) {
 // Assert uploading a second time fails due to overwrite mode missing
 err = f.Write(ctx, tc.name, strings.NewReader(tc.content2))
 require.ErrorIs(t, err, fs.ErrExist)
-assert.Regexp(t, regexp.MustCompile(`file already exists: .*/`+tc.nameWithoutExt+`$`), err.Error())
+assert.Regexp(t, `file already exists: .*/`+tc.nameWithoutExt+`$`, err.Error())

 // Try uploading the notebook again with overwrite flag. This time it should succeed.
 err = f.Write(ctx, tc.name, strings.NewReader(tc.content2), filer.OverwriteIfExists)
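The filer hunks above replace assert.True(t, errors.As(...)) and assert.True(t, errors.Is(...)) with assert.ErrorAs and assert.ErrorIs. They match the same errors, but on failure the dedicated assertions print the actual error instead of a bare "should be true". The pattern relies on the error type mapping onto an fs sentinel; a self-contained sketch with a hypothetical error type (loosely modeled on filer.FileDoesNotExistError, not the real one):

package example_test

import (
	"io/fs"
	"testing"

	"github.com/stretchr/testify/assert"
)

// fileDoesNotExistError is a stand-in for a filer-style error type.
type fileDoesNotExistError struct{ path string }

func (e fileDoesNotExistError) Error() string { return "file does not exist: " + e.path }

// Is maps the custom error onto the standard fs.ErrNotExist sentinel,
// so errors.Is(err, fs.ErrNotExist) succeeds.
func (e fileDoesNotExistError) Is(target error) bool { return target == fs.ErrNotExist }

func TestErrorAssertions(t *testing.T) {
	var err error = fileDoesNotExistError{path: "/foo/bar"}

	// Matches by concrete type, filling in the target on success.
	assert.ErrorAs(t, err, &fileDoesNotExistError{})

	// Matches by sentinel, following Is/Unwrap chains.
	assert.ErrorIs(t, err, fs.ErrNotExist)
}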
@@ -60,13 +60,13 @@ func TestLock(t *testing.T) {

 lockerErrs := make([]error, numConcurrentLocks)
 lockers := make([]*lockpkg.Locker, numConcurrentLocks)
-for i := 0; i < numConcurrentLocks; i++ {
+for i := range numConcurrentLocks {
 lockers[i], err = lockpkg.CreateLocker("humpty.dumpty@databricks.com", remoteProjectRoot, wsc)
 require.NoError(t, err)
 }

 var wg sync.WaitGroup
-for i := 0; i < numConcurrentLocks; i++ {
+for i := range numConcurrentLocks {
 wg.Add(1)
 currentIndex := i
 go func() {

@@ -80,7 +80,7 @@ func TestLock(t *testing.T) {
 countActive := 0
 indexOfActiveLocker := 0
 indexOfAnInactiveLocker := -1
-for i := 0; i < numConcurrentLocks; i++ {
+for i := range numConcurrentLocks {
 if lockers[i].Active {
 countActive += 1
 assert.NoError(t, lockerErrs[i])

@@ -102,7 +102,7 @@ func TestLock(t *testing.T) {
 assert.True(t, remoteLocker.AcquisitionTime.Equal(lockers[indexOfActiveLocker].State.AcquisitionTime), "remote locker acquisition time does not match active locker")

 // test all other locks (inactive ones) do not match the remote lock and Unlock fails
-for i := 0; i < numConcurrentLocks; i++ {
+for i := range numConcurrentLocks {
 if i == indexOfActiveLocker {
 continue
 }

@@ -112,7 +112,7 @@ func TestLock(t *testing.T) {
 }

 // test inactive locks fail to write a file
-for i := 0; i < numConcurrentLocks; i++ {
+for i := range numConcurrentLocks {
 if i == indexOfActiveLocker {
 continue
 }

@@ -140,7 +140,7 @@ func TestLock(t *testing.T) {
 assert.Equal(t, "Shah Rukh", res["name"])

 // inactive locker file reads fail
-for i := 0; i < numConcurrentLocks; i++ {
+for i := range numConcurrentLocks {
 if i == indexOfActiveLocker {
 continue
 }
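Every for i := 0; i < n; i++ rewrite in this commit uses Go 1.22's range-over-integer form, which iterates from 0 to n-1 and drops the manual bounds check. For example:

package main

import "fmt"

func main() {
	const numConcurrentLocks = 3

	// Go 1.22+: ranging over an integer yields 0, 1, ..., n-1.
	for i := range numConcurrentLocks {
		fmt.Println("locker", i)
	}

	// When the index is unused, it can be dropped entirely.
	for range numConcurrentLocks {
		fmt.Println("attempt")
	}
}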
@@ -266,7 +266,7 @@ func prepareRepoFiles(t *testing.T) *testFiles {

 func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask {
 tasks := make([]jobs.SubmitTask, 0)
-for i := 0; i < len(versions); i++ {
+for i := range versions {
 task := jobs.SubmitTask{
 TaskKey: fmt.Sprintf("notebook_%s", strings.ReplaceAll(versions[i], ".", "_")),
 NotebookTask: &jobs.NotebookTask{

@@ -287,7 +287,7 @@ func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask {

 func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask {
 tasks := make([]jobs.SubmitTask, 0)
-for i := 0; i < len(versions); i++ {
+for i := range versions {
 task := jobs.SubmitTask{
 TaskKey: fmt.Sprintf("spark_%s", strings.ReplaceAll(versions[i], ".", "_")),
 SparkPythonTask: &jobs.SparkPythonTask{

@@ -308,7 +308,7 @@ func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask {

 func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId string) []jobs.SubmitTask {
 tasks := make([]jobs.SubmitTask, 0)
-for i := 0; i < len(versions); i++ {
+for i := range versions {
 task := jobs.SubmitTask{
 TaskKey: fmt.Sprintf("whl_%s", strings.ReplaceAll(versions[i], ".", "_")),
 PythonWheelTask: &jobs.PythonWheelTask{
@@ -3,222 +3,28 @@ package testcli
 import (
 "context"
 "fmt"
-"os"
-"regexp"
-"slices"
-"strings"
-"testing"

 "github.com/databricks/cli/internal/testutil"
-"github.com/databricks/cli/libs/iamutil"
 "github.com/databricks/cli/libs/testdiff"
-"github.com/databricks/databricks-sdk-go"
-"github.com/databricks/databricks-sdk-go/service/iam"
 "github.com/stretchr/testify/assert"
 )

-var OverwriteMode = os.Getenv("TESTS_OUTPUT") == "OVERWRITE"
-
-func ReadFile(t testutil.TestingT, ctx context.Context, filename string) string {
-data, err := os.ReadFile(filename)
-if os.IsNotExist(err) {
-return ""
-}
-assert.NoError(t, err)
-// On CI, on Windows \n in the file somehow end up as \r\n
-return NormalizeNewlines(string(data))
-}
-
 func captureOutput(t testutil.TestingT, ctx context.Context, args []string) string {
-t.Logf("run args: [%s]", strings.Join(args, ", "))
 t.Helper()
 r := NewRunner(t, ctx, args...)
 stdout, stderr, err := r.Run()
 assert.NoError(t, err)
-out := stderr.String() + stdout.String()
-return ReplaceOutput(t, ctx, out)
-}
-
-func WriteFile(t testutil.TestingT, filename, data string) {
-t.Logf("Overwriting %s", filename)
-err := os.WriteFile(filename, []byte(data), 0o644)
-assert.NoError(t, err)
+return stderr.String() + stdout.String()
 }

 func AssertOutput(t testutil.TestingT, ctx context.Context, args []string, expectedPath string) {
-expected := ReadFile(t, ctx, expectedPath)
-
 t.Helper()
 out := captureOutput(t, ctx, args)
-
-if out != expected {
-actual := fmt.Sprintf("Output from %v", args)
-testdiff.AssertEqualTexts(t, expectedPath, actual, expected, out)
-
-if OverwriteMode {
-WriteFile(t, expectedPath, out)
-}
-}
+testdiff.AssertOutput(t, ctx, out, fmt.Sprintf("Output from %v", args), expectedPath)
 }

 func AssertOutputJQ(t testutil.TestingT, ctx context.Context, args []string, expectedPath string, ignorePaths []string) {
-expected := ReadFile(t, ctx, expectedPath)
-
 t.Helper()
 out := captureOutput(t, ctx, args)
-
-if out != expected {
-actual := fmt.Sprintf("Output from %v", args)
-testdiff.AssertEqualJQ(t.(*testing.T), expectedPath, actual, expected, out, ignorePaths)
-
-if OverwriteMode {
-WriteFile(t, expectedPath, out)
-}
-}
-}
-
-var (
-uuidRegex = regexp.MustCompile(`[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}`)
-numIdRegex = regexp.MustCompile(`[0-9]{3,}`)
-privatePathRegex = regexp.MustCompile(`(/tmp|/private)(/.*)/([a-zA-Z0-9]+)`)
-)
-
-func ReplaceOutput(t testutil.TestingT, ctx context.Context, out string) string {
-out = NormalizeNewlines(out)
-replacements := GetReplacementsMap(ctx)
-if replacements == nil {
-t.Fatal("WithReplacementsMap was not called")
-}
-out = replacements.Replace(out)
-out = uuidRegex.ReplaceAllString(out, "<UUID>")
-out = numIdRegex.ReplaceAllString(out, "<NUMID>")
-out = privatePathRegex.ReplaceAllString(out, "/tmp/.../$3")
-
-return out
-}
-
-type key int
-
-const (
-replacementsMapKey = key(1)
-)
-
-type Replacement struct {
-Old string
-New string
-}
-
-type ReplacementsContext struct {
-Repls []Replacement
-}
-
-func (r *ReplacementsContext) Replace(s string) string {
-// QQQ Should probably only replace whole words
-for _, repl := range r.Repls {
-s = strings.ReplaceAll(s, repl.Old, repl.New)
-}
-return s
-}
-
-func (r *ReplacementsContext) Set(old, new string) {
-if old == "" || new == "" {
-return
-}
-r.Repls = append(r.Repls, Replacement{Old: old, New: new})
-}
-
-func WithReplacementsMap(ctx context.Context) (context.Context, *ReplacementsContext) {
-value := ctx.Value(replacementsMapKey)
-if value != nil {
-if existingMap, ok := value.(*ReplacementsContext); ok {
-return ctx, existingMap
-}
-}
-
-newMap := &ReplacementsContext{}
-ctx = context.WithValue(ctx, replacementsMapKey, newMap)
-return ctx, newMap
-}
-
-func GetReplacementsMap(ctx context.Context) *ReplacementsContext {
-value := ctx.Value(replacementsMapKey)
-if value != nil {
-if existingMap, ok := value.(*ReplacementsContext); ok {
-return existingMap
-}
-}
-return nil
-}
-
-func PrepareReplacements(t testutil.TestingT, r *ReplacementsContext, w *databricks.WorkspaceClient) {
-// in some clouds (gcp) w.Config.Host includes "https://" prefix in others it's really just a host (azure)
-host := strings.TrimPrefix(strings.TrimPrefix(w.Config.Host, "http://"), "https://")
-r.Set(host, "$DATABRICKS_HOST")
-r.Set(w.Config.ClusterID, "$DATABRICKS_CLUSTER_ID")
-r.Set(w.Config.WarehouseID, "$DATABRICKS_WAREHOUSE_ID")
-r.Set(w.Config.ServerlessComputeID, "$DATABRICKS_SERVERLESS_COMPUTE_ID")
-r.Set(w.Config.MetadataServiceURL, "$DATABRICKS_METADATA_SERVICE_URL")
-r.Set(w.Config.AccountID, "$DATABRICKS_ACCOUNT_ID")
-r.Set(w.Config.Token, "$DATABRICKS_TOKEN")
-r.Set(w.Config.Username, "$DATABRICKS_USERNAME")
-r.Set(w.Config.Password, "$DATABRICKS_PASSWORD")
-r.Set(w.Config.Profile, "$DATABRICKS_CONFIG_PROFILE")
-r.Set(w.Config.ConfigFile, "$DATABRICKS_CONFIG_FILE")
-r.Set(w.Config.GoogleServiceAccount, "$DATABRICKS_GOOGLE_SERVICE_ACCOUNT")
-r.Set(w.Config.GoogleCredentials, "$GOOGLE_CREDENTIALS")
-r.Set(w.Config.AzureResourceID, "$DATABRICKS_AZURE_RESOURCE_ID")
-r.Set(w.Config.AzureClientSecret, "$ARM_CLIENT_SECRET")
-// r.Set(w.Config.AzureClientID, "$ARM_CLIENT_ID")
-r.Set(w.Config.AzureClientID, "$USERNAME")
-r.Set(w.Config.AzureTenantID, "$ARM_TENANT_ID")
-r.Set(w.Config.ActionsIDTokenRequestURL, "$ACTIONS_ID_TOKEN_REQUEST_URL")
-r.Set(w.Config.ActionsIDTokenRequestToken, "$ACTIONS_ID_TOKEN_REQUEST_TOKEN")
-r.Set(w.Config.AzureEnvironment, "$ARM_ENVIRONMENT")
-r.Set(w.Config.ClientID, "$DATABRICKS_CLIENT_ID")
-r.Set(w.Config.ClientSecret, "$DATABRICKS_CLIENT_SECRET")
-r.Set(w.Config.DatabricksCliPath, "$DATABRICKS_CLI_PATH")
-// This is set to words like "path" that happen too frequently
-// r.Set(w.Config.AuthType, "$DATABRICKS_AUTH_TYPE")
-}
-
-func PrepareReplacementsUser(t testutil.TestingT, r *ReplacementsContext, u iam.User) {
-// There could be exact matches or overlap between different name fields, so sort them by length
-// to ensure we match the largest one first and map them all to the same token
-names := []string{
-u.DisplayName,
-u.UserName,
-iamutil.GetShortUserName(&u),
-u.Name.FamilyName,
-u.Name.GivenName,
-}
-if u.Name != nil {
-names = append(names, u.Name.FamilyName)
-names = append(names, u.Name.GivenName)
-}
-for _, val := range u.Emails {
-names = append(names, val.Value)
-}
-stableSortReverseLength(names)
-
-for _, name := range names {
-r.Set(name, "$USERNAME")
-}
-
-for ind, val := range u.Groups {
-r.Set(val.Value, fmt.Sprintf("$USER.Groups[%d]", ind))
-}
-
-r.Set(u.Id, "$USER.Id")
-
-for ind, val := range u.Roles {
-r.Set(val.Value, fmt.Sprintf("$USER.Roles[%d]", ind))
-}
-}
-
-func stableSortReverseLength(strs []string) {
-slices.SortStableFunc(strs, func(a, b string) int {
-return len(b) - len(a)
-})
-}
-
-func NormalizeNewlines(input string) string {
-output := strings.ReplaceAll(input, "\r\n", "\n")
-return strings.ReplaceAll(output, "\r", "\n")
+testdiff.AssertOutputJQ(t, ctx, out, fmt.Sprintf("Output from %v", args), expectedPath, ignorePaths)
 }
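This hunk is the core of the commit: the OVERWRITE mode, golden-file reading, the ReplacementsContext, and the Prepare* helpers all move out of testcli into libs/testdiff, and the two Assert helpers shrink to one-line delegations. The underlying mechanism is a replacement table carried on the context, so every helper scrubs nondeterministic output (hosts, user names, IDs) the same way before comparing against golden files. A condensed sketch of that mechanism (simplified, lowercase names; not the testdiff package's exact code):

package main

import (
	"context"
	"fmt"
	"strings"
)

type replacement struct{ old, new string }

// replacementsContext accumulates old->new pairs to scrub from output.
type replacementsContext struct{ repls []replacement }

func (r *replacementsContext) Set(old, new string) {
	if old == "" || new == "" {
		return
	}
	r.repls = append(r.repls, replacement{old, new})
}

func (r *replacementsContext) Replace(s string) string {
	for _, repl := range r.repls {
		s = strings.ReplaceAll(s, repl.old, repl.new)
	}
	return s
}

type ctxKey int

const replacementsKey ctxKey = 1

// withReplacementsMap attaches a shared table to the context, returning the
// existing one if already attached, so all helpers see the same pairs.
func withReplacementsMap(ctx context.Context) (context.Context, *replacementsContext) {
	if r, ok := ctx.Value(replacementsKey).(*replacementsContext); ok {
		return ctx, r
	}
	r := &replacementsContext{}
	return context.WithValue(ctx, replacementsKey, r), r
}

func main() {
	ctx, repls := withReplacementsMap(context.Background())
	repls.Set("RaNdOm12345", "$UNIQUE_PRJ")

	_, same := withReplacementsMap(ctx) // second call returns the same table
	fmt.Println(same.Replace("created project RaNdOm12345"))
	// prints: created project $UNIQUE_PRJ
}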
@@ -69,6 +69,7 @@ func consumeLines(ctx context.Context, wg *sync.WaitGroup, r io.Reader) <-chan string {
 }

 func (r *Runner) registerFlagCleanup(c *cobra.Command) {
+r.Helper()
 // Find target command that will be run. Example: if the command run is `databricks fs cp`,
 // target command corresponds to `cp`
 targetCmd, _, err := c.Find(r.args)

@@ -230,13 +231,48 @@ func (r *Runner) RunBackground() {
 }

 func (r *Runner) Run() (bytes.Buffer, bytes.Buffer, error) {
-r.RunBackground()
-err := <-r.errch
-return r.stdout, r.stderr, err
+r.Helper()
+var stdout, stderr bytes.Buffer
+ctx := cmdio.NewContext(r.ctx, &cmdio.Logger{
+Mode: flags.ModeAppend,
+Reader: bufio.Reader{},
+Writer: &stderr,
+})
+
+cli := cmd.New(ctx)
+cli.SetOut(&stdout)
+cli.SetErr(&stderr)
+cli.SetArgs(r.args)
+
+r.Logf(" args: %s", strings.Join(r.args, ", "))
+
+err := root.Execute(ctx, cli)
+if err != nil {
+r.Logf(" error: %s", err)
+}
+
+if stdout.Len() > 0 {
+// Make a copy of the buffer such that it remains "unread".
+scanner := bufio.NewScanner(bytes.NewBuffer(stdout.Bytes()))
+for scanner.Scan() {
+r.Logf("stdout: %s", scanner.Text())
+}
+}
+
+if stderr.Len() > 0 {
+// Make a copy of the buffer such that it remains "unread".
+scanner := bufio.NewScanner(bytes.NewBuffer(stderr.Bytes()))
+for scanner.Scan() {
+r.Logf("stderr: %s", scanner.Text())
+}
+}
+
+return stdout, stderr, err
 }

 // Like [require.Eventually] but errors if the underlying command has failed.
 func (r *Runner) Eventually(condition func() bool, waitFor, tick time.Duration, msgAndArgs ...any) {
+r.Helper()
 ch := make(chan bool, 1)

 timer := time.NewTimer(waitFor)

@@ -269,12 +305,14 @@ func (r *Runner) Eventually(condition func() bool, waitFor, tick time.Duration, msgAndArgs ...any) {
 }

 func (r *Runner) RunAndExpectOutput(heredoc string) {
+r.Helper()
 stdout, _, err := r.Run()
 require.NoError(r, err)
 require.Equal(r, cmdio.Heredoc(heredoc), strings.TrimSpace(stdout.String()))
 }

 func (r *Runner) RunAndParseJSON(v any) {
+r.Helper()
 stdout, _, err := r.Run()
 require.NoError(r, err)
 err = json.Unmarshal(stdout.Bytes(), &v)

@@ -291,7 +329,7 @@ func NewRunner(t testutil.TestingT, ctx context.Context, args ...string) *Runner {
 }

 func RequireSuccessfulRun(t testutil.TestingT, ctx context.Context, args ...string) (bytes.Buffer, bytes.Buffer) {
-t.Logf("run args: [%s]", strings.Join(args, ", "))
+t.Helper()
 r := NewRunner(t, ctx, args...)
 stdout, stderr, err := r.Run()
 require.NoError(t, err)

@@ -299,6 +337,7 @@ func RequireSuccessfulRun(t testutil.TestingT, ctx context.Context, args ...string) (bytes.Buffer, bytes.Buffer) {
 }

 func RequireErrorRun(t testutil.TestingT, ctx context.Context, args ...string) (bytes.Buffer, bytes.Buffer, error) {
+t.Helper()
 r := NewRunner(t, ctx, args...)
 stdout, stderr, err := r.Run()
 require.Error(t, err)
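The rewritten Runner.Run stops draining a background goroutine and instead executes the CLI in-process: it builds the root command, points its output streams at fresh buffers, injects the arguments, and logs whatever was captured. The same capture technique works for any cobra-based CLI; a minimal standalone sketch (the demo command and helper names are invented):

package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/cobra"
)

// runInProcess executes a cobra command tree without spawning a subprocess,
// returning whatever it wrote to stdout and stderr.
func runInProcess(root *cobra.Command, args ...string) (string, string, error) {
	var stdout, stderr bytes.Buffer
	root.SetOut(&stdout)
	root.SetErr(&stderr)
	root.SetArgs(args)
	err := root.Execute()
	return stdout.String(), stderr.String(), err
}

func main() {
	root := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			cmd.Println("hello") // goes to the configured buffer, not os.Stdout
			return nil
		},
	}

	out, _, err := runInProcess(root)
	fmt.Printf("captured: %q, err: %v\n", out, err)
}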
@@ -5,6 +5,9 @@ import (
 "math/rand"
 "os"
 "strings"
+"time"
+
+"github.com/stretchr/testify/require"
 )

 // GetEnvOrSkipTest proceeds with test only with that env variable.

@@ -30,3 +33,12 @@ func RandomName(prefix ...string) string {
 }
 return string(b)
 }
+
+func SkipUntil(t TestingT, date string) {
+deadline, err := time.Parse(time.DateOnly, date)
+require.NoError(t, err)
+
+if time.Now().Before(deadline) {
+t.Skipf("Skipping test until %s. Time right now: %s", deadline.Format(time.DateOnly), time.Now())
+}
+}
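SkipUntil gives a flaky or blocked test an expiry date: it parses the date with time.DateOnly and skips only while the current time is before it, so the mute lifts automatically. Inside this repo's test packages it would be used roughly like this (the test name and date are illustrative):

package example_test

import (
	"testing"

	"github.com/databricks/cli/internal/testutil"
)

func TestKnownFlake(t *testing.T) {
	// Skipped until the date passes; after that the test runs again,
	// forcing someone to either fix it or extend the mute.
	testutil.SkipUntil(t, "2025-03-01")

	// ... actual test body ...
}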
@@ -24,4 +24,6 @@ type TestingT interface {
 Setenv(key, value string)

 TempDir() string
+
+Helper()
 }
@@ -42,7 +42,7 @@ func TestStoreAndLookup(t *testing.T) {
 tok, err := l.Lookup("x")
 require.NoError(t, err)
 assert.Equal(t, "abc", tok.AccessToken)
-assert.Equal(t, 2, len(l.Tokens))
+assert.Len(t, l.Tokens, 2)

 _, err = l.Lookup("z")
 assert.Equal(t, ErrNotConfigured, err)
@@ -39,7 +39,7 @@ func Heredoc(tmpl string) (trimmed string) {
 break
 }
 }
-for i := 0; i < len(lines); i++ {
+for i := range lines {
 if lines[i] == "" || strings.TrimSpace(lines[i]) == "" {
 continue
 }
@@ -55,7 +55,7 @@ func (d *dummyIterator) Next(ctx context.Context) (*provisioning.Workspace, error) {
 func makeWorkspaces(count int) []*provisioning.Workspace {
 res := make([]*provisioning.Workspace, 0, count)
 next := []*provisioning.Workspace{&dummyWorkspace1, &dummyWorkspace2}
-for i := 0; i < count; i++ {
+for range count {
 n := next[0]
 next = append(next[1:], n)
 res = append(res, n)

@@ -74,7 +74,7 @@ func makeIterator(count int) listing.Iterator[*provisioning.Workspace] {
 func makeBigOutput(count int) string {
 res := bytes.Buffer{}
 for _, ws := range makeWorkspaces(count) {
-res.Write([]byte(fmt.Sprintf("%d %s\n", ws.WorkspaceId, ws.WorkspaceName)))
+res.WriteString(fmt.Sprintf("%d %s\n", ws.WorkspaceId, ws.WorkspaceName))
 }
 return res.String()
 }
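The last change swaps res.Write([]byte(fmt.Sprintf(...))) for res.WriteString(fmt.Sprintf(...)), dropping the intermediate []byte conversion; fmt.Fprintf goes one step further and formats straight into the buffer. For example:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var res bytes.Buffer

	// WriteString avoids allocating a []byte copy of the formatted string.
	res.WriteString(fmt.Sprintf("%d %s\n", 42, "my-workspace"))

	// fmt.Fprintf skips the intermediate string entirely.
	fmt.Fprintf(&res, "%d %s\n", 43, "other-workspace")

	fmt.Print(res.String())
}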