mirror of https://github.com/databricks/cli.git
Merge branch 'main' into feature/logout
This commit is contained in:
commit ca08796f77
@@ -0,0 +1,56 @@
name: PR Comment

# WARNING:
# THIS WORKFLOW ALWAYS RUNS FOR EXTERNAL CONTRIBUTORS WITHOUT ANY APPROVAL.
# THIS WORKFLOW RUNS FROM MAIN BRANCH, NOT FROM THE PR BRANCH.
# DO NOT PULL THE PR OR EXECUTE ANY CODE FROM THE PR.

on:
  pull_request_target:
    types: [opened, reopened, synchronize]
    branches:
      - main

jobs:
  comment-on-pr:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write

    steps:
      - uses: actions/checkout@v4

      - name: Delete old comments
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Delete previous comment if it exists
          previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \
            --jq '.[] | select(.body | startswith("<!-- INTEGRATION_TESTS_MANUAL -->")) | .id')
          echo "Previous comment IDs: $previous_comment_ids"
          # Iterate over each comment ID and delete the comment
          if [ ! -z "$previous_comment_ids" ]; then
            echo "$previous_comment_ids" | while read -r comment_id; do
              echo "Deleting comment with ID: $comment_id"
              gh api "repos/${{ github.repository }}/issues/comments/$comment_id" -X DELETE
            done
          fi

      - name: Comment on PR
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
        run: |
          gh pr comment ${{ github.event.pull_request.number }} --body \
          "<!-- INTEGRATION_TESTS_MANUAL -->
          If integration tests don't run automatically, an authorized user can run them manually by following the instructions below:

          Trigger:
          [go/deco-tests-run/cli](https://go/deco-tests-run/cli)

          Inputs:
          * PR number: ${{github.event.pull_request.number}}
          * Commit SHA: \`${{ env.COMMIT_SHA }}\`

          Checks will be approved automatically on success.
          "
@@ -0,0 +1,78 @@
name: integration

on:
  pull_request:
    types: [opened, synchronize]

  merge_group:


jobs:
  check-token:
    runs-on: ubuntu-latest
    environment: "test-trigger-is"
    outputs:
      has_token: ${{ steps.set-token-status.outputs.has_token }}
    steps:
      - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
        id: set-token-status
        run: |
          if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
            echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets."
            echo "::set-output name=has_token::false"
          else
            echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets."
            echo "::set-output name=has_token::true"
          fi

  trigger-tests:
    runs-on: ubuntu-latest
    needs: check-token
    if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true'
    environment: "test-trigger-is"

    steps:
      - uses: actions/checkout@v4

      - name: Generate GitHub App Token
        id: generate-token
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}
          private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }}
          owner: ${{ secrets.ORG_NAME }}
          repositories: ${{ secrets.REPO_NAME }}

      - name: Trigger Workflow in Another Repo
        env:
          GH_TOKEN: ${{ steps.generate-token.outputs.token }}
        run: |
          gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{ secrets.REPO_NAME }} \
            --ref main \
            -f pull_request_number=${{ github.event.pull_request.number }} \
            -f commit_sha=${{ github.event.pull_request.head.sha }}


  # Statuses and checks apply to specific commits (by hash).
  # Enforcement of required checks is done both at the PR level and the merge queue level.
  # In case of multiple commits in a single PR, the hash of the squashed commit
  # will not match the one for the latest (approved) commit in the PR.
  # We auto-approve the check for the merge queue for two reasons:
  # * The queue times out due to the duration of the tests.
  # * It avoids running the integration tests twice, since they already ran at the tip of the branch before squashing.
  auto-approve:
    if: github.event_name == 'merge_group'
    runs-on: ubuntu-latest
    steps:
      - name: Mark Check
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        shell: bash
        run: |
          gh api -X POST -H "Accept: application/vnd.github+json" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/${{ github.repository }}/statuses/${{ github.sha }} \
            -f 'state=success' \
            -f 'context=Integration Tests Check'
@@ -33,7 +33,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.7
+          go-version: 1.23.2

       - name: Setup Python
         uses: actions/setup-python@v5

@@ -68,7 +68,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.7
+          go-version: 1.23.2
           # No need to download cached dependencies when running gofmt.
           cache: false

@@ -100,7 +100,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.7
+          go-version: 1.23.2

       # Github repo: https://github.com/ajv-validator/ajv-cli
       - name: Install ajv-cli
@@ -6,6 +6,15 @@ on:
       - "main"
       - "demo-*"

+  # Confirm that snapshot builds work if this file is modified.
+  pull_request:
+    types:
+      - opened
+      - synchronize
+      - reopened
+    paths:
+      - ".github/workflows/release-snapshot.yml"
+
   workflow_dispatch:

 jobs:

@@ -21,7 +30,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.7
+          go-version: 1.23.2

       # The default cache key for this action considers only the `go.sum` file.
       # We include .goreleaser.yaml here to differentiate from the cache used by the push action
@@ -22,7 +22,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.7
+          go-version: 1.23.2

       # The default cache key for this action considers only the `go.sum` file.
       # We include .goreleaser.yaml here to differentiate from the cache used by the push action

@@ -63,7 +63,7 @@ jobs:
           echo "VERSION=${VERSION:1}" >> $GITHUB_ENV

       - name: Update setup-cli
-        uses: actions/github-script@v6
+        uses: actions/github-script@v7
         with:
           github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
           script: |

@@ -87,7 +87,7 @@ jobs:
           echo "VERSION=${VERSION:1}" >> $GITHUB_ENV

       - name: Update homebrew-tap
-        uses: actions/github-script@v6
+        uses: actions/github-script@v7
         with:
           github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
           script: |

@@ -124,7 +124,7 @@ jobs:
           echo "VERSION=${VERSION:1}" >> $GITHUB_ENV

       - name: Update CLI version in the VSCode extension
-        uses: actions/github-script@v6
+        uses: actions/github-script@v7
         with:
           github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
           script: |
@@ -95,7 +95,7 @@ checksum:
   algorithm: sha256

 snapshot:
-  name_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}'
+  version_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}'

 changelog:
   sort: asc
CHANGELOG.md
@@ -1,5 +1,100 @@
# Version changelog

## [Release] Release v0.233.0

CLI:
 * Clean host URL in the `auth login` command ([#1879](https://github.com/databricks/cli/pull/1879)).

Bundles:
 * Fix bundle run when run interactively ([#1880](https://github.com/databricks/cli/pull/1880)).
 * Fix relative path resolution for dashboards on Windows ([#1881](https://github.com/databricks/cli/pull/1881)).

Internal:
 * Address goreleaser deprecation warning ([#1872](https://github.com/databricks/cli/pull/1872)).
 * Update actions/github-script to v7 ([#1873](https://github.com/databricks/cli/pull/1873)).
 * Use Go 1.23 ([#1871](https://github.com/databricks/cli/pull/1871)).
 * [Internal] Always write message for manual integration test trigger ([#1874](https://github.com/databricks/cli/pull/1874)).
 * Add `cmd-exec-id` to user agent ([#1808](https://github.com/databricks/cli/pull/1808)).
 * Added E2E test to run Python wheels on interactive cluster created in bundle ([#1864](https://github.com/databricks/cli/pull/1864)).

Dependency updates:
 * Bump github.com/hashicorp/terraform-json from 0.22.1 to 0.23.0 ([#1877](https://github.com/databricks/cli/pull/1877)).

## [Release] Release v0.232.1

This patch release fixes the following error observed when deploying to the /Shared root folder:
"Error: Path (/Shared/.bundle/.../resources) doesn't exist"

Bundles:
 * Fixed adding /Workspace prefix for resource paths ([#1866](https://github.com/databricks/cli/pull/1866)).

## [Release] Release v0.232.0

**New features for Databricks Asset Bundles:**

This release adds support for managing AI/BI dashboards as part of your bundle configuration. The `bundle generate` command is updated to support producing dashboard bundle configuration as well as a serialized JSON representation of the dashboard.
You can find an example configuration and walkthrough at https://github.com/databricks/bundle-examples/tree/main/knowledge_base/dashboard_nyc_taxi

CLI:
 * Add privacy notice to README ([#1841](https://github.com/databricks/cli/pull/1841)).

Bundles:
 * Add support for AI/BI dashboards ([#1743](https://github.com/databricks/cli/pull/1743)).
 * Added validator for folder permissions ([#1824](https://github.com/databricks/cli/pull/1824)).
 * Add bundle generate variant for dashboards ([#1847](https://github.com/databricks/cli/pull/1847)).
 * Use SetPermissions instead of UpdatePermissions when setting folder permissions based on top-level ones ([#1822](https://github.com/databricks/cli/pull/1822)).

Internal:
 * Attempt to reduce test flakiness on Windows ([#1845](https://github.com/databricks/cli/pull/1845)).
 * Reuse resource resolution code for the run command ([#1858](https://github.com/databricks/cli/pull/1858)).
 * [Internal] Automatically trigger integration tests on PR ([#1857](https://github.com/databricks/cli/pull/1857)).
 * [Internal] Add test instructions for external contributors ([#1863](https://github.com/databricks/cli/pull/1863)).
 * Add `libs/dyn/jsonsaver` ([#1862](https://github.com/databricks/cli/pull/1862)).

Dependency updates:
 * Bump github.com/fatih/color from 1.17.0 to 1.18.0 ([#1861](https://github.com/databricks/cli/pull/1861)).

## [Release] Release v0.231.0

CLI:
 * Added JSON input validation for CLI commands ([#1771](https://github.com/databricks/cli/pull/1771)).
 * Support Git worktrees for `sync` ([#1831](https://github.com/databricks/cli/pull/1831)).

Bundles:
 * Add `bundle summary` to display URLs for deployed resources ([#1731](https://github.com/databricks/cli/pull/1731)).
 * Added a warning when incorrect permissions are used for the `/Workspace/Shared` bundle root ([#1821](https://github.com/databricks/cli/pull/1821)).
 * Show actionable errors for collaborative deployment scenarios ([#1386](https://github.com/databricks/cli/pull/1386)).
 * Fix path to repository-wide exclude file ([#1837](https://github.com/databricks/cli/pull/1837)).
 * Fixed typo in converting cluster permissions ([#1826](https://github.com/databricks/cli/pull/1826)).
 * Ignore metastore permission error during template generation ([#1819](https://github.com/databricks/cli/pull/1819)).
 * Handle normalization of `dyn.KindTime` into an any type ([#1836](https://github.com/databricks/cli/pull/1836)).
 * Added support for pip options in environment dependencies ([#1842](https://github.com/databricks/cli/pull/1842)).
 * Fix race condition when restarting continuous jobs ([#1849](https://github.com/databricks/cli/pull/1849)).
 * Fix pipeline in default-python template not working for certain workspaces ([#1854](https://github.com/databricks/cli/pull/1854)).
 * Add "output" flag to the bundle sync command ([#1853](https://github.com/databricks/cli/pull/1853)).

Internal:
 * Move utility functions dealing with IAM to libs/iamutil ([#1820](https://github.com/databricks/cli/pull/1820)).
 * Remove unused `IS_OWNER` constant ([#1823](https://github.com/databricks/cli/pull/1823)).
 * Assert SDK version is consistent in the CLI generation process ([#1814](https://github.com/databricks/cli/pull/1814)).
 * Fixed unmarshalling json input into `interface{}` type ([#1832](https://github.com/databricks/cli/pull/1832)).
 * Fix `TestAccFsMkdirWhenFileExistsAtPath` in isolated Azure environments ([#1833](https://github.com/databricks/cli/pull/1833)).
 * Add behavioral tests for examples from the YAML spec ([#1835](https://github.com/databricks/cli/pull/1835)).
 * Remove Terraform conversion function that's no longer used ([#1840](https://github.com/databricks/cli/pull/1840)).
 * Encode assumptions about the dashboards API in a test ([#1839](https://github.com/databricks/cli/pull/1839)).
 * Add script to make testing of code on branches easier ([#1844](https://github.com/databricks/cli/pull/1844)).

API Changes:
 * Added `databricks disable-legacy-dbfs` command group.

OpenAPI commit cf9c61453990df0f9453670f2fe68e1b128647a2 (2024-10-14)
Dependency updates:
 * Upgrade TF provider to 1.54.0 ([#1852](https://github.com/databricks/cli/pull/1852)).
 * Bump github.com/databricks/databricks-sdk-go from 0.48.0 to 0.49.0 ([#1843](https://github.com/databricks/cli/pull/1843)).

## [Release] Release v0.230.0

Notable changes for Databricks Asset Bundles:
@@ -35,3 +35,6 @@ docker run -e DATABRICKS_HOST=$YOUR_HOST_URL -e DATABRICKS_TOKEN=$YOUR_TOKEN ghc
 This CLI follows the Databricks Unified Authentication principles.

 You can find a detailed description at https://github.com/databricks/databricks-sdk-go#authentication.
+
+## Privacy Notice
+Databricks CLI use is subject to the [Databricks License](https://github.com/databricks/cli/blob/main/LICENSE) and [Databricks Privacy Notice](https://www.databricks.com/legal/privacynotice), including any Usage Data provisions.
@@ -0,0 +1,18 @@
package generate

import (
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/databricks-sdk-go/service/dashboards"
)

func ConvertDashboardToValue(dashboard *dashboards.Dashboard, filePath string) (dyn.Value, error) {
    // The majority of fields of the dashboard struct are read-only.
    // We copy the relevant fields manually.
    dv := map[string]dyn.Value{
        "display_name": dyn.NewValue(dashboard.DisplayName, []dyn.Location{{Line: 1}}),
        "warehouse_id": dyn.NewValue(dashboard.WarehouseId, []dyn.Location{{Line: 2}}),
        "file_path":    dyn.NewValue(filePath, []dyn.Location{{Line: 3}}),
    }

    return dyn.V(dv), nil
}
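For orientation, a minimal sketch (not part of this diff) of how the returned value can be queried with the same dyn helpers this change uses elsewhere. The dashboard field values and the import path of the generate package are assumptions for illustration:

package generate_test

import (
    "fmt"

    "github.com/databricks/cli/bundle/config/generate" // assumed import path
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/databricks-sdk-go/service/dashboards"
)

func ExampleConvertDashboardToValue() {
    // Assumed example values; only display_name, warehouse_id, and
    // file_path are copied into the resulting dyn.Value.
    v, err := generate.ConvertDashboardToValue(&dashboards.Dashboard{
        DisplayName: "NYC Taxi Trip Analysis",
        WarehouseId: "w123",
    }, "src/nyc_taxi.lvdash.json")
    if err != nil {
        panic(err)
    }
    name, _ := dyn.Get(v, "display_name")
    fmt.Println(name.MustString())
    // Output: NYC Taxi Trip Analysis
}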
@@ -212,6 +212,15 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		}
 	}

+	// Dashboards: Prefix
+	for key, dashboard := range r.Dashboards {
+		if dashboard == nil || dashboard.CreateDashboardRequest == nil {
+			diags = diags.Extend(diag.Errorf("dashboard %s is not defined", key))
+			continue
+		}
+		dashboard.DisplayName = prefix + dashboard.DisplayName
+	}
+
 	return diags
 }
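A quick illustration of the prefixing loop above (values assumed; the development-mode prefix shown matches the test expectations later in this diff):

// Sketch: prefixing a single dashboard entry.
d := &resources.Dashboard{
    CreateDashboardRequest: &dashboards.CreateDashboardRequest{
        DisplayName: "dashboard1",
    },
}
prefix := "[dev lennart] " // assumed preset value for a development target
d.DisplayName = prefix + d.DisplayName // "dashboard1" -> "[dev lennart] dashboard1"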
@@ -0,0 +1,70 @@
package mutator

import (
    "context"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/dyn"
)

type configureDashboardDefaults struct{}

func ConfigureDashboardDefaults() bundle.Mutator {
    return &configureDashboardDefaults{}
}

func (m *configureDashboardDefaults) Name() string {
    return "ConfigureDashboardDefaults"
}

func (m *configureDashboardDefaults) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    var diags diag.Diagnostics

    pattern := dyn.NewPattern(
        dyn.Key("resources"),
        dyn.Key("dashboards"),
        dyn.AnyKey(),
    )

    // Configure defaults for all dashboards.
    err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
        return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
            var err error
            v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("parent_path")), dyn.V(b.Config.Workspace.ResourcePath))
            if err != nil {
                return dyn.InvalidValue, err
            }
            v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("embed_credentials")), dyn.V(false))
            if err != nil {
                return dyn.InvalidValue, err
            }
            return v, nil
        })
    })

    diags = diags.Extend(diag.FromErr(err))
    return diags
}

func setIfNotExists(v dyn.Value, path dyn.Path, defaultValue dyn.Value) (dyn.Value, error) {
    // Get the field at the specified path (if set).
    _, err := dyn.GetByPath(v, path)
    switch {
    case dyn.IsNoSuchKeyError(err):
        // OK, we'll set the default value.
        break
    case dyn.IsCannotTraverseNilError(err):
        // Cannot traverse the value, skip it.
        return v, nil
    case err == nil:
        // The field is set, skip it.
        return v, nil
    default:
        // Return the error.
        return v, err
    }

    // Set the field at the specified path.
    return dyn.SetByPath(v, path, defaultValue)
}
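A minimal in-package sketch of the setIfNotExists semantics, assuming in-memory values built with the same dyn constructors used above:

// Key already present: the value is returned unchanged.
existing := dyn.V(map[string]dyn.Value{"parent_path": dyn.V("/already/set")})
out, _ := setIfNotExists(existing, dyn.NewPath(dyn.Key("parent_path")), dyn.V("/default"))
// out still maps "parent_path" to "/already/set".

// Key absent: the default is written.
missing := dyn.V(map[string]dyn.Value{})
out, _ = setIfNotExists(missing, dyn.NewPath(dyn.Key("embed_credentials")), dyn.V(false))
// out now maps "embed_credentials" to false.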
@@ -0,0 +1,130 @@
package mutator_test

import (
    "context"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/mutator"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/cli/bundle/internal/bundletest"
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/databricks-sdk-go/service/dashboards"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestConfigureDashboardDefaultsParentPath(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ResourcePath: "/foo/bar",
            },
            Resources: config.Resources{
                Dashboards: map[string]*resources.Dashboard{
                    "d1": {
                        // Empty string is skipped.
                        // See below for how it is set.
                        CreateDashboardRequest: &dashboards.CreateDashboardRequest{
                            ParentPath: "",
                        },
                    },
                    "d2": {
                        // Non-empty string is skipped.
                        CreateDashboardRequest: &dashboards.CreateDashboardRequest{
                            ParentPath: "already-set",
                        },
                    },
                    "d3": {
                        // No parent path set.
                    },
                    "d4": nil,
                },
            },
        },
    }

    // We can't set an empty string in the typed configuration.
    // Do it on the dyn.Value directly.
    bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
        return dyn.Set(v, "resources.dashboards.d1.parent_path", dyn.V(""))
    })

    diags := bundle.Apply(context.Background(), b, mutator.ConfigureDashboardDefaults())
    require.NoError(t, diags.Error())

    var v dyn.Value
    var err error

    // Set to empty string; unchanged.
    v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.parent_path")
    if assert.NoError(t, err) {
        assert.Equal(t, "", v.MustString())
    }

    // Set to "already-set"; unchanged.
    v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.parent_path")
    if assert.NoError(t, err) {
        assert.Equal(t, "already-set", v.MustString())
    }

    // Not set; now set to the workspace resource path.
    v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.parent_path")
    if assert.NoError(t, err) {
        assert.Equal(t, "/foo/bar", v.MustString())
    }

    // No valid dashboard; no change.
    _, err = dyn.Get(b.Config.Value(), "resources.dashboards.d4.parent_path")
    assert.True(t, dyn.IsCannotTraverseNilError(err))
}

func TestConfigureDashboardDefaultsEmbedCredentials(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Resources: config.Resources{
                Dashboards: map[string]*resources.Dashboard{
                    "d1": {
                        EmbedCredentials: true,
                    },
                    "d2": {
                        EmbedCredentials: false,
                    },
                    "d3": {
                        // No embed credentials set.
                    },
                    "d4": nil,
                },
            },
        },
    }

    diags := bundle.Apply(context.Background(), b, mutator.ConfigureDashboardDefaults())
    require.NoError(t, diags.Error())

    var v dyn.Value
    var err error

    // Set to true; still true.
    v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d1.embed_credentials")
    if assert.NoError(t, err) {
        assert.Equal(t, true, v.MustBool())
    }

    // Set to false; still false.
    v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d2.embed_credentials")
    if assert.NoError(t, err) {
        assert.Equal(t, false, v.MustBool())
    }

    // Not set; now false.
    v, err = dyn.Get(b.Config.Value(), "resources.dashboards.d3.embed_credentials")
    if assert.NoError(t, err) {
        assert.Equal(t, false, v.MustBool())
    }

    // No valid dashboard; no change.
    _, err = dyn.Get(b.Config.Value(), "resources.dashboards.d4.embed_credentials")
    assert.True(t, dyn.IsCannotTraverseNilError(err))
}
@@ -8,6 +8,7 @@ import (
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/databricks-sdk-go/service/catalog"
 	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/dashboards"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/databricks/databricks-sdk-go/service/ml"
 	"github.com/databricks/databricks-sdk-go/service/pipelines"

@@ -85,6 +86,14 @@ func TestInitializeURLs(t *testing.T) {
 				},
 			},
 		},
+		Dashboards: map[string]*resources.Dashboard{
+			"dashboard1": {
+				ID: "01ef8d56871e1d50ae30ce7375e42478",
+				CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+					DisplayName: "My special dashboard",
+				},
+			},
+		},
 	},
 },

@@ -99,6 +108,7 @@ func TestInitializeURLs(t *testing.T) {
 		"qualityMonitor1": "https://mycompany.databricks.com/explore/data/catalog/schema/qualityMonitor1?o=123456",
 		"schema1":         "https://mycompany.databricks.com/explore/data/catalog/schema?o=123456",
 		"cluster1":        "https://mycompany.databricks.com/compute/clusters/1017-103929-vlr7jzcf?o=123456",
+		"dashboard1":      "https://mycompany.databricks.com/dashboardsv3/01ef8d56871e1d50ae30ce7375e42478/published?o=123456",
 	}

 	initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/")
@@ -32,6 +32,7 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("file_path")),
 		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("artifact_path")),
 		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("state_path")),
+		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("resource_path")),
 	}

 	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {

@@ -41,6 +41,7 @@ func TestPrependWorkspacePrefix(t *testing.T) {
 				ArtifactPath: tc.path,
 				FilePath:     tc.path,
 				StatePath:    tc.path,
+				ResourcePath: tc.path,
 			},
 		},
 	}

@@ -51,6 +52,7 @@ func TestPrependWorkspacePrefix(t *testing.T) {
 		require.Equal(t, tc.expected, b.Config.Workspace.ArtifactPath)
 		require.Equal(t, tc.expected, b.Config.Workspace.FilePath)
 		require.Equal(t, tc.expected, b.Config.Workspace.StatePath)
+		require.Equal(t, tc.expected, b.Config.Workspace.ResourcePath)
 	}
 }

@@ -76,4 +78,5 @@ func TestPrependWorkspaceForDefaultConfig(t *testing.T) {
 	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/artifacts", b.Config.Workspace.ArtifactPath)
 	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/files", b.Config.Workspace.FilePath)
 	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/state", b.Config.Workspace.StatePath)
+	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/resources", b.Config.Workspace.ResourcePath)
 }
@@ -14,6 +14,7 @@ import (
 	sdkconfig "github.com/databricks/databricks-sdk-go/config"
 	"github.com/databricks/databricks-sdk-go/service/catalog"
 	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/dashboards"
 	"github.com/databricks/databricks-sdk-go/service/iam"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/databricks/databricks-sdk-go/service/ml"

@@ -123,6 +124,13 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
 			Clusters: map[string]*resources.Cluster{
 				"cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}},
 			},
+			Dashboards: map[string]*resources.Dashboard{
+				"dashboard1": {
+					CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+						DisplayName: "dashboard1",
+					},
+				},
+			},
 		},
 	},
 	// Use AWS implementation for testing.

@@ -184,6 +192,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) {

 	// Clusters
 	assert.Equal(t, "[dev lennart] cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
+
+	// Dashboards
+	assert.Equal(t, "[dev lennart] dashboard1", b.Config.Resources.Dashboards["dashboard1"].DisplayName)
 }

 func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {

@@ -272,6 +283,7 @@ func TestValidateDevelopmentMode(t *testing.T) {
 	b.Config.Workspace.StatePath = "/Users/lennart@company.com/.bundle/x/y/state"
 	b.Config.Workspace.FilePath = "/Users/lennart@company.com/.bundle/x/y/files"
 	b.Config.Workspace.ArtifactPath = "/Users/lennart@company.com/.bundle/x/y/artifacts"
+	b.Config.Workspace.ResourcePath = "/Users/lennart@company.com/.bundle/x/y/resources"
 	diags = validateDevelopmentMode(b)
 	require.NoError(t, diags.Error())
 }

@@ -300,6 +312,7 @@ func TestProcessTargetModeProduction(t *testing.T) {
 	b.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state"
 	b.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts"
 	b.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files"
+	b.Config.Workspace.ResourcePath = "/Shared/.bundle/x/y/resources"

 	diags = validateProductionMode(context.Background(), b, false)
 	require.ErrorContains(t, diags.Error(), "production")
@@ -110,6 +110,16 @@ func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
 		))
 	}

+	// Dashboards do not support run_as in the API.
+	if len(b.Config.Resources.Dashboards) > 0 {
+		diags = diags.Extend(reportRunAsNotSupported(
+			"dashboards",
+			b.Config.GetLocation("resources.dashboards"),
+			b.Config.Workspace.CurrentUser.UserName,
+			identity,
+		))
+	}
+
 	return diags
 }
@@ -33,6 +33,7 @@ func allResourceTypes(t *testing.T) []string {
 	// also update this check when adding a new resource
 	require.Equal(t, []string{
 		"clusters",
+		"dashboards",
 		"experiments",
 		"jobs",
 		"model_serving_endpoints",

@@ -188,6 +189,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
 			Config: *r,
 		}
 		diags := bundle.Apply(context.Background(), b, SetRunAs())
 		require.Error(t, diags.Error())
 		assert.Contains(t, diags.Error().Error(), "do not support a setting a run_as user that is different from the owner.\n"+
 			"Current identity: alice. Run as identity: bob.\n"+
 			"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", rt)
@@ -162,6 +162,20 @@ func (t *translateContext) translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) {
 	return localRelPath, nil
 }

+func (t *translateContext) retainLocalAbsoluteFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
+	info, err := t.b.SyncRoot.Stat(filepath.ToSlash(localRelPath))
+	if errors.Is(err, fs.ErrNotExist) {
+		return "", fmt.Errorf("file %s not found", literal)
+	}
+	if err != nil {
+		return "", fmt.Errorf("unable to determine if %s is a file: %w", localFullPath, err)
+	}
+	if info.IsDir() {
+		return "", fmt.Errorf("expected %s to be a file but found a directory", literal)
+	}
+	return localFullPath, nil
+}
+
 func (t *translateContext) translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) {
 	if !strings.HasPrefix(localRelPath, ".") {
 		localRelPath = "." + string(filepath.Separator) + localRelPath

@@ -215,6 +229,7 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 		t.applyJobTranslations,
 		t.applyPipelineTranslations,
 		t.applyArtifactTranslations,
+		t.applyDashboardTranslations,
 	} {
 		v, err = fn(v)
 		if err != nil {
@@ -0,0 +1,28 @@
package mutator

import (
    "fmt"

    "github.com/databricks/cli/libs/dyn"
)

func (t *translateContext) applyDashboardTranslations(v dyn.Value) (dyn.Value, error) {
    // Convert the `file_path` field to a local absolute path.
    // We load the file at this path and use its contents for the dashboard contents.
    pattern := dyn.NewPattern(
        dyn.Key("resources"),
        dyn.Key("dashboards"),
        dyn.AnyKey(),
        dyn.Key("file_path"),
    )

    return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
        key := p[2].Key()
        dir, err := v.Location().Directory()
        if err != nil {
            return dyn.InvalidValue, fmt.Errorf("unable to determine directory for dashboard %s: %w", key, err)
        }

        return t.rewriteRelativeTo(p, v, t.retainLocalAbsoluteFilePath, dir, "")
    })
}
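For orientation, a sketch (config shape assumed) of what this pattern matches: every file_path leaf under resources.dashboards, regardless of the dashboard's key:

// Sketch: the callback runs once per dashboard's file_path.
v := dyn.V(map[string]dyn.Value{
    "resources": dyn.V(map[string]dyn.Value{
        "dashboards": dyn.V(map[string]dyn.Value{
            "dashboard": dyn.V(map[string]dyn.Value{
                "file_path": dyn.V("../src/my_dashboard.lvdash.json"),
            }),
        }),
    }),
})
pattern := dyn.NewPattern(dyn.Key("resources"), dyn.Key("dashboards"), dyn.AnyKey(), dyn.Key("file_path"))
_, _ = dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
    // Here p[2].Key() == "dashboard" and v holds the relative path to rewrite.
    return v, nil
})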
@@ -0,0 +1,54 @@
package mutator_test

import (
    "context"
    "path/filepath"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/mutator"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/cli/bundle/internal/bundletest"
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/cli/libs/vfs"
    "github.com/databricks/databricks-sdk-go/service/dashboards"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestTranslatePathsDashboards_FilePathRelativeSubDirectory(t *testing.T) {
    dir := t.TempDir()
    touchEmptyFile(t, filepath.Join(dir, "src", "my_dashboard.lvdash.json"))

    b := &bundle.Bundle{
        SyncRootPath: dir,
        SyncRoot:     vfs.MustNew(dir),
        Config: config.Root{
            Resources: config.Resources{
                Dashboards: map[string]*resources.Dashboard{
                    "dashboard": {
                        CreateDashboardRequest: &dashboards.CreateDashboardRequest{
                            DisplayName: "My Dashboard",
                        },
                        FilePath: "../src/my_dashboard.lvdash.json",
                    },
                },
            },
        },
    }

    bundletest.SetLocation(b, "resources.dashboards", []dyn.Location{{
        File: filepath.Join(dir, "resources/dashboard.yml"),
    }})

    diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
    require.NoError(t, diags.Error())

    // Assert that the file path for the dashboard has been converted to its local absolute path.
    assert.Equal(
        t,
        filepath.Join(dir, "src", "my_dashboard.lvdash.json"),
        b.Config.Resources.Dashboards["dashboard"].FilePath,
    )
}
@@ -21,6 +21,7 @@ type Resources struct {
 	QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"`
 	Schemas         map[string]*resources.Schema         `json:"schemas,omitempty"`
 	Clusters        map[string]*resources.Cluster        `json:"clusters,omitempty"`
+	Dashboards      map[string]*resources.Dashboard      `json:"dashboards,omitempty"`
 }

 type ConfigResource interface {

@@ -77,6 +78,7 @@ func (r *Resources) AllResources() []ResourceGroup {
 		collectResourceMap(descriptions["quality_monitors"], r.QualityMonitors),
 		collectResourceMap(descriptions["schemas"], r.Schemas),
 		collectResourceMap(descriptions["clusters"], r.Clusters),
+		collectResourceMap(descriptions["dashboards"], r.Dashboards),
 	}
 }

@@ -175,5 +177,11 @@ func SupportedResources() map[string]ResourceDescription {
 			SingularTitle: "Cluster",
 			PluralTitle:   "Clusters",
 		},
+		"dashboards": {
+			SingularName:  "dashboard",
+			PluralName:    "dashboards",
+			SingularTitle: "Dashboard",
+			PluralTitle:   "Dashboards",
+		},
 	}
 }
@@ -0,0 +1,81 @@
package resources

import (
    "context"
    "fmt"
    "net/url"

    "github.com/databricks/cli/libs/log"
    "github.com/databricks/databricks-sdk-go"
    "github.com/databricks/databricks-sdk-go/marshal"
    "github.com/databricks/databricks-sdk-go/service/dashboards"
)

type Dashboard struct {
    ID             string         `json:"id,omitempty" bundle:"readonly"`
    Permissions    []Permission   `json:"permissions,omitempty"`
    ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
    URL            string         `json:"url,omitempty" bundle:"internal"`

    *dashboards.CreateDashboardRequest

    // =========================
    // === Additional fields ===
    // =========================

    // SerializedDashboard holds the contents of the dashboard in serialized JSON form.
    // We override the field's type from the SDK struct here to allow for inlining as YAML.
    // If the value is a string, it is used as is.
    // If it is not a string, its contents are marshalled as JSON.
    SerializedDashboard any `json:"serialized_dashboard,omitempty"`

    // EmbedCredentials is a flag to indicate if the publisher's credentials should
    // be embedded in the published dashboard. These embedded credentials will be used
    // to execute the published dashboard's queries.
    //
    // Defaults to false if not set.
    EmbedCredentials bool `json:"embed_credentials,omitempty"`

    // FilePath points to the local `.lvdash.json` file containing the dashboard definition.
    FilePath string `json:"file_path,omitempty"`
}

func (r *Dashboard) UnmarshalJSON(b []byte) error {
    return marshal.Unmarshal(b, r)
}

func (r Dashboard) MarshalJSON() ([]byte, error) {
    return marshal.Marshal(r)
}

func (*Dashboard) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
    _, err := w.Lakeview.Get(ctx, dashboards.GetDashboardRequest{
        DashboardId: id,
    })
    if err != nil {
        log.Debugf(ctx, "dashboard %s does not exist", id)
        return false, err
    }
    return true, nil
}

func (*Dashboard) TerraformResourceName() string {
    return "databricks_dashboard"
}

func (r *Dashboard) InitializeURL(baseURL url.URL) {
    if r.ID == "" {
        return
    }

    baseURL.Path = fmt.Sprintf("dashboardsv3/%s/published", r.ID)
    r.URL = baseURL.String()
}

func (r *Dashboard) GetName() string {
    return r.DisplayName
}

func (r *Dashboard) GetURL() string {
    return r.URL
}
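A brief in-package sketch of how InitializeURL composes the published-dashboard link (host assumed; it mirrors the URL the tests above expect, minus the workspace ?o= suffix that is appended elsewhere):

// Sketch only: URL composition for a published dashboard.
base, _ := url.Parse("https://mycompany.databricks.com/") // assumed host
d := Dashboard{ID: "01ef8d56871e1d50ae30ce7375e42478"}
d.InitializeURL(*base)
fmt.Println(d.GetURL())
// https://mycompany.databricks.com/dashboardsv3/01ef8d56871e1d50ae30ce7375e42478/published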
@@ -1,5 +1,7 @@
 package resources

+import "fmt"
+
 // Permission holds the permission level setting for a single principal.
 // Multiple of these can be defined on any resource.
 type Permission struct {

@@ -9,3 +11,19 @@ type Permission struct {
 	ServicePrincipalName string `json:"service_principal_name,omitempty"`
 	GroupName            string `json:"group_name,omitempty"`
 }
+
+func (p Permission) String() string {
+	if p.UserName != "" {
+		return fmt.Sprintf("level: %s, user_name: %s", p.Level, p.UserName)
+	}
+
+	if p.ServicePrincipalName != "" {
+		return fmt.Sprintf("level: %s, service_principal_name: %s", p.Level, p.ServicePrincipalName)
+	}
+
+	if p.GroupName != "" {
+		return fmt.Sprintf("level: %s, group_name: %s", p.Level, p.GroupName)
+	}
+
+	return fmt.Sprintf("level: %s", p.Level)
+}
@@ -0,0 +1,106 @@
package validate

import (
    "context"
    "fmt"
    "path"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/libraries"
    "github.com/databricks/cli/bundle/paths"
    "github.com/databricks/cli/bundle/permissions"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/databricks-sdk-go/apierr"
    "github.com/databricks/databricks-sdk-go/service/workspace"
    "golang.org/x/sync/errgroup"
)

type folderPermissions struct{}

// Apply implements bundle.ReadOnlyMutator.
func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) diag.Diagnostics {
    if len(b.Config().Permissions) == 0 {
        return nil
    }

    bundlePaths := paths.CollectUniqueWorkspacePathPrefixes(b.Config().Workspace)

    var diags diag.Diagnostics
    g, ctx := errgroup.WithContext(ctx)
    results := make([]diag.Diagnostics, len(bundlePaths))
    for i, p := range bundlePaths {
        g.Go(func() error {
            results[i] = checkFolderPermission(ctx, b, p)
            return nil
        })
    }

    if err := g.Wait(); err != nil {
        return diag.FromErr(err)
    }

    for _, r := range results {
        diags = diags.Extend(r)
    }

    return diags
}

func checkFolderPermission(ctx context.Context, b bundle.ReadOnlyBundle, folderPath string) diag.Diagnostics {
    // If the folder is shared, there's no need to check permissions here; they were already checked by another mutator earlier.
    if libraries.IsWorkspaceSharedPath(folderPath) {
        return nil
    }

    w := b.WorkspaceClient().Workspace
    obj, err := getClosestExistingObject(ctx, w, folderPath)
    if err != nil {
        return diag.FromErr(err)
    }

    objPermissions, err := w.GetPermissions(ctx, workspace.GetWorkspaceObjectPermissionsRequest{
        WorkspaceObjectId:   fmt.Sprint(obj.ObjectId),
        WorkspaceObjectType: "directories",
    })
    if err != nil {
        return diag.FromErr(err)
    }

    p := permissions.ObjectAclToResourcePermissions(folderPath, objPermissions.AccessControlList)
    return p.Compare(b.Config().Permissions)
}

func getClosestExistingObject(ctx context.Context, w workspace.WorkspaceInterface, folderPath string) (*workspace.ObjectInfo, error) {
    for {
        obj, err := w.GetStatusByPath(ctx, folderPath)
        if err == nil {
            return obj, nil
        }

        if !apierr.IsMissing(err) {
            return nil, err
        }

        parent := path.Dir(folderPath)
        // If the parent is the same as the current folder, we have reached the root.
        if folderPath == parent {
            break
        }

        folderPath = parent
    }

    return nil, fmt.Errorf("folder %s and its parent folders do not exist", folderPath)
}

// Name implements bundle.ReadOnlyMutator.
func (f *folderPermissions) Name() string {
    return "validate:folder_permissions"
}

// ValidateFolderPermissions validates that the permissions for the folders in the workspace file system match
// the permissions in the top-level permissions section of the bundle.
func ValidateFolderPermissions() bundle.ReadOnlyMutator {
    return &folderPermissions{}
}
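For intuition, a small sketch of the parent walk in getClosestExistingObject (paths assumed). path.Dir provides the termination condition because the root is its own parent:

p := "/Workspace/Users/foo@bar.com/artifacts"
for {
    // w.GetStatusByPath(ctx, p) would be tried here; on a 404 we climb.
    parent := path.Dir(p) // "/Workspace/Users/foo@bar.com", then "/Workspace/Users", ...
    if p == parent {      // path.Dir("/") == "/", so the loop always terminates
        break
    }
    p = parent
}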
@@ -0,0 +1,208 @@
package validate

import (
    "context"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/cli/bundle/permissions"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/databricks-sdk-go/apierr"
    "github.com/databricks/databricks-sdk-go/experimental/mocks"
    "github.com/databricks/databricks-sdk-go/service/workspace"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
)

func TestFolderPermissionsInheritedWhenRootPathDoesNotExist(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                RootPath:     "/Workspace/Users/foo@bar.com",
                ArtifactPath: "/Workspace/Users/otherfoo@bar.com/artifacts",
                FilePath:     "/Workspace/Users/foo@bar.com/files",
                StatePath:    "/Workspace/Users/foo@bar.com/state",
                ResourcePath: "/Workspace/Users/foo@bar.com/resources",
            },
            Permissions: []resources.Permission{
                {Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
            },
        },
    }
    m := mocks.NewMockWorkspaceClient(t)
    api := m.GetMockWorkspaceAPI()
    api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com/artifacts").Return(nil, &apierr.APIError{
        StatusCode: 404,
        ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
    })
    api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com").Return(nil, &apierr.APIError{
        StatusCode: 404,
        ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
    })
    api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(nil, &apierr.APIError{
        StatusCode: 404,
        ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
    })
    api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users").Return(nil, &apierr.APIError{
        StatusCode: 404,
        ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
    })
    api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace").Return(&workspace.ObjectInfo{
        ObjectId: 1234,
    }, nil)

    api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
        WorkspaceObjectId:   "1234",
        WorkspaceObjectType: "directories",
    }).Return(&workspace.WorkspaceObjectPermissions{
        ObjectId: "1234",
        AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
            {
                UserName: "foo@bar.com",
                AllPermissions: []workspace.WorkspaceObjectPermission{
                    {PermissionLevel: "CAN_MANAGE"},
                },
            },
        },
    }, nil)

    b.SetWorkpaceClient(m.WorkspaceClient)
    rb := bundle.ReadOnly(b)

    diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
    require.Empty(t, diags)
}

func TestValidateFolderPermissionsFailsOnMissingBundlePermission(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                RootPath:     "/Workspace/Users/foo@bar.com",
                ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts",
                FilePath:     "/Workspace/Users/foo@bar.com/files",
                StatePath:    "/Workspace/Users/foo@bar.com/state",
                ResourcePath: "/Workspace/Users/foo@bar.com/resources",
            },
            Permissions: []resources.Permission{
                {Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
            },
        },
    }
    m := mocks.NewMockWorkspaceClient(t)
    api := m.GetMockWorkspaceAPI()
    api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{
        ObjectId: 1234,
    }, nil)

    api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
        WorkspaceObjectId:   "1234",
        WorkspaceObjectType: "directories",
    }).Return(&workspace.WorkspaceObjectPermissions{
        ObjectId: "1234",
        AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
            {
                UserName: "foo@bar.com",
                AllPermissions: []workspace.WorkspaceObjectPermission{
                    {PermissionLevel: "CAN_MANAGE"},
                },
            },
            {
                UserName: "foo2@bar.com",
                AllPermissions: []workspace.WorkspaceObjectPermission{
                    {PermissionLevel: "CAN_MANAGE"},
                },
            },
        },
    }, nil)

    b.SetWorkpaceClient(m.WorkspaceClient)
    rb := bundle.ReadOnly(b)

    diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
    require.Len(t, diags, 1)
    require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary)
    require.Equal(t, diag.Warning, diags[0].Severity)
    require.Equal(t, "The following permissions apply to the workspace folder at \"/Workspace/Users/foo@bar.com\" but are not configured in the bundle:\n- level: CAN_MANAGE, user_name: foo2@bar.com\n", diags[0].Detail)
}

func TestValidateFolderPermissionsFailsOnPermissionMismatch(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                RootPath:     "/Workspace/Users/foo@bar.com",
                ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts",
                FilePath:     "/Workspace/Users/foo@bar.com/files",
                StatePath:    "/Workspace/Users/foo@bar.com/state",
                ResourcePath: "/Workspace/Users/foo@bar.com/resources",
            },
            Permissions: []resources.Permission{
                {Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
            },
        },
    }
    m := mocks.NewMockWorkspaceClient(t)
    api := m.GetMockWorkspaceAPI()
    api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{
        ObjectId: 1234,
    }, nil)

    api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
        WorkspaceObjectId:   "1234",
        WorkspaceObjectType: "directories",
    }).Return(&workspace.WorkspaceObjectPermissions{
        ObjectId: "1234",
        AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
            {
                UserName: "foo2@bar.com",
                AllPermissions: []workspace.WorkspaceObjectPermission{
                    {PermissionLevel: "CAN_MANAGE"},
                },
            },
        },
    }, nil)

    b.SetWorkpaceClient(m.WorkspaceClient)
    rb := bundle.ReadOnly(b)

    diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
    require.Len(t, diags, 1)
    require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary)
    require.Equal(t, diag.Warning, diags[0].Severity)
}

func TestValidateFolderPermissionsFailsOnNoRootFolder(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                RootPath:     "/NotExisting",
                ArtifactPath: "/NotExisting/artifacts",
                FilePath:     "/NotExisting/files",
                StatePath:    "/NotExisting/state",
                ResourcePath: "/NotExisting/resources",
            },
            Permissions: []resources.Permission{
                {Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
            },
        },
    }
    m := mocks.NewMockWorkspaceClient(t)
    api := m.GetMockWorkspaceAPI()
    api.EXPECT().GetStatusByPath(mock.Anything, "/NotExisting").Return(nil, &apierr.APIError{
        StatusCode: 404,
        ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
    })
    api.EXPECT().GetStatusByPath(mock.Anything, "/").Return(nil, &apierr.APIError{
        StatusCode: 404,
        ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
    })

    b.SetWorkpaceClient(m.WorkspaceClient)
    rb := bundle.ReadOnly(b)

    diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
    require.Len(t, diags, 1)
    require.Equal(t, "folder / and its parent folders do not exist", diags[0].Summary)
    require.Equal(t, diag.Error, diags[0].Severity)
}
@@ -35,6 +35,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		FilesToSync(),
 		ValidateSyncPatterns(),
 		JobTaskClusterSpec(),
+		ValidateFolderPermissions(),
 	))
 }
@@ -0,0 +1,117 @@
package terraform

import (
    "context"
    "fmt"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/dyn"
    tfjson "github.com/hashicorp/terraform-json"
)

type dashboardState struct {
    Name string
    ID   string
    ETag string
}

func collectDashboardsFromState(ctx context.Context, b *bundle.Bundle) ([]dashboardState, error) {
    state, err := ParseResourcesState(ctx, b)
    if err != nil && state == nil {
        return nil, err
    }

    var dashboards []dashboardState
    for _, resource := range state.Resources {
        if resource.Mode != tfjson.ManagedResourceMode {
            continue
        }
        for _, instance := range resource.Instances {
            switch resource.Type {
            case "databricks_dashboard":
                dashboards = append(dashboards, dashboardState{
                    Name: resource.Name,
                    ID:   instance.Attributes.ID,
                    ETag: instance.Attributes.ETag,
                })
            }
        }
    }

    return dashboards, nil
}

type checkDashboardsModifiedRemotely struct{}

func (l *checkDashboardsModifiedRemotely) Name() string {
    return "CheckDashboardsModifiedRemotely"
}

func (l *checkDashboardsModifiedRemotely) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    // This mutator is relevant only if the bundle includes dashboards.
    if len(b.Config.Resources.Dashboards) == 0 {
        return nil
    }

    // If the user has forced the deployment, skip this check.
    if b.Config.Bundle.Force {
        return nil
    }

    dashboards, err := collectDashboardsFromState(ctx, b)
    if err != nil {
        return diag.FromErr(err)
    }

    var diags diag.Diagnostics
    for _, dashboard := range dashboards {
        // Skip dashboards that are not defined in the bundle.
        // These will be destroyed upon deployment.
        if _, ok := b.Config.Resources.Dashboards[dashboard.Name]; !ok {
            continue
        }

        path := dyn.MustPathFromString(fmt.Sprintf("resources.dashboards.%s", dashboard.Name))
        loc := b.Config.GetLocation(path.String())
        actual, err := b.WorkspaceClient().Lakeview.GetByDashboardId(ctx, dashboard.ID)
        if err != nil {
            diags = diags.Append(diag.Diagnostic{
                Severity:  diag.Error,
                Summary:   fmt.Sprintf("failed to get dashboard %q", dashboard.Name),
                Detail:    err.Error(),
                Paths:     []dyn.Path{path},
                Locations: []dyn.Location{loc},
            })
            continue
        }

        // If the ETag is the same, the dashboard has not been modified.
        if actual.Etag == dashboard.ETag {
            continue
        }

        diags = diags.Append(diag.Diagnostic{
            Severity: diag.Error,
            Summary:  fmt.Sprintf("dashboard %q has been modified remotely", dashboard.Name),
            Detail: "" +
                "This dashboard has been modified remotely since the last bundle deployment.\n" +
                "These modifications are untracked and will be overwritten on deploy.\n" +
                "\n" +
                "Make sure that the local dashboard definition matches what you intend to deploy\n" +
                "before proceeding with the deployment.\n" +
                "\n" +
                "Run `databricks bundle deploy --force` to bypass this error." +
                "",
            Paths:     []dyn.Path{path},
            Locations: []dyn.Location{loc},
        })
    }

    return diags
}

func CheckDashboardsModifiedRemotely() *checkDashboardsModifiedRemotely {
    return &checkDashboardsModifiedRemotely{}
}
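The drift check above reduces to a single comparison per dashboard; a sketch with assumed values:

stateETag := "1000"  // recorded in the Terraform state at the last deploy
remoteETag := "1234" // returned by Lakeview.GetByDashboardId now
if stateETag != remoteETag {
    // The dashboard changed remotely since the last deploy; this is
    // surfaced as an error unless `databricks bundle deploy --force` is used.
}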
@@ -0,0 +1,191 @@
package terraform

import (
    "context"
    "fmt"
    "path/filepath"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/cli/internal/testutil"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/databricks-sdk-go/experimental/mocks"
    "github.com/databricks/databricks-sdk-go/service/dashboards"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
)

func mockDashboardBundle(t *testing.T) *bundle.Bundle {
    dir := t.TempDir()
    b := &bundle.Bundle{
        BundleRootPath: dir,
        Config: config.Root{
            Bundle: config.Bundle{
                Target: "test",
            },
            Resources: config.Resources{
                Dashboards: map[string]*resources.Dashboard{
                    "dash1": {
                        CreateDashboardRequest: &dashboards.CreateDashboardRequest{
                            DisplayName: "My Special Dashboard",
                        },
                    },
                },
            },
        },
    }
    return b
}

func TestCheckDashboardsModifiedRemotely_NoDashboards(t *testing.T) {
    dir := t.TempDir()
    b := &bundle.Bundle{
        BundleRootPath: dir,
        Config: config.Root{
            Bundle: config.Bundle{
                Target: "test",
            },
            Resources: config.Resources{},
        },
    }

    diags := bundle.Apply(context.Background(), b, CheckDashboardsModifiedRemotely())
    assert.Empty(t, diags)
}

func TestCheckDashboardsModifiedRemotely_FirstDeployment(t *testing.T) {
    b := mockDashboardBundle(t)
    diags := bundle.Apply(context.Background(), b, CheckDashboardsModifiedRemotely())
    assert.Empty(t, diags)
}

func TestCheckDashboardsModifiedRemotely_ExistingStateNoChange(t *testing.T) {
    ctx := context.Background()

    b := mockDashboardBundle(t)
    writeFakeDashboardState(t, ctx, b)

    // Mock the call to the API.
    m := mocks.NewMockWorkspaceClient(t)
    dashboardsAPI := m.GetMockLakeviewAPI()
    dashboardsAPI.EXPECT().
        GetByDashboardId(mock.Anything, "id1").
        Return(&dashboards.Dashboard{
            DisplayName: "My Special Dashboard",
            Etag:        "1000",
        }, nil).
        Once()
    b.SetWorkpaceClient(m.WorkspaceClient)

    // No changes, so no diags.
    diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
    assert.Empty(t, diags)
}

func TestCheckDashboardsModifiedRemotely_ExistingStateChange(t *testing.T) {
    ctx := context.Background()

    b := mockDashboardBundle(t)
    writeFakeDashboardState(t, ctx, b)

    // Mock the call to the API.
    m := mocks.NewMockWorkspaceClient(t)
    dashboardsAPI := m.GetMockLakeviewAPI()
    dashboardsAPI.EXPECT().
        GetByDashboardId(mock.Anything, "id1").
        Return(&dashboards.Dashboard{
            DisplayName: "My Special Dashboard",
            Etag:        "1234",
        }, nil).
        Once()
    b.SetWorkpaceClient(m.WorkspaceClient)

    // The dashboard has changed, so expect an error.
    diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
    if assert.Len(t, diags, 1) {
        assert.Equal(t, diag.Error, diags[0].Severity)
        assert.Equal(t, `dashboard "dash1" has been modified remotely`, diags[0].Summary)
    }
}

func TestCheckDashboardsModifiedRemotely_ExistingStateFailureToGet(t *testing.T) {
    ctx := context.Background()

    b := mockDashboardBundle(t)
    writeFakeDashboardState(t, ctx, b)

    // Mock the call to the API.
    m := mocks.NewMockWorkspaceClient(t)
    dashboardsAPI := m.GetMockLakeviewAPI()
    dashboardsAPI.EXPECT().
        GetByDashboardId(mock.Anything, "id1").
        Return(nil, fmt.Errorf("failure")).
        Once()
    b.SetWorkpaceClient(m.WorkspaceClient)

    // Unable to get the dashboard, so expect an error.
    diags := bundle.Apply(ctx, b, CheckDashboardsModifiedRemotely())
    if assert.Len(t, diags, 1) {
        assert.Equal(t, diag.Error, diags[0].Severity)
        assert.Equal(t, `failed to get dashboard "dash1"`, diags[0].Summary)
    }
}

func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle) {
    tfDir, err := Dir(ctx, b)
    require.NoError(t, err)

    // Write fake state file.
    testutil.WriteFile(t, `
    {
        "version": 4,
        "terraform_version": "1.5.5",
        "resources": [
            {
                "mode": "managed",
                "type": "databricks_dashboard",
                "name": "dash1",
                "instances": [
                    {
                        "schema_version": 0,
                        "attributes": {
                            "etag": "1000",
                            "id": "id1"
                        }
                    }
                ]
            },
            {
                "mode": "managed",
                "type": "databricks_job",
                "name": "job",
                "instances": [
                    {
                        "schema_version": 0,
                        "attributes": {
                            "id": "1234"
                        }
                    }
                ]
            },
            {
                "mode": "managed",
                "type": "databricks_dashboard",
                "name": "dash2",
                "instances": [
                    {
                        "schema_version": 0,
                        "attributes": {
                            "etag": "1001",
                            "id": "id2"
                        }
                    }
                ]
            }
        ]
    }
    `, filepath.Join(tfDir, TerraformStateFileName))
}
|
@ -176,6 +176,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
|
|||
}
|
||||
cur.ID = instance.Attributes.ID
|
||||
config.Resources.Clusters[resource.Name] = cur
|
||||
case "databricks_dashboard":
|
||||
if config.Resources.Dashboards == nil {
|
||||
config.Resources.Dashboards = make(map[string]*resources.Dashboard)
|
||||
}
|
||||
cur := config.Resources.Dashboards[resource.Name]
|
||||
if cur == nil {
|
||||
cur = &resources.Dashboard{ModifiedStatus: resources.ModifiedStatusDeleted}
|
||||
}
|
||||
cur.ID = instance.Attributes.ID
|
||||
config.Resources.Dashboards[resource.Name] = cur
|
||||
case "databricks_permissions":
|
||||
case "databricks_grants":
|
||||
// Ignore; no need to pull these back into the configuration.
|
||||
|
@ -230,6 +240,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
|
|||
src.ModifiedStatus = resources.ModifiedStatusCreated
|
||||
}
|
||||
}
|
||||
for _, src := range config.Resources.Dashboards {
|
||||
if src.ModifiedStatus == "" && src.ID == "" {
|
||||
src.ModifiedStatus = resources.ModifiedStatusCreated
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
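The dashboard case added above follows the same reconciliation rule as the other resource types: names present in state but absent from configuration are marked deleted, and configured resources that never received an ID are marked created. A self-contained sketch of that rule with invented types:

package main

import "fmt"

const (
	statusCreated = "created"
	statusDeleted = "deleted"
)

type dashboard struct {
	ID             string
	ModifiedStatus string
}

func main() {
	// Local configuration, keyed by resource name.
	config := map[string]*dashboard{"dash1": {}}

	// IDs recorded in Terraform state, keyed by resource name.
	state := map[string]string{"dash1": "id1", "dash_old": "id2"}

	// Resources found in state either match a configured resource or are
	// reinstated as deleted stubs.
	for name, id := range state {
		cur := config[name]
		if cur == nil {
			cur = &dashboard{ModifiedStatus: statusDeleted}
		}
		cur.ID = id
		config[name] = cur
	}

	// Configured resources without an ID were never deployed.
	for _, d := range config {
		if d.ModifiedStatus == "" && d.ID == "" {
			d.ModifiedStatus = statusCreated
		}
	}

	for name, d := range config {
		fmt.Printf("%s: id=%q status=%q\n", name, d.ID, d.ModifiedStatus)
	}
}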
@ -12,6 +12,7 @@ import (
|
|||
"github.com/databricks/cli/libs/dyn/convert"
|
||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
"github.com/databricks/databricks-sdk-go/service/dashboards"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/databricks/databricks-sdk-go/service/ml"
|
||||
"github.com/databricks/databricks-sdk-go/service/pipelines"
|
||||
|
@ -677,6 +678,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
|
|||
{Attributes: stateInstanceAttributes{ID: "1"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "databricks_dashboard",
|
||||
Mode: "managed",
|
||||
Name: "test_dashboard",
|
||||
Instances: []stateResourceInstance{
|
||||
{Attributes: stateInstanceAttributes{ID: "1"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err := TerraformToBundle(&tfState, &config)
|
||||
|
@ -709,6 +718,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
|
|||
assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "1", config.Resources.Dashboards["test_dashboard"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
|
||||
|
||||
AssertFullResourceCoverage(t, &config)
|
||||
}
|
||||
|
||||
|
@ -778,6 +790,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
Dashboards: map[string]*resources.Dashboard{
|
||||
"test_dashboard": {
|
||||
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
|
||||
DisplayName: "test_dashboard",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var tfState = resourcesState{
|
||||
|
@ -813,6 +832,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
|
|||
assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "", config.Resources.Dashboards["test_dashboard"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
|
||||
|
||||
AssertFullResourceCoverage(t, &config)
|
||||
}
|
||||
|
||||
|
@ -927,6 +949,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
Dashboards: map[string]*resources.Dashboard{
|
||||
"test_dashboard": {
|
||||
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
|
||||
DisplayName: "test_dashboard",
|
||||
},
|
||||
},
|
||||
"test_dashboard_new": {
|
||||
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
|
||||
DisplayName: "test_dashboard_new",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var tfState = resourcesState{
|
||||
|
@ -1075,6 +1109,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
|
|||
{Attributes: stateInstanceAttributes{ID: "2"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "databricks_dashboard",
|
||||
Mode: "managed",
|
||||
Name: "test_dashboard",
|
||||
Instances: []stateResourceInstance{
|
||||
{Attributes: stateInstanceAttributes{ID: "1"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: "databricks_dashboard",
|
||||
Mode: "managed",
|
||||
Name: "test_dashboard_old",
|
||||
Instances: []stateResourceInstance{
|
||||
{Attributes: stateInstanceAttributes{ID: "2"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err := TerraformToBundle(&tfState, &config)
|
||||
|
@ -1143,6 +1193,13 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
|
|||
assert.Equal(t, "", config.Resources.Clusters["test_cluster_new"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster_new"].ModifiedStatus)
|
||||
|
||||
assert.Equal(t, "1", config.Resources.Dashboards["test_dashboard"].ID)
|
||||
assert.Equal(t, "", config.Resources.Dashboards["test_dashboard"].ModifiedStatus)
|
||||
assert.Equal(t, "2", config.Resources.Dashboards["test_dashboard_old"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Dashboards["test_dashboard_old"].ModifiedStatus)
|
||||
assert.Equal(t, "", config.Resources.Dashboards["test_dashboard_new"].ID)
|
||||
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Dashboards["test_dashboard_new"].ModifiedStatus)
|
||||
|
||||
AssertFullResourceCoverage(t, &config)
|
||||
}
|
||||
|
||||
|
|
|
@ -60,6 +60,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D
|
|||
path = dyn.NewPath(dyn.Key("databricks_schema")).Append(path[2:]...)
|
||||
case dyn.Key("clusters"):
|
||||
path = dyn.NewPath(dyn.Key("databricks_cluster")).Append(path[2:]...)
|
||||
case dyn.Key("dashboards"):
|
||||
path = dyn.NewPath(dyn.Key("databricks_dashboard")).Append(path[2:]...)
|
||||
default:
|
||||
// Trigger "key not found" for unknown resource types.
|
||||
return dyn.GetByPath(root, path)
|
||||
|
|
|
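The new "dashboards" case above lets bundle references of the form ${resources.dashboards.<key>.<attr>} resolve to Terraform addresses. A standalone sketch of the same prefix rewrite (simplified; the mapping table here is illustrative, not the mutator's actual implementation):

package main

import (
	"fmt"
	"strings"
)

// rewriteReference maps a bundle-level resource reference to its Terraform
// counterpart, mirroring the "dashboards" case added above.
func rewriteReference(ref string) string {
	mapping := map[string]string{
		"resources.clusters.":   "databricks_cluster.",
		"resources.dashboards.": "databricks_dashboard.",
	}
	for prefix, tf := range mapping {
		if rest, ok := strings.CutPrefix(ref, prefix); ok {
			return tf + rest
		}
	}
	return ref
}

func main() {
	fmt.Println(rewriteReference("resources.dashboards.other_dashboard.id"))
	// Output: databricks_dashboard.other_dashboard.id
}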
@ -32,6 +32,7 @@ func TestInterpolate(t *testing.T) {
|
|||
"other_registered_model": "${resources.registered_models.other_registered_model.id}",
|
||||
"other_schema": "${resources.schemas.other_schema.id}",
|
||||
"other_cluster": "${resources.clusters.other_cluster.id}",
|
||||
"other_dashboard": "${resources.dashboards.other_dashboard.id}",
|
||||
},
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
|
@ -69,6 +70,7 @@ func TestInterpolate(t *testing.T) {
|
|||
assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"])
|
||||
assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"])
|
||||
assert.Equal(t, "${databricks_cluster.other_cluster.id}", j.Tags["other_cluster"])
|
||||
assert.Equal(t, "${databricks_dashboard.other_dashboard.id}", j.Tags["other_dashboard"])
|
||||
|
||||
m := b.Config.Resources.Models["my_model"]
|
||||
assert.Equal(t, "my_model", m.Model.Name)
|
||||
|
|
|
@ -0,0 +1,109 @@
|
|||
package tfdyn
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle/internal/tf/schema"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/cli/libs/dyn/convert"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
)
|
||||
|
||||
const (
|
||||
filePathFieldName = "file_path"
|
||||
serializedDashboardFieldName = "serialized_dashboard"
|
||||
)
|
||||
|
||||
// Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output.
|
||||
func marshalSerializedDashboard(vin dyn.Value, vout dyn.Value) (dyn.Value, error) {
|
||||
// Skip if the "serialized_dashboard" field is already set.
|
||||
if v := vout.Get(serializedDashboardFieldName); v.IsValid() {
|
||||
return vout, nil
|
||||
}
|
||||
|
||||
// Skip if the "serialized_dashboard" field on the input is not set.
|
||||
v := vin.Get(serializedDashboardFieldName)
|
||||
if !v.IsValid() {
|
||||
return vout, nil
|
||||
}
|
||||
|
||||
// Marshal the "serialized_dashboard" field as JSON.
|
||||
data, err := json.Marshal(v.AsAny())
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, fmt.Errorf("failed to marshal serialized_dashboard: %w", err)
|
||||
}
|
||||
|
||||
// Set the "serialized_dashboard" field on the output.
|
||||
return dyn.Set(vout, serializedDashboardFieldName, dyn.V(string(data)))
|
||||
}
|
||||
|
||||
func convertDashboardResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
|
||||
var err error
|
||||
|
||||
// Normalize the output value to the target schema.
|
||||
vout, diags := convert.Normalize(schema.ResourceDashboard{}, vin)
|
||||
for _, diag := range diags {
|
||||
log.Debugf(ctx, "dashboard normalization diagnostic: %s", diag.Summary)
|
||||
}
|
||||
|
||||
// Include "serialized_dashboard" field if "file_path" is set.
|
||||
// Note: the Terraform resource supports "file_path" natively, but its
|
||||
// change detection mechanism doesn't work as expected at the time of writing (Sep 30).
|
||||
if path, ok := vout.Get(filePathFieldName).AsString(); ok {
|
||||
vout, err = dyn.Set(vout, serializedDashboardFieldName, dyn.V(fmt.Sprintf("${file(%q)}", path)))
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, fmt.Errorf("failed to set serialized_dashboard: %w", err)
|
||||
}
|
||||
// Drop the "file_path" field. It is mutually exclusive with "serialized_dashboard".
|
||||
vout, err = dyn.Walk(vout, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
|
||||
switch len(p) {
|
||||
case 0:
|
||||
return v, nil
|
||||
case 1:
|
||||
if p[0] == dyn.Key(filePathFieldName) {
|
||||
return v, dyn.ErrDrop
|
||||
}
|
||||
}
|
||||
|
||||
// Skip everything else.
|
||||
return v, dyn.ErrSkip
|
||||
})
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, fmt.Errorf("failed to drop file_path: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output.
|
||||
vout, err = marshalSerializedDashboard(vin, vout)
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, err
|
||||
}
|
||||
|
||||
return vout, nil
|
||||
}
|
||||
|
||||
type dashboardConverter struct{}
|
||||
|
||||
func (dashboardConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error {
|
||||
vout, err := convertDashboardResource(ctx, vin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the converted resource to the output.
|
||||
out.Dashboard[key] = vout.AsAny()
|
||||
|
||||
// Configure permissions for this resource.
|
||||
if permissions := convertPermissionsResource(ctx, vin); permissions != nil {
|
||||
permissions.DashboardId = fmt.Sprintf("${databricks_dashboard.%s.id}", key)
|
||||
out.Permissions["dashboard_"+key] = permissions
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
registerConverter("dashboards", dashboardConverter{})
|
||||
}
|
|
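Taken together, the two transformations above mean a dashboard ends up with exactly one "serialized_dashboard" value: either a Terraform file() reference or a JSON string. A minimal standalone sketch of that decision, using invented helper names:

package main

import (
	"encoding/json"
	"fmt"
)

// convert mirrors the two branches above: a file path becomes a Terraform
// file() reference, and a typed payload is marshalled to a JSON string.
func convert(filePath string, serialized any) (map[string]any, error) {
	out := map[string]any{}
	if filePath != "" {
		// %q quotes and escapes the path, which also covers Windows paths.
		out["serialized_dashboard"] = fmt.Sprintf("${file(%q)}", filePath)
		return out, nil
	}
	data, err := json.Marshal(serialized)
	if err != nil {
		return nil, err
	}
	out["serialized_dashboard"] = string(data)
	return out, nil
}

func main() {
	v, _ := convert("dashboard.lvdash.json", nil)
	fmt.Println(v["serialized_dashboard"]) // ${file("dashboard.lvdash.json")}

	v, _ = convert("", map[string]any{"pages": []any{}})
	fmt.Println(v["serialized_dashboard"]) // {"pages":[]}
}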
@ -0,0 +1,153 @@
|
|||
package tfdyn
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/bundle/internal/tf/schema"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/cli/libs/dyn/convert"
|
||||
"github.com/databricks/databricks-sdk-go/service/dashboards"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConvertDashboard(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
|
||||
DisplayName: "my dashboard",
|
||||
WarehouseId: "f00dcafe",
|
||||
ParentPath: "/some/path",
|
||||
},
|
||||
|
||||
EmbedCredentials: true,
|
||||
|
||||
Permissions: []resources.Permission{
|
||||
{
|
||||
Level: "CAN_VIEW",
|
||||
UserName: "jane@doe.com",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
vin, err := convert.FromTyped(src, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
out := schema.NewResources()
|
||||
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert equality on the job
|
||||
assert.Equal(t, map[string]any{
|
||||
"display_name": "my dashboard",
|
||||
"warehouse_id": "f00dcafe",
|
||||
"parent_path": "/some/path",
|
||||
"embed_credentials": true,
|
||||
}, out.Dashboard["my_dashboard"])
|
||||
|
||||
// Assert equality on the permissions
|
||||
assert.Equal(t, &schema.ResourcePermissions{
|
||||
DashboardId: "${databricks_dashboard.my_dashboard.id}",
|
||||
AccessControl: []schema.ResourcePermissionsAccessControl{
|
||||
{
|
||||
PermissionLevel: "CAN_VIEW",
|
||||
UserName: "jane@doe.com",
|
||||
},
|
||||
},
|
||||
}, out.Permissions["dashboard_my_dashboard"])
|
||||
}
|
||||
|
||||
func TestConvertDashboardFilePath(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
FilePath: "some/path",
|
||||
}
|
||||
|
||||
vin, err := convert.FromTyped(src, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
out := schema.NewResources()
|
||||
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert that the "serialized_dashboard" is included.
|
||||
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"serialized_dashboard": "${file(\"some/path\")}",
|
||||
})
|
||||
|
||||
// Assert that the "file_path" doesn't carry over.
|
||||
assert.NotSubset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"file_path": "some/path",
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertDashboardFilePathQuoted(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
FilePath: `C:\foo\bar\baz\dashboard.lvdash.json`,
|
||||
}
|
||||
|
||||
vin, err := convert.FromTyped(src, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
out := schema.NewResources()
|
||||
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert that the "serialized_dashboard" is included.
|
||||
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"serialized_dashboard": `${file("C:\\foo\\bar\\baz\\dashboard.lvdash.json")}`,
|
||||
})
|
||||
|
||||
// Assert that the "file_path" doesn't carry over.
|
||||
assert.NotSubset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"file_path": `C:\foo\bar\baz\dashboard.lvdash.json`,
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertDashboardSerializedDashboardString(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
SerializedDashboard: `{ "json": true }`,
|
||||
}
|
||||
|
||||
vin, err := convert.FromTyped(src, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
out := schema.NewResources()
|
||||
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert that the "serialized_dashboard" is included.
|
||||
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"serialized_dashboard": `{ "json": true }`,
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertDashboardSerializedDashboardAny(t *testing.T) {
|
||||
var src = resources.Dashboard{
|
||||
SerializedDashboard: map[string]any{
|
||||
"pages": []map[string]any{
|
||||
{
|
||||
"displayName": "New Page",
|
||||
"layout": []map[string]any{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
vin, err := convert.FromTyped(src, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
out := schema.NewResources()
|
||||
err = dashboardConverter{}.Convert(ctx, "my_dashboard", vin, out)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert that the "serialized_dashboard" is included.
|
||||
assert.Subset(t, out.Dashboard["my_dashboard"], map[string]any{
|
||||
"serialized_dashboard": `{"pages":[{"displayName":"New Page","layout":[]}]}`,
|
||||
})
|
||||
}
|
|
@ -13,7 +13,7 @@ import (
|
|||
|
||||
// Partial representation of the Terraform state file format.
|
||||
// We are only interested in the global version and serial numbers,
|
||||
// plus resource types, names, modes, and ids.
|
||||
// plus resource types, names, modes, IDs, and ETags (for dashboards).
|
||||
type resourcesState struct {
|
||||
Version int `json:"version"`
|
||||
Resources []stateResource `json:"resources"`
|
||||
|
@ -34,6 +34,7 @@ type stateResourceInstance struct {
|
|||
|
||||
type stateInstanceAttributes struct {
|
||||
ID string `json:"id"`
|
||||
ETag string `json:"etag,omitempty"`
|
||||
}
|
||||
|
||||
func ParseResourcesState(ctx context.Context, b *bundle.Bundle) (*resourcesState, error) {
|
||||
|
|
|
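Given these structs, a state file like the fake one used in the tests above decodes with nothing more than encoding/json. A self-contained sketch, with the field set trimmed to what the drift check reads:

package main

import (
	"encoding/json"
	"fmt"
)

// state is a trimmed-down, illustrative mirror of the structs above.
type state struct {
	Resources []struct {
		Type      string `json:"type"`
		Name      string `json:"name"`
		Instances []struct {
			Attributes struct {
				ID   string `json:"id"`
				ETag string `json:"etag"`
			} `json:"attributes"`
		} `json:"instances"`
	} `json:"resources"`
}

func main() {
	data := []byte(`{"resources": [{"type": "databricks_dashboard", "name": "dash1",
		"instances": [{"attributes": {"id": "id1", "etag": "1000"}}]}]}`)

	var s state
	if err := json.Unmarshal(data, &s); err != nil {
		panic(err)
	}
	for _, r := range s.Resources {
		if r.Type != "databricks_dashboard" {
			continue
		}
		fmt.Println(r.Name, r.Instances[0].Attributes.ID, r.Instances[0].Attributes.ETag)
	}
}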
@ -0,0 +1,20 @@
|
|||
package bundletest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Mutate(t *testing.T, b *bundle.Bundle, f func(v dyn.Value) (dyn.Value, error)) {
|
||||
diags := bundle.ApplyFunc(context.Background(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
err := b.Config.Mutate(f)
|
||||
require.NoError(t, err)
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, diags.Error())
|
||||
}
|
|
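A hypothetical test using this helper could look as follows; the import path (bundle/internal/bundletest) and the no-op mutation are assumptions for illustration:

package example_test

import (
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/internal/bundletest"
	"github.com/databricks/cli/libs/dyn"
)

func TestExample(t *testing.T) {
	b := &bundle.Bundle{}

	// Rewrite the dynamic configuration before applying mutators under
	// test; returning v unchanged keeps the bundle as-is.
	bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
		return v, nil
	})
}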
@ -36,3 +36,13 @@ func IsWorkspaceLibrary(library *compute.Library) bool {
|
|||
|
||||
return IsWorkspacePath(path)
|
||||
}
|
||||
|
||||
// IsVolumesPath returns true if the specified path indicates that
|
||||
// it should be interpreted as a Databricks Volumes path.
|
||||
func IsVolumesPath(path string) bool {
|
||||
return strings.HasPrefix(path, "/Volumes/")
|
||||
}
|
||||
|
||||
func IsWorkspaceSharedPath(path string) bool {
|
||||
return strings.HasPrefix(path, "/Workspace/Shared/")
|
||||
}
|
|
@ -31,3 +31,13 @@ func TestIsWorkspaceLibrary(t *testing.T) {
|
|||
// Empty.
|
||||
assert.False(t, IsWorkspaceLibrary(&compute.Library{}))
|
||||
}
|
||||
|
||||
func TestIsVolumesPath(t *testing.T) {
|
||||
// Absolute paths with particular prefixes.
|
||||
assert.True(t, IsVolumesPath("/Volumes/path/to/package"))
|
||||
|
||||
// Relative paths.
|
||||
assert.False(t, IsVolumesPath("myfile.txt"))
|
||||
assert.False(t, IsVolumesPath("./myfile.txt"))
|
||||
assert.False(t, IsVolumesPath("../myfile.txt"))
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
package paths
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/libraries"
|
||||
)
|
||||
|
||||
func CollectUniqueWorkspacePathPrefixes(workspace config.Workspace) []string {
|
||||
rootPath := workspace.RootPath
|
||||
paths := []string{}
|
||||
if !libraries.IsVolumesPath(rootPath) {
|
||||
paths = append(paths, rootPath)
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(rootPath, "/") {
|
||||
rootPath += "/"
|
||||
}
|
||||
|
||||
for _, p := range []string{
|
||||
workspace.ArtifactPath,
|
||||
workspace.FilePath,
|
||||
workspace.StatePath,
|
||||
workspace.ResourcePath,
|
||||
} {
|
||||
if libraries.IsVolumesPath(p) {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(p, rootPath) {
|
||||
continue
|
||||
}
|
||||
|
||||
paths = append(paths, p)
|
||||
}
|
||||
|
||||
return paths
|
||||
}
|
|
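In effect, the function returns the root path (unless it is a volume) plus any derived path that is neither a volume nor nested under the root. A standalone sketch with invented example paths:

package main

import (
	"fmt"
	"strings"
)

// uniquePrefixes mirrors CollectUniqueWorkspacePathPrefixes above for a
// root path and a set of derived paths.
func uniquePrefixes(root string, derived ...string) []string {
	out := []string{}
	if !strings.HasPrefix(root, "/Volumes/") {
		out = append(out, root)
	}
	if !strings.HasSuffix(root, "/") {
		root += "/"
	}
	for _, p := range derived {
		if strings.HasPrefix(p, "/Volumes/") || strings.HasPrefix(p, root) {
			continue
		}
		out = append(out, p)
	}
	return out
}

func main() {
	fmt.Println(uniquePrefixes(
		"/Users/foo@bar.com",
		"/Users/foo@bar.com/artifacts", // under the root: skipped
		"/Volumes/cat/sch/state",       // volume path: skipped
		"/Shared/files",                // outside the root: kept
	))
	// Output: [/Users/foo@bar.com /Shared/files]
}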
@ -39,6 +39,10 @@ var levelsMap = map[string](map[string]string){
|
|||
CAN_VIEW: "CAN_VIEW",
|
||||
CAN_RUN: "CAN_QUERY",
|
||||
},
|
||||
"dashboards": {
|
||||
CAN_MANAGE: "CAN_MANAGE",
|
||||
CAN_VIEW: "CAN_READ",
|
||||
},
|
||||
}
|
||||
|
||||
type bundlePermissions struct{}
|
||||
|
|
|
@ -3,9 +3,9 @@ package permissions
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/libraries"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
)
|
||||
|
||||
|
@ -21,17 +21,13 @@ func (*validateSharedRootPermissions) Name() string {
|
|||
}
|
||||
|
||||
func (*validateSharedRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
if isWorkspaceSharedRoot(b.Config.Workspace.RootPath) {
|
||||
if libraries.IsWorkspaceSharedPath(b.Config.Workspace.RootPath) {
|
||||
return isUsersGroupPermissionSet(b)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func isWorkspaceSharedRoot(path string) bool {
|
||||
return strings.HasPrefix(path, "/Workspace/Shared/")
|
||||
}
|
||||
|
||||
// isUsersGroupPermissionSet checks that the top-level permissions set for the bundle contain group_name: users with CAN_MANAGE permission.
|
||||
func isUsersGroupPermissionSet(b *bundle.Bundle) diag.Diagnostics {
|
||||
var diags diag.Diagnostics
|
||||
|
|
|
@ -0,0 +1,89 @@
|
|||
package permissions
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/databricks-sdk-go/service/workspace"
|
||||
)
|
||||
|
||||
type WorkspacePathPermissions struct {
|
||||
Path string
|
||||
Permissions []resources.Permission
|
||||
}
|
||||
|
||||
func ObjectAclToResourcePermissions(path string, acl []workspace.WorkspaceObjectAccessControlResponse) *WorkspacePathPermissions {
|
||||
permissions := make([]resources.Permission, 0)
|
||||
for _, a := range acl {
|
||||
// Skip the admin group because it's added to all resources by default.
|
||||
if a.GroupName == "admins" {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, pl := range a.AllPermissions {
|
||||
permissions = append(permissions, resources.Permission{
|
||||
Level: convertWorkspaceObjectPermissionLevel(pl.PermissionLevel),
|
||||
GroupName: a.GroupName,
|
||||
UserName: a.UserName,
|
||||
ServicePrincipalName: a.ServicePrincipalName,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return &WorkspacePathPermissions{Permissions: permissions, Path: path}
|
||||
}
|
||||
|
||||
func (p WorkspacePathPermissions) Compare(perms []resources.Permission) diag.Diagnostics {
|
||||
var diags diag.Diagnostics
|
||||
|
||||
// Check the permissions in the workspace and see if they are all set in the bundle.
|
||||
ok, missing := containsAll(p.Permissions, perms)
|
||||
if !ok {
|
||||
diags = diags.Append(diag.Diagnostic{
|
||||
Severity: diag.Warning,
|
||||
Summary: "untracked permissions apply to target workspace path",
|
||||
Detail: fmt.Sprintf("The following permissions apply to the workspace folder at %q but are not configured in the bundle:\n%s", p.Path, toString(missing)),
|
||||
})
|
||||
}
|
||||
|
||||
return diags
|
||||
}
|
||||
|
||||
// containsAll checks if permA contains all permissions in permB.
|
||||
func containsAll(permA []resources.Permission, permB []resources.Permission) (bool, []resources.Permission) {
|
||||
missing := make([]resources.Permission, 0)
|
||||
for _, a := range permA {
|
||||
found := false
|
||||
for _, b := range permB {
|
||||
if a == b {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
missing = append(missing, a)
|
||||
}
|
||||
}
|
||||
return len(missing) == 0, missing
|
||||
}
|
||||
|
||||
// convertWorkspaceObjectPermissionLevel converts matching object permission levels to bundle ones.
|
||||
// If there is no matching permission level, it returns the permission level as is, for example, CAN_EDIT.
|
||||
func convertWorkspaceObjectPermissionLevel(level workspace.WorkspaceObjectPermissionLevel) string {
|
||||
switch level {
|
||||
case workspace.WorkspaceObjectPermissionLevelCanRead:
|
||||
return CAN_VIEW
|
||||
default:
|
||||
return string(level)
|
||||
}
|
||||
}
|
||||
|
||||
func toString(p []resources.Permission) string {
|
||||
var sb strings.Builder
|
||||
for _, perm := range p {
|
||||
sb.WriteString(fmt.Sprintf("- %s\n", perm.String()))
|
||||
}
|
||||
return sb.String()
|
||||
}
|
|
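Note the direction of the comparison: the workspace ACL is checked for containment in the bundle's permissions, not vice versa, so extra bundle entries never warn while extra workspace entries do. A self-contained sketch of that subset check with a simplified permission type:

package main

import "fmt"

type perm struct{ Level, User string }

// containsAll reports whether every element of a appears in b, returning
// the missing elements; this mirrors the subset check above.
func containsAll(a, b []perm) (bool, []perm) {
	var missing []perm
	for _, x := range a {
		found := false
		for _, y := range b {
			if x == y {
				found = true
				break
			}
		}
		if !found {
			missing = append(missing, x)
		}
	}
	return len(missing) == 0, missing
}

func main() {
	workspace := []perm{{"CAN_MANAGE", "foo@bar.com"}, {"CAN_MANAGE", "other@bar.com"}}
	bundle := []perm{{"CAN_MANAGE", "foo@bar.com"}}

	ok, missing := containsAll(workspace, bundle)
	fmt.Println(ok, missing) // false [{CAN_MANAGE other@bar.com}]
}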
@ -0,0 +1,121 @@
|
|||
package permissions
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/databricks-sdk-go/service/workspace"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestWorkspacePathPermissionsCompare(t *testing.T) {
|
||||
testCases := []struct {
|
||||
perms []resources.Permission
|
||||
acl []workspace.WorkspaceObjectAccessControlResponse
|
||||
expected diag.Diagnostics
|
||||
}{
|
||||
{
|
||||
perms: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
acl: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
perms: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
acl: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
{
|
||||
GroupName: "admins",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
perms: []resources.Permission{
|
||||
{Level: CAN_VIEW, UserName: "foo@bar.com"},
|
||||
{Level: CAN_MANAGE, ServicePrincipalName: "sp.com"},
|
||||
},
|
||||
acl: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_READ"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
perms: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
acl: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
{
|
||||
GroupName: "foo",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: diag.Diagnostics{
|
||||
{
|
||||
Severity: diag.Warning,
|
||||
Summary: "untracked permissions apply to target workspace path",
|
||||
Detail: "The following permissions apply to the workspace folder at \"path\" but are not configured in the bundle:\n- level: CAN_MANAGE, group_name: foo\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
perms: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "foo@bar.com"},
|
||||
},
|
||||
acl: []workspace.WorkspaceObjectAccessControlResponse{
|
||||
{
|
||||
UserName: "foo2@bar.com",
|
||||
AllPermissions: []workspace.WorkspaceObjectPermission{
|
||||
{PermissionLevel: "CAN_MANAGE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: diag.Diagnostics{
|
||||
{
|
||||
Severity: diag.Warning,
|
||||
Summary: "untracked permissions apply to target workspace path",
|
||||
Detail: "The following permissions apply to the workspace folder at \"path\" but are not configured in the bundle:\n- level: CAN_MANAGE, user_name: foo2@bar.com\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
wp := ObjectAclToResourcePermissions("path", tc.acl)
|
||||
diags := wp.Compare(tc.perms)
|
||||
require.Equal(t, tc.expected, diags)
|
||||
}
|
||||
}
|
|
@ -5,8 +5,11 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/libraries"
|
||||
"github.com/databricks/cli/bundle/paths"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/databricks-sdk-go/service/workspace"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type workspaceRootPermissions struct {
|
||||
|
@ -34,7 +37,7 @@ func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error {
|
|||
permissions := make([]workspace.WorkspaceObjectAccessControlRequest, 0)
|
||||
|
||||
for _, p := range b.Config.Permissions {
|
||||
level, err := getWorkspaceObjectPermissionLevel(p.Level)
|
||||
level, err := GetWorkspaceObjectPermissionLevel(p.Level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -52,20 +55,39 @@ func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error {
|
|||
}
|
||||
|
||||
w := b.WorkspaceClient().Workspace
|
||||
obj, err := w.GetStatusByPath(ctx, b.Config.Workspace.RootPath)
|
||||
bundlePaths := paths.CollectUniqueWorkspacePathPrefixes(b.Config.Workspace)
|
||||
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
for _, p := range bundlePaths {
|
||||
g.Go(func() error {
|
||||
return setPermissions(ctx, w, p, permissions)
|
||||
})
|
||||
}
|
||||
|
||||
return g.Wait()
|
||||
}
|
||||
|
||||
func setPermissions(ctx context.Context, w workspace.WorkspaceInterface, path string, permissions []workspace.WorkspaceObjectAccessControlRequest) error {
|
||||
// If the folder is shared, we don't need to set permissions: they are always set for all users, and earlier mutators have already validated this.
|
||||
if libraries.IsWorkspaceSharedPath(path) {
|
||||
return nil
|
||||
}
|
||||
|
||||
obj, err := w.GetStatusByPath(ctx, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = w.UpdatePermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{
|
||||
_, err = w.SetPermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{
|
||||
WorkspaceObjectId: fmt.Sprint(obj.ObjectId),
|
||||
WorkspaceObjectType: "directories",
|
||||
AccessControlList: permissions,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func getWorkspaceObjectPermissionLevel(bundlePermission string) (workspace.WorkspaceObjectPermissionLevel, error) {
|
||||
func GetWorkspaceObjectPermissionLevel(bundlePermission string) (workspace.WorkspaceObjectPermissionLevel, error) {
|
||||
switch bundlePermission {
|
||||
case CAN_MANAGE:
|
||||
return workspace.WorkspaceObjectPermissionLevelCanManage, nil
|
||||
|
|
|
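The errgroup fan-out above applies the same ACL to every collected prefix concurrently and surfaces the first failure. A self-contained sketch of the pattern (it relies on Go 1.22 per-iteration loop variables, as the code above does):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func setPermissions(ctx context.Context, path string) error {
	fmt.Println("set permissions on", path)
	return nil
}

func main() {
	paths := []string{"/Some/Root/Path", "/Users/foo@bar.com/artifacts"}

	// One goroutine per prefix; the first error cancels the group's
	// context and is returned from Wait.
	g, ctx := errgroup.WithContext(context.Background())
	for _, p := range paths {
		g.Go(func() error {
			return setPermissions(ctx, p)
		})
	}
	if err := g.Wait(); err != nil {
		panic(err)
	}
}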
@ -22,6 +22,10 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) {
|
|||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
RootPath: "/Users/foo@bar.com",
|
||||
ArtifactPath: "/Users/foo@bar.com/artifacts",
|
||||
FilePath: "/Users/foo@bar.com/files",
|
||||
StatePath: "/Users/foo@bar.com/state",
|
||||
ResourcePath: "/Users/foo@bar.com/resources",
|
||||
},
|
||||
Permissions: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "TestUser"},
|
||||
|
@ -59,7 +63,7 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) {
|
|||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 1234,
|
||||
}, nil)
|
||||
workspaceApi.EXPECT().UpdatePermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
|
@ -72,3 +76,116 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) {
|
|||
diags := bundle.Apply(context.Background(), b, bundle.Seq(ValidateSharedRootPermissions(), ApplyWorkspaceRootPermissions()))
|
||||
require.Empty(t, diags)
|
||||
}
|
||||
|
||||
func TestApplyWorkspaceRootPermissionsForAllPaths(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
RootPath: "/Some/Root/Path",
|
||||
ArtifactPath: "/Users/foo@bar.com/artifacts",
|
||||
FilePath: "/Users/foo@bar.com/files",
|
||||
StatePath: "/Users/foo@bar.com/state",
|
||||
ResourcePath: "/Users/foo@bar.com/resources",
|
||||
},
|
||||
Permissions: []resources.Permission{
|
||||
{Level: CAN_MANAGE, UserName: "TestUser"},
|
||||
{Level: CAN_VIEW, GroupName: "TestGroup"},
|
||||
{Level: CAN_RUN, ServicePrincipalName: "TestServicePrincipal"},
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job_1": {JobSettings: &jobs.JobSettings{Name: "job_1"}},
|
||||
"job_2": {JobSettings: &jobs.JobSettings{Name: "job_2"}},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"pipeline_1": {PipelineSpec: &pipelines.PipelineSpec{}},
|
||||
"pipeline_2": {PipelineSpec: &pipelines.PipelineSpec{}},
|
||||
},
|
||||
Models: map[string]*resources.MlflowModel{
|
||||
"model_1": {Model: &ml.Model{}},
|
||||
"model_2": {Model: &ml.Model{}},
|
||||
},
|
||||
Experiments: map[string]*resources.MlflowExperiment{
|
||||
"experiment_1": {Experiment: &ml.Experiment{}},
|
||||
"experiment_2": {Experiment: &ml.Experiment{}},
|
||||
},
|
||||
ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
|
||||
"endpoint_1": {CreateServingEndpoint: &serving.CreateServingEndpoint{}},
|
||||
"endpoint_2": {CreateServingEndpoint: &serving.CreateServingEndpoint{}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
workspaceApi := m.GetMockWorkspaceAPI()
|
||||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Some/Root/Path").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 1,
|
||||
}, nil)
|
||||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/artifacts").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 2,
|
||||
}, nil)
|
||||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/files").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 3,
|
||||
}, nil)
|
||||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/state").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 4,
|
||||
}, nil)
|
||||
workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/resources").Return(&workspace.ObjectInfo{
|
||||
ObjectId: 5,
|
||||
}, nil)
|
||||
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
|
||||
},
|
||||
WorkspaceObjectId: "1",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(nil, nil)
|
||||
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
|
||||
},
|
||||
WorkspaceObjectId: "2",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(nil, nil)
|
||||
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
|
||||
},
|
||||
WorkspaceObjectId: "3",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(nil, nil)
|
||||
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
|
||||
},
|
||||
WorkspaceObjectId: "4",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(nil, nil)
|
||||
|
||||
workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
|
||||
AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
|
||||
{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
|
||||
{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
|
||||
{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
|
||||
},
|
||||
WorkspaceObjectId: "5",
|
||||
WorkspaceObjectType: "directories",
|
||||
}).Return(nil, nil)
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, ApplyWorkspaceRootPermissions())
|
||||
require.NoError(t, diags.Error())
|
||||
}
|
||||
|
|
|
@ -152,6 +152,7 @@ func Deploy(outputHandler sync.OutputHandler) bundle.Mutator {
|
|||
bundle.Defer(
|
||||
bundle.Seq(
|
||||
terraform.StatePull(),
|
||||
terraform.CheckDashboardsModifiedRemotely(),
|
||||
deploy.StatePull(),
|
||||
mutator.ValidateGitDetails(),
|
||||
artifacts.CleanUp(),
|
||||
|
|
|
@ -66,6 +66,7 @@ func Initialize() bundle.Mutator {
|
|||
permissions.PermissionDiagnostics(),
|
||||
mutator.SetRunAs(),
|
||||
mutator.OverrideCompute(),
|
||||
mutator.ConfigureDashboardDefaults(),
|
||||
mutator.ProcessTargetMode(),
|
||||
mutator.ApplyPresets(),
|
||||
mutator.DefaultQueueing(),
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
package resources
|
||||
|
||||
import "github.com/databricks/cli/bundle"
|
||||
|
||||
// Completions returns the same as [References] except
|
||||
// that every key maps directly to a single reference.
|
||||
func Completions(b *bundle.Bundle, filters ...Filter) map[string]Reference {
|
||||
out := make(map[string]Reference)
|
||||
keyOnlyRefs, _ := References(b, filters...)
|
||||
for k, refs := range keyOnlyRefs {
|
||||
if len(refs) != 1 {
|
||||
continue
|
||||
}
|
||||
out[k] = refs[0]
|
||||
}
|
||||
return out
|
||||
}
|
|
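A hypothetical shell-completion hook built on top of this function might look as follows (the helper name and the sorting are illustrative, not part of this commit):

package example

import (
	"sort"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/resources"
)

// completeResourceKeys offers every key that unambiguously identifies a
// resource in the bundle.
func completeResourceKeys(b *bundle.Bundle) []string {
	var out []string
	for key := range resources.Completions(b) {
		out = append(out, key)
	}
	sort.Strings(out)
	return out
}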
@ -0,0 +1,58 @@
|
|||
package resources
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCompletions_SkipDuplicates(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
"bar": {},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"foo": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Test that this skips duplicates and only includes unambiguous completions.
|
||||
out := Completions(b)
|
||||
if assert.Len(t, out, 1) {
|
||||
assert.Contains(t, out, "bar")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompletions_Filter(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"bar": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
includeJobs := func(ref Reference) bool {
|
||||
_, ok := ref.Resource.(*resources.Job)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Test that this does not include the pipeline.
|
||||
out := Completions(b, includeJobs)
|
||||
if assert.Len(t, out, 1) {
|
||||
assert.Contains(t, out, "foo")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,98 @@
|
|||
package resources
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
)
|
||||
|
||||
// Reference is a reference to a resource.
|
||||
// It includes the resource type description, and a reference to the resource itself.
|
||||
type Reference struct {
|
||||
// Key is the unique key of the resource, e.g. "my_job".
|
||||
Key string
|
||||
|
||||
// KeyWithType is the unique key of the resource, including the resource type, e.g. "jobs.my_job".
|
||||
KeyWithType string
|
||||
|
||||
// Description is the resource type description.
|
||||
Description config.ResourceDescription
|
||||
|
||||
// Resource is the resource itself.
|
||||
Resource config.ConfigResource
|
||||
}
|
||||
|
||||
// Map is the core type for resource lookup and completion.
|
||||
type Map map[string][]Reference
|
||||
|
||||
// Filter defines the function signature for filtering resources.
|
||||
type Filter func(Reference) bool
|
||||
|
||||
// includeReference checks if the specified reference passes all filters.
|
||||
// If the list of filters is empty, the reference is always included.
|
||||
func includeReference(filters []Filter, ref Reference) bool {
|
||||
for _, filter := range filters {
|
||||
if !filter(ref) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// References returns maps of resource keys to a slice of [Reference].
|
||||
//
|
||||
// The first map is indexed by the resource key only.
|
||||
// The second map is indexed by the resource type name and its key.
|
||||
//
|
||||
// While the return types allow for multiple resources to share the same key,
|
||||
// this is confirmed not to happen in the [validate.UniqueResourceKeys] mutator.
|
||||
func References(b *bundle.Bundle, filters ...Filter) (Map, Map) {
|
||||
keyOnly := make(Map)
|
||||
keyWithType := make(Map)
|
||||
|
||||
// Collect map of resource references indexed by their keys.
|
||||
for _, group := range b.Config.Resources.AllResources() {
|
||||
for k, v := range group.Resources {
|
||||
ref := Reference{
|
||||
Key: k,
|
||||
KeyWithType: fmt.Sprintf("%s.%s", group.Description.PluralName, k),
|
||||
Description: group.Description,
|
||||
Resource: v,
|
||||
}
|
||||
|
||||
// Skip resources that do not pass all filters.
|
||||
if !includeReference(filters, ref) {
|
||||
continue
|
||||
}
|
||||
|
||||
keyOnly[ref.Key] = append(keyOnly[ref.Key], ref)
|
||||
keyWithType[ref.KeyWithType] = append(keyWithType[ref.KeyWithType], ref)
|
||||
}
|
||||
}
|
||||
|
||||
return keyOnly, keyWithType
|
||||
}
|
||||
|
||||
// Lookup returns the resource with the specified key.
|
||||
// If the key maps to more than one resource, an error is returned.
|
||||
// If the key does not map to any resource, an error is returned.
|
||||
func Lookup(b *bundle.Bundle, key string, filters ...Filter) (Reference, error) {
|
||||
keyOnlyRefs, keyWithTypeRefs := References(b, filters...)
|
||||
refs, ok := keyOnlyRefs[key]
|
||||
if !ok {
|
||||
refs, ok = keyWithTypeRefs[key]
|
||||
if !ok {
|
||||
return Reference{}, fmt.Errorf("resource with key %q not found", key)
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(refs) == 1:
|
||||
return refs[0], nil
|
||||
case len(refs) > 1:
|
||||
return Reference{}, fmt.Errorf("multiple resources with key %q found", key)
|
||||
default:
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
|
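A hypothetical call site; Lookup already tries the bare key first and falls back to the type-qualified key, so callers can pass the user's argument through unchanged:

package example

import (
	"fmt"

	"github.com/databricks/cli/bundle"
	refs "github.com/databricks/cli/bundle/resources"
)

// resolve resolves a user-supplied argument to a single resource reference.
func resolve(b *bundle.Bundle, arg string) (refs.Reference, error) {
	ref, err := refs.Lookup(b, arg)
	if err != nil {
		return refs.Reference{}, err
	}
	fmt.Printf("resolved %q to %s\n", arg, ref.KeyWithType)
	return ref, nil
}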
@ -0,0 +1,117 @@
|
|||
package resources
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLookup_EmptyBundle(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Lookup(b, "foo")
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "resource with key \"foo\" not found")
|
||||
}
|
||||
|
||||
func TestLookup_NotFound(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
"bar": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Lookup(b, "qux")
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, `resource with key "qux" not found`)
|
||||
}
|
||||
|
||||
func TestLookup_MultipleFound(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"foo": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Lookup(b, "foo")
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, `multiple resources with key "foo" found`)
|
||||
}
|
||||
|
||||
func TestLookup_Nominal(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "Foo job",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Lookup by key only.
|
||||
out, err := Lookup(b, "foo")
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, "Foo job", out.Resource.GetName())
|
||||
}
|
||||
|
||||
// Lookup by type and key.
|
||||
out, err = Lookup(b, "jobs.foo")
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, "Foo job", out.Resource.GetName())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookup_NominalWithFilters(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"bar": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
includeJobs := func(ref Reference) bool {
|
||||
_, ok := ref.Resource.(*resources.Job)
|
||||
return ok
|
||||
}
|
||||
|
||||
// This should succeed because the filter includes jobs.
|
||||
_, err := Lookup(b, "foo", includeJobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// This should fail because the filter excludes pipelines.
|
||||
_, err = Lookup(b, "bar", includeJobs)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, `resource with key "bar" not found`)
|
||||
}
|
|
@ -1,69 +0,0 @@
|
|||
package run
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
// RunnerLookup maps identifiers to a list of workloads that match that identifier.
|
||||
// The list can have more than 1 entry if resources of different types use the
|
||||
// same key. When this happens, the user should disambiguate between them.
|
||||
type RunnerLookup map[string][]Runner
|
||||
|
||||
// ResourceKeys computes a map of runners indexed by key alone, and a second map indexed by type-qualified key.
|
||||
func ResourceKeys(b *bundle.Bundle) (keyOnly RunnerLookup, keyWithType RunnerLookup) {
|
||||
keyOnly = make(RunnerLookup)
|
||||
keyWithType = make(RunnerLookup)
|
||||
|
||||
r := b.Config.Resources
|
||||
for k, v := range r.Jobs {
|
||||
kt := fmt.Sprintf("jobs.%s", k)
|
||||
w := jobRunner{key: key(kt), bundle: b, job: v}
|
||||
keyOnly[k] = append(keyOnly[k], &w)
|
||||
keyWithType[kt] = append(keyWithType[kt], &w)
|
||||
}
|
||||
for k, v := range r.Pipelines {
|
||||
kt := fmt.Sprintf("pipelines.%s", k)
|
||||
w := pipelineRunner{key: key(kt), bundle: b, pipeline: v}
|
||||
keyOnly[k] = append(keyOnly[k], &w)
|
||||
keyWithType[kt] = append(keyWithType[kt], &w)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ResourceCompletionMap returns a map of resource keys to their respective names.
|
||||
func ResourceCompletionMap(b *bundle.Bundle) map[string]string {
|
||||
out := make(map[string]string)
|
||||
keyOnly, keyWithType := ResourceKeys(b)
|
||||
|
||||
// Keep track of resources we have seen by their fully qualified key.
|
||||
seen := make(map[string]bool)
|
||||
|
||||
// First add resources that can be identified by key alone.
|
||||
for k, v := range keyOnly {
|
||||
// Invariant: len(v) >= 1. See [ResourceKeys].
|
||||
if len(v) == 1 {
|
||||
seen[v[0].Key()] = true
|
||||
out[k] = v[0].Name()
|
||||
}
|
||||
}
|
||||
|
||||
// Then add resources that can only be identified by their type and key.
|
||||
for k, v := range keyWithType {
|
||||
// Invariant: len(v) == 1. See [ResourceKeys].
|
||||
_, ok := seen[v[0].Key()]
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
out[k] = v[0].Name()
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// ResourceCompletions returns a list of keys that unambiguously reference resources in the bundle.
|
||||
func ResourceCompletions(b *bundle.Bundle) []string {
|
||||
return maps.Keys(ResourceCompletionMap(b))
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
package run
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestResourceCompletionsUnique(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
"bar": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
assert.ElementsMatch(t, []string{"foo", "bar"}, ResourceCompletions(b))
|
||||
}
|
|
@ -3,9 +3,10 @@ package run
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
refs "github.com/databricks/cli/bundle/resources"
|
||||
"github.com/databricks/cli/bundle/run/output"
|
||||
)
|
||||
|
||||
|
@ -38,34 +39,24 @@ type Runner interface {
|
|||
argsHandler
|
||||
}
|
||||
|
||||
// Find locates a runner matching the specified argument.
|
||||
//
|
||||
// Its behavior is as follows:
|
||||
// 1. Try to find a resource with <key> identical to the argument.
|
||||
// 2. Try to find a resource with <type>.<key> identical to the argument.
|
||||
//
|
||||
// If an argument resolves to multiple resources, it returns an error.
|
||||
func Find(b *bundle.Bundle, arg string) (Runner, error) {
|
||||
keyOnly, keyWithType := ResourceKeys(b)
|
||||
if len(keyWithType) == 0 {
|
||||
return nil, fmt.Errorf("bundle defines no resources")
|
||||
}
|
||||
|
||||
runners, ok := keyOnly[arg]
|
||||
if !ok {
|
||||
runners, ok = keyWithType[arg]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no such resource: %s", arg)
|
||||
// IsRunnable returns a filter that only allows runnable resources.
|
||||
func IsRunnable(ref refs.Reference) bool {
|
||||
switch ref.Resource.(type) {
|
||||
case *resources.Job, *resources.Pipeline:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if len(runners) != 1 {
|
||||
var keys []string
|
||||
for _, runner := range runners {
|
||||
keys = append(keys, runner.Key())
|
||||
// ToRunner converts a resource reference to a runnable resource.
|
||||
func ToRunner(b *bundle.Bundle, ref refs.Reference) (Runner, error) {
|
||||
switch resource := ref.Resource.(type) {
|
||||
case *resources.Job:
|
||||
return &jobRunner{key: key(ref.KeyWithType), bundle: b, job: resource}, nil
|
||||
case *resources.Pipeline:
|
||||
return &pipelineRunner{key: key(ref.KeyWithType), bundle: b, pipeline: resource}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported resource type: %T", resource)
|
||||
}
|
||||
return nil, fmt.Errorf("ambiguous: %s (can resolve to all of %s)", arg, strings.Join(keys, ", "))
|
||||
}
|
||||
|
||||
return runners[0], nil
|
||||
}
|
||||
|
|
|
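With Find removed, callers are expected to compose the pieces themselves: look up the key among runnable resources only, then convert the reference to a runner. A hedged sketch of such a call site (the helper name is invented; the command layer presumably does something similar):

package example

import (
	"github.com/databricks/cli/bundle"
	refs "github.com/databricks/cli/bundle/resources"
	"github.com/databricks/cli/bundle/run"
)

// findRunner is a hypothetical stand-in for the removed Find: it restricts
// the lookup to runnable resources, then converts the result to a runner.
func findRunner(b *bundle.Bundle, arg string) (run.Runner, error) {
	ref, err := refs.Lookup(b, arg, run.IsRunnable)
	if err != nil {
		return nil, err
	}
	return run.ToRunner(b, ref)
}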
@@ -3,82 +3,14 @@ package run

 import (
 	"testing"

-	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
+	refs "github.com/databricks/cli/bundle/resources"
 	"github.com/stretchr/testify/assert"
 )

-func TestFindNoResources(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Resources: config.Resources{},
-		},
-	}
-
-	_, err := Find(b, "foo")
-	assert.ErrorContains(t, err, "bundle defines no resources")
-}
-
-func TestFindSingleArg(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Resources: config.Resources{
-				Jobs: map[string]*resources.Job{
-					"foo": {},
-				},
-			},
-		},
-	}
-
-	_, err := Find(b, "foo")
-	assert.NoError(t, err)
-}
-
-func TestFindSingleArgNotFound(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Resources: config.Resources{
-				Jobs: map[string]*resources.Job{
-					"foo": {},
-				},
-			},
-		},
-	}
-
-	_, err := Find(b, "bar")
-	assert.ErrorContains(t, err, "no such resource: bar")
-}
-
-func TestFindSingleArgAmbiguous(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Resources: config.Resources{
-				Jobs: map[string]*resources.Job{
-					"key": {},
-				},
-				Pipelines: map[string]*resources.Pipeline{
-					"key": {},
-				},
-			},
-		},
-	}
-
-	_, err := Find(b, "key")
-	assert.ErrorContains(t, err, "ambiguous: ")
-}
-
-func TestFindSingleArgWithType(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Resources: config.Resources{
-				Jobs: map[string]*resources.Job{
-					"key": {},
-				},
-			},
-		},
-	}
-
-	_, err := Find(b, "jobs.key")
-	assert.NoError(t, err)
-}
+func TestRunner_IsRunnable(t *testing.T) {
+	assert.True(t, IsRunnable(refs.Reference{Resource: &resources.Job{}}))
+	assert.True(t, IsRunnable(refs.Reference{Resource: &resources.Pipeline{}}))
+	assert.False(t, IsRunnable(refs.Reference{Resource: &resources.MlflowModel{}}))
+	assert.False(t, IsRunnable(refs.Reference{Resource: &resources.MlflowExperiment{}}))
+}
@@ -180,6 +180,48 @@
      }
    ]
  },
  "resources.Dashboard": {
    "anyOf": [
      {
        "type": "object",
        "properties": {
          "display_name": {
            "description": "The display name of the dashboard.",
            "$ref": "#/$defs/string"
          },
          "embed_credentials": {
            "$ref": "#/$defs/bool"
          },
          "file_path": {
            "$ref": "#/$defs/string"
          },
          "parent_path": {
            "description": "The workspace path of the folder containing the dashboard. Includes leading slash and no\ntrailing slash.\nThis field is excluded in List Dashboards responses.",
            "$ref": "#/$defs/string"
          },
          "permissions": {
            "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
          },
          "serialized_dashboard": {
            "description": "The contents of the dashboard in serialized string form.\nThis field is excluded in List Dashboards responses.\nUse the [get dashboard API](https://docs.databricks.com/api/workspace/lakeview/get)\nto retrieve an example response, which includes the `serialized_dashboard` field.\nThis field provides the structure of the JSON string that represents the dashboard's\nlayout and components.",
            "$ref": "#/$defs/interface"
          },
          "warehouse_id": {
            "description": "The warehouse ID used to run the dashboard.",
            "$ref": "#/$defs/string"
          }
        },
        "additionalProperties": false,
        "required": [
          "display_name"
        ]
      },
      {
        "type": "string",
        "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
      }
    ]
  },
  "resources.Grant": {
    "anyOf": [
      {
@@ -1054,6 +1096,9 @@
  "clusters": {
    "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster"
  },
  "dashboards": {
    "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Dashboard"
  },
  "experiments": {
    "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment"
  },
@@ -5292,6 +5337,20 @@
      }
    ]
  },
  "resources.Dashboard": {
    "anyOf": [
      {
        "type": "object",
        "additionalProperties": {
          "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Dashboard"
        }
      },
      {
        "type": "string",
        "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
      }
    ]
  },
  "resources.Job": {
    "anyOf": [
      {
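Note the string alternative in these anyOf entries: every resource block can also be written as a ${var...} reference. A small self-contained check of what that pattern accepts (the example strings are written here for illustration and are not from the schema):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The variable-reference pattern used by the schema above.
	re := regexp.MustCompile(`\$\{(var(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)+)\}`)

	for _, s := range []string{
		"${var.warehouse_id}", // matches: simple variable reference
		"${var.lookup.field}", // matches: nested lookup
		"${var.items[0]}",     // matches: indexed lookup
		"${workspace.host}",   // no match: only var.* references are allowed here
	} {
		fmt.Printf("%-22s -> %v\n", s, re.MatchString(s))
	}
}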
@@ -27,5 +27,6 @@ func New() *cobra.Command {
	cmd.AddCommand(newGenerateCommand())
	cmd.AddCommand(newDebugCommand())
	cmd.AddCommand(deployment.NewDeploymentCommand())
	cmd.AddCommand(newOpenCommand())
	return cmd
}
@@ -16,6 +16,7 @@ func newGenerateCommand() *cobra.Command {

	cmd.AddCommand(generate.NewGenerateJobCommand())
	cmd.AddCommand(generate.NewGeneratePipelineCommand())
	cmd.AddCommand(generate.NewGenerateDashboardCommand())
	cmd.PersistentFlags().StringVar(&key, "key", "", `resource key to use for the generated configuration`)
	return cmd
}
@@ -0,0 +1,467 @@
package generate

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/generate"
	"github.com/databricks/cli/bundle/deploy/terraform"
	"github.com/databricks/cli/bundle/phases"
	"github.com/databricks/cli/bundle/render"
	"github.com/databricks/cli/bundle/resources"
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/yamlsaver"
	"github.com/databricks/cli/libs/textutil"
	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/apierr"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
	"github.com/databricks/databricks-sdk-go/service/workspace"
	"github.com/spf13/cobra"
	"golang.org/x/exp/maps"
	"gopkg.in/yaml.v3"
)

type dashboard struct {
	// Lookup flags for one-time generate.
	existingPath string
	existingID   string

	// Lookup flag for existing bundle resource.
	resource string

	// Where to write the configuration and dashboard representation.
	resourceDir  string
	dashboardDir string

	// Force overwrite of existing files.
	force bool

	// Watch for changes to the dashboard.
	watch bool

	// Relative path from the resource directory to the dashboard directory.
	relativeDashboardDir string
}

func (d *dashboard) resolveID(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) {
	switch {
	case d.existingPath != "":
		return d.resolveFromPath(ctx, b)
	case d.existingID != "":
		return d.resolveFromID(ctx, b)
	}

	return "", diag.Errorf("expected one of --dashboard-path, --dashboard-id")
}

func (d *dashboard) resolveFromPath(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) {
	w := b.WorkspaceClient()
	obj, err := w.Workspace.GetStatusByPath(ctx, d.existingPath)
	if err != nil {
		if apierr.IsMissing(err) {
			return "", diag.Errorf("dashboard %q not found", path.Base(d.existingPath))
		}

		// Emit a more descriptive error message for legacy dashboards.
		if errors.Is(err, apierr.ErrBadRequest) && strings.HasPrefix(err.Error(), "dbsqlDashboard ") {
			return "", diag.Diagnostics{
				{
					Severity: diag.Error,
					Summary:  fmt.Sprintf("dashboard %q is a legacy dashboard", path.Base(d.existingPath)),
					Detail: "" +
						"Databricks Asset Bundles work exclusively with AI/BI dashboards.\n" +
						"\n" +
						"Instructions on how to convert a legacy dashboard to an AI/BI dashboard\n" +
						"can be found at: https://docs.databricks.com/en/dashboards/clone-legacy-to-aibi.html.",
				},
			}
		}

		return "", diag.FromErr(err)
	}

	if obj.ObjectType != workspace.ObjectTypeDashboard {
		found := strings.ToLower(obj.ObjectType.String())
		return "", diag.Diagnostics{
			{
				Severity: diag.Error,
				Summary:  fmt.Sprintf("expected a dashboard, found a %s", found),
			},
		}
	}

	if obj.ResourceId == "" {
		return "", diag.Diagnostics{
			{
				Severity: diag.Error,
				Summary:  "expected a non-empty dashboard resource ID",
			},
		}
	}

	return obj.ResourceId, nil
}

func (d *dashboard) resolveFromID(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) {
	w := b.WorkspaceClient()
	obj, err := w.Lakeview.GetByDashboardId(ctx, d.existingID)
	if err != nil {
		if apierr.IsMissing(err) {
			return "", diag.Errorf("dashboard with ID %s not found", d.existingID)
		}
		return "", diag.FromErr(err)
	}

	return obj.DashboardId, nil
}

func remarshalJSON(data []byte) ([]byte, error) {
	var tmp any
	var err error
	err = json.Unmarshal(data, &tmp)
	if err != nil {
		return nil, err
	}

	// Remarshal the data to ensure its formatting is stable.
	// The result will have alphabetically sorted keys and be indented.
	// HTML escaping is disabled to retain characters such as &, <, and >.
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetIndent("", "  ")
	enc.SetEscapeHTML(false)
	err = enc.Encode(tmp)
	if err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

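// Aside (illustrative, not part of this file): remarshalJSON stabilizes the
// formatting of the serialized dashboard so generated files diff cleanly.
// Assuming the input round-trips through a generic map, its effect is:
//
//	out, _ := remarshalJSON([]byte(`{"b":1,"a":"<&>"}`))
//	// out is indented with two spaces, keys are sorted alphabetically
//	// (encoding/json sorts map keys), and "<&>" is kept as-is because
//	// SetEscapeHTML(false) suppresses \u003c-style escaping.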
func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, filename string) error {
	// Unmarshal and remarshal the serialized dashboard to ensure it is formatted correctly.
	// The result will have alphabetically sorted keys and be indented.
	data, err := remarshalJSON([]byte(dashboard.SerializedDashboard))
	if err != nil {
		return err
	}

	// Make sure the output directory exists.
	if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil {
		return err
	}

	// Clean the filename to ensure it is a valid path (and can be used on this OS).
	filename = filepath.Clean(filename)

	// Attempt to make the path relative to the bundle root.
	rel, err := filepath.Rel(b.BundleRootPath, filename)
	if err != nil {
		rel = filename
	}

	// Verify that the file does not already exist.
	info, err := os.Stat(filename)
	if err == nil {
		if info.IsDir() {
			return fmt.Errorf("%s is a directory", rel)
		}
		if !d.force {
			return fmt.Errorf("%s already exists. Use --force to overwrite", rel)
		}
	}

	fmt.Printf("Writing dashboard to %q\n", rel)
	return os.WriteFile(filename, data, 0644)
}

func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, key string) error {
	// Save serialized dashboard definition to the dashboard directory.
	dashboardBasename := fmt.Sprintf("%s.lvdash.json", key)
	dashboardPath := filepath.Join(d.dashboardDir, dashboardBasename)
	err := d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath)
	if err != nil {
		return err
	}

	// Synthesize resource configuration.
	v, err := generate.ConvertDashboardToValue(dashboard, path.Join(d.relativeDashboardDir, dashboardBasename))
	if err != nil {
		return err
	}

	result := map[string]dyn.Value{
		"resources": dyn.V(map[string]dyn.Value{
			"dashboards": dyn.V(map[string]dyn.Value{
				key: v,
			}),
		}),
	}

	// Make sure the output directory exists.
	if err := os.MkdirAll(d.resourceDir, 0755); err != nil {
		return err
	}

	// Save the configuration to the resource directory.
	resourcePath := filepath.Join(d.resourceDir, fmt.Sprintf("%s.dashboard.yml", key))
	saver := yamlsaver.NewSaverWithStyle(map[string]yaml.Style{
		"display_name": yaml.DoubleQuotedStyle,
	})

	// Attempt to make the path relative to the bundle root.
	rel, err := filepath.Rel(b.BundleRootPath, resourcePath)
	if err != nil {
		rel = resourcePath
	}

	fmt.Printf("Writing configuration to %q\n", rel)
	err = saver.SaveAsYAML(result, resourcePath, d.force)
	if err != nil {
		return err
	}

	return nil
}

func waitForChanges(ctx context.Context, w *databricks.WorkspaceClient, dashboard *dashboards.Dashboard) diag.Diagnostics {
	// Compute [time.Time] for the most recent update.
	tref, err := time.Parse(time.RFC3339, dashboard.UpdateTime)
	if err != nil {
		return diag.FromErr(err)
	}

	for {
		obj, err := w.Workspace.GetStatusByPath(ctx, dashboard.Path)
		if err != nil {
			return diag.FromErr(err)
		}

		// Compute [time.Time] from timestamp in millis since epoch.
		tcur := time.Unix(0, obj.ModifiedAt*int64(time.Millisecond))
		if tcur.After(tref) {
			break
		}

		time.Sleep(1 * time.Second)
	}

	return nil
}

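// Aside (illustrative, not part of this file): the workspace API reports
// ModifiedAt in milliseconds since the epoch, hence the conversion above:
//
//	tcur := time.Unix(0, obj.ModifiedAt*int64(time.Millisecond))
//	// e.g. ModifiedAt = 1_700_000_000_000 yields 2023-11-14T22:13:20Z
//
// Comparing tcur against the RFC 3339 UpdateTime keeps the poll loop cheap:
// the status call returns metadata only, not the serialized dashboard.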
func (d *dashboard) updateDashboardForResource(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	resource, ok := b.Config.Resources.Dashboards[d.resource]
	if !ok {
		return diag.Errorf("dashboard resource %q is not defined", d.resource)
	}

	if resource.FilePath == "" {
		return diag.Errorf("dashboard resource %q has no file path defined", d.resource)
	}

	// Resolve the dashboard ID from the resource.
	dashboardID := resource.ID

	// Overwrite the dashboard at the path referenced from the resource.
	dashboardPath := resource.FilePath

	w := b.WorkspaceClient()

	// Start polling the underlying dashboard for changes.
	var etag string
	for {
		dashboard, err := w.Lakeview.GetByDashboardId(ctx, dashboardID)
		if err != nil {
			return diag.FromErr(err)
		}

		if etag != dashboard.Etag {
			err = d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath)
			if err != nil {
				return diag.FromErr(err)
			}
		}

		// Abort if we are not watching for changes.
		if !d.watch {
			return nil
		}

		// Update the etag for the next iteration.
		etag = dashboard.Etag

		// Now poll the workspace API for changes.
		// This is much more efficient than polling the dashboard API, because the
		// latter returns the entire serialized dashboard whereas we're only
		// interested in the dashboard's last modified time here.
		waitForChanges(ctx, w, dashboard)
	}
}

func (d *dashboard) generateForExisting(ctx context.Context, b *bundle.Bundle, dashboardID string) diag.Diagnostics {
	w := b.WorkspaceClient()
	dashboard, err := w.Lakeview.GetByDashboardId(ctx, dashboardID)
	if err != nil {
		return diag.FromErr(err)
	}

	key := textutil.NormalizeString(dashboard.DisplayName)
	err = d.saveConfiguration(ctx, b, dashboard, key)
	if err != nil {
		return diag.FromErr(err)
	}

	return nil
}

func (d *dashboard) initialize(b *bundle.Bundle) diag.Diagnostics {
	// Make the paths absolute if they aren't already.
	if !filepath.IsAbs(d.resourceDir) {
		d.resourceDir = filepath.Join(b.BundleRootPath, d.resourceDir)
	}
	if !filepath.IsAbs(d.dashboardDir) {
		d.dashboardDir = filepath.Join(b.BundleRootPath, d.dashboardDir)
	}

	// Make sure we know how the dashboard path is relative to the resource path.
	rel, err := filepath.Rel(d.resourceDir, d.dashboardDir)
	if err != nil {
		return diag.FromErr(err)
	}

	d.relativeDashboardDir = filepath.ToSlash(rel)
	return nil
}

func (d *dashboard) runForResource(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	diags := bundle.Apply(ctx, b, bundle.Seq(
		phases.Initialize(),
		terraform.Interpolate(),
		terraform.Write(),
		terraform.StatePull(),
		terraform.Load(),
	))
	if diags.HasError() {
		return diags
	}

	return d.updateDashboardForResource(ctx, b)
}

func (d *dashboard) runForExisting(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	// Resolve the ID of the dashboard to generate configuration for.
	dashboardID, diags := d.resolveID(ctx, b)
	if diags.HasError() {
		return diags
	}

	return d.generateForExisting(ctx, b, dashboardID)
}

func (d *dashboard) RunE(cmd *cobra.Command, args []string) error {
	ctx := cmd.Context()
	b, diags := root.MustConfigureBundle(cmd)
	if diags.HasError() {
		return diags.Error()
	}

	diags = d.initialize(b)
	if diags.HasError() {
		return diags.Error()
	}

	if d.resource != "" {
		diags = d.runForResource(ctx, b)
	} else {
		diags = d.runForExisting(ctx, b)
	}

	renderOpts := render.RenderOptions{RenderSummaryTable: false}
	err := render.RenderDiagnostics(cmd.OutOrStdout(), b, diags, renderOpts)
	if err != nil {
		return fmt.Errorf("failed to render output: %w", err)
	}

	if diags.HasError() {
		return root.ErrAlreadyPrinted
	}

	return nil
}

// filterDashboards returns a filter that only includes dashboards.
func filterDashboards(ref resources.Reference) bool {
	return ref.Description.SingularName == "dashboard"
}

// dashboardResourceCompletion executes to autocomplete the argument to the resource flag.
func dashboardResourceCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	b, diags := root.MustConfigureBundle(cmd)
	if err := diags.Error(); err != nil {
		cobra.CompErrorln(err.Error())
		return nil, cobra.ShellCompDirectiveError
	}

	if b == nil {
		return nil, cobra.ShellCompDirectiveNoFileComp
	}

	return maps.Keys(resources.Completions(b, filterDashboards)), cobra.ShellCompDirectiveNoFileComp
}

func NewGenerateDashboardCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "dashboard",
		Short: "Generate configuration for a dashboard",
	}

	d := &dashboard{}

	// Lookup flags.
	cmd.Flags().StringVar(&d.existingPath, "existing-path", "", `workspace path of the dashboard to generate configuration for`)
	cmd.Flags().StringVar(&d.existingID, "existing-id", "", `ID of the dashboard to generate configuration for`)
	cmd.Flags().StringVar(&d.resource, "resource", "", `resource key of dashboard to watch for changes`)

	// Alias lookup flags that include the resource type name.
	// Included for symmetry with the other generate commands, but we prefer the shorter flags.
	cmd.Flags().StringVar(&d.existingPath, "existing-dashboard-path", "", `workspace path of the dashboard to generate configuration for`)
	cmd.Flags().StringVar(&d.existingID, "existing-dashboard-id", "", `ID of the dashboard to generate configuration for`)
	cmd.Flags().MarkHidden("existing-dashboard-path")
	cmd.Flags().MarkHidden("existing-dashboard-id")

	// Output flags.
	cmd.Flags().StringVarP(&d.resourceDir, "resource-dir", "d", "./resources", `directory to write the configuration to`)
	cmd.Flags().StringVarP(&d.dashboardDir, "dashboard-dir", "s", "./src", `directory to write the dashboard representation to`)
	cmd.Flags().BoolVarP(&d.force, "force", "f", false, `force overwrite existing files in the output directory`)

	// Exactly one of the lookup flags must be provided.
	cmd.MarkFlagsOneRequired(
		"existing-path",
		"existing-id",
		"resource",
	)

	// Watch flag. This is relevant only in combination with the resource flag.
	cmd.Flags().BoolVar(&d.watch, "watch", false, `watch for changes to the dashboard and update the configuration`)

	// Make sure the watch flag is only used with the resource flag.
	cmd.MarkFlagsMutuallyExclusive("watch", "existing-path")
	cmd.MarkFlagsMutuallyExclusive("watch", "existing-id")

	// Completion for the resource flag.
	cmd.RegisterFlagCompletionFunc("resource", dashboardResourceCompletion)

	cmd.RunE = d.RunE
	return cmd
}
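The command defers flag validation to cobra instead of checking combinations by hand. A standalone sketch of the same constraint wiring (toy command for illustration; MarkFlagsOneRequired assumes cobra v1.8 or newer):

package main

import "github.com/spf13/cobra"

func main() {
	var path, id, resource string
	var watch bool

	cmd := &cobra.Command{
		Use:  "demo",
		RunE: func(*cobra.Command, []string) error { return nil },
	}
	cmd.Flags().StringVar(&path, "existing-path", "", "one-time lookup by path")
	cmd.Flags().StringVar(&id, "existing-id", "", "one-time lookup by ID")
	cmd.Flags().StringVar(&resource, "resource", "", "lookup by bundle resource key")
	cmd.Flags().BoolVar(&watch, "watch", false, "watch for changes")

	// At least one of the lookup flags must be set...
	cmd.MarkFlagsOneRequired("existing-path", "existing-id", "resource")
	// ...and --watch cannot be combined with the one-time lookups.
	cmd.MarkFlagsMutuallyExclusive("watch", "existing-path")
	cmd.MarkFlagsMutuallyExclusive("watch", "existing-id")

	_ = cmd.Execute()
}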
@@ -0,0 +1,182 @@
package generate

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/databricks-sdk-go/apierr"
	"github.com/databricks/databricks-sdk-go/experimental/mocks"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
	"github.com/databricks/databricks-sdk-go/service/workspace"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestDashboard_ErrorOnLegacyDashboard(t *testing.T) {
	// Response to a GetStatus request on a path pointing to a legacy dashboard.
	//
	// < HTTP/2.0 400 Bad Request
	// < {
	// <   "error_code": "BAD_REQUEST",
	// <   "message": "dbsqlDashboard is not user-facing."
	// < }

	d := dashboard{
		existingPath: "/path/to/legacy dashboard",
	}

	m := mocks.NewMockWorkspaceClient(t)
	w := m.GetMockWorkspaceAPI()
	w.On("GetStatusByPath", mock.Anything, "/path/to/legacy dashboard").Return(nil, &apierr.APIError{
		StatusCode: 400,
		ErrorCode:  "BAD_REQUEST",
		Message:    "dbsqlDashboard is not user-facing.",
	})

	ctx := context.Background()
	b := &bundle.Bundle{}
	b.SetWorkpaceClient(m.WorkspaceClient)

	_, diags := d.resolveID(ctx, b)
	require.Len(t, diags, 1)
	assert.Equal(t, diags[0].Summary, "dashboard \"legacy dashboard\" is a legacy dashboard")
}

func TestDashboard_ExistingID_Nominal(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	dashboardsAPI := m.GetMockLakeviewAPI()
	dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(&dashboards.Dashboard{
		DashboardId:         "f00dcafe",
		DisplayName:         "This is a test dashboard",
		SerializedDashboard: `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`,
		WarehouseId:         "w4r3h0us3",
	}, nil)

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-id").Value.Set("f00dcafe")

	err := cmd.RunE(cmd, []string{})
	require.NoError(t, err)

	// Assert the contents of the generated configuration
	data, err := os.ReadFile(filepath.Join(root, "resources", "this_is_a_test_dashboard.dashboard.yml"))
	require.NoError(t, err)
	assert.Equal(t, `resources:
  dashboards:
    this_is_a_test_dashboard:
      display_name: "This is a test dashboard"
      warehouse_id: w4r3h0us3
      file_path: ../src/this_is_a_test_dashboard.lvdash.json
`, string(data))

	data, err = os.ReadFile(filepath.Join(root, "src", "this_is_a_test_dashboard.lvdash.json"))
	require.NoError(t, err)
	assert.JSONEq(t, `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`, string(data))
}

func TestDashboard_ExistingID_NotFound(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	dashboardsAPI := m.GetMockLakeviewAPI()
	dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(nil, &apierr.APIError{
		StatusCode: 404,
	})

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-id").Value.Set("f00dcafe")

	err := cmd.RunE(cmd, []string{})
	require.Error(t, err)
}

func TestDashboard_ExistingPath_Nominal(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	workspaceAPI := m.GetMockWorkspaceAPI()
	workspaceAPI.EXPECT().GetStatusByPath(mock.Anything, "/path/to/dashboard").Return(&workspace.ObjectInfo{
		ObjectType: workspace.ObjectTypeDashboard,
		ResourceId: "f00dcafe",
	}, nil)

	dashboardsAPI := m.GetMockLakeviewAPI()
	dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(&dashboards.Dashboard{
		DashboardId:         "f00dcafe",
		DisplayName:         "This is a test dashboard",
		SerializedDashboard: `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`,
		WarehouseId:         "w4r3h0us3",
	}, nil)

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-path").Value.Set("/path/to/dashboard")

	err := cmd.RunE(cmd, []string{})
	require.NoError(t, err)

	// Assert the contents of the generated configuration
	data, err := os.ReadFile(filepath.Join(root, "resources", "this_is_a_test_dashboard.dashboard.yml"))
	require.NoError(t, err)
	assert.Equal(t, `resources:
  dashboards:
    this_is_a_test_dashboard:
      display_name: "This is a test dashboard"
      warehouse_id: w4r3h0us3
      file_path: ../src/this_is_a_test_dashboard.lvdash.json
`, string(data))

	data, err = os.ReadFile(filepath.Join(root, "src", "this_is_a_test_dashboard.lvdash.json"))
	require.NoError(t, err)
	assert.JSONEq(t, `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`, string(data))
}

func TestDashboard_ExistingPath_NotFound(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	workspaceAPI := m.GetMockWorkspaceAPI()
	workspaceAPI.EXPECT().GetStatusByPath(mock.Anything, "/path/to/dashboard").Return(nil, &apierr.APIError{
		StatusCode: 404,
	})

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-path").Value.Set("/path/to/dashboard")

	err := cmd.RunE(cmd, []string{})
	require.Error(t, err)
}
@@ -0,0 +1,144 @@
package bundle

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/databricks/cli/bundle/deploy/terraform"
	"github.com/databricks/cli/bundle/phases"
	"github.com/databricks/cli/bundle/resources"
	"github.com/databricks/cli/cmd/bundle/utils"
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/spf13/cobra"
	"golang.org/x/exp/maps"

	"github.com/pkg/browser"
)

func promptOpenArgument(ctx context.Context, b *bundle.Bundle) (string, error) {
	// Compute map of "Human readable name of resource" -> "resource key".
	inv := make(map[string]string)
	for k, ref := range resources.Completions(b) {
		title := fmt.Sprintf("%s: %s", ref.Description.SingularTitle, ref.Resource.GetName())
		inv[title] = k
	}

	key, err := cmdio.Select(ctx, inv, "Resource to open")
	if err != nil {
		return "", err
	}

	return key, nil
}

func resolveOpenArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, error) {
	// If no arguments are specified, prompt the user to select the resource to open.
	if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
		return promptOpenArgument(ctx, b)
	}

	if len(args) < 1 {
		return "", fmt.Errorf("expected a KEY of the resource to open")
	}

	return args[0], nil
}

func newOpenCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "open",
		Short: "Open a resource in the browser",
		Args:  root.MaximumNArgs(1),
	}

	var forcePull bool
	cmd.Flags().BoolVar(&forcePull, "force-pull", false, "Skip local cache and load the state from the remote workspace")

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()
		b, diags := utils.ConfigureBundleWithVariables(cmd)
		if err := diags.Error(); err != nil {
			return err
		}

		diags = bundle.Apply(ctx, b, phases.Initialize())
		if err := diags.Error(); err != nil {
			return err
		}

		arg, err := resolveOpenArgument(ctx, b, args)
		if err != nil {
			return err
		}

		cacheDir, err := terraform.Dir(ctx, b)
		if err != nil {
			return err
		}
		_, stateFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformStateFileName))
		_, configFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformConfigFileName))
		noCache := errors.Is(stateFileErr, os.ErrNotExist) || errors.Is(configFileErr, os.ErrNotExist)

		if forcePull || noCache {
			diags = bundle.Apply(ctx, b, bundle.Seq(
				terraform.StatePull(),
				terraform.Interpolate(),
				terraform.Write(),
			))
			if err := diags.Error(); err != nil {
				return err
			}
		}

		diags = bundle.Apply(ctx, b, bundle.Seq(
			terraform.Load(),
			mutator.InitializeURLs(),
		))
		if err := diags.Error(); err != nil {
			return err
		}

		// Locate resource to open.
		ref, err := resources.Lookup(b, arg)
		if err != nil {
			return err
		}

		// Confirm that the resource has a URL.
		url := ref.Resource.GetURL()
		if url == "" {
			return fmt.Errorf("resource does not have a URL associated with it (has it been deployed?)")
		}

		return browser.OpenURL(url)
	}

	cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		b, diags := root.MustConfigureBundle(cmd)
		if err := diags.Error(); err != nil {
			cobra.CompErrorln(err.Error())
			return nil, cobra.ShellCompDirectiveError
		}

		// No bundle is configured; there are no resource keys to complete.
		if b == nil {
			return nil, cobra.ShellCompDirectiveNoFileComp
		}

		if len(args) == 0 {
			completions := resources.Completions(b)
			return maps.Keys(completions), cobra.ShellCompDirectiveNoFileComp
		} else {
			return nil, cobra.ShellCompDirectiveNoFileComp
		}
	}

	return cmd
}
@@ -1,12 +1,14 @@
 package bundle

 import (
+	"context"
 	"encoding/json"
 	"fmt"

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/deploy/terraform"
 	"github.com/databricks/cli/bundle/phases"
+	"github.com/databricks/cli/bundle/resources"
 	"github.com/databricks/cli/bundle/run"
 	"github.com/databricks/cli/bundle/run/output"
 	"github.com/databricks/cli/cmd/bundle/utils"
@@ -14,8 +16,60 @@ import (
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/flags"
 	"github.com/spf13/cobra"
+	"golang.org/x/exp/maps"
 )

+func promptRunArgument(ctx context.Context, b *bundle.Bundle) (string, error) {
+	// Compute map of "Human readable name of resource" -> "resource key".
+	inv := make(map[string]string)
+	for k, ref := range resources.Completions(b, run.IsRunnable) {
+		title := fmt.Sprintf("%s: %s", ref.Description.SingularTitle, ref.Resource.GetName())
+		inv[title] = k
+	}
+
+	key, err := cmdio.Select(ctx, inv, "Resource to run")
+	if err != nil {
+		return "", err
+	}
+
+	return key, nil
+}
+
+// resolveRunArgument resolves the resource key to run.
+// It returns the remaining arguments to pass to the runner, if applicable.
+func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, []string, error) {
+	// If no arguments are specified, prompt the user to select something to run.
+	if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
+		key, err := promptRunArgument(ctx, b)
+		if err != nil {
+			return "", nil, err
+		}
+		return key, args, nil
+	}
+
+	if len(args) < 1 {
+		return "", nil, fmt.Errorf("expected a KEY of the resource to run")
+	}
+
+	return args[0], args[1:], nil
+}
+
+func keyToRunner(b *bundle.Bundle, arg string) (run.Runner, error) {
+	// Locate the resource to run.
+	ref, err := resources.Lookup(b, arg, run.IsRunnable)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert the resource to a runnable resource.
+	runner, err := run.ToRunner(b, ref)
+	if err != nil {
+		return nil, err
+	}
+
+	return runner, nil
+}
+
 func newRunCommand() *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "run [flags] KEY",
@@ -61,23 +115,10 @@ task or a Python wheel task, the second example applies.
 			return err
 		}

-		// If no arguments are specified, prompt the user to select something to run.
-		if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
-			// Invert completions from KEY -> NAME, to NAME -> KEY.
-			inv := make(map[string]string)
-			for k, v := range run.ResourceCompletionMap(b) {
-				inv[v] = k
-			}
-			id, err := cmdio.Select(ctx, inv, "Resource to run")
-			if err != nil {
-				return err
-			}
-			args = append(args, id)
-		}
-
-		if len(args) < 1 {
-			return fmt.Errorf("expected a KEY of the resource to run")
-		}
+		key, args, err := resolveRunArgument(ctx, b, args)
+		if err != nil {
+			return err
+		}

 		diags = bundle.Apply(ctx, b, bundle.Seq(
 			terraform.Interpolate(),
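resolveRunArgument's contract is easiest to see in isolation: the resource key is peeled off and the remaining arguments flow to the runner untouched. A self-contained sketch of the same splitting logic (toy reimplementation, not the source function):

package main

import "fmt"

// splitKeyArgs mirrors resolveRunArgument's non-interactive path: the first
// positional argument is the resource key and the rest are forwarded to the
// runner's own argument parser.
func splitKeyArgs(args []string) (key string, rest []string, err error) {
	if len(args) < 1 {
		return "", nil, fmt.Errorf("expected a KEY of the resource to run")
	}
	return args[0], args[1:], nil
}

func main() {
	key, rest, _ := splitKeyArgs([]string{"my_job", "--", "one", "two"})
	fmt.Println(key, rest) // my_job [-- one two]
}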
@@ -89,13 +130,13 @@ task or a Python wheel task, the second example applies.
 			return err
 		}

-		runner, err := run.Find(b, args[0])
+		runner, err := keyToRunner(b, key)
 		if err != nil {
 			return err
 		}

 		// Parse additional positional arguments.
-		err = runner.ParseArgs(args[1:], &runOptions)
+		err = runner.ParseArgs(args, &runOptions)
 		if err != nil {
 			return err
 		}
@@ -146,10 +187,11 @@
 		}

 		if len(args) == 0 {
-			return run.ResourceCompletions(b), cobra.ShellCompDirectiveNoFileComp
+			completions := resources.Completions(b, run.IsRunnable)
+			return maps.Keys(completions), cobra.ShellCompDirectiveNoFileComp
 		} else {
 			// If we know the resource to run, we can complete additional positional arguments.
-			runner, err := run.Find(b, args[0])
+			runner, err := keyToRunner(b, args[0])
 			if err != nil {
 				return nil, cobra.ShellCompDirectiveError
 			}
@@ -1,7 +1,9 @@
package bundle

import (
	"context"
	"fmt"
	"io"
	"time"

	"github.com/databricks/cli/bundle"

@@ -9,6 +11,7 @@ import (
	"github.com/databricks/cli/bundle/phases"
	"github.com/databricks/cli/cmd/bundle/utils"
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/flags"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/cli/libs/sync"
	"github.com/spf13/cobra"

@@ -18,6 +21,7 @@ type syncFlags struct {
	interval time.Duration
	full     bool
	watch    bool
	output   flags.Output
}

func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) (*sync.SyncOptions, error) {

@@ -26,6 +30,21 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle)
		return nil, fmt.Errorf("cannot get sync options: %w", err)
	}

	if f.output != "" {
		var outputFunc func(context.Context, <-chan sync.Event, io.Writer)
		switch f.output {
		case flags.OutputText:
			outputFunc = sync.TextOutput
		case flags.OutputJSON:
			outputFunc = sync.JsonOutput
		}
		if outputFunc != nil {
			opts.OutputHandler = func(ctx context.Context, c <-chan sync.Event) {
				outputFunc(ctx, c, cmd.OutOrStdout())
			}
		}
	}

	opts.Full = f.full
	opts.PollInterval = f.interval
	return opts, nil

@@ -42,6 +61,7 @@ func newSyncCommand() *cobra.Command {
	cmd.Flags().DurationVar(&f.interval, "interval", 1*time.Second, "file system polling interval (for --watch)")
	cmd.Flags().BoolVar(&f.full, "full", false, "perform full synchronization (default is incremental)")
	cmd.Flags().BoolVar(&f.watch, "watch", false, "watch local file system for changes")
	cmd.Flags().Var(&f.output, "output", "type of the output format")

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

@@ -65,6 +85,7 @@ func newSyncCommand() *cobra.Command {
		if err != nil {
			return err
		}
		defer s.Close()

		log.Infof(ctx, "Remote file sync location: %v", opts.RemotePath)
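The output handler decouples producing sync events from rendering them: the handler drains a channel and writes to the command's stdout. A self-contained sketch of the same pattern, with a plain string channel standing in for sync.Event:

package main

import (
	"context"
	"fmt"
	"io"
	"os"
)

// drain renders events from c to w until the channel is closed or the
// context is cancelled.
func drain(ctx context.Context, c <-chan string, w io.Writer) {
	for {
		select {
		case <-ctx.Done():
			return
		case e, ok := <-c:
			if !ok {
				return
			}
			fmt.Fprintln(w, e)
		}
	}
}

func main() {
	c := make(chan string, 2)
	c <- "put file.py"
	c <- "complete"
	close(c)
	drain(context.Background(), c, os.Stdout)
}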
@@ -75,6 +75,7 @@ func New(ctx context.Context) *cobra.Command {

		// Configure our user agent with the command that's about to be executed.
		ctx = withCommandInUserAgent(ctx, cmd)
		ctx = withCommandExecIdInUserAgent(ctx)
		ctx = withUpstreamInUserAgent(ctx)
		cmd.SetContext(ctx)
		return nil
@@ -0,0 +1,14 @@
package root

import (
	"context"

	"github.com/databricks/databricks-sdk-go/useragent"
	"github.com/google/uuid"
)

func withCommandExecIdInUserAgent(ctx context.Context) context.Context {
	// A UUID that will allow us to correlate multiple API requests made by
	// the same CLI invocation.
	return useragent.InContext(ctx, "cmd-exec-id", uuid.New().String())
}
@@ -0,0 +1,26 @@
package root

import (
	"context"
	"regexp"
	"testing"

	"github.com/databricks/databricks-sdk-go/useragent"
	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestWithCommandExecIdInUserAgent(t *testing.T) {
	ctx := withCommandExecIdInUserAgent(context.Background())

	// Check that the command exec ID is in the user agent string.
	ua := useragent.FromContext(ctx)
	re := regexp.MustCompile(`cmd-exec-id/([a-f0-9-]+)`)
	matches := re.FindAllStringSubmatch(ua, -1)

	// Assert that we have exactly one match and that it's a valid UUID.
	require.Len(t, matches, 1)
	_, err := uuid.Parse(matches[0][1])
	assert.NoError(t, err)
}
@@ -1,13 +1,15 @@
 package root

 import (
+	"context"
 	"testing"

+	"github.com/databricks/databricks-sdk-go/useragent"
 	"github.com/spf13/cobra"
 	"github.com/stretchr/testify/assert"
 )

-func TestCommandString(t *testing.T) {
+func TestWithCommandInUserAgent(t *testing.T) {
 	root := &cobra.Command{
 		Use: "root",
 	}

@@ -26,4 +28,9 @@ func TestCommandString(t *testing.T) {
 	assert.Equal(t, "root", commandString(root))
 	assert.Equal(t, "hello", commandString(hello))
 	assert.Equal(t, "hello_world", commandString(world))
+
+	ctx := withCommandInUserAgent(context.Background(), world)
+
+	ua := useragent.FromContext(ctx)
+	assert.Contains(t, ua, "cmd/hello_world")
 }
@@ -1,20 +1,20 @@
 module github.com/databricks/cli

-go 1.22.0
+go 1.23

-toolchain go1.22.7
+toolchain go1.23.2

 require (
 	github.com/Masterminds/semver/v3 v3.3.0 // MIT
 	github.com/briandowns/spinner v1.23.1 // Apache 2.0
 	github.com/databricks/databricks-sdk-go v0.49.0 // Apache 2.0
-	github.com/fatih/color v1.17.0 // MIT
+	github.com/fatih/color v1.18.0 // MIT
 	github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
 	github.com/google/uuid v1.6.0 // BSD-3-Clause
 	github.com/hashicorp/go-version v1.7.0 // MPL 2.0
 	github.com/hashicorp/hc-install v0.9.0 // MPL 2.0
 	github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0
-	github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0
+	github.com/hashicorp/terraform-json v0.23.0 // MPL 2.0
 	github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause
 	github.com/mattn/go-isatty v0.0.20 // MIT
 	github.com/nwidger/jsoncolor v0.3.2 // MIT

@@ -56,7 +56,7 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
-	github.com/zclconf/go-cty v1.14.4 // indirect
+	github.com/zclconf/go-cty v1.15.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
 	go.opentelemetry.io/otel v1.24.0 // indirect
@@ -44,8 +44,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
-github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=

@@ -109,8 +109,8 @@ github.com/hashicorp/hc-install v0.9.0 h1:2dIk8LcvANwtv3QZLckxcjyF5w8KVtiMxu6G6e
 github.com/hashicorp/hc-install v0.9.0/go.mod h1:+6vOP+mf3tuGgMApVYtmsnDoKWMDcFXeTxCACYZ8SFg=
 github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ=
 github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg=
-github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec=
-github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A=
+github.com/hashicorp/terraform-json v0.23.0 h1:sniCkExU4iKtTADReHzACkk8fnpQXrdD2xoR+lppBkI=
+github.com/hashicorp/terraform-json v0.23.0/go.mod h1:MHdXbBAbSg0GvzuWazEGKAn/cyNfIB7mN6y7KJN6y2c=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=

@@ -160,8 +160,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
 github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
-github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8=
-github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ=
+github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
@@ -0,0 +1,12 @@
{
  "properties": {
    "unique_id": {
      "type": "string",
      "description": "Unique ID for job name"
    },
    "warehouse_id": {
      "type": "string",
      "description": "The SQL warehouse ID to use for the dashboard"
    }
  }
}
@@ -0,0 +1,34 @@
{
  "pages": [
    {
      "displayName": "New Page",
      "layout": [
        {
          "position": {
            "height": 2,
            "width": 6,
            "x": 0,
            "y": 0
          },
          "widget": {
            "name": "82eb9107",
            "textbox_spec": "# I'm a title"
          }
        },
        {
          "position": {
            "height": 2,
            "width": 6,
            "x": 0,
            "y": 2
          },
          "widget": {
            "name": "ffa6de4f",
            "textbox_spec": "Text"
          }
        }
      ],
      "name": "fdd21a3c"
    }
  ]
}
@@ -0,0 +1,12 @@
bundle:
  name: dashboards

workspace:
  root_path: "~/.bundle/{{.unique_id}}"

resources:
  dashboards:
    file_reference:
      display_name: test-dashboard-{{.unique_id}}
      file_path: ./dashboard.lvdash.json
      warehouse_id: {{.warehouse_id}}
@@ -0,0 +1,25 @@
{
  "properties": {
    "project_name": {
      "type": "string",
      "default": "my_test_code",
      "description": "Unique name for this project"
    },
    "spark_version": {
      "type": "string",
      "description": "Spark version used for job cluster"
    },
    "node_type_id": {
      "type": "string",
      "description": "Node type id for job cluster"
    },
    "unique_id": {
      "type": "string",
      "description": "Unique ID for job name"
    },
    "instance_pool_id": {
      "type": "string",
      "description": "Instance pool id for job cluster"
    }
  }
}
@@ -0,0 +1,29 @@
bundle:
  name: wheel-task

workspace:
  root_path: "~/.bundle/{{.unique_id}}"

resources:
  clusters:
    test_cluster:
      cluster_name: "test-cluster-{{.unique_id}}"
      spark_version: "{{.spark_version}}"
      node_type_id: "{{.node_type_id}}"
      num_workers: 1
      data_security_mode: USER_ISOLATION

  jobs:
    some_other_job:
      name: "[${bundle.target}] Test Wheel Job {{.unique_id}}"
      tasks:
        - task_key: TestTask
          existing_cluster_id: "${resources.clusters.test_cluster.cluster_id}"
          python_wheel_task:
            package_name: my_test_code
            entry_point: run
            parameters:
              - "one"
              - "two"
          libraries:
            - whl: ./dist/*.whl
@@ -0,0 +1,15 @@
from setuptools import setup, find_packages

import {{.project_name}}

setup(
    name="{{.project_name}}",
    version={{.project_name}}.__version__,
    author={{.project_name}}.__author__,
    url="https://databricks.com",
    author_email="john.doe@databricks.com",
    description="my example wheel",
    packages=find_packages(include=["{{.project_name}}"]),
    entry_points={"group1": "run={{.project_name}}.__main__:main"},
    install_requires=["setuptools"],
)
@@ -0,0 +1,2 @@
__version__ = "0.0.1"
__author__ = "Databricks"
@@ -0,0 +1,16 @@
"""
The entry point of the Python Wheel
"""

import sys


def main():
    # This method will print the provided arguments
    print("Hello from my func")
    print("Got arguments:")
    print(sys.argv)


if __name__ == "__main__":
    main()
@@ -0,0 +1,63 @@
package bundle

import (
	"fmt"
	"testing"

	"github.com/databricks/cli/internal/acc"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
	"github.com/databricks/databricks-sdk-go/service/workspace"
	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAccDashboards(t *testing.T) {
	ctx, wt := acc.WorkspaceTest(t)

	warehouseID := acc.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID")
	uniqueID := uuid.New().String()
	root, err := initTestTemplate(t, ctx, "dashboards", map[string]any{
		"unique_id":    uniqueID,
		"warehouse_id": warehouseID,
	})
	require.NoError(t, err)

	t.Cleanup(func() {
		err = destroyBundle(t, ctx, root)
		require.NoError(t, err)
	})

	err = deployBundle(t, ctx, root)
	require.NoError(t, err)

	// Load bundle configuration by running the validate command.
	b := unmarshalConfig(t, mustValidateBundle(t, ctx, root))

	// Assert that the dashboard exists at the expected path and is, indeed, a dashboard.
	oi, err := wt.W.Workspace.GetStatusByPath(ctx, fmt.Sprintf("%s/test-dashboard-%s.lvdash.json", b.Config.Workspace.ResourcePath, uniqueID))
	require.NoError(t, err)
	assert.EqualValues(t, workspace.ObjectTypeDashboard, oi.ObjectType)

	// Load the dashboard by its ID and confirm its display name.
	dashboard, err := wt.W.Lakeview.GetByDashboardId(ctx, oi.ResourceId)
	require.NoError(t, err)
	assert.Equal(t, fmt.Sprintf("test-dashboard-%s", uniqueID), dashboard.DisplayName)

	// Make an out of band modification to the dashboard and confirm that it is detected.
	_, err = wt.W.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{
		DashboardId:         oi.ResourceId,
		SerializedDashboard: dashboard.SerializedDashboard,
	})
	require.NoError(t, err)

	// Try to redeploy the bundle and confirm that the out of band modification is detected.
	stdout, _, err := deployBundleWithArgs(t, ctx, root)
	require.Error(t, err)
	assert.Contains(t, stdout, `Error: dashboard "file_reference" has been modified remotely`+"\n")

	// Redeploy the bundle with the --force flag and confirm that the out of band modification is ignored.
	_, stderr, err := deployBundleWithArgs(t, ctx, root, "--force")
	require.NoError(t, err)
	assert.Contains(t, stderr, `Deployment complete!`+"\n")
}
@@ -11,6 +11,7 @@ import (
	"strings"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/internal"
	"github.com/databricks/cli/libs/cmdio"

@@ -66,6 +67,19 @@ func validateBundle(t *testing.T, ctx context.Context, path string) ([]byte, error) {
	return stdout.Bytes(), err
}

func mustValidateBundle(t *testing.T, ctx context.Context, path string) []byte {
	data, err := validateBundle(t, ctx, path)
	require.NoError(t, err)
	return data
}

func unmarshalConfig(t *testing.T, data []byte) *bundle.Bundle {
	bundle := &bundle.Bundle{}
	err := json.Unmarshal(data, &bundle.Config)
	require.NoError(t, err)
	return bundle
}

func deployBundle(t *testing.T, ctx context.Context, path string) error {
	ctx = env.Set(ctx, "BUNDLE_ROOT", path)
	c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve")

@@ -73,6 +87,14 @@ func deployBundle(t *testing.T, ctx context.Context, path string) error {
	return err
}

func deployBundleWithArgs(t *testing.T, ctx context.Context, path string, args ...string) (string, string, error) {
	ctx = env.Set(ctx, "BUNDLE_ROOT", path)
	args = append([]string{"bundle", "deploy"}, args...)
	c := internal.NewCobraTestRunnerWithContext(t, ctx, args...)
	stdout, stderr, err := c.Run()
	return stdout.String(), stderr.String(), err
}

func deployBundleWithFlags(t *testing.T, ctx context.Context, path string, flags []string) error {
	ctx = env.Set(ctx, "BUNDLE_ROOT", path)
	args := []string{"bundle", "deploy", "--force-lock"}
@@ -5,17 +5,18 @@ import (

 	"github.com/databricks/cli/internal"
 	"github.com/databricks/cli/internal/acc"
+	"github.com/databricks/cli/internal/testutil"
 	"github.com/databricks/cli/libs/env"
 	"github.com/google/uuid"
 	"github.com/stretchr/testify/require"
 )

-func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bool) {
+func runPythonWheelTest(t *testing.T, templateName string, sparkVersion string, pythonWheelWrapper bool) {
 	ctx, _ := acc.WorkspaceTest(t)

 	nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV"))
 	instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID")
-	bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task", map[string]any{
+	bundleRoot, err := initTestTemplate(t, ctx, templateName, map[string]any{
 		"node_type_id":  nodeTypeId,
 		"unique_id":     uuid.New().String(),
 		"spark_version": sparkVersion,

@@ -45,9 +46,19 @@ func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bool) {
 }

 func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) {
-	runPythonWheelTest(t, "13.3.x-snapshot-scala2.12", false)
+	runPythonWheelTest(t, "python_wheel_task", "13.3.x-snapshot-scala2.12", false)
 }

 func TestAccPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) {
-	runPythonWheelTest(t, "12.2.x-scala2.12", true)
+	runPythonWheelTest(t, "python_wheel_task", "12.2.x-scala2.12", true)
 }
+
+func TestAccPythonWheelTaskDeployAndRunOnInteractiveCluster(t *testing.T) {
+	_, wt := acc.WorkspaceTest(t)
+
+	if testutil.IsAWSCloud(wt.T) {
+		t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters")
+	}
+
+	runPythonWheelTest(t, "python_wheel_task_with_cluster", defaultSparkVersion, false)
+}
@ -9,6 +9,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"net/url"
 	"strings"
 	"time"

@ -155,6 +156,26 @@ func (a *PersistentAuth) ClearToken(ctx context.Context) error {
 	return a.cache.Delete(key)
 }

+// cleanHost cleans up the host URL by retaining only the scheme and the host.
+// It thus removes any path, query arguments, or fragments from the URL.
+func (a *PersistentAuth) cleanHost() {
+	parsedHost, err := url.Parse(a.Host)
+	if err != nil {
+		return
+	}
+	// When either the host or the scheme is empty, we don't clean the URL,
+	// because the Go url library parses a raw "abc" string as the path of a
+	// URL, and cleaning it would thus return an empty string.
+	if parsedHost.Host == "" || parsedHost.Scheme == "" {
+		return
+	}
+	host := url.URL{
+		Scheme: parsedHost.Scheme,
+		Host:   parsedHost.Host,
+	}
+	a.Host = host.String()
+}
+
 func (a *PersistentAuth) init(ctx context.Context) error {
 	if a.Host == "" && a.AccountID == "" {
 		return ErrFetchCredentials

@ -168,6 +189,9 @@ func (a *PersistentAuth) init(ctx context.Context) error {
 	if a.browser == nil {
 		a.browser = browser.OpenURL
 	}

+	a.cleanHost()
+
 	// try acquire listener, which we also use as a machine-local
 	// exclusive lock to prevent token cache corruption in the scope
 	// of developer machine, where this command runs.
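The guard on an empty scheme or host exists because net/url treats a bare string as a path. A small standalone sketch of that behavior (standard library only, not part of the change):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A raw "abc" parses as a path with empty scheme and host, so
	// rebuilding a URL from scheme+host alone would yield "".
	u, _ := url.Parse("abc")
	fmt.Printf("scheme=%q host=%q path=%q\n", u.Scheme, u.Host, u.Path)
	// prints: scheme="" host="" path="abc"

	// A full URL keeps its scheme and host, which is all cleanHost retains.
	u, _ = url.Parse("https://example.com/path?query=1#fragment")
	fmt.Printf("%s://%s\n", u.Scheme, u.Host)
	// prints: https://example.com
}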
@ -282,3 +282,37 @@ func TestClearTokenNotExist(t *testing.T) {
 	_, err = p.cache.Lookup(key)
 	assert.Equal(t, ErrNotConfigured, err)
 }
+
+func TestPersistentAuthCleanHost(t *testing.T) {
+	for _, tcases := range []struct {
+		in  string
+		out string
+	}{
+		{"https://example.com", "https://example.com"},
+		{"https://example.com/", "https://example.com"},
+		{"https://example.com/path", "https://example.com"},
+		{"https://example.com/path/subpath", "https://example.com"},
+		{"https://example.com/path?query=1", "https://example.com"},
+		{"https://example.com/path?query=1&other=2", "https://example.com"},
+		{"https://example.com/path#fragment", "https://example.com"},
+		{"https://example.com/path?query=1#fragment", "https://example.com"},
+		{"https://example.com/path?query=1&other=2#fragment", "https://example.com"},
+		{"https://example.com/path/subpath?query=1", "https://example.com"},
+		{"https://example.com/path/subpath?query=1&other=2", "https://example.com"},
+		{"https://example.com/path/subpath#fragment", "https://example.com"},
+		{"https://example.com/path/subpath?query=1#fragment", "https://example.com"},
+		{"https://example.com/path/subpath?query=1&other=2#fragment", "https://example.com"},
+		{"https://example.com/path?query=1%20value&other=2%20value", "https://example.com"},
+		{"http://example.com/path/subpath?query=1%20value&other=2%20value", "http://example.com"},
+
+		// URLs without a scheme should be left as-is.
+		{"abc", "abc"},
+		{"abc.com/def", "abc.com/def"},
+	} {
+		p := &PersistentAuth{
+			Host: tcases.in,
+		}
+		p.cleanHost()
+		assert.Equal(t, tcases.out, p.Host)
+	}
+}
@ -111,3 +111,7 @@ func PanicsWithError(t assert.TestingT, errString string, f func(), msgAndArgs ...interface{}) bool {
 func NotPanics(t assert.TestingT, f func(), msgAndArgs ...interface{}) bool {
 	return assert.NotPanics(t, f, msgAndArgs...)
 }
+
+func JSONEq(t assert.TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+	return assert.JSONEq(t, expected, actual, msgAndArgs...)
+}
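JSONEq asserts semantic equality of two JSON documents rather than byte equality. A hedged usage sketch (hypothetical test in the same package; assumes the testing import):

func TestJSONEqSketch(t *testing.T) {
	// Key order and whitespace differ, but the documents are equal as JSON.
	JSONEq(t, `{"a": 1, "b": 2}`, `{"b":2,"a":1}`)
}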
@ -0,0 +1,39 @@
package jsonsaver

import (
	"bytes"
	"encoding/json"
)

// The encoder type encapsulates a [json.Encoder] and its target buffer.
// Escaping of HTML characters in the output is disabled.
type encoder struct {
	*json.Encoder
	*bytes.Buffer
}

func newEncoder() encoder {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)

	// By default, json.Encoder escapes HTML characters, converting symbols like '<' to '\u003c'.
	// This behavior helps prevent XSS attacks when JSON is embedded within HTML.
	// However, we disable this feature since we're not dealing with an HTML context.
	// Keeping the escapes enabled would result in unnecessary differences when processing
	// JSON payloads that already contain escaped characters.
	enc.SetEscapeHTML(false)
	return encoder{enc, &buf}
}

func marshalNoEscape(v any) ([]byte, error) {
	enc := newEncoder()
	err := enc.Encode(v)
	return enc.Bytes(), err
}

func marshalIndentNoEscape(v any, prefix, indent string) ([]byte, error) {
	enc := newEncoder()
	enc.SetIndent(prefix, indent)
	err := enc.Encode(v)
	return enc.Bytes(), err
}
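To make the SetEscapeHTML comment concrete, a hedged sketch contrasting the standard library's default behavior with marshalNoEscape (hypothetical helper in this package; assumes encoding/json and fmt are imported):

func demoEscaping() {
	// The default marshaler escapes HTML-significant characters.
	std, _ := json.Marshal("1 < 2")
	fmt.Println(string(std)) // prints "1 \u003c 2"

	// marshalNoEscape leaves them intact; note Encode appends a newline.
	raw, _ := marshalNoEscape("1 < 2")
	fmt.Print(string(raw)) // prints "1 < 2"
}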
@ -0,0 +1,41 @@
package jsonsaver

import (
	"testing"

	assert "github.com/databricks/cli/libs/dyn/dynassert"
)

func TestEncoder_MarshalNoEscape(t *testing.T) {
	out, err := marshalNoEscape("1 < 2")
	if !assert.NoError(t, err) {
		return
	}

	// Confirm the output.
	assert.JSONEq(t, `"1 < 2"`, string(out))

	// Confirm that HTML escaping is disabled.
	assert.NotContains(t, string(out), "\\u003c")

	// Confirm that the encoder writes a trailing newline.
	assert.Contains(t, string(out), "\n")
}

func TestEncoder_MarshalIndentNoEscape(t *testing.T) {
	out, err := marshalIndentNoEscape([]string{"1 < 2", "2 < 3"}, "", " ")
	if !assert.NoError(t, err) {
		return
	}

	// Confirm the output.
	assert.JSONEq(t, `["1 < 2", "2 < 3"]`, string(out))

	// Confirm that HTML escaping is disabled.
	assert.NotContains(t, string(out), "\\u003c")

	// Confirm that the encoder performs indenting and writes a trailing newline.
	assert.Contains(t, string(out), "[\n")
	assert.Contains(t, string(out), " \"1 < 2\",\n")
	assert.Contains(t, string(out), "]\n")
}
@ -0,0 +1,89 @@
package jsonsaver

import (
	"bytes"
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

// Marshal is a version of [json.Marshal] for [dyn.Value].
//
// Objects in the output retain the order of keys as they appear in the underlying [dyn.Value].
// The output does not escape HTML characters in strings.
func Marshal(v dyn.Value) ([]byte, error) {
	return marshalNoEscape(wrap{v})
}

// MarshalIndent is a version of [json.MarshalIndent] for [dyn.Value].
//
// Objects in the output retain the order of keys as they appear in the underlying [dyn.Value].
// The output does not escape HTML characters in strings.
func MarshalIndent(v dyn.Value, prefix, indent string) ([]byte, error) {
	return marshalIndentNoEscape(wrap{v}, prefix, indent)
}

// Wrapper type for [dyn.Value] to expose the [json.Marshaler] interface.
type wrap struct {
	v dyn.Value
}

// MarshalJSON implements the [json.Marshaler] interface for the [dyn.Value] wrapper type.
func (w wrap) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	if err := marshalValue(&buf, w.v); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// marshalValue recursively writes JSON for a [dyn.Value] to the buffer.
func marshalValue(buf *bytes.Buffer, v dyn.Value) error {
	switch v.Kind() {
	case dyn.KindString, dyn.KindBool, dyn.KindInt, dyn.KindFloat, dyn.KindTime, dyn.KindNil:
		out, err := marshalNoEscape(v.AsAny())
		if err != nil {
			return err
		}

		// The encoder writes a trailing newline, so we need to remove it
		// to avoid adding extra newlines when embedding this JSON.
		out = out[:len(out)-1]
		buf.Write(out)
	case dyn.KindMap:
		buf.WriteByte('{')
		for i, pair := range v.MustMap().Pairs() {
			if i > 0 {
				buf.WriteByte(',')
			}
			// Require keys to be strings.
			if pair.Key.Kind() != dyn.KindString {
				return fmt.Errorf("map key must be a string, got %s", pair.Key.Kind())
			}
			// Marshal the key.
			if err := marshalValue(buf, pair.Key); err != nil {
				return err
			}
			buf.WriteByte(':')
			// Marshal the value.
			if err := marshalValue(buf, pair.Value); err != nil {
				return err
			}
		}
		buf.WriteByte('}')
	case dyn.KindSequence:
		buf.WriteByte('[')
		for i, item := range v.MustSequence() {
			if i > 0 {
				buf.WriteByte(',')
			}
			if err := marshalValue(buf, item); err != nil {
				return err
			}
		}
		buf.WriteByte(']')
	default:
		return fmt.Errorf("unsupported kind: %d", v.Kind())
	}
	return nil
}
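The practical difference from encoding/json is key order: the standard library sorts map keys, while this Marshal preserves the insertion order of the underlying [dyn.Value]. A hedged sketch (hypothetical helper in the same package; assumes encoding/json and fmt are imported):

func demoKeyOrder() {
	m := dyn.NewMapping()
	m.Set(dyn.V("zebra"), dyn.V(1))
	m.Set(dyn.V("apple"), dyn.V(2))

	// jsonsaver keeps insertion order.
	out, _ := Marshal(dyn.V(m))
	fmt.Print(string(out)) // {"zebra":1,"apple":2}

	// encoding/json sorts map keys.
	std, _ := json.Marshal(map[string]int{"zebra": 1, "apple": 2})
	fmt.Println(string(std)) // {"apple":2,"zebra":1}
}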
@ -0,0 +1,100 @@
package jsonsaver

import (
	"testing"

	"github.com/databricks/cli/libs/dyn"
	assert "github.com/databricks/cli/libs/dyn/dynassert"
)

func TestMarshal_String(t *testing.T) {
	b, err := Marshal(dyn.V("string"))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `"string"`, string(b))
	}
}

func TestMarshal_Bool(t *testing.T) {
	b, err := Marshal(dyn.V(true))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `true`, string(b))
	}
}

func TestMarshal_Int(t *testing.T) {
	b, err := Marshal(dyn.V(42))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `42`, string(b))
	}
}

func TestMarshal_Float(t *testing.T) {
	b, err := Marshal(dyn.V(42.1))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `42.1`, string(b))
	}
}

func TestMarshal_Time(t *testing.T) {
	b, err := Marshal(dyn.V(dyn.MustTime("2021-01-01T00:00:00Z")))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `"2021-01-01T00:00:00Z"`, string(b))
	}
}

func TestMarshal_Map(t *testing.T) {
	m := dyn.NewMapping()
	m.Set(dyn.V("key1"), dyn.V("value1"))
	m.Set(dyn.V("key2"), dyn.V("value2"))

	b, err := Marshal(dyn.V(m))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `{"key1":"value1","key2":"value2"}`, string(b))
	}
}

func TestMarshal_Sequence(t *testing.T) {
	var s []dyn.Value
	s = append(s, dyn.V("value1"))
	s = append(s, dyn.V("value2"))

	b, err := Marshal(dyn.V(s))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `["value1","value2"]`, string(b))
	}
}

func TestMarshal_Complex(t *testing.T) {
	map1 := dyn.NewMapping()
	map1.Set(dyn.V("str1"), dyn.V("value1"))
	map1.Set(dyn.V("str2"), dyn.V("value2"))

	seq1 := []dyn.Value{}
	seq1 = append(seq1, dyn.V("value1"))
	seq1 = append(seq1, dyn.V("value2"))

	root := dyn.NewMapping()
	root.Set(dyn.V("map1"), dyn.V(map1))
	root.Set(dyn.V("seq1"), dyn.V(seq1))

	// Marshal without indent.
	b, err := Marshal(dyn.V(root))
	if assert.NoError(t, err) {
		assert.Equal(t, `{"map1":{"str1":"value1","str2":"value2"},"seq1":["value1","value2"]}`+"\n", string(b))
	}

	// Marshal with indent.
	b, err = MarshalIndent(dyn.V(root), "", " ")
	if assert.NoError(t, err) {
		assert.Equal(t, `{
 "map1": {
  "str1": "value1",
  "str2": "value2"
 },
 "seq1": [
  "value1",
  "value2"
 ]
}`+"\n", string(b))
	}
}
@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"os/exec"
+	"sync"
 )

 type execOption func(context.Context, *exec.Cmd) error

@ -69,10 +70,24 @@ func WithStdoutWriter(dst io.Writer) execOption {
 	}
 }

+// safeWriter is a writer that is safe to use concurrently.
+// It serializes writes to the underlying writer.
+type safeWriter struct {
+	w io.Writer
+	m sync.Mutex
+}
+
+func (s *safeWriter) Write(p []byte) (n int, err error) {
+	s.m.Lock()
+	defer s.m.Unlock()
+	return s.w.Write(p)
+}
+
 func WithCombinedOutput(buf *bytes.Buffer) execOption {
+	sw := &safeWriter{w: buf}
 	return func(_ context.Context, c *exec.Cmd) error {
-		c.Stdout = io.MultiWriter(buf, c.Stdout)
-		c.Stderr = io.MultiWriter(buf, c.Stderr)
+		c.Stdout = io.MultiWriter(sw, c.Stdout)
+		c.Stderr = io.MultiWriter(sw, c.Stderr)
 		return nil
 	}
 }
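The mutex matters because os/exec copies stdout and stderr concurrently, and bytes.Buffer is not safe for concurrent writers. A standalone sketch of the same pattern (standard library only, not part of the change):

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

// safeWriter serializes writes to the wrapped writer, mirroring the pattern above.
type safeWriter struct {
	w io.Writer
	m sync.Mutex
}

func (s *safeWriter) Write(p []byte) (int, error) {
	s.m.Lock()
	defer s.m.Unlock()
	return s.w.Write(p)
}

func main() {
	var buf bytes.Buffer
	sw := &safeWriter{w: &buf}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// Each Fprintf issues a single Write, which the mutex keeps atomic.
			fmt.Fprintf(sw, "writer %d done\n", n)
		}(i)
	}
	wg.Wait()
	fmt.Print(buf.String()) // four whole lines, in nondeterministic order
}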