mirror of https://github.com/databricks/cli.git
Compare commits
No commits in common. "1218178e64844a091fb06293b6fc14552273942b" and "8a2fe4969c58774b9ae4ea5cc286be5bf378f141" have entirely different histories.
1218178e64...8a2fe4969c
@@ -1,114 +0,0 @@
-name: PR Comment
-
-# WARNING:
-# THIS WORKFLOW ALWAYS RUNS FOR EXTERNAL CONTRIBUTORS WITHOUT ANY APPROVAL.
-# THIS WORKFLOW RUNS FROM MAIN BRANCH, NOT FROM THE PR BRANCH.
-# DO NOT PULL THE PR OR EXECUTE ANY CODE FROM THE PR.
-
-on:
-  pull_request_target:
-    types: [opened, reopened, synchronize]
-    branches:
-      - main
-
-jobs:
-  comment-on-pr:
-    runs-on: ubuntu-latest
-    permissions:
-      pull-requests: write
-
-    steps:
-      # NOTE: The following checks may not be accurate depending on Org or Repo settings.
-      - name: Check user and potential secret access
-        id: check-secrets-access
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          USER_LOGIN="${{ github.event.pull_request.user.login }}"
-          REPO_OWNER="${{ github.repository_owner }}"
-          REPO_NAME="${{ github.event.repository.name }}"
-
-          echo "Pull request opened by: $USER_LOGIN"
-
-          # Check if PR is from a fork
-          IS_FORK=$([[ "${{ github.event.pull_request.head.repo.full_name }}" != "${{ github.repository }}" ]] && echo "true" || echo "false")
-
-          HAS_ACCESS="false"
-
-          # Check user's permission level on the repository
-          USER_PERMISSION=$(gh api repos/$REPO_OWNER/$REPO_NAME/collaborators/$USER_LOGIN/permission --jq '.permission')
-
-          if [[ "$USER_PERMISSION" == "admin" || "$USER_PERMISSION" == "write" ]]; then
-            HAS_ACCESS="true"
-          elif [[ "$USER_PERMISSION" == "read" ]]; then
-            # For read access, we need to check if the user has been explicitly granted secret access
-            # This information is not directly available via API, so we'll make an assumption
-            # that read access does not imply secret access
-            HAS_ACCESS="false"
-          fi
-
-          # Check if repo owner is an organization
-          IS_ORG=$(gh api users/$REPO_OWNER --jq '.type == "Organization"')
-
-          if [[ "$IS_ORG" == "true" && "$HAS_ACCESS" == "false" ]]; then
-            # Check if user is a member of any team with write or admin access to the repo
-            TEAMS_WITH_ACCESS=$(gh api repos/$REPO_OWNER/$REPO_NAME/teams --jq '.[] | select(.permission == "push" or .permission == "admin") | .slug')
-            for team in $TEAMS_WITH_ACCESS; do
-              IS_TEAM_MEMBER=$(gh api orgs/$REPO_OWNER/teams/$team/memberships/$USER_LOGIN --silent && echo "true" || echo "false")
-              if [[ "$IS_TEAM_MEMBER" == "true" ]]; then
-                HAS_ACCESS="true"
-                break
-              fi
-            done
-          fi
-
-          # If it's a fork, set HAS_ACCESS to false regardless of other checks
-          if [[ "$IS_FORK" == "true" ]]; then
-            HAS_ACCESS="false"
-          fi
-
-          echo "has_secrets_access=$HAS_ACCESS" >> $GITHUB_OUTPUT
-          if [[ "$HAS_ACCESS" == "true" ]]; then
-            echo "User $USER_LOGIN likely has access to secrets"
-          else
-            echo "User $USER_LOGIN likely does not have access to secrets"
-          fi
-
-      - uses: actions/checkout@v4
-
-      - name: Delete old comments
-        if: steps.check-secrets-access.outputs.has_secrets_access != 'true'
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          # Delete previous comment if it exists
-          previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \
-            --jq '.[] | select(.body | startswith("<!-- INTEGRATION_TESTS -->")) | .id')
-          echo "Previous comment IDs: $previous_comment_ids"
-          # Iterate over each comment ID and delete the comment
-          if [ ! -z "$previous_comment_ids" ]; then
-            echo "$previous_comment_ids" | while read -r comment_id; do
-              echo "Deleting comment with ID: $comment_id"
-              gh api "repos/${{ github.repository }}/issues/comments/$comment_id" -X DELETE
-            done
-          fi
-
-      - name: Comment on PR
-        if: steps.check-secrets-access.outputs.has_secrets_access != 'true'
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
-        run: |
-          gh pr comment ${{ github.event.pull_request.number }} --body \
-          "<!-- INTEGRATION_TESTS -->
-          Run integration tests manually:
-          [go/deco-tests-run/cli](https://go/deco-tests-run/cli)
-
-          Inputs:
-          * PR number: ${{github.event.pull_request.number}}
-          * Commit SHA: \`${{ env.COMMIT_SHA }}\`
-
-          Checks will be approved automatically on success.
-          "
@@ -9,26 +9,9 @@ on:
 
 jobs:
-  check-token:
-    runs-on: ubuntu-latest
-    outputs:
-      has_token: ${{ steps.set-token-status.outputs.has_token }}
-    steps:
-      - name: Check if GITHUB_TOKEN is set
-        id: set-token-status
-        run: |
-          if [ -z "${{ secrets.GITHUB_TOKEN }}" ]; then
-            echo "GITHUB_TOKEN is empty. User has no access to tokens."
-            echo "::set-output name=has_token::false"
-          else
-            echo "GITHUB_TOKEN is set. User has access to tokens."
-            echo "::set-output name=has_token::true"
-          fi
-
   trigger-tests:
+    if: github.event_name == 'pull_request'
     runs-on: ubuntu-latest
-    needs: check-token
-    if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true'
     environment: "test-trigger-is"
 
     steps:
@@ -33,7 +33,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.23.2
+          go-version: 1.22.7
 
       - name: Setup Python
         uses: actions/setup-python@v5
@@ -68,7 +68,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.23.2
+          go-version: 1.22.7
 
           # No need to download cached dependencies when running gofmt.
           cache: false
@@ -100,7 +100,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.23.2
+          go-version: 1.22.7
 
       # Github repo: https://github.com/ajv-validator/ajv-cli
       - name: Install ajv-cli
@@ -6,15 +6,6 @@ on:
       - "main"
       - "demo-*"
 
-  # Confirm that snapshot builds work if this file is modified.
-  pull_request:
-    types:
-      - opened
-      - synchronize
-      - reopened
-    paths:
-      - ".github/workflows/release-snapshot.yml"
-
   workflow_dispatch:
 
 jobs:
@@ -30,7 +21,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.23.2
+          go-version: 1.22.7
 
           # The default cache key for this action considers only the `go.sum` file.
           # We include .goreleaser.yaml here to differentiate from the cache used by the push action
@@ -22,7 +22,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.23.2
+          go-version: 1.22.7
 
           # The default cache key for this action considers only the `go.sum` file.
           # We include .goreleaser.yaml here to differentiate from the cache used by the push action
@@ -63,7 +63,7 @@ jobs:
           echo "VERSION=${VERSION:1}" >> $GITHUB_ENV
 
       - name: Update setup-cli
-        uses: actions/github-script@v7
+        uses: actions/github-script@v6
         with:
           github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
           script: |
@@ -87,7 +87,7 @@ jobs:
           echo "VERSION=${VERSION:1}" >> $GITHUB_ENV
 
       - name: Update homebrew-tap
-        uses: actions/github-script@v7
+        uses: actions/github-script@v6
        with:
           github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
           script: |
@@ -124,7 +124,7 @@ jobs:
           echo "VERSION=${VERSION:1}" >> $GITHUB_ENV
 
       - name: Update CLI version in the VSCode extension
-        uses: actions/github-script@v7
+        uses: actions/github-script@v6
         with:
           github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
           script: |
@@ -95,7 +95,7 @@ checksum:
   algorithm: sha256
 
 snapshot:
-  version_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}'
+  name_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}'
 
 changelog:
   sort: asc
CHANGELOG.md (36 changes)
@@ -1,41 +1,5 @@
 # Version changelog
 
-## [Release] Release v0.232.1
-
-This patch release fixes the following error observed when deploying to /Shared root folder
-"Error: Path (/Shared/.bundle/.../resources) doesn't exist"
-
-Bundles:
-* Fixed adding /Workspace prefix for resource paths ([#1866](https://github.com/databricks/cli/pull/1866)).
-
-## [Release] Release v0.232.0
-
-**New features for Databricks Asset Bundles:**
-
-This release adds support for managing AI/BI dashboards as part of your bundle configuration. The `bundle generate` command is updated to support producing dashboard bundle configuration as well as a serialized JSON representation of the dashboard.
-You can find an example configuration and walkthrough at https://github.com/databricks/bundle-examples/tree/main/knowledge_base/dashboard_nyc_taxi
-
-CLI:
-* Add privacy notice to README ([#1841](https://github.com/databricks/cli/pull/1841)).
-
-Bundles:
-* Add support for AI/BI dashboards ([#1743](https://github.com/databricks/cli/pull/1743)).
-* Added validator for folder permissions ([#1824](https://github.com/databricks/cli/pull/1824)).
-* Add bundle generate variant for dashboards ([#1847](https://github.com/databricks/cli/pull/1847)).
-* Use SetPermissions instead of UpdatePermissions when setting folder permissions based on top-level ones ([#1822](https://github.com/databricks/cli/pull/1822)).
-
-Internal:
-* Attempt to reduce test flakiness on Windows ([#1845](https://github.com/databricks/cli/pull/1845)).
-* Reuse resource resolution code for the run command ([#1858](https://github.com/databricks/cli/pull/1858)).
-* [Internal] Automatically trigger integration tests on PR ([#1857](https://github.com/databricks/cli/pull/1857)).
-* [Internal] Add test instructions for external contributors ([#1863](https://github.com/databricks/cli/pull/1863)).
-* Add `libs/dyn/jsonsaver` ([#1862](https://github.com/databricks/cli/pull/1862)).
-
-Dependency updates:
-* Bump github.com/fatih/color from 1.17.0 to 1.18.0 ([#1861](https://github.com/databricks/cli/pull/1861)).
-
 ## [Release] Release v0.231.0
 
 CLI:
@@ -7,8 +7,8 @@ import (
 	"testing"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/bundletest"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/internal/testutil"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
@@ -1,18 +0,0 @@
-package generate
-
-import (
-	"github.com/databricks/cli/libs/dyn"
-	"github.com/databricks/databricks-sdk-go/service/dashboards"
-)
-
-func ConvertDashboardToValue(dashboard *dashboards.Dashboard, filePath string) (dyn.Value, error) {
-	// The majority of fields of the dashboard struct are read-only.
-	// We copy the relevant fields manually.
-	dv := map[string]dyn.Value{
-		"display_name": dyn.NewValue(dashboard.DisplayName, []dyn.Location{{Line: 1}}),
-		"warehouse_id": dyn.NewValue(dashboard.WarehouseId, []dyn.Location{{Line: 2}}),
-		"file_path":    dyn.NewValue(filePath, []dyn.Location{{Line: 3}}),
-	}
-
-	return dyn.V(dv), nil
-}
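For context: the deleted helper above fed the (also deleted) `bundle generate dashboard` command, which nested its output under `resources.dashboards.<key>` before saving YAML. A minimal sketch of that shape, using only `dyn.V`, `dyn.NewValue`, and `dyn.Location` as they appear in this diff; the field values and the `example_dashboard` key are made up:

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

func main() {
	// Mirrors what the removed ConvertDashboardToValue produced; the
	// field values here are invented for illustration.
	v := dyn.V(map[string]dyn.Value{
		"display_name": dyn.NewValue("Example Dashboard", []dyn.Location{{Line: 1}}),
		"warehouse_id": dyn.NewValue("w123", []dyn.Location{{Line: 2}}),
		"file_path":    dyn.NewValue("../src/example.lvdash.json", []dyn.Location{{Line: 3}}),
	})

	// The generate command nested the value under resources.dashboards.<key>
	// before saving it as YAML (see the deleted dashboard.go further down).
	result := dyn.V(map[string]dyn.Value{
		"resources": dyn.V(map[string]dyn.Value{
			"dashboards": dyn.V(map[string]dyn.Value{
				"example_dashboard": v,
			}),
		}),
	})
	fmt.Printf("%v\n", result)
}
```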
@@ -195,7 +195,7 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
 		// the Databricks UI and via the SQL API.
 	}
 
-	// Volumes: Prefix
+	// Apply the prefix to volumes
 	for _, v := range r.Volumes {
 		if containsUserIdentity(v.CatalogName, b.Config.Workspace.CurrentUser) {
 			log.Debugf(ctx, "Skipping prefix for volume %s because catalog %s contains the current user's identity", v.Name, v.CatalogName)
@@ -5,10 +5,10 @@ import (
 	"testing"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/bundletest"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/mutator"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/databricks-sdk-go/service/dashboards"
 	"github.com/stretchr/testify/assert"
@@ -7,9 +7,9 @@ import (
 	"testing"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/bundletest"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/databricks-sdk-go/service/pipelines"
@@ -32,7 +32,6 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di
 		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("file_path")),
 		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("artifact_path")),
 		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("state_path")),
-		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("resource_path")),
 	}
 
 	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
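The mutator touched here prepends `/Workspace` to configured workspace paths; the test hunks below show the expected results. A simplified, illustrative sketch of the transformation on plain strings (the real mutator walks the config tree with the `dyn` patterns listed above, and its exact skip rules may differ):

```go
package main

import (
	"fmt"
	"strings"
)

// prependWorkspacePrefix is an illustrative stand-in for the mutator:
// it prefixes a workspace path with /Workspace unless the path already
// carries the prefix.
func prependWorkspacePrefix(path string) string {
	if strings.HasPrefix(path, "/Workspace/") {
		return path
	}
	return "/Workspace" + path
}

func main() {
	fmt.Println(prependWorkspacePrefix("/Users/jane@doe.com/.bundle/test/dev/files"))
	// Output: /Workspace/Users/jane@doe.com/.bundle/test/dev/files
}
```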
@@ -41,7 +41,6 @@ func TestPrependWorkspacePrefix(t *testing.T) {
 				ArtifactPath: tc.path,
 				FilePath:     tc.path,
 				StatePath:    tc.path,
-				ResourcePath: tc.path,
 			},
 		},
 	}
@@ -52,7 +51,6 @@ func TestPrependWorkspacePrefix(t *testing.T) {
 		require.Equal(t, tc.expected, b.Config.Workspace.ArtifactPath)
 		require.Equal(t, tc.expected, b.Config.Workspace.FilePath)
 		require.Equal(t, tc.expected, b.Config.Workspace.StatePath)
-		require.Equal(t, tc.expected, b.Config.Workspace.ResourcePath)
 	}
 }
@@ -78,5 +76,4 @@ func TestPrependWorkspaceForDefaultConfig(t *testing.T) {
 	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/artifacts", b.Config.Workspace.ArtifactPath)
 	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/files", b.Config.Workspace.FilePath)
 	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/state", b.Config.Workspace.StatePath)
-	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/resources", b.Config.Workspace.ResourcePath)
 }
@@ -286,7 +286,6 @@ func TestValidateDevelopmentMode(t *testing.T) {
 	b.Config.Workspace.StatePath = "/Users/lennart@company.com/.bundle/x/y/state"
 	b.Config.Workspace.FilePath = "/Users/lennart@company.com/.bundle/x/y/files"
 	b.Config.Workspace.ArtifactPath = "/Users/lennart@company.com/.bundle/x/y/artifacts"
-	b.Config.Workspace.ResourcePath = "/Users/lennart@company.com/.bundle/x/y/resources"
 	diags = validateDevelopmentMode(b)
 	require.NoError(t, diags.Error())
 }
@@ -317,7 +316,6 @@ func TestProcessTargetModeProduction(t *testing.T) {
 	b.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state"
 	b.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts"
 	b.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files"
-	b.Config.Workspace.ResourcePath = "/Shared/.bundle/x/y/resources"
 
 	diags = validateProductionMode(context.Background(), b, false)
 	require.ErrorContains(t, diags.Error(), "production")
@@ -6,9 +6,9 @@ import (
 	"testing"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/bundletest"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 )
@@ -6,9 +6,9 @@ import (
 	"testing"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/bundletest"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -8,11 +8,11 @@ import (
 	"testing"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/bundletest"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/mutator"
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/cli/libs/vfs"
@@ -4,10 +4,10 @@ import (
 	"context"
 	"fmt"
 	"path"
+	"strings"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/libraries"
-	"github.com/databricks/cli/bundle/paths"
 	"github.com/databricks/cli/bundle/permissions"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/databricks-sdk-go/apierr"
@@ -24,12 +24,37 @@ func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle)
 		return nil
 	}
 
-	bundlePaths := paths.CollectUniqueWorkspacePathPrefixes(b.Config().Workspace)
+	rootPath := b.Config().Workspace.RootPath
+	paths := []string{}
+	if !libraries.IsVolumesPath(rootPath) && !libraries.IsWorkspaceSharedPath(rootPath) {
+		paths = append(paths, rootPath)
+	}
+
+	if !strings.HasSuffix(rootPath, "/") {
+		rootPath += "/"
+	}
+
+	for _, p := range []string{
+		b.Config().Workspace.ArtifactPath,
+		b.Config().Workspace.FilePath,
+		b.Config().Workspace.StatePath,
+		b.Config().Workspace.ResourcePath,
+	} {
+		if libraries.IsWorkspaceSharedPath(p) || libraries.IsVolumesPath(p) {
+			continue
+		}
+
+		if strings.HasPrefix(p, rootPath) {
+			continue
+		}
+
+		paths = append(paths, p)
+	}
+
 	var diags diag.Diagnostics
 	g, ctx := errgroup.WithContext(ctx)
-	results := make([]diag.Diagnostics, len(bundlePaths))
-	for i, p := range bundlePaths {
+	results := make([]diag.Diagnostics, len(paths))
+	for i, p := range paths {
 		g.Go(func() error {
 			results[i] = checkFolderPermission(ctx, b, p)
 			return nil
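Both sides of this hunk fan the per-path checks out with `errgroup`, each goroutine writing to its own slot of a pre-sized slice so no mutex is needed. A self-contained sketch of the pattern; the string results stand in for `checkFolderPermission`:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	paths := []string{"/Users/foo@bar.com", "/Shared/.bundle/x"}
	g, _ := errgroup.WithContext(context.Background())
	results := make([]string, len(paths))
	for i, p := range paths {
		// Go 1.22+ gives each iteration its own i and p, so capturing
		// them in the closure is safe (the workflow hunks above pin
		// Go 1.22/1.23).
		g.Go(func() error {
			results[i] = "checked " + p // stand-in for checkFolderPermission
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(results)
}
```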
@@ -48,11 +73,6 @@ func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle)
 }
 
 func checkFolderPermission(ctx context.Context, b bundle.ReadOnlyBundle, folderPath string) diag.Diagnostics {
-	// If the folder is shared, then we don't need to check permissions as it was already checked in the other mutator before.
-	if libraries.IsWorkspaceSharedPath(folderPath) {
-		return nil
-	}
-
 	w := b.WorkspaceClient().Workspace
 	obj, err := getClosestExistingObject(ctx, w, folderPath)
 	if err != nil {
@@ -5,9 +5,9 @@ import (
 	"testing"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/bundletest"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/bundle/metadata"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
@@ -6,9 +6,9 @@ import (
 	"testing"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/bundletest"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/internal/testutil"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/databricks-sdk-go/service/compute"
@@ -2,6 +2,7 @@ package libraries
 
 import (
 	"context"
+	"strings"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/libs/diag"
@@ -19,7 +20,7 @@ func GetFilerForLibraries(ctx context.Context, b *bundle.Bundle) (filer.Filer, s
 	}
 
 	switch {
-	case IsVolumesPath(artifactPath):
+	case strings.HasPrefix(artifactPath, "/Volumes/"):
 		return filerForVolume(ctx, b)
 
 	default:
@@ -7,9 +7,9 @@ import (
 	"testing"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/bundletest"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/cli/libs/filer"
@@ -38,7 +38,6 @@ func IsWorkspaceLibrary(library *compute.Library) bool {
 }
 
 // IsVolumesPath returns true if the specified path indicates that
-// it should be interpreted as a Databricks Volumes path.
 func IsVolumesPath(path string) bool {
 	return strings.HasPrefix(path, "/Volumes/")
 }
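The inlined predicate and `libraries.IsVolumesPath` are the same check; the trailing slash in the prefix is load-bearing. A standalone sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// isVolumesPath mirrors libraries.IsVolumesPath, which both sides of the
// hunk above rely on (directly or via the inlined strings.HasPrefix call).
func isVolumesPath(path string) bool {
	return strings.HasPrefix(path, "/Volumes/")
}

func main() {
	fmt.Println(isVolumesPath("/Volumes/main/default/libs")) // true
	fmt.Println(isVolumesPath("/Workspace/Users/x"))         // false
	// The trailing slash in the prefix means the bare "/Volumes" does not match.
	fmt.Println(isVolumesPath("/Volumes")) // false
}
```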
@@ -1,39 +0,0 @@
-package paths
-
-import (
-	"strings"
-
-	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/libraries"
-)
-
-func CollectUniqueWorkspacePathPrefixes(workspace config.Workspace) []string {
-	rootPath := workspace.RootPath
-	paths := []string{}
-	if !libraries.IsVolumesPath(rootPath) {
-		paths = append(paths, rootPath)
-	}
-
-	if !strings.HasSuffix(rootPath, "/") {
-		rootPath += "/"
-	}
-
-	for _, p := range []string{
-		workspace.ArtifactPath,
-		workspace.FilePath,
-		workspace.StatePath,
-		workspace.ResourcePath,
-	} {
-		if libraries.IsVolumesPath(p) {
-			continue
-		}
-
-		if strings.HasPrefix(p, rootPath) {
-			continue
-		}
-
-		paths = append(paths, p)
-	}
-
-	return paths
-}
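The deleted helper collapsed any configured path already covered by the root prefix so each prefix is handled once. An illustrative, dependency-free reduction (`collectPrefixes` is a made-up name; the real helper also skipped `/Volumes/` paths via `libraries.IsVolumesPath`):

```go
package main

import (
	"fmt"
	"strings"
)

// collectPrefixes keeps the root plus any path not already covered by
// the root prefix.
func collectPrefixes(rootPath string, others []string) []string {
	paths := []string{rootPath}
	if !strings.HasSuffix(rootPath, "/") {
		rootPath += "/"
	}
	for _, p := range others {
		if strings.HasPrefix(p, rootPath) {
			continue // covered by the root prefix
		}
		paths = append(paths, p)
	}
	return paths
}

func main() {
	fmt.Println(collectPrefixes("/Users/foo@bar.com", []string{
		"/Users/foo@bar.com/artifacts", // collapses into the root
		"/Shared/.bundle/x/y/state",    // disjoint, kept
	}))
	// Output: [/Users/foo@bar.com /Shared/.bundle/x/y/state]
}
```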
@@ -5,11 +5,8 @@ import (
 	"fmt"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/libraries"
-	"github.com/databricks/cli/bundle/paths"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/databricks-sdk-go/service/workspace"
-	"golang.org/x/sync/errgroup"
 )
 
 type workspaceRootPermissions struct {
@@ -55,35 +52,16 @@ func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error {
 	}
 
 	w := b.WorkspaceClient().Workspace
-	bundlePaths := paths.CollectUniqueWorkspacePathPrefixes(b.Config.Workspace)
-
-	g, ctx := errgroup.WithContext(ctx)
-	for _, p := range bundlePaths {
-		g.Go(func() error {
-			return setPermissions(ctx, w, p, permissions)
-		})
-	}
-
-	return g.Wait()
-}
-
-func setPermissions(ctx context.Context, w workspace.WorkspaceInterface, path string, permissions []workspace.WorkspaceObjectAccessControlRequest) error {
-	// If the folder is shared, then we don't need to set permissions since it's always set for all users and it's checked in mutators before.
-	if libraries.IsWorkspaceSharedPath(path) {
-		return nil
-	}
-
-	obj, err := w.GetStatusByPath(ctx, path)
+	obj, err := w.GetStatusByPath(ctx, b.Config.Workspace.RootPath)
 	if err != nil {
 		return err
 	}
 
-	_, err = w.SetPermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{
+	_, err = w.UpdatePermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{
 		WorkspaceObjectId:   fmt.Sprint(obj.ObjectId),
 		WorkspaceObjectType: "directories",
 		AccessControlList:   permissions,
 	})
 	return err
 }
@@ -21,11 +21,7 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) {
 	b := &bundle.Bundle{
 		Config: config.Root{
 			Workspace: config.Workspace{
 				RootPath:     "/Users/foo@bar.com",
-				ArtifactPath: "/Users/foo@bar.com/artifacts",
-				FilePath:     "/Users/foo@bar.com/files",
-				StatePath:    "/Users/foo@bar.com/state",
-				ResourcePath: "/Users/foo@bar.com/resources",
 			},
 			Permissions: []resources.Permission{
 				{Level: CAN_MANAGE, UserName: "TestUser"},
@@ -63,7 +59,7 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) {
 	workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com").Return(&workspace.ObjectInfo{
 		ObjectId: 1234,
 	}, nil)
-	workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
+	workspaceApi.EXPECT().UpdatePermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
 		AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
 			{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
 			{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
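Behind the `UpdatePermissions`/`SetPermissions` rename: the request payload is identical for both methods; the difference, as suggested by changelog entry #1822 above, is that `SetPermissions` replaces the object's access control list while `UpdatePermissions` merges into it. A sketch using only types and methods visible in this diff; the commented client calls are illustrative:

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/workspace"
)

func main() {
	// The same request works for both calls; only the API semantics differ.
	req := workspace.WorkspaceObjectPermissionsRequest{
		WorkspaceObjectId:   "1234",
		WorkspaceObjectType: "directories",
		AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
			{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
		},
	}
	fmt.Printf("%+v\n", req)

	// With a configured client (illustrative):
	//   w := databricks.Must(databricks.NewWorkspaceClient())
	//   _, err := w.Workspace.SetPermissions(ctx, req)    // replace the ACL
	//   _, err  = w.Workspace.UpdatePermissions(ctx, req) // merge into the ACL
}
```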
@@ -76,116 +72,3 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) {
 	diags := bundle.Apply(context.Background(), b, bundle.Seq(ValidateSharedRootPermissions(), ApplyWorkspaceRootPermissions()))
 	require.Empty(t, diags)
 }
-
-func TestApplyWorkspaceRootPermissionsForAllPaths(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Workspace: config.Workspace{
-				RootPath:     "/Some/Root/Path",
-				ArtifactPath: "/Users/foo@bar.com/artifacts",
-				FilePath:     "/Users/foo@bar.com/files",
-				StatePath:    "/Users/foo@bar.com/state",
-				ResourcePath: "/Users/foo@bar.com/resources",
-			},
-			Permissions: []resources.Permission{
-				{Level: CAN_MANAGE, UserName: "TestUser"},
-				{Level: CAN_VIEW, GroupName: "TestGroup"},
-				{Level: CAN_RUN, ServicePrincipalName: "TestServicePrincipal"},
-			},
-			Resources: config.Resources{
-				Jobs: map[string]*resources.Job{
-					"job_1": {JobSettings: &jobs.JobSettings{Name: "job_1"}},
-					"job_2": {JobSettings: &jobs.JobSettings{Name: "job_2"}},
-				},
-				Pipelines: map[string]*resources.Pipeline{
-					"pipeline_1": {PipelineSpec: &pipelines.PipelineSpec{}},
-					"pipeline_2": {PipelineSpec: &pipelines.PipelineSpec{}},
-				},
-				Models: map[string]*resources.MlflowModel{
-					"model_1": {Model: &ml.Model{}},
-					"model_2": {Model: &ml.Model{}},
-				},
-				Experiments: map[string]*resources.MlflowExperiment{
-					"experiment_1": {Experiment: &ml.Experiment{}},
-					"experiment_2": {Experiment: &ml.Experiment{}},
-				},
-				ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
-					"endpoint_1": {CreateServingEndpoint: &serving.CreateServingEndpoint{}},
-					"endpoint_2": {CreateServingEndpoint: &serving.CreateServingEndpoint{}},
-				},
-			},
-		},
-	}
-
-	m := mocks.NewMockWorkspaceClient(t)
-	b.SetWorkpaceClient(m.WorkspaceClient)
-	workspaceApi := m.GetMockWorkspaceAPI()
-	workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Some/Root/Path").Return(&workspace.ObjectInfo{
-		ObjectId: 1,
-	}, nil)
-	workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/artifacts").Return(&workspace.ObjectInfo{
-		ObjectId: 2,
-	}, nil)
-	workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/files").Return(&workspace.ObjectInfo{
-		ObjectId: 3,
-	}, nil)
-	workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/state").Return(&workspace.ObjectInfo{
-		ObjectId: 4,
-	}, nil)
-	workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/Users/foo@bar.com/resources").Return(&workspace.ObjectInfo{
-		ObjectId: 5,
-	}, nil)
-
-	workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
-		AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
-			{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
-			{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
-			{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
-		},
-		WorkspaceObjectId:   "1",
-		WorkspaceObjectType: "directories",
-	}).Return(nil, nil)
-
-	workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
-		AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
-			{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
-			{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
-			{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
-		},
-		WorkspaceObjectId:   "2",
-		WorkspaceObjectType: "directories",
-	}).Return(nil, nil)
-
-	workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
-		AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
-			{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
-			{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
-			{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
-		},
-		WorkspaceObjectId:   "3",
-		WorkspaceObjectType: "directories",
-	}).Return(nil, nil)
-
-	workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
-		AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
-			{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
-			{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
-			{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
-		},
-		WorkspaceObjectId:   "4",
-		WorkspaceObjectType: "directories",
-	}).Return(nil, nil)
-
-	workspaceApi.EXPECT().SetPermissions(mock.Anything, workspace.WorkspaceObjectPermissionsRequest{
-		AccessControlList: []workspace.WorkspaceObjectAccessControlRequest{
-			{UserName: "TestUser", PermissionLevel: "CAN_MANAGE"},
-			{GroupName: "TestGroup", PermissionLevel: "CAN_READ"},
-			{ServicePrincipalName: "TestServicePrincipal", PermissionLevel: "CAN_RUN"},
-		},
-		WorkspaceObjectId:   "5",
-		WorkspaceObjectType: "directories",
-	}).Return(nil, nil)
-
-	diags := bundle.Apply(context.Background(), b, ApplyWorkspaceRootPermissions())
-	require.NoError(t, diags.Error())
-}
@@ -96,10 +96,9 @@ properties such as the 'catalog' or 'storage' are changed:`
 	// One or more volumes is being recreated.
 	if len(volumeActions) != 0 {
 		msg := `
-This action will result in the deletion or recreation of the following Volumes.
-For managed volumes, the files stored in the volume are also deleted from your
-cloud tenant within 30 days. For external volumes, the metadata about the volume
-is removed from the catalog, but the underlying files are not deleted:`
+This action will result in the deletion or recreation of the following Volumes. For managed volumes,
+this typically results in a deletion of the upstream data in the cloud tenant in ~30 days. For external
+volumes the upstream data in the cloud tenant is not affected:`
 		cmdio.LogString(ctx, msg)
 		for _, action := range volumeActions {
 			cmdio.Log(ctx, action)
@@ -16,7 +16,6 @@ func newGenerateCommand() *cobra.Command {
 
 	cmd.AddCommand(generate.NewGenerateJobCommand())
 	cmd.AddCommand(generate.NewGeneratePipelineCommand())
-	cmd.AddCommand(generate.NewGenerateDashboardCommand())
 	cmd.PersistentFlags().StringVar(&key, "key", "", `resource key to use for the generated configuration`)
 	return cmd
 }
@ -1,467 +0,0 @@
|
||||||
package generate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
|
||||||
"github.com/databricks/cli/bundle/config/generate"
|
|
||||||
"github.com/databricks/cli/bundle/deploy/terraform"
|
|
||||||
"github.com/databricks/cli/bundle/phases"
|
|
||||||
"github.com/databricks/cli/bundle/render"
|
|
||||||
"github.com/databricks/cli/bundle/resources"
|
|
||||||
"github.com/databricks/cli/cmd/root"
|
|
||||||
"github.com/databricks/cli/libs/diag"
|
|
||||||
"github.com/databricks/cli/libs/dyn"
|
|
||||||
"github.com/databricks/cli/libs/dyn/yamlsaver"
|
|
||||||
"github.com/databricks/cli/libs/textutil"
|
|
||||||
"github.com/databricks/databricks-sdk-go"
|
|
||||||
"github.com/databricks/databricks-sdk-go/apierr"
|
|
||||||
"github.com/databricks/databricks-sdk-go/service/dashboards"
|
|
||||||
"github.com/databricks/databricks-sdk-go/service/workspace"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"golang.org/x/exp/maps"
|
|
||||||
"gopkg.in/yaml.v3"
|
|
||||||
)
|
|
||||||
|
|
||||||
type dashboard struct {
|
|
||||||
// Lookup flags for one-time generate.
|
|
||||||
existingPath string
|
|
||||||
existingID string
|
|
||||||
|
|
||||||
// Lookup flag for existing bundle resource.
|
|
||||||
resource string
|
|
||||||
|
|
||||||
// Where to write the configuration and dashboard representation.
|
|
||||||
resourceDir string
|
|
||||||
dashboardDir string
|
|
||||||
|
|
||||||
// Force overwrite of existing files.
|
|
||||||
force bool
|
|
||||||
|
|
||||||
// Watch for changes to the dashboard.
|
|
||||||
watch bool
|
|
||||||
|
|
||||||
// Relative path from the resource directory to the dashboard directory.
|
|
||||||
relativeDashboardDir string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *dashboard) resolveID(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) {
|
|
||||||
switch {
|
|
||||||
case d.existingPath != "":
|
|
||||||
return d.resolveFromPath(ctx, b)
|
|
||||||
case d.existingID != "":
|
|
||||||
return d.resolveFromID(ctx, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", diag.Errorf("expected one of --dashboard-path, --dashboard-id")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *dashboard) resolveFromPath(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) {
|
|
||||||
w := b.WorkspaceClient()
|
|
||||||
obj, err := w.Workspace.GetStatusByPath(ctx, d.existingPath)
|
|
||||||
if err != nil {
|
|
||||||
if apierr.IsMissing(err) {
|
|
||||||
return "", diag.Errorf("dashboard %q not found", path.Base(d.existingPath))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Emit a more descriptive error message for legacy dashboards.
|
|
||||||
if errors.Is(err, apierr.ErrBadRequest) && strings.HasPrefix(err.Error(), "dbsqlDashboard ") {
|
|
||||||
return "", diag.Diagnostics{
|
|
||||||
{
|
|
||||||
Severity: diag.Error,
|
|
||||||
Summary: fmt.Sprintf("dashboard %q is a legacy dashboard", path.Base(d.existingPath)),
|
|
||||||
Detail: "" +
|
|
||||||
"Databricks Asset Bundles work exclusively with AI/BI dashboards.\n" +
|
|
||||||
"\n" +
|
|
||||||
"Instructions on how to convert a legacy dashboard to an AI/BI dashboard\n" +
|
|
||||||
"can be found at: https://docs.databricks.com/en/dashboards/clone-legacy-to-aibi.html.",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", diag.FromErr(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if obj.ObjectType != workspace.ObjectTypeDashboard {
|
|
||||||
found := strings.ToLower(obj.ObjectType.String())
|
|
||||||
return "", diag.Diagnostics{
|
|
||||||
{
|
|
||||||
Severity: diag.Error,
|
|
||||||
Summary: fmt.Sprintf("expected a dashboard, found a %s", found),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if obj.ResourceId == "" {
|
|
||||||
return "", diag.Diagnostics{
|
|
||||||
{
|
|
||||||
Severity: diag.Error,
|
|
||||||
Summary: "expected a non-empty dashboard resource ID",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return obj.ResourceId, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *dashboard) resolveFromID(ctx context.Context, b *bundle.Bundle) (string, diag.Diagnostics) {
|
|
||||||
w := b.WorkspaceClient()
|
|
||||||
obj, err := w.Lakeview.GetByDashboardId(ctx, d.existingID)
|
|
||||||
if err != nil {
|
|
||||||
if apierr.IsMissing(err) {
|
|
||||||
return "", diag.Errorf("dashboard with ID %s not found", d.existingID)
|
|
||||||
}
|
|
||||||
return "", diag.FromErr(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return obj.DashboardId, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func remarshalJSON(data []byte) ([]byte, error) {
|
|
||||||
var tmp any
|
|
||||||
var err error
|
|
||||||
err = json.Unmarshal(data, &tmp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remarshal the data to ensure its formatting is stable.
|
|
||||||
// The result will have alphabetically sorted keys and be indented.
|
|
||||||
// HTML escaping is disabled to retain characters such as &, <, and >.
|
|
||||||
var buf bytes.Buffer
|
|
||||||
enc := json.NewEncoder(&buf)
|
|
||||||
enc.SetIndent("", " ")
|
|
||||||
enc.SetEscapeHTML(false)
|
|
||||||
err = enc.Encode(tmp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, filename string) error {
|
|
||||||
// Unmarshal and remarshal the serialized dashboard to ensure it is formatted correctly.
|
|
||||||
// The result will have alphabetically sorted keys and be indented.
|
|
||||||
data, err := remarshalJSON([]byte(dashboard.SerializedDashboard))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure the output directory exists.
|
|
||||||
if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean the filename to ensure it is a valid path (and can be used on this OS).
|
|
||||||
filename = filepath.Clean(filename)
|
|
||||||
|
|
||||||
// Attempt to make the path relative to the bundle root.
|
|
||||||
rel, err := filepath.Rel(b.BundleRootPath, filename)
|
|
||||||
if err != nil {
|
|
||||||
rel = filename
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify that the file does not already exist.
|
|
||||||
info, err := os.Stat(filename)
|
|
||||||
if err == nil {
|
|
||||||
if info.IsDir() {
|
|
||||||
return fmt.Errorf("%s is a directory", rel)
|
|
||||||
}
|
|
||||||
if !d.force {
|
|
||||||
return fmt.Errorf("%s already exists. Use --force to overwrite", rel)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("Writing dashboard to %q\n", rel)
|
|
||||||
return os.WriteFile(filename, data, 0644)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, key string) error {
|
|
||||||
// Save serialized dashboard definition to the dashboard directory.
|
|
||||||
dashboardBasename := fmt.Sprintf("%s.lvdash.json", key)
|
|
||||||
dashboardPath := filepath.Join(d.dashboardDir, dashboardBasename)
|
|
||||||
err := d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Synthesize resource configuration.
|
|
||||||
v, err := generate.ConvertDashboardToValue(dashboard, path.Join(d.relativeDashboardDir, dashboardBasename))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
result := map[string]dyn.Value{
|
|
||||||
"resources": dyn.V(map[string]dyn.Value{
|
|
||||||
"dashboards": dyn.V(map[string]dyn.Value{
|
|
||||||
key: v,
|
|
||||||
}),
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure the output directory exists.
|
|
||||||
if err := os.MkdirAll(d.resourceDir, 0755); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save the configuration to the resource directory.
|
|
||||||
resourcePath := filepath.Join(d.resourceDir, fmt.Sprintf("%s.dashboard.yml", key))
|
|
||||||
saver := yamlsaver.NewSaverWithStyle(map[string]yaml.Style{
|
|
||||||
"display_name": yaml.DoubleQuotedStyle,
|
|
||||||
})
|
|
||||||
|
|
||||||
// Attempt to make the path relative to the bundle root.
|
|
||||||
rel, err := filepath.Rel(b.BundleRootPath, resourcePath)
|
|
||||||
if err != nil {
|
|
||||||
rel = resourcePath
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("Writing configuration to %q\n", rel)
|
|
||||||
err = saver.SaveAsYAML(result, resourcePath, d.force)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func waitForChanges(ctx context.Context, w *databricks.WorkspaceClient, dashboard *dashboards.Dashboard) diag.Diagnostics {
|
|
||||||
// Compute [time.Time] for the most recent update.
|
|
||||||
tref, err := time.Parse(time.RFC3339, dashboard.UpdateTime)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
obj, err := w.Workspace.GetStatusByPath(ctx, dashboard.Path)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute [time.Time] from timestamp in millis since epoch.
|
|
||||||
tcur := time.Unix(0, obj.ModifiedAt*int64(time.Millisecond))
|
|
||||||
if tcur.After(tref) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
time.Sleep(1 * time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *dashboard) updateDashboardForResource(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
|
||||||
resource, ok := b.Config.Resources.Dashboards[d.resource]
|
|
||||||
if !ok {
|
|
||||||
return diag.Errorf("dashboard resource %q is not defined", d.resource)
|
|
||||||
}
|
|
||||||
|
|
||||||
if resource.FilePath == "" {
|
|
||||||
return diag.Errorf("dashboard resource %q has no file path defined", d.resource)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve the dashboard ID from the resource.
|
|
||||||
dashboardID := resource.ID
|
|
||||||
|
|
||||||
// Overwrite the dashboard at the path referenced from the resource.
|
|
||||||
dashboardPath := resource.FilePath
|
|
||||||
|
|
||||||
w := b.WorkspaceClient()
|
|
||||||
|
|
||||||
// Start polling the underlying dashboard for changes.
|
|
||||||
var etag string
|
|
||||||
for {
|
|
||||||
dashboard, err := w.Lakeview.GetByDashboardId(ctx, dashboardID)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if etag != dashboard.Etag {
|
|
||||||
err = d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Abort if we are not watching for changes.
|
|
||||||
if !d.watch {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the etag for the next iteration.
|
|
||||||
etag = dashboard.Etag
|
|
||||||
|
|
||||||
// Now poll the workspace API for changes.
|
|
||||||
// This is much more efficient than polling the dashboard API because it
|
|
||||||
// includes the entire serialized dashboard whereas we're only interested
|
|
||||||
// in the last modified time of the dashboard here.
|
|
||||||
waitForChanges(ctx, w, dashboard)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *dashboard) generateForExisting(ctx context.Context, b *bundle.Bundle, dashboardID string) diag.Diagnostics {
|
|
||||||
w := b.WorkspaceClient()
|
|
||||||
dashboard, err := w.Lakeview.GetByDashboardId(ctx, dashboardID)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
key := textutil.NormalizeString(dashboard.DisplayName)
|
|
||||||
err = d.saveConfiguration(ctx, b, dashboard, key)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *dashboard) initialize(b *bundle.Bundle) diag.Diagnostics {
|
|
||||||
// Make the paths absolute if they aren't already.
|
|
||||||
if !filepath.IsAbs(d.resourceDir) {
|
|
||||||
d.resourceDir = filepath.Join(b.BundleRootPath, d.resourceDir)
|
|
||||||
}
|
|
||||||
if !filepath.IsAbs(d.dashboardDir) {
|
|
||||||
d.dashboardDir = filepath.Join(b.BundleRootPath, d.dashboardDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure we know how the dashboard path is relative to the resource path.
|
|
||||||
rel, err := filepath.Rel(d.resourceDir, d.dashboardDir)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.relativeDashboardDir = filepath.ToSlash(rel)
|
|
||||||
return nil
|
|
||||||
}

func (d *dashboard) runForResource(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	diags := bundle.Apply(ctx, b, bundle.Seq(
		phases.Initialize(),
		terraform.Interpolate(),
		terraform.Write(),
		terraform.StatePull(),
		terraform.Load(),
	))
	if diags.HasError() {
		return diags
	}

	return d.updateDashboardForResource(ctx, b)
}

func (d *dashboard) runForExisting(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	// Resolve the ID of the dashboard to generate configuration for.
	dashboardID, diags := d.resolveID(ctx, b)
	if diags.HasError() {
		return diags
	}

	return d.generateForExisting(ctx, b, dashboardID)
}

func (d *dashboard) RunE(cmd *cobra.Command, args []string) error {
	ctx := cmd.Context()
	b, diags := root.MustConfigureBundle(cmd)
	if diags.HasError() {
		return diags.Error()
	}

	diags = d.initialize(b)
	if diags.HasError() {
		return diags.Error()
	}

	if d.resource != "" {
		diags = d.runForResource(ctx, b)
	} else {
		diags = d.runForExisting(ctx, b)
	}

	renderOpts := render.RenderOptions{RenderSummaryTable: false}
	err := render.RenderDiagnostics(cmd.OutOrStdout(), b, diags, renderOpts)
	if err != nil {
		return fmt.Errorf("failed to render output: %w", err)
	}

	if diags.HasError() {
		return root.ErrAlreadyPrinted
	}

	return nil
}

// filterDashboards reports whether the given resource reference is a dashboard.
func filterDashboards(ref resources.Reference) bool {
	return ref.Description.SingularName == "dashboard"
}

// dashboardResourceCompletion provides completion candidates for the resource flag.
func dashboardResourceCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	b, diags := root.MustConfigureBundle(cmd)
	if err := diags.Error(); err != nil {
		cobra.CompErrorln(err.Error())
		return nil, cobra.ShellCompDirectiveError
	}

	if b == nil {
		return nil, cobra.ShellCompDirectiveNoFileComp
	}

	return maps.Keys(resources.Completions(b, filterDashboards)), cobra.ShellCompDirectiveNoFileComp
}

func NewGenerateDashboardCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "dashboard",
		Short: "Generate configuration for a dashboard",
	}

	d := &dashboard{}

	// Lookup flags.
	cmd.Flags().StringVar(&d.existingPath, "existing-path", "", `workspace path of the dashboard to generate configuration for`)
	cmd.Flags().StringVar(&d.existingID, "existing-id", "", `ID of the dashboard to generate configuration for`)
	cmd.Flags().StringVar(&d.resource, "resource", "", `resource key of dashboard to watch for changes`)

	// Alias lookup flags that include the resource type name.
	// Included for symmetry with the other generate commands, but we prefer the shorter flags.
	cmd.Flags().StringVar(&d.existingPath, "existing-dashboard-path", "", `workspace path of the dashboard to generate configuration for`)
	cmd.Flags().StringVar(&d.existingID, "existing-dashboard-id", "", `ID of the dashboard to generate configuration for`)
	cmd.Flags().MarkHidden("existing-dashboard-path")
	cmd.Flags().MarkHidden("existing-dashboard-id")

	// Output flags.
	cmd.Flags().StringVarP(&d.resourceDir, "resource-dir", "d", "./resources", `directory to write the configuration to`)
	cmd.Flags().StringVarP(&d.dashboardDir, "dashboard-dir", "s", "./src", `directory to write the dashboard representation to`)
	cmd.Flags().BoolVarP(&d.force, "force", "f", false, `force overwrite existing files in the output directory`)

	// Exactly one of the lookup flags must be provided.
	cmd.MarkFlagsOneRequired(
		"existing-path",
		"existing-id",
		"resource",
	)

	// Watch flag. This is relevant only in combination with the resource flag.
	cmd.Flags().BoolVar(&d.watch, "watch", false, `watch for changes to the dashboard and update the configuration`)

	// Make sure the watch flag is only used with the resource flag.
	cmd.MarkFlagsMutuallyExclusive("watch", "existing-path")
	cmd.MarkFlagsMutuallyExclusive("watch", "existing-id")

	// Completion for the resource flag.
	cmd.RegisterFlagCompletionFunc("resource", dashboardResourceCompletion)

	cmd.RunE = d.RunE
	return cmd
}
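A minimal sketch of the flag contract this constructor establishes; the driver below is hypothetical and not part of the CLI, but cobra enforces the group rules before RunE ever runs:

package main

import "fmt"

func main() {
	cmd := NewGenerateDashboardCommand() // assumes access to the generate package

	// --watch only combines with --resource; pairing it with --existing-id
	// trips the MarkFlagsMutuallyExclusive rule.
	cmd.SetArgs([]string{"--existing-id", "f00dcafe", "--watch"})
	if err := cmd.Execute(); err != nil {
		fmt.Println(err) // cobra reports the mutually exclusive flag group
	}
}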
@ -1,182 +0,0 @@
package generate

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/databricks-sdk-go/apierr"
	"github.com/databricks/databricks-sdk-go/experimental/mocks"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
	"github.com/databricks/databricks-sdk-go/service/workspace"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestDashboard_ErrorOnLegacyDashboard(t *testing.T) {
	// Response to a GetStatus request on a path pointing to a legacy dashboard.
	//
	// < HTTP/2.0 400 Bad Request
	// < {
	// <   "error_code": "BAD_REQUEST",
	// <   "message": "dbsqlDashboard is not user-facing."
	// < }
	d := dashboard{
		existingPath: "/path/to/legacy dashboard",
	}

	m := mocks.NewMockWorkspaceClient(t)
	w := m.GetMockWorkspaceAPI()
	w.On("GetStatusByPath", mock.Anything, "/path/to/legacy dashboard").Return(nil, &apierr.APIError{
		StatusCode: 400,
		ErrorCode:  "BAD_REQUEST",
		Message:    "dbsqlDashboard is not user-facing.",
	})

	ctx := context.Background()
	b := &bundle.Bundle{}
	b.SetWorkpaceClient(m.WorkspaceClient)

	_, diags := d.resolveID(ctx, b)
	require.Len(t, diags, 1)
	assert.Equal(t, diags[0].Summary, "dashboard \"legacy dashboard\" is a legacy dashboard")
}

func TestDashboard_ExistingID_Nominal(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	dashboardsAPI := m.GetMockLakeviewAPI()
	dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(&dashboards.Dashboard{
		DashboardId:         "f00dcafe",
		DisplayName:         "This is a test dashboard",
		SerializedDashboard: `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`,
		WarehouseId:         "w4r3h0us3",
	}, nil)

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-id").Value.Set("f00dcafe")

	err := cmd.RunE(cmd, []string{})
	require.NoError(t, err)

	// Assert the contents of the generated configuration
	data, err := os.ReadFile(filepath.Join(root, "resources", "this_is_a_test_dashboard.dashboard.yml"))
	require.NoError(t, err)
	assert.Equal(t, `resources:
  dashboards:
    this_is_a_test_dashboard:
      display_name: "This is a test dashboard"
      warehouse_id: w4r3h0us3
      file_path: ../src/this_is_a_test_dashboard.lvdash.json
`, string(data))

	data, err = os.ReadFile(filepath.Join(root, "src", "this_is_a_test_dashboard.lvdash.json"))
	require.NoError(t, err)
	assert.JSONEq(t, `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`, string(data))
}

func TestDashboard_ExistingID_NotFound(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	dashboardsAPI := m.GetMockLakeviewAPI()
	dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(nil, &apierr.APIError{
		StatusCode: 404,
	})

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-id").Value.Set("f00dcafe")

	err := cmd.RunE(cmd, []string{})
	require.Error(t, err)
}

func TestDashboard_ExistingPath_Nominal(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	workspaceAPI := m.GetMockWorkspaceAPI()
	workspaceAPI.EXPECT().GetStatusByPath(mock.Anything, "/path/to/dashboard").Return(&workspace.ObjectInfo{
		ObjectType: workspace.ObjectTypeDashboard,
		ResourceId: "f00dcafe",
	}, nil)

	dashboardsAPI := m.GetMockLakeviewAPI()
	dashboardsAPI.EXPECT().GetByDashboardId(mock.Anything, "f00dcafe").Return(&dashboards.Dashboard{
		DashboardId:         "f00dcafe",
		DisplayName:         "This is a test dashboard",
		SerializedDashboard: `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`,
		WarehouseId:         "w4r3h0us3",
	}, nil)

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-path").Value.Set("/path/to/dashboard")

	err := cmd.RunE(cmd, []string{})
	require.NoError(t, err)

	// Assert the contents of the generated configuration
	data, err := os.ReadFile(filepath.Join(root, "resources", "this_is_a_test_dashboard.dashboard.yml"))
	require.NoError(t, err)
	assert.Equal(t, `resources:
  dashboards:
    this_is_a_test_dashboard:
      display_name: "This is a test dashboard"
      warehouse_id: w4r3h0us3
      file_path: ../src/this_is_a_test_dashboard.lvdash.json
`, string(data))

	data, err = os.ReadFile(filepath.Join(root, "src", "this_is_a_test_dashboard.lvdash.json"))
	require.NoError(t, err)
	assert.JSONEq(t, `{"pages":[{"displayName":"New Page","layout":[],"name":"12345678"}]}`, string(data))
}

func TestDashboard_ExistingPath_NotFound(t *testing.T) {
	root := t.TempDir()
	b := &bundle.Bundle{
		BundleRootPath: root,
	}

	m := mocks.NewMockWorkspaceClient(t)
	b.SetWorkpaceClient(m.WorkspaceClient)

	workspaceAPI := m.GetMockWorkspaceAPI()
	workspaceAPI.EXPECT().GetStatusByPath(mock.Anything, "/path/to/dashboard").Return(nil, &apierr.APIError{
		StatusCode: 404,
	})

	ctx := bundle.Context(context.Background(), b)
	cmd := NewGenerateDashboardCommand()
	cmd.SetContext(ctx)
	cmd.Flag("existing-path").Value.Set("/path/to/dashboard")

	err := cmd.RunE(cmd, []string{})
	require.Error(t, err)
}
go.mod
@ -1,8 +1,8 @@
 module github.com/databricks/cli

-go 1.23
+go 1.22.0

-toolchain go1.23.2
+toolchain go1.22.7

 require (
 	github.com/Masterminds/semver/v3 v3.3.0 // MIT
@ -9,7 +9,6 @@ import (
 	"testing"

 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/bundletest"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/libraries"
@ -245,11 +244,6 @@ func TestAccUploadArtifactFileToInvalidVolume(t *testing.T) {
 	})
 	require.NoError(t, err)

-	t.Cleanup(func() {
-		err = w.Schemas.DeleteByFullName(ctx, "main."+schemaName)
-		require.NoError(t, err)
-	})
-
 	t.Run("volume not in DAB", func(t *testing.T) {
 		volumePath := fmt.Sprintf("/Volumes/main/%s/doesnotexist", schemaName)
 		dir := t.TempDir()
@ -313,11 +307,17 @@ func TestAccUploadArtifactFileToInvalidVolume(t *testing.T) {
 		}

 		// set location of volume definition in config.
-		bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{{
-			File:   filepath.Join(dir, "databricks.yml"),
-			Line:   1,
-			Column: 2,
-		}})
+		b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+			return dyn.Map(v, "resources.volumes.foo", func(p dyn.Path, volume dyn.Value) (dyn.Value, error) {
+				return volume.WithLocations([]dyn.Location{
+					{
+						File:   filepath.Join(dir, "databricks.yml"),
+						Line:   1,
+						Column: 2,
+					},
+				}), nil
+			})
+		})

 		diags := bundle.Apply(ctx, b, bundle.Seq(libraries.ExpandGlobReferences(), libraries.Upload()))
 		assert.Contains(t, diags, diag.Diagnostic{
@ -249,7 +249,7 @@ func TestAccDeployUcVolume(t *testing.T) {
 	w := wt.W

 	uniqueId := uuid.New().String()
-	bundleRoot, err := initTestTemplate(t, ctx, "volume", map[string]any{
+	bundleRoot, err := initTestTemplate(t, ctx, "uc_volume", map[string]any{
 		"unique_id": uniqueId,
 	})
 	require.NoError(t, err)
@ -284,10 +284,9 @@ func TestAccDeployUcVolume(t *testing.T) {
 	t.Setenv("BUNDLE_ROOT", bundleRoot)
 	stdout, stderr, err := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run()
 	assert.Error(t, err)
-	assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following Volumes.
-For managed volumes, the files stored in the volume are also deleted from your
-cloud tenant within 30 days. For external volumes, the metadata about the volume
-is removed from the catalog, but the underlying files are not deleted:
+	assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following Volumes. For managed volumes,
+this typically results in a deletion of the upstream data in the cloud tenant in ~30 days. For external
+volumes the upstream data in the cloud tenant is not affected:
   recreate volume foo`)
 	assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
@ -111,7 +111,3 @@ func PanicsWithError(t assert.TestingT, errString string, f func(), msgAndArgs ...interface{}) bool {
 func NotPanics(t assert.TestingT, f func(), msgAndArgs ...interface{}) bool {
 	return assert.NotPanics(t, f, msgAndArgs...)
 }
-
-func JSONEq(t assert.TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
-	return assert.JSONEq(t, expected, actual, msgAndArgs...)
-}
@ -1,39 +0,0 @@
package jsonsaver

import (
	"bytes"
	"encoding/json"
)

// The encoder type encapsulates a [json.Encoder] and its target buffer.
// Escaping of HTML characters in the output is disabled.
type encoder struct {
	*json.Encoder
	*bytes.Buffer
}

func newEncoder() encoder {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)

	// By default, json.Encoder escapes HTML characters, converting symbols like '<' to '\u003c'.
	// This behavior helps prevent XSS attacks when JSON is embedded within HTML.
	// However, we disable this feature since we're not dealing with HTML context.
	// Keeping the escapes enabled would result in unnecessary differences when processing JSON payloads
	// that already contain escaped characters.
	enc.SetEscapeHTML(false)
	return encoder{enc, &buf}
}

func marshalNoEscape(v any) ([]byte, error) {
	enc := newEncoder()
	err := enc.Encode(v)
	return enc.Bytes(), err
}

func marshalIndentNoEscape(v any, prefix, indent string) ([]byte, error) {
	enc := newEncoder()
	enc.SetIndent(prefix, indent)
	err := enc.Encode(v)
	return enc.Bytes(), err
}
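For contrast, a small hypothetical helper showing what the stock encoder does with the same input; this is exactly the escaping that marshalNoEscape turns off:

package jsonsaver

import (
	"encoding/json"
	"fmt"
)

// exampleEscaping is a hypothetical helper for illustration; it is not part
// of the package.
func exampleEscaping() {
	b, _ := json.Marshal("1 < 2")
	fmt.Println(string(b)) // "1 \u003c 2" (HTML-escaped by default)

	out, _ := marshalNoEscape("1 < 2")
	fmt.Print(string(out)) // "1 < 2" (Encode also appends a trailing newline)
}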
@ -1,41 +0,0 @@
package jsonsaver

import (
	"testing"

	assert "github.com/databricks/cli/libs/dyn/dynassert"
)

func TestEncoder_MarshalNoEscape(t *testing.T) {
	out, err := marshalNoEscape("1 < 2")
	if !assert.NoError(t, err) {
		return
	}

	// Confirm the output.
	assert.JSONEq(t, `"1 < 2"`, string(out))

	// Confirm that HTML escaping is disabled.
	assert.NotContains(t, string(out), "\\u003c")

	// Confirm that the encoder writes a trailing newline.
	assert.Contains(t, string(out), "\n")
}

func TestEncoder_MarshalIndentNoEscape(t *testing.T) {
	out, err := marshalIndentNoEscape([]string{"1 < 2", "2 < 3"}, "", " ")
	if !assert.NoError(t, err) {
		return
	}

	// Confirm the output.
	assert.JSONEq(t, `["1 < 2", "2 < 3"]`, string(out))

	// Confirm that HTML escaping is disabled.
	assert.NotContains(t, string(out), "\\u003c")

	// Confirm that the encoder performs indenting and writes a trailing newline.
	assert.Contains(t, string(out), "[\n")
	assert.Contains(t, string(out), " \"1 < 2\",\n")
	assert.Contains(t, string(out), "]\n")
}
@ -1,89 +0,0 @@
package jsonsaver

import (
	"bytes"
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

// Marshal is a version of [json.Marshal] for [dyn.Value].
//
// Objects in the output retain the order of keys as they appear in the underlying [dyn.Value].
// The output does not escape HTML characters in strings.
func Marshal(v dyn.Value) ([]byte, error) {
	return marshalNoEscape(wrap{v})
}

// MarshalIndent is a version of [json.MarshalIndent] for [dyn.Value].
//
// Objects in the output retain the order of keys as they appear in the underlying [dyn.Value].
// The output does not escape HTML characters in strings.
func MarshalIndent(v dyn.Value, prefix, indent string) ([]byte, error) {
	return marshalIndentNoEscape(wrap{v}, prefix, indent)
}

// Wrapper type for [dyn.Value] to expose the [json.Marshaler] interface.
type wrap struct {
	v dyn.Value
}

// MarshalJSON implements the [json.Marshaler] interface for the [dyn.Value] wrapper type.
func (w wrap) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	if err := marshalValue(&buf, w.v); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// marshalValue recursively writes JSON for a [dyn.Value] to the buffer.
func marshalValue(buf *bytes.Buffer, v dyn.Value) error {
	switch v.Kind() {
	case dyn.KindString, dyn.KindBool, dyn.KindInt, dyn.KindFloat, dyn.KindTime, dyn.KindNil:
		out, err := marshalNoEscape(v.AsAny())
		if err != nil {
			return err
		}

		// The encoder writes a trailing newline, so we need to remove it
		// to avoid adding extra newlines when embedding this JSON.
		out = out[:len(out)-1]
		buf.Write(out)
	case dyn.KindMap:
		buf.WriteByte('{')
		for i, pair := range v.MustMap().Pairs() {
			if i > 0 {
				buf.WriteByte(',')
			}
			// Require keys to be strings.
			if pair.Key.Kind() != dyn.KindString {
				return fmt.Errorf("map key must be a string, got %s", pair.Key.Kind())
			}
			// Marshal the key
			if err := marshalValue(buf, pair.Key); err != nil {
				return err
			}
			buf.WriteByte(':')
			// Marshal the value
			if err := marshalValue(buf, pair.Value); err != nil {
				return err
			}
		}
		buf.WriteByte('}')
	case dyn.KindSequence:
		buf.WriteByte('[')
		for i, item := range v.MustSequence() {
			if i > 0 {
				buf.WriteByte(',')
			}
			if err := marshalValue(buf, item); err != nil {
				return err
			}
		}
		buf.WriteByte(']')
	default:
		return fmt.Errorf("unsupported kind: %d", v.Kind())
	}
	return nil
}
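One property worth calling out explicitly: encoding/json sorts map keys alphabetically, while Marshal preserves the insertion order of the underlying dyn.Mapping. A small sketch with hypothetical key names:

package jsonsaver

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

// exampleKeyOrder is a hypothetical helper for illustration; it is not part
// of the package.
func exampleKeyOrder() {
	m := dyn.NewMapping()
	m.Set(dyn.V("zebra"), dyn.V(1))
	m.Set(dyn.V("apple"), dyn.V(2))

	out, err := Marshal(dyn.V(m))
	if err == nil {
		// Prints {"zebra":1,"apple":2}: insertion order, not alphabetical.
		fmt.Print(string(out))
	}
}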
@ -1,100 +0,0 @@
package jsonsaver

import (
	"testing"

	"github.com/databricks/cli/libs/dyn"
	assert "github.com/databricks/cli/libs/dyn/dynassert"
)

func TestMarshal_String(t *testing.T) {
	b, err := Marshal(dyn.V("string"))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `"string"`, string(b))
	}
}

func TestMarshal_Bool(t *testing.T) {
	b, err := Marshal(dyn.V(true))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `true`, string(b))
	}
}

func TestMarshal_Int(t *testing.T) {
	b, err := Marshal(dyn.V(42))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `42`, string(b))
	}
}

func TestMarshal_Float(t *testing.T) {
	b, err := Marshal(dyn.V(42.1))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `42.1`, string(b))
	}
}

func TestMarshal_Time(t *testing.T) {
	b, err := Marshal(dyn.V(dyn.MustTime("2021-01-01T00:00:00Z")))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `"2021-01-01T00:00:00Z"`, string(b))
	}
}

func TestMarshal_Map(t *testing.T) {
	m := dyn.NewMapping()
	m.Set(dyn.V("key1"), dyn.V("value1"))
	m.Set(dyn.V("key2"), dyn.V("value2"))

	b, err := Marshal(dyn.V(m))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `{"key1":"value1","key2":"value2"}`, string(b))
	}
}

func TestMarshal_Sequence(t *testing.T) {
	var s []dyn.Value
	s = append(s, dyn.V("value1"))
	s = append(s, dyn.V("value2"))

	b, err := Marshal(dyn.V(s))
	if assert.NoError(t, err) {
		assert.JSONEq(t, `["value1","value2"]`, string(b))
	}
}

func TestMarshal_Complex(t *testing.T) {
	map1 := dyn.NewMapping()
	map1.Set(dyn.V("str1"), dyn.V("value1"))
	map1.Set(dyn.V("str2"), dyn.V("value2"))

	seq1 := []dyn.Value{}
	seq1 = append(seq1, dyn.V("value1"))
	seq1 = append(seq1, dyn.V("value2"))

	root := dyn.NewMapping()
	root.Set(dyn.V("map1"), dyn.V(map1))
	root.Set(dyn.V("seq1"), dyn.V(seq1))

	// Marshal without indent.
	b, err := Marshal(dyn.V(root))
	if assert.NoError(t, err) {
		assert.Equal(t, `{"map1":{"str1":"value1","str2":"value2"},"seq1":["value1","value2"]}`+"\n", string(b))
	}

	// Marshal with indent.
	b, err = MarshalIndent(dyn.V(root), "", " ")
	if assert.NoError(t, err) {
		assert.Equal(t, `{
 "map1": {
  "str1": "value1",
  "str2": "value2"
 },
 "seq1": [
  "value1",
  "value2"
 ]
}`+"\n", string(b))
	}
}