mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'origin' into feature/uc-volumes
commit 68dc6c1ce4

@@ -1 +1 @@
cf9c61453990df0f9453670f2fe68e1b128647a2
d25296d2f4aa7bd6195c816fdf82e0f960f775da
@@ -115,6 +115,9 @@ func new{{.PascalName}}() *cobra.Command {
{{- if .Request}}
var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}}
{{- if .RequestBodyField }}
{{.CamelName}}Req.{{.RequestBodyField.PascalName}} = &{{.Service.Package.Name}}.{{.RequestBodyField.Entity.PascalName}}{}
{{- end }}
{{- if .CanUseJson}}
var {{.CamelName}}Json flags.JsonFlag
{{- end}}

@@ -127,21 +130,27 @@ func new{{.PascalName}}() *cobra.Command {
cmd.Flags().BoolVar(&{{.CamelName}}SkipWait, "no-wait", {{.CamelName}}SkipWait, `do not wait to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
cmd.Flags().DurationVar(&{{.CamelName}}Timeout, "timeout", {{.Wait.Timeout}}*time.Minute, `maximum amount of time to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
{{end -}}
{{if .Request}}// TODO: short flags
{{- $request := .Request -}}
{{- if .RequestBodyField -}}
{{- $request = .RequestBodyField.Entity -}}
{{- end -}}
{{if $request }}// TODO: short flags
{{- if .CanUseJson}}
cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`)
{{- end}}
{{$method := .}}
{{ if not .IsJsonOnly }}
{{range .Request.Fields -}}
{{range $request.Fields -}}
{{- if not .Required -}}
{{if .Entity.IsObject }}// TODO: complex arg: {{.Name}}
{{else if .Entity.IsAny }}// TODO: any: {{.Name}}
{{else if .Entity.ArrayValue }}// TODO: array: {{.Name}}
{{else if .Entity.MapValue }}// TODO: map via StringToStringVar: {{.Name}}
{{else if .Entity.IsEmpty }}// TODO: output-only field
{{else if .Entity.Enum }}cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. Supported values: {{template "printArray" .Entity.Enum}}`)
{{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`)
{{else if .Entity.IsComputed -}}
{{else if .IsOutputOnly -}}
{{else if .Entity.Enum }}cmd.Flags().Var(&{{- template "request-body-obj" (dict "Method" $method "Field" .)}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. Supported values: {{template "printArray" .Entity.Enum}}`)
{{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{- template "request-body-obj" (dict "Method" $method "Field" .)}}, "{{.KebabName}}", {{- template "request-body-obj" (dict "Method" $method "Field" .)}}, `{{.Summary | without "`"}}`)
{{end}}
{{- end -}}
{{- end}}

@@ -161,14 +170,14 @@ func new{{.PascalName}}() *cobra.Command {
{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}
{{- $hasPosArgs := .HasRequiredPositionalArguments -}}
{{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}}
{{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len $request.RequiredFields)) -}}
{{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}}
{{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}}
{{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}}
{{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}}
{{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt .Request) (eq 1 (len .Request.RequiredRequestBodyFields)) -}}
{{- $onlyPathArgsRequiredAsPositionalArguments := and .Request (eq (len .RequiredPositionalArguments) (len .Request.RequiredPathFields)) -}}
{{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson .Request.HasRequiredRequestBodyFields) -}}
{{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt $request) (eq 1 (len $request.RequiredRequestBodyFields)) -}}
{{- $onlyPathArgsRequiredAsPositionalArguments := and $request (eq (len .RequiredPositionalArguments) (len $request.RequiredPathFields)) -}}
{{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson (or $request.HasRequiredRequestBodyFields )) -}}
{{- $hasCustomArgHandler := or $hasRequiredArgs $hasDifferentArgsWithJsonFlag -}}
{{- $atleastOneArgumentWithDescription := false -}}

@@ -206,12 +215,12 @@ func new{{.PascalName}}() *cobra.Command {
cmd.Args = func(cmd *cobra.Command, args []string) error {
{{- if $hasDifferentArgsWithJsonFlag }}
if cmd.Flags().Changed("json") {
err := root.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args)
err := root.ExactArgs({{len $request.RequiredPathFields}})(cmd, args)
if err != nil {
{{- if eq 0 (len .Request.RequiredPathFields) }}
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := .Request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
{{- if eq 0 (len $request.RequiredPathFields) }}
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := $request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
{{- else }}
return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := .Request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. Provide{{- range $index, $field := .Request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := $request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. Provide{{- range $index, $field := $request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
{{- end }}
}
return nil

@@ -232,7 +241,7 @@ func new{{.PascalName}}() *cobra.Command {
{{- if .Request }}
{{ if .CanUseJson }}
if cmd.Flags().Changed("json") {
diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req{{ if .RequestBodyField }}.{{.RequestBodyField.PascalName}}{{ end }})
if diags.HasError() {
return diags.Error()
}

@@ -251,20 +260,20 @@ func new{{.PascalName}}() *cobra.Command {
{{- if $hasIdPrompt}}
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No{{range .Request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down."
promptSpinner <- "No{{range $request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down."
names, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.Service.List.NamedIdMap.PascalName}}(ctx{{if .Service.List.Request}}, {{.Service.Package.Name}}.{{.Service.List.Request.PascalName}}{}{{end}})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for {{.Service.TitleName}} drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "{{range .Request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}")
id, err := cmdio.Select(ctx, names, "{{range $request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have {{range .Request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}")
return fmt.Errorf("expected to have {{range $request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}")
}
{{- end -}}

@@ -388,13 +397,19 @@ func new{{.PascalName}}() *cobra.Command {
if !cmd.Flags().Changed("json") {
{{- end }}
{{if not $field.Entity.IsString -}}
_, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}})
_, err = fmt.Sscan(args[{{$arg}}], &{{- template "request-body-obj" (dict "Method" $method "Field" $field)}})
if err != nil {
return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}])
}{{else -}}
{{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}]
{{- template "request-body-obj" (dict "Method" $method "Field" $field)}} = args[{{$arg}}]
{{- end -}}
{{- if $optionalIfJsonIsUsed }}
}
{{- end }}
{{- end -}}

{{- define "request-body-obj" -}}
{{- $method := .Method -}}
{{- $field := .Field -}}
{{$method.CamelName}}Req{{ if (and $method.RequestBodyField (not $field.IsPath)) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}}
{{- end -}}
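The hunks above thread a new `$request` template variable that resolves either to the request type itself or, when `.RequestBodyField` is set, to the nested body entity; the new `request-body-obj` helper then renders the accessor path used for flag bindings, JSON unmarshalling, and positional arguments. As a rough, hand-written illustration (not generated output; the types, flag names, and the use of encoding/json in place of the CLI's flags.JsonFlag are all assumptions), a generated command whose request wraps its body in a dedicated field has roughly this shape:

```go
package main

// Sketch only: hypothetical request types for a "create" command whose request
// wraps its body in a dedicated field. Optional body fields become flags
// addressed through that field (what "request-body-obj" renders), path fields
// stay on the outer request, and --json is unmarshalled into the body only.
import (
	"encoding/json"
	"fmt"

	"github.com/spf13/cobra"
)

type volumeBody struct { // hypothetical body type
	Name    string `json:"name"`
	Comment string `json:"comment,omitempty"`
}

type createVolumeRequest struct { // hypothetical request type
	CatalogName string      // path field, filled from a positional argument
	Volume      *volumeBody // request body field
}

func newCreate() *cobra.Command {
	var req createVolumeRequest
	req.Volume = &volumeBody{} // mirrors: {{.CamelName}}Req.{{.RequestBodyField.PascalName}} = &...{}

	var jsonPayload string
	cmd := &cobra.Command{Use: "create CATALOG_NAME NAME"}
	cmd.Flags().StringVar(&req.Volume.Comment, "comment", req.Volume.Comment, "optional comment") // via request-body-obj
	cmd.Flags().StringVar(&jsonPayload, "json", "", "inline JSON with the request body")

	// Mirrors the generated Args func: with --json only path arguments are
	// positional, otherwise body fields are positional too.
	cmd.Args = func(cmd *cobra.Command, args []string) error {
		if cmd.Flags().Changed("json") {
			return cobra.ExactArgs(1)(cmd, args)
		}
		return cobra.ExactArgs(2)(cmd, args)
	}

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		if cmd.Flags().Changed("json") {
			// mirrors: {{.CamelName}}Json.Unmarshal(&req.Volume)
			if err := json.Unmarshal([]byte(jsonPayload), req.Volume); err != nil {
				return err
			}
		} else {
			req.Volume.Name = args[1] // body field bound to a positional argument
		}
		req.CatalogName = args[0] // path field is always positional
		fmt.Printf("%s %+v\n", req.CatalogName, *req.Volume)
		return nil
	}
	return cmd
}

func main() {
	_ = newCreate().Execute()
}
```

The point of the indirection is that path parameters keep living on the outer request while everything else is addressed through the body field, which is what the template's Args and RunE branches switch on.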
@@ -30,13 +30,14 @@ cmd/account/users/users.go linguist-generated=true
cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true
cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true
cmd/account/workspaces/workspaces.go linguist-generated=true
cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go linguist-generated=true
cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go linguist-generated=true
cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true
cmd/workspace/alerts/alerts.go linguist-generated=true
cmd/workspace/apps/apps.go linguist-generated=true
cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true
cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true
cmd/workspace/catalogs/catalogs.go linguist-generated=true
cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true
cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true
cmd/workspace/clusters/clusters.go linguist-generated=true
cmd/workspace/cmd.go linguist-generated=true

@@ -48,6 +49,7 @@ cmd/workspace/consumer-listings/consumer-listings.go linguist-generated=true
cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go linguist-generated=true
cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true
cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true
cmd/workspace/credentials/credentials.go linguist-generated=true
cmd/workspace/current-user/current-user.go linguist-generated=true
cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
cmd/workspace/dashboards/dashboards.go linguist-generated=true
@@ -11,7 +11,6 @@ on:
branches:
- main

jobs:
comment-on-pr:
runs-on: ubuntu-latest

@@ -19,73 +18,15 @@ jobs:
pull-requests: write

steps:
# NOTE: The following checks may not be accurate depending on Org or Repo settings.
- name: Check user and potential secret access
id: check-secrets-access
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
USER_LOGIN="${{ github.event.pull_request.user.login }}"
REPO_OWNER="${{ github.repository_owner }}"
REPO_NAME="${{ github.event.repository.name }}"

echo "Pull request opened by: $USER_LOGIN"

# Check if PR is from a fork
IS_FORK=$([[ "${{ github.event.pull_request.head.repo.full_name }}" != "${{ github.repository }}" ]] && echo "true" || echo "false")

HAS_ACCESS="false"

# Check user's permission level on the repository
USER_PERMISSION=$(gh api repos/$REPO_OWNER/$REPO_NAME/collaborators/$USER_LOGIN/permission --jq '.permission')

if [[ "$USER_PERMISSION" == "admin" || "$USER_PERMISSION" == "write" ]]; then
HAS_ACCESS="true"
elif [[ "$USER_PERMISSION" == "read" ]]; then
# For read access, we need to check if the user has been explicitly granted secret access
# This information is not directly available via API, so we'll make an assumption
# that read access does not imply secret access
HAS_ACCESS="false"
fi

# Check if repo owner is an organization
IS_ORG=$(gh api users/$REPO_OWNER --jq '.type == "Organization"')

if [[ "$IS_ORG" == "true" && "$HAS_ACCESS" == "false" ]]; then
# Check if user is a member of any team with write or admin access to the repo
TEAMS_WITH_ACCESS=$(gh api repos/$REPO_OWNER/$REPO_NAME/teams --jq '.[] | select(.permission == "push" or .permission == "admin") | .slug')
for team in $TEAMS_WITH_ACCESS; do
IS_TEAM_MEMBER=$(gh api orgs/$REPO_OWNER/teams/$team/memberships/$USER_LOGIN --silent && echo "true" || echo "false")
if [[ "$IS_TEAM_MEMBER" == "true" ]]; then
HAS_ACCESS="true"
break
fi
done
fi

# If it's a fork, set HAS_ACCESS to false regardless of other checks
if [[ "$IS_FORK" == "true" ]]; then
HAS_ACCESS="false"
fi

echo "has_secrets_access=$HAS_ACCESS" >> $GITHUB_OUTPUT
if [[ "$HAS_ACCESS" == "true" ]]; then
echo "User $USER_LOGIN likely has access to secrets"
else
echo "User $USER_LOGIN likely does not have access to secrets"
fi

- uses: actions/checkout@v4

- name: Delete old comments
if: steps.check-secrets-access.outputs.has_secrets_access != 'true'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Delete previous comment if it exists
previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \
--jq '.[] | select(.body | startswith("<!-- INTEGRATION_TESTS -->")) | .id')
--jq '.[] | select(.body | startswith("<!-- INTEGRATION_TESTS_MANUAL -->")) | .id')
echo "Previous comment IDs: $previous_comment_ids"
# Iterate over each comment ID and delete the comment
if [ ! -z "$previous_comment_ids" ]; then

@@ -96,14 +37,15 @@ jobs:
fi

- name: Comment on PR
if: steps.check-secrets-access.outputs.has_secrets_access != 'true'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
run: |
gh pr comment ${{ github.event.pull_request.number }} --body \
"<!-- INTEGRATION_TESTS -->
Run integration tests manually:
"<!-- INTEGRATION_TESTS_MANUAL -->
If integration tests don't run automatically, an authorized user can run them manually by following the instructions below:

Trigger:
[go/deco-tests-run/cli](https://go/deco-tests-run/cli)

Inputs:
@@ -11,17 +11,18 @@ on:
jobs:
check-token:
runs-on: ubuntu-latest
environment: "test-trigger-is"
outputs:
has_token: ${{ steps.set-token-status.outputs.has_token }}
steps:
- name: Check if GITHUB_TOKEN is set
- name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
id: set-token-status
run: |
if [ -z "${{ secrets.GITHUB_TOKEN }}" ]; then
echo "GITHUB_TOKEN is empty. User has no access to tokens."
if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets."
echo "::set-output name=has_token::false"
else
echo "GITHUB_TOKEN is set. User has no access to tokens."
echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets."
echo "::set-output name=has_token::true"
fi
CHANGELOG.md
@@ -1,5 +1,45 @@
# Version changelog

## [Release] Release v0.234.0

Bundles:
* Do not execute build on bundle destroy ([#1882](https://github.com/databricks/cli/pull/1882)).
* Add support for non-Python ipynb notebooks to DABs ([#1827](https://github.com/databricks/cli/pull/1827)).

API Changes:
* Added `databricks credentials` command group.
* Changed `databricks lakeview create` command with new required argument order.

OpenAPI commit d25296d2f4aa7bd6195c816fdf82e0f960f775da (2024-11-07)
Dependency updates:
* Upgrade TF provider to 1.58.0 ([#1900](https://github.com/databricks/cli/pull/1900)).
* Bump golang.org/x/sync from 0.8.0 to 0.9.0 ([#1892](https://github.com/databricks/cli/pull/1892)).
* Bump golang.org/x/text from 0.19.0 to 0.20.0 ([#1893](https://github.com/databricks/cli/pull/1893)).
* Bump golang.org/x/mod from 0.21.0 to 0.22.0 ([#1895](https://github.com/databricks/cli/pull/1895)).
* Bump golang.org/x/oauth2 from 0.23.0 to 0.24.0 ([#1894](https://github.com/databricks/cli/pull/1894)).
* Bump github.com/databricks/databricks-sdk-go from 0.49.0 to 0.51.0 ([#1878](https://github.com/databricks/cli/pull/1878)).

## [Release] Release v0.233.0

CLI:
* Clean host URL in the `auth login` command ([#1879](https://github.com/databricks/cli/pull/1879)).

Bundles:
* Fix bundle run when run interactively ([#1880](https://github.com/databricks/cli/pull/1880)).
* Fix relative path resolution for dashboards on Windows ([#1881](https://github.com/databricks/cli/pull/1881)).

Internal:
* Address goreleaser deprecation warning ([#1872](https://github.com/databricks/cli/pull/1872)).
* Update actions/github-script to v7 ([#1873](https://github.com/databricks/cli/pull/1873)).
* Use Go 1.23 ([#1871](https://github.com/databricks/cli/pull/1871)).
* [Internal] Always write message for manual integration test trigger ([#1874](https://github.com/databricks/cli/pull/1874)).
* Add `cmd-exec-id` to user agent ([#1808](https://github.com/databricks/cli/pull/1808)).
* Added E2E test to run Python wheels on interactive cluster created in bundle ([#1864](https://github.com/databricks/cli/pull/1864)).

Dependency updates:
* Bump github.com/hashicorp/terraform-json from 0.22.1 to 0.23.0 ([#1877](https://github.com/databricks/cli/pull/1877)).

## [Release] Release v0.232.1

This patch release fixes the following error observed when deploying to /Shared root folder
@@ -240,7 +240,7 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
// Dashboards: Prefix
for key, dashboard := range r.Dashboards {
if dashboard == nil || dashboard.CreateDashboardRequest == nil {
if dashboard == nil || dashboard.Dashboard == nil {
diags = diags.Extend(diag.Errorf("dashboard %s s is not defined", key))
continue
}
@@ -26,13 +26,13 @@ func TestConfigureDashboardDefaultsParentPath(t *testing.T) {
"d1": {
// Empty string is skipped.
// See below for how it is set.
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
ParentPath: "",
},
},
"d2": {
// Non-empty string is skipped.
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
ParentPath: "already-set",
},
},
@@ -5,14 +5,12 @@ import (
"strings"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/dbr"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/env"
"github.com/databricks/cli/libs/filer"
"github.com/databricks/cli/libs/vfs"
)

const envDatabricksRuntimeVersion = "DATABRICKS_RUNTIME_VERSION"

type configureWSFS struct{}

func ConfigureWSFS() bundle.Mutator {

@@ -32,7 +30,7 @@ func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
}

// The executable must be running on DBR.
if _, ok := env.Lookup(ctx, envDatabricksRuntimeVersion); !ok {
if !dbr.RunsOnRuntime(ctx) {
return nil
}
@@ -0,0 +1,65 @@
package mutator_test

import (
"context"
"runtime"
"testing"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/libs/dbr"
"github.com/databricks/cli/libs/vfs"
"github.com/databricks/databricks-sdk-go/config"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/stretchr/testify/assert"
)

func mockBundleForConfigureWSFS(t *testing.T, syncRootPath string) *bundle.Bundle {
// The native path of the sync root on Windows will never match the /Workspace prefix,
// so the test case for nominal behavior will always fail.
if runtime.GOOS == "windows" {
t.Skip("this test is not applicable on Windows")
}

b := &bundle.Bundle{
SyncRoot: vfs.MustNew(syncRootPath),
}

w := mocks.NewMockWorkspaceClient(t)
w.WorkspaceClient.Config = &config.Config{}
b.SetWorkpaceClient(w.WorkspaceClient)

return b
}

func TestConfigureWSFS_SkipsIfNotWorkspacePrefix(t *testing.T) {
b := mockBundleForConfigureWSFS(t, "/foo")
originalSyncRoot := b.SyncRoot

ctx := context.Background()
diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS())
assert.Empty(t, diags)
assert.Equal(t, originalSyncRoot, b.SyncRoot)
}

func TestConfigureWSFS_SkipsIfNotRunningOnRuntime(t *testing.T) {
b := mockBundleForConfigureWSFS(t, "/Workspace/foo")
originalSyncRoot := b.SyncRoot

ctx := context.Background()
ctx = dbr.MockRuntime(ctx, false)
diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS())
assert.Empty(t, diags)
assert.Equal(t, originalSyncRoot, b.SyncRoot)
}

func TestConfigureWSFS_SwapSyncRoot(t *testing.T) {
b := mockBundleForConfigureWSFS(t, "/Workspace/foo")
originalSyncRoot := b.SyncRoot

ctx := context.Background()
ctx = dbr.MockRuntime(ctx, true)
diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS())
assert.Empty(t, diags)
assert.NotEqual(t, originalSyncRoot, b.SyncRoot)
}
@@ -65,9 +65,8 @@ func TestInitializeURLs(t *testing.T) {
},
QualityMonitors: map[string]*resources.QualityMonitor{
"qualityMonitor1": {
CreateMonitor: &catalog.CreateMonitor{
TableName: "catalog.schema.qualityMonitor1",
},
TableName: "catalog.schema.qualityMonitor1",
CreateMonitor: &catalog.CreateMonitor{},
},
},
Schemas: map[string]*resources.Schema{

@@ -89,7 +88,7 @@ func TestInitializeURLs(t *testing.T) {
Dashboards: map[string]*resources.Dashboard{
"dashboard1": {
ID: "01ef8d56871e1d50ae30ce7375e42478",
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "My special dashboard",
},
},
@@ -44,6 +44,11 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di
return dyn.InvalidValue, fmt.Errorf("expected string, got %s", v.Kind())
}

// Skip prefixing if the path does not start with /, it might be variable reference or smth else.
if !strings.HasPrefix(path, "/") {
return pv, nil
}

for _, prefix := range skipPrefixes {
if strings.HasPrefix(path, prefix) {
return pv, nil
@@ -31,6 +31,14 @@ func TestPrependWorkspacePrefix(t *testing.T) {
path: "/Volumes/Users/test",
expected: "/Volumes/Users/test",
},
{
path: "~/test",
expected: "~/test",
},
{
path: "${workspace.file_path}/test",
expected: "${workspace.file_path}/test",
},
}

for _, tc := range testCases {
@@ -102,16 +102,23 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
"registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}},
},
QualityMonitors: map[string]*resources.QualityMonitor{
"qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}},
"qualityMonitor2": {
"qualityMonitor1": {
TableName: "qualityMonitor1",
CreateMonitor: &catalog.CreateMonitor{
TableName: "qualityMonitor2",
Schedule: &catalog.MonitorCronSchedule{},
OutputSchemaName: "catalog.schema",
},
},
"qualityMonitor2": {
TableName: "qualityMonitor2",
CreateMonitor: &catalog.CreateMonitor{
OutputSchemaName: "catalog.schema",
Schedule: &catalog.MonitorCronSchedule{},
},
},
"qualityMonitor3": {
TableName: "qualityMonitor3",
CreateMonitor: &catalog.CreateMonitor{
TableName: "qualityMonitor3",
OutputSchemaName: "catalog.schema",
Schedule: &catalog.MonitorCronSchedule{
PauseStatus: catalog.MonitorCronSchedulePauseStatusUnpaused,
},

@@ -129,7 +136,7 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
},
Dashboards: map[string]*resources.Dashboard{
"dashboard1": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "dashboard1",
},
},
@@ -163,7 +163,7 @@ func (t *translateContext) translateNoOp(literal, localFullPath, localRelPath, r
}

func (t *translateContext) retainLocalAbsoluteFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
info, err := t.b.SyncRoot.Stat(localRelPath)
info, err := t.b.SyncRoot.Stat(filepath.ToSlash(localRelPath))
if errors.Is(err, fs.ErrNotExist) {
return "", fmt.Errorf("file %s not found", literal)
}
@@ -0,0 +1,54 @@
package mutator_test

import (
"context"
"path/filepath"
"testing"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/vfs"
"github.com/databricks/databricks-sdk-go/service/dashboards"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestTranslatePathsDashboards_FilePathRelativeSubDirectory(t *testing.T) {
dir := t.TempDir()
touchEmptyFile(t, filepath.Join(dir, "src", "my_dashboard.lvdash.json"))

b := &bundle.Bundle{
SyncRootPath: dir,
SyncRoot: vfs.MustNew(dir),
Config: config.Root{
Resources: config.Resources{
Dashboards: map[string]*resources.Dashboard{
"dashboard": {
Dashboard: &dashboards.Dashboard{
DisplayName: "My Dashboard",
},
FilePath: "../src/my_dashboard.lvdash.json",
},
},
},
},
}

bundletest.SetLocation(b, "resources.dashboards", []dyn.Location{{
File: filepath.Join(dir, "resources/dashboard.yml"),
}})

diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
require.NoError(t, diags.Error())

// Assert that the file path for the dashboard has been converted to its local absolute path.
assert.Equal(
t,
filepath.Join(dir, "src", "my_dashboard.lvdash.json"),
b.Config.Resources.Dashboards["dashboard"].FilePath,
)
}
@@ -17,7 +17,7 @@ type Dashboard struct {
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`

*dashboards.CreateDashboardRequest
*dashboards.Dashboard

// =========================
// === Additional fields ===
@@ -13,17 +13,15 @@ import (
)

type QualityMonitor struct {
// Represents the Input Arguments for Terraform and will get
// converted to a HCL representation for CRUD
*catalog.CreateMonitor

// This represents the id which is the full name of the monitor
// (catalog_name.schema_name.table_name) that can be used
// as a reference in other resources. This value is returned by terraform.
ID string `json:"id,omitempty" bundle:"readonly"`

ID string `json:"id,omitempty" bundle:"readonly"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`

// The table name is a required field but not included as a JSON field in [catalog.CreateMonitor].
TableName string `json:"table_name"`

// This struct defines the creation payload for a monitor.
*catalog.CreateMonitor
}

func (s *QualityMonitor) UnmarshalJSON(b []byte) error {
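For context on the QualityMonitor hunk above: the SDK's catalog.CreateMonitor does not expose table_name as a JSON field, so the bundle resource now carries its own required TableName alongside the embedded creation payload. Below is a minimal sketch of that embedding pattern, using hypothetical stand-in types and plain encoding/json rather than the CLI's own marshalling helpers:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for catalog.CreateMonitor: note there is no table_name JSON field.
type CreateMonitor struct {
	AssetsDir        string `json:"assets_dir"`
	OutputSchemaName string `json:"output_schema_name"`
}

// Stand-in for resources.QualityMonitor after the change: the required
// table_name lives on the resource itself, next to the embedded payload.
type QualityMonitor struct {
	TableName string `json:"table_name"`
	*CreateMonitor
}

func main() {
	src := []byte(`{
		"table_name": "catalog.schema.quality_monitor",
		"assets_dir": "/Shared/assets",
		"output_schema_name": "catalog.schema"
	}`)

	qm := QualityMonitor{CreateMonitor: &CreateMonitor{}}
	if err := json.Unmarshal(src, &qm); err != nil {
		panic(err)
	}
	// table_name decodes into the resource; the rest into the embedded payload.
	fmt.Println(qm.TableName, qm.AssetsDir, qm.OutputSchemaName)
}
```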
@@ -29,7 +29,7 @@ func mockDashboardBundle(t *testing.T) *bundle.Bundle {
Resources: config.Resources{
Dashboards: map[string]*resources.Dashboard{
"dash1": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "My Special Dashboard",
},
},
@@ -810,7 +810,7 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
},
Dashboards: map[string]*resources.Dashboard{
"test_dashboard": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "test_dashboard",
},
},

@@ -984,12 +984,12 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
},
Dashboards: map[string]*resources.Dashboard{
"test_dashboard": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "test_dashboard",
},
},
"test_dashboard_new": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "test_dashboard_new",
},
},
@@ -15,7 +15,7 @@ import (

func TestConvertDashboard(t *testing.T) {
var src = resources.Dashboard{
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "my dashboard",
WarehouseId: "f00dcafe",
ParentPath: "/some/path",
@@ -15,8 +15,8 @@ import (

func TestConvertQualityMonitor(t *testing.T) {
var src = resources.QualityMonitor{
TableName: "test_table_name",
CreateMonitor: &catalog.CreateMonitor{
TableName: "test_table_name",
AssetsDir: "assets_dir",
OutputSchemaName: "output_schema_name",
InferenceLog: &catalog.MonitorInferenceLog{
@@ -4,6 +4,7 @@ bundle:
resources:
quality_monitors:
myqualitymonitor:
table_name: catalog.schema.quality_monitor
inference_log:
granularities:
- a
@@ -1,3 +1,3 @@
package schema

const ProviderVersion = "1.54.0"
const ProviderVersion = "1.58.0"
@ -0,0 +1,98 @@
|
|||
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
type DataSourceFunctionsFunctionsInputParamsParameters struct {
|
||||
Comment string `json:"comment,omitempty"`
|
||||
Name string `json:"name"`
|
||||
ParameterDefault string `json:"parameter_default,omitempty"`
|
||||
ParameterMode string `json:"parameter_mode,omitempty"`
|
||||
ParameterType string `json:"parameter_type,omitempty"`
|
||||
Position int `json:"position"`
|
||||
TypeIntervalType string `json:"type_interval_type,omitempty"`
|
||||
TypeJson string `json:"type_json,omitempty"`
|
||||
TypeName string `json:"type_name"`
|
||||
TypePrecision int `json:"type_precision,omitempty"`
|
||||
TypeScale int `json:"type_scale,omitempty"`
|
||||
TypeText string `json:"type_text"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsInputParams struct {
|
||||
Parameters []DataSourceFunctionsFunctionsInputParamsParameters `json:"parameters,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsReturnParamsParameters struct {
|
||||
Comment string `json:"comment,omitempty"`
|
||||
Name string `json:"name"`
|
||||
ParameterDefault string `json:"parameter_default,omitempty"`
|
||||
ParameterMode string `json:"parameter_mode,omitempty"`
|
||||
ParameterType string `json:"parameter_type,omitempty"`
|
||||
Position int `json:"position"`
|
||||
TypeIntervalType string `json:"type_interval_type,omitempty"`
|
||||
TypeJson string `json:"type_json,omitempty"`
|
||||
TypeName string `json:"type_name"`
|
||||
TypePrecision int `json:"type_precision,omitempty"`
|
||||
TypeScale int `json:"type_scale,omitempty"`
|
||||
TypeText string `json:"type_text"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsReturnParams struct {
|
||||
Parameters []DataSourceFunctionsFunctionsReturnParamsParameters `json:"parameters,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsRoutineDependenciesDependenciesFunction struct {
|
||||
FunctionFullName string `json:"function_full_name"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsRoutineDependenciesDependenciesTable struct {
|
||||
TableFullName string `json:"table_full_name"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsRoutineDependenciesDependencies struct {
|
||||
Function []DataSourceFunctionsFunctionsRoutineDependenciesDependenciesFunction `json:"function,omitempty"`
|
||||
Table []DataSourceFunctionsFunctionsRoutineDependenciesDependenciesTable `json:"table,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctionsRoutineDependencies struct {
|
||||
Dependencies []DataSourceFunctionsFunctionsRoutineDependenciesDependencies `json:"dependencies,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceFunctionsFunctions struct {
|
||||
BrowseOnly bool `json:"browse_only,omitempty"`
|
||||
CatalogName string `json:"catalog_name,omitempty"`
|
||||
Comment string `json:"comment,omitempty"`
|
||||
CreatedAt int `json:"created_at,omitempty"`
|
||||
CreatedBy string `json:"created_by,omitempty"`
|
||||
DataType string `json:"data_type,omitempty"`
|
||||
ExternalLanguage string `json:"external_language,omitempty"`
|
||||
ExternalName string `json:"external_name,omitempty"`
|
||||
FullDataType string `json:"full_data_type,omitempty"`
|
||||
FullName string `json:"full_name,omitempty"`
|
||||
FunctionId string `json:"function_id,omitempty"`
|
||||
IsDeterministic bool `json:"is_deterministic,omitempty"`
|
||||
IsNullCall bool `json:"is_null_call,omitempty"`
|
||||
MetastoreId string `json:"metastore_id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Owner string `json:"owner,omitempty"`
|
||||
ParameterStyle string `json:"parameter_style,omitempty"`
|
||||
Properties string `json:"properties,omitempty"`
|
||||
RoutineBody string `json:"routine_body,omitempty"`
|
||||
RoutineDefinition string `json:"routine_definition,omitempty"`
|
||||
SchemaName string `json:"schema_name,omitempty"`
|
||||
SecurityType string `json:"security_type,omitempty"`
|
||||
SpecificName string `json:"specific_name,omitempty"`
|
||||
SqlDataAccess string `json:"sql_data_access,omitempty"`
|
||||
SqlPath string `json:"sql_path,omitempty"`
|
||||
UpdatedAt int `json:"updated_at,omitempty"`
|
||||
UpdatedBy string `json:"updated_by,omitempty"`
|
||||
InputParams []DataSourceFunctionsFunctionsInputParams `json:"input_params,omitempty"`
|
||||
ReturnParams []DataSourceFunctionsFunctionsReturnParams `json:"return_params,omitempty"`
|
||||
RoutineDependencies []DataSourceFunctionsFunctionsRoutineDependencies `json:"routine_dependencies,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceFunctions struct {
|
||||
CatalogName string `json:"catalog_name"`
|
||||
IncludeBrowse bool `json:"include_browse,omitempty"`
|
||||
SchemaName string `json:"schema_name"`
|
||||
Functions []DataSourceFunctionsFunctions `json:"functions,omitempty"`
|
||||
}
|
|
@ -35,6 +35,7 @@ type DataSourceStorageCredentialStorageCredentialInfo struct {
|
|||
Comment string `json:"comment,omitempty"`
|
||||
CreatedAt int `json:"created_at,omitempty"`
|
||||
CreatedBy string `json:"created_by,omitempty"`
|
||||
FullName string `json:"full_name,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
IsolationMode string `json:"isolation_mode,omitempty"`
|
||||
MetastoreId string `json:"metastore_id,omitempty"`
|
||||
|
|
|
@ -4,7 +4,6 @@ package schema
|
|||
|
||||
type DataSourceVolumes struct {
|
||||
CatalogName string `json:"catalog_name"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Ids []string `json:"ids,omitempty"`
|
||||
SchemaName string `json:"schema_name"`
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ type DataSources struct {
|
|||
Directory map[string]any `json:"databricks_directory,omitempty"`
|
||||
ExternalLocation map[string]any `json:"databricks_external_location,omitempty"`
|
||||
ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"`
|
||||
Functions map[string]any `json:"databricks_functions,omitempty"`
|
||||
Group map[string]any `json:"databricks_group,omitempty"`
|
||||
InstancePool map[string]any `json:"databricks_instance_pool,omitempty"`
|
||||
InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"`
|
||||
|
@ -79,6 +80,7 @@ func NewDataSources() *DataSources {
|
|||
Directory: make(map[string]any),
|
||||
ExternalLocation: make(map[string]any),
|
||||
ExternalLocations: make(map[string]any),
|
||||
Functions: make(map[string]any),
|
||||
Group: make(map[string]any),
|
||||
InstancePool: make(map[string]any),
|
||||
InstanceProfiles: make(map[string]any),
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
type ResourceAlertConditionOperandColumn struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ResourceAlertConditionOperand struct {
|
||||
Column *ResourceAlertConditionOperandColumn `json:"column,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceAlertConditionThresholdValue struct {
|
||||
BoolValue bool `json:"bool_value,omitempty"`
|
||||
DoubleValue int `json:"double_value,omitempty"`
|
||||
StringValue string `json:"string_value,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceAlertConditionThreshold struct {
|
||||
Value *ResourceAlertConditionThresholdValue `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceAlertCondition struct {
|
||||
EmptyResultState string `json:"empty_result_state,omitempty"`
|
||||
Op string `json:"op"`
|
||||
Operand *ResourceAlertConditionOperand `json:"operand,omitempty"`
|
||||
Threshold *ResourceAlertConditionThreshold `json:"threshold,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceAlert struct {
|
||||
CreateTime string `json:"create_time,omitempty"`
|
||||
CustomBody string `json:"custom_body,omitempty"`
|
||||
CustomSubject string `json:"custom_subject,omitempty"`
|
||||
DisplayName string `json:"display_name"`
|
||||
Id string `json:"id,omitempty"`
|
||||
LifecycleState string `json:"lifecycle_state,omitempty"`
|
||||
NotifyOnOk bool `json:"notify_on_ok,omitempty"`
|
||||
OwnerUserName string `json:"owner_user_name,omitempty"`
|
||||
ParentPath string `json:"parent_path,omitempty"`
|
||||
QueryId string `json:"query_id"`
|
||||
SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"`
|
||||
State string `json:"state,omitempty"`
|
||||
TriggerTime string `json:"trigger_time,omitempty"`
|
||||
UpdateTime string `json:"update_time,omitempty"`
|
||||
Condition *ResourceAlertCondition `json:"condition,omitempty"`
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
type ResourceCustomAppIntegrationTokenAccessPolicy struct {
|
||||
AccessTokenTtlInMinutes int `json:"access_token_ttl_in_minutes,omitempty"`
|
||||
RefreshTokenTtlInMinutes int `json:"refresh_token_ttl_in_minutes,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceCustomAppIntegration struct {
|
||||
ClientId string `json:"client_id,omitempty"`
|
||||
ClientSecret string `json:"client_secret,omitempty"`
|
||||
Confidential bool `json:"confidential,omitempty"`
|
||||
CreateTime string `json:"create_time,omitempty"`
|
||||
CreatedBy int `json:"created_by,omitempty"`
|
||||
CreatorUsername string `json:"creator_username,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
IntegrationId string `json:"integration_id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
RedirectUrls []string `json:"redirect_urls,omitempty"`
|
||||
Scopes []string `json:"scopes,omitempty"`
|
||||
TokenAccessPolicy *ResourceCustomAppIntegrationTokenAccessPolicy `json:"token_access_policy,omitempty"`
|
||||
}
|
|
@ -19,13 +19,13 @@ type ResourceLibraryPypi struct {
|
|||
}
|
||||
|
||||
type ResourceLibrary struct {
|
||||
ClusterId string `json:"cluster_id"`
|
||||
Egg string `json:"egg,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Jar string `json:"jar,omitempty"`
|
||||
Requirements string `json:"requirements,omitempty"`
|
||||
Whl string `json:"whl,omitempty"`
|
||||
Cran *ResourceLibraryCran `json:"cran,omitempty"`
|
||||
Maven *ResourceLibraryMaven `json:"maven,omitempty"`
|
||||
Pypi *ResourceLibraryPypi `json:"pypi,omitempty"`
|
||||
ClusterId string `json:"cluster_id"`
|
||||
Egg string `json:"egg,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Jar string `json:"jar,omitempty"`
|
||||
Requirements string `json:"requirements,omitempty"`
|
||||
Whl string `json:"whl,omitempty"`
|
||||
Cran []ResourceLibraryCran `json:"cran,omitempty"`
|
||||
Maven []ResourceLibraryMaven `json:"maven,omitempty"`
|
||||
Pypi []ResourceLibraryPypi `json:"pypi,omitempty"`
|
||||
}
|
||||
|
|
|
@ -137,6 +137,7 @@ type ResourcePipelineFilters struct {
|
|||
|
||||
type ResourcePipelineGatewayDefinition struct {
|
||||
ConnectionId string `json:"connection_id,omitempty"`
|
||||
ConnectionName string `json:"connection_name,omitempty"`
|
||||
GatewayStorageCatalog string `json:"gateway_storage_catalog,omitempty"`
|
||||
GatewayStorageName string `json:"gateway_storage_name,omitempty"`
|
||||
GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"`
|
||||
|
@ -242,6 +243,12 @@ type ResourcePipelineNotification struct {
|
|||
EmailRecipients []string `json:"email_recipients,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineRestartWindow struct {
|
||||
DaysOfWeek string `json:"days_of_week,omitempty"`
|
||||
StartHour int `json:"start_hour"`
|
||||
TimeZoneId string `json:"time_zone_id,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineTriggerCron struct {
|
||||
QuartzCronSchedule string `json:"quartz_cron_schedule,omitempty"`
|
||||
TimezoneId string `json:"timezone_id,omitempty"`
|
||||
|
@ -288,5 +295,6 @@ type ResourcePipeline struct {
|
|||
LatestUpdates []ResourcePipelineLatestUpdates `json:"latest_updates,omitempty"`
|
||||
Library []ResourcePipelineLibrary `json:"library,omitempty"`
|
||||
Notification []ResourcePipelineNotification `json:"notification,omitempty"`
|
||||
RestartWindow *ResourcePipelineRestartWindow `json:"restart_window,omitempty"`
|
||||
Trigger *ResourcePipelineTrigger `json:"trigger,omitempty"`
|
||||
}
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
type ResourceQueryParameterDateRangeValueDateRangeValue struct {
|
||||
End string `json:"end"`
|
||||
Start string `json:"start"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterDateRangeValue struct {
|
||||
DynamicDateRangeValue string `json:"dynamic_date_range_value,omitempty"`
|
||||
Precision string `json:"precision,omitempty"`
|
||||
StartDayOfWeek int `json:"start_day_of_week,omitempty"`
|
||||
DateRangeValue *ResourceQueryParameterDateRangeValueDateRangeValue `json:"date_range_value,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterDateValue struct {
|
||||
DateValue string `json:"date_value,omitempty"`
|
||||
DynamicDateValue string `json:"dynamic_date_value,omitempty"`
|
||||
Precision string `json:"precision,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterEnumValueMultiValuesOptions struct {
|
||||
Prefix string `json:"prefix,omitempty"`
|
||||
Separator string `json:"separator,omitempty"`
|
||||
Suffix string `json:"suffix,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterEnumValue struct {
|
||||
EnumOptions string `json:"enum_options,omitempty"`
|
||||
Values []string `json:"values,omitempty"`
|
||||
MultiValuesOptions *ResourceQueryParameterEnumValueMultiValuesOptions `json:"multi_values_options,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterNumericValue struct {
|
||||
Value int `json:"value"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterQueryBackedValueMultiValuesOptions struct {
|
||||
Prefix string `json:"prefix,omitempty"`
|
||||
Separator string `json:"separator,omitempty"`
|
||||
Suffix string `json:"suffix,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterQueryBackedValue struct {
|
||||
QueryId string `json:"query_id"`
|
||||
Values []string `json:"values,omitempty"`
|
||||
MultiValuesOptions *ResourceQueryParameterQueryBackedValueMultiValuesOptions `json:"multi_values_options,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameterTextValue struct {
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
type ResourceQueryParameter struct {
|
||||
Name string `json:"name"`
|
||||
Title string `json:"title,omitempty"`
|
||||
DateRangeValue *ResourceQueryParameterDateRangeValue `json:"date_range_value,omitempty"`
|
||||
DateValue *ResourceQueryParameterDateValue `json:"date_value,omitempty"`
|
||||
EnumValue *ResourceQueryParameterEnumValue `json:"enum_value,omitempty"`
|
||||
NumericValue *ResourceQueryParameterNumericValue `json:"numeric_value,omitempty"`
|
||||
QueryBackedValue *ResourceQueryParameterQueryBackedValue `json:"query_backed_value,omitempty"`
|
||||
TextValue *ResourceQueryParameterTextValue `json:"text_value,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceQuery struct {
|
||||
ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"`
|
||||
Catalog string `json:"catalog,omitempty"`
|
||||
CreateTime string `json:"create_time,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
DisplayName string `json:"display_name"`
|
||||
Id string `json:"id,omitempty"`
|
||||
LastModifierUserName string `json:"last_modifier_user_name,omitempty"`
|
||||
LifecycleState string `json:"lifecycle_state,omitempty"`
|
||||
OwnerUserName string `json:"owner_user_name,omitempty"`
|
||||
ParentPath string `json:"parent_path,omitempty"`
|
||||
QueryText string `json:"query_text"`
|
||||
RunAsMode string `json:"run_as_mode,omitempty"`
|
||||
Schema string `json:"schema,omitempty"`
|
||||
Tags []string `json:"tags,omitempty"`
|
||||
UpdateTime string `json:"update_time,omitempty"`
|
||||
WarehouseId string `json:"warehouse_id"`
|
||||
Parameter []ResourceQueryParameter `json:"parameter,omitempty"`
|
||||
}
|
|
@ -4,6 +4,7 @@ package schema
|
|||
|
||||
type Resources struct {
|
||||
AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"`
|
||||
Alert map[string]any `json:"databricks_alert,omitempty"`
|
||||
ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"`
|
||||
AutomaticClusterUpdateWorkspaceSetting map[string]any `json:"databricks_automatic_cluster_update_workspace_setting,omitempty"`
|
||||
AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"`
|
||||
|
@ -17,6 +18,7 @@ type Resources struct {
|
|||
ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"`
|
||||
ComplianceSecurityProfileWorkspaceSetting map[string]any `json:"databricks_compliance_security_profile_workspace_setting,omitempty"`
|
||||
Connection map[string]any `json:"databricks_connection,omitempty"`
|
||||
CustomAppIntegration map[string]any `json:"databricks_custom_app_integration,omitempty"`
|
||||
Dashboard map[string]any `json:"databricks_dashboard,omitempty"`
|
||||
DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"`
|
||||
DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"`
|
||||
|
@ -68,6 +70,7 @@ type Resources struct {
|
|||
Pipeline map[string]any `json:"databricks_pipeline,omitempty"`
|
||||
Provider map[string]any `json:"databricks_provider,omitempty"`
|
||||
QualityMonitor map[string]any `json:"databricks_quality_monitor,omitempty"`
|
||||
Query map[string]any `json:"databricks_query,omitempty"`
|
||||
Recipient map[string]any `json:"databricks_recipient,omitempty"`
|
||||
RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"`
|
||||
Repo map[string]any `json:"databricks_repo,omitempty"`
|
||||
|
@ -107,6 +110,7 @@ type Resources struct {
|
|||
func NewResources() *Resources {
|
||||
return &Resources{
|
||||
AccessControlRuleSet: make(map[string]any),
|
||||
Alert: make(map[string]any),
|
||||
ArtifactAllowlist: make(map[string]any),
|
||||
AutomaticClusterUpdateWorkspaceSetting: make(map[string]any),
|
||||
AwsS3Mount: make(map[string]any),
|
||||
|
@ -120,6 +124,7 @@ func NewResources() *Resources {
|
|||
ClusterPolicy: make(map[string]any),
|
||||
ComplianceSecurityProfileWorkspaceSetting: make(map[string]any),
|
||||
Connection: make(map[string]any),
|
||||
CustomAppIntegration: make(map[string]any),
|
||||
Dashboard: make(map[string]any),
|
||||
DbfsFile: make(map[string]any),
|
||||
DefaultNamespaceSetting: make(map[string]any),
|
||||
|
@ -171,6 +176,7 @@ func NewResources() *Resources {
|
|||
Pipeline: make(map[string]any),
|
||||
Provider: make(map[string]any),
|
||||
QualityMonitor: make(map[string]any),
|
||||
Query: make(map[string]any),
|
||||
Recipient: make(map[string]any),
|
||||
RegisteredModel: make(map[string]any),
|
||||
Repo: make(map[string]any),
|
||||
|
|
|
@ -21,7 +21,7 @@ type Root struct {
|
|||
|
||||
const ProviderHost = "registry.terraform.io"
|
||||
const ProviderSource = "databricks/databricks"
|
||||
const ProviderVersion = "1.54.0"
|
||||
const ProviderVersion = "1.58.0"
|
||||
|
||||
func NewRoot() *Root {
|
||||
return &Root{
|
||||
|
|
|
@ -59,9 +59,14 @@ func TestJsonSchema(t *testing.T) {
|
|||
}
|
||||
|
||||
// Assert enum values are loaded
|
||||
schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "catalog.MonitorCronSchedule")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["pause_status"].Enum, "PAUSED")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["pause_status"].Enum, "UNPAUSED")
|
||||
schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "pipelines.RestartWindow")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "MONDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "TUESDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "WEDNESDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "THURSDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "FRIDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SATURDAY")
|
||||
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SUNDAY")
|
||||
|
||||
providers := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.GitProvider")
|
||||
assert.Contains(t, providers.Enum, "gitHub")
|
||||
|
|
|
@ -185,6 +185,14 @@
|
|||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"create_time": {
|
||||
"description": "The timestamp of when the dashboard was created.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"dashboard_id": {
|
||||
"description": "UUID identifying the dashboard.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"display_name": {
|
||||
"description": "The display name of the dashboard.",
|
||||
"$ref": "#/$defs/string"
|
||||
|
@ -192,13 +200,25 @@
|
|||
"embed_credentials": {
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"etag": {
|
||||
"description": "The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard\nhas not been modified since the last read.\nThis field is excluded in List Dashboards responses.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"file_path": {
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"lifecycle_state": {
|
||||
"description": "The state of the dashboard resource. Used for tracking trashed status.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState"
|
||||
},
|
||||
"parent_path": {
|
||||
"description": "The workspace path of the folder containing the dashboard. Includes leading slash and no\ntrailing slash.\nThis field is excluded in List Dashboards responses.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"path": {
|
||||
"description": "The workspace path of the dashboard asset, including the file name.\nExported dashboards always have the file extension `.lvdash.json`.\nThis field is excluded in List Dashboards responses.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"permissions": {
|
||||
"$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
|
||||
},
|
||||
|
@ -206,15 +226,16 @@
|
|||
"description": "The contents of the dashboard in serialized string form.\nThis field is excluded in List Dashboards responses.\nUse the [get dashboard API](https://docs.databricks.com/api/workspace/lakeview/get)\nto retrieve an example response, which includes the `serialized_dashboard` field.\nThis field provides the structure of the JSON string that represents the dashboard's\nlayout and components.",
|
||||
"$ref": "#/$defs/interface"
|
||||
},
|
||||
"update_time": {
|
||||
"description": "The timestamp of when the dashboard was last updated by the user.\nThis field is excluded in List Dashboards responses.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"warehouse_id": {
|
||||
"description": "The warehouse ID used to run the dashboard.",
|
||||
"$ref": "#/$defs/string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"display_name"
|
||||
]
|
||||
"additionalProperties": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
|
@ -551,7 +572,7 @@
|
|||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.Filters"
|
||||
},
|
||||
"gateway_definition": {
|
||||
"description": "The definition of a gateway pipeline to support CDC.",
|
||||
"description": "The definition of a gateway pipeline to support change data capture.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionGatewayPipelineDefinition"
|
||||
},
|
||||
"id": {
|
||||
|
@ -581,6 +602,10 @@
|
|||
"description": "Whether Photon is enabled for this pipeline.",
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"restart_window": {
|
||||
"description": "Restart window of this pipeline.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindow"
|
||||
},
|
||||
"schema": {
|
||||
"description": "The default schema (database) where tables are read from or published to. The presence of this field implies that the pipeline is in direct publishing mode.",
|
||||
"$ref": "#/$defs/string"
|
||||
|
@ -659,6 +684,9 @@
|
|||
"description": "Configuration for monitoring snapshot tables.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorSnapshot"
|
||||
},
|
||||
"table_name": {
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"time_series": {
|
||||
"description": "Configuration for monitoring time series tables.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries"
|
||||
|
@ -670,6 +698,7 @@
|
|||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"table_name",
|
||||
"assets_dir",
|
||||
"output_schema_name"
|
||||
]
|
||||
|
@ -1289,11 +1318,7 @@
|
|||
"properties": {
|
||||
"pause_status": {
|
||||
"description": "Read only field that indicates whether a schedule is paused or not.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus",
|
||||
"enum": [
|
||||
"UNPAUSED",
|
||||
"PAUSED"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus"
|
||||
},
|
||||
"quartz_cron_expression": {
|
||||
"description": "The expression that determines when to run the monitor. See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html).\n",
|
||||
|
@ -1317,7 +1342,12 @@
|
|||
]
|
||||
},
|
||||
"catalog.MonitorCronSchedulePauseStatus": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Read only field that indicates whether a schedule is paused or not.",
|
||||
"enum": [
|
||||
"UNPAUSED",
|
||||
"PAUSED"
|
||||
]
|
||||
},
|
||||
"catalog.MonitorDataClassificationConfig": {
|
||||
"anyOf": [
|
||||
|
@ -1382,11 +1412,7 @@
|
|||
},
|
||||
"problem_type": {
|
||||
"description": "Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType",
|
||||
"enum": [
|
||||
"PROBLEM_TYPE_CLASSIFICATION",
|
||||
"PROBLEM_TYPE_REGRESSION"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType"
|
||||
},
|
||||
"timestamp_col": {
|
||||
"description": "Column that contains the timestamps of requests. The column must be one of the following:\n- A ``TimestampType`` column\n- A column whose values can be converted to timestamps through the pyspark\n ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html).\n",
|
||||
|
@ -1409,7 +1435,12 @@
|
|||
]
|
||||
},
|
||||
"catalog.MonitorInferenceLogProblemType": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed.",
|
||||
"enum": [
|
||||
"PROBLEM_TYPE_CLASSIFICATION",
|
||||
"PROBLEM_TYPE_REGRESSION"
|
||||
]
|
||||
},
|
||||
"catalog.MonitorMetric": {
|
||||
"anyOf": [
|
||||
|
@ -1434,12 +1465,7 @@
|
|||
},
|
||||
"type": {
|
||||
"description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType",
|
||||
"enum": [
|
||||
"CUSTOM_METRIC_TYPE_AGGREGATE",
|
||||
"CUSTOM_METRIC_TYPE_DERIVED",
|
||||
"CUSTOM_METRIC_TYPE_DRIFT"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -1458,7 +1484,13 @@
|
|||
]
|
||||
},
|
||||
"catalog.MonitorMetricType": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n",
|
||||
"enum": [
|
||||
"CUSTOM_METRIC_TYPE_AGGREGATE",
|
||||
"CUSTOM_METRIC_TYPE_DERIVED",
|
||||
"CUSTOM_METRIC_TYPE_DRIFT"
|
||||
]
|
||||
},
|
||||
"catalog.MonitorNotifications": {
|
||||
"anyOf": [
|
||||
|
@ -2325,6 +2357,13 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"dashboards.LifecycleState": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"ACTIVE",
|
||||
"TRASHED"
|
||||
]
|
||||
},
|
||||
"jobs.Condition": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
|
@ -3102,7 +3141,7 @@
|
|||
"$ref": "#/$defs/slice/string"
|
||||
},
|
||||
"jar_params": {
|
||||
"description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.",
|
||||
"description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.",
|
||||
"$ref": "#/$defs/slice/string"
|
||||
},
|
||||
"job_id": {
|
||||
|
@ -3436,11 +3475,11 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"condition_task": {
|
||||
"description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.",
|
||||
"description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask"
|
||||
},
|
||||
"dbt_task": {
|
||||
"description": "If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.",
|
||||
"description": "The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DbtTask"
|
||||
},
|
||||
"depends_on": {
|
||||
|
@ -3468,7 +3507,7 @@
|
|||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"for_each_task": {
|
||||
"description": "If for_each_task, indicates that this task must execute the nested task within it.",
|
||||
"description": "The task executes a nested task for every input provided when the `for_each_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ForEachTask"
|
||||
},
|
||||
"health": {
|
||||
|
@ -3495,7 +3534,7 @@
|
|||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec"
|
||||
},
|
||||
"notebook_task": {
|
||||
"description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.",
|
||||
"description": "The task runs a notebook when the `notebook_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.NotebookTask"
|
||||
},
|
||||
"notification_settings": {
|
||||
|
@ -3503,11 +3542,11 @@
|
|||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TaskNotificationSettings"
|
||||
},
|
||||
"pipeline_task": {
|
||||
"description": "If pipeline_task, indicates that this task must execute a Pipeline.",
|
||||
"description": "The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines configured to use triggered more are supported.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PipelineTask"
|
||||
},
|
||||
"python_wheel_task": {
|
||||
"description": "If python_wheel_task, indicates that this job must execute a PythonWheel.",
|
||||
"description": "The task runs a Python wheel when the `python_wheel_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PythonWheelTask"
|
||||
},
|
||||
"retry_on_timeout": {
|
||||
|
@ -3519,23 +3558,23 @@
|
|||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunIf"
|
||||
},
|
||||
"run_job_task": {
|
||||
"description": "If run_job_task, indicates that this task must execute another job.",
|
||||
"description": "The task triggers another job when the `run_job_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask"
|
||||
},
|
||||
"spark_jar_task": {
|
||||
"description": "If spark_jar_task, indicates that this task must run a JAR.",
|
||||
"description": "The task runs a JAR when the `spark_jar_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask"
|
||||
},
|
||||
"spark_python_task": {
|
||||
"description": "If spark_python_task, indicates that this task must run a Python file.",
|
||||
"description": "The task runs a Python file when the `spark_python_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask"
|
||||
},
|
||||
"spark_submit_task": {
|
||||
"description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.",
|
||||
"description": "(Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask"
|
||||
},
|
||||
"sql_task": {
|
||||
"description": "If sql_task, indicates that this job must execute a SQL task.",
|
||||
"description": "The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SqlTask"
|
||||
},
|
||||
"task_key": {
|
||||
|
@ -3821,12 +3860,7 @@
|
|||
},
|
||||
"status": {
|
||||
"description": "Current status of `model_version`",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus",
|
||||
"enum": [
|
||||
"PENDING_REGISTRATION",
|
||||
"FAILED_REGISTRATION",
|
||||
"READY"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus"
|
||||
},
|
||||
"status_message": {
|
||||
"description": "Details on current `status`, if it is pending or failed.",
|
||||
|
@ -3854,7 +3888,13 @@
|
|||
]
|
||||
},
|
||||
"ml.ModelVersionStatus": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Current status of `model_version`",
|
||||
"enum": [
|
||||
"PENDING_REGISTRATION",
|
||||
"FAILED_REGISTRATION",
|
||||
"READY"
|
||||
]
|
||||
},
|
||||
"ml.ModelVersionTag": {
|
||||
"anyOf": [
|
||||
|
@ -3951,15 +3991,15 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"report": {
|
||||
"description": "Select tables from a specific source report.",
|
||||
"description": "Select a specific source report.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.ReportSpec"
|
||||
},
|
||||
"schema": {
|
||||
"description": "Select tables from a specific source schema.",
|
||||
"description": "Select all tables from a specific source schema.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.SchemaSpec"
|
||||
},
|
||||
"table": {
|
||||
"description": "Select tables from a specific source table.",
|
||||
"description": "Select a specific source table.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpec"
|
||||
}
|
||||
},
|
||||
|
@ -3977,7 +4017,11 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"connection_id": {
|
||||
"description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.",
|
||||
"description": "[Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"connection_name": {
|
||||
"description": "Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"gateway_storage_catalog": {
|
||||
|
@ -4007,11 +4051,11 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"connection_name": {
|
||||
"description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name.",
|
||||
"description": "Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"ingestion_gateway_id": {
|
||||
"description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name.",
|
||||
"description": "Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. This is used with connectors to databases like SQL Server.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"objects": {
|
||||
|
@ -4188,11 +4232,7 @@
|
|||
},
|
||||
"mode": {
|
||||
"description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode",
|
||||
"enum": [
|
||||
"ENHANCED",
|
||||
"LEGACY"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -4208,7 +4248,12 @@
|
|||
]
|
||||
},
|
||||
"pipelines.PipelineClusterAutoscaleMode": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n",
|
||||
"enum": [
|
||||
"ENHANCED",
|
||||
"LEGACY"
|
||||
]
|
||||
},
|
||||
"pipelines.PipelineDeployment": {
|
||||
"anyOf": [
|
||||
|
@ -4320,6 +4365,47 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"pipelines.RestartWindow": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"days_of_week": {
|
||||
"description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek",
|
||||
"enum": [
|
||||
"MONDAY",
|
||||
"TUESDAY",
|
||||
"WEDNESDAY",
|
||||
"THURSDAY",
|
||||
"FRIDAY",
|
||||
"SATURDAY",
|
||||
"SUNDAY"
|
||||
]
|
||||
},
|
||||
"start_hour": {
|
||||
"description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.",
|
||||
"$ref": "#/$defs/int"
|
||||
},
|
||||
"time_zone_id": {
|
||||
"description": "Time zone id of restart window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.\nIf not specified, UTC will be used.",
|
||||
"$ref": "#/$defs/string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"start_hour"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"pipelines.RestartWindowDaysOfWeek": {
|
||||
"type": "string"
|
||||
},
|
||||
"pipelines.SchemaSpec": {
|
||||
"anyOf": [
|
||||
{
|
||||
|
@ -4411,11 +4497,7 @@
|
|||
},
|
||||
"scd_type": {
|
||||
"description": "The SCD type to use to ingest the table.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType",
|
||||
"enum": [
|
||||
"SCD_TYPE_1",
|
||||
"SCD_TYPE_2"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType"
|
||||
},
|
||||
"sequence_by": {
|
||||
"description": "The column names specifying the logical order of events in the source data. Delta Live Tables uses this sequencing to handle change events that arrive out of order.",
|
||||
|
@ -4431,7 +4513,12 @@
|
|||
]
|
||||
},
|
||||
"pipelines.TableSpecificConfigScdType": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "The SCD type to use to ingest the table.",
|
||||
"enum": [
|
||||
"SCD_TYPE_1",
|
||||
"SCD_TYPE_2"
|
||||
]
|
||||
},
|
||||
"serving.Ai21LabsConfig": {
|
||||
"anyOf": [
|
||||
|
@ -4520,11 +4607,7 @@
|
|||
"properties": {
|
||||
"behavior": {
|
||||
"description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior",
|
||||
"enum": [
|
||||
"NONE",
|
||||
"BLOCK"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -4539,7 +4622,12 @@
|
|||
]
|
||||
},
|
||||
"serving.AiGatewayGuardrailPiiBehaviorBehavior": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.",
|
||||
"enum": [
|
||||
"NONE",
|
||||
"BLOCK"
|
||||
]
|
||||
},
|
||||
"serving.AiGatewayGuardrails": {
|
||||
"anyOf": [
|
||||
|
@ -4604,18 +4692,11 @@
|
|||
},
|
||||
"key": {
|
||||
"description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey",
|
||||
"enum": [
|
||||
"user",
|
||||
"endpoint"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey"
|
||||
},
|
||||
"renewal_period": {
|
||||
"description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod",
|
||||
"enum": [
|
||||
"minute"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -4631,10 +4712,19 @@
|
|||
]
|
||||
},
|
||||
"serving.AiGatewayRateLimitKey": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
|
||||
"enum": [
|
||||
"user",
|
||||
"endpoint"
|
||||
]
|
||||
},
|
||||
"serving.AiGatewayRateLimitRenewalPeriod": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.",
|
||||
"enum": [
|
||||
"minute"
|
||||
]
|
||||
},
|
||||
"serving.AiGatewayUsageTrackingConfig": {
|
||||
"anyOf": [
|
||||
|
@ -4681,13 +4771,7 @@
|
|||
},
|
||||
"bedrock_provider": {
|
||||
"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider",
|
||||
"enum": [
|
||||
"anthropic",
|
||||
"cohere",
|
||||
"ai21labs",
|
||||
"amazon"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -4703,7 +4787,14 @@
|
|||
]
|
||||
},
|
||||
"serving.AmazonBedrockConfigBedrockProvider": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
|
||||
"enum": [
|
||||
"anthropic",
|
||||
"cohere",
|
||||
"ai21labs",
|
||||
"amazon"
|
||||
]
|
||||
},
|
||||
"serving.AnthropicConfig": {
|
||||
"anyOf": [
|
||||
|
@ -4910,17 +5001,7 @@
|
|||
},
|
||||
"provider": {
|
||||
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider",
|
||||
"enum": [
|
||||
"ai21labs",
|
||||
"anthropic",
|
||||
"amazon-bedrock",
|
||||
"cohere",
|
||||
"databricks-model-serving",
|
||||
"google-cloud-vertex-ai",
|
||||
"openai",
|
||||
"palm"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider"
|
||||
},
|
||||
"task": {
|
||||
"description": "The task type of the external model.",
|
||||
|
@ -4941,7 +5022,18 @@
|
|||
]
|
||||
},
|
||||
"serving.ExternalModelProvider": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n",
|
||||
"enum": [
|
||||
"ai21labs",
|
||||
"anthropic",
|
||||
"amazon-bedrock",
|
||||
"cohere",
|
||||
"databricks-model-serving",
|
||||
"google-cloud-vertex-ai",
|
||||
"openai",
|
||||
"palm"
|
||||
]
|
||||
},
|
||||
"serving.GoogleCloudVertexAiConfig": {
|
||||
"anyOf": [
|
||||
|
@ -5047,18 +5139,11 @@
|
|||
},
|
||||
"key": {
|
||||
"description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey",
|
||||
"enum": [
|
||||
"user",
|
||||
"endpoint"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey"
|
||||
},
|
||||
"renewal_period": {
|
||||
"description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod",
|
||||
"enum": [
|
||||
"minute"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -5074,10 +5159,19 @@
|
|||
]
|
||||
},
|
||||
"serving.RateLimitKey": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
|
||||
"enum": [
|
||||
"user",
|
||||
"endpoint"
|
||||
]
|
||||
},
|
||||
"serving.RateLimitRenewalPeriod": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.",
|
||||
"enum": [
|
||||
"minute"
|
||||
]
|
||||
},
|
||||
"serving.Route": {
|
||||
"anyOf": [
|
||||
|
@ -5202,23 +5296,11 @@
|
|||
},
|
||||
"workload_size": {
|
||||
"description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize",
|
||||
"enum": [
|
||||
"Small",
|
||||
"Medium",
|
||||
"Large"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize"
|
||||
},
|
||||
"workload_type": {
|
||||
"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType",
|
||||
"enum": [
|
||||
"CPU",
|
||||
"GPU_SMALL",
|
||||
"GPU_MEDIUM",
|
||||
"GPU_LARGE",
|
||||
"MULTIGPU_MEDIUM"
|
||||
]
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
@ -5235,10 +5317,24 @@
|
|||
]
|
||||
},
|
||||
"serving.ServedModelInputWorkloadSize": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n",
|
||||
"enum": [
|
||||
"Small",
|
||||
"Medium",
|
||||
"Large"
|
||||
]
|
||||
},
|
||||
"serving.ServedModelInputWorkloadType": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n",
|
||||
"enum": [
|
||||
"CPU",
|
||||
"GPU_SMALL",
|
||||
"GPU_MEDIUM",
|
||||
"GPU_LARGE",
|
||||
"MULTIGPU_MEDIUM"
|
||||
]
|
||||
},
|
||||
"serving.TrafficConfig": {
|
||||
"anyOf": [
|
||||
|
|
|
@@ -191,6 +191,8 @@ func newList() *cobra.Command {

	// TODO: short flags

	cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `An opaque page token which was the next_page_token in the response of the previous request to list the secrets for this service principal.`)

	cmd.Use = "list SERVICE_PRINCIPAL_ID"
	cmd.Short = `List service principal secrets.`
	cmd.Long = `List service principal secrets.
@@ -81,6 +81,7 @@ func newCreate() *cobra.Command {
	cmd.Flags().StringVar(&createReq.DeploymentName, "deployment-name", createReq.DeploymentName, `The deployment name defines part of the subdomain for the workspace.`)
	// TODO: complex arg: gcp_managed_network_config
	// TODO: complex arg: gke_config
	cmd.Flags().BoolVar(&createReq.IsNoPublicIpEnabled, "is-no-public-ip-enabled", createReq.IsNoPublicIpEnabled, `Whether no public IP is enabled for the workspace.`)
	cmd.Flags().StringVar(&createReq.Location, "location", createReq.Location, `The Google Cloud region of the workspace data plane in your Google account.`)
	cmd.Flags().StringVar(&createReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", createReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`)
	cmd.Flags().StringVar(&createReq.NetworkId, "network-id", createReq.NetworkId, ``)
@@ -420,6 +421,7 @@ func newUpdate() *cobra.Command {
	cmd.Flags().StringVar(&updateReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", updateReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`)
	cmd.Flags().StringVar(&updateReq.NetworkConnectivityConfigId, "network-connectivity-config-id", updateReq.NetworkConnectivityConfigId, ``)
	cmd.Flags().StringVar(&updateReq.NetworkId, "network-id", updateReq.NetworkId, `The ID of the workspace's network configuration object.`)
	cmd.Flags().StringVar(&updateReq.PrivateAccessSettingsId, "private-access-settings-id", updateReq.PrivateAccessSettingsId, `The ID of the workspace's private access settings configuration object.`)
	cmd.Flags().StringVar(&updateReq.StorageConfigurationId, "storage-configuration-id", updateReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`)
	cmd.Flags().StringVar(&updateReq.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", updateReq.StorageCustomerManagedKeyId, `The ID of the key configuration object for workspace storage.`)
@@ -6,6 +6,7 @@ import (
	"os"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/databricks/cli/bundle/phases"
	"github.com/databricks/cli/cmd/bundle/utils"
	"github.com/databricks/cli/cmd/root"
@@ -62,7 +63,12 @@ func newDestroyCommand() *cobra.Command {

	diags = bundle.Apply(ctx, b, bundle.Seq(
		phases.Initialize(),
		phases.Build(),
		// We need to resolve the artifact variable (as we do in the build phase)
		// because some of the to-be-destroyed resources might use this variable.
		// Not resolving it might lead to a terraform "Reference to undeclared resource" error.
		mutator.ResolveVariableReferences(
			"artifacts",
		),
		phases.Destroy(),
	))
	if err := diags.Error(); err != nil {
@@ -35,17 +35,23 @@ func promptRunArgument(ctx context.Context, b *bundle.Bundle) (string, error) {
	return key, nil
}

func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, error) {
// resolveRunArgument resolves the resource key to run.
// It returns the remaining arguments to pass to the runner, if applicable.
func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, []string, error) {
	// If no arguments are specified, prompt the user to select something to run.
	if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
		return promptRunArgument(ctx, b)
		key, err := promptRunArgument(ctx, b)
		if err != nil {
			return "", nil, err
		}
		return key, args, nil
	}

	if len(args) < 1 {
		return "", fmt.Errorf("expected a KEY of the resource to run")
		return "", nil, fmt.Errorf("expected a KEY of the resource to run")
	}

	return args[0], nil
	return args[0], args[1:], nil
}

func keyToRunner(b *bundle.Bundle, arg string) (run.Runner, error) {
@@ -109,7 +115,7 @@ task or a Python wheel task, the second example applies.
		return err
	}

	arg, err := resolveRunArgument(ctx, b, args)
	key, args, err := resolveRunArgument(ctx, b, args)
	if err != nil {
		return err
	}
@@ -124,13 +130,13 @@ task or a Python wheel task, the second example applies.
		return err
	}

	runner, err := keyToRunner(b, arg)
	runner, err := keyToRunner(b, key)
	if err != nil {
		return err
	}

	// Parse additional positional arguments.
	err = runner.ParseArgs(args[1:], &runOptions)
	err = runner.ParseArgs(args, &runOptions)
	if err != nil {
		return err
	}
@@ -7,6 +7,7 @@ import (
	"testing"

	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/fakefs"
	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/databricks-sdk-go/experimental/mocks"
	"github.com/spf13/cobra"
@@ -84,7 +85,7 @@ func setupTest(t *testing.T) (*validArgs, *cobra.Command, *mocks.MockWorkspaceCl
	cmd, m := setupCommand(t)

	fakeFilerForPath := func(ctx context.Context, fullPath string) (filer.Filer, string, error) {
		fakeFiler := filer.NewFakeFiler(map[string]filer.FakeFileInfo{
		fakeFiler := filer.NewFakeFiler(map[string]fakefs.FileInfo{
			"dir": {FakeName: "root", FakeDir: true},
			"dir/dirA": {FakeDir: true},
			"dir/dirB": {FakeDir: true},
@@ -11,6 +11,7 @@ import (

	"github.com/databricks/cli/internal/build"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/dbr"
	"github.com/databricks/cli/libs/log"
	"github.com/spf13/cobra"
)
@@ -73,8 +74,12 @@ func New(ctx context.Context) *cobra.Command {
		// get the context back
		ctx = cmd.Context()

		// Detect if the CLI is running on DBR and store this on the context.
		ctx = dbr.DetectRuntime(ctx)

		// Configure our user agent with the command that's about to be executed.
		ctx = withCommandInUserAgent(ctx, cmd)
		ctx = withCommandExecIdInUserAgent(ctx)
		ctx = withUpstreamInUserAgent(ctx)
		cmd.SetContext(ctx)
		return nil
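The hunk above wires dbr.DetectRuntime into command initialization so that later code can tell whether the CLI is executing inside a Databricks runtime. A hedged sketch of how that detection result might be consumed follows; the RunsOnRuntime accessor name is an assumption about libs/dbr, not something shown in this diff:

package main

import (
	"context"
	"fmt"

	"github.com/databricks/cli/libs/dbr"
)

func main() {
	// DetectRuntime stores the detection result on the context (as in the hunk above).
	ctx := dbr.DetectRuntime(context.Background())

	// RunsOnRuntime is assumed here as the accessor that reads that result back.
	if dbr.RunsOnRuntime(ctx) {
		fmt.Println("running inside a Databricks runtime")
	} else {
		fmt.Println("running locally")
	}
}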
@@ -0,0 +1,14 @@
package root

import (
	"context"

	"github.com/databricks/databricks-sdk-go/useragent"
	"github.com/google/uuid"
)

func withCommandExecIdInUserAgent(ctx context.Context) context.Context {
	// A UUID that will allow us to correlate multiple API requests made by
	// the same CLI invocation.
	return useragent.InContext(ctx, "cmd-exec-id", uuid.New().String())
}
@@ -0,0 +1,26 @@
package root

import (
	"context"
	"regexp"
	"testing"

	"github.com/databricks/databricks-sdk-go/useragent"
	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestWithCommandExecIdInUserAgent(t *testing.T) {
	ctx := withCommandExecIdInUserAgent(context.Background())

	// Check that the command exec ID is in the user agent string.
	ua := useragent.FromContext(ctx)
	re := regexp.MustCompile(`cmd-exec-id/([a-f0-9-]+)`)
	matches := re.FindAllStringSubmatch(ua, -1)

	// Assert that we have exactly one match and that it's a valid UUID.
	require.Len(t, matches, 1)
	_, err := uuid.Parse(matches[0][1])
	assert.NoError(t, err)
}
@@ -1,13 +1,15 @@
package root

import (
	"context"
	"testing"

	"github.com/databricks/databricks-sdk-go/useragent"
	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
)

func TestCommandString(t *testing.T) {
func TestWithCommandInUserAgent(t *testing.T) {
	root := &cobra.Command{
		Use: "root",
	}
@@ -26,4 +28,9 @@ func TestCommandString(t *testing.T) {
	assert.Equal(t, "root", commandString(root))
	assert.Equal(t, "hello", commandString(hello))
	assert.Equal(t, "hello_world", commandString(world))

	ctx := withCommandInUserAgent(context.Background(), world)

	ua := useragent.FromContext(ctx)
	assert.Contains(t, ua, "cmd/hello_world")
}
162  cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go  (generated, executable file)
@ -0,0 +1,162 @@
|
|||
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
|
||||
|
||||
package aibi_dashboard_embedding_access_policy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/cmd/root"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
"github.com/databricks/databricks-sdk-go/service/settings"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var cmdOverrides []func(*cobra.Command)
|
||||
|
||||
func New() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "aibi-dashboard-embedding-access-policy",
|
||||
Short: `Controls whether AI/BI published dashboard embedding is enabled, conditionally enabled, or disabled at the workspace level.`,
|
||||
Long: `Controls whether AI/BI published dashboard embedding is enabled, conditionally
|
||||
enabled, or disabled at the workspace level. By default, this setting is
|
||||
conditionally enabled (ALLOW_APPROVED_DOMAINS).`,
|
||||
}
|
||||
|
||||
// Add methods
|
||||
cmd.AddCommand(newGet())
|
||||
cmd.AddCommand(newUpdate())
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range cmdOverrides {
|
||||
fn(cmd)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start get command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var getOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest,
|
||||
)
|
||||
|
||||
func newGet() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var getReq settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`)
|
||||
|
||||
cmd.Use = "get"
|
||||
cmd.Short = `Retrieve the AI/BI dashboard embedding access policy.`
|
||||
cmd.Long = `Retrieve the AI/BI dashboard embedding access policy.
|
||||
|
||||
Retrieves the AI/BI dashboard embedding access policy. The default setting is
|
||||
ALLOW_APPROVED_DOMAINS, permitting AI/BI dashboards to be embedded on approved
|
||||
domains.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Get(ctx, getReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range getOverrides {
|
||||
fn(cmd, &getReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start update command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var updateOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest,
|
||||
)
|
||||
|
||||
func newUpdate() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var updateReq settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest
|
||||
var updateJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Use = "update"
|
||||
cmd.Short = `Update the AI/BI dashboard embedding access policy.`
|
||||
cmd.Long = `Update the AI/BI dashboard embedding access policy.
|
||||
|
||||
Updates the AI/BI dashboard embedding access policy at the workspace level.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := updateJson.Unmarshal(&updateReq)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
|
||||
}
|
||||
|
||||
response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Update(ctx, updateReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range updateOverrides {
|
||||
fn(cmd, &updateReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// end service AibiDashboardEmbeddingAccessPolicy
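The generated update command above only accepts its input through the --json flag and then unmarshals it into the request struct before calling the Settings API. A minimal sketch of that same flow outside the generated command follows; the inline JSON payload and its field names are placeholders, not the documented request body:

package main

import (
	"context"
	"log"

	"github.com/databricks/cli/libs/flags"
	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Parse an inline JSON payload the same way the generated command does.
	var updateJson flags.JsonFlag
	if err := updateJson.Set(`{"allow_missing": true}`); err != nil { // placeholder payload
		log.Fatal(err)
	}

	var updateReq settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest
	if diags := updateJson.Unmarshal(&updateReq); diags.HasError() {
		log.Fatal(diags.Error())
	}

	response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Update(ctx, updateReq)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("updated setting: %+v", response)
}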
|
162  cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go  (generated, executable file)
@ -0,0 +1,162 @@
|
|||
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
|
||||
|
||||
package aibi_dashboard_embedding_approved_domains
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/cmd/root"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
"github.com/databricks/databricks-sdk-go/service/settings"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var cmdOverrides []func(*cobra.Command)
|
||||
|
||||
func New() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "aibi-dashboard-embedding-approved-domains",
|
||||
Short: `Controls the list of domains approved to host the embedded AI/BI dashboards.`,
|
||||
Long: `Controls the list of domains approved to host the embedded AI/BI dashboards.
|
||||
The approved domains list can't be mutated when the current access policy is
|
||||
not set to ALLOW_APPROVED_DOMAINS.`,
|
||||
}
|
||||
|
||||
// Add methods
|
||||
cmd.AddCommand(newGet())
|
||||
cmd.AddCommand(newUpdate())
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range cmdOverrides {
|
||||
fn(cmd)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start get command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var getOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest,
|
||||
)
|
||||
|
||||
func newGet() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var getReq settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`)
|
||||
|
||||
cmd.Use = "get"
|
||||
cmd.Short = `Retrieve the list of domains approved to host embedded AI/BI dashboards.`
|
||||
cmd.Long = `Retrieve the list of domains approved to host embedded AI/BI dashboards.
|
||||
|
||||
Retrieves the list of domains approved to host embedded AI/BI dashboards.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
response, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Get(ctx, getReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range getOverrides {
|
||||
fn(cmd, &getReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start update command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var updateOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest,
|
||||
)
|
||||
|
||||
func newUpdate() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var updateReq settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest
|
||||
var updateJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Use = "update"
|
||||
cmd.Short = `Update the list of domains approved to host embedded AI/BI dashboards.`
|
||||
cmd.Long = `Update the list of domains approved to host embedded AI/BI dashboards.
|
||||
|
||||
Updates the list of domains approved to host embedded AI/BI dashboards. This
|
||||
update will fail if the current workspace access policy is not
|
||||
ALLOW_APPROVED_DOMAINS.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := updateJson.Unmarshal(&updateReq)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
|
||||
}
|
||||
|
||||
response, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Update(ctx, updateReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range updateOverrides {
|
||||
fn(cmd, &updateReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// end service AibiDashboardEmbeddingApprovedDomains
|
|
@ -67,6 +67,7 @@ func newCreate() *cobra.Command {
|
|||
cmd := &cobra.Command{}
|
||||
|
||||
var createReq apps.CreateAppRequest
|
||||
createReq.App = &apps.App{}
|
||||
var createJson flags.JsonFlag
|
||||
|
||||
var createSkipWait bool
|
||||
|
@ -77,7 +78,11 @@ func newCreate() *cobra.Command {
|
|||
// TODO: short flags
|
||||
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `The description of the app.`)
|
||||
// TODO: complex arg: active_deployment
|
||||
// TODO: complex arg: app_status
|
||||
// TODO: complex arg: compute_status
|
||||
cmd.Flags().StringVar(&createReq.App.Description, "description", createReq.App.Description, `The description of the app.`)
|
||||
// TODO: complex arg: pending_deployment
|
||||
// TODO: array: resources
|
||||
|
||||
cmd.Use = "create NAME"
|
||||
|
@ -110,7 +115,7 @@ func newCreate() *cobra.Command {
|
|||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := createJson.Unmarshal(&createReq)
|
||||
diags := createJson.Unmarshal(&createReq.App)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
|
@ -122,7 +127,7 @@ func newCreate() *cobra.Command {
|
|||
}
|
||||
}
|
||||
if !cmd.Flags().Changed("json") {
|
||||
createReq.Name = args[0]
|
||||
createReq.App.Name = args[0]
|
||||
}
|
||||
|
||||
wait, err := w.Apps.Create(ctx, createReq)
|
||||
|
@ -234,6 +239,7 @@ func newDeploy() *cobra.Command {
|
|||
cmd := &cobra.Command{}
|
||||
|
||||
var deployReq apps.CreateAppDeploymentRequest
|
||||
deployReq.AppDeployment = &apps.AppDeployment{}
|
||||
var deployJson flags.JsonFlag
|
||||
|
||||
var deploySkipWait bool
|
||||
|
@ -244,9 +250,11 @@ func newDeploy() *cobra.Command {
|
|||
// TODO: short flags
|
||||
cmd.Flags().Var(&deployJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&deployReq.DeploymentId, "deployment-id", deployReq.DeploymentId, `The unique id of the deployment.`)
|
||||
cmd.Flags().Var(&deployReq.Mode, "mode", `The mode of which the deployment will manage the source code. Supported values: [AUTO_SYNC, SNAPSHOT]`)
|
||||
cmd.Flags().StringVar(&deployReq.SourceCodePath, "source-code-path", deployReq.SourceCodePath, `The workspace file system path of the source code used to create the app deployment.`)
|
||||
// TODO: complex arg: deployment_artifacts
|
||||
cmd.Flags().StringVar(&deployReq.AppDeployment.DeploymentId, "deployment-id", deployReq.AppDeployment.DeploymentId, `The unique id of the deployment.`)
|
||||
cmd.Flags().Var(&deployReq.AppDeployment.Mode, "mode", `The mode of which the deployment will manage the source code. Supported values: [AUTO_SYNC, SNAPSHOT]`)
|
||||
cmd.Flags().StringVar(&deployReq.AppDeployment.SourceCodePath, "source-code-path", deployReq.AppDeployment.SourceCodePath, `The workspace file system path of the source code used to create the app deployment.`)
|
||||
// TODO: complex arg: status
|
||||
|
||||
cmd.Use = "deploy APP_NAME"
|
||||
cmd.Short = `Create an app deployment.`
|
||||
|
@ -270,7 +278,7 @@ func newDeploy() *cobra.Command {
|
|||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := deployJson.Unmarshal(&deployReq)
|
||||
diags := deployJson.Unmarshal(&deployReq.AppDeployment)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
|
@ -692,8 +700,9 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set app permissions.`
|
||||
cmd.Long = `Set app permissions.
|
||||
|
||||
Sets permissions on an app. Apps can inherit permissions from their root
|
||||
object.
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.
|
||||
|
||||
Arguments:
|
||||
APP_NAME: The app for which to get or manage permissions.`
|
||||
|
@ -920,28 +929,41 @@ func newUpdate() *cobra.Command {
|
|||
cmd := &cobra.Command{}
|
||||
|
||||
var updateReq apps.UpdateAppRequest
|
||||
updateReq.App = &apps.App{}
|
||||
var updateJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&updateReq.Description, "description", updateReq.Description, `The description of the app.`)
|
||||
// TODO: complex arg: active_deployment
|
||||
// TODO: complex arg: app_status
|
||||
// TODO: complex arg: compute_status
|
||||
cmd.Flags().StringVar(&updateReq.App.Description, "description", updateReq.App.Description, `The description of the app.`)
|
||||
// TODO: complex arg: pending_deployment
|
||||
// TODO: array: resources
|
||||
|
||||
cmd.Use = "update NAME"
|
||||
cmd.Use = "update NAME NAME"
|
||||
cmd.Short = `Update an app.`
|
||||
cmd.Long = `Update an app.
|
||||
|
||||
Updates the app with the supplied name.
|
||||
|
||||
Arguments:
|
||||
NAME: The name of the app.
|
||||
NAME: The name of the app. The name must contain only lowercase alphanumeric
|
||||
characters and hyphens. It must be unique within the workspace.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
if cmd.Flags().Changed("json") {
|
||||
err := root.ExactArgs(0)(cmd, args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
check := root.ExactArgs(2)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
|
@ -951,7 +973,7 @@ func newUpdate() *cobra.Command {
|
|||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := updateJson.Unmarshal(&updateReq)
|
||||
diags := updateJson.Unmarshal(&updateReq.App)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
|
@ -963,6 +985,9 @@ func newUpdate() *cobra.Command {
|
|||
}
|
||||
}
|
||||
updateReq.Name = args[0]
|
||||
if !cmd.Flags().Changed("json") {
|
||||
updateReq.App.Name = args[1]
|
||||
}
|
||||
|
||||
response, err := w.Apps.Update(ctx, updateReq)
|
||||
if err != nil {
|
||||
|
|
|
@ -0,0 +1,59 @@
|
|||
package apps
|
||||
|
||||
import (
|
||||
"github.com/databricks/cli/cmd/root"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
"github.com/databricks/databricks-sdk-go/service/apps"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// We override the apps.Update command because genkit currently does not support
|
||||
// a way to identify that a path field (such as name) matches a field in the request body.
|
||||
// As a result, genkit generates a command with two identical required fields: update NAME NAME.
|
||||
// This override should be removed when genkit supports this.
|
||||
func updateOverride(cmd *cobra.Command, req *apps.UpdateAppRequest) {
|
||||
cmd.Use = "update NAME"
|
||||
cmd.Long = `Update an app.
|
||||
|
||||
Updates the app with the supplied name.
|
||||
|
||||
Arguments:
|
||||
NAME: The name of the app. The name must contain only lowercase alphanumeric
|
||||
characters and hyphens. It must be unique within the workspace.`
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
updateJson := cmd.Flag("json").Value.(*flags.JsonFlag)
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := updateJson.Unmarshal(&req.App)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
req.Name = args[0]
|
||||
response, err := w.Apps.Update(ctx, *req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
updateOverrides = append(updateOverrides, updateOverride)
|
||||
}
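
A minimal, self-contained sketch of the override-slice pattern these generated files rely on (not part of the diff; package and identifier names are illustrative, and only the cobra dependency is assumed): generated code builds a command, then applies override functions registered from init() in manually curated files.

package example

import "github.com/spf13/cobra"

// Override functions are appended from init() in hand-written files.
var updateOverrides []func(*cobra.Command)

func newUpdate() *cobra.Command {
	// Generated default; the duplicated positional argument mirrors the
	// genkit limitation described in the comment above.
	cmd := &cobra.Command{Use: "update NAME NAME"}

	// Apply optional overrides to this command.
	for _, fn := range updateOverrides {
		fn(cmd)
	}
	return cmd
}

func init() {
	// A curated override narrows the generated usage string.
	updateOverrides = append(updateOverrides, func(cmd *cobra.Command) {
		cmd.Use = "update NAME"
	})
}

The per-command slices in the generated files follow the same idea but also pass a pointer to the request struct, so a curated override can adjust both the command metadata and the outgoing request.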
|
|
@ -1,385 +0,0 @@
|
|||
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
|
||||
|
||||
package clean_rooms
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/cmd/root"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
"github.com/databricks/databricks-sdk-go/service/sharing"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var cmdOverrides []func(*cobra.Command)
|
||||
|
||||
func New() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "clean-rooms",
|
||||
Short: `A clean room is a secure, privacy-protecting environment where two or more parties can share sensitive enterprise data, including customer data, for measurements, insights, activation and other use cases.`,
|
||||
Long: `A clean room is a secure, privacy-protecting environment where two or more
|
||||
parties can share sensitive enterprise data, including customer data, for
|
||||
measurements, insights, activation and other use cases.
|
||||
|
||||
To create clean rooms, you must be a metastore admin or a user with the
|
||||
**CREATE_CLEAN_ROOM** privilege.`,
|
||||
GroupID: "sharing",
|
||||
Annotations: map[string]string{
|
||||
"package": "sharing",
|
||||
},
|
||||
|
||||
// This service is being previewed; hide from help output.
|
||||
Hidden: true,
|
||||
}
|
||||
|
||||
// Add methods
|
||||
cmd.AddCommand(newCreate())
|
||||
cmd.AddCommand(newDelete())
|
||||
cmd.AddCommand(newGet())
|
||||
cmd.AddCommand(newList())
|
||||
cmd.AddCommand(newUpdate())
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range cmdOverrides {
|
||||
fn(cmd)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start create command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var createOverrides []func(
|
||||
*cobra.Command,
|
||||
*sharing.CreateCleanRoom,
|
||||
)
|
||||
|
||||
func newCreate() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var createReq sharing.CreateCleanRoom
|
||||
var createJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`)
|
||||
|
||||
cmd.Use = "create"
|
||||
cmd.Short = `Create a clean room.`
|
||||
cmd.Long = `Create a clean room.
|
||||
|
||||
Creates a new clean room with the specified collaborators. The caller must be a
|
||||
metastore admin or have the **CREATE_CLEAN_ROOM** privilege on the metastore.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := createJson.Unmarshal(&createReq)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
|
||||
}
|
||||
|
||||
response, err := w.CleanRooms.Create(ctx, createReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range createOverrides {
|
||||
fn(cmd, &createReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start delete command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var deleteOverrides []func(
|
||||
*cobra.Command,
|
||||
*sharing.DeleteCleanRoomRequest,
|
||||
)
|
||||
|
||||
func newDelete() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var deleteReq sharing.DeleteCleanRoomRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "delete NAME"
|
||||
cmd.Short = `Delete a clean room.`
|
||||
cmd.Long = `Delete a clean room.
|
||||
|
||||
Deletes a data object clean room from the metastore. The caller must be an
|
||||
owner of the clean room.
|
||||
|
||||
Arguments:
|
||||
NAME: The name of the clean room.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
deleteReq.Name = args[0]
|
||||
|
||||
err = w.CleanRooms.Delete(ctx, deleteReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range deleteOverrides {
|
||||
fn(cmd, &deleteReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start get command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var getOverrides []func(
|
||||
*cobra.Command,
|
||||
*sharing.GetCleanRoomRequest,
|
||||
)
|
||||
|
||||
func newGet() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var getReq sharing.GetCleanRoomRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().BoolVar(&getReq.IncludeRemoteDetails, "include-remote-details", getReq.IncludeRemoteDetails, `Whether to include remote details (central) on the clean room.`)
|
||||
|
||||
cmd.Use = "get NAME"
|
||||
cmd.Short = `Get a clean room.`
|
||||
cmd.Long = `Get a clean room.
|
||||
|
||||
Gets a data object clean room from the metastore. The caller must be a
|
||||
metastore admin or the owner of the clean room.
|
||||
|
||||
Arguments:
|
||||
NAME: The name of the clean room.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
getReq.Name = args[0]
|
||||
|
||||
response, err := w.CleanRooms.Get(ctx, getReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range getOverrides {
|
||||
fn(cmd, &getReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start list command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var listOverrides []func(
|
||||
*cobra.Command,
|
||||
*sharing.ListCleanRoomsRequest,
|
||||
)
|
||||
|
||||
func newList() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var listReq sharing.ListCleanRoomsRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of clean rooms to return.`)
|
||||
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)
|
||||
|
||||
cmd.Use = "list"
|
||||
cmd.Short = `List clean rooms.`
|
||||
cmd.Long = `List clean rooms.
|
||||
|
||||
Gets an array of data object clean rooms from the metastore. The caller must
|
||||
be a metastore admin or the owner of the clean room. There is no guarantee of
|
||||
a specific ordering of the elements in the array.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
response := w.CleanRooms.List(ctx, listReq)
|
||||
return cmdio.RenderIterator(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range listOverrides {
|
||||
fn(cmd, &listReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start update command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var updateOverrides []func(
|
||||
*cobra.Command,
|
||||
*sharing.UpdateCleanRoom,
|
||||
)
|
||||
|
||||
func newUpdate() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var updateReq sharing.UpdateCleanRoom
|
||||
var updateJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
// TODO: array: catalog_updates
|
||||
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
|
||||
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of clean room.`)
|
||||
|
||||
cmd.Use = "update NAME"
|
||||
cmd.Short = `Update a clean room.`
|
||||
cmd.Long = `Update a clean room.
|
||||
|
||||
Updates the clean room with the changes and data objects in the request. The
|
||||
caller must be the owner of the clean room or a metastore admin.
|
||||
|
||||
When the caller is a metastore admin, only the __owner__ field can be updated.
|
||||
|
||||
In the case that the clean room name is changed **updateCleanRoom** requires
|
||||
that the caller is both the clean room owner and a metastore admin.
|
||||
|
||||
For each table that is added through this method, the clean room owner must
|
||||
also have **SELECT** privilege on the table. The privilege must be maintained
|
||||
indefinitely for recipients to be able to access the table. Typically, you
|
||||
should use a group as the clean room owner.
|
||||
|
||||
Table removals through **update** do not require additional privileges.
|
||||
|
||||
Arguments:
|
||||
NAME: The name of the clean room.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := updateJson.Unmarshal(&updateReq)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
updateReq.Name = args[0]
|
||||
|
||||
response, err := w.CleanRooms.Update(ctx, updateReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range updateOverrides {
|
||||
fn(cmd, &updateReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// end service CleanRooms
|
|
@ -634,8 +634,9 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set cluster policy permissions.`
|
||||
cmd.Long = `Set cluster policy permissions.
|
||||
|
||||
Sets permissions on a cluster policy. Cluster policies can inherit permissions
|
||||
from their root object.
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.
|
||||
|
||||
Arguments:
|
||||
CLUSTER_POLICY_ID: The cluster policy for which to get or manage permissions.`
|
||||
|
|
|
@ -512,7 +512,7 @@ func newEdit() *cobra.Command {
|
|||
Clusters created by the Databricks Jobs service cannot be edited.
|
||||
|
||||
Arguments:
|
||||
CLUSTER_ID: ID of the cluser
|
||||
CLUSTER_ID: ID of the cluster
|
||||
SPARK_VERSION: The Spark version of the cluster, e.g. 3.3.x-scala2.11. A list of
|
||||
available Spark versions can be retrieved by using the
|
||||
:method:clusters/sparkVersions API call.`
|
||||
|
@ -1504,8 +1504,9 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set cluster permissions.`
|
||||
cmd.Long = `Set cluster permissions.
|
||||
|
||||
Sets permissions on a cluster. Clusters can inherit permissions from their
|
||||
root object.
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.
|
||||
|
||||
Arguments:
|
||||
CLUSTER_ID: The cluster for which to get or manage permissions.`
|
||||
|
|
|
@ -8,7 +8,6 @@ import (
|
|||
apps "github.com/databricks/cli/cmd/workspace/apps"
|
||||
artifact_allowlists "github.com/databricks/cli/cmd/workspace/artifact-allowlists"
|
||||
catalogs "github.com/databricks/cli/cmd/workspace/catalogs"
|
||||
clean_rooms "github.com/databricks/cli/cmd/workspace/clean-rooms"
|
||||
cluster_policies "github.com/databricks/cli/cmd/workspace/cluster-policies"
|
||||
clusters "github.com/databricks/cli/cmd/workspace/clusters"
|
||||
connections "github.com/databricks/cli/cmd/workspace/connections"
|
||||
|
@ -17,6 +16,7 @@ import (
|
|||
consumer_listings "github.com/databricks/cli/cmd/workspace/consumer-listings"
|
||||
consumer_personalization_requests "github.com/databricks/cli/cmd/workspace/consumer-personalization-requests"
|
||||
consumer_providers "github.com/databricks/cli/cmd/workspace/consumer-providers"
|
||||
credentials "github.com/databricks/cli/cmd/workspace/credentials"
|
||||
credentials_manager "github.com/databricks/cli/cmd/workspace/credentials-manager"
|
||||
current_user "github.com/databricks/cli/cmd/workspace/current-user"
|
||||
dashboard_widgets "github.com/databricks/cli/cmd/workspace/dashboard-widgets"
|
||||
|
@ -98,7 +98,6 @@ func All() []*cobra.Command {
|
|||
out = append(out, apps.New())
|
||||
out = append(out, artifact_allowlists.New())
|
||||
out = append(out, catalogs.New())
|
||||
out = append(out, clean_rooms.New())
|
||||
out = append(out, cluster_policies.New())
|
||||
out = append(out, clusters.New())
|
||||
out = append(out, connections.New())
|
||||
|
@ -107,6 +106,7 @@ func All() []*cobra.Command {
|
|||
out = append(out, consumer_listings.New())
|
||||
out = append(out, consumer_personalization_requests.New())
|
||||
out = append(out, consumer_providers.New())
|
||||
out = append(out, credentials.New())
|
||||
out = append(out, credentials_manager.New())
|
||||
out = append(out, current_user.New())
|
||||
out = append(out, dashboard_widgets.New())
|
||||
|
|
|
@ -0,0 +1,545 @@
|
|||
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/databricks/cli/cmd/root"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var cmdOverrides []func(*cobra.Command)
|
||||
|
||||
func New() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "credentials",
|
||||
Short: `A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant.`,
|
||||
Long: `A credential represents an authentication and authorization mechanism for
|
||||
accessing services on your cloud tenant. Each credential is subject to Unity
|
||||
Catalog access-control policies that control which users and groups can access
|
||||
the credential.
|
||||
|
||||
To create credentials, you must be a Databricks account admin or have the
|
||||
CREATE SERVICE CREDENTIAL privilege. The user who creates the credential can
|
||||
delegate ownership to another user or group to manage permissions on it.`,
|
||||
GroupID: "catalog",
|
||||
Annotations: map[string]string{
|
||||
"package": "catalog",
|
||||
},
|
||||
|
||||
// This service is being previewed; hide from help output.
|
||||
Hidden: true,
|
||||
}
|
||||
|
||||
// Add methods
|
||||
cmd.AddCommand(newCreateCredential())
|
||||
cmd.AddCommand(newDeleteCredential())
|
||||
cmd.AddCommand(newGenerateTemporaryServiceCredential())
|
||||
cmd.AddCommand(newGetCredential())
|
||||
cmd.AddCommand(newListCredentials())
|
||||
cmd.AddCommand(newUpdateCredential())
|
||||
cmd.AddCommand(newValidateCredential())
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range cmdOverrides {
|
||||
fn(cmd)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start create-credential command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var createCredentialOverrides []func(
|
||||
*cobra.Command,
|
||||
*catalog.CreateCredentialRequest,
|
||||
)
|
||||
|
||||
func newCreateCredential() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var createCredentialReq catalog.CreateCredentialRequest
|
||||
var createCredentialJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&createCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
// TODO: complex arg: aws_iam_role
|
||||
// TODO: complex arg: azure_managed_identity
|
||||
cmd.Flags().StringVar(&createCredentialReq.Comment, "comment", createCredentialReq.Comment, `Comment associated with the credential.`)
|
||||
cmd.Flags().StringVar(&createCredentialReq.Name, "name", createCredentialReq.Name, `The credential name.`)
|
||||
cmd.Flags().Var(&createCredentialReq.Purpose, "purpose", `Indicates the purpose of the credential. Supported values: [SERVICE]`)
|
||||
cmd.Flags().BoolVar(&createCredentialReq.SkipValidation, "skip-validation", createCredentialReq.SkipValidation, `Optional.`)
|
||||
|
||||
cmd.Use = "create-credential"
|
||||
cmd.Short = `Create a credential.`
|
||||
cmd.Long = `Create a credential.
|
||||
|
||||
Creates a new credential.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := createCredentialJson.Unmarshal(&createCredentialReq)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
response, err := w.Credentials.CreateCredential(ctx, createCredentialReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range createCredentialOverrides {
|
||||
fn(cmd, &createCredentialReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start delete-credential command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var deleteCredentialOverrides []func(
|
||||
*cobra.Command,
|
||||
*catalog.DeleteCredentialRequest,
|
||||
)
|
||||
|
||||
func newDeleteCredential() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var deleteCredentialReq catalog.DeleteCredentialRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().BoolVar(&deleteCredentialReq.Force, "force", deleteCredentialReq.Force, `Force deletion even if there are dependent services.`)
|
||||
|
||||
cmd.Use = "delete-credential NAME_ARG"
|
||||
cmd.Short = `Delete a credential.`
|
||||
cmd.Long = `Delete a credential.
|
||||
|
||||
Deletes a credential from the metastore. The caller must be an owner of the
|
||||
credential.
|
||||
|
||||
Arguments:
|
||||
NAME_ARG: Name of the credential.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
deleteCredentialReq.NameArg = args[0]
|
||||
|
||||
err = w.Credentials.DeleteCredential(ctx, deleteCredentialReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range deleteCredentialOverrides {
|
||||
fn(cmd, &deleteCredentialReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start generate-temporary-service-credential command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var generateTemporaryServiceCredentialOverrides []func(
|
||||
*cobra.Command,
|
||||
*catalog.GenerateTemporaryServiceCredentialRequest,
|
||||
)
|
||||
|
||||
func newGenerateTemporaryServiceCredential() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var generateTemporaryServiceCredentialReq catalog.GenerateTemporaryServiceCredentialRequest
|
||||
var generateTemporaryServiceCredentialJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&generateTemporaryServiceCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
// TODO: complex arg: azure_options
|
||||
cmd.Flags().StringVar(&generateTemporaryServiceCredentialReq.CredentialName, "credential-name", generateTemporaryServiceCredentialReq.CredentialName, `The name of the service credential used to generate a temporary credential.`)
|
||||
|
||||
cmd.Use = "generate-temporary-service-credential"
|
||||
cmd.Short = `Generate a temporary service credential.`
|
||||
cmd.Long = `Generate a temporary service credential.
|
||||
|
||||
Returns a set of temporary credentials generated using the specified service
|
||||
credential. The caller must be a metastore admin or have the metastore
|
||||
privilege **ACCESS** on the service credential.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := generateTemporaryServiceCredentialJson.Unmarshal(&generateTemporaryServiceCredentialReq)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
response, err := w.Credentials.GenerateTemporaryServiceCredential(ctx, generateTemporaryServiceCredentialReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range generateTemporaryServiceCredentialOverrides {
|
||||
fn(cmd, &generateTemporaryServiceCredentialReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start get-credential command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var getCredentialOverrides []func(
|
||||
*cobra.Command,
|
||||
*catalog.GetCredentialRequest,
|
||||
)
|
||||
|
||||
func newGetCredential() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var getCredentialReq catalog.GetCredentialRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "get-credential NAME_ARG"
|
||||
cmd.Short = `Get a credential.`
|
||||
cmd.Long = `Get a credential.
|
||||
|
||||
Gets a credential from the metastore. The caller must be a metastore admin,
|
||||
the owner of the credential, or have any permission on the credential.
|
||||
|
||||
Arguments:
|
||||
NAME_ARG: Name of the credential.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
getCredentialReq.NameArg = args[0]
|
||||
|
||||
response, err := w.Credentials.GetCredential(ctx, getCredentialReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range getCredentialOverrides {
|
||||
fn(cmd, &getCredentialReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start list-credentials command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var listCredentialsOverrides []func(
|
||||
*cobra.Command,
|
||||
*catalog.ListCredentialsRequest,
|
||||
)
|
||||
|
||||
func newListCredentials() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var listCredentialsReq catalog.ListCredentialsRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().IntVar(&listCredentialsReq.MaxResults, "max-results", listCredentialsReq.MaxResults, `Maximum number of credentials to return.`)
|
||||
cmd.Flags().StringVar(&listCredentialsReq.PageToken, "page-token", listCredentialsReq.PageToken, `Opaque token to retrieve the next page of results.`)
|
||||
cmd.Flags().Var(&listCredentialsReq.Purpose, "purpose", `Return only credentials for the specified purpose. Supported values: [SERVICE]`)
|
||||
|
||||
cmd.Use = "list-credentials"
|
||||
cmd.Short = `List credentials.`
|
||||
cmd.Long = `List credentials.
|
||||
|
||||
Gets an array of credentials (as __CredentialInfo__ objects).
|
||||
|
||||
The array is limited to only the credentials that the caller has permission to
|
||||
access. If the caller is a metastore admin, retrieval of credentials is
|
||||
unrestricted. There is no guarantee of a specific ordering of the elements in
|
||||
the array.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
response := w.Credentials.ListCredentials(ctx, listCredentialsReq)
|
||||
return cmdio.RenderIterator(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range listCredentialsOverrides {
|
||||
fn(cmd, &listCredentialsReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start update-credential command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var updateCredentialOverrides []func(
|
||||
*cobra.Command,
|
||||
*catalog.UpdateCredentialRequest,
|
||||
)
|
||||
|
||||
func newUpdateCredential() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var updateCredentialReq catalog.UpdateCredentialRequest
|
||||
var updateCredentialJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
// TODO: complex arg: aws_iam_role
|
||||
// TODO: complex arg: azure_managed_identity
|
||||
cmd.Flags().StringVar(&updateCredentialReq.Comment, "comment", updateCredentialReq.Comment, `Comment associated with the credential.`)
|
||||
cmd.Flags().BoolVar(&updateCredentialReq.Force, "force", updateCredentialReq.Force, `Force update even if there are dependent services.`)
|
||||
cmd.Flags().Var(&updateCredentialReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`)
|
||||
cmd.Flags().StringVar(&updateCredentialReq.NewName, "new-name", updateCredentialReq.NewName, `New name of credential.`)
|
||||
cmd.Flags().StringVar(&updateCredentialReq.Owner, "owner", updateCredentialReq.Owner, `Username of current owner of credential.`)
|
||||
cmd.Flags().BoolVar(&updateCredentialReq.SkipValidation, "skip-validation", updateCredentialReq.SkipValidation, `Supply true to this argument to skip validation of the updated credential.`)
|
||||
|
||||
cmd.Use = "update-credential NAME_ARG"
|
||||
cmd.Short = `Update a credential.`
|
||||
cmd.Long = `Update a credential.
|
||||
|
||||
Updates a credential on the metastore.
|
||||
|
||||
The caller must be the owner of the credential or a metastore admin or have
|
||||
the MANAGE permission. If the caller is a metastore admin, only the
|
||||
__owner__ field can be changed.
|
||||
|
||||
Arguments:
|
||||
NAME_ARG: Name of the credential.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := updateCredentialJson.Unmarshal(&updateCredentialReq)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
updateCredentialReq.NameArg = args[0]
|
||||
|
||||
response, err := w.Credentials.UpdateCredential(ctx, updateCredentialReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range updateCredentialOverrides {
|
||||
fn(cmd, &updateCredentialReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start validate-credential command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var validateCredentialOverrides []func(
|
||||
*cobra.Command,
|
||||
*catalog.ValidateCredentialRequest,
|
||||
)
|
||||
|
||||
func newValidateCredential() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var validateCredentialReq catalog.ValidateCredentialRequest
|
||||
var validateCredentialJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&validateCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
// TODO: complex arg: aws_iam_role
|
||||
// TODO: complex arg: azure_managed_identity
|
||||
cmd.Flags().StringVar(&validateCredentialReq.CredentialName, "credential-name", validateCredentialReq.CredentialName, `Required.`)
|
||||
cmd.Flags().Var(&validateCredentialReq.Purpose, "purpose", `The purpose of the credential. Supported values: [SERVICE]`)
|
||||
|
||||
cmd.Use = "validate-credential"
|
||||
cmd.Short = `Validate a credential.`
|
||||
cmd.Long = `Validate a credential.
|
||||
|
||||
Validates a credential.
|
||||
|
||||
Either the __credential_name__ or the cloud-specific credential must be
|
||||
provided.
|
||||
|
||||
The caller must be a metastore admin or the credential owner.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := validateCredentialJson.Unmarshal(&validateCredentialReq)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
response, err := w.Credentials.ValidateCredential(ctx, validateCredentialReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range validateCredentialOverrides {
|
||||
fn(cmd, &validateCredentialReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// end service Credentials
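
As a hedged sketch of the request-aware override variant used throughout these generated files, the following hypothetical curated file (illustrative names, not part of the generated credentials service) shows how an init()-registered function could adjust both the command and the request that the generated RunE later sends; it would live in the same package as the generated file and assumes only the imports already shown above.

package credentials

import (
	"github.com/databricks/databricks-sdk-go/service/catalog"
	"github.com/spf13/cobra"
)

// Hypothetical override: tweak the generated list-credentials command and
// pre-populate a request field before RunE executes.
func listCredentialsOverride(cmd *cobra.Command, req *catalog.ListCredentialsRequest) {
	cmd.Short = `List credentials (paged).`
	// Illustrative default page size, used when --max-results is not passed.
	req.MaxResults = 50
}

func init() {
	listCredentialsOverrides = append(listCredentialsOverrides, listCredentialsOverride)
}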
|
|
@ -2034,8 +2034,9 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set experiment permissions.`
|
||||
cmd.Long = `Set experiment permissions.
|
||||
|
||||
Sets permissions on an experiment. Experiments can inherit permissions from
|
||||
their root object.
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.
|
||||
|
||||
Arguments:
|
||||
EXPERIMENT_ID: The experiment for which to get or manage permissions.`
|
||||
|
|
|
@ -356,7 +356,7 @@ func newUpdate() *cobra.Command {
|
|||
// TODO: complex arg: encryption_details
|
||||
cmd.Flags().BoolVar(&updateReq.Fallback, "fallback", updateReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`)
|
||||
cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`)
|
||||
cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`)
|
||||
cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`)
|
||||
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`)
|
||||
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the external location.`)
|
||||
cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Indicates whether the external location is read-only.`)
|
||||
|
|
|
@ -160,13 +160,13 @@ func newCreateMessage() *cobra.Command {
|
|||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var executeMessageQueryOverrides []func(
|
||||
*cobra.Command,
|
||||
*dashboards.ExecuteMessageQueryRequest,
|
||||
*dashboards.GenieExecuteMessageQueryRequest,
|
||||
)
|
||||
|
||||
func newExecuteMessageQuery() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var executeMessageQueryReq dashboards.ExecuteMessageQueryRequest
|
||||
var executeMessageQueryReq dashboards.GenieExecuteMessageQueryRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
|
|
|
@ -635,8 +635,9 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set instance pool permissions.`
|
||||
cmd.Long = `Set instance pool permissions.
|
||||
|
||||
Sets permissions on an instance pool. Instance pools can inherit permissions
|
||||
from their root object.
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.
|
||||
|
||||
Arguments:
|
||||
INSTANCE_POOL_ID: The instance pool for which to get or manage permissions.`
|
||||
|
|
|
@ -847,7 +847,7 @@ func newGetRun() *cobra.Command {
|
|||
|
||||
cmd.Flags().BoolVar(&getRunReq.IncludeHistory, "include-history", getRunReq.IncludeHistory, `Whether to include the repair history in the response.`)
|
||||
cmd.Flags().BoolVar(&getRunReq.IncludeResolvedValues, "include-resolved-values", getRunReq.IncludeResolvedValues, `Whether to include resolved parameter values in the response.`)
|
||||
cmd.Flags().StringVar(&getRunReq.PageToken, "page-token", getRunReq.PageToken, `To list the next page or the previous page of job tasks, set this field to the value of the next_page_token or prev_page_token returned in the GetJob response.`)
|
||||
cmd.Flags().StringVar(&getRunReq.PageToken, "page-token", getRunReq.PageToken, `To list the next page of job tasks, set this field to the value of the next_page_token returned in the GetJob response.`)
|
||||
|
||||
cmd.Use = "get-run RUN_ID"
|
||||
cmd.Short = `Get a single job run.`
|
||||
|
@ -1339,6 +1339,7 @@ func newRunNow() *cobra.Command {
|
|||
// TODO: array: jar_params
|
||||
// TODO: map via StringToStringVar: job_parameters
|
||||
// TODO: map via StringToStringVar: notebook_params
|
||||
// TODO: array: only
|
||||
// TODO: complex arg: pipeline_params
|
||||
// TODO: map via StringToStringVar: python_named_params
|
||||
// TODO: array: python_params
|
||||
|
@ -1470,8 +1471,9 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set job permissions.`
|
||||
cmd.Long = `Set job permissions.
|
||||
|
||||
Sets permissions on a job. Jobs can inherit permissions from their root
|
||||
object.
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.
|
||||
|
||||
Arguments:
|
||||
JOB_ID: The job for which to get or manage permissions.`
|
||||
|
|
|
@ -70,35 +70,26 @@ func newCreate() *cobra.Command {
|
|||
cmd := &cobra.Command{}
|
||||
|
||||
var createReq dashboards.CreateDashboardRequest
|
||||
createReq.Dashboard = &dashboards.Dashboard{}
|
||||
var createJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&createReq.ParentPath, "parent-path", createReq.ParentPath, `The workspace path of the folder containing the dashboard.`)
|
||||
cmd.Flags().StringVar(&createReq.SerializedDashboard, "serialized-dashboard", createReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`)
|
||||
cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `The warehouse ID used to run the dashboard.`)
|
||||
cmd.Flags().StringVar(&createReq.Dashboard.DisplayName, "display-name", createReq.Dashboard.DisplayName, `The display name of the dashboard.`)
|
||||
cmd.Flags().StringVar(&createReq.Dashboard.SerializedDashboard, "serialized-dashboard", createReq.Dashboard.SerializedDashboard, `The contents of the dashboard in serialized string form.`)
|
||||
cmd.Flags().StringVar(&createReq.Dashboard.WarehouseId, "warehouse-id", createReq.Dashboard.WarehouseId, `The warehouse ID used to run the dashboard.`)
|
||||
|
||||
cmd.Use = "create DISPLAY_NAME"
|
||||
cmd.Use = "create"
|
||||
cmd.Short = `Create dashboard.`
|
||||
cmd.Long = `Create dashboard.
|
||||
|
||||
Create a draft dashboard.
|
||||
|
||||
Arguments:
|
||||
DISPLAY_NAME: The display name of the dashboard.`
|
||||
Create a draft dashboard.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
if cmd.Flags().Changed("json") {
|
||||
err := root.ExactArgs(0)(cmd, args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'display_name' in your JSON input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
check := root.ExactArgs(1)
|
||||
check := root.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
|
@ -108,7 +99,7 @@ func newCreate() *cobra.Command {
|
|||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := createJson.Unmarshal(&createReq)
|
||||
diags := createJson.Unmarshal(&createReq.Dashboard)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
|
@ -119,9 +110,6 @@ func newCreate() *cobra.Command {
|
|||
}
|
||||
}
|
||||
}
|
||||
if !cmd.Flags().Changed("json") {
|
||||
createReq.DisplayName = args[0]
|
||||
}
|
||||
|
||||
response, err := w.Lakeview.Create(ctx, createReq)
|
||||
if err != nil {
|
||||
|
@ -155,13 +143,15 @@ func newCreateSchedule() *cobra.Command {
|
|||
cmd := &cobra.Command{}
|
||||
|
||||
var createScheduleReq dashboards.CreateScheduleRequest
|
||||
createScheduleReq.Schedule = &dashboards.Schedule{}
|
||||
var createScheduleJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&createScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&createScheduleReq.DisplayName, "display-name", createScheduleReq.DisplayName, `The display name for schedule.`)
|
||||
cmd.Flags().Var(&createScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`)
|
||||
cmd.Flags().StringVar(&createScheduleReq.Schedule.DisplayName, "display-name", createScheduleReq.Schedule.DisplayName, `The display name for schedule.`)
|
||||
cmd.Flags().Var(&createScheduleReq.Schedule.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`)
|
||||
cmd.Flags().StringVar(&createScheduleReq.Schedule.WarehouseId, "warehouse-id", createScheduleReq.Schedule.WarehouseId, `The warehouse id to run the dashboard with for the schedule.`)
|
||||
|
||||
cmd.Use = "create-schedule DASHBOARD_ID"
|
||||
cmd.Short = `Create dashboard schedule.`
|
||||
|
@ -176,6 +166,13 @@ func newCreateSchedule() *cobra.Command {
|
|||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
if cmd.Flags().Changed("json") {
|
||||
err := root.ExactArgs(0)(cmd, args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cron_schedule' in your JSON input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
check := root.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
@ -186,7 +183,7 @@ func newCreateSchedule() *cobra.Command {
|
|||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := createScheduleJson.Unmarshal(&createScheduleReq)
|
||||
diags := createScheduleJson.Unmarshal(&createScheduleReq.Schedule)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
|
@ -196,8 +193,6 @@ func newCreateSchedule() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
|
||||
}
|
||||
createScheduleReq.DashboardId = args[0]
|
||||
|
||||
|
@ -233,6 +228,7 @@ func newCreateSubscription() *cobra.Command {
|
|||
cmd := &cobra.Command{}
|
||||
|
||||
var createSubscriptionReq dashboards.CreateSubscriptionRequest
|
||||
createSubscriptionReq.Subscription = &dashboards.Subscription{}
|
||||
var createSubscriptionJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
|
@ -252,6 +248,13 @@ func newCreateSubscription() *cobra.Command {
|
|||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
if cmd.Flags().Changed("json") {
|
||||
err := root.ExactArgs(0)(cmd, args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'subscriber' in your JSON input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
check := root.ExactArgs(2)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
@ -262,7 +265,7 @@ func newCreateSubscription() *cobra.Command {
|
|||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := createSubscriptionJson.Unmarshal(&createSubscriptionReq)
|
||||
diags := createSubscriptionJson.Unmarshal(&createSubscriptionReq.Subscription)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
|
@ -272,8 +275,6 @@ func newCreateSubscription() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
|
||||
}
|
||||
createSubscriptionReq.DashboardId = args[0]
|
||||
createSubscriptionReq.ScheduleId = args[1]
|
||||
|
@ -313,8 +314,6 @@ func newDeleteSchedule() *cobra.Command {
|
|||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().StringVar(&deleteScheduleReq.Etag, "etag", deleteScheduleReq.Etag, `The etag for the schedule.`)
|
||||
|
||||
cmd.Use = "delete-schedule DASHBOARD_ID SCHEDULE_ID"
|
||||
cmd.Short = `Delete dashboard schedule.`
|
||||
cmd.Long = `Delete dashboard schedule.
|
||||
|
@ -376,8 +375,6 @@ func newDeleteSubscription() *cobra.Command {
|
|||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().StringVar(&deleteSubscriptionReq.Etag, "etag", deleteSubscriptionReq.Etag, `The etag for the subscription.`)
|
||||
|
||||
cmd.Use = "delete-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIPTION_ID"
|
||||
cmd.Short = `Delete schedule subscription.`
|
||||
cmd.Long = `Delete schedule subscription.
|
||||
|
@ -682,7 +679,6 @@ func newList() *cobra.Command {
|
|||
// TODO: short flags
|
||||
|
||||
cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The number of dashboards to return per page.`)
|
||||
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A page token, received from a previous ListDashboards call.`)
|
||||
cmd.Flags().BoolVar(&listReq.ShowTrashed, "show-trashed", listReq.ShowTrashed, `The flag to include dashboards located in the trash.`)
|
||||
cmd.Flags().Var(&listReq.View, "view", `DASHBOARD_VIEW_BASIC only includes summary metadata from the dashboard. Supported values: [DASHBOARD_VIEW_BASIC]`)
|
||||
|
||||
|
@ -735,7 +731,6 @@ func newListSchedules() *cobra.Command {
|
|||
// TODO: short flags
|
||||
|
||||
cmd.Flags().IntVar(&listSchedulesReq.PageSize, "page-size", listSchedulesReq.PageSize, `The number of schedules to return per page.`)
|
||||
cmd.Flags().StringVar(&listSchedulesReq.PageToken, "page-token", listSchedulesReq.PageToken, `A page token, received from a previous ListSchedules call.`)
|
||||
|
||||
cmd.Use = "list-schedules DASHBOARD_ID"
|
||||
cmd.Short = `List dashboard schedules.`
|
||||
|
@ -794,7 +789,6 @@ func newListSubscriptions() *cobra.Command {
|
|||
// TODO: short flags
|
||||
|
||||
cmd.Flags().IntVar(&listSubscriptionsReq.PageSize, "page-size", listSubscriptionsReq.PageSize, `The number of subscriptions to return per page.`)
|
||||
cmd.Flags().StringVar(&listSubscriptionsReq.PageToken, "page-token", listSubscriptionsReq.PageToken, `A page token, received from a previous ListSubscriptions call.`)
|
||||
|
||||
cmd.Use = "list-subscriptions DASHBOARD_ID SCHEDULE_ID"
|
||||
cmd.Short = `List schedule subscriptions.`
|
||||
|
@ -1126,15 +1120,15 @@ func newUpdate() *cobra.Command {
|
|||
cmd := &cobra.Command{}
|
||||
|
||||
var updateReq dashboards.UpdateDashboardRequest
|
||||
updateReq.Dashboard = &dashboards.Dashboard{}
|
||||
var updateJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&updateReq.DisplayName, "display-name", updateReq.DisplayName, `The display name of the dashboard.`)
|
||||
cmd.Flags().StringVar(&updateReq.Etag, "etag", updateReq.Etag, `The etag for the dashboard.`)
|
||||
cmd.Flags().StringVar(&updateReq.SerializedDashboard, "serialized-dashboard", updateReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`)
|
||||
cmd.Flags().StringVar(&updateReq.WarehouseId, "warehouse-id", updateReq.WarehouseId, `The warehouse ID used to run the dashboard.`)
|
||||
cmd.Flags().StringVar(&updateReq.Dashboard.DisplayName, "display-name", updateReq.Dashboard.DisplayName, `The display name of the dashboard.`)
|
||||
cmd.Flags().StringVar(&updateReq.Dashboard.SerializedDashboard, "serialized-dashboard", updateReq.Dashboard.SerializedDashboard, `The contents of the dashboard in serialized string form.`)
|
||||
cmd.Flags().StringVar(&updateReq.Dashboard.WarehouseId, "warehouse-id", updateReq.Dashboard.WarehouseId, `The warehouse ID used to run the dashboard.`)
|
||||
|
||||
cmd.Use = "update DASHBOARD_ID"
|
||||
cmd.Short = `Update dashboard.`
|
||||
|
@ -1158,7 +1152,7 @@ func newUpdate() *cobra.Command {
|
|||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := updateJson.Unmarshal(&updateReq)
|
||||
diags := updateJson.Unmarshal(&updateReq.Dashboard)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
|
@ -1203,14 +1197,15 @@ func newUpdateSchedule() *cobra.Command {
|
|||
cmd := &cobra.Command{}
|
||||
|
||||
var updateScheduleReq dashboards.UpdateScheduleRequest
|
||||
updateScheduleReq.Schedule = &dashboards.Schedule{}
|
||||
var updateScheduleJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&updateScheduleReq.DisplayName, "display-name", updateScheduleReq.DisplayName, `The display name for schedule.`)
|
||||
cmd.Flags().StringVar(&updateScheduleReq.Etag, "etag", updateScheduleReq.Etag, `The etag for the schedule.`)
|
||||
cmd.Flags().Var(&updateScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`)
|
||||
cmd.Flags().StringVar(&updateScheduleReq.Schedule.DisplayName, "display-name", updateScheduleReq.Schedule.DisplayName, `The display name for schedule.`)
|
||||
cmd.Flags().Var(&updateScheduleReq.Schedule.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`)
|
||||
cmd.Flags().StringVar(&updateScheduleReq.Schedule.WarehouseId, "warehouse-id", updateScheduleReq.Schedule.WarehouseId, `The warehouse id to run the dashboard with for the schedule.`)
|
||||
|
||||
cmd.Use = "update-schedule DASHBOARD_ID SCHEDULE_ID"
|
||||
cmd.Short = `Update dashboard schedule.`
|
||||
|
@ -1226,6 +1221,13 @@ func newUpdateSchedule() *cobra.Command {
|
|||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
if cmd.Flags().Changed("json") {
|
||||
err := root.ExactArgs(0)(cmd, args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cron_schedule' in your JSON input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
check := root.ExactArgs(2)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
@ -1236,7 +1238,7 @@ func newUpdateSchedule() *cobra.Command {
|
|||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := updateScheduleJson.Unmarshal(&updateScheduleReq)
|
||||
diags := updateScheduleJson.Unmarshal(&updateScheduleReq.Schedule)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
|
@ -1246,8 +1248,6 @@ func newUpdateSchedule() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
|
||||
}
|
||||
updateScheduleReq.DashboardId = args[0]
|
||||
updateScheduleReq.ScheduleId = args[1]
|
||||
|
|
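Note: the two Lakeview hunks above follow the same pattern introduced by the SDK bump recorded in go.mod further down: the mutable fields of `update` and `update-schedule` now live in a nested `Dashboard`/`Schedule` body instead of sitting directly on the request. As a point of reference only, here is a minimal stand-alone sketch of a direct SDK call with the new shape, assuming databricks-sdk-go v0.51.0 and its `service/dashboards` package; the IDs and payload below are placeholders, not values from this change.

package main

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Sketch only: the request body is now carried in the nested Dashboard
	// struct; only the DashboardId stays on the request itself.
	_, err := w.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{
		DashboardId: "placeholder-dashboard-id",
		Dashboard: &dashboards.Dashboard{
			DisplayName:         "My dashboard",
			SerializedDashboard: `{"pages": []}`,
			WarehouseId:         "placeholder-warehouse-id",
		},
	})
	if err != nil {
		panic(err)
	}
}

The integration test change for TestAccDashboards later in this diff exercises exactly this nested form.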
|
@ -2123,7 +2123,8 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set registered model permissions.`
|
||||
cmd.Long = `Set registered model permissions.
|
||||
|
||||
Sets permissions on a registered model. Registered models can inherit
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.
|
||||
|
||||
Arguments:
|
||||
|
|
|
@ -3,6 +3,9 @@
|
|||
package online_tables
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/databricks/cli/cmd/root"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
|
@ -52,13 +55,20 @@ func newCreate() *cobra.Command {
|
|||
cmd := &cobra.Command{}
|
||||
|
||||
var createReq catalog.CreateOnlineTableRequest
|
||||
createReq.Table = &catalog.OnlineTable{}
|
||||
var createJson flags.JsonFlag
|
||||
|
||||
var createSkipWait bool
|
||||
var createTimeout time.Duration
|
||||
|
||||
cmd.Flags().BoolVar(&createSkipWait, "no-wait", createSkipWait, `do not wait to reach ACTIVE state`)
|
||||
cmd.Flags().DurationVar(&createTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach ACTIVE state`)
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Full three-part (catalog, schema, table) name of the table.`)
|
||||
cmd.Flags().StringVar(&createReq.Table.Name, "name", createReq.Table.Name, `Full three-part (catalog, schema, table) name of the table.`)
|
||||
// TODO: complex arg: spec
|
||||
// TODO: complex arg: status
|
||||
|
||||
cmd.Use = "create"
|
||||
cmd.Short = `Create an Online Table.`
|
||||
|
@ -79,7 +89,7 @@ func newCreate() *cobra.Command {
|
|||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
diags := createJson.Unmarshal(&createReq)
|
||||
diags := createJson.Unmarshal(&createReq.Table)
|
||||
if diags.HasError() {
|
||||
return diags.Error()
|
||||
}
|
||||
|
@ -91,11 +101,24 @@ func newCreate() *cobra.Command {
|
|||
}
|
||||
}
|
||||
|
||||
response, err := w.OnlineTables.Create(ctx, createReq)
|
||||
wait, err := w.OnlineTables.Create(ctx, createReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
if createSkipWait {
|
||||
return cmdio.Render(ctx, wait.Response)
|
||||
}
|
||||
spinner := cmdio.Spinner(ctx)
|
||||
info, err := wait.OnProgress(func(i *catalog.OnlineTable) {
|
||||
status := i.UnityCatalogProvisioningState
|
||||
statusMessage := fmt.Sprintf("current status: %s", status)
|
||||
spinner <- statusMessage
|
||||
}).GetWithTimeout(createTimeout)
|
||||
close(spinner)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, info)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
|
|
|
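Note: the online_tables hunk above switches `create` from a plain request/response call to the SDK's long-running-operation waiter: the body moves into the nested `Table` field and the command polls `UnityCatalogProvisioningState` until the table reaches ACTIVE. For orientation, a minimal stand-alone sketch of that flow, again assuming databricks-sdk-go v0.51.0 and its `service/catalog` package; the table name and timeout below are illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// The request body lives in the nested Table field.
	createReq := catalog.CreateOnlineTableRequest{
		Table: &catalog.OnlineTable{
			Name: "main.default.my_online_table", // placeholder three-part name
		},
	}

	// Create returns a waiter; poll provisioning status until ACTIVE or timeout.
	wait, err := w.OnlineTables.Create(ctx, createReq)
	if err != nil {
		panic(err)
	}
	info, err := wait.OnProgress(func(t *catalog.OnlineTable) {
		fmt.Printf("current status: %s\n", t.UnityCatalogProvisioningState)
	}).GetWithTimeout(20 * time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println(info.Name)
}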
@ -241,8 +241,9 @@ func newSet() *cobra.Command {
|
|||
cmd.Short = `Set object permissions.`
|
||||
cmd.Long = `Set object permissions.
|
||||
|
||||
Sets permissions on an object. Objects can inherit permissions from their
|
||||
parent objects or root object.
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their parent objects or root object.
|
||||
|
||||
Arguments:
|
||||
REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts,
|
||||
|
|
|
@ -691,8 +691,9 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set pipeline permissions.`
|
||||
cmd.Long = `Set pipeline permissions.
|
||||
|
||||
Sets permissions on a pipeline. Pipelines can inherit permissions from their
|
||||
root object.
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.
|
||||
|
||||
Arguments:
|
||||
PIPELINE_ID: The pipeline for which to get or manage permissions.`
|
||||
|
@ -972,6 +973,7 @@ func newUpdate() *cobra.Command {
|
|||
// TODO: array: notifications
|
||||
cmd.Flags().BoolVar(&updateReq.Photon, "photon", updateReq.Photon, `Whether Photon is enabled for this pipeline.`)
|
||||
cmd.Flags().StringVar(&updateReq.PipelineId, "pipeline-id", updateReq.PipelineId, `Unique identifier for this pipeline.`)
|
||||
// TODO: complex arg: restart_window
|
||||
cmd.Flags().StringVar(&updateReq.Schema, "schema", updateReq.Schema, `The default schema (database) where tables are read from or published to.`)
|
||||
cmd.Flags().BoolVar(&updateReq.Serverless, "serverless", updateReq.Serverless, `Whether serverless compute is enabled for this pipeline.`)
|
||||
cmd.Flags().StringVar(&updateReq.Storage, "storage", updateReq.Storage, `DBFS root directory for storing checkpoints and tables.`)
|
||||
|
|
|
@ -513,8 +513,9 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set repo permissions.`
|
||||
cmd.Long = `Set repo permissions.
|
||||
|
||||
Sets permissions on a repo. Repos can inherit permissions from their root
|
||||
object.
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.
|
||||
|
||||
Arguments:
|
||||
REPO_ID: The repo for which to get or manage permissions.`
|
||||
|
|
|
@ -1008,7 +1008,8 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set serving endpoint permissions.`
|
||||
cmd.Long = `Set serving endpoint permissions.
|
||||
|
||||
Sets permissions on a serving endpoint. Serving endpoints can inherit
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.
|
||||
|
||||
Arguments:
|
||||
|
|
|
@ -5,6 +5,8 @@ package settings
|
|||
import (
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
aibi_dashboard_embedding_access_policy "github.com/databricks/cli/cmd/workspace/aibi-dashboard-embedding-access-policy"
|
||||
aibi_dashboard_embedding_approved_domains "github.com/databricks/cli/cmd/workspace/aibi-dashboard-embedding-approved-domains"
|
||||
automatic_cluster_update "github.com/databricks/cli/cmd/workspace/automatic-cluster-update"
|
||||
compliance_security_profile "github.com/databricks/cli/cmd/workspace/compliance-security-profile"
|
||||
default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace"
|
||||
|
@ -30,6 +32,8 @@ func New() *cobra.Command {
|
|||
}
|
||||
|
||||
// Add subservices
|
||||
cmd.AddCommand(aibi_dashboard_embedding_access_policy.New())
|
||||
cmd.AddCommand(aibi_dashboard_embedding_approved_domains.New())
|
||||
cmd.AddCommand(automatic_cluster_update.New())
|
||||
cmd.AddCommand(compliance_security_profile.New())
|
||||
cmd.AddCommand(default_namespace.New())
|
||||
|
|
|
@ -391,7 +391,6 @@ func newUpdate() *cobra.Command {
|
|||
|
||||
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
|
||||
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the share.`)
|
||||
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of share.`)
|
||||
cmd.Flags().StringVar(&updateReq.StorageRoot, "storage-root", updateReq.StorageRoot, `Storage root URL for the share.`)
|
||||
// TODO: array: updates
|
||||
|
||||
|
|
|
@ -360,7 +360,7 @@ func newUpdate() *cobra.Command {
|
|||
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Comment associated with the credential.`)
|
||||
// TODO: complex arg: databricks_gcp_service_account
|
||||
cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if there are dependent external locations or external tables.`)
|
||||
cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`)
|
||||
cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`)
|
||||
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the storage credential.`)
|
||||
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of credential.`)
|
||||
cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`)
|
||||
|
|
|
@ -304,6 +304,7 @@ func newList() *cobra.Command {
|
|||
cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of tables to return.`)
|
||||
cmd.Flags().BoolVar(&listReq.OmitColumns, "omit-columns", listReq.OmitColumns, `Whether to omit the columns of the table from the response or not.`)
|
||||
cmd.Flags().BoolVar(&listReq.OmitProperties, "omit-properties", listReq.OmitProperties, `Whether to omit the properties of the table from the response or not.`)
|
||||
cmd.Flags().BoolVar(&listReq.OmitUsername, "omit-username", listReq.OmitUsername, `Whether to omit the username of the table (e.g.`)
|
||||
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token to send for the next page of results (pagination).`)
|
||||
|
||||
cmd.Use = "list CATALOG_NAME SCHEMA_NAME"
|
||||
|
|
|
@ -448,8 +448,9 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set token permissions.`
|
||||
cmd.Long = `Set token permissions.
|
||||
|
||||
Sets permissions on all tokens. Tokens can inherit permissions from their root
|
||||
object.`
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
|
|
@ -542,8 +542,9 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set password permissions.`
|
||||
cmd.Long = `Set password permissions.
|
||||
|
||||
Sets permissions on all passwords. Passwords can inherit permissions from
|
||||
their root object.`
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
|
|
@ -686,8 +686,9 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set SQL warehouse permissions.`
|
||||
cmd.Long = `Set SQL warehouse permissions.
|
||||
|
||||
Sets permissions on a SQL warehouse. SQL warehouses can inherit permissions
|
||||
from their root object.
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their root object.
|
||||
|
||||
Arguments:
|
||||
WAREHOUSE_ID: The SQL warehouse for which to get or manage permissions.`
|
||||
|
|
|
@ -447,6 +447,7 @@ func newImport() *cobra.Command {
|
|||
DBC,
|
||||
HTML,
|
||||
JUPYTER,
|
||||
RAW,
|
||||
R_MARKDOWN,
|
||||
SOURCE,
|
||||
]`)
|
||||
|
@ -708,7 +709,8 @@ func newSetPermissions() *cobra.Command {
|
|||
cmd.Short = `Set workspace object permissions.`
|
||||
cmd.Long = `Set workspace object permissions.
|
||||
|
||||
Sets permissions on a workspace object. Workspace objects can inherit
|
||||
Sets permissions on an object, replacing existing permissions if they exist.
|
||||
Deletes all direct permissions if none are specified. Objects can inherit
|
||||
permissions from their parent objects or root object.
|
||||
|
||||
Arguments:
|
||||
|
|
14
go.mod
|
@ -7,14 +7,14 @@ toolchain go1.23.2
|
|||
require (
|
||||
github.com/Masterminds/semver/v3 v3.3.0 // MIT
|
||||
github.com/briandowns/spinner v1.23.1 // Apache 2.0
|
||||
github.com/databricks/databricks-sdk-go v0.49.0 // Apache 2.0
|
||||
github.com/databricks/databricks-sdk-go v0.51.0 // Apache 2.0
|
||||
github.com/fatih/color v1.18.0 // MIT
|
||||
github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
|
||||
github.com/google/uuid v1.6.0 // BSD-3-Clause
|
||||
github.com/hashicorp/go-version v1.7.0 // MPL 2.0
|
||||
github.com/hashicorp/hc-install v0.9.0 // MPL 2.0
|
||||
github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0
|
||||
github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0
|
||||
github.com/hashicorp/terraform-json v0.23.0 // MPL 2.0
|
||||
github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause
|
||||
github.com/mattn/go-isatty v0.0.20 // MIT
|
||||
github.com/nwidger/jsoncolor v0.3.2 // MIT
|
||||
|
@ -24,11 +24,11 @@ require (
|
|||
github.com/spf13/pflag v1.0.5 // BSD-3-Clause
|
||||
github.com/stretchr/testify v1.9.0 // MIT
|
||||
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
|
||||
golang.org/x/mod v0.21.0
|
||||
golang.org/x/oauth2 v0.23.0
|
||||
golang.org/x/sync v0.8.0
|
||||
golang.org/x/mod v0.22.0
|
||||
golang.org/x/oauth2 v0.24.0
|
||||
golang.org/x/sync v0.9.0
|
||||
golang.org/x/term v0.25.0
|
||||
golang.org/x/text v0.19.0
|
||||
golang.org/x/text v0.20.0
|
||||
gopkg.in/ini.v1 v1.67.0 // Apache 2.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
@ -56,7 +56,7 @@ require (
|
|||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/zclconf/go-cty v1.14.4 // indirect
|
||||
github.com/zclconf/go-cty v1.15.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
|
||||
go.opentelemetry.io/otel v1.24.0 // indirect
|
||||
|
|
|
@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
|
|||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
github.com/databricks/databricks-sdk-go v0.49.0 h1:VBTeZZMLIuBSM4kxOCfUcW9z4FUQZY2QeNRD5qm9FUQ=
|
||||
github.com/databricks/databricks-sdk-go v0.49.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
|
||||
github.com/databricks/databricks-sdk-go v0.51.0 h1:tcvB9TID3oUl0O8npccB5c+33tarBiYMBFbq4U4AB6M=
|
||||
github.com/databricks/databricks-sdk-go v0.51.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
@ -109,8 +109,8 @@ github.com/hashicorp/hc-install v0.9.0 h1:2dIk8LcvANwtv3QZLckxcjyF5w8KVtiMxu6G6e
|
|||
github.com/hashicorp/hc-install v0.9.0/go.mod h1:+6vOP+mf3tuGgMApVYtmsnDoKWMDcFXeTxCACYZ8SFg=
|
||||
github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ=
|
||||
github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg=
|
||||
github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec=
|
||||
github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A=
|
||||
github.com/hashicorp/terraform-json v0.23.0 h1:sniCkExU4iKtTADReHzACkk8fnpQXrdD2xoR+lppBkI=
|
||||
github.com/hashicorp/terraform-json v0.23.0/go.mod h1:MHdXbBAbSg0GvzuWazEGKAn/cyNfIB7mN6y7KJN6y2c=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
|
||||
|
@ -160,8 +160,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
|
|||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
|
||||
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
|
||||
github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8=
|
||||
github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
|
||||
github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ=
|
||||
github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
|
||||
|
@ -184,8 +184,8 @@ golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY
|
|||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
|
||||
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -195,13 +195,13 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
|
|||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
|
||||
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
|
||||
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
|
||||
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -218,8 +218,8 @@ golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
|
|||
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
|
||||
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
|
||||
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
|
|
@ -0,0 +1,25 @@
|
|||
{
|
||||
"properties": {
|
||||
"project_name": {
|
||||
"type": "string",
|
||||
"default": "my_test_code",
|
||||
"description": "Unique name for this project"
|
||||
},
|
||||
"spark_version": {
|
||||
"type": "string",
|
||||
"description": "Spark version used for job cluster"
|
||||
},
|
||||
"node_type_id": {
|
||||
"type": "string",
|
||||
"description": "Node type id for job cluster"
|
||||
},
|
||||
"unique_id": {
|
||||
"type": "string",
|
||||
"description": "Unique ID for job name"
|
||||
},
|
||||
"instance_pool_id": {
|
||||
"type": "string",
|
||||
"description": "Instance pool id for job cluster"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
bundle:
|
||||
name: wheel-task
|
||||
|
||||
workspace:
|
||||
root_path: "~/.bundle/{{.unique_id}}"
|
||||
|
||||
resources:
|
||||
clusters:
|
||||
test_cluster:
|
||||
cluster_name: "test-cluster-{{.unique_id}}"
|
||||
spark_version: "{{.spark_version}}"
|
||||
node_type_id: "{{.node_type_id}}"
|
||||
num_workers: 1
|
||||
data_security_mode: USER_ISOLATION
|
||||
|
||||
jobs:
|
||||
some_other_job:
|
||||
name: "[${bundle.target}] Test Wheel Job {{.unique_id}}"
|
||||
tasks:
|
||||
- task_key: TestTask
|
||||
existing_cluster_id: "${resources.clusters.test_cluster.cluster_id}"
|
||||
python_wheel_task:
|
||||
package_name: my_test_code
|
||||
entry_point: run
|
||||
parameters:
|
||||
- "one"
|
||||
- "two"
|
||||
libraries:
|
||||
- whl: ./dist/*.whl
|
|
@ -0,0 +1,15 @@
|
|||
from setuptools import setup, find_packages
|
||||
|
||||
import {{.project_name}}
|
||||
|
||||
setup(
|
||||
name="{{.project_name}}",
|
||||
version={{.project_name}}.__version__,
|
||||
author={{.project_name}}.__author__,
|
||||
url="https://databricks.com",
|
||||
author_email="john.doe@databricks.com",
|
||||
description="my example wheel",
|
||||
packages=find_packages(include=["{{.project_name}}"]),
|
||||
entry_points={"group1": "run={{.project_name}}.__main__:main"},
|
||||
install_requires=["setuptools"],
|
||||
)
|
|
@ -0,0 +1,2 @@
|
|||
__version__ = "0.0.1"
|
||||
__author__ = "Databricks"
|
|
@ -0,0 +1,16 @@
|
|||
"""
|
||||
The entry point of the Python Wheel
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
|
||||
def main():
|
||||
# This method will print the provided arguments
|
||||
print("Hello from my func")
|
||||
print("Got arguments:")
|
||||
print(sys.argv)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -46,8 +46,10 @@ func TestAccDashboards(t *testing.T) {
|
|||
|
||||
// Make an out of band modification to the dashboard and confirm that it is detected.
|
||||
_, err = wt.W.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{
|
||||
DashboardId: oi.ResourceId,
|
||||
SerializedDashboard: dashboard.SerializedDashboard,
|
||||
DashboardId: oi.ResourceId,
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
SerializedDashboard: dashboard.SerializedDashboard,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
|
@ -5,17 +5,18 @@ import (
|
|||
|
||||
"github.com/databricks/cli/internal"
|
||||
"github.com/databricks/cli/internal/acc"
|
||||
"github.com/databricks/cli/internal/testutil"
|
||||
"github.com/databricks/cli/libs/env"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bool) {
|
||||
func runPythonWheelTest(t *testing.T, templateName string, sparkVersion string, pythonWheelWrapper bool) {
|
||||
ctx, _ := acc.WorkspaceTest(t)
|
||||
|
||||
nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV"))
|
||||
instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID")
|
||||
bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task", map[string]any{
|
||||
bundleRoot, err := initTestTemplate(t, ctx, templateName, map[string]any{
|
||||
"node_type_id": nodeTypeId,
|
||||
"unique_id": uuid.New().String(),
|
||||
"spark_version": sparkVersion,
|
||||
|
@ -45,9 +46,19 @@ func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bo
|
|||
}
|
||||
|
||||
func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) {
|
||||
runPythonWheelTest(t, "13.3.x-snapshot-scala2.12", false)
|
||||
runPythonWheelTest(t, "python_wheel_task", "13.3.x-snapshot-scala2.12", false)
|
||||
}
|
||||
|
||||
func TestAccPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) {
|
||||
runPythonWheelTest(t, "12.2.x-scala2.12", true)
|
||||
runPythonWheelTest(t, "python_wheel_task", "12.2.x-scala2.12", true)
|
||||
}
|
||||
|
||||
func TestAccPythonWheelTaskDeployAndRunOnInteractiveCluster(t *testing.T) {
|
||||
_, wt := acc.WorkspaceTest(t)
|
||||
|
||||
if testutil.IsAWSCloud(wt.T) {
|
||||
t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters")
|
||||
}
|
||||
|
||||
runPythonWheelTest(t, "python_wheel_task_with_cluster", defaultSparkVersion, false)
|
||||
}
|
||||
|
|
|
@ -30,10 +30,12 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) {
|
|||
dir := wt.TemporaryWorkspaceDir("dashboard-assumptions-")
|
||||
|
||||
dashboard, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{
|
||||
DisplayName: dashboardName,
|
||||
ParentPath: dir,
|
||||
SerializedDashboard: string(dashboardPayload),
|
||||
WarehouseId: warehouseId,
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
DisplayName: dashboardName,
|
||||
ParentPath: dir,
|
||||
SerializedDashboard: string(dashboardPayload),
|
||||
WarehouseId: warehouseId,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
t.Logf("Dashboard ID (per Lakeview API): %s", dashboard.DashboardId)
|
||||
|
@ -62,9 +64,11 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) {
|
|||
// Try to overwrite the dashboard via the Lakeview API (and expect failure).
|
||||
{
|
||||
_, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{
|
||||
DisplayName: dashboardName,
|
||||
ParentPath: dir,
|
||||
SerializedDashboard: string(dashboardPayload),
|
||||
Dashboard: &dashboards.Dashboard{
|
||||
DisplayName: dashboardName,
|
||||
ParentPath: dir,
|
||||
SerializedDashboard: string(dashboardPayload),
|
||||
},
|
||||
})
|
||||
require.ErrorIs(t, err, apierr.ErrResourceAlreadyExists)
|
||||
}
|
||||
|
|
|
@ -39,7 +39,7 @@ func (f filerTest) assertContents(ctx context.Context, name string, contents str
|
|||
assert.Equal(f, contents, body.String())
|
||||
}
|
||||
|
||||
func (f filerTest) assertContentsJupyter(ctx context.Context, name string) {
|
||||
func (f filerTest) assertContentsJupyter(ctx context.Context, name string, language string) {
|
||||
reader, err := f.Read(ctx, name)
|
||||
if !assert.NoError(f, err) {
|
||||
return
|
||||
|
@ -62,6 +62,7 @@ func (f filerTest) assertContentsJupyter(ctx context.Context, name string) {
|
|||
// Since a roundtrip to the workspace changes a Jupyter notebook's payload,
|
||||
// the best we can do is assert that the nbformat is correct.
|
||||
assert.EqualValues(f, 4, actual["nbformat"])
|
||||
assert.Equal(f, language, actual["metadata"].(map[string]any)["language_info"].(map[string]any)["name"])
|
||||
}
|
||||
|
||||
func (f filerTest) assertNotExists(ctx context.Context, name string) {
|
||||
|
@ -360,146 +361,114 @@ func TestAccFilerReadDir(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
var jupyterNotebookContent1 = `
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"Jupyter Notebook Version 1\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
`
|
||||
|
||||
var jupyterNotebookContent2 = `
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"Jupyter Notebook Version 2\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
`
|
||||
|
||||
func TestAccFilerWorkspaceNotebookConflict(t *testing.T) {
|
||||
func TestAccFilerWorkspaceNotebook(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f, _ := setupWsfsFiler(t)
|
||||
ctx := context.Background()
|
||||
var err error
|
||||
|
||||
// Upload the notebooks
|
||||
err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('first upload'))"))
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('first upload'))"))
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "sqlNb.sql", strings.NewReader("-- Databricks notebook source\n SELECT \"first upload\""))
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "scalaNb.scala", strings.NewReader("// Databricks notebook source\n println(\"first upload\"))"))
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent1))
|
||||
require.NoError(t, err)
|
||||
tcases := []struct {
|
||||
name string
|
||||
nameWithoutExt string
|
||||
content1 string
|
||||
expected1 string
|
||||
content2 string
|
||||
expected2 string
|
||||
}{
|
||||
{
|
||||
name: "pyNb.py",
|
||||
nameWithoutExt: "pyNb",
|
||||
content1: "# Databricks notebook source\nprint('first upload')",
|
||||
expected1: "# Databricks notebook source\nprint('first upload')",
|
||||
content2: "# Databricks notebook source\nprint('second upload')",
|
||||
expected2: "# Databricks notebook source\nprint('second upload')",
|
||||
},
|
||||
{
|
||||
name: "rNb.r",
|
||||
nameWithoutExt: "rNb",
|
||||
content1: "# Databricks notebook source\nprint('first upload')",
|
||||
expected1: "# Databricks notebook source\nprint('first upload')",
|
||||
content2: "# Databricks notebook source\nprint('second upload')",
|
||||
expected2: "# Databricks notebook source\nprint('second upload')",
|
||||
},
|
||||
{
|
||||
name: "sqlNb.sql",
|
||||
nameWithoutExt: "sqlNb",
|
||||
content1: "-- Databricks notebook source\n SELECT \"first upload\"",
|
||||
expected1: "-- Databricks notebook source\n SELECT \"first upload\"",
|
||||
content2: "-- Databricks notebook source\n SELECT \"second upload\"",
|
||||
expected2: "-- Databricks notebook source\n SELECT \"second upload\"",
|
||||
},
|
||||
{
|
||||
name: "scalaNb.scala",
|
||||
nameWithoutExt: "scalaNb",
|
||||
content1: "// Databricks notebook source\n println(\"first upload\")",
|
||||
expected1: "// Databricks notebook source\n println(\"first upload\")",
|
||||
content2: "// Databricks notebook source\n println(\"second upload\")",
|
||||
expected2: "// Databricks notebook source\n println(\"second upload\")",
|
||||
},
|
||||
{
|
||||
name: "pythonJupyterNb.ipynb",
|
||||
nameWithoutExt: "pythonJupyterNb",
|
||||
content1: readFile(t, "testdata/notebooks/py1.ipynb"),
|
||||
expected1: "# Databricks notebook source\nprint(1)",
|
||||
content2: readFile(t, "testdata/notebooks/py2.ipynb"),
|
||||
expected2: "# Databricks notebook source\nprint(2)",
|
||||
},
|
||||
{
|
||||
name: "rJupyterNb.ipynb",
|
||||
nameWithoutExt: "rJupyterNb",
|
||||
content1: readFile(t, "testdata/notebooks/r1.ipynb"),
|
||||
expected1: "# Databricks notebook source\nprint(1)",
|
||||
content2: readFile(t, "testdata/notebooks/r2.ipynb"),
|
||||
expected2: "# Databricks notebook source\nprint(2)",
|
||||
},
|
||||
{
|
||||
name: "scalaJupyterNb.ipynb",
|
||||
nameWithoutExt: "scalaJupyterNb",
|
||||
content1: readFile(t, "testdata/notebooks/scala1.ipynb"),
|
||||
expected1: "// Databricks notebook source\nprintln(1)",
|
||||
content2: readFile(t, "testdata/notebooks/scala2.ipynb"),
|
||||
expected2: "// Databricks notebook source\nprintln(2)",
|
||||
},
|
||||
{
|
||||
name: "sqlJupyterNotebook.ipynb",
|
||||
nameWithoutExt: "sqlJupyterNotebook",
|
||||
content1: readFile(t, "testdata/notebooks/sql1.ipynb"),
|
||||
expected1: "-- Databricks notebook source\nselect 1",
|
||||
content2: readFile(t, "testdata/notebooks/sql2.ipynb"),
|
||||
expected2: "-- Databricks notebook source\nselect 2",
|
||||
},
|
||||
}
|
||||
|
||||
// Assert contents after initial upload
|
||||
filerTest{t, f}.assertContents(ctx, "pyNb", "# Databricks notebook source\nprint('first upload'))")
|
||||
filerTest{t, f}.assertContents(ctx, "rNb", "# Databricks notebook source\nprint('first upload'))")
|
||||
filerTest{t, f}.assertContents(ctx, "sqlNb", "-- Databricks notebook source\n SELECT \"first upload\"")
|
||||
filerTest{t, f}.assertContents(ctx, "scalaNb", "// Databricks notebook source\n println(\"first upload\"))")
|
||||
filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 1\")")
|
||||
for _, tc := range tcases {
|
||||
f, _ := setupWsfsFiler(t)
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Assert uploading a second time fails due to overwrite mode missing
|
||||
err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('second upload'))"))
|
||||
assert.ErrorIs(t, err, fs.ErrExist)
|
||||
assert.Regexp(t, regexp.MustCompile(`file already exists: .*/pyNb$`), err.Error())
|
||||
// Upload the notebook
|
||||
err = f.Write(ctx, tc.name, strings.NewReader(tc.content1))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('second upload'))"))
|
||||
assert.ErrorIs(t, err, fs.ErrExist)
|
||||
assert.Regexp(t, regexp.MustCompile(`file already exists: .*/rNb$`), err.Error())
|
||||
// Assert contents after initial upload. Note that we expect the content
|
||||
// for jupyter notebooks to be of type source because the workspace files
|
||||
// client always uses the source format to read notebooks from the workspace.
|
||||
filerTest{t, f}.assertContents(ctx, tc.nameWithoutExt, tc.expected1)
|
||||
|
||||
err = f.Write(ctx, "sqlNb.sql", strings.NewReader("# Databricks notebook source\n SELECT \"second upload\")"))
|
||||
assert.ErrorIs(t, err, fs.ErrExist)
|
||||
assert.Regexp(t, regexp.MustCompile(`file already exists: .*/sqlNb$`), err.Error())
|
||||
// Assert uploading a second time fails due to overwrite mode missing
|
||||
err = f.Write(ctx, tc.name, strings.NewReader(tc.content2))
|
||||
assert.ErrorIs(t, err, fs.ErrExist)
|
||||
assert.Regexp(t, regexp.MustCompile(`file already exists: .*/`+tc.nameWithoutExt+`$`), err.Error())
|
||||
|
||||
err = f.Write(ctx, "scalaNb.scala", strings.NewReader("# Databricks notebook source\n println(\"second upload\"))"))
|
||||
assert.ErrorIs(t, err, fs.ErrExist)
|
||||
assert.Regexp(t, regexp.MustCompile(`file already exists: .*/scalaNb$`), err.Error())
|
||||
// Try uploading the notebook again with overwrite flag. This time it should succeed.
|
||||
err = f.Write(ctx, tc.name, strings.NewReader(tc.content2), filer.OverwriteIfExists)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent2))
|
||||
assert.ErrorIs(t, err, fs.ErrExist)
|
||||
assert.Regexp(t, regexp.MustCompile(`file already exists: .*/jupyterNb$`), err.Error())
|
||||
}
|
||||
// Assert contents after second upload
|
||||
filerTest{t, f}.assertContents(ctx, tc.nameWithoutExt, tc.expected2)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccFilerWorkspaceNotebookWithOverwriteFlag(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f, _ := setupWsfsFiler(t)
|
||||
ctx := context.Background()
|
||||
var err error
|
||||
|
||||
// Upload notebooks
|
||||
err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('first upload'))"))
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('first upload'))"))
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "sqlNb.sql", strings.NewReader("-- Databricks notebook source\n SELECT \"first upload\""))
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "scalaNb.scala", strings.NewReader("// Databricks notebook source\n println(\"first upload\"))"))
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent1))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert contents after initial upload
|
||||
filerTest{t, f}.assertContents(ctx, "pyNb", "# Databricks notebook source\nprint('first upload'))")
|
||||
filerTest{t, f}.assertContents(ctx, "rNb", "# Databricks notebook source\nprint('first upload'))")
|
||||
filerTest{t, f}.assertContents(ctx, "sqlNb", "-- Databricks notebook source\n SELECT \"first upload\"")
|
||||
filerTest{t, f}.assertContents(ctx, "scalaNb", "// Databricks notebook source\n println(\"first upload\"))")
|
||||
filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 1\")")
|
||||
|
||||
// Upload notebooks a second time, overwriting the initial uploads
|
||||
err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('second upload'))"), filer.OverwriteIfExists)
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('second upload'))"), filer.OverwriteIfExists)
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "sqlNb.sql", strings.NewReader("-- Databricks notebook source\n SELECT \"second upload\""), filer.OverwriteIfExists)
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "scalaNb.scala", strings.NewReader("// Databricks notebook source\n println(\"second upload\"))"), filer.OverwriteIfExists)
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent2), filer.OverwriteIfExists)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert contents have been overwritten
|
||||
filerTest{t, f}.assertContents(ctx, "pyNb", "# Databricks notebook source\nprint('second upload'))")
|
||||
filerTest{t, f}.assertContents(ctx, "rNb", "# Databricks notebook source\nprint('second upload'))")
|
||||
filerTest{t, f}.assertContents(ctx, "sqlNb", "-- Databricks notebook source\n SELECT \"second upload\"")
|
||||
filerTest{t, f}.assertContents(ctx, "scalaNb", "// Databricks notebook source\n println(\"second upload\"))")
|
||||
filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 2\")")
|
||||
}
|
||||
|
||||
func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) {
|
||||
|
@ -515,11 +484,13 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) {
|
|||
{"foo.r", "print('foo')"},
|
||||
{"foo.scala", "println('foo')"},
|
||||
{"foo.sql", "SELECT 'foo'"},
|
||||
{"jupyterNb.ipynb", jupyterNotebookContent1},
|
||||
{"jupyterNb2.ipynb", jupyterNotebookContent2},
|
||||
{"py1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")},
|
||||
{"pyNb.py", "# Databricks notebook source\nprint('first upload'))"},
|
||||
{"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")},
|
||||
{"rNb.r", "# Databricks notebook source\nprint('first upload'))"},
|
||||
{"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")},
|
||||
{"scalaNb.scala", "// Databricks notebook source\n println(\"first upload\"))"},
|
||||
{"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")},
|
||||
{"sqlNb.sql", "-- Databricks notebook source\n SELECT \"first upload\""},
|
||||
}
|
||||
|
||||
|
@ -554,11 +525,13 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) {
|
|||
"foo.r",
|
||||
"foo.scala",
|
||||
"foo.sql",
|
||||
"jupyterNb.ipynb",
|
||||
"jupyterNb2.ipynb",
|
||||
"py1.ipynb",
|
||||
"pyNb.py",
|
||||
"r1.ipynb",
|
||||
"rNb.r",
|
||||
"scala1.ipynb",
|
||||
"scalaNb.scala",
|
||||
"sql1.ipynb",
|
||||
"sqlNb.sql",
|
||||
}, names)
|
||||
|
||||
|
@ -582,7 +555,10 @@ func setupFilerWithExtensionsTest(t *testing.T) filer.Filer {
|
|||
}{
|
||||
{"foo.py", "# Databricks notebook source\nprint('first upload'))"},
|
||||
{"bar.py", "print('foo')"},
|
||||
{"jupyter.ipynb", jupyterNotebookContent1},
|
||||
{"p1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")},
|
||||
{"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")},
|
||||
{"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")},
|
||||
{"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")},
|
||||
{"pretender", "not a notebook"},
|
||||
{"dir/file.txt", "file content"},
|
||||
{"scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')"},
|
||||
|
@ -608,11 +584,15 @@ func TestAccFilerWorkspaceFilesExtensionsRead(t *testing.T) {
|
|||
// Read contents of test fixtures as a sanity check.
|
||||
filerTest{t, wf}.assertContents(ctx, "foo.py", "# Databricks notebook source\nprint('first upload'))")
|
||||
filerTest{t, wf}.assertContents(ctx, "bar.py", "print('foo')")
|
||||
filerTest{t, wf}.assertContentsJupyter(ctx, "jupyter.ipynb")
|
||||
filerTest{t, wf}.assertContents(ctx, "dir/file.txt", "file content")
|
||||
filerTest{t, wf}.assertContents(ctx, "scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')")
|
||||
filerTest{t, wf}.assertContents(ctx, "pretender", "not a notebook")
|
||||
|
||||
filerTest{t, wf}.assertContentsJupyter(ctx, "p1.ipynb", "python")
|
||||
filerTest{t, wf}.assertContentsJupyter(ctx, "r1.ipynb", "r")
|
||||
filerTest{t, wf}.assertContentsJupyter(ctx, "scala1.ipynb", "scala")
|
||||
filerTest{t, wf}.assertContentsJupyter(ctx, "sql1.ipynb", "sql")
|
||||
|
||||
// Read non-existent file
|
||||
_, err := wf.Read(ctx, "non-existent.py")
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
|
@ -638,35 +618,41 @@ func TestAccFilerWorkspaceFilesExtensionsDelete(t *testing.T) {
|
|||
ctx := context.Background()
|
||||
wf := setupFilerWithExtensionsTest(t)
|
||||
|
||||
// Delete notebook
|
||||
err := wf.Delete(ctx, "foo.py")
|
||||
require.NoError(t, err)
|
||||
filerTest{t, wf}.assertNotExists(ctx, "foo.py")
|
||||
for _, fileName := range []string{
|
||||
// notebook
|
||||
"foo.py",
|
||||
// file
|
||||
"bar.py",
|
||||
// python jupyter notebook
|
||||
"p1.ipynb",
|
||||
// R jupyter notebook
|
||||
"r1.ipynb",
|
||||
// Scala jupyter notebook
|
||||
"scala1.ipynb",
|
||||
// SQL jupyter notebook
|
||||
"sql1.ipynb",
|
||||
} {
|
||||
err := wf.Delete(ctx, fileName)
|
||||
require.NoError(t, err)
|
||||
filerTest{t, wf}.assertNotExists(ctx, fileName)
|
||||
}
|
||||
|
||||
// Delete file
|
||||
err = wf.Delete(ctx, "bar.py")
|
||||
require.NoError(t, err)
|
||||
filerTest{t, wf}.assertNotExists(ctx, "bar.py")
|
||||
|
||||
// Delete jupyter notebook
|
||||
err = wf.Delete(ctx, "jupyter.ipynb")
|
||||
require.NoError(t, err)
|
||||
filerTest{t, wf}.assertNotExists(ctx, "jupyter.ipynb")
|
||||
|
||||
// Delete non-existent file
|
||||
err = wf.Delete(ctx, "non-existent.py")
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
|
||||
// Ensure we do not delete a file as a notebook
|
||||
err = wf.Delete(ctx, "pretender.py")
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
|
||||
// Ensure we do not delete a Scala notebook as a Python notebook
|
||||
_, err = wf.Read(ctx, "scala-notebook.py")
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
for _, fileName := range []string{
|
||||
// do not delete non-existent file
|
||||
"non-existent.py",
|
||||
// do not delete a file assuming it is a notebook and stripping the extension
|
||||
"pretender.py",
|
||||
// do not delete a Scala notebook as a Python notebook
|
||||
"scala-notebook.py",
|
||||
// do not delete a file assuming it is a Jupyter notebook and stripping the extension
|
||||
"pretender.ipynb",
|
||||
} {
|
||||
err := wf.Delete(ctx, fileName)
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
}
|
||||
|
||||
// Delete directory
|
||||
err = wf.Delete(ctx, "dir")
|
||||
err := wf.Delete(ctx, "dir")
|
||||
assert.ErrorIs(t, err, fs.ErrInvalid)
|
||||
|
||||
// Delete directory recursively
|
||||
|
@ -681,44 +667,45 @@ func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) {
|
|||
ctx := context.Background()
|
||||
wf := setupFilerWithExtensionsTest(t)
|
||||
|
||||
// Stat on a notebook
|
||||
info, err := wf.Stat(ctx, "foo.py")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "foo.py", info.Name())
|
||||
assert.False(t, info.IsDir())
|
||||
|
||||
// Stat on a file
|
||||
info, err = wf.Stat(ctx, "bar.py")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "bar.py", info.Name())
|
||||
assert.False(t, info.IsDir())
|
||||
|
||||
// Stat on a Jupyter notebook
|
||||
info, err = wf.Stat(ctx, "jupyter.ipynb")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "jupyter.ipynb", info.Name())
|
||||
assert.False(t, info.IsDir())
|
||||
for _, fileName := range []string{
|
||||
// notebook
|
||||
"foo.py",
|
||||
// file
|
||||
"bar.py",
|
||||
// python jupyter notebook
|
||||
"p1.ipynb",
|
||||
// R jupyter notebook
|
||||
"r1.ipynb",
|
||||
// Scala jupyter notebook
|
||||
"scala1.ipynb",
|
||||
// SQL jupyter notebook
|
||||
"sql1.ipynb",
|
||||
} {
|
||||
info, err := wf.Stat(ctx, fileName)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, fileName, info.Name())
|
||||
assert.False(t, info.IsDir())
|
||||
}
|
||||
|
||||
// Stat on a directory
|
||||
info, err = wf.Stat(ctx, "dir")
|
||||
info, err := wf.Stat(ctx, "dir")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "dir", info.Name())
|
||||
assert.True(t, info.IsDir())
|
||||
|
||||
// Stat on a non-existent file
|
||||
_, err = wf.Stat(ctx, "non-existent.py")
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
|
||||
// Ensure we do not stat a file as a notebook
|
||||
_, err = wf.Stat(ctx, "pretender.py")
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
|
||||
// Ensure we do not stat a Scala notebook as a Python notebook
|
||||
_, err = wf.Stat(ctx, "scala-notebook.py")
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
|
||||
_, err = wf.Stat(ctx, "pretender.ipynb")
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
for _, fileName := range []string{
|
||||
// non-existent file
|
||||
"non-existent.py",
|
||||
// do not stat a file assuming it is a notebook and stripping the extension
|
||||
"pretender.py",
|
||||
// do not stat a Scala notebook as a Python notebook
|
||||
"scala-notebook.py",
|
||||
// do not read a regular file assuming it is a Jupyter notebook and stripping the extension
|
||||
"pretender.ipynb",
|
||||
} {
|
||||
_, err := wf.Stat(ctx, fileName)
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) {
|
||||
|
@ -739,32 +726,115 @@ func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) {
|
|||
func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
wf, _ := setupWsfsExtensionsFiler(t)
|
||||
// Case 1: Writing source notebooks.
|
||||
for _, tc := range []struct {
|
||||
language string
|
||||
sourceName string
|
||||
sourceContent string
|
||||
jupyterName string
|
||||
jupyterContent string
|
||||
}{
|
||||
{
|
||||
language: "python",
|
||||
sourceName: "foo.py",
|
||||
sourceContent: "# Databricks notebook source\nprint('foo')",
|
||||
jupyterName: "foo.ipynb",
|
||||
},
|
||||
{
|
||||
language: "r",
|
||||
sourceName: "foo.r",
|
||||
sourceContent: "# Databricks notebook source\nprint('foo')",
|
||||
jupyterName: "foo.ipynb",
|
||||
},
|
||||
{
|
||||
language: "scala",
|
||||
sourceName: "foo.scala",
|
||||
sourceContent: "// Databricks notebook source\nprintln('foo')",
|
||||
jupyterName: "foo.ipynb",
|
||||
},
|
||||
{
|
||||
language: "sql",
|
||||
sourceName: "foo.sql",
|
||||
sourceContent: "-- Databricks notebook source\nselect 'foo'",
|
||||
jupyterName: "foo.ipynb",
|
||||
},
|
||||
} {
|
||||
t.Run("source_"+tc.language, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Case 1: Source Notebook
err := wf.Write(ctx, "foo.py", strings.NewReader("# Databricks notebook source\nprint('foo')"))
require.NoError(t, err)
ctx := context.Background()
wf, _ := setupWsfsExtensionsFiler(t)

// The source notebook should exist but not the Jupyter notebook
filerTest{t, wf}.assertContents(ctx, "foo.py", "# Databricks notebook source\nprint('foo')")
_, err = wf.Stat(ctx, "foo.ipynb")
assert.ErrorIs(t, err, fs.ErrNotExist)
_, err = wf.Read(ctx, "foo.ipynb")
assert.ErrorIs(t, err, fs.ErrNotExist)
err = wf.Delete(ctx, "foo.ipynb")
assert.ErrorIs(t, err, fs.ErrNotExist)
err := wf.Write(ctx, tc.sourceName, strings.NewReader(tc.sourceContent))
require.NoError(t, err)

// Case 2: Jupyter Notebook
err = wf.Write(ctx, "bar.ipynb", strings.NewReader(jupyterNotebookContent1))
require.NoError(t, err)
// Assert on the content of the source notebook that's been written.
filerTest{t, wf}.assertContents(ctx, tc.sourceName, tc.sourceContent)

// The Jupyter notebook should exist but not the source notebook
filerTest{t, wf}.assertContentsJupyter(ctx, "bar.ipynb")
_, err = wf.Stat(ctx, "bar.py")
assert.ErrorIs(t, err, fs.ErrNotExist)
_, err = wf.Read(ctx, "bar.py")
assert.ErrorIs(t, err, fs.ErrNotExist)
err = wf.Delete(ctx, "bar.py")
assert.ErrorIs(t, err, fs.ErrNotExist)
// Ensure that the source notebook is not read when the name contains
// the .ipynb extension.
_, err = wf.Stat(ctx, tc.jupyterName)
assert.ErrorIs(t, err, fs.ErrNotExist)
_, err = wf.Read(ctx, tc.jupyterName)
assert.ErrorIs(t, err, fs.ErrNotExist)
err = wf.Delete(ctx, tc.jupyterName)
assert.ErrorIs(t, err, fs.ErrNotExist)
})
}

// Case 2: Writing Jupyter notebooks.
for _, tc := range []struct {
language string
sourceName string
jupyterName string
jupyterContent string
}{
{
language: "python",
sourceName: "foo.py",
jupyterName: "foo.ipynb",
jupyterContent: readFile(t, "testdata/notebooks/py1.ipynb"),
},
{
language: "r",
sourceName: "foo.r",
jupyterName: "foo.ipynb",
jupyterContent: readFile(t, "testdata/notebooks/r1.ipynb"),
},
{
language: "scala",
sourceName: "foo.scala",
jupyterName: "foo.ipynb",
jupyterContent: readFile(t, "testdata/notebooks/scala1.ipynb"),
},
{
language: "sql",
sourceName: "foo.sql",
jupyterName: "foo.ipynb",
jupyterContent: readFile(t, "testdata/notebooks/sql1.ipynb"),
},
} {
t.Run("jupyter_"+tc.language, func(t *testing.T) {
t.Parallel()

ctx := context.Background()
wf, _ := setupWsfsExtensionsFiler(t)

err := wf.Write(ctx, tc.jupyterName, strings.NewReader(tc.jupyterContent))
require.NoError(t, err)

// Assert that the written notebook is jupyter and has the correct
// language_info metadata set.
filerTest{t, wf}.assertContentsJupyter(ctx, tc.jupyterName, tc.language)

// Ensure that the Jupyter notebook is not read when the name does not
// contain the .ipynb extension.
_, err = wf.Stat(ctx, tc.sourceName)
assert.ErrorIs(t, err, fs.ErrNotExist)
_, err = wf.Read(ctx, tc.sourceName)
assert.ErrorIs(t, err, fs.ErrNotExist)
err = wf.Delete(ctx, tc.sourceName)
assert.ErrorIs(t, err, fs.ErrNotExist)
})
}
}
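As a side note, the behavior exercised by these tests can also be driven directly through the filer API. The following is a minimal sketch, not part of the diff, assuming an authenticated workspace client picked up from the environment; the rootDir value is hypothetical, and the sketch only uses calls that appear in the snippets above (filer.NewWorkspaceFilesExtensionsClient, Write, Stat).

package main

import (
	"context"
	"errors"
	"fmt"
	"io/fs"
	"strings"

	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/databricks-sdk-go"
)

func main() {
	ctx := context.Background()

	// Assumption: workspace credentials are resolved from the environment.
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Assumption: rootDir is an existing workspace directory.
	rootDir := "/Workspace/Users/someone@example.com/scratch"
	wf, err := filer.NewWorkspaceFilesExtensionsClient(w, rootDir)
	if err != nil {
		panic(err)
	}

	// Writing a source notebook exposes it under its source name only.
	err = wf.Write(ctx, "foo.py", strings.NewReader("# Databricks notebook source\nprint('foo')"))
	if err != nil {
		panic(err)
	}

	// The same notebook is not visible under the .ipynb name.
	if _, err := wf.Stat(ctx, "foo.ipynb"); errors.Is(err, fs.ErrNotExist) {
		fmt.Println("foo.ipynb is not visible, as expected")
	}
}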
@@ -352,6 +352,13 @@ func RequireErrorRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buffer,
return stdout, stderr, err
}

func readFile(t *testing.T, name string) string {
b, err := os.ReadFile(name)
require.NoError(t, err)

return string(b)
}

func writeFile(t *testing.T, name string, body string) string {
f, err := os.Create(filepath.Join(t.TempDir(), name))
require.NoError(t, err)
@@ -562,12 +569,10 @@ func setupLocalFiler(t *testing.T) (filer.Filer, string) {
}

func setupWsfsFiler(t *testing.T) (filer.Filer, string) {
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
ctx, wt := acc.WorkspaceTest(t)

ctx := context.Background()
w := databricks.Must(databricks.NewWorkspaceClient())
tmpdir := TemporaryWorkspaceDir(t, w)
f, err := filer.NewWorkspaceFilesClient(w, tmpdir)
tmpdir := TemporaryWorkspaceDir(t, wt.W)
f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir)
require.NoError(t, err)

// Check if we can use this API here, skip test if we cannot.
@@ -581,11 +586,10 @@ func setupWsfsFiler(t *testing.T) (filer.Filer, string) {
}

func setupWsfsExtensionsFiler(t *testing.T) (filer.Filer, string) {
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
_, wt := acc.WorkspaceTest(t)

w := databricks.Must(databricks.NewWorkspaceClient())
tmpdir := TemporaryWorkspaceDir(t, w)
f, err := filer.NewWorkspaceFilesExtensionsClient(w, tmpdir)
tmpdir := TemporaryWorkspaceDir(t, wt.W)
f, err := filer.NewWorkspaceFilesExtensionsClient(wt.W, tmpdir)
require.NoError(t, err)

return f, tmpdir
@@ -0,0 +1,27 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(1)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python",
"version": "3.8.13"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,27 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(2)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python",
"version": "3.8.13"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,25 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(1)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "R",
"language": "R",
"name": "ir"
},
"language_info": {
"name": "R"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,29 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "r"
}
},
"outputs": [],
"source": [
"print(2)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "R",
"language": "R",
"name": "ir"
},
"language_info": {
"name": "R"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,38 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1\n"
]
}
],
"source": [
"println(1)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Scala",
"language": "scala",
"name": "scala"
},
"language_info": {
"codemirror_mode": "text/x-scala",
"file_extension": ".sc",
"mimetype": "text/x-scala",
"name": "scala",
"nbconvert_exporter": "script",
"version": "2.13.14"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,38 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1\n"
]
}
],
"source": [
"println(2)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Scala",
"language": "scala",
"name": "scala"
},
"language_info": {
"codemirror_mode": "text/x-scala",
"file_extension": ".sc",
"mimetype": "text/x-scala",
"name": "scala",
"nbconvert_exporter": "script",
"version": "2.13.14"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,20 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"select 1"
]
}
],
"metadata": {
"language_info": {
"name": "sql"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,20 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"select 2"
]
}
],
"metadata": {
"language_info": {
"name": "sql"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"net"
"net/url"
"strings"
"time"

@@ -143,6 +144,26 @@ func (a *PersistentAuth) Challenge(ctx context.Context) error {
return nil
}

// This function cleans up the host URL by only retaining the scheme and the host.
// This function thus removes any path, query arguments, or fragments from the URL.
func (a *PersistentAuth) cleanHost() {
parsedHost, err := url.Parse(a.Host)
if err != nil {
return
}
// when either host or scheme is empty, we don't want to clean it. This is because
// the Go url library parses a raw "abc" string as the path of a URL, and cleaning
// it would thus return an empty string.
if parsedHost.Host == "" || parsedHost.Scheme == "" {
return
}
host := url.URL{
Scheme: parsedHost.Scheme,
Host: parsedHost.Host,
}
a.Host = host.String()
}

func (a *PersistentAuth) init(ctx context.Context) error {
if a.Host == "" && a.AccountID == "" {
return ErrFetchCredentials
@@ -156,6 +177,9 @@ func (a *PersistentAuth) init(ctx context.Context) error {
if a.browser == nil {
a.browser = browser.OpenURL
}

a.cleanHost()

// try acquire listener, which we also use as a machine-local
// exclusive lock to prevent token cache corruption in the scope
// of developer machine, where this command runs.
@@ -228,3 +228,37 @@ func TestChallengeFailed(t *testing.T) {
assert.EqualError(t, err, "authorize: access_denied: Policy evaluation failed for this request")
})
}

func TestPersistentAuthCleanHost(t *testing.T) {
for _, tcases := range []struct {
in string
out string
}{
{"https://example.com", "https://example.com"},
{"https://example.com/", "https://example.com"},
{"https://example.com/path", "https://example.com"},
{"https://example.com/path/subpath", "https://example.com"},
{"https://example.com/path?query=1", "https://example.com"},
{"https://example.com/path?query=1&other=2", "https://example.com"},
{"https://example.com/path#fragment", "https://example.com"},
{"https://example.com/path?query=1#fragment", "https://example.com"},
{"https://example.com/path?query=1&other=2#fragment", "https://example.com"},
{"https://example.com/path/subpath?query=1", "https://example.com"},
{"https://example.com/path/subpath?query=1&other=2", "https://example.com"},
{"https://example.com/path/subpath#fragment", "https://example.com"},
{"https://example.com/path/subpath?query=1#fragment", "https://example.com"},
{"https://example.com/path/subpath?query=1&other=2#fragment", "https://example.com"},
{"https://example.com/path?query=1%20value&other=2%20value", "https://example.com"},
{"http://example.com/path/subpath?query=1%20value&other=2%20value", "http://example.com"},

// URLs without scheme should be left as is
{"abc", "abc"},
{"abc.com/def", "abc.com/def"},
} {
p := &PersistentAuth{
Host: tcases.in,
}
p.cleanHost()
assert.Equal(t, tcases.out, p.Host)
}
}
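For reference, the "URLs without scheme" cases above follow from how net/url parses bare strings. A small standalone sketch, illustrative only and not part of the change:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A bare string such as "abc" parses as a path: Scheme and Host are empty,
	// which is why cleanHost leaves it untouched instead of emptying it.
	u, _ := url.Parse("abc")
	fmt.Printf("scheme=%q host=%q path=%q\n", u.Scheme, u.Host, u.Path)

	// A full URL keeps only its scheme and host after cleaning.
	u, _ = url.Parse("https://example.com/path?query=1#fragment")
	clean := url.URL{Scheme: u.Scheme, Host: u.Host}
	fmt.Println(clean.String()) // https://example.com
}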
@@ -0,0 +1,49 @@
package dbr

import "context"

// key is a package-local type to use for context keys.
//
// Using an unexported type for context keys prevents key collisions across
// packages since external packages cannot create values of this type.
type key int

const (
// dbrKey is the context key for the detection result.
// The value of 1 is arbitrary and can be any number.
// Other keys in the same package must have different values.
dbrKey = key(1)
)

// DetectRuntime detects whether or not the current
// process is running inside a Databricks Runtime environment.
// It returns a new context with the detection result set.
func DetectRuntime(ctx context.Context) context.Context {
if v := ctx.Value(dbrKey); v != nil {
panic("dbr.DetectRuntime called twice on the same context")
}
return context.WithValue(ctx, dbrKey, detect(ctx))
}

// MockRuntime is a helper function to mock the detection result.
// It returns a new context with the detection result set.
func MockRuntime(ctx context.Context, b bool) context.Context {
if v := ctx.Value(dbrKey); v != nil {
panic("dbr.MockRuntime called twice on the same context")
}
return context.WithValue(ctx, dbrKey, b)
}

// RunsOnRuntime returns the detection result from the context.
// It expects a context returned by [DetectRuntime] or [MockRuntime].
//
// We store this value in a context to avoid having to use either
// a global variable, passing a boolean around everywhere, or
// performing the same detection multiple times.
func RunsOnRuntime(ctx context.Context) bool {
v := ctx.Value(dbrKey)
if v == nil {
panic("dbr.RunsOnRuntime called without calling dbr.DetectRuntime first")
}
return v.(bool)
}
@@ -0,0 +1,59 @@
package dbr

import (
"context"
"testing"

"github.com/stretchr/testify/assert"
)

func TestContext_DetectRuntimePanics(t *testing.T) {
ctx := context.Background()

// Run detection.
ctx = DetectRuntime(ctx)

// Expect a panic if the detection is run twice.
assert.Panics(t, func() {
ctx = DetectRuntime(ctx)
})
}

func TestContext_MockRuntimePanics(t *testing.T) {
ctx := context.Background()

// Run detection.
ctx = MockRuntime(ctx, true)

// Expect a panic if the mock function is run twice.
assert.Panics(t, func() {
MockRuntime(ctx, true)
})
}

func TestContext_RunsOnRuntimePanics(t *testing.T) {
ctx := context.Background()

// Expect a panic if the detection is not run.
assert.Panics(t, func() {
RunsOnRuntime(ctx)
})
}

func TestContext_RunsOnRuntime(t *testing.T) {
ctx := context.Background()

// Run detection.
ctx = DetectRuntime(ctx)

// Expect no panic because detection has run.
assert.NotPanics(t, func() {
RunsOnRuntime(ctx)
})
}

func TestContext_RunsOnRuntimeWithMock(t *testing.T) {
ctx := context.Background()
assert.True(t, RunsOnRuntime(MockRuntime(ctx, true)))
assert.False(t, RunsOnRuntime(MockRuntime(ctx, false)))
}
@@ -0,0 +1,35 @@
package dbr

import (
"context"
"os"
"runtime"

"github.com/databricks/cli/libs/env"
)

// Store [os.Stat] in a variable to allow mocking in tests.
var statFunc = os.Stat

// detect returns true if the current process is running on a Databricks Runtime.
// Its return value is meant to be cached in the context.
func detect(ctx context.Context) bool {
// Databricks Runtime implies Linux.
// Return early on other operating systems.
if runtime.GOOS != "linux" {
return false
}

// Databricks Runtime always has the DATABRICKS_RUNTIME_VERSION environment variable set.
if value, ok := env.Lookup(ctx, "DATABRICKS_RUNTIME_VERSION"); !ok || value == "" {
return false
}

// Expect to see a "/databricks" directory.
if fi, err := statFunc("/databricks"); err != nil || !fi.IsDir() {
return false
}

// All checks passed.
return true
}
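A minimal usage sketch of the new dbr package, assuming it lives at github.com/databricks/cli/libs/dbr (inferred from the libs/env import above, so treat the path as an assumption); it shows the intended pattern of running detection once near startup and reading the cached result downstream:

package main

import (
	"context"
	"fmt"

	"github.com/databricks/cli/libs/dbr"
)

func main() {
	// Run detection exactly once, near process startup.
	ctx := dbr.DetectRuntime(context.Background())

	// Anywhere downstream, read the cached result from the context.
	if dbr.RunsOnRuntime(ctx) {
		fmt.Println("running on a Databricks Runtime")
	} else {
		fmt.Println("not running on a Databricks Runtime")
	}
}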