From 84b47745e451f6552465243665ad6c897c55ae5e Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Fri, 23 Aug 2024 12:13:21 +0200 Subject: [PATCH 1/3] Ignore CLI version check on development builds of the CLI (#1714) ## Changes This change makes sure we ignore the CLI version check on development builds of the CLI. Before: ``` $ cat databricks.yml | grep cli_version databricks_cli_version: ">= 0.223.1" $ cli bundle deploy Error: Databricks CLI version constraint not satisfied. Required: >= 0.223.1, current: 0.0.0-dev+06b169284737 ``` After: ``` ... $ cli bundle deploy ... Warning: Ignoring Databricks CLI version constraint for development build. Required: >= 0.223.1, current: 0.0.0-dev+d52d6f08fcd5 ``` ## Tests --- bundle/config/mutator/verify_cli_version.go | 4 ++++ bundle/config/mutator/verify_cli_version_test.go | 7 ++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/bundle/config/mutator/verify_cli_version.go b/bundle/config/mutator/verify_cli_version.go index 9c32fcc9d..279af44e6 100644 --- a/bundle/config/mutator/verify_cli_version.go +++ b/bundle/config/mutator/verify_cli_version.go @@ -40,6 +40,10 @@ func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Dia } if !c.Check(version) { + if version.Prerelease() == "dev" && version.Major() == 0 { + return diag.Warningf("Ignoring Databricks CLI version constraint for development build. Required: %s, current: %s", constraint, currentVersion) + } + return diag.Errorf("Databricks CLI version constraint not satisfied. Required: %s, current: %s", constraint, currentVersion) } diff --git a/bundle/config/mutator/verify_cli_version_test.go b/bundle/config/mutator/verify_cli_version_test.go index 24f656745..025461292 100644 --- a/bundle/config/mutator/verify_cli_version_test.go +++ b/bundle/config/mutator/verify_cli_version_test.go @@ -107,6 +107,11 @@ func TestVerifyCliVersion(t *testing.T) { constraint: "^0.100", expectedError: "invalid version constraint \"^0.100\" specified. Please specify the version constraint in the format (>=) 0.0.0(, <= 1.0.0)", }, + { + currentVersion: "0.0.0-dev+06b169284737", + constraint: ">= 0.100.0", + expectedError: "Ignoring Databricks CLI version constraint for development build. Required: >= 0.100.0", + }, } t.Cleanup(func() { @@ -130,7 +135,7 @@ func TestVerifyCliVersion(t *testing.T) { diags := bundle.Apply(context.Background(), b, VerifyCliVersion()) if tc.expectedError != "" { require.NotEmpty(t, diags) - require.Equal(t, tc.expectedError, diags.Error().Error()) + require.Contains(t, diags[0].Summary, tc.expectedError) } else { require.Empty(t, diags) } From 783e05c939a694fe722e52ddea9c48f0ea077181 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 26 Aug 2024 12:03:56 +0200 Subject: [PATCH 2/3] Do not treat empty path as a local path (#1717) ## Changes Fixes an issue introduced in https://github.com/databricks/cli/pull/1699 where PyPI packages were treated as local libraries. The reason is that `libraryPath` returned an empty string as the path for PyPI packages, and `IsLibraryLocal` then treated the empty string as a local path. Both functions are fixed in this PR.
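To illustrate the failure mode described above, here is a minimal sketch (illustrative only, not code from this PR; `naiveLibraryPath` is a hypothetical helper mirroring the pre-fix behavior). A PyPI library carries no file path, so a helper that only inspects the path fields falls through to an empty string, which a permissive local-path check then misclassifies as a local library:

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/compute"
)

// naiveLibraryPath mirrors the pre-fix behavior: it only inspects the
// file-based fields, so a PyPI library falls through to "".
func naiveLibraryPath(l *compute.Library) string {
	if l.Whl != "" {
		return l.Whl
	}
	if l.Jar != "" {
		return l.Jar
	}
	if l.Egg != "" {
		return l.Egg
	}
	if l.Requirements != "" {
		return l.Requirements
	}
	return ""
}

func main() {
	lib := &compute.Library{
		Pypi: &compute.PythonPyPiLibrary{Package: "requests==2.25.1"},
	}
	// Prints path: "" -- with the old check, this empty string was then
	// treated as a local path and the PyPI dependency as a local library.
	fmt.Printf("path: %q\n", naiveLibraryPath(lib))
}
```

With this PR, `libraryPath` instead returns an error for unsupported library types and `IsLibraryLocal` rejects empty strings, so PyPI dependencies are no longer picked up as local libraries.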
## Tests Added regression test --- bundle/libraries/helpers.go | 19 +++++++---- bundle/libraries/helpers_test.go | 28 +++++++++++++--- bundle/libraries/libraries.go | 7 +++- bundle/libraries/local_path.go | 4 +++ bundle/libraries/local_path_test.go | 1 + bundle/libraries/workspace_path.go | 4 +-- bundle/python/warning_test.go | 51 +++++++++++++++++++++++++++++ 7 files changed, 99 insertions(+), 15 deletions(-) diff --git a/bundle/libraries/helpers.go b/bundle/libraries/helpers.go index b7e707ccf..2149e5885 100644 --- a/bundle/libraries/helpers.go +++ b/bundle/libraries/helpers.go @@ -1,19 +1,24 @@ package libraries -import "github.com/databricks/databricks-sdk-go/service/compute" +import ( + "fmt" -func libraryPath(library *compute.Library) string { + "github.com/databricks/databricks-sdk-go/service/compute" +) + +func libraryPath(library *compute.Library) (string, error) { if library.Whl != "" { - return library.Whl + return library.Whl, nil } if library.Jar != "" { - return library.Jar + return library.Jar, nil } if library.Egg != "" { - return library.Egg + return library.Egg, nil } if library.Requirements != "" { - return library.Requirements + return library.Requirements, nil } - return "" + + return "", fmt.Errorf("not supported library type") } diff --git a/bundle/libraries/helpers_test.go b/bundle/libraries/helpers_test.go index e4bd32770..9d7e12ee5 100644 --- a/bundle/libraries/helpers_test.go +++ b/bundle/libraries/helpers_test.go @@ -10,9 +10,27 @@ import ( func TestLibraryPath(t *testing.T) { path := "/some/path" - assert.Equal(t, path, libraryPath(&compute.Library{Whl: path})) - assert.Equal(t, path, libraryPath(&compute.Library{Jar: path})) - assert.Equal(t, path, libraryPath(&compute.Library{Egg: path})) - assert.Equal(t, path, libraryPath(&compute.Library{Requirements: path})) - assert.Equal(t, "", libraryPath(&compute.Library{})) + p, err := libraryPath(&compute.Library{Whl: path}) + assert.Equal(t, path, p) + assert.Nil(t, err) + + p, err = libraryPath(&compute.Library{Jar: path}) + assert.Equal(t, path, p) + assert.Nil(t, err) + + p, err = libraryPath(&compute.Library{Egg: path}) + assert.Equal(t, path, p) + assert.Nil(t, err) + + p, err = libraryPath(&compute.Library{Requirements: path}) + assert.Equal(t, path, p) + assert.Nil(t, err) + + p, err = libraryPath(&compute.Library{}) + assert.Equal(t, "", p) + assert.NotNil(t, err) + + p, err = libraryPath(&compute.Library{Pypi: &compute.PythonPyPiLibrary{Package: "pypipackage"}}) + assert.Equal(t, "", p) + assert.NotNil(t, err) } diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index 33b848dd9..f75e23a8c 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -67,7 +67,12 @@ func FindTasksWithLocalLibraries(b *bundle.Bundle) []jobs.Task { func isTaskWithLocalLibraries(task jobs.Task) bool { for _, l := range task.Libraries { - if IsLibraryLocal(libraryPath(&l)) { + p, err := libraryPath(&l) + // If there's an error, skip the library because it's not of supported type + if err != nil { + continue + } + if IsLibraryLocal(p) { return true } } diff --git a/bundle/libraries/local_path.go b/bundle/libraries/local_path.go index 417bce10e..e49562405 100644 --- a/bundle/libraries/local_path.go +++ b/bundle/libraries/local_path.go @@ -43,6 +43,10 @@ func IsLocalPath(p string) bool { // We can't use IsLocalPath beacuse environment dependencies can be // a pypi package name which can be misinterpreted as a local path by IsLocalPath. 
func IsLibraryLocal(dep string) bool { + if dep == "" { + return false + } + possiblePrefixes := []string{ ".", } diff --git a/bundle/libraries/local_path_test.go b/bundle/libraries/local_path_test.go index 7f84b3244..667d64ec8 100644 --- a/bundle/libraries/local_path_test.go +++ b/bundle/libraries/local_path_test.go @@ -48,6 +48,7 @@ func TestIsLibraryLocal(t *testing.T) { {path: "../../local/*.whl", expected: true}, {path: "..\\..\\local\\*.whl", expected: true}, {path: "file://path/to/package/whl.whl", expected: true}, + {path: "", expected: false}, {path: "pypipackage", expected: false}, {path: "/Volumes/catalog/schema/volume/path.whl", expected: false}, {path: "/Workspace/my_project/dist.whl", expected: false}, diff --git a/bundle/libraries/workspace_path.go b/bundle/libraries/workspace_path.go index b08ca1616..126ad3f13 100644 --- a/bundle/libraries/workspace_path.go +++ b/bundle/libraries/workspace_path.go @@ -29,8 +29,8 @@ func IsWorkspacePath(path string) bool { // IsWorkspaceLibrary returns true if the specified library refers to a workspace path. func IsWorkspaceLibrary(library *compute.Library) bool { - path := libraryPath(library) - if path == "" { + path, err := libraryPath(library) + if err != nil { return false } diff --git a/bundle/python/warning_test.go b/bundle/python/warning_test.go index dd6397f78..b2296392b 100644 --- a/bundle/python/warning_test.go +++ b/bundle/python/warning_test.go @@ -223,6 +223,17 @@ func TestNoIncompatibleWheelTasks(t *testing.T) { {Whl: "./dist/test.whl"}, }, }, + { + TaskKey: "key7", + PythonWheelTask: &jobs.PythonWheelTask{}, + ExistingClusterId: "test-key-2", + Libraries: []compute.Library{ + {Whl: "signol_lib-0.4.4-20240822+prod-py3-none-any.whl"}, + {Pypi: &compute.PythonPyPiLibrary{ + Package: "requests==2.25.1", + }}, + }, + }, }, }, }, @@ -241,6 +252,46 @@ func TestNoIncompatibleWheelTasks(t *testing.T) { require.False(t, hasIncompatibleWheelTasks(context.Background(), b)) } +func TestTasksWithPyPiPackageAreCompatible(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + JobClusterKey: "cluster1", + NewCluster: compute.ClusterSpec{ + SparkVersion: "12.2.x-scala2.12", + }, + }, + }, + Tasks: []jobs.Task{ + { + TaskKey: "key1", + PythonWheelTask: &jobs.PythonWheelTask{}, + ExistingClusterId: "test-key-2", + Libraries: []compute.Library{ + {Pypi: &compute.PythonPyPiLibrary{ + Package: "requests==2.25.1", + }}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + require.False(t, hasIncompatibleWheelTasks(context.Background(), b)) +} + func TestNoWarningWhenPythonWheelWrapperIsOn(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ From 056d2032368ead1e3a7e65f9304508498bc53403 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 10:54:05 +0200 Subject: [PATCH 3/3] Bump github.com/databricks/databricks-sdk-go from 0.44.0 to 0.45.0 (#1719) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.44.0 to 0.45.0.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.45.0

0.45.0

Bug Fixes

  • Add INVALID_STATE to error code mapping (#1014).
  • Do not specify --tenant flag when fetching managed identity access token from the CLI (#1021).

Internal Changes

  • Add terraform aliases to Entity (#1017).
  • Added Service.NamedIdMap (#1016).
  • Fix billing test for budget configuration update (#1019).

API Changes:

OpenAPI SHA: 3eae49b444cac5a0118a3503e5b7ecef7f96527a, Date: 2024-08-21
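The API Changes list is collapsed in the quote above; the regenerated CLI commands further down in this patch wrap several newly exposed services (for example policy compliance and resource quotas). As a hedged illustration only (not taken from the release notes, and assuming default workspace authentication), calling the new ResourceQuotas service directly against SDK v0.45.0 could look roughly like this:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	// Reads credentials from the environment or ~/.databrickscfg.
	w := databricks.Must(databricks.NewWorkspaceClient())

	// List all resource quotas under the metastore; the service returns
	// a paginated iterator over quota entries.
	it := w.ResourceQuotas.ListQuotas(ctx, catalog.ListQuotasRequest{})
	for it.HasNext(ctx) {
		quota, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", quota)
	}
}
```

The iterator pattern here mirrors how the generated `resource-quotas list-quotas` command below renders results via `cmdio.RenderIterator`.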


Commits
  • 6d86788 [Release] Release v0.45.0 (#1023)
  • ba4489b [Fix] Do not specify --tenant flag when fetching managed identity access to...
  • f624809 [Internal] Fix billing test for budget configuration update (#1019)
  • 27a5055 [Internal] Add terraform aliases to Entity (#1017)
  • 382a38d [Internal] Added Service.NamedIdMap (#1016)
  • 1ef9931 [Fix] Add INVALID_STATE to error code mapping (#1014)
  • See full diff in compare view

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.44.0&new-version=0.45.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- .gitattributes | 3 + bundle/schema/docs/bundle_descriptions.json | 64 +++++ cmd/workspace/cmd.go | 6 + .../external-locations/external-locations.go | 2 + .../policy-compliance-for-clusters.go | 260 +++++++++++++++++ .../policy-compliance-for-jobs.go | 262 ++++++++++++++++++ cmd/workspace/query-history/query-history.go | 8 +- .../resource-quotas/resource-quotas.go | 168 +++++++++++ go.mod | 2 +- go.sum | 4 +- 11 files changed, 773 insertions(+), 8 deletions(-) create mode 100755 cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go create mode 100755 cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go create mode 100755 cmd/workspace/resource-quotas/resource-quotas.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index fef6f268b..8b01a2422 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -f98c07f9c71f579de65d2587bb0292f83d10e55d \ No newline at end of file +3eae49b444cac5a0118a3503e5b7ecef7f96527a \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index bdb3f3982..d82ab7696 100755 --- a/.gitattributes +++ b/.gitattributes @@ -75,6 +75,8 @@ cmd/workspace/online-tables/online-tables.go linguist-generated=true cmd/workspace/permission-migration/permission-migration.go linguist-generated=true cmd/workspace/permissions/permissions.go linguist-generated=true cmd/workspace/pipelines/pipelines.go linguist-generated=true +cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go linguist-generated=true +cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go linguist-generated=true cmd/workspace/policy-families/policy-families.go linguist-generated=true cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true @@ -94,6 +96,7 @@ cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=tr cmd/workspace/recipients/recipients.go linguist-generated=true cmd/workspace/registered-models/registered-models.go linguist-generated=true cmd/workspace/repos/repos.go linguist-generated=true +cmd/workspace/resource-quotas/resource-quotas.go linguist-generated=true cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go linguist-generated=true cmd/workspace/schemas/schemas.go linguist-generated=true cmd/workspace/secrets/secrets.go linguist-generated=true diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index d888b3663..908a1c2ba 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -85,6 +85,12 @@ "enabled": { "description": "" }, + "import": { + "description": "", + "items": { + "description": "" + } + }, "venv_path": { "description": "" } @@ -130,6 +136,29 @@ } } }, + "presets": { + "description": "", + "properties": { + "jobs_max_concurrent_runs": { + "description": "" + }, + "name_prefix": { + "description": "" + }, + "pipelines_development": { + "description": "" + }, + "tags": { + "description": "", + "additionalproperties": { + "description": "" + } + }, + "trigger_pause_status": { + "description": "" + } + } + }, "resources": { "description": "Collection of Databricks resources to deploy.", "properties": { @@ -3079,6 +3108,12 @@ 
"items": { "description": "" } + }, + "paths": { + "description": "", + "items": { + "description": "" + } } } }, @@ -3202,6 +3237,29 @@ } } }, + "presets": { + "description": "", + "properties": { + "jobs_max_concurrent_runs": { + "description": "" + }, + "name_prefix": { + "description": "" + }, + "pipelines_development": { + "description": "" + }, + "tags": { + "description": "", + "additionalproperties": { + "description": "" + } + }, + "trigger_pause_status": { + "description": "" + } + } + }, "resources": { "description": "Collection of Databricks resources to deploy.", "properties": { @@ -6151,6 +6209,12 @@ "items": { "description": "" } + }, + "paths": { + "description": "", + "items": { + "description": "" + } } } }, diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 75664c79c..11be8077a 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -44,6 +44,8 @@ import ( permission_migration "github.com/databricks/cli/cmd/workspace/permission-migration" permissions "github.com/databricks/cli/cmd/workspace/permissions" pipelines "github.com/databricks/cli/cmd/workspace/pipelines" + policy_compliance_for_clusters "github.com/databricks/cli/cmd/workspace/policy-compliance-for-clusters" + policy_compliance_for_jobs "github.com/databricks/cli/cmd/workspace/policy-compliance-for-jobs" policy_families "github.com/databricks/cli/cmd/workspace/policy-families" provider_exchange_filters "github.com/databricks/cli/cmd/workspace/provider-exchange-filters" provider_exchanges "github.com/databricks/cli/cmd/workspace/provider-exchanges" @@ -63,6 +65,7 @@ import ( recipients "github.com/databricks/cli/cmd/workspace/recipients" registered_models "github.com/databricks/cli/cmd/workspace/registered-models" repos "github.com/databricks/cli/cmd/workspace/repos" + resource_quotas "github.com/databricks/cli/cmd/workspace/resource-quotas" schemas "github.com/databricks/cli/cmd/workspace/schemas" secrets "github.com/databricks/cli/cmd/workspace/secrets" service_principals "github.com/databricks/cli/cmd/workspace/service-principals" @@ -130,6 +133,8 @@ func All() []*cobra.Command { out = append(out, permission_migration.New()) out = append(out, permissions.New()) out = append(out, pipelines.New()) + out = append(out, policy_compliance_for_clusters.New()) + out = append(out, policy_compliance_for_jobs.New()) out = append(out, policy_families.New()) out = append(out, provider_exchange_filters.New()) out = append(out, provider_exchanges.New()) @@ -149,6 +154,7 @@ func All() []*cobra.Command { out = append(out, recipients.New()) out = append(out, registered_models.New()) out = append(out, repos.New()) + out = append(out, resource_quotas.New()) out = append(out, schemas.New()) out = append(out, secrets.New()) out = append(out, service_principals.New()) diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index 8f0dd346a..42493fc46 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -75,6 +75,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.AccessPoint, "access-point", createReq.AccessPoint, `The AWS access point to use when accesing s3 for this external location.`) cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`) // TODO: complex arg: encryption_details + cmd.Flags().BoolVar(&createReq.Fallback, "fallback", createReq.Fallback, `Indicates whether fallback 
mode is enabled for this external location.`) cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Indicates whether the external location is read-only.`) cmd.Flags().BoolVar(&createReq.SkipValidation, "skip-validation", createReq.SkipValidation, `Skips validation of the storage credential associated with the external location.`) @@ -347,6 +348,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().StringVar(&updateReq.CredentialName, "credential-name", updateReq.CredentialName, `Name of the storage credential used with this location.`) // TODO: complex arg: encryption_details + cmd.Flags().BoolVar(&updateReq.Fallback, "fallback", updateReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`) cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`) cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`) diff --git a/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go b/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go new file mode 100755 index 000000000..1274c8790 --- /dev/null +++ b/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go @@ -0,0 +1,260 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package policy_compliance_for_clusters + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "policy-compliance-for-clusters", + Short: `The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your workspace.`, + Long: `The policy compliance APIs allow you to view and manage the policy compliance + status of clusters in your workspace. + + A cluster is compliant with its policy if its configuration satisfies all its + policy rules. Clusters could be out of compliance if their policy was updated + after the cluster was last edited. + + The get and list compliance APIs allow you to view the policy compliance + status of a cluster. The enforce compliance API allows you to update a cluster + to be compliant with the current version of its policy.`, + GroupID: "compute", + Annotations: map[string]string{ + "package": "compute", + }, + } + + // Add methods + cmd.AddCommand(newEnforceCompliance()) + cmd.AddCommand(newGetCompliance()) + cmd.AddCommand(newListCompliance()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start enforce-compliance command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var enforceComplianceOverrides []func( + *cobra.Command, + *compute.EnforceClusterComplianceRequest, +) + +func newEnforceCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var enforceComplianceReq compute.EnforceClusterComplianceRequest + var enforceComplianceJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews the changes that would be made to a cluster to enforce compliance but does not update the cluster.`) + + cmd.Use = "enforce-compliance CLUSTER_ID" + cmd.Short = `Enforce cluster policy compliance.` + cmd.Long = `Enforce cluster policy compliance. + + Updates a cluster to be compliant with the current version of its policy. A + cluster can be updated if it is in a RUNNING or TERMINATED state. + + If a cluster is updated while in a RUNNING state, it will be restarted so + that the new attributes can take effect. + + If a cluster is updated while in a TERMINATED state, it will remain + TERMINATED. The next time the cluster is started, the new attributes will + take effect. + + Clusters created by the Databricks Jobs, DLT, or Models services cannot be + enforced by this API. Instead, use the "Enforce job policy compliance" API to + enforce policy compliance on jobs. + + Arguments: + CLUSTER_ID: The ID of the cluster you want to enforce policy compliance on.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = enforceComplianceJson.Unmarshal(&enforceComplianceReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + enforceComplianceReq.ClusterId = args[0] + } + + response, err := w.PolicyComplianceForClusters.EnforceCompliance(ctx, enforceComplianceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range enforceComplianceOverrides { + fn(cmd, &enforceComplianceReq) + } + + return cmd +} + +// start get-compliance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getComplianceOverrides []func( + *cobra.Command, + *compute.GetClusterComplianceRequest, +) + +func newGetCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var getComplianceReq compute.GetClusterComplianceRequest + + // TODO: short flags + + cmd.Use = "get-compliance CLUSTER_ID" + cmd.Short = `Get cluster policy compliance.` + cmd.Long = `Get cluster policy compliance. 
+ + Returns the policy compliance status of a cluster. Clusters could be out of + compliance if their policy was updated after the cluster was last edited. + + Arguments: + CLUSTER_ID: The ID of the cluster to get the compliance status` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getComplianceReq.ClusterId = args[0] + + response, err := w.PolicyComplianceForClusters.GetCompliance(ctx, getComplianceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getComplianceOverrides { + fn(cmd, &getComplianceReq) + } + + return cmd +} + +// start list-compliance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listComplianceOverrides []func( + *cobra.Command, + *compute.ListClusterCompliancesRequest, +) + +func newListCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var listComplianceReq compute.ListClusterCompliancesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`) + cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`) + + cmd.Use = "list-compliance POLICY_ID" + cmd.Short = `List cluster policy compliance.` + cmd.Long = `List cluster policy compliance. + + Returns the policy compliance status of all clusters that use a given policy. + Clusters could be out of compliance if their policy was updated after the + cluster was last edited. + + Arguments: + POLICY_ID: Canonical unique identifier for the cluster policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listComplianceReq.PolicyId = args[0] + + response := w.PolicyComplianceForClusters.ListCompliance(ctx, listComplianceReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range listComplianceOverrides { + fn(cmd, &listComplianceReq) + } + + return cmd +} + +// end service PolicyComplianceForClusters diff --git a/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go b/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go new file mode 100755 index 000000000..d74caa572 --- /dev/null +++ b/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go @@ -0,0 +1,262 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package policy_compliance_for_jobs + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "policy-compliance-for-jobs", + Short: `The compliance APIs allow you to view and manage the policy compliance status of jobs in your workspace.`, + Long: `The compliance APIs allow you to view and manage the policy compliance status + of jobs in your workspace. This API currently only supports compliance + controls for cluster policies. + + A job is in compliance if its cluster configurations satisfy the rules of all + their respective cluster policies. A job could be out of compliance if a + cluster policy it uses was updated after the job was last edited. The job is + considered out of compliance if any of its clusters no longer comply with + their updated policies. + + The get and list compliance APIs allow you to view the policy compliance + status of a job. The enforce compliance API allows you to update a job so that + it becomes compliant with all of its policies.`, + GroupID: "jobs", + Annotations: map[string]string{ + "package": "jobs", + }, + } + + // Add methods + cmd.AddCommand(newEnforceCompliance()) + cmd.AddCommand(newGetCompliance()) + cmd.AddCommand(newListCompliance()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start enforce-compliance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var enforceComplianceOverrides []func( + *cobra.Command, + *jobs.EnforcePolicyComplianceRequest, +) + +func newEnforceCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var enforceComplianceReq jobs.EnforcePolicyComplianceRequest + var enforceComplianceJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews changes made to the job to comply with its policy, but does not update the job.`) + + cmd.Use = "enforce-compliance JOB_ID" + cmd.Short = `Enforce job policy compliance.` + cmd.Long = `Enforce job policy compliance. + + Updates a job so the job clusters that are created when running the job + (specified in new_cluster) are compliant with the current versions of their + respective cluster policies. All-purpose clusters used in the job will not be + updated. 
+ + Arguments: + JOB_ID: The ID of the job you want to enforce policy compliance on.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'job_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = enforceComplianceJson.Unmarshal(&enforceComplianceReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[0], &enforceComplianceReq.JobId) + if err != nil { + return fmt.Errorf("invalid JOB_ID: %s", args[0]) + } + } + + response, err := w.PolicyComplianceForJobs.EnforceCompliance(ctx, enforceComplianceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range enforceComplianceOverrides { + fn(cmd, &enforceComplianceReq) + } + + return cmd +} + +// start get-compliance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getComplianceOverrides []func( + *cobra.Command, + *jobs.GetPolicyComplianceRequest, +) + +func newGetCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var getComplianceReq jobs.GetPolicyComplianceRequest + + // TODO: short flags + + cmd.Use = "get-compliance JOB_ID" + cmd.Short = `Get job policy compliance.` + cmd.Long = `Get job policy compliance. + + Returns the policy compliance status of a job. Jobs could be out of compliance + if a cluster policy they use was updated after the job was last edited and + some of its job clusters no longer comply with their updated policies. + + Arguments: + JOB_ID: The ID of the job whose compliance status you are requesting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + _, err = fmt.Sscan(args[0], &getComplianceReq.JobId) + if err != nil { + return fmt.Errorf("invalid JOB_ID: %s", args[0]) + } + + response, err := w.PolicyComplianceForJobs.GetCompliance(ctx, getComplianceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getComplianceOverrides { + fn(cmd, &getComplianceReq) + } + + return cmd +} + +// start list-compliance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listComplianceOverrides []func( + *cobra.Command, + *jobs.ListJobComplianceRequest, +) + +func newListCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var listComplianceReq jobs.ListJobComplianceRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`) + cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`) + + cmd.Use = "list-compliance POLICY_ID" + cmd.Short = `List job policy compliance.` + cmd.Long = `List job policy compliance. + + Returns the policy compliance status of all jobs that use a given policy. Jobs + could be out of compliance if a cluster policy they use was updated after the + job was last edited and its job clusters no longer comply with the updated + policy. + + Arguments: + POLICY_ID: Canonical unique identifier for the cluster policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listComplianceReq.PolicyId = args[0] + + response := w.PolicyComplianceForJobs.ListCompliance(ctx, listComplianceReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listComplianceOverrides { + fn(cmd, &listComplianceReq) + } + + return cmd +} + +// end service PolicyComplianceForJobs diff --git a/cmd/workspace/query-history/query-history.go b/cmd/workspace/query-history/query-history.go index 5155b5cc0..bfa013f28 100755 --- a/cmd/workspace/query-history/query-history.go +++ b/cmd/workspace/query-history/query-history.go @@ -16,9 +16,9 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "query-history", - Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints, serverless compute, and DLT.`, + Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute.`, Long: `A service responsible for storing and retrieving the list of queries run - against SQL endpoints, serverless compute, and DLT.`, + against SQL endpoints and serverless compute.`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -53,6 +53,7 @@ func newList() *cobra.Command { // TODO: short flags // TODO: complex arg: filter_by + cmd.Flags().BoolVar(&listReq.IncludeMetrics, "include-metrics", listReq.IncludeMetrics, `Whether to include the query metrics with each query.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Limit the number of results returned in one page.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`) @@ -60,8 +61,7 @@ func newList() *cobra.Command { cmd.Short = `List Queries.` cmd.Long = `List Queries. 
- List the history of queries through SQL warehouses, serverless compute, and - DLT. + List the history of queries through SQL warehouses, and serverless compute. You can filter by user ID, warehouse ID, status, and time range. Most recently started queries are returned first (up to max_results in request). The diff --git a/cmd/workspace/resource-quotas/resource-quotas.go b/cmd/workspace/resource-quotas/resource-quotas.go new file mode 100755 index 000000000..9a0c30687 --- /dev/null +++ b/cmd/workspace/resource-quotas/resource-quotas.go @@ -0,0 +1,168 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package resource_quotas + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "resource-quotas", + Short: `Unity Catalog enforces resource quotas on all securable objects, which limits the number of resources that can be created.`, + Long: `Unity Catalog enforces resource quotas on all securable objects, which limits + the number of resources that can be created. Quotas are expressed in terms of + a resource type and a parent (for example, tables per metastore or schemas per + catalog). The resource quota APIs enable you to monitor your current usage and + limits. For more information on resource quotas see the [Unity Catalog + documentation]. + + [Unity Catalog documentation]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#resource-quotas`, + GroupID: "catalog", + Annotations: map[string]string{ + "package": "catalog", + }, + } + + // Add methods + cmd.AddCommand(newGetQuota()) + cmd.AddCommand(newListQuotas()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get-quota command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getQuotaOverrides []func( + *cobra.Command, + *catalog.GetQuotaRequest, +) + +func newGetQuota() *cobra.Command { + cmd := &cobra.Command{} + + var getQuotaReq catalog.GetQuotaRequest + + // TODO: short flags + + cmd.Use = "get-quota PARENT_SECURABLE_TYPE PARENT_FULL_NAME QUOTA_NAME" + cmd.Short = `Get information for a single resource quota.` + cmd.Long = `Get information for a single resource quota. + + The GetQuota API returns usage information for a single resource quota, + defined as a child-parent pair. This API also refreshes the quota count if it + is out of date. Refreshes are triggered asynchronously. The updated count + might not be returned in the first call. + + Arguments: + PARENT_SECURABLE_TYPE: Securable type of the quota parent. + PARENT_FULL_NAME: Full name of the parent resource. Provide the metastore ID if the parent + is a metastore. + QUOTA_NAME: Name of the quota. 
Follows the pattern of the quota type, with "-quota" + added as a suffix.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getQuotaReq.ParentSecurableType = args[0] + getQuotaReq.ParentFullName = args[1] + getQuotaReq.QuotaName = args[2] + + response, err := w.ResourceQuotas.GetQuota(ctx, getQuotaReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getQuotaOverrides { + fn(cmd, &getQuotaReq) + } + + return cmd +} + +// start list-quotas command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listQuotasOverrides []func( + *cobra.Command, + *catalog.ListQuotasRequest, +) + +func newListQuotas() *cobra.Command { + cmd := &cobra.Command{} + + var listQuotasReq catalog.ListQuotasRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listQuotasReq.MaxResults, "max-results", listQuotasReq.MaxResults, `The number of quotas to return.`) + cmd.Flags().StringVar(&listQuotasReq.PageToken, "page-token", listQuotasReq.PageToken, `Opaque token for the next page of results.`) + + cmd.Use = "list-quotas" + cmd.Short = `List all resource quotas under a metastore.` + cmd.Long = `List all resource quotas under a metastore. + + ListQuotas returns all quota values under the metastore. There are no SLAs on + the freshness of the counts returned. This API does not trigger a refresh of + quota counts.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ResourceQuotas.ListQuotas(ctx, listQuotasReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range listQuotasOverrides { + fn(cmd, &listQuotasReq) + } + + return cmd +} + +// end service ResourceQuotas diff --git a/go.mod b/go.mod index 838a45f36..4aa279921 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.44.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.45.0 // Apache 2.0 github.com/fatih/color v1.17.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index f55f329f3..2e58948aa 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.44.0 h1:9/FZACv4EFQIOYxfwYVKnY7v46xio9FKCw9tpKB2O/s= -github.com/databricks/databricks-sdk-go v0.44.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.45.0 h1:wdx5Wm/ESrahdHeq62WrjLeGjV4r722LLanD8ahI0Mo= +github.com/databricks/databricks-sdk-go v0.45.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=