mirror of https://github.com/databricks/cli.git

Compare commits

No commits in common. "01c381ffdef1cab969aa207e9c4057f97facaff7" and "cff5b92ad8a9a27613d95a0975c073cba4683158" have entirely different histories.

01c381ffde ... cff5b92ad8

@@ -1 +1 @@
-3eae49b444cac5a0118a3503e5b7ecef7f96527a
+f98c07f9c71f579de65d2587bb0292f83d10e55d
@@ -154,7 +154,6 @@ func new{{.PascalName}}() *cobra.Command {
 	"provider-exchanges delete-listing-from-exchange"
 	"provider-exchanges list-exchanges-for-listing"
 	"provider-exchanges list-listings-for-exchange"
-	"storage-credentials get"
 	-}}
 {{- $fullCommandName := (print $serviceName " " .KebabName) -}}
 {{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}
@@ -75,8 +75,6 @@ cmd/workspace/online-tables/online-tables.go linguist-generated=true
 cmd/workspace/permission-migration/permission-migration.go linguist-generated=true
 cmd/workspace/permissions/permissions.go linguist-generated=true
 cmd/workspace/pipelines/pipelines.go linguist-generated=true
-cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go linguist-generated=true
-cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go linguist-generated=true
 cmd/workspace/policy-families/policy-families.go linguist-generated=true
 cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true
 cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true

@@ -96,7 +94,6 @@ cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=tr
 cmd/workspace/recipients/recipients.go linguist-generated=true
 cmd/workspace/registered-models/registered-models.go linguist-generated=true
 cmd/workspace/repos/repos.go linguist-generated=true
-cmd/workspace/resource-quotas/resource-quotas.go linguist-generated=true
 cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go linguist-generated=true
 cmd/workspace/schemas/schemas.go linguist-generated=true
 cmd/workspace/secrets/secrets.go linguist-generated=true
CHANGELOG.md

@@ -1,28 +1,5 @@
 # Version changelog
 
-## [Release] Release v0.227.1
-
-CLI:
-* Disable prompt for storage-credentials get command ([#1723](https://github.com/databricks/cli/pull/1723)).
-
-Bundles:
-* Do not treat empty path as a local path ([#1717](https://github.com/databricks/cli/pull/1717)).
-* Correctly mark PyPI package name specs with multiple specifiers as remote libraries ([#1725](https://github.com/databricks/cli/pull/1725)).
-* Improve error handling for /Volumes paths in mode: development ([#1716](https://github.com/databricks/cli/pull/1716)).
-
-Internal:
-* Ignore CLI version check on development builds of the CLI ([#1714](https://github.com/databricks/cli/pull/1714)).
-
-API Changes:
-* Added `databricks resource-quotas` command group.
-* Added `databricks policy-compliance-for-clusters` command group.
-* Added `databricks policy-compliance-for-jobs` command group.
-
-OpenAPI commit 3eae49b444cac5a0118a3503e5b7ecef7f96527a (2024-08-21)
-
-Dependency updates:
-* Bump github.com/databricks/databricks-sdk-go from 0.44.0 to 0.45.0 ([#1719](https://github.com/databricks/cli/pull/1719)).
-* Revert hc-install version to 0.7.0 ([#1711](https://github.com/databricks/cli/pull/1711)).
-
 ## [Release] Release v0.227.0
 
 CLI:
@@ -64,7 +64,6 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) {
 }
 
 func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
-	var diags diag.Diagnostics
 	p := b.Config.Presets
 	u := b.Config.Workspace.CurrentUser
 
@@ -75,56 +74,44 @@ func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
 	// status to UNPAUSED at the level of an individual object, whic hwas
 	// historically allowed.)
 	if p.TriggerPauseStatus == config.Unpaused {
-		diags = diags.Append(diag.Diagnostic{
+		return diag.Diagnostics{{
 			Severity:  diag.Error,
 			Summary:   "target with 'mode: development' cannot set trigger pause status to UNPAUSED by default",
 			Locations: []dyn.Location{b.Config.GetLocation("presets.trigger_pause_status")},
-		})
+		}}
 	}
 
 	// Make sure this development copy has unique names and paths to avoid conflicts
 	if path := findNonUserPath(b); path != "" {
-		if path == "artifact_path" && strings.HasPrefix(b.Config.Workspace.ArtifactPath, "/Volumes") {
-			// For Volumes paths we recommend including the current username as a substring
-			diags = diags.Extend(diag.Errorf("%s should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'", path))
-		} else {
-			// For non-Volumes paths recommend simply putting things in the home folder
-			diags = diags.Extend(diag.Errorf("%s must start with '~/' or contain the current username to ensure uniqueness when using 'mode: development'", path))
-		}
+		return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
 	}
 	if p.NamePrefix != "" && !strings.Contains(p.NamePrefix, u.ShortName) && !strings.Contains(p.NamePrefix, u.UserName) {
 		// Resources such as pipelines require a unique name, e.g. '[dev steve] my_pipeline'.
 		// For this reason we require the name prefix to contain the current username;
 		// it's a pitfall for users if they don't include it and later find out that
 		// only a single user can do development deployments.
-		diags = diags.Append(diag.Diagnostic{
+		return diag.Diagnostics{{
 			Severity:  diag.Error,
 			Summary:   "prefix should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'",
 			Locations: []dyn.Location{b.Config.GetLocation("presets.name_prefix")},
-		})
+		}}
 	}
-	return diags
+	return nil
 }
 
-// findNonUserPath finds the first workspace path such as root_path that doesn't
-// contain the current username or current user's shortname.
 func findNonUserPath(b *bundle.Bundle) string {
-	containsName := func(path string) bool {
 	username := b.Config.Workspace.CurrentUser.UserName
-	shortname := b.Config.Workspace.CurrentUser.ShortName
-	return strings.Contains(path, username) || strings.Contains(path, shortname)
-	}
 
-	if b.Config.Workspace.RootPath != "" && !containsName(b.Config.Workspace.RootPath) {
+	if b.Config.Workspace.RootPath != "" && !strings.Contains(b.Config.Workspace.RootPath, username) {
 		return "root_path"
 	}
-	if b.Config.Workspace.StatePath != "" && !containsName(b.Config.Workspace.StatePath) {
+	if b.Config.Workspace.StatePath != "" && !strings.Contains(b.Config.Workspace.StatePath, username) {
 		return "state_path"
 	}
-	if b.Config.Workspace.FilePath != "" && !containsName(b.Config.Workspace.FilePath) {
+	if b.Config.Workspace.FilePath != "" && !strings.Contains(b.Config.Workspace.FilePath, username) {
 		return "file_path"
 	}
-	if b.Config.Workspace.ArtifactPath != "" && !containsName(b.Config.Workspace.ArtifactPath) {
+	if b.Config.Workspace.ArtifactPath != "" && !strings.Contains(b.Config.Workspace.ArtifactPath, username) {
 		return "artifact_path"
 	}
 	return ""
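Note (not part of the diff): a minimal sketch of how callers consume the diag.Diagnostics returned by validateDevelopmentMode above; the test hunk that follows uses the same diags.Error() accessor.

	// Sketch only; assumes the diag.Diagnostics API used in this diff,
	// where Error() is non-nil when any diagnostic has Error severity.
	diags := validateDevelopmentMode(b)
	if err := diags.Error(); err != nil {
		return err
	}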
@@ -230,20 +230,10 @@ func TestValidateDevelopmentMode(t *testing.T) {
 	diags := validateDevelopmentMode(b)
 	require.NoError(t, diags.Error())
 
-	// Test with /Volumes path
-	b = mockBundle(config.Development)
-	b.Config.Workspace.ArtifactPath = "/Volumes/catalog/schema/lennart/libs"
-	diags = validateDevelopmentMode(b)
-	require.NoError(t, diags.Error())
-	b.Config.Workspace.ArtifactPath = "/Volumes/catalog/schema/libs"
-	diags = validateDevelopmentMode(b)
-	require.ErrorContains(t, diags.Error(), "artifact_path should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'")
-
 	// Test with a bundle that has a non-user path
-	b = mockBundle(config.Development)
 	b.Config.Workspace.RootPath = "/Shared/.bundle/x/y/state"
 	diags = validateDevelopmentMode(b)
-	require.ErrorContains(t, diags.Error(), "root_path must start with '~/' or contain the current username to ensure uniqueness when using 'mode: development'")
+	require.ErrorContains(t, diags.Error(), "root_path")
 
 	// Test with a bundle that has an unpaused trigger pause status
 	b = mockBundle(config.Development)
 
@@ -1,24 +1,19 @@
 package libraries
 
-import (
-	"fmt"
-
-	"github.com/databricks/databricks-sdk-go/service/compute"
-)
-
-func libraryPath(library *compute.Library) (string, error) {
+import "github.com/databricks/databricks-sdk-go/service/compute"
+
+func libraryPath(library *compute.Library) string {
 	if library.Whl != "" {
-		return library.Whl, nil
+		return library.Whl
 	}
 	if library.Jar != "" {
-		return library.Jar, nil
+		return library.Jar
 	}
 	if library.Egg != "" {
-		return library.Egg, nil
+		return library.Egg
 	}
 	if library.Requirements != "" {
-		return library.Requirements, nil
+		return library.Requirements
 	}
-
-	return "", fmt.Errorf("not supported library type")
+	return ""
 }
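Note (not part of the diff): a sketch of the calling convention on the removed side of this hunk, where libraryPath returns (string, error); callers skip libraries that are not path-based, as the FindTasksWithLocalLibraries hunk further below does.

	// Sketch only; values come from the accompanying test hunks.
	p, err := libraryPath(&compute.Library{Pypi: &compute.PythonPyPiLibrary{Package: "pypipackage"}})
	if err != nil {
		// not a path-based library (e.g. a PyPI package); nothing to resolve locally
	}
	_ = p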
@@ -10,27 +10,9 @@ import (
 func TestLibraryPath(t *testing.T) {
 	path := "/some/path"
 
-	p, err := libraryPath(&compute.Library{Whl: path})
-	assert.Equal(t, path, p)
-	assert.Nil(t, err)
-
-	p, err = libraryPath(&compute.Library{Jar: path})
-	assert.Equal(t, path, p)
-	assert.Nil(t, err)
-
-	p, err = libraryPath(&compute.Library{Egg: path})
-	assert.Equal(t, path, p)
-	assert.Nil(t, err)
-
-	p, err = libraryPath(&compute.Library{Requirements: path})
-	assert.Equal(t, path, p)
-	assert.Nil(t, err)
-
-	p, err = libraryPath(&compute.Library{})
-	assert.Equal(t, "", p)
-	assert.NotNil(t, err)
-
-	p, err = libraryPath(&compute.Library{Pypi: &compute.PythonPyPiLibrary{Package: "pypipackage"}})
-	assert.Equal(t, "", p)
-	assert.NotNil(t, err)
+	assert.Equal(t, path, libraryPath(&compute.Library{Whl: path}))
+	assert.Equal(t, path, libraryPath(&compute.Library{Jar: path}))
+	assert.Equal(t, path, libraryPath(&compute.Library{Egg: path}))
+	assert.Equal(t, path, libraryPath(&compute.Library{Requirements: path}))
+	assert.Equal(t, "", libraryPath(&compute.Library{}))
 }
@@ -67,12 +67,7 @@ func FindTasksWithLocalLibraries(b *bundle.Bundle) []jobs.Task {
 
 func isTaskWithLocalLibraries(task jobs.Task) bool {
 	for _, l := range task.Libraries {
-		p, err := libraryPath(&l)
-		// If there's an error, skip the library because it's not of supported type
-		if err != nil {
-			continue
-		}
-		if IsLibraryLocal(p) {
+		if IsLibraryLocal(libraryPath(&l)) {
 			return true
 		}
 	}
 
@@ -43,10 +43,6 @@ func IsLocalPath(p string) bool {
 // We can't use IsLocalPath beacuse environment dependencies can be
 // a pypi package name which can be misinterpreted as a local path by IsLocalPath.
 func IsLibraryLocal(dep string) bool {
-	if dep == "" {
-		return false
-	}
-
 	possiblePrefixes := []string{
 		".",
 	}
@@ -72,11 +68,9 @@ func IsLibraryLocal(dep string) bool {
 
 // ^[a-zA-Z0-9\-_]+: Matches the package name, allowing alphanumeric characters, dashes (-), and underscores (_).
 // \[.*\])?: Optionally matches any extras specified in square brackets, e.g., [security].
-// ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?): Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1).
-// ,?: Optionally matches a comma (,) at the end of the specifier which is used to separate multiple specifiers.
-// There can be multiple version specifiers separated by commas or no specifiers.
+// ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?)?: Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1).
 // Spec for package name and version specifier: https://pip.pypa.io/en/stable/reference/requirement-specifiers/
-var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?,?)*$`)
+var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?)?$`)
 
 func isPackage(name string) bool {
 	if packageRegex.MatchString(name) {
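Note (not part of the diff): a standalone sketch contrasting the two regexes above on a multi-specifier requirement; the input string is taken from the test hunk that follows.

	package main

	import (
		"fmt"
		"regexp"
	)

	func main() {
		// Regex from the removed side: allows multiple comma-separated version specifiers.
		multi := regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?,?)*$`)
		// Regex from the kept side: at most one version specifier, no trailing comma.
		single := regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?)?$`)

		spec := "beautifulsoup4>=1.0.0,<2.0.0"
		fmt.Println(multi.MatchString(spec))  // true  -> recognized as a remote PyPI package spec
		fmt.Println(single.MatchString(spec)) // false -> not recognized as a package spec
	}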
@@ -48,7 +48,6 @@ func TestIsLibraryLocal(t *testing.T) {
 		{path: "../../local/*.whl", expected: true},
 		{path: "..\\..\\local\\*.whl", expected: true},
 		{path: "file://path/to/package/whl.whl", expected: true},
-		{path: "", expected: false},
 		{path: "pypipackage", expected: false},
 		{path: "/Volumes/catalog/schema/volume/path.whl", expected: false},
 		{path: "/Workspace/my_project/dist.whl", expected: false},

@@ -62,8 +61,6 @@ func TestIsLibraryLocal(t *testing.T) {
 		{path: "beautifulsoup4 ~= 4.12.3", expected: false},
 		{path: "beautifulsoup4[security, tests]", expected: false},
 		{path: "beautifulsoup4[security, tests] ~= 4.12.3", expected: false},
-		{path: "beautifulsoup4>=1.0.0,<2.0.0", expected: false},
-		{path: "beautifulsoup4>=1.0.0,~=1.2.0,<2.0.0", expected: false},
 		{path: "https://github.com/pypa/pip/archive/22.0.2.zip", expected: false},
 		{path: "pip @ https://github.com/pypa/pip/archive/22.0.2.zip", expected: false},
 		{path: "requests [security] @ https://github.com/psf/requests/archive/refs/heads/main.zip", expected: false},
@@ -29,8 +29,8 @@ func IsWorkspacePath(path string) bool {
 
 // IsWorkspaceLibrary returns true if the specified library refers to a workspace path.
 func IsWorkspaceLibrary(library *compute.Library) bool {
-	path, err := libraryPath(library)
-	if err != nil {
+	path := libraryPath(library)
+	if path == "" {
 		return false
 	}
 
@@ -223,17 +223,6 @@ func TestNoIncompatibleWheelTasks(t *testing.T) {
 							{Whl: "./dist/test.whl"},
 						},
 					},
-					{
-						TaskKey:           "key7",
-						PythonWheelTask:   &jobs.PythonWheelTask{},
-						ExistingClusterId: "test-key-2",
-						Libraries: []compute.Library{
-							{Whl: "signol_lib-0.4.4-20240822+prod-py3-none-any.whl"},
-							{Pypi: &compute.PythonPyPiLibrary{
-								Package: "requests==2.25.1",
-							}},
-						},
-					},
 				},
 			},
 		},

@@ -252,46 +241,6 @@ func TestNoIncompatibleWheelTasks(t *testing.T) {
 	require.False(t, hasIncompatibleWheelTasks(context.Background(), b))
 }
 
-func TestTasksWithPyPiPackageAreCompatible(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Resources: config.Resources{
-				Jobs: map[string]*resources.Job{
-					"job1": {
-						JobSettings: &jobs.JobSettings{
-							JobClusters: []jobs.JobCluster{
-								{
-									JobClusterKey: "cluster1",
-									NewCluster: compute.ClusterSpec{
-										SparkVersion: "12.2.x-scala2.12",
-									},
-								},
-							},
-							Tasks: []jobs.Task{
-								{
-									TaskKey:           "key1",
-									PythonWheelTask:   &jobs.PythonWheelTask{},
-									ExistingClusterId: "test-key-2",
-									Libraries: []compute.Library{
-										{Pypi: &compute.PythonPyPiLibrary{
-											Package: "requests==2.25.1",
-										}},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	m := mocks.NewMockWorkspaceClient(t)
-	b.SetWorkpaceClient(m.WorkspaceClient)
-
-	require.False(t, hasIncompatibleWheelTasks(context.Background(), b))
-}
-
 func TestNoWarningWhenPythonWheelWrapperIsOn(t *testing.T) {
 	b := &bundle.Bundle{
 		Config: config.Root{
@@ -85,12 +85,6 @@
       "enabled": {
         "description": ""
       },
-      "import": {
-        "description": "",
-        "items": {
-          "description": ""
-        }
-      },
       "venv_path": {
         "description": ""
      }

@@ -136,29 +130,6 @@
         }
       }
     },
-    "presets": {
-      "description": "",
-      "properties": {
-        "jobs_max_concurrent_runs": {
-          "description": ""
-        },
-        "name_prefix": {
-          "description": ""
-        },
-        "pipelines_development": {
-          "description": ""
-        },
-        "tags": {
-          "description": "",
-          "additionalproperties": {
-            "description": ""
-          }
-        },
-        "trigger_pause_status": {
-          "description": ""
-        }
-      }
-    },
     "resources": {
       "description": "Collection of Databricks resources to deploy.",
       "properties": {

@@ -3108,12 +3079,6 @@
           "items": {
             "description": ""
           }
-        },
-        "paths": {
-          "description": "",
-          "items": {
-            "description": ""
-          }
         }
       }
     },

@@ -3237,29 +3202,6 @@
         }
       }
     },
-    "presets": {
-      "description": "",
-      "properties": {
-        "jobs_max_concurrent_runs": {
-          "description": ""
-        },
-        "name_prefix": {
-          "description": ""
-        },
-        "pipelines_development": {
-          "description": ""
-        },
-        "tags": {
-          "description": "",
-          "additionalproperties": {
-            "description": ""
-          }
-        },
-        "trigger_pause_status": {
-          "description": ""
-        }
-      }
-    },
     "resources": {
       "description": "Collection of Databricks resources to deploy.",
       "properties": {

@@ -6209,12 +6151,6 @@
           "items": {
             "description": ""
           }
-        },
-        "paths": {
-          "description": "",
-          "items": {
-            "description": ""
-          }
         }
       }
     },
@@ -2,7 +2,10 @@ package bundle
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
+	"os"
+	"path/filepath"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/deploy/terraform"

@@ -40,6 +43,9 @@ task or a Python wheel task, the second example applies.
 `,
 	}
 
+	var forcePull bool
+	cmd.Flags().BoolVar(&forcePull, "force-pull", false, "Skip local cache and load the state from the remote workspace")
+
 	var runOptions run.Options
 	runOptions.Define(cmd)
 

@@ -60,6 +66,25 @@ task or a Python wheel task, the second example applies.
 			return err
 		}
 
+		cacheDir, err := terraform.Dir(ctx, b)
+		if err != nil {
+			return err
+		}
+		_, stateFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformStateFileName))
+		_, configFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformConfigFileName))
+		noCache := errors.Is(stateFileErr, os.ErrNotExist) || errors.Is(configFileErr, os.ErrNotExist)
+
+		if forcePull || noCache {
+			diags = bundle.Apply(ctx, b, bundle.Seq(
+				terraform.StatePull(),
+				terraform.Interpolate(),
+				terraform.Write(),
+			))
+			if err := diags.Error(); err != nil {
+				return err
+			}
+		}
+
 		// If no arguments are specified, prompt the user to select something to run.
 		if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
 			// Invert completions from KEY -> NAME, to NAME -> KEY.

@@ -78,14 +103,6 @@ task or a Python wheel task, the second example applies.
 			return fmt.Errorf("expected a KEY of the resource to run")
 		}
 
-		diags = bundle.Apply(ctx, b, bundle.Seq(
-			terraform.Interpolate(),
-			terraform.Write(),
-			terraform.StatePull(),
-			terraform.Load(terraform.ErrorOnEmptyState),
-		))
-		return diags.Error()
-
 		runner, err := run.Find(b, args[0])
 		if err != nil {
 			return err
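Note (not part of the diff): a usage sketch for the --force-pull flag introduced on the "+" side of the hunks above; the resource key shown is hypothetical.

	databricks bundle run --force-pull my_job_key

Without the flag, the "+" side pulls Terraform state only when the local cache files are missing.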
@@ -44,8 +44,6 @@ import (
 	permission_migration "github.com/databricks/cli/cmd/workspace/permission-migration"
 	permissions "github.com/databricks/cli/cmd/workspace/permissions"
 	pipelines "github.com/databricks/cli/cmd/workspace/pipelines"
-	policy_compliance_for_clusters "github.com/databricks/cli/cmd/workspace/policy-compliance-for-clusters"
-	policy_compliance_for_jobs "github.com/databricks/cli/cmd/workspace/policy-compliance-for-jobs"
 	policy_families "github.com/databricks/cli/cmd/workspace/policy-families"
 	provider_exchange_filters "github.com/databricks/cli/cmd/workspace/provider-exchange-filters"
 	provider_exchanges "github.com/databricks/cli/cmd/workspace/provider-exchanges"

@@ -65,7 +63,6 @@ import (
 	recipients "github.com/databricks/cli/cmd/workspace/recipients"
 	registered_models "github.com/databricks/cli/cmd/workspace/registered-models"
 	repos "github.com/databricks/cli/cmd/workspace/repos"
-	resource_quotas "github.com/databricks/cli/cmd/workspace/resource-quotas"
 	schemas "github.com/databricks/cli/cmd/workspace/schemas"
 	secrets "github.com/databricks/cli/cmd/workspace/secrets"
 	service_principals "github.com/databricks/cli/cmd/workspace/service-principals"

@@ -133,8 +130,6 @@ func All() []*cobra.Command {
 	out = append(out, permission_migration.New())
 	out = append(out, permissions.New())
 	out = append(out, pipelines.New())
-	out = append(out, policy_compliance_for_clusters.New())
-	out = append(out, policy_compliance_for_jobs.New())
 	out = append(out, policy_families.New())
 	out = append(out, provider_exchange_filters.New())
 	out = append(out, provider_exchanges.New())

@@ -154,7 +149,6 @@ func All() []*cobra.Command {
 	out = append(out, recipients.New())
 	out = append(out, registered_models.New())
 	out = append(out, repos.New())
-	out = append(out, resource_quotas.New())
 	out = append(out, schemas.New())
 	out = append(out, secrets.New())
 	out = append(out, service_principals.New())
@@ -75,7 +75,6 @@ func newCreate() *cobra.Command {
 	cmd.Flags().StringVar(&createReq.AccessPoint, "access-point", createReq.AccessPoint, `The AWS access point to use when accesing s3 for this external location.`)
 	cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`)
 	// TODO: complex arg: encryption_details
-	cmd.Flags().BoolVar(&createReq.Fallback, "fallback", createReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`)
 	cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Indicates whether the external location is read-only.`)
 	cmd.Flags().BoolVar(&createReq.SkipValidation, "skip-validation", createReq.SkipValidation, `Skips validation of the storage credential associated with the external location.`)
 

@@ -348,7 +347,6 @@ func newUpdate() *cobra.Command {
 	cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
 	cmd.Flags().StringVar(&updateReq.CredentialName, "credential-name", updateReq.CredentialName, `Name of the storage credential used with this location.`)
 	// TODO: complex arg: encryption_details
-	cmd.Flags().BoolVar(&updateReq.Fallback, "fallback", updateReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`)
 	cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`)
 	cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`)
 	cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`)
@@ -1,260 +0,0 @@
-// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
-
-package policy_compliance_for_clusters
-
-import (
-	"fmt"
-
-	"github.com/databricks/cli/cmd/root"
-	"github.com/databricks/cli/libs/cmdio"
-	"github.com/databricks/cli/libs/flags"
-	"github.com/databricks/databricks-sdk-go/service/compute"
-	"github.com/spf13/cobra"
-)
-
-// Slice with functions to override default command behavior.
-// Functions can be added from the `init()` function in manually curated files in this directory.
-var cmdOverrides []func(*cobra.Command)
-
-func New() *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "policy-compliance-for-clusters",
-		Short: `The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your workspace.`,
-		Long: `The policy compliance APIs allow you to view and manage the policy compliance
-  status of clusters in your workspace.
-
-  A cluster is compliant with its policy if its configuration satisfies all its
-  policy rules. Clusters could be out of compliance if their policy was updated
-  after the cluster was last edited.
-
-  The get and list compliance APIs allow you to view the policy compliance
-  status of a cluster. The enforce compliance API allows you to update a cluster
-  to be compliant with the current version of its policy.`,
-		GroupID: "compute",
-		Annotations: map[string]string{
-			"package": "compute",
-		},
-	}
-
-	// Add methods
-	cmd.AddCommand(newEnforceCompliance())
-	cmd.AddCommand(newGetCompliance())
-	cmd.AddCommand(newListCompliance())
-
-	// Apply optional overrides to this command.
-	for _, fn := range cmdOverrides {
-		fn(cmd)
-	}
-
-	return cmd
-}
-
-// start enforce-compliance command
-
-// Slice with functions to override default command behavior.
-// Functions can be added from the `init()` function in manually curated files in this directory.
-var enforceComplianceOverrides []func(
-	*cobra.Command,
-	*compute.EnforceClusterComplianceRequest,
-)
-
-func newEnforceCompliance() *cobra.Command {
-	cmd := &cobra.Command{}
-
-	var enforceComplianceReq compute.EnforceClusterComplianceRequest
-	var enforceComplianceJson flags.JsonFlag
-
-	// TODO: short flags
-	cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`)
-
-	cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews the changes that would be made to a cluster to enforce compliance but does not update the cluster.`)
-
-	cmd.Use = "enforce-compliance CLUSTER_ID"
-	cmd.Short = `Enforce cluster policy compliance.`
-	cmd.Long = `Enforce cluster policy compliance.
-
-  Updates a cluster to be compliant with the current version of its policy. A
-  cluster can be updated if it is in a RUNNING or TERMINATED state.
-
-  If a cluster is updated while in a RUNNING state, it will be restarted so
-  that the new attributes can take effect.
-
-  If a cluster is updated while in a TERMINATED state, it will remain
-  TERMINATED. The next time the cluster is started, the new attributes will
-  take effect.
-
-  Clusters created by the Databricks Jobs, DLT, or Models services cannot be
-  enforced by this API. Instead, use the "Enforce job policy compliance" API to
-  enforce policy compliance on jobs.
-
-  Arguments:
-    CLUSTER_ID: The ID of the cluster you want to enforce policy compliance on.`
-
-	cmd.Annotations = make(map[string]string)
-
-	cmd.Args = func(cmd *cobra.Command, args []string) error {
-		if cmd.Flags().Changed("json") {
-			err := root.ExactArgs(0)(cmd, args)
-			if err != nil {
-				return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input")
-			}
-			return nil
-		}
-		check := root.ExactArgs(1)
-		return check(cmd, args)
-	}
-
-	cmd.PreRunE = root.MustWorkspaceClient
-	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
-		ctx := cmd.Context()
-		w := root.WorkspaceClient(ctx)
-
-		if cmd.Flags().Changed("json") {
-			err = enforceComplianceJson.Unmarshal(&enforceComplianceReq)
-			if err != nil {
-				return err
-			}
-		}
-		if !cmd.Flags().Changed("json") {
-			enforceComplianceReq.ClusterId = args[0]
-		}
-
-		response, err := w.PolicyComplianceForClusters.EnforceCompliance(ctx, enforceComplianceReq)
-		if err != nil {
-			return err
-		}
-		return cmdio.Render(ctx, response)
-	}
-
-	// Disable completions since they are not applicable.
-	// Can be overridden by manual implementation in `override.go`.
-	cmd.ValidArgsFunction = cobra.NoFileCompletions
-
-	// Apply optional overrides to this command.
-	for _, fn := range enforceComplianceOverrides {
-		fn(cmd, &enforceComplianceReq)
-	}
-
-	return cmd
-}
-
-// start get-compliance command
-
-// Slice with functions to override default command behavior.
-// Functions can be added from the `init()` function in manually curated files in this directory.
-var getComplianceOverrides []func(
-	*cobra.Command,
-	*compute.GetClusterComplianceRequest,
-)
-
-func newGetCompliance() *cobra.Command {
-	cmd := &cobra.Command{}
-
-	var getComplianceReq compute.GetClusterComplianceRequest
-
-	// TODO: short flags
-
-	cmd.Use = "get-compliance CLUSTER_ID"
-	cmd.Short = `Get cluster policy compliance.`
-	cmd.Long = `Get cluster policy compliance.
-
-  Returns the policy compliance status of a cluster. Clusters could be out of
-  compliance if their policy was updated after the cluster was last edited.
-
-  Arguments:
-    CLUSTER_ID: The ID of the cluster to get the compliance status`
-
-	cmd.Annotations = make(map[string]string)
-
-	cmd.Args = func(cmd *cobra.Command, args []string) error {
-		check := root.ExactArgs(1)
-		return check(cmd, args)
-	}
-
-	cmd.PreRunE = root.MustWorkspaceClient
-	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
-		ctx := cmd.Context()
-		w := root.WorkspaceClient(ctx)
-
-		getComplianceReq.ClusterId = args[0]
-
-		response, err := w.PolicyComplianceForClusters.GetCompliance(ctx, getComplianceReq)
-		if err != nil {
-			return err
-		}
-		return cmdio.Render(ctx, response)
-	}
-
-	// Disable completions since they are not applicable.
-	// Can be overridden by manual implementation in `override.go`.
-	cmd.ValidArgsFunction = cobra.NoFileCompletions
-
-	// Apply optional overrides to this command.
-	for _, fn := range getComplianceOverrides {
-		fn(cmd, &getComplianceReq)
-	}
-
-	return cmd
-}
-
-// start list-compliance command
-
-// Slice with functions to override default command behavior.
-// Functions can be added from the `init()` function in manually curated files in this directory.
-var listComplianceOverrides []func(
-	*cobra.Command,
-	*compute.ListClusterCompliancesRequest,
-)
-
-func newListCompliance() *cobra.Command {
-	cmd := &cobra.Command{}
-
-	var listComplianceReq compute.ListClusterCompliancesRequest
-
-	// TODO: short flags
-
-	cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`)
-	cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`)
-
-	cmd.Use = "list-compliance POLICY_ID"
-	cmd.Short = `List cluster policy compliance.`
-	cmd.Long = `List cluster policy compliance.
-
-  Returns the policy compliance status of all clusters that use a given policy.
-  Clusters could be out of compliance if their policy was updated after the
-  cluster was last edited.
-
-  Arguments:
-    POLICY_ID: Canonical unique identifier for the cluster policy.`
-
-	cmd.Annotations = make(map[string]string)
-
-	cmd.Args = func(cmd *cobra.Command, args []string) error {
-		check := root.ExactArgs(1)
-		return check(cmd, args)
-	}
-
-	cmd.PreRunE = root.MustWorkspaceClient
-	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
-		ctx := cmd.Context()
-		w := root.WorkspaceClient(ctx)
-
-		listComplianceReq.PolicyId = args[0]
-
-		response := w.PolicyComplianceForClusters.ListCompliance(ctx, listComplianceReq)
-		return cmdio.RenderIterator(ctx, response)
-	}
-
-	// Disable completions since they are not applicable.
-	// Can be overridden by manual implementation in `override.go`.
-	cmd.ValidArgsFunction = cobra.NoFileCompletions
-
-	// Apply optional overrides to this command.
-	for _, fn := range listComplianceOverrides {
-		fn(cmd, &listComplianceReq)
-	}
-
-	return cmd
-}
-
-// end service PolicyComplianceForClusters
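Note (not part of the diff): usage sketches for the command group implemented by the generated file above; CLUSTER_ID and POLICY_ID are placeholders. Subcommand names and flags are taken from the cmd.Use and cmd.Flags() lines in the hunk.

	databricks policy-compliance-for-clusters get-compliance CLUSTER_ID
	databricks policy-compliance-for-clusters list-compliance POLICY_ID --page-size 20
	databricks policy-compliance-for-clusters enforce-compliance CLUSTER_ID --validate-only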
@ -1,262 +0,0 @@
|
||||||
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
|
|
||||||
|
|
||||||
package policy_compliance_for_jobs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/databricks/cli/cmd/root"
|
|
||||||
"github.com/databricks/cli/libs/cmdio"
|
|
||||||
"github.com/databricks/cli/libs/flags"
|
|
||||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Slice with functions to override default command behavior.
|
|
||||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
|
||||||
var cmdOverrides []func(*cobra.Command)
|
|
||||||
|
|
||||||
func New() *cobra.Command {
|
|
||||||
cmd := &cobra.Command{
|
|
||||||
Use: "policy-compliance-for-jobs",
|
|
||||||
Short: `The compliance APIs allow you to view and manage the policy compliance status of jobs in your workspace.`,
|
|
||||||
Long: `The compliance APIs allow you to view and manage the policy compliance status
|
|
||||||
of jobs in your workspace. This API currently only supports compliance
|
|
||||||
controls for cluster policies.
|
|
||||||
|
|
||||||
A job is in compliance if its cluster configurations satisfy the rules of all
|
|
||||||
their respective cluster policies. A job could be out of compliance if a
|
|
||||||
cluster policy it uses was updated after the job was last edited. The job is
|
|
||||||
considered out of compliance if any of its clusters no longer comply with
|
|
||||||
their updated policies.
|
|
||||||
|
|
||||||
The get and list compliance APIs allow you to view the policy compliance
|
|
||||||
status of a job. The enforce compliance API allows you to update a job so that
|
|
||||||
it becomes compliant with all of its policies.`,
|
|
||||||
GroupID: "jobs",
|
|
||||||
Annotations: map[string]string{
|
|
||||||
"package": "jobs",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add methods
|
|
||||||
cmd.AddCommand(newEnforceCompliance())
|
|
||||||
cmd.AddCommand(newGetCompliance())
|
|
||||||
cmd.AddCommand(newListCompliance())
|
|
||||||
|
|
||||||
// Apply optional overrides to this command.
|
|
||||||
for _, fn := range cmdOverrides {
|
|
||||||
fn(cmd)
|
|
||||||
}
|
|
||||||
|
|
||||||
return cmd
|
|
||||||
}
|
|
||||||
|
|
||||||
// start enforce-compliance command
|
|
||||||
|
|
||||||
// Slice with functions to override default command behavior.
|
|
||||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
|
||||||
var enforceComplianceOverrides []func(
|
|
||||||
*cobra.Command,
|
|
||||||
*jobs.EnforcePolicyComplianceRequest,
|
|
||||||
)
|
|
||||||
|
|
||||||
func newEnforceCompliance() *cobra.Command {
|
|
||||||
cmd := &cobra.Command{}
|
|
||||||
|
|
||||||
var enforceComplianceReq jobs.EnforcePolicyComplianceRequest
|
|
||||||
var enforceComplianceJson flags.JsonFlag
|
|
||||||
|
|
||||||
// TODO: short flags
|
|
||||||
cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
|
||||||
|
|
||||||
cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews changes made to the job to comply with its policy, but does not update the job.`)
|
|
||||||
|
|
||||||
cmd.Use = "enforce-compliance JOB_ID"
|
|
||||||
cmd.Short = `Enforce job policy compliance.`
|
|
||||||
cmd.Long = `Enforce job policy compliance.
|
|
||||||
|
|
||||||
Updates a job so the job clusters that are created when running the job
|
|
||||||
(specified in new_cluster) are compliant with the current versions of their
|
|
||||||
respective cluster policies. All-purpose clusters used in the job will not be
|
|
||||||
updated.
|
|
||||||
|
|
||||||
Arguments:
|
|
||||||
JOB_ID: The ID of the job you want to enforce policy compliance on.`
|
|
||||||
|
|
||||||
cmd.Annotations = make(map[string]string)
|
|
||||||
|
|
||||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
|
||||||
if cmd.Flags().Changed("json") {
|
|
||||||
err := root.ExactArgs(0)(cmd, args)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'job_id' in your JSON input")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
check := root.ExactArgs(1)
|
|
||||||
return check(cmd, args)
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd.PreRunE = root.MustWorkspaceClient
|
|
||||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
|
||||||
ctx := cmd.Context()
|
|
||||||
w := root.WorkspaceClient(ctx)
|
|
||||||
|
|
||||||
if cmd.Flags().Changed("json") {
|
|
||||||
err = enforceComplianceJson.Unmarshal(&enforceComplianceReq)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !cmd.Flags().Changed("json") {
|
|
||||||
_, err = fmt.Sscan(args[0], &enforceComplianceReq.JobId)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid JOB_ID: %s", args[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := w.PolicyComplianceForJobs.EnforceCompliance(ctx, enforceComplianceReq)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return cmdio.Render(ctx, response)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disable completions since they are not applicable.
|
|
||||||
// Can be overridden by manual implementation in `override.go`.
|
|
||||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
|
||||||
|
|
||||||
// Apply optional overrides to this command.
|
|
||||||
for _, fn := range enforceComplianceOverrides {
|
|
||||||
fn(cmd, &enforceComplianceReq)
|
|
||||||
}
|
|
||||||
|
|
||||||
return cmd
|
|
||||||
}
|
|
||||||
|
|
||||||
// start get-compliance command
|
|
||||||
|
|
||||||
// Slice with functions to override default command behavior.
|
|
||||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
|
||||||
var getComplianceOverrides []func(
|
|
||||||
*cobra.Command,
|
|
||||||
*jobs.GetPolicyComplianceRequest,
|
|
||||||
)
|
|
||||||
|
|
||||||
func newGetCompliance() *cobra.Command {
|
|
||||||
cmd := &cobra.Command{}
|
|
||||||
|
|
||||||
var getComplianceReq jobs.GetPolicyComplianceRequest
|
|
||||||
|
|
||||||
// TODO: short flags
|
|
||||||
|
|
||||||
cmd.Use = "get-compliance JOB_ID"
|
|
||||||
cmd.Short = `Get job policy compliance.`
|
|
||||||
cmd.Long = `Get job policy compliance.
|
|
||||||
|
|
||||||
Returns the policy compliance status of a job. Jobs could be out of compliance
|
|
||||||
if a cluster policy they use was updated after the job was last edited and
|
|
||||||
some of its job clusters no longer comply with their updated policies.
|
|
||||||
|
|
||||||
Arguments:
|
|
||||||
JOB_ID: The ID of the job whose compliance status you are requesting.`
|
|
||||||
|
|
||||||
cmd.Annotations = make(map[string]string)
|
|
||||||
|
|
||||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
|
||||||
check := root.ExactArgs(1)
|
|
||||||
return check(cmd, args)
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd.PreRunE = root.MustWorkspaceClient
|
|
||||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
|
||||||
ctx := cmd.Context()
|
|
||||||
w := root.WorkspaceClient(ctx)
|
|
||||||
|
|
||||||
_, err = fmt.Sscan(args[0], &getComplianceReq.JobId)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid JOB_ID: %s", args[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := w.PolicyComplianceForJobs.GetCompliance(ctx, getComplianceReq)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return cmdio.Render(ctx, response)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disable completions since they are not applicable.
|
|
||||||
// Can be overridden by manual implementation in `override.go`.
|
|
||||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
|
||||||
|
|
||||||
// Apply optional overrides to this command.
|
|
||||||
for _, fn := range getComplianceOverrides {
|
|
||||||
fn(cmd, &getComplianceReq)
|
|
||||||
}
|
|
||||||
|
|
||||||
return cmd
|
|
||||||
}
|
|
||||||
// start list-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listComplianceOverrides []func(
    *cobra.Command,
    *jobs.ListJobComplianceRequest,
)

func newListCompliance() *cobra.Command {
    cmd := &cobra.Command{}

    var listComplianceReq jobs.ListJobComplianceRequest

    // TODO: short flags

    cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`)
    cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`)

    cmd.Use = "list-compliance POLICY_ID"
    cmd.Short = `List job policy compliance.`
    cmd.Long = `List job policy compliance.

  Returns the policy compliance status of all jobs that use a given policy. Jobs
  could be out of compliance if a cluster policy they use was updated after the
  job was last edited and its job clusters no longer comply with the updated
  policy.

  Arguments:
    POLICY_ID: Canonical unique identifier for the cluster policy.`

    cmd.Annotations = make(map[string]string)

    cmd.Args = func(cmd *cobra.Command, args []string) error {
        check := root.ExactArgs(1)
        return check(cmd, args)
    }

    cmd.PreRunE = root.MustWorkspaceClient
    cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        w := root.WorkspaceClient(ctx)

        listComplianceReq.PolicyId = args[0]

        response := w.PolicyComplianceForJobs.ListCompliance(ctx, listComplianceReq)
        return cmdio.RenderIterator(ctx, response)
    }

    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    cmd.ValidArgsFunction = cobra.NoFileCompletions

    // Apply optional overrides to this command.
    for _, fn := range listComplianceOverrides {
        fn(cmd, &listComplianceReq)
    }

    return cmd
}

// end service PolicyComplianceForJobs
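The *Overrides slices are the hook for hand-written customizations: a manually curated override.go in the same package can append functions from init(), and the generated constructor calls them after the command is built. A minimal hypothetical sketch, assuming the package follows the generator's naming convention (mirroring package resource_quotas below); the "ls-compliance" alias is purely illustrative and not part of the repository:

package policy_compliance_for_jobs

import (
    "github.com/databricks/databricks-sdk-go/service/jobs"
    "github.com/spf13/cobra"
)

func init() {
    // Register a customization that runs after the generated command is built.
    listComplianceOverrides = append(listComplianceOverrides,
        func(cmd *cobra.Command, req *jobs.ListJobComplianceRequest) {
            // Purely cosmetic example: give the generated command an alias.
            cmd.Aliases = []string{"ls-compliance"}
            _ = req // the request struct is also available for pre-seeding defaults
        })
}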
@ -16,9 +16,9 @@ var cmdOverrides []func(*cobra.Command)

func New() *cobra.Command {
    cmd := &cobra.Command{
        Use:   "query-history",
        Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute.`,
        Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints, serverless compute, and DLT.`,
        Long: `A service responsible for storing and retrieving the list of queries run
    against SQL endpoints and serverless compute.`,
    against SQL endpoints, serverless compute, and DLT.`,
        GroupID: "sql",
        Annotations: map[string]string{
            "package": "sql",

@ -53,7 +53,6 @@ func newList() *cobra.Command {
    // TODO: short flags

    // TODO: complex arg: filter_by
    cmd.Flags().BoolVar(&listReq.IncludeMetrics, "include-metrics", listReq.IncludeMetrics, `Whether to include the query metrics with each query.`)
    cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Limit the number of results returned in one page.`)
    cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`)

@ -61,7 +60,8 @@ func newList() *cobra.Command {
    cmd.Short = `List Queries.`
    cmd.Long = `List Queries.

  List the history of queries through SQL warehouses, and serverless compute.
  List the history of queries through SQL warehouses, serverless compute, and
  DLT.

  You can filter by user ID, warehouse ID, status, and time range. Most recently
  started queries are returned first (up to max_results in request). The
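The flag wiring above binds pflag values directly into the request struct before RunE executes. A self-contained sketch of the same binding pattern (the anonymous listReq struct and the "10"/"abc" arguments are stand-ins for illustration, not the SDK's request type):

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    // Stand-in for the generated request struct; only the two paging fields.
    var listReq struct {
        MaxResults int
        PageToken  string
    }

    cmd := &cobra.Command{Use: "list"}
    cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Limit the number of results returned in one page.`)
    cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`)
    cmd.RunE = func(cmd *cobra.Command, args []string) error {
        // By the time RunE runs, pflag has already populated the struct.
        fmt.Println(listReq.MaxResults, listReq.PageToken)
        return nil
    }

    cmd.SetArgs([]string{"--max-results", "10", "--page-token", "abc"})
    _ = cmd.Execute()
}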
@ -1,168 +0,0 @@

// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.

package resource_quotas

import (
    "github.com/databricks/cli/cmd/root"
    "github.com/databricks/cli/libs/cmdio"
    "github.com/databricks/databricks-sdk-go/service/catalog"
    "github.com/spf13/cobra"
)

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)

func New() *cobra.Command {
    cmd := &cobra.Command{
        Use:   "resource-quotas",
        Short: `Unity Catalog enforces resource quotas on all securable objects, which limits the number of resources that can be created.`,
        Long: `Unity Catalog enforces resource quotas on all securable objects, which limits
  the number of resources that can be created. Quotas are expressed in terms of
  a resource type and a parent (for example, tables per metastore or schemas per
  catalog). The resource quota APIs enable you to monitor your current usage and
  limits. For more information on resource quotas see the [Unity Catalog
  documentation].

  [Unity Catalog documentation]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#resource-quotas`,
        GroupID: "catalog",
        Annotations: map[string]string{
            "package": "catalog",
        },
    }

    // Add methods
    cmd.AddCommand(newGetQuota())
    cmd.AddCommand(newListQuotas())

    // Apply optional overrides to this command.
    for _, fn := range cmdOverrides {
        fn(cmd)
    }

    return cmd
}

// start get-quota command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getQuotaOverrides []func(
    *cobra.Command,
    *catalog.GetQuotaRequest,
)

func newGetQuota() *cobra.Command {
    cmd := &cobra.Command{}

    var getQuotaReq catalog.GetQuotaRequest

    // TODO: short flags

    cmd.Use = "get-quota PARENT_SECURABLE_TYPE PARENT_FULL_NAME QUOTA_NAME"
    cmd.Short = `Get information for a single resource quota.`
    cmd.Long = `Get information for a single resource quota.

  The GetQuota API returns usage information for a single resource quota,
  defined as a child-parent pair. This API also refreshes the quota count if it
  is out of date. Refreshes are triggered asynchronously. The updated count
  might not be returned in the first call.

  Arguments:
    PARENT_SECURABLE_TYPE: Securable type of the quota parent.
    PARENT_FULL_NAME: Full name of the parent resource. Provide the metastore ID if the parent
      is a metastore.
    QUOTA_NAME: Name of the quota. Follows the pattern of the quota type, with "-quota"
      added as a suffix.`

    cmd.Annotations = make(map[string]string)

    cmd.Args = func(cmd *cobra.Command, args []string) error {
        check := root.ExactArgs(3)
        return check(cmd, args)
    }

    cmd.PreRunE = root.MustWorkspaceClient
    cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        w := root.WorkspaceClient(ctx)

        getQuotaReq.ParentSecurableType = args[0]
        getQuotaReq.ParentFullName = args[1]
        getQuotaReq.QuotaName = args[2]

        response, err := w.ResourceQuotas.GetQuota(ctx, getQuotaReq)
        if err != nil {
            return err
        }
        return cmdio.Render(ctx, response)
    }

    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    cmd.ValidArgsFunction = cobra.NoFileCompletions

    // Apply optional overrides to this command.
    for _, fn := range getQuotaOverrides {
        fn(cmd, &getQuotaReq)
    }

    return cmd
}

// start list-quotas command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listQuotasOverrides []func(
    *cobra.Command,
    *catalog.ListQuotasRequest,
)

func newListQuotas() *cobra.Command {
    cmd := &cobra.Command{}

    var listQuotasReq catalog.ListQuotasRequest

    // TODO: short flags

    cmd.Flags().IntVar(&listQuotasReq.MaxResults, "max-results", listQuotasReq.MaxResults, `The number of quotas to return.`)
    cmd.Flags().StringVar(&listQuotasReq.PageToken, "page-token", listQuotasReq.PageToken, `Opaque token for the next page of results.`)

    cmd.Use = "list-quotas"
    cmd.Short = `List all resource quotas under a metastore.`
    cmd.Long = `List all resource quotas under a metastore.

  ListQuotas returns all quota values under the metastore. There are no SLAs on
  the freshness of the counts returned. This API does not trigger a refresh of
  quota counts.`

    cmd.Annotations = make(map[string]string)

    cmd.Args = func(cmd *cobra.Command, args []string) error {
        check := root.ExactArgs(0)
        return check(cmd, args)
    }

    cmd.PreRunE = root.MustWorkspaceClient
    cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        w := root.WorkspaceClient(ctx)

        response := w.ResourceQuotas.ListQuotas(ctx, listQuotasReq)
        return cmdio.RenderIterator(ctx, response)
    }

    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    cmd.ValidArgsFunction = cobra.NoFileCompletions

    // Apply optional overrides to this command.
    for _, fn := range listQuotasOverrides {
        fn(cmd, &listQuotasReq)
    }

    return cmd
}

// end service ResourceQuotas
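Note the two rendering paths above: get-quota hands a single response object to cmdio.Render, while list-quotas returns a lazy paging iterator that cmdio.RenderIterator drains item by item. A simplified, self-contained sketch of that iterator-draining shape (the generic iterator type here is a stand-in for illustration, not the SDK's listing package):

package main

import (
    "context"
    "fmt"
)

// Simplified stand-in for the SDK's paging iterator.
type iterator[T any] struct {
    items []T
    pos   int
}

func (it *iterator[T]) HasNext(ctx context.Context) bool { return it.pos < len(it.items) }

func (it *iterator[T]) Next(ctx context.Context) (T, error) {
    item := it.items[it.pos]
    it.pos++
    return item, nil
}

func main() {
    ctx := context.Background()
    it := &iterator[string]{items: []string{"table-quota", "schema-quota"}}
    // RenderIterator-style loop: pull items until exhausted; the real SDK
    // iterator fetches additional pages under the hood as needed.
    for it.HasNext(ctx) {
        item, err := it.Next(ctx)
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println(item)
    }
}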
@ -241,16 +241,28 @@ func newGet() *cobra.Command {
    cmd.Annotations = make(map[string]string)

    cmd.Args = func(cmd *cobra.Command, args []string) error {
        check := root.ExactArgs(1)
        return check(cmd, args)
    }

    cmd.PreRunE = root.MustWorkspaceClient
    cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        w := root.WorkspaceClient(ctx)

        if len(args) == 0 {
            promptSpinner := cmdio.Spinner(ctx)
            promptSpinner <- "No NAME argument specified. Loading names for Storage Credentials drop-down."
            names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx, catalog.ListStorageCredentialsRequest{})
            close(promptSpinner)
            if err != nil {
                return fmt.Errorf("failed to load names for Storage Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
            }
            id, err := cmdio.Select(ctx, names, "Name of the storage credential")
            if err != nil {
                return err
            }
            args = append(args, id)
        }
        if len(args) != 1 {
            return fmt.Errorf("expected to have name of the storage credential")
        }
        getReq.Name = args[0]

        response, err := w.StorageCredentials.Get(ctx, getReq)
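The added block is the standard prompt-on-missing-argument flow: show a spinner while candidate names load, offer a drop-down selection, then fall through to the normal single-argument path. A stripped-down, self-contained sketch of that control flow (selectName and the "my-credential" value are stand-ins for cmdio.Select and the SDK lookup, not the CLI's actual helpers):

package main

import (
    "errors"
    "fmt"
)

// selectName stands in for loading credential names and prompting the user.
func selectName(candidates []string) (string, error) {
    if len(candidates) == 0 {
        return "", errors.New("no storage credentials found")
    }
    return candidates[0], nil // a real prompt would ask the user to choose
}

func resolveName(args []string) (string, error) {
    if len(args) == 0 {
        // No NAME argument specified: fall back to an interactive drop-down.
        id, err := selectName([]string{"my-credential"})
        if err != nil {
            return "", err
        }
        args = append(args, id)
    }
    if len(args) != 1 {
        return "", errors.New("expected to have name of the storage credential")
    }
    return args[0], nil
}

func main() {
    name, err := resolveName(nil)
    if err != nil {
        fmt.Println("error:", err)
        return
    }
    fmt.Println(name) // prints the selected credential name
}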
2 go.mod

@ -5,7 +5,7 @@ go 1.22

require (
    github.com/Masterminds/semver/v3 v3.2.1 // MIT
    github.com/briandowns/spinner v1.23.1 // Apache 2.0
    github.com/databricks/databricks-sdk-go v0.45.0 // Apache 2.0
    github.com/databricks/databricks-sdk-go v0.44.0 // Apache 2.0
    github.com/fatih/color v1.17.0 // MIT
    github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
    github.com/google/uuid v1.6.0 // BSD-3-Clause
@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/databricks/databricks-sdk-go v0.45.0 h1:wdx5Wm/ESrahdHeq62WrjLeGjV4r722LLanD8ahI0Mo=
github.com/databricks/databricks-sdk-go v0.44.0 h1:9/FZACv4EFQIOYxfwYVKnY7v46xio9FKCw9tpKB2O/s=
github.com/databricks/databricks-sdk-go v0.45.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
github.com/databricks/databricks-sdk-go v0.44.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=