mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'databricks/main' into cp-summary-with-urls
commit 462ee2e4b4
@@ -1 +1 @@
f98c07f9c71f579de65d2587bb0292f83d10e55d
3eae49b444cac5a0118a3503e5b7ecef7f96527a
@@ -154,6 +154,7 @@ func new{{.PascalName}}() *cobra.Command {
"provider-exchanges delete-listing-from-exchange"
"provider-exchanges list-exchanges-for-listing"
"provider-exchanges list-listings-for-exchange"
"storage-credentials get"
-}}
{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}
@@ -75,6 +75,8 @@ cmd/workspace/online-tables/online-tables.go linguist-generated=true
cmd/workspace/permission-migration/permission-migration.go linguist-generated=true
cmd/workspace/permissions/permissions.go linguist-generated=true
cmd/workspace/pipelines/pipelines.go linguist-generated=true
cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go linguist-generated=true
cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go linguist-generated=true
cmd/workspace/policy-families/policy-families.go linguist-generated=true
cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true
cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true
@@ -94,6 +96,7 @@ cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=true
cmd/workspace/recipients/recipients.go linguist-generated=true
cmd/workspace/registered-models/registered-models.go linguist-generated=true
cmd/workspace/repos/repos.go linguist-generated=true
cmd/workspace/resource-quotas/resource-quotas.go linguist-generated=true
cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go linguist-generated=true
cmd/workspace/schemas/schemas.go linguist-generated=true
cmd/workspace/secrets/secrets.go linguist-generated=true
CHANGELOG.md
@@ -1,5 +1,28 @@
# Version changelog

## [Release] Release v0.227.1

CLI:
* Disable prompt for storage-credentials get command ([#1723](https://github.com/databricks/cli/pull/1723)).

Bundles:
* Do not treat empty path as a local path ([#1717](https://github.com/databricks/cli/pull/1717)).
* Correctly mark PyPI package name specs with multiple specifiers as remote libraries ([#1725](https://github.com/databricks/cli/pull/1725)).
* Improve error handling for /Volumes paths in mode: development ([#1716](https://github.com/databricks/cli/pull/1716)).

Internal:
* Ignore CLI version check on development builds of the CLI ([#1714](https://github.com/databricks/cli/pull/1714)).

API Changes:
* Added `databricks resource-quotas` command group.
* Added `databricks policy-compliance-for-clusters` command group.
* Added `databricks policy-compliance-for-jobs` command group.

OpenAPI commit 3eae49b444cac5a0118a3503e5b7ecef7f96527a (2024-08-21)
Dependency updates:
* Bump github.com/databricks/databricks-sdk-go from 0.44.0 to 0.45.0 ([#1719](https://github.com/databricks/cli/pull/1719)).
* Revert hc-install version to 0.7.0 ([#1711](https://github.com/databricks/cli/pull/1711)).

## [Release] Release v0.227.0

CLI:
@@ -64,6 +64,7 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) {
}

func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
var diags diag.Diagnostics
p := b.Config.Presets
u := b.Config.Workspace.CurrentUser

@@ -74,44 +75,56 @@ func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
// status to UNPAUSED at the level of an individual object, whic hwas
// historically allowed.)
if p.TriggerPauseStatus == config.Unpaused {
return diag.Diagnostics{{
diags = diags.Append(diag.Diagnostic{
Severity: diag.Error,
Summary: "target with 'mode: development' cannot set trigger pause status to UNPAUSED by default",
Locations: []dyn.Location{b.Config.GetLocation("presets.trigger_pause_status")},
}}
})
}

// Make sure this development copy has unique names and paths to avoid conflicts
if path := findNonUserPath(b); path != "" {
return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
if path == "artifact_path" && strings.HasPrefix(b.Config.Workspace.ArtifactPath, "/Volumes") {
// For Volumes paths we recommend including the current username as a substring
diags = diags.Extend(diag.Errorf("%s should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'", path))
} else {
// For non-Volumes paths recommend simply putting things in the home folder
diags = diags.Extend(diag.Errorf("%s must start with '~/' or contain the current username to ensure uniqueness when using 'mode: development'", path))
}
}
if p.NamePrefix != "" && !strings.Contains(p.NamePrefix, u.ShortName) && !strings.Contains(p.NamePrefix, u.UserName) {
// Resources such as pipelines require a unique name, e.g. '[dev steve] my_pipeline'.
// For this reason we require the name prefix to contain the current username;
// it's a pitfall for users if they don't include it and later find out that
// only a single user can do development deployments.
return diag.Diagnostics{{
diags = diags.Append(diag.Diagnostic{
Severity: diag.Error,
Summary: "prefix should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'",
Locations: []dyn.Location{b.Config.GetLocation("presets.name_prefix")},
}}
})
}
return nil
return diags
}

// findNonUserPath finds the first workspace path such as root_path that doesn't
// contain the current username or current user's shortname.
func findNonUserPath(b *bundle.Bundle) string {
containsName := func(path string) bool {
username := b.Config.Workspace.CurrentUser.UserName
shortname := b.Config.Workspace.CurrentUser.ShortName
return strings.Contains(path, username) || strings.Contains(path, shortname)
}

if b.Config.Workspace.RootPath != "" && !strings.Contains(b.Config.Workspace.RootPath, username) {
if b.Config.Workspace.RootPath != "" && !containsName(b.Config.Workspace.RootPath) {
return "root_path"
}
if b.Config.Workspace.StatePath != "" && !strings.Contains(b.Config.Workspace.StatePath, username) {
if b.Config.Workspace.StatePath != "" && !containsName(b.Config.Workspace.StatePath) {
return "state_path"
}
if b.Config.Workspace.FilePath != "" && !strings.Contains(b.Config.Workspace.FilePath, username) {
if b.Config.Workspace.FilePath != "" && !containsName(b.Config.Workspace.FilePath) {
return "file_path"
}
if b.Config.Workspace.ArtifactPath != "" && !strings.Contains(b.Config.Workspace.ArtifactPath, username) {
if b.Config.Workspace.ArtifactPath != "" && !containsName(b.Config.Workspace.ArtifactPath) {
return "artifact_path"
}
return ""
@@ -230,10 +230,20 @@ func TestValidateDevelopmentMode(t *testing.T) {
diags := validateDevelopmentMode(b)
require.NoError(t, diags.Error())

// Test with /Volumes path
b = mockBundle(config.Development)
b.Config.Workspace.ArtifactPath = "/Volumes/catalog/schema/lennart/libs"
diags = validateDevelopmentMode(b)
require.NoError(t, diags.Error())
b.Config.Workspace.ArtifactPath = "/Volumes/catalog/schema/libs"
diags = validateDevelopmentMode(b)
require.ErrorContains(t, diags.Error(), "artifact_path should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'")

// Test with a bundle that has a non-user path
b = mockBundle(config.Development)
b.Config.Workspace.RootPath = "/Shared/.bundle/x/y/state"
diags = validateDevelopmentMode(b)
require.ErrorContains(t, diags.Error(), "root_path")
require.ErrorContains(t, diags.Error(), "root_path must start with '~/' or contain the current username to ensure uniqueness when using 'mode: development'")

// Test with a bundle that has an unpaused trigger pause status
b = mockBundle(config.Development)
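Aside, not part of this commit: a minimal standalone Go sketch of the containsName check that findNonUserPath now uses. The username and shortname values are hypothetical; in a bundle they come from workspace.current_user, and the two /Volumes paths mirror the test cases above.

package main

import (
	"fmt"
	"strings"
)

func main() {
	username := "lennart@company.com" // hypothetical value
	shortname := "lennart"            // hypothetical value

	// Same check as the containsName closure introduced in findNonUserPath.
	containsName := func(path string) bool {
		return strings.Contains(path, username) || strings.Contains(path, shortname)
	}

	fmt.Println(containsName("/Volumes/catalog/schema/lennart/libs")) // true: accepted as user-unique
	fmt.Println(containsName("/Volumes/catalog/schema/libs"))         // false: triggers the artifact_path diagnostic
}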
@@ -1,19 +1,24 @@
package libraries

import "github.com/databricks/databricks-sdk-go/service/compute"
import (
"fmt"

func libraryPath(library *compute.Library) string {
"github.com/databricks/databricks-sdk-go/service/compute"
)

func libraryPath(library *compute.Library) (string, error) {
if library.Whl != "" {
return library.Whl
return library.Whl, nil
}
if library.Jar != "" {
return library.Jar
return library.Jar, nil
}
if library.Egg != "" {
return library.Egg
return library.Egg, nil
}
if library.Requirements != "" {
return library.Requirements
return library.Requirements, nil
}
return ""

return "", fmt.Errorf("not supported library type")
}
@@ -10,9 +10,27 @@ import (
func TestLibraryPath(t *testing.T) {
path := "/some/path"

assert.Equal(t, path, libraryPath(&compute.Library{Whl: path}))
assert.Equal(t, path, libraryPath(&compute.Library{Jar: path}))
assert.Equal(t, path, libraryPath(&compute.Library{Egg: path}))
assert.Equal(t, path, libraryPath(&compute.Library{Requirements: path}))
assert.Equal(t, "", libraryPath(&compute.Library{}))
p, err := libraryPath(&compute.Library{Whl: path})
assert.Equal(t, path, p)
assert.Nil(t, err)

p, err = libraryPath(&compute.Library{Jar: path})
assert.Equal(t, path, p)
assert.Nil(t, err)

p, err = libraryPath(&compute.Library{Egg: path})
assert.Equal(t, path, p)
assert.Nil(t, err)

p, err = libraryPath(&compute.Library{Requirements: path})
assert.Equal(t, path, p)
assert.Nil(t, err)

p, err = libraryPath(&compute.Library{})
assert.Equal(t, "", p)
assert.NotNil(t, err)

p, err = libraryPath(&compute.Library{Pypi: &compute.PythonPyPiLibrary{Package: "pypipackage"}})
assert.Equal(t, "", p)
assert.NotNil(t, err)
}
@@ -67,7 +67,12 @@ func FindTasksWithLocalLibraries(b *bundle.Bundle) []jobs.Task {

func isTaskWithLocalLibraries(task jobs.Task) bool {
for _, l := range task.Libraries {
if IsLibraryLocal(libraryPath(&l)) {
p, err := libraryPath(&l)
// If there's an error, skip the library because it's not of supported type
if err != nil {
continue
}
if IsLibraryLocal(p) {
return true
}
}
@@ -43,6 +43,10 @@ func IsLocalPath(p string) bool {
// We can't use IsLocalPath beacuse environment dependencies can be
// a pypi package name which can be misinterpreted as a local path by IsLocalPath.
func IsLibraryLocal(dep string) bool {
if dep == "" {
return false
}

possiblePrefixes := []string{
".",
}
@@ -68,9 +72,11 @@ func IsLibraryLocal(dep string) bool {

// ^[a-zA-Z0-9\-_]+: Matches the package name, allowing alphanumeric characters, dashes (-), and underscores (_).
// \[.*\])?: Optionally matches any extras specified in square brackets, e.g., [security].
// ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?)?: Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1).
// ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?): Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1).
// ,?: Optionally matches a comma (,) at the end of the specifier which is used to separate multiple specifiers.
// There can be multiple version specifiers separated by commas or no specifiers.
// Spec for package name and version specifier: https://pip.pypa.io/en/stable/reference/requirement-specifiers/
var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?)?$`)
var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?,?)*$`)

func isPackage(name string) bool {
if packageRegex.MatchString(name) {
@@ -48,6 +48,7 @@ func TestIsLibraryLocal(t *testing.T) {
{path: "../../local/*.whl", expected: true},
{path: "..\\..\\local\\*.whl", expected: true},
{path: "file://path/to/package/whl.whl", expected: true},
{path: "", expected: false},
{path: "pypipackage", expected: false},
{path: "/Volumes/catalog/schema/volume/path.whl", expected: false},
{path: "/Workspace/my_project/dist.whl", expected: false},
@@ -61,6 +62,8 @@ func TestIsLibraryLocal(t *testing.T) {
{path: "beautifulsoup4 ~= 4.12.3", expected: false},
{path: "beautifulsoup4[security, tests]", expected: false},
{path: "beautifulsoup4[security, tests] ~= 4.12.3", expected: false},
{path: "beautifulsoup4>=1.0.0,<2.0.0", expected: false},
{path: "beautifulsoup4>=1.0.0,~=1.2.0,<2.0.0", expected: false},
{path: "https://github.com/pypa/pip/archive/22.0.2.zip", expected: false},
{path: "pip @ https://github.com/pypa/pip/archive/22.0.2.zip", expected: false},
{path: "requests [security] @ https://github.com/psf/requests/archive/refs/heads/main.zip", expected: false},
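Aside, not part of this commit: a minimal standalone Go sketch showing the effect of the widened specifier regex. The pattern string is copied verbatim from the change above and the sample inputs come from the test table; this is illustration only, not the library's own code.

package main

import (
	"fmt"
	"regexp"
)

// Pattern copied verbatim from the updated IsLibraryLocal change above.
var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?,?)*$`)

func main() {
	// A PyPI spec with multiple comma-separated specifiers is now recognized
	// as a package name (i.e. a remote library), not a local path.
	fmt.Println(packageRegex.MatchString("beautifulsoup4>=1.0.0,<2.0.0")) // true
	// A relative wheel path still does not look like a package name.
	fmt.Println(packageRegex.MatchString("../../local/*.whl")) // false
}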
@@ -29,8 +29,8 @@ func IsWorkspacePath(path string) bool {

// IsWorkspaceLibrary returns true if the specified library refers to a workspace path.
func IsWorkspaceLibrary(library *compute.Library) bool {
path := libraryPath(library)
if path == "" {
path, err := libraryPath(library)
if err != nil {
return false
}

@@ -223,6 +223,17 @@ func TestNoIncompatibleWheelTasks(t *testing.T) {
{Whl: "./dist/test.whl"},
},
},
{
TaskKey: "key7",
PythonWheelTask: &jobs.PythonWheelTask{},
ExistingClusterId: "test-key-2",
Libraries: []compute.Library{
{Whl: "signol_lib-0.4.4-20240822+prod-py3-none-any.whl"},
{Pypi: &compute.PythonPyPiLibrary{
Package: "requests==2.25.1",
}},
},
},
},
},
},
@@ -241,6 +252,46 @@ func TestNoIncompatibleWheelTasks(t *testing.T) {
require.False(t, hasIncompatibleWheelTasks(context.Background(), b))
}

func TestTasksWithPyPiPackageAreCompatible(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {
JobSettings: &jobs.JobSettings{
JobClusters: []jobs.JobCluster{
{
JobClusterKey: "cluster1",
NewCluster: compute.ClusterSpec{
SparkVersion: "12.2.x-scala2.12",
},
},
},
Tasks: []jobs.Task{
{
TaskKey: "key1",
PythonWheelTask: &jobs.PythonWheelTask{},
ExistingClusterId: "test-key-2",
Libraries: []compute.Library{
{Pypi: &compute.PythonPyPiLibrary{
Package: "requests==2.25.1",
}},
},
},
},
},
},
},
},
},
}

m := mocks.NewMockWorkspaceClient(t)
b.SetWorkpaceClient(m.WorkspaceClient)

require.False(t, hasIncompatibleWheelTasks(context.Background(), b))
}

func TestNoWarningWhenPythonWheelWrapperIsOn(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
@@ -85,6 +85,12 @@
"enabled": {
"description": ""
},
"import": {
"description": "",
"items": {
"description": ""
}
},
"venv_path": {
"description": ""
}
@@ -130,6 +136,29 @@
}
}
},
"presets": {
"description": "",
"properties": {
"jobs_max_concurrent_runs": {
"description": ""
},
"name_prefix": {
"description": ""
},
"pipelines_development": {
"description": ""
},
"tags": {
"description": "",
"additionalproperties": {
"description": ""
}
},
"trigger_pause_status": {
"description": ""
}
}
},
"resources": {
"description": "Collection of Databricks resources to deploy.",
"properties": {
@@ -3079,6 +3108,12 @@
"items": {
"description": ""
}
},
"paths": {
"description": "",
"items": {
"description": ""
}
}
}
},
@@ -3202,6 +3237,29 @@
}
}
},
"presets": {
"description": "",
"properties": {
"jobs_max_concurrent_runs": {
"description": ""
},
"name_prefix": {
"description": ""
},
"pipelines_development": {
"description": ""
},
"tags": {
"description": "",
"additionalproperties": {
"description": ""
}
},
"trigger_pause_status": {
"description": ""
}
}
},
"resources": {
"description": "Collection of Databricks resources to deploy.",
"properties": {
@@ -6151,6 +6209,12 @@
"items": {
"description": ""
}
},
"paths": {
"description": "",
"items": {
"description": ""
}
}
}
},
@@ -44,6 +44,8 @@ import (
permission_migration "github.com/databricks/cli/cmd/workspace/permission-migration"
permissions "github.com/databricks/cli/cmd/workspace/permissions"
pipelines "github.com/databricks/cli/cmd/workspace/pipelines"
policy_compliance_for_clusters "github.com/databricks/cli/cmd/workspace/policy-compliance-for-clusters"
policy_compliance_for_jobs "github.com/databricks/cli/cmd/workspace/policy-compliance-for-jobs"
policy_families "github.com/databricks/cli/cmd/workspace/policy-families"
provider_exchange_filters "github.com/databricks/cli/cmd/workspace/provider-exchange-filters"
provider_exchanges "github.com/databricks/cli/cmd/workspace/provider-exchanges"
@@ -63,6 +65,7 @@ import (
recipients "github.com/databricks/cli/cmd/workspace/recipients"
registered_models "github.com/databricks/cli/cmd/workspace/registered-models"
repos "github.com/databricks/cli/cmd/workspace/repos"
resource_quotas "github.com/databricks/cli/cmd/workspace/resource-quotas"
schemas "github.com/databricks/cli/cmd/workspace/schemas"
secrets "github.com/databricks/cli/cmd/workspace/secrets"
service_principals "github.com/databricks/cli/cmd/workspace/service-principals"
@@ -130,6 +133,8 @@ func All() []*cobra.Command {
out = append(out, permission_migration.New())
out = append(out, permissions.New())
out = append(out, pipelines.New())
out = append(out, policy_compliance_for_clusters.New())
out = append(out, policy_compliance_for_jobs.New())
out = append(out, policy_families.New())
out = append(out, provider_exchange_filters.New())
out = append(out, provider_exchanges.New())
@@ -149,6 +154,7 @@ func All() []*cobra.Command {
out = append(out, recipients.New())
out = append(out, registered_models.New())
out = append(out, repos.New())
out = append(out, resource_quotas.New())
out = append(out, schemas.New())
out = append(out, secrets.New())
out = append(out, service_principals.New())
@@ -75,6 +75,7 @@ func newCreate() *cobra.Command {
cmd.Flags().StringVar(&createReq.AccessPoint, "access-point", createReq.AccessPoint, `The AWS access point to use when accesing s3 for this external location.`)
cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`)
// TODO: complex arg: encryption_details
cmd.Flags().BoolVar(&createReq.Fallback, "fallback", createReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`)
cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Indicates whether the external location is read-only.`)
cmd.Flags().BoolVar(&createReq.SkipValidation, "skip-validation", createReq.SkipValidation, `Skips validation of the storage credential associated with the external location.`)

@@ -347,6 +348,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
cmd.Flags().StringVar(&updateReq.CredentialName, "credential-name", updateReq.CredentialName, `Name of the storage credential used with this location.`)
// TODO: complex arg: encryption_details
cmd.Flags().BoolVar(&updateReq.Fallback, "fallback", updateReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`)
cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`)
cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`)
cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go (generated, executable file)
@@ -0,0 +1,260 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.

package policy_compliance_for_clusters

import (
"fmt"

"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/spf13/cobra"
)

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)

func New() *cobra.Command {
cmd := &cobra.Command{
Use: "policy-compliance-for-clusters",
Short: `The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your workspace.`,
Long: `The policy compliance APIs allow you to view and manage the policy compliance
status of clusters in your workspace.

A cluster is compliant with its policy if its configuration satisfies all its
policy rules. Clusters could be out of compliance if their policy was updated
after the cluster was last edited.

The get and list compliance APIs allow you to view the policy compliance
status of a cluster. The enforce compliance API allows you to update a cluster
to be compliant with the current version of its policy.`,
GroupID: "compute",
Annotations: map[string]string{
"package": "compute",
},
}

// Add methods
cmd.AddCommand(newEnforceCompliance())
cmd.AddCommand(newGetCompliance())
cmd.AddCommand(newListCompliance())

// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}

return cmd
}

// start enforce-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var enforceComplianceOverrides []func(
*cobra.Command,
*compute.EnforceClusterComplianceRequest,
)

func newEnforceCompliance() *cobra.Command {
cmd := &cobra.Command{}

var enforceComplianceReq compute.EnforceClusterComplianceRequest
var enforceComplianceJson flags.JsonFlag

// TODO: short flags
cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`)

cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews the changes that would be made to a cluster to enforce compliance but does not update the cluster.`)

cmd.Use = "enforce-compliance CLUSTER_ID"
cmd.Short = `Enforce cluster policy compliance.`
cmd.Long = `Enforce cluster policy compliance.

Updates a cluster to be compliant with the current version of its policy. A
cluster can be updated if it is in a RUNNING or TERMINATED state.

If a cluster is updated while in a RUNNING state, it will be restarted so
that the new attributes can take effect.

If a cluster is updated while in a TERMINATED state, it will remain
TERMINATED. The next time the cluster is started, the new attributes will
take effect.

Clusters created by the Databricks Jobs, DLT, or Models services cannot be
enforced by this API. Instead, use the "Enforce job policy compliance" API to
enforce policy compliance on jobs.

Arguments:
CLUSTER_ID: The ID of the cluster you want to enforce policy compliance on.`

cmd.Annotations = make(map[string]string)

cmd.Args = func(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("json") {
err := root.ExactArgs(0)(cmd, args)
if err != nil {
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input")
}
return nil
}
check := root.ExactArgs(1)
return check(cmd, args)
}

cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)

if cmd.Flags().Changed("json") {
err = enforceComplianceJson.Unmarshal(&enforceComplianceReq)
if err != nil {
return err
}
}
if !cmd.Flags().Changed("json") {
enforceComplianceReq.ClusterId = args[0]
}

response, err := w.PolicyComplianceForClusters.EnforceCompliance(ctx, enforceComplianceReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}

// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions

// Apply optional overrides to this command.
for _, fn := range enforceComplianceOverrides {
fn(cmd, &enforceComplianceReq)
}

return cmd
}

// start get-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getComplianceOverrides []func(
*cobra.Command,
*compute.GetClusterComplianceRequest,
)

func newGetCompliance() *cobra.Command {
cmd := &cobra.Command{}

var getComplianceReq compute.GetClusterComplianceRequest

// TODO: short flags

cmd.Use = "get-compliance CLUSTER_ID"
cmd.Short = `Get cluster policy compliance.`
cmd.Long = `Get cluster policy compliance.

Returns the policy compliance status of a cluster. Clusters could be out of
compliance if their policy was updated after the cluster was last edited.

Arguments:
CLUSTER_ID: The ID of the cluster to get the compliance status`

cmd.Annotations = make(map[string]string)

cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}

cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)

getComplianceReq.ClusterId = args[0]

response, err := w.PolicyComplianceForClusters.GetCompliance(ctx, getComplianceReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}

// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions

// Apply optional overrides to this command.
for _, fn := range getComplianceOverrides {
fn(cmd, &getComplianceReq)
}

return cmd
}

// start list-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listComplianceOverrides []func(
*cobra.Command,
*compute.ListClusterCompliancesRequest,
)

func newListCompliance() *cobra.Command {
cmd := &cobra.Command{}

var listComplianceReq compute.ListClusterCompliancesRequest

// TODO: short flags

cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`)
cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`)

cmd.Use = "list-compliance POLICY_ID"
cmd.Short = `List cluster policy compliance.`
cmd.Long = `List cluster policy compliance.

Returns the policy compliance status of all clusters that use a given policy.
Clusters could be out of compliance if their policy was updated after the
cluster was last edited.

Arguments:
POLICY_ID: Canonical unique identifier for the cluster policy.`

cmd.Annotations = make(map[string]string)

cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}

cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)

listComplianceReq.PolicyId = args[0]

response := w.PolicyComplianceForClusters.ListCompliance(ctx, listComplianceReq)
return cmdio.RenderIterator(ctx, response)
}

// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions

// Apply optional overrides to this command.
for _, fn := range listComplianceOverrides {
fn(cmd, &listComplianceReq)
}

return cmd
}

// end service PolicyComplianceForClusters
cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go (generated, executable file)
@@ -0,0 +1,262 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.

package policy_compliance_for_jobs

import (
"fmt"

"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/spf13/cobra"
)

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)

func New() *cobra.Command {
cmd := &cobra.Command{
Use: "policy-compliance-for-jobs",
Short: `The compliance APIs allow you to view and manage the policy compliance status of jobs in your workspace.`,
Long: `The compliance APIs allow you to view and manage the policy compliance status
of jobs in your workspace. This API currently only supports compliance
controls for cluster policies.

A job is in compliance if its cluster configurations satisfy the rules of all
their respective cluster policies. A job could be out of compliance if a
cluster policy it uses was updated after the job was last edited. The job is
considered out of compliance if any of its clusters no longer comply with
their updated policies.

The get and list compliance APIs allow you to view the policy compliance
status of a job. The enforce compliance API allows you to update a job so that
it becomes compliant with all of its policies.`,
GroupID: "jobs",
Annotations: map[string]string{
"package": "jobs",
},
}

// Add methods
cmd.AddCommand(newEnforceCompliance())
cmd.AddCommand(newGetCompliance())
cmd.AddCommand(newListCompliance())

// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}

return cmd
}

// start enforce-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var enforceComplianceOverrides []func(
*cobra.Command,
*jobs.EnforcePolicyComplianceRequest,
)

func newEnforceCompliance() *cobra.Command {
cmd := &cobra.Command{}

var enforceComplianceReq jobs.EnforcePolicyComplianceRequest
var enforceComplianceJson flags.JsonFlag

// TODO: short flags
cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`)

cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews changes made to the job to comply with its policy, but does not update the job.`)

cmd.Use = "enforce-compliance JOB_ID"
cmd.Short = `Enforce job policy compliance.`
cmd.Long = `Enforce job policy compliance.

Updates a job so the job clusters that are created when running the job
(specified in new_cluster) are compliant with the current versions of their
respective cluster policies. All-purpose clusters used in the job will not be
updated.

Arguments:
JOB_ID: The ID of the job you want to enforce policy compliance on.`

cmd.Annotations = make(map[string]string)

cmd.Args = func(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("json") {
err := root.ExactArgs(0)(cmd, args)
if err != nil {
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'job_id' in your JSON input")
}
return nil
}
check := root.ExactArgs(1)
return check(cmd, args)
}

cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)

if cmd.Flags().Changed("json") {
err = enforceComplianceJson.Unmarshal(&enforceComplianceReq)
if err != nil {
return err
}
}
if !cmd.Flags().Changed("json") {
_, err = fmt.Sscan(args[0], &enforceComplianceReq.JobId)
if err != nil {
return fmt.Errorf("invalid JOB_ID: %s", args[0])
}
}

response, err := w.PolicyComplianceForJobs.EnforceCompliance(ctx, enforceComplianceReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}

// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions

// Apply optional overrides to this command.
for _, fn := range enforceComplianceOverrides {
fn(cmd, &enforceComplianceReq)
}

return cmd
}

// start get-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getComplianceOverrides []func(
*cobra.Command,
*jobs.GetPolicyComplianceRequest,
)

func newGetCompliance() *cobra.Command {
cmd := &cobra.Command{}

var getComplianceReq jobs.GetPolicyComplianceRequest

// TODO: short flags

cmd.Use = "get-compliance JOB_ID"
cmd.Short = `Get job policy compliance.`
cmd.Long = `Get job policy compliance.

Returns the policy compliance status of a job. Jobs could be out of compliance
if a cluster policy they use was updated after the job was last edited and
some of its job clusters no longer comply with their updated policies.

Arguments:
JOB_ID: The ID of the job whose compliance status you are requesting.`

cmd.Annotations = make(map[string]string)

cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}

cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)

_, err = fmt.Sscan(args[0], &getComplianceReq.JobId)
if err != nil {
return fmt.Errorf("invalid JOB_ID: %s", args[0])
}

response, err := w.PolicyComplianceForJobs.GetCompliance(ctx, getComplianceReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}

// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions

// Apply optional overrides to this command.
for _, fn := range getComplianceOverrides {
fn(cmd, &getComplianceReq)
}

return cmd
}

// start list-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listComplianceOverrides []func(
*cobra.Command,
*jobs.ListJobComplianceRequest,
)

func newListCompliance() *cobra.Command {
cmd := &cobra.Command{}

var listComplianceReq jobs.ListJobComplianceRequest

// TODO: short flags

cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`)
cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`)

cmd.Use = "list-compliance POLICY_ID"
cmd.Short = `List job policy compliance.`
cmd.Long = `List job policy compliance.

Returns the policy compliance status of all jobs that use a given policy. Jobs
could be out of compliance if a cluster policy they use was updated after the
job was last edited and its job clusters no longer comply with the updated
policy.

Arguments:
POLICY_ID: Canonical unique identifier for the cluster policy.`

cmd.Annotations = make(map[string]string)

cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}

cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)

listComplianceReq.PolicyId = args[0]

response := w.PolicyComplianceForJobs.ListCompliance(ctx, listComplianceReq)
return cmdio.RenderIterator(ctx, response)
}

// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions

// Apply optional overrides to this command.
for _, fn := range listComplianceOverrides {
fn(cmd, &listComplianceReq)
}

return cmd
}

// end service PolicyComplianceForJobs
@@ -16,9 +16,9 @@ var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "query-history",
Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints, serverless compute, and DLT.`,
Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute.`,
Long: `A service responsible for storing and retrieving the list of queries run
against SQL endpoints, serverless compute, and DLT.`,
against SQL endpoints and serverless compute.`,
GroupID: "sql",
Annotations: map[string]string{
"package": "sql",
@@ -53,6 +53,7 @@ func newList() *cobra.Command {
// TODO: short flags

// TODO: complex arg: filter_by
cmd.Flags().BoolVar(&listReq.IncludeMetrics, "include-metrics", listReq.IncludeMetrics, `Whether to include the query metrics with each query.`)
cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Limit the number of results returned in one page.`)
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`)

@@ -60,8 +61,7 @@ func newList() *cobra.Command {
cmd.Short = `List Queries.`
cmd.Long = `List Queries.

List the history of queries through SQL warehouses, serverless compute, and
DLT.
List the history of queries through SQL warehouses, and serverless compute.

You can filter by user ID, warehouse ID, status, and time range. Most recently
started queries are returned first (up to max_results in request). The
@@ -0,0 +1,168 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.

package resource_quotas

import (
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/spf13/cobra"
)

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)

func New() *cobra.Command {
cmd := &cobra.Command{
Use: "resource-quotas",
Short: `Unity Catalog enforces resource quotas on all securable objects, which limits the number of resources that can be created.`,
Long: `Unity Catalog enforces resource quotas on all securable objects, which limits
the number of resources that can be created. Quotas are expressed in terms of
a resource type and a parent (for example, tables per metastore or schemas per
catalog). The resource quota APIs enable you to monitor your current usage and
limits. For more information on resource quotas see the [Unity Catalog
documentation].

[Unity Catalog documentation]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#resource-quotas`,
GroupID: "catalog",
Annotations: map[string]string{
"package": "catalog",
},
}

// Add methods
cmd.AddCommand(newGetQuota())
cmd.AddCommand(newListQuotas())

// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}

return cmd
}

// start get-quota command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getQuotaOverrides []func(
*cobra.Command,
*catalog.GetQuotaRequest,
)

func newGetQuota() *cobra.Command {
cmd := &cobra.Command{}

var getQuotaReq catalog.GetQuotaRequest

// TODO: short flags

cmd.Use = "get-quota PARENT_SECURABLE_TYPE PARENT_FULL_NAME QUOTA_NAME"
cmd.Short = `Get information for a single resource quota.`
cmd.Long = `Get information for a single resource quota.

The GetQuota API returns usage information for a single resource quota,
defined as a child-parent pair. This API also refreshes the quota count if it
is out of date. Refreshes are triggered asynchronously. The updated count
might not be returned in the first call.

Arguments:
PARENT_SECURABLE_TYPE: Securable type of the quota parent.
PARENT_FULL_NAME: Full name of the parent resource. Provide the metastore ID if the parent
is a metastore.
QUOTA_NAME: Name of the quota. Follows the pattern of the quota type, with "-quota"
added as a suffix.`

cmd.Annotations = make(map[string]string)

cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(3)
return check(cmd, args)
}

cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)

getQuotaReq.ParentSecurableType = args[0]
getQuotaReq.ParentFullName = args[1]
getQuotaReq.QuotaName = args[2]

response, err := w.ResourceQuotas.GetQuota(ctx, getQuotaReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}

// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions

// Apply optional overrides to this command.
for _, fn := range getQuotaOverrides {
fn(cmd, &getQuotaReq)
}

return cmd
}

// start list-quotas command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listQuotasOverrides []func(
*cobra.Command,
*catalog.ListQuotasRequest,
)

func newListQuotas() *cobra.Command {
cmd := &cobra.Command{}

var listQuotasReq catalog.ListQuotasRequest

// TODO: short flags

cmd.Flags().IntVar(&listQuotasReq.MaxResults, "max-results", listQuotasReq.MaxResults, `The number of quotas to return.`)
cmd.Flags().StringVar(&listQuotasReq.PageToken, "page-token", listQuotasReq.PageToken, `Opaque token for the next page of results.`)

cmd.Use = "list-quotas"
cmd.Short = `List all resource quotas under a metastore.`
cmd.Long = `List all resource quotas under a metastore.

ListQuotas returns all quota values under the metastore. There are no SLAs on
the freshness of the counts returned. This API does not trigger a refresh of
quota counts.`

cmd.Annotations = make(map[string]string)

cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(0)
return check(cmd, args)
}

cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)

response := w.ResourceQuotas.ListQuotas(ctx, listQuotasReq)
return cmdio.RenderIterator(ctx, response)
}

// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions

// Apply optional overrides to this command.
for _, fn := range listQuotasOverrides {
fn(cmd, &listQuotasReq)
}

return cmd
}

// end service ResourceQuotas
@@ -241,28 +241,16 @@ func newGet() *cobra.Command {

cmd.Annotations = make(map[string]string)

cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}

cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)

if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No NAME argument specified. Loading names for Storage Credentials drop-down."
names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx, catalog.ListStorageCredentialsRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Storage Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "Name of the storage credential")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have name of the storage credential")
}
getReq.Name = args[0]

response, err := w.StorageCredentials.Get(ctx, getReq)
go.mod
@@ -5,7 +5,7 @@ go 1.22
require (
github.com/Masterminds/semver/v3 v3.2.1 // MIT
github.com/briandowns/spinner v1.23.1 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.44.0 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.45.0 // Apache 2.0
github.com/fatih/color v1.17.0 // MIT
github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
github.com/google/uuid v1.6.0 // BSD-3-Clause
@@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/databricks/databricks-sdk-go v0.44.0 h1:9/FZACv4EFQIOYxfwYVKnY7v46xio9FKCw9tpKB2O/s=
github.com/databricks/databricks-sdk-go v0.44.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
github.com/databricks/databricks-sdk-go v0.45.0 h1:wdx5Wm/ESrahdHeq62WrjLeGjV4r722LLanD8ahI0Mo=
github.com/databricks/databricks-sdk-go v0.45.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=