diff --git a/.codegen.json b/.codegen.json index 735e1ee31..9a5ef0a98 100644 --- a/.codegen.json +++ b/.codegen.json @@ -1,15 +1,19 @@ { - "formatter": "go run golang.org/x/tools/cmd/goimports@latest -w $FILENAMES && go fmt ./...", + "formatter": "go run golang.org/x/tools/cmd/goimports@latest -w cmd && go fmt ./...", "services": { - ".codegen/service.go.tmpl": "cmd/{{if .IsAccounts}}account{{else}}workspace{{end}}/{{(.TrimPrefix \"account\").KebabName}}/{{(.TrimPrefix \"account\").KebabName}}.go" + ".codegen/service.go.tmpl": "cmd/{{if .IsAccounts}}account{{else}}workspace{{end}}/{{(.TrimPrefix \"account\").KebabName}}/{{(.TrimPrefix \"account\").KebabName}}.go", + ".codegen/cmd_script.tmpl": "acceptance/help/cmd/{{if .IsAccounts}}account{{else}}workspace{{end}}/{{(.TrimPrefix \"account\").KebabName}}/{{(.TrimPrefix \"account\").KebabName}}/script" }, "batch": { ".codegen/cmds-workspace.go.tmpl": "cmd/workspace/cmd.go", ".codegen/cmds-account.go.tmpl": "cmd/account/cmd.go" }, "toolchain": { - "required": ["go"], + "required": [ + "go" + ], "post_generate": [ + "go test ./acceptance -v -update -run 'TestAccept/help/cmd' ", "go test -timeout 240s -run TestConsistentDatabricksSdkVersion github.com/databricks/cli/internal/build", "make schema", "echo 'bundle/internal/tf/schema/\\*.go linguist-generated=true' >> ./.gitattributes", diff --git a/.codegen/cmd_script.tmpl b/.codegen/cmd_script.tmpl new file mode 100644 index 000000000..229bc9161 --- /dev/null +++ b/.codegen/cmd_script.tmpl @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +{{- $isAccount := .IsAccounts }} +{{- $cmdGrp := (.TrimPrefix "account").KebabName }} +{{- range .Methods}} +trace $CLI {{if $isAccount }}account {{end}}{{$cmdGrp}} {{.KebabName}} --help +{{- end}} diff --git a/.gitattributes b/.gitattributes index ebe94ed8e..aca4c1b25 100755 --- a/.gitattributes +++ b/.gitattributes @@ -1,3 +1,139 @@ +acceptance/help/cmd/account/access-control/access-control/script linguist-generated=true +acceptance/help/cmd/account/billable-usage/billable-usage/script linguist-generated=true +acceptance/help/cmd/account/budgets/budgets/script linguist-generated=true +acceptance/help/cmd/account/credentials/credentials/script linguist-generated=true +acceptance/help/cmd/account/csp-enablement-account/csp-enablement-account/script linguist-generated=true +acceptance/help/cmd/account/custom-app-integration/custom-app-integration/script linguist-generated=true +acceptance/help/cmd/account/disable-legacy-features/disable-legacy-features/script linguist-generated=true +acceptance/help/cmd/account/encryption-keys/encryption-keys/script linguist-generated=true +acceptance/help/cmd/account/esm-enablement-account/esm-enablement-account/script linguist-generated=true +acceptance/help/cmd/account/federation-policy/federation-policy/script linguist-generated=true +acceptance/help/cmd/account/groups/groups/script linguist-generated=true +acceptance/help/cmd/account/ip-access-lists/ip-access-lists/script linguist-generated=true +acceptance/help/cmd/account/log-delivery/log-delivery/script linguist-generated=true +acceptance/help/cmd/account/metastore-assignments/metastore-assignments/script linguist-generated=true +acceptance/help/cmd/account/metastores/metastores/script linguist-generated=true +acceptance/help/cmd/account/network-connectivity/network-connectivity/script linguist-generated=true +acceptance/help/cmd/account/networks/networks/script linguist-generated=true 
+acceptance/help/cmd/account/o-auth-published-apps/o-auth-published-apps/script linguist-generated=true +acceptance/help/cmd/account/personal-compute/personal-compute/script linguist-generated=true +acceptance/help/cmd/account/private-access/private-access/script linguist-generated=true +acceptance/help/cmd/account/published-app-integration/published-app-integration/script linguist-generated=true +acceptance/help/cmd/account/service-principal-federation-policy/service-principal-federation-policy/script linguist-generated=true +acceptance/help/cmd/account/service-principal-secrets/service-principal-secrets/script linguist-generated=true +acceptance/help/cmd/account/service-principals/service-principals/script linguist-generated=true +acceptance/help/cmd/account/settings/settings/script linguist-generated=true +acceptance/help/cmd/account/storage-credentials/storage-credentials/script linguist-generated=true +acceptance/help/cmd/account/storage/storage/script linguist-generated=true +acceptance/help/cmd/account/usage-dashboards/usage-dashboards/script linguist-generated=true +acceptance/help/cmd/account/users/users/script linguist-generated=true +acceptance/help/cmd/account/vpc-endpoints/vpc-endpoints/script linguist-generated=true +acceptance/help/cmd/account/workspace-assignment/workspace-assignment/script linguist-generated=true +acceptance/help/cmd/account/workspaces/workspaces/script linguist-generated=true +acceptance/help/cmd/workspace/access-control-proxy/access-control-proxy/script linguist-generated=true +acceptance/help/cmd/workspace/access-control/access-control/script linguist-generated=true +acceptance/help/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy/script linguist-generated=true +acceptance/help/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains/script linguist-generated=true +acceptance/help/cmd/workspace/alerts-legacy/alerts-legacy/script linguist-generated=true +acceptance/help/cmd/workspace/alerts/alerts/script linguist-generated=true +acceptance/help/cmd/workspace/apps/apps/script linguist-generated=true +acceptance/help/cmd/workspace/artifact-allowlists/artifact-allowlists/script linguist-generated=true +acceptance/help/cmd/workspace/automatic-cluster-update/automatic-cluster-update/script linguist-generated=true +acceptance/help/cmd/workspace/catalogs/catalogs/script linguist-generated=true +acceptance/help/cmd/workspace/clean-room-assets/clean-room-assets/script linguist-generated=true +acceptance/help/cmd/workspace/clean-room-task-runs/clean-room-task-runs/script linguist-generated=true +acceptance/help/cmd/workspace/clean-rooms/clean-rooms/script linguist-generated=true +acceptance/help/cmd/workspace/cluster-policies/cluster-policies/script linguist-generated=true +acceptance/help/cmd/workspace/clusters/clusters/script linguist-generated=true +acceptance/help/cmd/workspace/command-execution/command-execution/script linguist-generated=true +acceptance/help/cmd/workspace/compliance-security-profile/compliance-security-profile/script linguist-generated=true +acceptance/help/cmd/workspace/connections/connections/script linguist-generated=true +acceptance/help/cmd/workspace/consumer-fulfillments/consumer-fulfillments/script linguist-generated=true +acceptance/help/cmd/workspace/consumer-installations/consumer-installations/script linguist-generated=true +acceptance/help/cmd/workspace/consumer-listings/consumer-listings/script linguist-generated=true 
+acceptance/help/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests/script linguist-generated=true +acceptance/help/cmd/workspace/consumer-providers/consumer-providers/script linguist-generated=true +acceptance/help/cmd/workspace/credentials-manager/credentials-manager/script linguist-generated=true +acceptance/help/cmd/workspace/credentials/credentials/script linguist-generated=true +acceptance/help/cmd/workspace/current-user/current-user/script linguist-generated=true +acceptance/help/cmd/workspace/dashboard-widgets/dashboard-widgets/script linguist-generated=true +acceptance/help/cmd/workspace/dashboards/dashboards/script linguist-generated=true +acceptance/help/cmd/workspace/data-sources/data-sources/script linguist-generated=true +acceptance/help/cmd/workspace/dbfs/dbfs/script linguist-generated=true +acceptance/help/cmd/workspace/dbsql-permissions/dbsql-permissions/script linguist-generated=true +acceptance/help/cmd/workspace/default-namespace/default-namespace/script linguist-generated=true +acceptance/help/cmd/workspace/disable-legacy-access/disable-legacy-access/script linguist-generated=true +acceptance/help/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs/script linguist-generated=true +acceptance/help/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring/script linguist-generated=true +acceptance/help/cmd/workspace/experiments/experiments/script linguist-generated=true +acceptance/help/cmd/workspace/external-locations/external-locations/script linguist-generated=true +acceptance/help/cmd/workspace/files/files/script linguist-generated=true +acceptance/help/cmd/workspace/functions/functions/script linguist-generated=true +acceptance/help/cmd/workspace/genie/genie/script linguist-generated=true +acceptance/help/cmd/workspace/git-credentials/git-credentials/script linguist-generated=true +acceptance/help/cmd/workspace/global-init-scripts/global-init-scripts/script linguist-generated=true +acceptance/help/cmd/workspace/grants/grants/script linguist-generated=true +acceptance/help/cmd/workspace/groups/groups/script linguist-generated=true +acceptance/help/cmd/workspace/instance-pools/instance-pools/script linguist-generated=true +acceptance/help/cmd/workspace/instance-profiles/instance-profiles/script linguist-generated=true +acceptance/help/cmd/workspace/ip-access-lists/ip-access-lists/script linguist-generated=true +acceptance/help/cmd/workspace/jobs/jobs/script linguist-generated=true +acceptance/help/cmd/workspace/lakeview/lakeview/script linguist-generated=true +acceptance/help/cmd/workspace/libraries/libraries/script linguist-generated=true +acceptance/help/cmd/workspace/metastores/metastores/script linguist-generated=true +acceptance/help/cmd/workspace/model-registry/model-registry/script linguist-generated=true +acceptance/help/cmd/workspace/model-versions/model-versions/script linguist-generated=true +acceptance/help/cmd/workspace/notification-destinations/notification-destinations/script linguist-generated=true +acceptance/help/cmd/workspace/online-tables/online-tables/script linguist-generated=true +acceptance/help/cmd/workspace/permission-migration/permission-migration/script linguist-generated=true +acceptance/help/cmd/workspace/permissions/permissions/script linguist-generated=true +acceptance/help/cmd/workspace/pipelines/pipelines/script linguist-generated=true +acceptance/help/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters/script linguist-generated=true 
+acceptance/help/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs/script linguist-generated=true +acceptance/help/cmd/workspace/policy-families/policy-families/script linguist-generated=true +acceptance/help/cmd/workspace/provider-exchange-filters/provider-exchange-filters/script linguist-generated=true +acceptance/help/cmd/workspace/provider-exchanges/provider-exchanges/script linguist-generated=true +acceptance/help/cmd/workspace/provider-files/provider-files/script linguist-generated=true +acceptance/help/cmd/workspace/provider-listings/provider-listings/script linguist-generated=true +acceptance/help/cmd/workspace/provider-personalization-requests/provider-personalization-requests/script linguist-generated=true +acceptance/help/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards/script linguist-generated=true +acceptance/help/cmd/workspace/provider-providers/provider-providers/script linguist-generated=true +acceptance/help/cmd/workspace/providers/providers/script linguist-generated=true +acceptance/help/cmd/workspace/quality-monitors/quality-monitors/script linguist-generated=true +acceptance/help/cmd/workspace/queries-legacy/queries-legacy/script linguist-generated=true +acceptance/help/cmd/workspace/queries/queries/script linguist-generated=true +acceptance/help/cmd/workspace/query-history/query-history/script linguist-generated=true +acceptance/help/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy/script linguist-generated=true +acceptance/help/cmd/workspace/query-visualizations/query-visualizations/script linguist-generated=true +acceptance/help/cmd/workspace/recipient-activation/recipient-activation/script linguist-generated=true +acceptance/help/cmd/workspace/recipients/recipients/script linguist-generated=true +acceptance/help/cmd/workspace/registered-models/registered-models/script linguist-generated=true +acceptance/help/cmd/workspace/repos/repos/script linguist-generated=true +acceptance/help/cmd/workspace/resource-quotas/resource-quotas/script linguist-generated=true +acceptance/help/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins/script linguist-generated=true +acceptance/help/cmd/workspace/schemas/schemas/script linguist-generated=true +acceptance/help/cmd/workspace/secrets/secrets/script linguist-generated=true +acceptance/help/cmd/workspace/service-principals/service-principals/script linguist-generated=true +acceptance/help/cmd/workspace/serving-endpoints-data-plane/serving-endpoints-data-plane/script linguist-generated=true +acceptance/help/cmd/workspace/serving-endpoints/serving-endpoints/script linguist-generated=true +acceptance/help/cmd/workspace/settings/settings/script linguist-generated=true +acceptance/help/cmd/workspace/shares/shares/script linguist-generated=true +acceptance/help/cmd/workspace/statement-execution/statement-execution/script linguist-generated=true +acceptance/help/cmd/workspace/storage-credentials/storage-credentials/script linguist-generated=true +acceptance/help/cmd/workspace/system-schemas/system-schemas/script linguist-generated=true +acceptance/help/cmd/workspace/table-constraints/table-constraints/script linguist-generated=true +acceptance/help/cmd/workspace/tables/tables/script linguist-generated=true +acceptance/help/cmd/workspace/temporary-table-credentials/temporary-table-credentials/script linguist-generated=true +acceptance/help/cmd/workspace/token-management/token-management/script linguist-generated=true 
+acceptance/help/cmd/workspace/tokens/tokens/script linguist-generated=true +acceptance/help/cmd/workspace/users/users/script linguist-generated=true +acceptance/help/cmd/workspace/vector-search-endpoints/vector-search-endpoints/script linguist-generated=true +acceptance/help/cmd/workspace/vector-search-indexes/vector-search-indexes/script linguist-generated=true +acceptance/help/cmd/workspace/volumes/volumes/script linguist-generated=true +acceptance/help/cmd/workspace/warehouses/warehouses/script linguist-generated=true +acceptance/help/cmd/workspace/workspace-bindings/workspace-bindings/script linguist-generated=true +acceptance/help/cmd/workspace/workspace-conf/workspace-conf/script linguist-generated=true +acceptance/help/cmd/workspace/workspace/workspace/script linguist-generated=true cmd/account/access-control/access-control.go linguist-generated=true cmd/account/billable-usage/billable-usage.go linguist-generated=true cmd/account/budgets/budgets.go linguist-generated=true diff --git a/acceptance/help/cmd/account/access-control/access-control/output.txt b/acceptance/help/cmd/account/access-control/access-control/output.txt new file mode 100644 index 000000000..40784dbf3 --- /dev/null +++ b/acceptance/help/cmd/account/access-control/access-control/output.txt @@ -0,0 +1,72 @@ + +>>> $CLI account access-control get-assignable-roles-for-resource --help +Get assignable roles for a resource. + + Gets all the roles that can be granted on an account level resource. A role is + grantable if the rule set on the resource can contain an access rule of the + role. + + Arguments: + RESOURCE: The resource name for which assignable roles will be listed. + +Usage: + databricks account access-control get-assignable-roles-for-resource RESOURCE [flags] + +Flags: + -h, --help help for get-assignable-roles-for-resource + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account access-control get-rule-set --help +Get a rule set. + + Get a rule set by its name. A rule set is always attached to a resource and + contains a list of access rules on the said resource. Currently only a default + rule set for each resource is supported. + + Arguments: + NAME: The ruleset name associated with the request. + ETAG: Etag used for versioning. The response is at least as fresh as the eTag + provided. Etag is used for optimistic concurrency control as a way to help + prevent simultaneous updates of a rule set from overwriting each other. It + is strongly suggested that systems make use of the etag in the read -> + modify -> write pattern to perform rule set updates in order to avoid race + conditions that is get an etag from a GET rule set request, and pass it + with the PUT update request to identify the rule set version you are + updating. + +Usage: + databricks account access-control get-rule-set NAME ETAG [flags] + +Flags: + -h, --help help for get-rule-set + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account access-control update-rule-set --help +Update a rule set. + + Replace the rules of a rule set. First, use get to read the current version of + the rule set before modifying it. This pattern helps prevent conflicts between + concurrent updates. 
+ +Usage: + databricks account access-control update-rule-set [flags] + +Flags: + -h, --help help for update-rule-set + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/access-control/access-control/script b/acceptance/help/cmd/account/access-control/access-control/script new file mode 100755 index 000000000..69f7b84b8 --- /dev/null +++ b/acceptance/help/cmd/account/access-control/access-control/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account access-control get-assignable-roles-for-resource --help +trace $CLI account access-control get-rule-set --help +trace $CLI account access-control update-rule-set --help diff --git a/acceptance/help/cmd/account/billable-usage/billable-usage/output.txt b/acceptance/help/cmd/account/billable-usage/billable-usage/output.txt new file mode 100644 index 000000000..b402c28d3 --- /dev/null +++ b/acceptance/help/cmd/account/billable-usage/billable-usage/output.txt @@ -0,0 +1,33 @@ + +>>> $CLI account billable-usage download --help +Return billable usage logs. + + Returns billable usage logs in CSV format for the specified account and date + range. For the data schema, see [CSV file schema]. Note that this method might + take multiple minutes to complete. + + **Warning**: Depending on the queried date range, the number of workspaces in + the account, the size of the response and the internet speed of the caller, + this API may hit a timeout after a few minutes. If you experience this, try to + mitigate by calling the API with narrower date ranges. + + [CSV file schema]: https://docs.databricks.com/administration-guide/account-settings/usage-analysis.html#schema + + Arguments: + START_MONTH: Format: YYYY-MM. First month to return billable usage logs for. This + field is required. + END_MONTH: Format: YYYY-MM. Last month to return billable usage logs for. This + field is required. + +Usage: + databricks account billable-usage download START_MONTH END_MONTH [flags] + +Flags: + -h, --help help for download + --personal-data Specify whether to include personally identifiable information in the billable usage logs, for example the email addresses of cluster creators. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/billable-usage/billable-usage/script b/acceptance/help/cmd/account/billable-usage/billable-usage/script new file mode 100755 index 000000000..deb48b6f0 --- /dev/null +++ b/acceptance/help/cmd/account/billable-usage/billable-usage/script @@ -0,0 +1,2 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account billable-usage download --help diff --git a/acceptance/help/cmd/account/budgets/budgets/output.txt b/acceptance/help/cmd/account/budgets/budgets/output.txt new file mode 100644 index 000000000..42e1ae5d7 --- /dev/null +++ b/acceptance/help/cmd/account/budgets/budgets/output.txt @@ -0,0 +1,101 @@ + +>>> $CLI account budgets create --help +Create new budget. + + Create a new budget configuration for an account. 
For full details, see + https://docs.databricks.com/en/admin/account-settings/budgets.html. + +Usage: + databricks account budgets create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account budgets delete --help +Delete budget. + + Deletes a budget configuration for an account. Both account and budget + configuration are specified by ID. This cannot be undone. + + Arguments: + BUDGET_ID: The Databricks budget configuration ID. + +Usage: + databricks account budgets delete BUDGET_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account budgets get --help +Get budget. + + Gets a budget configuration for an account. Both account and budget + configuration are specified by ID. + + Arguments: + BUDGET_ID: The budget configuration ID + +Usage: + databricks account budgets get BUDGET_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account budgets list --help +Get all budgets. + + Gets all budgets associated with this account. + +Usage: + databricks account budgets list [flags] + +Flags: + -h, --help help for list + --page-token string A page token received from a previous get all budget configurations call. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account budgets update --help +Modify budget. + + Updates a budget configuration for an account. Both account and budget + configuration are specified by ID. + + Arguments: + BUDGET_ID: The Databricks budget configuration ID. + +Usage: + databricks account budgets update BUDGET_ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/budgets/budgets/script b/acceptance/help/cmd/account/budgets/budgets/script new file mode 100755 index 000000000..055664afc --- /dev/null +++ b/acceptance/help/cmd/account/budgets/budgets/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI account budgets create --help +trace $CLI account budgets delete --help +trace $CLI account budgets get --help +trace $CLI account budgets list --help +trace $CLI account budgets update --help diff --git a/acceptance/help/cmd/account/credentials/credentials/output.txt b/acceptance/help/cmd/account/credentials/credentials/output.txt new file mode 100644 index 000000000..a7d771491 --- /dev/null +++ b/acceptance/help/cmd/account/credentials/credentials/output.txt @@ -0,0 +1,92 @@ + +>>> $CLI account credentials create --help +Create credential configuration. + + Creates a Databricks credential configuration that represents cloud + cross-account credentials for a specified account. Databricks uses this to set + up network infrastructure properly to host Databricks clusters. For your AWS + IAM role, you need to trust the External ID (the Databricks Account API + account ID) in the returned credential object, and configure the required + access policy. + + Save the response's credentials_id field, which is the ID for your new + credential configuration object. + + For information about how to create a new workspace with this API, see [Create + a new workspace using the Account API] + + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + +Usage: + databricks account credentials create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account credentials delete --help +Delete credential configuration. + + Deletes a Databricks credential configuration object for an account, both + specified by ID. You cannot delete a credential that is associated with any + workspace. + + Arguments: + CREDENTIALS_ID: Databricks Account API credential configuration ID + +Usage: + databricks account credentials delete CREDENTIALS_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account credentials get --help +Get credential configuration. + + Gets a Databricks credential configuration object for an account, both + specified by ID. + + Arguments: + CREDENTIALS_ID: Databricks Account API credential configuration ID + +Usage: + databricks account credentials get CREDENTIALS_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account credentials list --help +Get all credential configurations. + + Gets all Databricks credential configurations associated with an account + specified by ID. 
+ +Usage: + databricks account credentials list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/credentials/credentials/script b/acceptance/help/cmd/account/credentials/credentials/script new file mode 100755 index 000000000..f337eb862 --- /dev/null +++ b/acceptance/help/cmd/account/credentials/credentials/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account credentials create --help +trace $CLI account credentials delete --help +trace $CLI account credentials get --help +trace $CLI account credentials list --help diff --git a/acceptance/help/cmd/account/csp-enablement-account/csp-enablement-account/output.txt b/acceptance/help/cmd/account/csp-enablement-account/csp-enablement-account/output.txt new file mode 100644 index 000000000..544c1f415 --- /dev/null +++ b/acceptance/help/cmd/account/csp-enablement-account/csp-enablement-account/output.txt @@ -0,0 +1,110 @@ + +>>> $CLI account csp-enablement-account get --help +Databricks Account Commands + +Usage: + databricks account [command] + +Identity and Access Management + access-control These APIs manage access rules on resources in an account. + groups Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + workspace-assignment The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. + +Unity Catalog + metastore-assignments These APIs manage metastore assignments to a workspace. + metastores These APIs manage Unity Catalog metastores for an account. + storage-credentials These APIs manage storage credentials for a particular metastore. + +Settings + ip-access-lists The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. + network-connectivity These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources. + settings Accounts Settings API allows users to manage settings at the account level. + +Provisioning + credentials These APIs manage credential configurations for this workspace. + encryption-keys These APIs manage encryption key configurations for this workspace (optional). + networks These APIs manage network configurations for customer-managed VPCs (optional). + private-access These APIs manage private access settings for this account. + storage These APIs manage storage configurations for this workspace. + vpc-endpoints These APIs manage VPC endpoint configurations for this account. + workspaces These APIs manage workspaces for this account. + +Billing + billable-usage This API allows you to download billable usage logs for the specified account and date range. + budgets These APIs manage budget configurations for this account. + log-delivery These APIs manage log delivery configurations for this account. + usage-dashboards These APIs manage usage dashboards for this account. 
+ +OAuth + custom-app-integration These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. + o-auth-published-apps These APIs enable administrators to view all the available published OAuth applications in Databricks. + published-app-integration These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud. + service-principal-secrets These APIs enable administrators to manage service principal secrets. + +Flags: + -h, --help help for account + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +Use "databricks account [command] --help" for more information about a command. + +>>> $CLI account csp-enablement-account update --help +Databricks Account Commands + +Usage: + databricks account [command] + +Identity and Access Management + access-control These APIs manage access rules on resources in an account. + groups Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + workspace-assignment The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. + +Unity Catalog + metastore-assignments These APIs manage metastore assignments to a workspace. + metastores These APIs manage Unity Catalog metastores for an account. + storage-credentials These APIs manage storage credentials for a particular metastore. + +Settings + ip-access-lists The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. + network-connectivity These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources. + settings Accounts Settings API allows users to manage settings at the account level. + +Provisioning + credentials These APIs manage credential configurations for this workspace. + encryption-keys These APIs manage encryption key configurations for this workspace (optional). + networks These APIs manage network configurations for customer-managed VPCs (optional). + private-access These APIs manage private access settings for this account. + storage These APIs manage storage configurations for this workspace. + vpc-endpoints These APIs manage VPC endpoint configurations for this account. + workspaces These APIs manage workspaces for this account. + +Billing + billable-usage This API allows you to download billable usage logs for the specified account and date range. + budgets These APIs manage budget configurations for this account. + log-delivery These APIs manage log delivery configurations for this account. + usage-dashboards These APIs manage usage dashboards for this account. + +OAuth + custom-app-integration These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. 
+ o-auth-published-apps These APIs enable administrators to view all the available published OAuth applications in Databricks. + published-app-integration These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud. + service-principal-secrets These APIs enable administrators to manage service principal secrets. + +Flags: + -h, --help help for account + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +Use "databricks account [command] --help" for more information about a command. diff --git a/acceptance/help/cmd/account/csp-enablement-account/csp-enablement-account/script b/acceptance/help/cmd/account/csp-enablement-account/csp-enablement-account/script new file mode 100755 index 000000000..8116216e6 --- /dev/null +++ b/acceptance/help/cmd/account/csp-enablement-account/csp-enablement-account/script @@ -0,0 +1,3 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account csp-enablement-account get --help +trace $CLI account csp-enablement-account update --help diff --git a/acceptance/help/cmd/account/custom-app-integration/custom-app-integration/output.txt b/acceptance/help/cmd/account/custom-app-integration/custom-app-integration/output.txt new file mode 100644 index 000000000..2e16ca6a3 --- /dev/null +++ b/acceptance/help/cmd/account/custom-app-integration/custom-app-integration/output.txt @@ -0,0 +1,101 @@ + +>>> $CLI account custom-app-integration create --help +Create Custom OAuth App Integration. + + Create Custom OAuth App Integration. + + You can retrieve the custom OAuth app integration via + :method:CustomAppIntegration/get. + +Usage: + databricks account custom-app-integration create [flags] + +Flags: + --confidential This field indicates whether an OAuth client secret is required to authenticate this client. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --name string Name of the custom OAuth app. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account custom-app-integration delete --help +Delete Custom OAuth App Integration. + + Delete an existing Custom OAuth App Integration. You can retrieve the custom + OAuth app integration via :method:CustomAppIntegration/get. + +Usage: + databricks account custom-app-integration delete INTEGRATION_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account custom-app-integration get --help +Get OAuth Custom App Integration. + + Gets the Custom OAuth App Integration for the given integration id. + + Arguments: + INTEGRATION_ID: The OAuth app integration ID. 
+ +Usage: + databricks account custom-app-integration get INTEGRATION_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account custom-app-integration list --help +Get custom oauth app integrations. + + Get the list of custom OAuth app integrations for the specified Databricks + account + +Usage: + databricks account custom-app-integration list [flags] + +Flags: + -h, --help help for list + --include-creator-username + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account custom-app-integration update --help +Updates Custom OAuth App Integration. + + Updates an existing custom OAuth App Integration. You can retrieve the custom + OAuth app integration via :method:CustomAppIntegration/get. + +Usage: + databricks account custom-app-integration update INTEGRATION_ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/custom-app-integration/custom-app-integration/script b/acceptance/help/cmd/account/custom-app-integration/custom-app-integration/script new file mode 100755 index 000000000..0b05397dc --- /dev/null +++ b/acceptance/help/cmd/account/custom-app-integration/custom-app-integration/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account custom-app-integration create --help +trace $CLI account custom-app-integration delete --help +trace $CLI account custom-app-integration get --help +trace $CLI account custom-app-integration list --help +trace $CLI account custom-app-integration update --help diff --git a/acceptance/help/cmd/account/disable-legacy-features/disable-legacy-features/output.txt b/acceptance/help/cmd/account/disable-legacy-features/disable-legacy-features/output.txt new file mode 100644 index 000000000..0ce10d830 --- /dev/null +++ b/acceptance/help/cmd/account/disable-legacy-features/disable-legacy-features/output.txt @@ -0,0 +1,165 @@ + +>>> $CLI account disable-legacy-features delete --help +Databricks Account Commands + +Usage: + databricks account [command] + +Identity and Access Management + access-control These APIs manage access rules on resources in an account. + groups Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + workspace-assignment The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. + +Unity Catalog + metastore-assignments These APIs manage metastore assignments to a workspace. + metastores These APIs manage Unity Catalog metastores for an account. 
+ storage-credentials These APIs manage storage credentials for a particular metastore. + +Settings + ip-access-lists The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. + network-connectivity These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources. + settings Accounts Settings API allows users to manage settings at the account level. + +Provisioning + credentials These APIs manage credential configurations for this workspace. + encryption-keys These APIs manage encryption key configurations for this workspace (optional). + networks These APIs manage network configurations for customer-managed VPCs (optional). + private-access These APIs manage private access settings for this account. + storage These APIs manage storage configurations for this workspace. + vpc-endpoints These APIs manage VPC endpoint configurations for this account. + workspaces These APIs manage workspaces for this account. + +Billing + billable-usage This API allows you to download billable usage logs for the specified account and date range. + budgets These APIs manage budget configurations for this account. + log-delivery These APIs manage log delivery configurations for this account. + usage-dashboards These APIs manage usage dashboards for this account. + +OAuth + custom-app-integration These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. + o-auth-published-apps These APIs enable administrators to view all the available published OAuth applications in Databricks. + published-app-integration These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud. + service-principal-secrets These APIs enable administrators to manage service principal secrets. + +Flags: + -h, --help help for account + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +Use "databricks account [command] --help" for more information about a command. + +>>> $CLI account disable-legacy-features get --help +Databricks Account Commands + +Usage: + databricks account [command] + +Identity and Access Management + access-control These APIs manage access rules on resources in an account. + groups Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + workspace-assignment The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. + +Unity Catalog + metastore-assignments These APIs manage metastore assignments to a workspace. + metastores These APIs manage Unity Catalog metastores for an account. + storage-credentials These APIs manage storage credentials for a particular metastore. + +Settings + ip-access-lists The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. 
+ network-connectivity These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources. + settings Accounts Settings API allows users to manage settings at the account level. + +Provisioning + credentials These APIs manage credential configurations for this workspace. + encryption-keys These APIs manage encryption key configurations for this workspace (optional). + networks These APIs manage network configurations for customer-managed VPCs (optional). + private-access These APIs manage private access settings for this account. + storage These APIs manage storage configurations for this workspace. + vpc-endpoints These APIs manage VPC endpoint configurations for this account. + workspaces These APIs manage workspaces for this account. + +Billing + billable-usage This API allows you to download billable usage logs for the specified account and date range. + budgets These APIs manage budget configurations for this account. + log-delivery These APIs manage log delivery configurations for this account. + usage-dashboards These APIs manage usage dashboards for this account. + +OAuth + custom-app-integration These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. + o-auth-published-apps These APIs enable administrators to view all the available published OAuth applications in Databricks. + published-app-integration These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud. + service-principal-secrets These APIs enable administrators to manage service principal secrets. + +Flags: + -h, --help help for account + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +Use "databricks account [command] --help" for more information about a command. + +>>> $CLI account disable-legacy-features update --help +Databricks Account Commands + +Usage: + databricks account [command] + +Identity and Access Management + access-control These APIs manage access rules on resources in an account. + groups Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + workspace-assignment The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. + +Unity Catalog + metastore-assignments These APIs manage metastore assignments to a workspace. + metastores These APIs manage Unity Catalog metastores for an account. + storage-credentials These APIs manage storage credentials for a particular metastore. + +Settings + ip-access-lists The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. + network-connectivity These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources. + settings Accounts Settings API allows users to manage settings at the account level. 
+ +Provisioning + credentials These APIs manage credential configurations for this workspace. + encryption-keys These APIs manage encryption key configurations for this workspace (optional). + networks These APIs manage network configurations for customer-managed VPCs (optional). + private-access These APIs manage private access settings for this account. + storage These APIs manage storage configurations for this workspace. + vpc-endpoints These APIs manage VPC endpoint configurations for this account. + workspaces These APIs manage workspaces for this account. + +Billing + billable-usage This API allows you to download billable usage logs for the specified account and date range. + budgets These APIs manage budget configurations for this account. + log-delivery These APIs manage log delivery configurations for this account. + usage-dashboards These APIs manage usage dashboards for this account. + +OAuth + custom-app-integration These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. + o-auth-published-apps These APIs enable administrators to view all the available published OAuth applications in Databricks. + published-app-integration These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud. + service-principal-secrets These APIs enable administrators to manage service principal secrets. + +Flags: + -h, --help help for account + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +Use "databricks account [command] --help" for more information about a command. diff --git a/acceptance/help/cmd/account/disable-legacy-features/disable-legacy-features/script b/acceptance/help/cmd/account/disable-legacy-features/disable-legacy-features/script new file mode 100755 index 000000000..f686a9245 --- /dev/null +++ b/acceptance/help/cmd/account/disable-legacy-features/disable-legacy-features/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account disable-legacy-features delete --help +trace $CLI account disable-legacy-features get --help +trace $CLI account disable-legacy-features update --help diff --git a/acceptance/help/cmd/account/encryption-keys/encryption-keys/output.txt b/acceptance/help/cmd/account/encryption-keys/encryption-keys/output.txt new file mode 100644 index 000000000..67b714fd3 --- /dev/null +++ b/acceptance/help/cmd/account/encryption-keys/encryption-keys/output.txt @@ -0,0 +1,118 @@ + +>>> $CLI account encryption-keys create --help +Create encryption key configuration. + + Creates a customer-managed key configuration object for an account, specified + by ID. This operation uploads a reference to a customer-managed key to + Databricks. If the key is assigned as a workspace's customer-managed key for + managed services, Databricks uses the key to encrypt the workspaces notebooks + and secrets in the control plane, in addition to Databricks SQL queries and + query history. 
If it is specified as a workspace's customer-managed key for + workspace storage, the key encrypts the workspace's root S3 bucket (which + contains the workspace's root DBFS and system data) and, optionally, cluster + EBS volume data. + + **Important**: Customer-managed keys are supported only for some deployment + types, subscription types, and AWS regions that currently support creation of + Databricks workspaces. + + This operation is available only if your account is on the E2 version of the + platform or on a select custom plan that allows multiple workspaces per + account. + +Usage: + databricks account encryption-keys create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account encryption-keys delete --help +Delete encryption key configuration. + + Deletes a customer-managed key configuration object for an account. You cannot + delete a configuration that is associated with a running workspace. + + Arguments: + CUSTOMER_MANAGED_KEY_ID: Databricks encryption key configuration ID. + +Usage: + databricks account encryption-keys delete CUSTOMER_MANAGED_KEY_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account encryption-keys get --help +Get encryption key configuration. + + Gets a customer-managed key configuration object for an account, specified by + ID. This operation uploads a reference to a customer-managed key to + Databricks. If assigned as a workspace's customer-managed key for managed + services, Databricks uses the key to encrypt the workspaces notebooks and + secrets in the control plane, in addition to Databricks SQL queries and query + history. If it is specified as a workspace's customer-managed key for storage, + the key encrypts the workspace's root S3 bucket (which contains the + workspace's root DBFS and system data) and, optionally, cluster EBS volume + data. + + **Important**: Customer-managed keys are supported only for some deployment + types, subscription types, and AWS regions. + + This operation is available only if your account is on the E2 version of the + platform.", + + Arguments: + CUSTOMER_MANAGED_KEY_ID: Databricks encryption key configuration ID. + +Usage: + databricks account encryption-keys get CUSTOMER_MANAGED_KEY_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account encryption-keys list --help +Get all encryption key configurations. + + Gets all customer-managed key configuration objects for an account. If the key + is specified as a workspace's managed services customer-managed key, + Databricks uses the key to encrypt the workspace's notebooks and secrets in + the control plane, in addition to Databricks SQL queries and query history. 
If + the key is specified as a workspace's storage customer-managed key, the key is + used to encrypt the workspace's root S3 bucket and optionally can encrypt + cluster EBS volumes data in the data plane. + + **Important**: Customer-managed keys are supported only for some deployment + types, subscription types, and AWS regions. + + This operation is available only if your account is on the E2 version of the + platform. + +Usage: + databricks account encryption-keys list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/encryption-keys/encryption-keys/script b/acceptance/help/cmd/account/encryption-keys/encryption-keys/script new file mode 100755 index 000000000..a9aa592d3 --- /dev/null +++ b/acceptance/help/cmd/account/encryption-keys/encryption-keys/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account encryption-keys create --help +trace $CLI account encryption-keys delete --help +trace $CLI account encryption-keys get --help +trace $CLI account encryption-keys list --help diff --git a/acceptance/help/cmd/account/esm-enablement-account/esm-enablement-account/output.txt b/acceptance/help/cmd/account/esm-enablement-account/esm-enablement-account/output.txt new file mode 100644 index 000000000..c23e8da83 --- /dev/null +++ b/acceptance/help/cmd/account/esm-enablement-account/esm-enablement-account/output.txt @@ -0,0 +1,110 @@ + +>>> $CLI account esm-enablement-account get --help +Databricks Account Commands + +Usage: + databricks account [command] + +Identity and Access Management + access-control These APIs manage access rules on resources in an account. + groups Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + workspace-assignment The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. + +Unity Catalog + metastore-assignments These APIs manage metastore assignments to a workspace. + metastores These APIs manage Unity Catalog metastores for an account. + storage-credentials These APIs manage storage credentials for a particular metastore. + +Settings + ip-access-lists The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. + network-connectivity These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources. + settings Accounts Settings API allows users to manage settings at the account level. + +Provisioning + credentials These APIs manage credential configurations for this workspace. + encryption-keys These APIs manage encryption key configurations for this workspace (optional). + networks These APIs manage network configurations for customer-managed VPCs (optional). + private-access These APIs manage private access settings for this account. + storage These APIs manage storage configurations for this workspace. + vpc-endpoints These APIs manage VPC endpoint configurations for this account. 
+ workspaces These APIs manage workspaces for this account. + +Billing + billable-usage This API allows you to download billable usage logs for the specified account and date range. + budgets These APIs manage budget configurations for this account. + log-delivery These APIs manage log delivery configurations for this account. + usage-dashboards These APIs manage usage dashboards for this account. + +OAuth + custom-app-integration These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. + o-auth-published-apps These APIs enable administrators to view all the available published OAuth applications in Databricks. + published-app-integration These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud. + service-principal-secrets These APIs enable administrators to manage service principal secrets. + +Flags: + -h, --help help for account + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +Use "databricks account [command] --help" for more information about a command. + +>>> $CLI account esm-enablement-account update --help +Databricks Account Commands + +Usage: + databricks account [command] + +Identity and Access Management + access-control These APIs manage access rules on resources in an account. + groups Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + workspace-assignment The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. + +Unity Catalog + metastore-assignments These APIs manage metastore assignments to a workspace. + metastores These APIs manage Unity Catalog metastores for an account. + storage-credentials These APIs manage storage credentials for a particular metastore. + +Settings + ip-access-lists The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. + network-connectivity These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources. + settings Accounts Settings API allows users to manage settings at the account level. + +Provisioning + credentials These APIs manage credential configurations for this workspace. + encryption-keys These APIs manage encryption key configurations for this workspace (optional). + networks These APIs manage network configurations for customer-managed VPCs (optional). + private-access These APIs manage private access settings for this account. + storage These APIs manage storage configurations for this workspace. + vpc-endpoints These APIs manage VPC endpoint configurations for this account. + workspaces These APIs manage workspaces for this account. + +Billing + billable-usage This API allows you to download billable usage logs for the specified account and date range. + budgets These APIs manage budget configurations for this account. 
+ log-delivery These APIs manage log delivery configurations for this account. + usage-dashboards These APIs manage usage dashboards for this account. + +OAuth + custom-app-integration These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. + o-auth-published-apps These APIs enable administrators to view all the available published OAuth applications in Databricks. + published-app-integration These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud. + service-principal-secrets These APIs enable administrators to manage service principal secrets. + +Flags: + -h, --help help for account + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +Use "databricks account [command] --help" for more information about a command. diff --git a/acceptance/help/cmd/account/esm-enablement-account/esm-enablement-account/script b/acceptance/help/cmd/account/esm-enablement-account/esm-enablement-account/script new file mode 100755 index 000000000..187943e05 --- /dev/null +++ b/acceptance/help/cmd/account/esm-enablement-account/esm-enablement-account/script @@ -0,0 +1,3 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account esm-enablement-account get --help +trace $CLI account esm-enablement-account update --help diff --git a/acceptance/help/cmd/account/federation-policy/federation-policy/output.txt b/acceptance/help/cmd/account/federation-policy/federation-policy/output.txt new file mode 100644 index 000000000..00012ff93 --- /dev/null +++ b/acceptance/help/cmd/account/federation-policy/federation-policy/output.txt @@ -0,0 +1,94 @@ + +>>> $CLI account federation-policy create --help +Create account federation policy. + +Usage: + databricks account federation-policy create [flags] + +Flags: + --description string Description of the federation policy. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --name string Resource name for the federation policy. + --policy-id string The identifier for the federation policy. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account federation-policy delete --help +Delete account federation policy. + + Arguments: + POLICY_ID: The identifier for the federation policy. + +Usage: + databricks account federation-policy delete POLICY_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account federation-policy get --help +Get account federation policy. + + Arguments: + POLICY_ID: The identifier for the federation policy. 
+ +Usage: + databricks account federation-policy get POLICY_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account federation-policy list --help +List account federation policies. + +Usage: + databricks account federation-policy list [flags] + +Flags: + -h, --help help for list + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account federation-policy update --help +Update account federation policy. + + Arguments: + POLICY_ID: The identifier for the federation policy. + +Usage: + databricks account federation-policy update POLICY_ID [flags] + +Flags: + --description string Description of the federation policy. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --name string Resource name for the federation policy. + --update-mask string The field mask specifies which fields of the policy to update. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/federation-policy/federation-policy/script b/acceptance/help/cmd/account/federation-policy/federation-policy/script new file mode 100755 index 000000000..9407cef24 --- /dev/null +++ b/acceptance/help/cmd/account/federation-policy/federation-policy/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account federation-policy create --help +trace $CLI account federation-policy delete --help +trace $CLI account federation-policy get --help +trace $CLI account federation-policy list --help +trace $CLI account federation-policy update --help diff --git a/acceptance/help/cmd/account/groups/groups/output.txt b/acceptance/help/cmd/account/groups/groups/output.txt new file mode 100644 index 000000000..4645a88c9 --- /dev/null +++ b/acceptance/help/cmd/account/groups/groups/output.txt @@ -0,0 +1,131 @@ + +>>> $CLI account groups create --help +Create a new group. + + Creates a group in the Databricks account with a unique name, using the + supplied group details. + +Usage: + databricks account groups create [flags] + +Flags: + --display-name string String that represents a human-readable group name. + --external-id string + -h, --help help for create + --id string Databricks group ID. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account groups delete --help +Delete a group. + + Deletes a group from the Databricks account. + + Arguments: + ID: Unique ID for a group in the Databricks account. 
+ +Usage: + databricks account groups delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account groups get --help +Get group details. + + Gets the information for a specific group in the Databricks account. + + Arguments: + ID: Unique ID for a group in the Databricks account. + +Usage: + databricks account groups get ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account groups list --help +List group details. + + Gets all details of the groups associated with the Databricks account. + +Usage: + databricks account groups list [flags] + +Flags: + --attributes string Comma-separated list of attributes to return in response. + --count int Desired number of results per page. + --excluded-attributes string Comma-separated list of attributes to exclude in response. + --filter string Query by which the results have to be filtered. + -h, --help help for list + --sort-by string Attribute to sort the results. + --sort-order ListSortOrder The order to sort the results. Supported values: [ascending, descending] + --start-index int Specifies the index of the first result. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account groups patch --help +Update group details. + + Partially updates the details of a group. + + Arguments: + ID: Unique ID for a group in the Databricks account. + +Usage: + databricks account groups patch ID [flags] + +Flags: + -h, --help help for patch + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account groups update --help +Replace a group. + + Updates the details of a group by replacing the entire group entity. + + Arguments: + ID: Databricks group ID + +Usage: + databricks account groups update ID [flags] + +Flags: + --display-name string String that represents a human-readable group name. + --external-id string + -h, --help help for update + --id string Databricks group ID. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/groups/groups/script b/acceptance/help/cmd/account/groups/groups/script new file mode 100755 index 000000000..c0ddb0e45 --- /dev/null +++ b/acceptance/help/cmd/account/groups/groups/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI account groups create --help +trace $CLI account groups delete --help +trace $CLI account groups get --help +trace $CLI account groups list --help +trace $CLI account groups patch --help +trace $CLI account groups update --help diff --git a/acceptance/help/cmd/account/ip-access-lists/ip-access-lists/output.txt b/acceptance/help/cmd/account/ip-access-lists/ip-access-lists/output.txt new file mode 100644 index 000000000..3aba1be73 --- /dev/null +++ b/acceptance/help/cmd/account/ip-access-lists/ip-access-lists/output.txt @@ -0,0 +1,172 @@ + +>>> $CLI account ip-access-lists create --help +Create access list. + + Creates an IP access list for the account. + + A list can be an allow list or a block list. See the top of this file for a + description of how the server treats allow lists and block lists at runtime. + + When creating or updating an IP access list: + + * For all allow lists and block lists combined, the API supports a maximum of + 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to + exceed that number return error 400 with error_code value QUOTA_EXCEEDED. + * If the new list would block the calling user's current IP, error 400 is + returned with error_code value INVALID_STATE. + + It can take a few minutes for the changes to take effect. + + Arguments: + LABEL: Label for the IP access list. This **cannot** be empty. + LIST_TYPE: Type of IP access list. Valid values are as follows and are + case-sensitive: + + * ALLOW: An allow list. Include this IP or range. * BLOCK: A block + list. Exclude this IP or range. IP addresses in the block list are + excluded even if they are included in an allow list. + +Usage: + databricks account ip-access-lists create LABEL LIST_TYPE [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account ip-access-lists delete --help +Delete access list. + + Deletes an IP access list, specified by its list ID. + + Arguments: + IP_ACCESS_LIST_ID: The ID for the corresponding IP access list + +Usage: + databricks account ip-access-lists delete IP_ACCESS_LIST_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account ip-access-lists get --help +Get IP access list. + + Gets an IP access list, specified by its list ID. + + Arguments: + IP_ACCESS_LIST_ID: The ID for the corresponding IP access list + +Usage: + databricks account ip-access-lists get IP_ACCESS_LIST_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account ip-access-lists list --help +Get access lists. + + Gets all IP access lists for the specified account. 
+ +Usage: + databricks account ip-access-lists list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account ip-access-lists replace --help +Replace access list. + + Replaces an IP access list, specified by its ID. + + A list can include allow lists and block lists. See the top of this file for a + description of how the server treats allow lists and block lists at run time. + When replacing an IP access list: * For all allow lists and block lists + combined, the API supports a maximum of 1000 IP/CIDR values, where one CIDR + counts as a single value. Attempts to exceed that number return error 400 with + error_code value QUOTA_EXCEEDED. * If the resulting list would block the + calling user's current IP, error 400 is returned with error_code value + INVALID_STATE. It can take a few minutes for the changes to take effect. + + Arguments: + IP_ACCESS_LIST_ID: The ID for the corresponding IP access list + LABEL: Label for the IP access list. This **cannot** be empty. + LIST_TYPE: Type of IP access list. Valid values are as follows and are + case-sensitive: + + * ALLOW: An allow list. Include this IP or range. * BLOCK: A block + list. Exclude this IP or range. IP addresses in the block list are + excluded even if they are included in an allow list. + ENABLED: Specifies whether this IP access list is enabled. + +Usage: + databricks account ip-access-lists replace IP_ACCESS_LIST_ID LABEL LIST_TYPE ENABLED [flags] + +Flags: + -h, --help help for replace + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account ip-access-lists update --help +Update access list. + + Updates an existing IP access list, specified by its ID. + + A list can include allow lists and block lists. See the top of this file for a + description of how the server treats allow lists and block lists at run time. + + When updating an IP access list: + + * For all allow lists and block lists combined, the API supports a maximum of + 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to + exceed that number return error 400 with error_code value QUOTA_EXCEEDED. + * If the updated list would block the calling user's current IP, error 400 is + returned with error_code value INVALID_STATE. + + It can take a few minutes for the changes to take effect. + + Arguments: + IP_ACCESS_LIST_ID: The ID for the corresponding IP access list + +Usage: + databricks account ip-access-lists update IP_ACCESS_LIST_ID [flags] + +Flags: + --enabled Specifies whether this IP access list is enabled. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --label string Label for the IP access list. + --list-type ListType Type of IP access list. 
Supported values: [ALLOW, BLOCK] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/ip-access-lists/ip-access-lists/script b/acceptance/help/cmd/account/ip-access-lists/ip-access-lists/script new file mode 100755 index 000000000..fa2b47f1f --- /dev/null +++ b/acceptance/help/cmd/account/ip-access-lists/ip-access-lists/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account ip-access-lists create --help +trace $CLI account ip-access-lists delete --help +trace $CLI account ip-access-lists get --help +trace $CLI account ip-access-lists list --help +trace $CLI account ip-access-lists replace --help +trace $CLI account ip-access-lists update --help diff --git a/acceptance/help/cmd/account/log-delivery/log-delivery/output.txt b/acceptance/help/cmd/account/log-delivery/log-delivery/output.txt new file mode 100644 index 000000000..a5b097649 --- /dev/null +++ b/acceptance/help/cmd/account/log-delivery/log-delivery/output.txt @@ -0,0 +1,114 @@ + +>>> $CLI account log-delivery create --help +Create a new log delivery configuration. + + Creates a new Databricks log delivery configuration to enable delivery of the + specified type of logs to your storage location. This requires that you + already created a [credential object](:method:Credentials/Create) (which + encapsulates a cross-account service IAM role) and a [storage configuration + object](:method:Storage/Create) (which encapsulates an S3 bucket). + + For full details, including the required IAM role policies and bucket + policies, see [Deliver and access billable usage logs] or [Configure audit + logging]. + + **Note**: There is a limit on the number of log delivery configurations + available per account (each limit applies separately to each log type + including billable usage and audit logs). You can create a maximum of two + enabled account-level delivery configurations (configurations without a + workspace filter) per type. Additionally, you can create two enabled + workspace-level delivery configurations per workspace for each log type, which + means that the same workspace ID can occur in the workspace filter for no more + than two delivery configurations per log type. + + You cannot delete a log delivery configuration, but you can disable it (see + [Enable or disable log delivery + configuration](:method:LogDelivery/PatchStatus)). + + [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + [Deliver and access billable usage logs]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + +Usage: + databricks account log-delivery create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account log-delivery get --help +Get log delivery configuration. + + Gets a Databricks log delivery configuration object for an account, both + specified by ID. 
+ + Arguments: + LOG_DELIVERY_CONFIGURATION_ID: Databricks log delivery configuration ID + +Usage: + databricks account log-delivery get LOG_DELIVERY_CONFIGURATION_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account log-delivery list --help +Get all log delivery configurations. + + Gets all Databricks log delivery configurations associated with an account + specified by ID. + +Usage: + databricks account log-delivery list [flags] + +Flags: + --credentials-id string Filter by credential configuration ID. + -h, --help help for list + --status LogDeliveryConfigStatus Filter by status ENABLED or DISABLED. Supported values: [DISABLED, ENABLED] + --storage-configuration-id string Filter by storage configuration ID. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account log-delivery patch-status --help +Enable or disable log delivery configuration. + + Enables or disables a log delivery configuration. Deletion of delivery + configurations is not supported, so disable log delivery configurations that + are no longer needed. Note that you can't re-enable a delivery configuration + if this would violate the delivery configuration limits described under + [Create log delivery](:method:LogDelivery/Create). + + Arguments: + LOG_DELIVERY_CONFIGURATION_ID: Databricks log delivery configuration ID + STATUS: Status of log delivery configuration. Set to ENABLED (enabled) or + DISABLED (disabled). Defaults to ENABLED. You can [enable or disable + the configuration](#operation/patch-log-delivery-config-status) later. + Deletion of a configuration is not supported, so disable a log delivery + configuration that is no longer needed. + +Usage: + databricks account log-delivery patch-status LOG_DELIVERY_CONFIGURATION_ID STATUS [flags] + +Flags: + -h, --help help for patch-status + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/log-delivery/log-delivery/script b/acceptance/help/cmd/account/log-delivery/log-delivery/script new file mode 100755 index 000000000..4af3568b3 --- /dev/null +++ b/acceptance/help/cmd/account/log-delivery/log-delivery/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account log-delivery create --help +trace $CLI account log-delivery get --help +trace $CLI account log-delivery list --help +trace $CLI account log-delivery patch-status --help diff --git a/acceptance/help/cmd/account/metastore-assignments/metastore-assignments/output.txt b/acceptance/help/cmd/account/metastore-assignments/metastore-assignments/output.txt new file mode 100644 index 000000000..f917b71a5 --- /dev/null +++ b/acceptance/help/cmd/account/metastore-assignments/metastore-assignments/output.txt @@ -0,0 +1,111 @@ + +>>> $CLI account metastore-assignments create --help +Assigns a workspace to a metastore. 
+ + Creates an assignment to a metastore for a workspace. + + Arguments: + WORKSPACE_ID: Workspace ID. + METASTORE_ID: Unity Catalog metastore ID + +Usage: + databricks account metastore-assignments create WORKSPACE_ID METASTORE_ID [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account metastore-assignments delete --help +Delete a metastore assignment. + + Deletes a metastore assignment to a workspace, leaving the workspace with no + metastore. + + Arguments: + WORKSPACE_ID: Workspace ID. + METASTORE_ID: Unity Catalog metastore ID + +Usage: + databricks account metastore-assignments delete WORKSPACE_ID METASTORE_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account metastore-assignments get --help +Gets the metastore assignment for a workspace. + + Gets the metastore assignment, if any, for the workspace specified by ID. If + the workspace is assigned a metastore, the mapping will be returned. If no + metastore is assigned to the workspace, the assignment will not be found and a + 404 is returned. + + Arguments: + WORKSPACE_ID: Workspace ID. + +Usage: + databricks account metastore-assignments get WORKSPACE_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account metastore-assignments list --help +Get all workspaces assigned to a metastore. + + Gets a list of all Databricks workspace IDs that have been assigned to a given + metastore. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + +Usage: + databricks account metastore-assignments list METASTORE_ID [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account metastore-assignments update --help +Updates a metastore assignment to a workspace. + + Updates an assignment to a metastore for a workspace. Currently, only the + default catalog may be updated. + + Arguments: + WORKSPACE_ID: Workspace ID.
+ METASTORE_ID: Unity Catalog metastore ID + +Usage: + databricks account metastore-assignments update WORKSPACE_ID METASTORE_ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/metastore-assignments/metastore-assignments/script b/acceptance/help/cmd/account/metastore-assignments/metastore-assignments/script new file mode 100755 index 000000000..ee4bf9490 --- /dev/null +++ b/acceptance/help/cmd/account/metastore-assignments/metastore-assignments/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account metastore-assignments create --help +trace $CLI account metastore-assignments delete --help +trace $CLI account metastore-assignments get --help +trace $CLI account metastore-assignments list --help +trace $CLI account metastore-assignments update --help diff --git a/acceptance/help/cmd/account/metastores/metastores/output.txt b/acceptance/help/cmd/account/metastores/metastores/output.txt new file mode 100644 index 000000000..966c64503 --- /dev/null +++ b/acceptance/help/cmd/account/metastores/metastores/output.txt @@ -0,0 +1,97 @@ + +>>> $CLI account metastores create --help +Create metastore. + + Creates a Unity Catalog metastore. + +Usage: + databricks account metastores create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account metastores delete --help +Delete a metastore. + + Deletes a Unity Catalog metastore for an account, both specified by ID. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + +Usage: + databricks account metastores delete METASTORE_ID [flags] + +Flags: + --force Force deletion even if the metastore is not empty. + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account metastores get --help +Get a metastore. + + Gets a Unity Catalog metastore from an account, both specified by ID. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + +Usage: + databricks account metastores get METASTORE_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account metastores list --help +Get all metastores associated with an account. + + Gets all Unity Catalog metastores associated with an account specified by ID. 
+ +Usage: + databricks account metastores list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account metastores update --help +Update a metastore. + + Updates an existing Unity Catalog metastore. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + +Usage: + databricks account metastores update METASTORE_ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/metastores/metastores/script b/acceptance/help/cmd/account/metastores/metastores/script new file mode 100755 index 000000000..f27711590 --- /dev/null +++ b/acceptance/help/cmd/account/metastores/metastores/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account metastores create --help +trace $CLI account metastores delete --help +trace $CLI account metastores get --help +trace $CLI account metastores list --help +trace $CLI account metastores update --help diff --git a/acceptance/help/cmd/account/network-connectivity/network-connectivity/output.txt b/acceptance/help/cmd/account/network-connectivity/network-connectivity/output.txt new file mode 100644 index 000000000..e582af689 --- /dev/null +++ b/acceptance/help/cmd/account/network-connectivity/network-connectivity/output.txt @@ -0,0 +1,184 @@ + +>>> $CLI account network-connectivity create-network-connectivity-configuration --help +Create a network connectivity configuration. + + Arguments: + NAME: The name of the network connectivity configuration. The name can contain + alphanumeric characters, hyphens, and underscores. The length must be + between 3 and 30 characters. The name must match the regular expression + ^[0-9a-zA-Z-_]{3,30}$. + REGION: The region for the network connectivity configuration. Only workspaces in + the same region can be attached to the network connectivity configuration. + +Usage: + databricks account network-connectivity create-network-connectivity-configuration NAME REGION [flags] + +Flags: + -h, --help help for create-network-connectivity-configuration + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account network-connectivity create-private-endpoint-rule --help +Create a private endpoint rule. + + Create a private endpoint rule for the specified network connectivity config + object. Once the object is created, Databricks asynchronously provisions a new + Azure private endpoint to your specified Azure resource. + + **IMPORTANT**: You must use Azure portal or other Azure tools to approve the + private endpoint to complete the connection. To get the information of the + private endpoint created, make a GET request on the new private endpoint + rule. See [serverless private link]. 
+ + [serverless private link]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security/serverless-private-link + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID. + RESOURCE_ID: The Azure resource ID of the target resource. + GROUP_ID: The sub-resource type (group ID) of the target resource. Note that to + connect to workspace root storage (root DBFS), you need two endpoints, one + for blob and one for dfs. + +Usage: + databricks account network-connectivity create-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID RESOURCE_ID GROUP_ID [flags] + +Flags: + -h, --help help for create-private-endpoint-rule + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account network-connectivity delete-network-connectivity-configuration --help +Delete a network connectivity configuration. + + Deletes a network connectivity configuration. + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID. + +Usage: + databricks account network-connectivity delete-network-connectivity-configuration NETWORK_CONNECTIVITY_CONFIG_ID [flags] + +Flags: + -h, --help help for delete-network-connectivity-configuration + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account network-connectivity delete-private-endpoint-rule --help +Delete a private endpoint rule. + + Initiates deleting a private endpoint rule. If the connection state is PENDING + or EXPIRED, the private endpoint is immediately deleted. Otherwise, the + private endpoint is deactivated and will be deleted after seven days of + deactivation. When a private endpoint is deactivated, the deactivated field + is set to true and the private endpoint is not available to your serverless + compute resources. + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID. + PRIVATE_ENDPOINT_RULE_ID: Your private endpoint rule ID. + +Usage: + databricks account network-connectivity delete-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID PRIVATE_ENDPOINT_RULE_ID [flags] + +Flags: + -h, --help help for delete-private-endpoint-rule + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account network-connectivity get-network-connectivity-configuration --help +Get a network connectivity configuration. + + Gets a network connectivity configuration. + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID.
+ +Usage: + databricks account network-connectivity get-network-connectivity-configuration NETWORK_CONNECTIVITY_CONFIG_ID [flags] + +Flags: + -h, --help help for get-network-connectivity-configuration + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account network-connectivity get-private-endpoint-rule --help +Get a private endpoint rule. + + Gets the private endpoint rule. + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID. + PRIVATE_ENDPOINT_RULE_ID: Your private endpoint rule ID. + +Usage: + databricks account network-connectivity get-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID PRIVATE_ENDPOINT_RULE_ID [flags] + +Flags: + -h, --help help for get-private-endpoint-rule + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account network-connectivity list-network-connectivity-configurations --help +List network connectivity configurations. + + Gets an array of network connectivity configurations. + +Usage: + databricks account network-connectivity list-network-connectivity-configurations [flags] + +Flags: + -h, --help help for list-network-connectivity-configurations + --page-token string Pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account network-connectivity list-private-endpoint-rules --help +List private endpoint rules. + + Gets an array of private endpoint rules. + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID. + +Usage: + databricks account network-connectivity list-private-endpoint-rules NETWORK_CONNECTIVITY_CONFIG_ID [flags] + +Flags: + -h, --help help for list-private-endpoint-rules + --page-token string Pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/network-connectivity/network-connectivity/script b/acceptance/help/cmd/account/network-connectivity/network-connectivity/script new file mode 100755 index 000000000..4c2db8b3d --- /dev/null +++ b/acceptance/help/cmd/account/network-connectivity/network-connectivity/script @@ -0,0 +1,9 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI account network-connectivity create-network-connectivity-configuration --help +trace $CLI account network-connectivity create-private-endpoint-rule --help +trace $CLI account network-connectivity delete-network-connectivity-configuration --help +trace $CLI account network-connectivity delete-private-endpoint-rule --help +trace $CLI account network-connectivity get-network-connectivity-configuration --help +trace $CLI account network-connectivity get-private-endpoint-rule --help +trace $CLI account network-connectivity list-network-connectivity-configurations --help +trace $CLI account network-connectivity list-private-endpoint-rules --help diff --git a/acceptance/help/cmd/account/networks/networks/output.txt b/acceptance/help/cmd/account/networks/networks/output.txt new file mode 100644 index 000000000..6ac36c96d --- /dev/null +++ b/acceptance/help/cmd/account/networks/networks/output.txt @@ -0,0 +1,91 @@ + +>>> $CLI account networks create --help +Create network configuration. + + Creates a Databricks network configuration that represents a VPC and its + resources. The VPC will be used for new Databricks clusters. This requires a + pre-existing VPC and subnets. + + Arguments: + NETWORK_NAME: The human-readable name of the network configuration. + +Usage: + databricks account networks create NETWORK_NAME [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --vpc-id string The ID of the VPC associated with this network. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account networks delete --help +Delete a network configuration. + + Deletes a Databricks network configuration, which represents a cloud VPC and + its resources. You cannot delete a network that is associated with a + workspace. + + This operation is available only if your account is on the E2 version of the + platform. + + Arguments: + NETWORK_ID: Databricks Account API network configuration ID. + +Usage: + databricks account networks delete NETWORK_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account networks get --help +Get a network configuration. + + Gets a Databricks network configuration, which represents a cloud VPC and its + resources. + + Arguments: + NETWORK_ID: Databricks Account API network configuration ID. + +Usage: + databricks account networks get NETWORK_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account networks list --help +Get all network configurations. + + Gets a list of all Databricks network configurations for an account, specified + by ID. + + This operation is available only if your account is on the E2 version of the + platform.
+ +Usage: + databricks account networks list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/networks/networks/script b/acceptance/help/cmd/account/networks/networks/script new file mode 100755 index 000000000..f372975c9 --- /dev/null +++ b/acceptance/help/cmd/account/networks/networks/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account networks create --help +trace $CLI account networks delete --help +trace $CLI account networks get --help +trace $CLI account networks list --help diff --git a/acceptance/help/cmd/account/o-auth-published-apps/o-auth-published-apps/output.txt b/acceptance/help/cmd/account/o-auth-published-apps/o-auth-published-apps/output.txt new file mode 100644 index 000000000..ac84c2cb7 --- /dev/null +++ b/acceptance/help/cmd/account/o-auth-published-apps/o-auth-published-apps/output.txt @@ -0,0 +1,19 @@ + +>>> $CLI account o-auth-published-apps list --help +Get all the published OAuth apps. + + Get all the available published OAuth apps in Databricks. + +Usage: + databricks account o-auth-published-apps list [flags] + +Flags: + -h, --help help for list + --page-size int The max number of OAuth published apps to return in one page. + --page-token string A token that can be used to get the next page of results. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/o-auth-published-apps/o-auth-published-apps/script b/acceptance/help/cmd/account/o-auth-published-apps/o-auth-published-apps/script new file mode 100755 index 000000000..399e68c64 --- /dev/null +++ b/acceptance/help/cmd/account/o-auth-published-apps/o-auth-published-apps/script @@ -0,0 +1,2 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account o-auth-published-apps list --help diff --git a/acceptance/help/cmd/account/personal-compute/personal-compute/output.txt b/acceptance/help/cmd/account/personal-compute/personal-compute/output.txt new file mode 100644 index 000000000..6a55849b0 --- /dev/null +++ b/acceptance/help/cmd/account/personal-compute/personal-compute/output.txt @@ -0,0 +1,165 @@ + +>>> $CLI account personal-compute delete --help +Databricks Account Commands + +Usage: + databricks account [command] + +Identity and Access Management + access-control These APIs manage access rules on resources in an account. + groups Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + workspace-assignment The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. + +Unity Catalog + metastore-assignments These APIs manage metastore assignments to a workspace. + metastores These APIs manage Unity Catalog metastores for an account. + storage-credentials These APIs manage storage credentials for a particular metastore. 
+ +Settings + ip-access-lists The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. + network-connectivity These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources. + settings Accounts Settings API allows users to manage settings at the account level. + +Provisioning + credentials These APIs manage credential configurations for this workspace. + encryption-keys These APIs manage encryption key configurations for this workspace (optional). + networks These APIs manage network configurations for customer-managed VPCs (optional). + private-access These APIs manage private access settings for this account. + storage These APIs manage storage configurations for this workspace. + vpc-endpoints These APIs manage VPC endpoint configurations for this account. + workspaces These APIs manage workspaces for this account. + +Billing + billable-usage This API allows you to download billable usage logs for the specified account and date range. + budgets These APIs manage budget configurations for this account. + log-delivery These APIs manage log delivery configurations for this account. + usage-dashboards These APIs manage usage dashboards for this account. + +OAuth + custom-app-integration These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. + o-auth-published-apps These APIs enable administrators to view all the available published OAuth applications in Databricks. + published-app-integration These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud. + service-principal-secrets These APIs enable administrators to manage service principal secrets. + +Flags: + -h, --help help for account + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +Use "databricks account [command] --help" for more information about a command. + +>>> $CLI account personal-compute get --help +Databricks Account Commands + +Usage: + databricks account [command] + +Identity and Access Management + access-control These APIs manage access rules on resources in an account. + groups Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + workspace-assignment The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. + +Unity Catalog + metastore-assignments These APIs manage metastore assignments to a workspace. + metastores These APIs manage Unity Catalog metastores for an account. + storage-credentials These APIs manage storage credentials for a particular metastore. + +Settings + ip-access-lists The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. + network-connectivity These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources. 
+ settings Accounts Settings API allows users to manage settings at the account level. + +Provisioning + credentials These APIs manage credential configurations for this workspace. + encryption-keys These APIs manage encryption key configurations for this workspace (optional). + networks These APIs manage network configurations for customer-managed VPCs (optional). + private-access These APIs manage private access settings for this account. + storage These APIs manage storage configurations for this workspace. + vpc-endpoints These APIs manage VPC endpoint configurations for this account. + workspaces These APIs manage workspaces for this account. + +Billing + billable-usage This API allows you to download billable usage logs for the specified account and date range. + budgets These APIs manage budget configurations for this account. + log-delivery These APIs manage log delivery configurations for this account. + usage-dashboards These APIs manage usage dashboards for this account. + +OAuth + custom-app-integration These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. + o-auth-published-apps These APIs enable administrators to view all the available published OAuth applications in Databricks. + published-app-integration These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud. + service-principal-secrets These APIs enable administrators to manage service principal secrets. + +Flags: + -h, --help help for account + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +Use "databricks account [command] --help" for more information about a command. + +>>> $CLI account personal-compute update --help +Databricks Account Commands + +Usage: + databricks account [command] + +Identity and Access Management + access-control These APIs manage access rules on resources in an account. + groups Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. + service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + users User identities recognized by Databricks and represented by email addresses. + workspace-assignment The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. + +Unity Catalog + metastore-assignments These APIs manage metastore assignments to a workspace. + metastores These APIs manage Unity Catalog metastores for an account. + storage-credentials These APIs manage storage credentials for a particular metastore. + +Settings + ip-access-lists The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. + network-connectivity These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources. + settings Accounts Settings API allows users to manage settings at the account level. + +Provisioning + credentials These APIs manage credential configurations for this workspace. + encryption-keys These APIs manage encryption key configurations for this workspace (optional). 
+ networks These APIs manage network configurations for customer-managed VPCs (optional). + private-access These APIs manage private access settings for this account. + storage These APIs manage storage configurations for this workspace. + vpc-endpoints These APIs manage VPC endpoint configurations for this account. + workspaces These APIs manage workspaces for this account. + +Billing + billable-usage This API allows you to download billable usage logs for the specified account and date range. + budgets These APIs manage budget configurations for this account. + log-delivery These APIs manage log delivery configurations for this account. + usage-dashboards These APIs manage usage dashboards for this account. + +OAuth + custom-app-integration These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. + o-auth-published-apps These APIs enable administrators to view all the available published OAuth applications in Databricks. + published-app-integration These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud. + service-principal-secrets These APIs enable administrators to manage service principal secrets. + +Flags: + -h, --help help for account + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +Use "databricks account [command] --help" for more information about a command. diff --git a/acceptance/help/cmd/account/personal-compute/personal-compute/script b/acceptance/help/cmd/account/personal-compute/personal-compute/script new file mode 100755 index 000000000..8c75d3635 --- /dev/null +++ b/acceptance/help/cmd/account/personal-compute/personal-compute/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account personal-compute delete --help +trace $CLI account personal-compute get --help +trace $CLI account personal-compute update --help diff --git a/acceptance/help/cmd/account/private-access/private-access/output.txt b/acceptance/help/cmd/account/private-access/private-access/output.txt new file mode 100644 index 000000000..1f4df8152 --- /dev/null +++ b/acceptance/help/cmd/account/private-access/private-access/output.txt @@ -0,0 +1,157 @@ + +>>> $CLI account private-access create --help +Create private access settings. + + Creates a private access settings object, which specifies how your workspace + is accessed over [AWS PrivateLink]. To use AWS PrivateLink, a workspace must + have a private access settings object referenced by ID in the workspace's + private_access_settings_id property. + + You can share one private access settings object with multiple workspaces in a single + account. However, private access settings are specific to AWS regions, so only + workspaces in the same AWS region can use a given private access settings + object. + + Before configuring PrivateLink, read the [Databricks article about + PrivateLink].
+ + [AWS PrivateLink]: https://aws.amazon.com/privatelink + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + + Arguments: + PRIVATE_ACCESS_SETTINGS_NAME: The human-readable name of the private access settings object. + REGION: The cloud region for workspaces associated with this private access + settings object. + +Usage: + databricks account private-access create PRIVATE_ACCESS_SETTINGS_NAME REGION [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --private-access-level PrivateAccessLevel The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. Supported values: [ACCOUNT, ENDPOINT] + --public-access-enabled Determines if the workspace can be accessed over public internet. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account private-access delete --help +Delete a private access settings object. + + Deletes a private access settings object, which determines how your workspace + is accessed over [AWS PrivateLink]. + + Before configuring PrivateLink, read the [Databricks article about + PrivateLink]. + + [AWS PrivateLink]: https://aws.amazon.com/privatelink + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + + Arguments: + PRIVATE_ACCESS_SETTINGS_ID: Databricks Account API private access settings ID. + +Usage: + databricks account private-access delete PRIVATE_ACCESS_SETTINGS_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account private-access get --help +Get a private access settings object. + + Gets a private access settings object, which specifies how your workspace is + accessed over [AWS PrivateLink]. + + Before configuring PrivateLink, read the [Databricks article about + PrivateLink]. + + [AWS PrivateLink]: https://aws.amazon.com/privatelink + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + + Arguments: + PRIVATE_ACCESS_SETTINGS_ID: Databricks Account API private access settings ID. + +Usage: + databricks account private-access get PRIVATE_ACCESS_SETTINGS_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account private-access list --help +Get all private access settings objects. + + Gets a list of all private access settings objects for an account, specified + by ID.
+ +Usage: + databricks account private-access list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account private-access replace --help +Replace private access settings. + + Updates an existing private access settings object, which specifies how your + workspace is accessed over [AWS PrivateLink]. To use AWS PrivateLink, a + workspace must have a private access settings object referenced by ID in the + workspace's private_access_settings_id property. + + This operation completely overwrites your existing private access settings + object attached to your workspaces. All workspaces attached to the private + access settings are affected by any change. If public_access_enabled, + private_access_level, or allowed_vpc_endpoint_ids are updated, effects of + these changes might take several minutes to propagate to the workspace API. + + You can share one private access settings object with multiple workspaces in a + single account. However, private access settings are specific to AWS regions, + so only workspaces in the same AWS region can use a given private access + settings object. + + Before configuring PrivateLink, read the [Databricks article about + PrivateLink]. + + [AWS PrivateLink]: https://aws.amazon.com/privatelink + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + + Arguments: + PRIVATE_ACCESS_SETTINGS_ID: Databricks Account API private access settings ID. + PRIVATE_ACCESS_SETTINGS_NAME: The human-readable name of the private access settings object. + REGION: The cloud region for workspaces associated with this private access + settings object. + +Usage: + databricks account private-access replace PRIVATE_ACCESS_SETTINGS_ID PRIVATE_ACCESS_SETTINGS_NAME REGION [flags] + +Flags: + -h, --help help for replace + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --private-access-level PrivateAccessLevel The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. Supported values: [ACCOUNT, ENDPOINT] + --public-access-enabled Determines if the workspace can be accessed over public internet. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/private-access/private-access/script b/acceptance/help/cmd/account/private-access/private-access/script new file mode 100755 index 000000000..2d18cd88e --- /dev/null +++ b/acceptance/help/cmd/account/private-access/private-access/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI account private-access create --help +trace $CLI account private-access delete --help +trace $CLI account private-access get --help +trace $CLI account private-access list --help +trace $CLI account private-access replace --help diff --git a/acceptance/help/cmd/account/published-app-integration/published-app-integration/output.txt b/acceptance/help/cmd/account/published-app-integration/published-app-integration/output.txt new file mode 100644 index 000000000..7aef92e92 --- /dev/null +++ b/acceptance/help/cmd/account/published-app-integration/published-app-integration/output.txt @@ -0,0 +1,96 @@ + +>>> $CLI account published-app-integration create --help +Create Published OAuth App Integration. + + Create Published OAuth App Integration. + + You can retrieve the published OAuth app integration via + :method:PublishedAppIntegration/get. + +Usage: + databricks account published-app-integration create [flags] + +Flags: + --app-id string App id of the OAuth published app integration. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account published-app-integration delete --help +Delete Published OAuth App Integration. + + Delete an existing Published OAuth App Integration. You can retrieve the + published OAuth app integration via :method:PublishedAppIntegration/get. + +Usage: + databricks account published-app-integration delete INTEGRATION_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account published-app-integration get --help +Get OAuth Published App Integration. + + Gets the Published OAuth App Integration for the given integration id. + +Usage: + databricks account published-app-integration get INTEGRATION_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account published-app-integration list --help +Get published OAuth app integrations. + + Get the list of published OAuth app integrations for the specified Databricks + account. + +Usage: + databricks account published-app-integration list [flags] + +Flags: + -h, --help help for list + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account published-app-integration update --help +Update Published OAuth App Integration. + + Updates an existing published OAuth App Integration. You can retrieve the + published OAuth app integration via :method:PublishedAppIntegration/get.
+ +Usage: + databricks account published-app-integration update INTEGRATION_ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/published-app-integration/published-app-integration/script b/acceptance/help/cmd/account/published-app-integration/published-app-integration/script new file mode 100755 index 000000000..898ef3972 --- /dev/null +++ b/acceptance/help/cmd/account/published-app-integration/published-app-integration/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account published-app-integration create --help +trace $CLI account published-app-integration delete --help +trace $CLI account published-app-integration get --help +trace $CLI account published-app-integration list --help +trace $CLI account published-app-integration update --help diff --git a/acceptance/help/cmd/account/service-principal-federation-policy/service-principal-federation-policy/output.txt b/acceptance/help/cmd/account/service-principal-federation-policy/service-principal-federation-policy/output.txt new file mode 100644 index 000000000..e4947ee03 --- /dev/null +++ b/acceptance/help/cmd/account/service-principal-federation-policy/service-principal-federation-policy/output.txt @@ -0,0 +1,103 @@ + +>>> $CLI account service-principal-federation-policy create --help +Create service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + +Usage: + databricks account service-principal-federation-policy create SERVICE_PRINCIPAL_ID [flags] + +Flags: + --description string Description of the federation policy. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --name string Resource name for the federation policy. + --policy-id string The identifier for the federation policy. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account service-principal-federation-policy delete --help +Delete service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + POLICY_ID: The identifier for the federation policy. + +Usage: + databricks account service-principal-federation-policy delete SERVICE_PRINCIPAL_ID POLICY_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account service-principal-federation-policy get --help +Get service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + POLICY_ID: The identifier for the federation policy. 
+ +Usage: + databricks account service-principal-federation-policy get SERVICE_PRINCIPAL_ID POLICY_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account service-principal-federation-policy list --help +List service principal federation policies. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + +Usage: + databricks account service-principal-federation-policy list SERVICE_PRINCIPAL_ID [flags] + +Flags: + -h, --help help for list + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account service-principal-federation-policy update --help +Update service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + POLICY_ID: The identifier for the federation policy. + +Usage: + databricks account service-principal-federation-policy update SERVICE_PRINCIPAL_ID POLICY_ID [flags] + +Flags: + --description string Description of the federation policy. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --name string Resource name for the federation policy. + --update-mask string The field mask specifies which fields of the policy to update. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/service-principal-federation-policy/service-principal-federation-policy/script b/acceptance/help/cmd/account/service-principal-federation-policy/service-principal-federation-policy/script new file mode 100755 index 000000000..7d5158f55 --- /dev/null +++ b/acceptance/help/cmd/account/service-principal-federation-policy/service-principal-federation-policy/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account service-principal-federation-policy create --help +trace $CLI account service-principal-federation-policy delete --help +trace $CLI account service-principal-federation-policy get --help +trace $CLI account service-principal-federation-policy list --help +trace $CLI account service-principal-federation-policy update --help diff --git a/acceptance/help/cmd/account/service-principal-secrets/service-principal-secrets/output.txt b/acceptance/help/cmd/account/service-principal-secrets/service-principal-secrets/output.txt new file mode 100644 index 000000000..c850a4273 --- /dev/null +++ b/acceptance/help/cmd/account/service-principal-secrets/service-principal-secrets/output.txt @@ -0,0 +1,64 @@ + +>>> $CLI account service-principal-secrets create --help +Create service principal secret. + + Create a secret for the given service principal. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal ID. 
+ +Usage: + databricks account service-principal-secrets create SERVICE_PRINCIPAL_ID [flags] + +Flags: + -h, --help help for create + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account service-principal-secrets delete --help +Delete service principal secret. + + Delete a secret from the given service principal. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal ID. + SECRET_ID: The secret ID. + +Usage: + databricks account service-principal-secrets delete SERVICE_PRINCIPAL_ID SECRET_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account service-principal-secrets list --help +List service principal secrets. + + List all secrets associated with the given service principal. This operation + only returns information about the secrets themselves and does not include the + secret values. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal ID. + +Usage: + databricks account service-principal-secrets list SERVICE_PRINCIPAL_ID [flags] + +Flags: + -h, --help help for list + --page-token string An opaque page token which was the next_page_token in the response of the previous request to list the secrets for this service principal. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/service-principal-secrets/service-principal-secrets/script b/acceptance/help/cmd/account/service-principal-secrets/service-principal-secrets/script new file mode 100755 index 000000000..55406f5f3 --- /dev/null +++ b/acceptance/help/cmd/account/service-principal-secrets/service-principal-secrets/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account service-principal-secrets create --help +trace $CLI account service-principal-secrets delete --help +trace $CLI account service-principal-secrets list --help diff --git a/acceptance/help/cmd/account/service-principals/service-principals/output.txt b/acceptance/help/cmd/account/service-principals/service-principals/output.txt new file mode 100644 index 000000000..979eb48a8 --- /dev/null +++ b/acceptance/help/cmd/account/service-principals/service-principals/output.txt @@ -0,0 +1,138 @@ + +>>> $CLI account service-principals create --help +Create a service principal. + + Creates a new service principal in the Databricks account. + +Usage: + databricks account service-principals create [flags] + +Flags: + --active If this user is active. + --application-id string UUID relating to the service principal. + --display-name string String that represents a concatenation of given and family names. + --external-id string + -h, --help help for create + --id string Databricks service principal ID. 
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account service-principals delete --help +Delete a service principal. + + Delete a single service principal in the Databricks account. + + Arguments: + ID: Unique ID for a service principal in the Databricks account. + +Usage: + databricks account service-principals delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account service-principals get --help +Get service principal details. + + Gets the details for a single service principal defined in the Databricks + account. + + Arguments: + ID: Unique ID for a service principal in the Databricks account. + +Usage: + databricks account service-principals get ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account service-principals list --help +List service principals. + + Gets the set of service principals associated with a Databricks account. + +Usage: + databricks account service-principals list [flags] + +Flags: + --attributes string Comma-separated list of attributes to return in response. + --count int Desired number of results per page. + --excluded-attributes string Comma-separated list of attributes to exclude in response. + --filter string Query by which the results have to be filtered. + -h, --help help for list + --sort-by string Attribute to sort the results. + --sort-order ListSortOrder The order to sort the results. Supported values: [ascending, descending] + --start-index int Specifies the index of the first result. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account service-principals patch --help +Update service principal details. + + Partially updates the details of a single service principal in the Databricks + account. + + Arguments: + ID: Unique ID for a service principal in the Databricks account. + +Usage: + databricks account service-principals patch ID [flags] + +Flags: + -h, --help help for patch + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account service-principals update --help +Replace service principal. + + Updates the details of a single service principal. + + This action replaces the existing service principal with the same name. + + Arguments: + ID: Databricks service principal ID. + +Usage: + databricks account service-principals update ID [flags] + +Flags: + --active If this user is active. + --application-id string UUID relating to the service principal.
+ --display-name string String that represents a concatenation of given and family names. + --external-id string + -h, --help help for update + --id string Databricks service principal ID. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/service-principals/service-principals/script b/acceptance/help/cmd/account/service-principals/service-principals/script new file mode 100755 index 000000000..bc2fc22bf --- /dev/null +++ b/acceptance/help/cmd/account/service-principals/service-principals/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account service-principals create --help +trace $CLI account service-principals delete --help +trace $CLI account service-principals get --help +trace $CLI account service-principals list --help +trace $CLI account service-principals patch --help +trace $CLI account service-principals update --help diff --git a/acceptance/help/cmd/account/settings/settings/output.txt b/acceptance/help/cmd/account/settings/settings/output.txt new file mode 100644 index 000000000..7901d464d --- /dev/null +++ b/acceptance/help/cmd/account/settings/settings/output.txt @@ -0,0 +1,3 @@ +script: line 65: syntax error near unexpected token `)' + +Exit code: 2 diff --git a/acceptance/help/cmd/account/settings/settings/script b/acceptance/help/cmd/account/settings/settings/script new file mode 100755 index 000000000..71380e875 --- /dev/null +++ b/acceptance/help/cmd/account/settings/settings/script @@ -0,0 +1 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. diff --git a/acceptance/help/cmd/account/storage-credentials/storage-credentials/output.txt b/acceptance/help/cmd/account/storage-credentials/storage-credentials/output.txt new file mode 100644 index 000000000..fd86c3b00 --- /dev/null +++ b/acceptance/help/cmd/account/storage-credentials/storage-credentials/output.txt @@ -0,0 +1,118 @@ + +>>> $CLI account storage-credentials create --help +Create a storage credential. + + Creates a new storage credential. The request object is specific to the cloud: + + * **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for Azure + credentials * **GcpServiceAccountKey** for GCP credentials. + + The caller must be a metastore admin and have the + **CREATE_STORAGE_CREDENTIAL** privilege on the metastore. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + +Usage: + databricks account storage-credentials create METASTORE_ID [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account storage-credentials delete --help +Delete a storage credential. + + Deletes a storage credential from the metastore. The caller must be an owner + of the storage credential. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + STORAGE_CREDENTIAL_NAME: Name of the storage credential.
+ +Usage: + databricks account storage-credentials delete METASTORE_ID STORAGE_CREDENTIAL_NAME [flags] + +Flags: + --force Force deletion even if the Storage Credential is not empty. + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account storage-credentials get --help +Gets the named storage credential. + + Gets a storage credential from the metastore. The caller must be a metastore + admin, the owner of the storage credential, or have a level of privilege on + the storage credential. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + STORAGE_CREDENTIAL_NAME: Name of the storage credential. + +Usage: + databricks account storage-credentials get METASTORE_ID STORAGE_CREDENTIAL_NAME [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account storage-credentials list --help +Get all storage credentials assigned to a metastore. + + Gets a list of all storage credentials that have been assigned to a given + metastore. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + +Usage: + databricks account storage-credentials list METASTORE_ID [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account storage-credentials update --help +Updates a storage credential. + + Updates a storage credential on the metastore. The caller must be the owner of + the storage credential. If the caller is a metastore admin, only the __owner__ + credential can be changed. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + STORAGE_CREDENTIAL_NAME: Name of the storage credential. + +Usage: + databricks account storage-credentials update METASTORE_ID STORAGE_CREDENTIAL_NAME [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/storage-credentials/storage-credentials/script b/acceptance/help/cmd/account/storage-credentials/storage-credentials/script new file mode 100755 index 000000000..c1716ac1c --- /dev/null +++ b/acceptance/help/cmd/account/storage-credentials/storage-credentials/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI account storage-credentials create --help +trace $CLI account storage-credentials delete --help +trace $CLI account storage-credentials get --help +trace $CLI account storage-credentials list --help +trace $CLI account storage-credentials update --help diff --git a/acceptance/help/cmd/account/storage/storage/output.txt b/acceptance/help/cmd/account/storage/storage/output.txt new file mode 100644 index 000000000..04e905319 --- /dev/null +++ b/acceptance/help/cmd/account/storage/storage/output.txt @@ -0,0 +1,86 @@ + +>>> $CLI account storage create --help +Create new storage configuration. + + Creates new storage configuration for an account, specified by ID. Uploads a + storage configuration object that represents the root AWS S3 bucket in your + account. Databricks stores related workspace assets including DBFS, cluster + logs, and job results. For the AWS S3 bucket, you need to configure the + required bucket policy. + + For information about how to create a new workspace with this API, see [Create + a new workspace using the Account API] + + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + +Usage: + databricks account storage create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account storage delete --help +Delete storage configuration. + + Deletes a Databricks storage configuration. You cannot delete a storage + configuration that is associated with any workspace. + + Arguments: + STORAGE_CONFIGURATION_ID: Databricks Account API storage configuration ID. + +Usage: + databricks account storage delete STORAGE_CONFIGURATION_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account storage get --help +Get storage configuration. + + Gets a Databricks storage configuration for an account, both specified by ID. + + Arguments: + STORAGE_CONFIGURATION_ID: Databricks Account API storage configuration ID. + +Usage: + databricks account storage get STORAGE_CONFIGURATION_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account storage list --help +Get all storage configurations. + + Gets a list of all Databricks storage configurations for your account, + specified by ID. 
+ +Usage: + databricks account storage list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/storage/storage/script b/acceptance/help/cmd/account/storage/storage/script new file mode 100755 index 000000000..e9ffb4b8d --- /dev/null +++ b/acceptance/help/cmd/account/storage/storage/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account storage create --help +trace $CLI account storage delete --help +trace $CLI account storage get --help +trace $CLI account storage list --help diff --git a/acceptance/help/cmd/account/usage-dashboards/usage-dashboards/output.txt b/acceptance/help/cmd/account/usage-dashboards/usage-dashboards/output.txt new file mode 100644 index 000000000..767608bf4 --- /dev/null +++ b/acceptance/help/cmd/account/usage-dashboards/usage-dashboards/output.txt @@ -0,0 +1,40 @@ + +>>> $CLI account usage-dashboards create --help +Create new usage dashboard. + + Create a usage dashboard specified by workspaceId, accountId, and dashboard + type. + +Usage: + databricks account usage-dashboards create [flags] + +Flags: + --dashboard-type UsageDashboardType Workspace level usage dashboard shows usage data for the specified workspace ID. Supported values: [USAGE_DASHBOARD_TYPE_GLOBAL, USAGE_DASHBOARD_TYPE_WORKSPACE] + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --workspace-id int The workspace ID of the workspace in which the usage dashboard is created. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account usage-dashboards get --help +Get usage dashboard. + + Get a usage dashboard specified by workspaceId, accountId, and dashboard type. + +Usage: + databricks account usage-dashboards get [flags] + +Flags: + --dashboard-type UsageDashboardType Workspace level usage dashboard shows usage data for the specified workspace ID. Supported values: [USAGE_DASHBOARD_TYPE_GLOBAL, USAGE_DASHBOARD_TYPE_WORKSPACE] + -h, --help help for get + --workspace-id int The workspace ID of the workspace in which the usage dashboard is created. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/usage-dashboards/usage-dashboards/script b/acceptance/help/cmd/account/usage-dashboards/usage-dashboards/script new file mode 100755 index 000000000..d4e6159b1 --- /dev/null +++ b/acceptance/help/cmd/account/usage-dashboards/usage-dashboards/script @@ -0,0 +1,3 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI account usage-dashboards create --help +trace $CLI account usage-dashboards get --help diff --git a/acceptance/help/cmd/account/users/users/output.txt b/acceptance/help/cmd/account/users/users/output.txt new file mode 100644 index 000000000..995d84155 --- /dev/null +++ b/acceptance/help/cmd/account/users/users/output.txt @@ -0,0 +1,145 @@ + +>>> $CLI account users create --help +Create a new user. + + Creates a new user in the Databricks account. This new user will also be added + to the Databricks account. + +Usage: + databricks account users create [flags] + +Flags: + --active If this user is active. + --display-name string String that represents a concatenation of given and family names. + --external-id string External ID is not currently supported. + -h, --help help for create + --id string Databricks user ID. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --user-name string Email address of the Databricks user. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account users delete --help +Delete a user. + + Deletes a user. Deleting a user from a Databricks account also removes objects + associated with the user. + + Arguments: + ID: Unique ID for a user in the Databricks account. + +Usage: + databricks account users delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account users get --help +Get user details. + + Gets information for a specific user in Databricks account. + + Arguments: + ID: Unique ID for a user in the Databricks account. + +Usage: + databricks account users get ID [flags] + +Flags: + --attributes string Comma-separated list of attributes to return in response. + --count int Desired number of results per page. + --excluded-attributes string Comma-separated list of attributes to exclude in response. + --filter string Query by which the results have to be filtered. + -h, --help help for get + --sort-by string Attribute to sort the results. + --sort-order GetSortOrder The order to sort the results. Supported values: [ascending, descending] + --start-index int Specifies the index of the first result. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account users list --help +List users. + + Gets details for all the users associated with a Databricks account. + +Usage: + databricks account users list [flags] + +Flags: + --attributes string Comma-separated list of attributes to return in response. + --count int Desired number of results per page. + --excluded-attributes string Comma-separated list of attributes to exclude in response. + --filter string Query by which the results have to be filtered. + -h, --help help for list + --sort-by string Attribute to sort the results. + --sort-order ListSortOrder The order to sort the results. Supported values: [ascending, descending] + --start-index int Specifies the index of the first result. 
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account users patch --help +Update user details. + + Partially updates a user resource by applying the supplied operations on + specific user attributes. + + Arguments: + ID: Unique ID for a user in the Databricks account. + +Usage: + databricks account users patch ID [flags] + +Flags: + -h, --help help for patch + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account users update --help +Replace a user. + + Replaces a user's information with the data supplied in request. + + Arguments: + ID: Databricks user ID. This is automatically set by Databricks. Any value + provided by the client will be ignored. + +Usage: + databricks account users update ID [flags] + +Flags: + --active If this user is active. + --display-name string String that represents a concatenation of given and family names. + --external-id string External ID is not currently supported. + -h, --help help for update + --id string Databricks user ID. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --user-name string Email address of the Databricks user. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/users/users/script b/acceptance/help/cmd/account/users/users/script new file mode 100755 index 000000000..6b4e636ad --- /dev/null +++ b/acceptance/help/cmd/account/users/users/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account users create --help +trace $CLI account users delete --help +trace $CLI account users get --help +trace $CLI account users list --help +trace $CLI account users patch --help +trace $CLI account users update --help diff --git a/acceptance/help/cmd/account/vpc-endpoints/vpc-endpoints/output.txt b/acceptance/help/cmd/account/vpc-endpoints/vpc-endpoints/output.txt new file mode 100644 index 000000000..ca940f0e2 --- /dev/null +++ b/acceptance/help/cmd/account/vpc-endpoints/vpc-endpoints/output.txt @@ -0,0 +1,109 @@ + +>>> $CLI account vpc-endpoints create --help +Create VPC endpoint configuration. + + Creates a VPC endpoint configuration, which represents a [VPC endpoint] object + in AWS used to communicate privately with Databricks over [AWS PrivateLink]. + + After you create the VPC endpoint configuration, the Databricks [endpoint + service] automatically accepts the VPC endpoint. + + Before configuring PrivateLink, read the [Databricks article about + PrivateLink]. 
+ + [AWS PrivateLink]: https://aws.amazon.com/privatelink + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html + [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html + + Arguments: + VPC_ENDPOINT_NAME: The human-readable name of the VPC endpoint configuration. + +Usage: + databricks account vpc-endpoints create VPC_ENDPOINT_NAME [flags] + +Flags: + --aws-vpc-endpoint-id string The ID of the VPC endpoint object in AWS. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --region string The AWS region in which this VPC endpoint object exists. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account vpc-endpoints delete --help +Delete VPC endpoint configuration. + + Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint] + that can communicate privately with Databricks over [AWS PrivateLink]. + + Before configuring PrivateLink, read the [Databricks article about + PrivateLink]. + + [AWS PrivateLink]: https://aws.amazon.com/privatelink + [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + + Arguments: + VPC_ENDPOINT_ID: Databricks VPC endpoint ID. + +Usage: + databricks account vpc-endpoints delete VPC_ENDPOINT_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account vpc-endpoints get --help +Get a VPC endpoint configuration. + + Gets a VPC endpoint configuration, which represents a [VPC endpoint] object in + AWS used to communicate privately with Databricks over [AWS PrivateLink]. + + [AWS PrivateLink]: https://aws.amazon.com/privatelink + [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html + + Arguments: + VPC_ENDPOINT_ID: Databricks VPC endpoint ID. + +Usage: + databricks account vpc-endpoints get VPC_ENDPOINT_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account vpc-endpoints list --help +Get all VPC endpoint configurations. + + Gets a list of all VPC endpoints for an account, specified by ID. + + Before configuring PrivateLink, read the [Databricks article about + PrivateLink].
+ + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + +Usage: + databricks account vpc-endpoints list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/vpc-endpoints/vpc-endpoints/script b/acceptance/help/cmd/account/vpc-endpoints/vpc-endpoints/script new file mode 100755 index 000000000..a0ae54c06 --- /dev/null +++ b/acceptance/help/cmd/account/vpc-endpoints/vpc-endpoints/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account vpc-endpoints create --help +trace $CLI account vpc-endpoints delete --help +trace $CLI account vpc-endpoints get --help +trace $CLI account vpc-endpoints list --help diff --git a/acceptance/help/cmd/account/workspace-assignment/workspace-assignment/output.txt b/acceptance/help/cmd/account/workspace-assignment/workspace-assignment/output.txt new file mode 100644 index 000000000..54d5aff32 --- /dev/null +++ b/acceptance/help/cmd/account/workspace-assignment/workspace-assignment/output.txt @@ -0,0 +1,86 @@ + +>>> $CLI account workspace-assignment delete --help +Delete permissions assignment. + + Deletes the workspace permissions assignment in a given account and workspace + for the specified principal. + + Arguments: + WORKSPACE_ID: The workspace ID for the account. + PRINCIPAL_ID: The ID of the user, service principal, or group. + +Usage: + databricks account workspace-assignment delete WORKSPACE_ID PRINCIPAL_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account workspace-assignment get --help +List workspace permissions. + + Get an array of workspace permissions for the specified account and workspace. + + Arguments: + WORKSPACE_ID: The workspace ID. + +Usage: + databricks account workspace-assignment get WORKSPACE_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account workspace-assignment list --help +Get permission assignments. + + Get the permission assignments for the specified Databricks account and + Databricks workspace. + + Arguments: + WORKSPACE_ID: The workspace ID for the account. + +Usage: + databricks account workspace-assignment list WORKSPACE_ID [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account workspace-assignment update --help +Create or update permissions assignment. + + Creates or updates the workspace permissions assignment in a given account and + workspace for the specified principal. + + Arguments: + WORKSPACE_ID: The workspace ID. + PRINCIPAL_ID: The ID of the user, service principal, or group. 
+ +Usage: + databricks account workspace-assignment update WORKSPACE_ID PRINCIPAL_ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/workspace-assignment/workspace-assignment/script b/acceptance/help/cmd/account/workspace-assignment/workspace-assignment/script new file mode 100755 index 000000000..5684e03d0 --- /dev/null +++ b/acceptance/help/cmd/account/workspace-assignment/workspace-assignment/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI account workspace-assignment delete --help +trace $CLI account workspace-assignment get --help +trace $CLI account workspace-assignment list --help +trace $CLI account workspace-assignment update --help diff --git a/acceptance/help/cmd/account/workspaces/workspaces/output.txt b/acceptance/help/cmd/account/workspaces/workspaces/output.txt new file mode 100644 index 000000000..3f5420c24 --- /dev/null +++ b/acceptance/help/cmd/account/workspaces/workspaces/output.txt @@ -0,0 +1,279 @@ + +>>> $CLI account workspaces create --help +Create a new workspace. + + Creates a new workspace. + + **Important**: This operation is asynchronous. A response with HTTP status + code 200 means the request has been accepted and is in progress, but does not + mean that the workspace deployed successfully and is running. The initial + workspace status is typically PROVISIONING. Use the workspace ID + (workspace_id) field in the response to identify the new workspace and make + repeated GET requests with the workspace ID and check its status. The + workspace becomes available when the status changes to RUNNING. + + Arguments: + WORKSPACE_NAME: The workspace's human-readable name. + +Usage: + databricks account workspaces create WORKSPACE_NAME [flags] + +Flags: + --aws-region string The AWS region of the workspace's data plane. + --cloud string The cloud provider which the workspace uses. + --credentials-id string ID of the workspace's credential configuration object. + --deployment-name string The deployment name defines part of the subdomain for the workspace. + -h, --help help for create + --is-no-public-ip-enabled Whether no public IP is enabled for the workspace. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --location string The Google Cloud region of the workspace data plane in your Google account. + --managed-services-customer-managed-key-id string The ID of the workspace's managed services encryption key configuration object. + --network-id string + --no-wait do not wait to reach RUNNING state + --pricing-tier PricingTier The pricing tier of the workspace. Supported values: [ + COMMUNITY_EDITION, + DEDICATED, + ENTERPRISE, + PREMIUM, + STANDARD, + UNKNOWN, + ] + --private-access-settings-id string ID of the workspace's private access settings object. + --storage-configuration-id string The ID of the workspace's storage configuration object. + --storage-customer-managed-key-id string The ID of the workspace's storage encryption key configuration object. 
+ --timeout duration maximum amount of time to reach RUNNING state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account workspaces delete --help +Delete a workspace. + + Terminates and deletes a Databricks workspace. From an API perspective, + deletion is immediate. However, it might take a few minutes for all workspace + resources to be deleted, depending on the size and number of workspace + resources. + + This operation is available only if your account is on the E2 version of the + platform or on a select custom plan that allows multiple workspaces per + account. + + Arguments: + WORKSPACE_ID: Workspace ID. + +Usage: + databricks account workspaces delete WORKSPACE_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account workspaces get --help +Get a workspace. + + Gets information including status for a Databricks workspace, specified by ID. + In the response, the workspace_status field indicates the current status. + After initial workspace creation (which is asynchronous), make repeated GET + requests with the workspace ID and check its status. The workspace becomes + available when the status changes to RUNNING. + + For information about how to create a new workspace with this API **including + error handling**, see [Create a new workspace using the Account API]. + + This operation is available only if your account is on the E2 version of the + platform or on a select custom plan that allows multiple workspaces per + account. + + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + + Arguments: + WORKSPACE_ID: Workspace ID. + +Usage: + databricks account workspaces get WORKSPACE_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account workspaces list --help +Get all workspaces. + + Gets a list of all workspaces associated with an account, specified by ID. + + This operation is available only if your account is on the E2 version of the + platform or on a select custom plan that allows multiple workspaces per + account. + +Usage: + databricks account workspaces list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI account workspaces update --help +Update workspace configuration. + + Updates a workspace configuration for either a running workspace or a failed + workspace. The elements that can be updated vary between these two use + cases. + + ### Update a failed workspace You can update a Databricks workspace + configuration for a failed workspace deployment for some fields, but not all + fields.
For a failed workspace, this request supports updates to the following + fields only: - Credential configuration ID - Storage configuration ID - + Network configuration ID. Used only to add or change a network configuration + for a customer-managed VPC. For a failed workspace only, you can convert a + workspace with Databricks-managed VPC to use a customer-managed VPC by adding + this ID. You cannot downgrade a workspace with a customer-managed VPC to be a + Databricks-managed VPC. You can update the network configuration for a failed + or running workspace to add PrivateLink support, though you must also add a + private access settings object. - Key configuration ID for managed services + (control plane storage, such as notebook source and Databricks SQL queries). + Used only if you use customer-managed keys for managed services. - Key + configuration ID for workspace storage (root S3 bucket and, optionally, EBS + volumes). Used only if you use customer-managed keys for workspace storage. + **Important**: If the workspace was ever in the running state, even if briefly + before becoming a failed workspace, you cannot add a new key configuration ID + for workspace storage. - Private access settings ID to add PrivateLink + support. You can add or update the private access settings ID to upgrade a + workspace to add support for front-end, back-end, or both types of + connectivity. You cannot remove (downgrade) any existing front-end or back-end + PrivateLink support on a workspace. - Custom tags. If you provide empty + custom tags, the update is not applied. - Network connectivity + configuration ID to add serverless stable IP support. You can add or update + the network connectivity configuration ID to ensure the workspace uses the + same set of stable IP CIDR blocks to access your resources. You cannot remove + a network connectivity configuration from the workspace once attached; you can + only switch to another one. + + After calling the PATCH operation to update the workspace configuration, + make repeated GET requests with the workspace ID and check the workspace + status. The update is successful if the workspace status changes to RUNNING. + + For information about how to create a new workspace with this API **including + error handling**, see [Create a new workspace using the Account API]. + + ### Update a running workspace You can update a Databricks workspace + configuration for running workspaces for some fields, but not all fields. For + a running workspace, this request supports updating the following fields only: + - Credential configuration ID - Network configuration ID. Used only if you + already use a customer-managed VPC. You cannot convert a running workspace + from a Databricks-managed VPC to a customer-managed VPC. You can use a network + configuration update in this API for a failed or running workspace to add + support for PrivateLink, although you also need to add a private access + settings object. - Key configuration ID for managed services (control plane + storage, such as notebook source and Databricks SQL queries). Databricks does + not directly encrypt the data with the customer-managed key (CMK). Databricks + uses both the CMK and the Databricks managed key (DMK) that is unique to your + workspace to encrypt the Data Encryption Key (DEK). Databricks uses the DEK to + encrypt your workspace's managed services persisted data. If the workspace
If the workspace + does not already have a CMK for managed services, adding this ID enables + managed services encryption for new or updated data. Managed services data + that existed before the key was added is not encrypted with the DEK until it + is modified. If the workspace already has customer-managed keys for + managed services, this request rotates (changes) the CMK keys and the DEK is + re-encrypted with the DMK and the new CMK. - Key configuration ID for + workspace storage (root S3 bucket and, optionally, EBS volumes). You can set + this only if the workspace does not already have a customer-managed key + configuration for workspace storage. - Private access settings ID to add + PrivateLink support. You can add or update the private access settings ID to + upgrade a workspace to add support for front-end, back-end, or both types of + connectivity. You cannot remove (downgrade) any existing front-end or back-end + PrivateLink support on a workspace. - Custom tags. If you provide empty + custom tags, the update is not applied. - Network connectivity + configuration ID to add serverless stable IP support. You can add or update + the network connectivity configuration ID to ensure the workspace uses the + same set of stable IP CIDR blocks to access your resources. You cannot remove + a network connectivity configuration from the workspace once attached; you can + only switch to another one. + + **Important**: To update a running workspace, your workspace must have no + running compute resources that run in your workspace's VPC in the Classic data + plane. For example, stop all all-purpose clusters, job clusters, pools with + running clusters, and Classic SQL warehouses. If you do not terminate all + cluster instances in the workspace before calling this API, the request will + fail. + + ### Wait until changes take effect. After calling the PATCH operation to + update the workspace configuration, make repeated GET requests with the + workspace ID and check the workspace status and the status of the fields. * + For workspaces with a Databricks-managed VPC, the workspace status becomes + PROVISIONING temporarily (typically under 20 minutes). If the workspace + update is successful, the workspace status changes to RUNNING. Note that you + can also check the workspace status in the [Account Console]. However, you + cannot use or create clusters for another 20 minutes after that status change. + This results in a total of up to 40 minutes in which you cannot create + clusters. If you create or use clusters before this time interval elapses, + clusters may fail to launch or could cause other unexpected + behavior. * For workspaces with a customer-managed VPC, the workspace status + stays at RUNNING and the VPC change happens immediately. A change to + the storage customer-managed key configuration ID might take a few minutes to + update, so continue to check the workspace until you observe that it has been + updated. If the update fails, the workspace might revert silently to its + original configuration. After the workspace has been updated, you cannot use + or create clusters for another 20 minutes. If you create or use clusters + before this time interval elapses, clusters may fail to launch + or could cause other unexpected behavior. + + If you update the _storage_ customer-managed key configurations, it takes 20 + minutes for the changes to fully take effect.
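The update-then-poll flow described above maps directly onto these commands. A minimal sketch, assuming jq is available and using placeholder IDs; note that the CLI already waits for the RUNNING state by default (see --timeout), so manual polling is only needed with --no-wait:

    # Kick off the update without waiting, then poll workspace_status ourselves.
    # 123 and $NETWORK_ID are placeholders, not values taken from this change.
    databricks account workspaces update 123 --network-id "$NETWORK_ID" --no-wait
    until [ "$(databricks account workspaces get 123 -o json | jq -r .workspace_status)" = "RUNNING" ]; do
      sleep 30
    done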
During the 20 minute wait, it is + important that you stop all REST API calls to the DBFS API. If you are + modifying _only the managed services key configuration_, you can omit the 20 + minute wait. + + **Important**: Customer-managed keys and customer-managed VPCs are supported + by only some deployment types and subscription types. If you have questions + about availability, contact your Databricks representative. + + This operation is available only if your account is on the E2 version of the + platform or on a select custom plan that allows multiple workspaces per + account. + + [Account Console]: https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + + Arguments: + WORKSPACE_ID: Workspace ID. + +Usage: + databricks account workspaces update WORKSPACE_ID [flags] + +Flags: + --aws-region string The AWS region of the workspace's data plane (for example, us-west-2). + --credentials-id string ID of the workspace's credential configuration object. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --managed-services-customer-managed-key-id string The ID of the workspace's managed services encryption key configuration object. + --network-connectivity-config-id string + --network-id string The ID of the workspace's network configuration object. + --no-wait do not wait to reach RUNNING state + --private-access-settings-id string The ID of the workspace's private access settings configuration object. + --storage-configuration-id string The ID of the workspace's storage configuration object. + --storage-customer-managed-key-id string The ID of the key configuration object for workspace storage. + --timeout duration maximum amount of time to reach RUNNING state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/account/workspaces/workspaces/script b/acceptance/help/cmd/account/workspaces/workspaces/script new file mode 100755 index 000000000..26c3d9d75 --- /dev/null +++ b/acceptance/help/cmd/account/workspaces/workspaces/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI account workspaces create --help +trace $CLI account workspaces delete --help +trace $CLI account workspaces get --help +trace $CLI account workspaces list --help +trace $CLI account workspaces update --help diff --git a/acceptance/help/cmd/workspace/access-control-proxy/access-control-proxy/output.txt b/acceptance/help/cmd/workspace/access-control-proxy/access-control-proxy/output.txt new file mode 100644 index 000000000..2db7056b4 --- /dev/null +++ b/acceptance/help/cmd/workspace/access-control-proxy/access-control-proxy/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI access-control-proxy get-assignable-roles-for-resource --help +Error: unknown command "access-control-proxy" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/access-control-proxy/access-control-proxy/script b/acceptance/help/cmd/workspace/access-control-proxy/access-control-proxy/script new file mode 100755 index 000000000..5dd4931cc --- /dev/null +++ b/acceptance/help/cmd/workspace/access-control-proxy/access-control-proxy/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI access-control-proxy get-assignable-roles-for-resource --help +trace $CLI access-control-proxy get-rule-set --help +trace $CLI access-control-proxy update-rule-set --help diff --git a/acceptance/help/cmd/workspace/access-control/access-control/output.txt b/acceptance/help/cmd/workspace/access-control/access-control/output.txt new file mode 100644 index 000000000..96f5bd249 --- /dev/null +++ b/acceptance/help/cmd/workspace/access-control/access-control/output.txt @@ -0,0 +1,16 @@ + +>>> $CLI access-control check-policy --help +Check access policy to a resource. + +Usage: + databricks access-control check-policy [flags] + +Flags: + -h, --help help for check-policy + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/access-control/access-control/script b/acceptance/help/cmd/workspace/access-control/access-control/script new file mode 100755 index 000000000..d24c29528 --- /dev/null +++ b/acceptance/help/cmd/workspace/access-control/access-control/script @@ -0,0 +1,2 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI access-control check-policy --help diff --git a/acceptance/help/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy/output.txt b/acceptance/help/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy/output.txt new file mode 100644 index 000000000..32b1c5c7f --- /dev/null +++ b/acceptance/help/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI aibi-dashboard-embedding-access-policy delete --help +Error: unknown command "aibi-dashboard-embedding-access-policy" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy/script b/acceptance/help/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy/script new file mode 100755 index 000000000..3be024823 --- /dev/null +++ b/acceptance/help/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI aibi-dashboard-embedding-access-policy delete --help +trace $CLI aibi-dashboard-embedding-access-policy get --help +trace $CLI aibi-dashboard-embedding-access-policy update --help diff --git a/acceptance/help/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains/output.txt b/acceptance/help/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains/output.txt new file mode 100644 index 000000000..fb65a9bfb --- /dev/null +++ b/acceptance/help/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI aibi-dashboard-embedding-approved-domains delete --help +Error: unknown command "aibi-dashboard-embedding-approved-domains" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains/script b/acceptance/help/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains/script new file mode 100755 index 000000000..7a2d91b98 --- /dev/null +++ b/acceptance/help/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI aibi-dashboard-embedding-approved-domains delete --help +trace $CLI aibi-dashboard-embedding-approved-domains get --help +trace $CLI aibi-dashboard-embedding-approved-domains update --help diff --git a/acceptance/help/cmd/workspace/alerts-legacy/alerts-legacy/output.txt b/acceptance/help/cmd/workspace/alerts-legacy/alerts-legacy/output.txt new file mode 100644 index 000000000..3c80ad6e0 --- /dev/null +++ b/acceptance/help/cmd/workspace/alerts-legacy/alerts-legacy/output.txt @@ -0,0 +1,119 @@ + +>>> $CLI alerts-legacy create --help +Create an alert. + + Creates an alert. An alert is a Databricks SQL object that periodically runs a + query, evaluates a condition of its result, and notifies users or notification + destinations if the condition was met. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:alerts/create instead. 
[Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + +Usage: + databricks alerts-legacy create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --parent string The identifier of the workspace folder containing the object. + --rearm int Number of seconds after being triggered before the alert rearms itself and can be triggered again. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI alerts-legacy delete --help +Delete an alert. + + Deletes an alert. Deleted alerts are no longer accessible and cannot be + restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to + the trash. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:alerts/delete instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + +Usage: + databricks alerts-legacy delete ALERT_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI alerts-legacy get --help +Get an alert. + + Gets an alert. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:alerts/get instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + +Usage: + databricks alerts-legacy get ALERT_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI alerts-legacy list --help +Get alerts. + + Gets a list of alerts. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:alerts/list instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + +Usage: + databricks alerts-legacy list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI alerts-legacy update --help +Update an alert. + + Updates an alert. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:alerts/update instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + +Usage: + databricks alerts-legacy update ALERT_ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --rearm int Number of seconds after being triggered before the alert rearms itself and can be triggered again. 
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/alerts-legacy/alerts-legacy/script b/acceptance/help/cmd/workspace/alerts-legacy/alerts-legacy/script new file mode 100755 index 000000000..9b4f3a556 --- /dev/null +++ b/acceptance/help/cmd/workspace/alerts-legacy/alerts-legacy/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI alerts-legacy create --help +trace $CLI alerts-legacy delete --help +trace $CLI alerts-legacy get --help +trace $CLI alerts-legacy list --help +trace $CLI alerts-legacy update --help diff --git a/acceptance/help/cmd/workspace/alerts/alerts/output.txt b/acceptance/help/cmd/workspace/alerts/alerts/output.txt new file mode 100644 index 000000000..2f3f4e77e --- /dev/null +++ b/acceptance/help/cmd/workspace/alerts/alerts/output.txt @@ -0,0 +1,100 @@ + +>>> $CLI alerts create --help +Create an alert. + + Creates an alert. + +Usage: + databricks alerts create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI alerts delete --help +Delete an alert. + + Moves an alert to the trash. Trashed alerts immediately disappear from + searches and list views, and can no longer trigger. You can restore a trashed + alert through the UI. A trashed alert is permanently deleted after 30 days. + +Usage: + databricks alerts delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI alerts get --help +Get an alert. + + Gets an alert. + +Usage: + databricks alerts get ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI alerts list --help +List alerts. + + Gets a list of alerts accessible to the user, ordered by creation time. + **Warning:** Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban. + +Usage: + databricks alerts list [flags] + +Flags: + -h, --help help for list + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI alerts update --help +Update an alert. + + Updates an alert. + + Arguments: + ID: + UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask + specifies which fields of the setting payload will be updated. The field + mask needs to be supplied as single string. To specify multiple fields in + the field mask, use comma as the separator (no space). 
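A hypothetical invocation of the comma-separated field mask described above; the alert ID, field names, and request body are illustrative assumptions, not taken from this change:

    # PATCH two fields of an alert in one call; the mask uses commas, no spaces.
    databricks alerts update 6f8e3c2d 'display_name,query_id' \
      --json '{"alert": {"display_name": "cpu-alert", "query_id": "abc123"}}'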
+ +Usage: + databricks alerts update ID UPDATE_MASK [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/alerts/alerts/script b/acceptance/help/cmd/workspace/alerts/alerts/script new file mode 100755 index 000000000..8b7f4d6f8 --- /dev/null +++ b/acceptance/help/cmd/workspace/alerts/alerts/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI alerts create --help +trace $CLI alerts delete --help +trace $CLI alerts get --help +trace $CLI alerts list --help +trace $CLI alerts update --help diff --git a/acceptance/help/cmd/workspace/apps/apps/output.txt b/acceptance/help/cmd/workspace/apps/apps/output.txt new file mode 100644 index 000000000..fd738704a --- /dev/null +++ b/acceptance/help/cmd/workspace/apps/apps/output.txt @@ -0,0 +1,308 @@ + +>>> $CLI apps create --help +Create an app. + + Creates a new app. + + Arguments: + NAME: The name of the app. The name must contain only lowercase alphanumeric + characters and hyphens. It must be unique within the workspace. + +Usage: + databricks apps create NAME [flags] + +Flags: + --description string The description of the app. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-compute If true, the app will not be started after creation. + --no-wait do not wait to reach ACTIVE state + --timeout duration maximum amount of time to reach ACTIVE state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps delete --help +Delete an app. + + Deletes an app. + + Arguments: + NAME: The name of the app. + +Usage: + databricks apps delete NAME [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps deploy --help +Create an app deployment. + + Creates an app deployment for the app with the supplied name. + + Arguments: + APP_NAME: The name of the app. + +Usage: + databricks apps deploy APP_NAME [flags] + +Flags: + --deployment-id string The unique id of the deployment. + -h, --help help for deploy + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --mode AppDeploymentMode The mode of which the deployment will manage the source code. Supported values: [AUTO_SYNC, SNAPSHOT] + --no-wait do not wait to reach SUCCEEDED state + --source-code-path string The workspace file system path of the source code used to create the app deployment. + --timeout duration maximum amount of time to reach SUCCEEDED state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps get --help +Get an app. 
+ + Retrieves information for the app with the supplied name. + + Arguments: + NAME: The name of the app. + +Usage: + databricks apps get NAME [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps get-deployment --help +Get an app deployment. + + Retrieves information for the app deployment with the supplied name and + deployment id. + + Arguments: + APP_NAME: The name of the app. + DEPLOYMENT_ID: The unique id of the deployment. + +Usage: + databricks apps get-deployment APP_NAME DEPLOYMENT_ID [flags] + +Flags: + -h, --help help for get-deployment + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps get-permission-levels --help +Get app permission levels. + + Gets the permission levels that a user can have on an object. + + Arguments: + APP_NAME: The app for which to get or manage permissions. + +Usage: + databricks apps get-permission-levels APP_NAME [flags] + +Flags: + -h, --help help for get-permission-levels + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps get-permissions --help +Get app permissions. + + Gets the permissions of an app. Apps can inherit permissions from their root + object. + + Arguments: + APP_NAME: The app for which to get or manage permissions. + +Usage: + databricks apps get-permissions APP_NAME [flags] + +Flags: + -h, --help help for get-permissions + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps list --help +List apps. + + Lists all apps in the workspace. + +Usage: + databricks apps list [flags] + +Flags: + -h, --help help for list + --page-size int Upper bound for items returned. + --page-token string Pagination token to go to the next page of apps. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps list-deployments --help +List app deployments. + + Lists all app deployments for the app with the supplied name. + + Arguments: + APP_NAME: The name of the app. + +Usage: + databricks apps list-deployments APP_NAME [flags] + +Flags: + -h, --help help for list-deployments + --page-size int Upper bound for items returned. + --page-token string Pagination token to go to the next page of apps. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps set-permissions --help +Set app permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. + + Arguments: + APP_NAME: The app for which to get or manage permissions. 
+ +Usage: + databricks apps set-permissions APP_NAME [flags] + +Flags: + -h, --help help for set-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps start --help +Start an app. + + Start the last active deployment of the app in the workspace. + + Arguments: + NAME: The name of the app. + +Usage: + databricks apps start NAME [flags] + +Flags: + -h, --help help for start + --no-wait do not wait to reach ACTIVE state + --timeout duration maximum amount of time to reach ACTIVE state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps stop --help +Stop an app. + + Stops the active deployment of the app in the workspace. + + Arguments: + NAME: The name of the app. + +Usage: + databricks apps stop NAME [flags] + +Flags: + -h, --help help for stop + --no-wait do not wait to reach STOPPED state + --timeout duration maximum amount of time to reach STOPPED state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps update --help +Update an app. + + Updates the app with the supplied name. + + Arguments: + NAME: The name of the app. The name must contain only lowercase alphanumeric + characters and hyphens. It must be unique within the workspace. + +Usage: + databricks apps update NAME [flags] + +Flags: + --description string The description of the app. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI apps update-permissions --help +Update app permissions. + + Updates the permissions on an app. Apps can inherit permissions from their + root object. + + Arguments: + APP_NAME: The app for which to get or manage permissions. + +Usage: + databricks apps update-permissions APP_NAME [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/apps/apps/script b/acceptance/help/cmd/workspace/apps/apps/script new file mode 100755 index 000000000..09307580d --- /dev/null +++ b/acceptance/help/cmd/workspace/apps/apps/script @@ -0,0 +1,15 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI apps create --help +trace $CLI apps delete --help +trace $CLI apps deploy --help +trace $CLI apps get --help +trace $CLI apps get-deployment --help +trace $CLI apps get-permission-levels --help +trace $CLI apps get-permissions --help +trace $CLI apps list --help +trace $CLI apps list-deployments --help +trace $CLI apps set-permissions --help +trace $CLI apps start --help +trace $CLI apps stop --help +trace $CLI apps update --help +trace $CLI apps update-permissions --help diff --git a/acceptance/help/cmd/workspace/artifact-allowlists/artifact-allowlists/output.txt b/acceptance/help/cmd/workspace/artifact-allowlists/artifact-allowlists/output.txt new file mode 100644 index 000000000..dc3b3ac9d --- /dev/null +++ b/acceptance/help/cmd/workspace/artifact-allowlists/artifact-allowlists/output.txt @@ -0,0 +1,44 @@ + +>>> $CLI artifact-allowlists get --help +Get an artifact allowlist. + + Get the artifact allowlist of a certain artifact type. The caller must be a + metastore admin or have the **MANAGE ALLOWLIST** privilege on the metastore. + + Arguments: + ARTIFACT_TYPE: The artifact type of the allowlist. + +Usage: + databricks artifact-allowlists get ARTIFACT_TYPE [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI artifact-allowlists update --help +Set an artifact allowlist. + + Set the artifact allowlist of a certain artifact type. The whole artifact + allowlist is replaced with the new allowlist. The caller must be a metastore + admin or have the **MANAGE ALLOWLIST** privilege on the metastore. + + Arguments: + ARTIFACT_TYPE: The artifact type of the allowlist. + +Usage: + databricks artifact-allowlists update ARTIFACT_TYPE [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/artifact-allowlists/artifact-allowlists/script b/acceptance/help/cmd/workspace/artifact-allowlists/artifact-allowlists/script new file mode 100755 index 000000000..b2a4a5660 --- /dev/null +++ b/acceptance/help/cmd/workspace/artifact-allowlists/artifact-allowlists/script @@ -0,0 +1,3 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI artifact-allowlists get --help +trace $CLI artifact-allowlists update --help diff --git a/acceptance/help/cmd/workspace/automatic-cluster-update/automatic-cluster-update/output.txt b/acceptance/help/cmd/workspace/automatic-cluster-update/automatic-cluster-update/output.txt new file mode 100644 index 000000000..c894a8912 --- /dev/null +++ b/acceptance/help/cmd/workspace/automatic-cluster-update/automatic-cluster-update/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI automatic-cluster-update get --help +Error: unknown command "automatic-cluster-update" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/automatic-cluster-update/automatic-cluster-update/script b/acceptance/help/cmd/workspace/automatic-cluster-update/automatic-cluster-update/script new file mode 100755 index 000000000..8e8808ba6 --- /dev/null +++ b/acceptance/help/cmd/workspace/automatic-cluster-update/automatic-cluster-update/script @@ -0,0 +1,3 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI automatic-cluster-update get --help +trace $CLI automatic-cluster-update update --help diff --git a/acceptance/help/cmd/workspace/catalogs/catalogs/output.txt b/acceptance/help/cmd/workspace/catalogs/catalogs/output.txt new file mode 100644 index 000000000..1f918ce97 --- /dev/null +++ b/acceptance/help/cmd/workspace/catalogs/catalogs/output.txt @@ -0,0 +1,124 @@ + +>>> $CLI catalogs create --help +Create a catalog. + + Creates a new catalog instance in the parent metastore if the caller is a + metastore admin or has the **CREATE_CATALOG** privilege. + + Arguments: + NAME: Name of catalog. + +Usage: + databricks catalogs create NAME [flags] + +Flags: + --comment string User-provided free-form text description. + --connection-name string The name of the connection to an external data source. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --provider-name string The name of the delta sharing provider. + --share-name string The name of the share under the share provider. + --storage-root string Storage root URL for managed tables within catalog. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI catalogs delete --help +Delete a catalog. + + Deletes the catalog that matches the supplied name. The caller must be a + metastore admin or the owner of the catalog. + + Arguments: + NAME: The name of the catalog. + +Usage: + databricks catalogs delete NAME [flags] + +Flags: + --force Force deletion even if the catalog is not empty. + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI catalogs get --help +Get a catalog. + + Gets the specified catalog in a metastore. The caller must be a metastore + admin, the owner of the catalog, or a user that has the **USE_CATALOG** + privilege set for their account. + + Arguments: + NAME: The name of the catalog. + +Usage: + databricks catalogs get NAME [flags] + +Flags: + -h, --help help for get + --include-browse Whether to include catalogs in the response for which the principal can only access selective metadata.
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI catalogs list --help +List catalogs. + + Gets an array of catalogs in the metastore. If the caller is the metastore + admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the + caller (or for which the caller has the **USE_CATALOG** privilege) will be + retrieved. There is no guarantee of a specific ordering of the elements in the + array. + +Usage: + databricks catalogs list [flags] + +Flags: + -h, --help help for list + --include-browse Whether to include catalogs in the response for which the principal can only access selective metadata. + --max-results int Maximum number of catalogs to return. + --page-token string Opaque pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI catalogs update --help +Update a catalog. + + Updates the catalog that matches the supplied name. The caller must be either + the owner of the catalog, or a metastore admin (when changing the owner field + of the catalog). + + Arguments: + NAME: The name of the catalog. + +Usage: + databricks catalogs update NAME [flags] + +Flags: + --comment string User-provided free-form text description. + --enable-predictive-optimization EnablePredictiveOptimization Whether predictive optimization should be enabled for this object and objects under it. Supported values: [DISABLE, ENABLE, INHERIT] + -h, --help help for update + --isolation-mode CatalogIsolationMode Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATED, OPEN] + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --new-name string New name for the catalog. + --owner string Username of current owner of catalog. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/catalogs/catalogs/script b/acceptance/help/cmd/workspace/catalogs/catalogs/script new file mode 100755 index 000000000..bd37b3770 --- /dev/null +++ b/acceptance/help/cmd/workspace/catalogs/catalogs/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI catalogs create --help +trace $CLI catalogs delete --help +trace $CLI catalogs get --help +trace $CLI catalogs list --help +trace $CLI catalogs update --help diff --git a/acceptance/help/cmd/workspace/clean-room-assets/clean-room-assets/output.txt b/acceptance/help/cmd/workspace/clean-room-assets/clean-room-assets/output.txt new file mode 100644 index 000000000..94e1f3930 --- /dev/null +++ b/acceptance/help/cmd/workspace/clean-room-assets/clean-room-assets/output.txt @@ -0,0 +1,124 @@ + +>>> $CLI clean-room-assets create --help +Create an asset. + + Create a clean room asset: share an asset like a notebook or table into the + clean room. For each UC asset that is added through this method, the clean + room owner must also have enough privilege on the asset to consume it.
The + privilege must be maintained indefinitely for the clean room to be able to + access the asset. Typically, you should use a group as the clean room owner. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + +Usage: + databricks clean-room-assets create CLEAN_ROOM_NAME [flags] + +Flags: + --asset-type CleanRoomAssetAssetType The type of the asset. Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME] + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --name string A fully qualified name that uniquely identifies the asset within the clean room. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clean-room-assets delete --help +Delete an asset. + + Delete a clean room asset - unshare/remove the asset from the clean room. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + ASSET_FULL_NAME: The fully qualified name of the asset; it is the same as the name field in + CleanRoomAsset. + +Usage: + databricks clean-room-assets delete CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clean-room-assets get --help +Get an asset. + + Get the details of a clean room asset by its type and full name. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + ASSET_FULL_NAME: The fully qualified name of the asset; it is the same as the name field in + CleanRoomAsset. + +Usage: + databricks clean-room-assets get CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clean-room-assets list --help +List assets. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + +Usage: + databricks clean-room-assets list CLEAN_ROOM_NAME [flags] + +Flags: + -h, --help help for list + --page-token string Opaque pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clean-room-assets update --help +Update an asset. + + Update a clean room asset, for example updating the content of a notebook or + changing the shared partitions of a table. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + NAME: A fully qualified name that uniquely identifies the asset within the clean + room. This is also the name displayed in the clean room UI. + + For UC securable assets (tables, volumes, etc.), the format is + *shared_catalog*.*shared_schema*.*asset_name* + + For notebooks, the name is the notebook file name. + +Usage: + databricks clean-room-assets update CLEAN_ROOM_NAME ASSET_TYPE NAME [flags] + +Flags: + --asset-type CleanRoomAssetAssetType The type of the asset.
Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME] + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --name string A fully qualified name that uniquely identifies the asset within the clean room. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/clean-room-assets/clean-room-assets/script b/acceptance/help/cmd/workspace/clean-room-assets/clean-room-assets/script new file mode 100755 index 000000000..1d654958d --- /dev/null +++ b/acceptance/help/cmd/workspace/clean-room-assets/clean-room-assets/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI clean-room-assets create --help +trace $CLI clean-room-assets delete --help +trace $CLI clean-room-assets get --help +trace $CLI clean-room-assets list --help +trace $CLI clean-room-assets update --help diff --git a/acceptance/help/cmd/workspace/clean-room-task-runs/clean-room-task-runs/output.txt b/acceptance/help/cmd/workspace/clean-room-task-runs/clean-room-task-runs/output.txt new file mode 100644 index 000000000..243292986 --- /dev/null +++ b/acceptance/help/cmd/workspace/clean-room-task-runs/clean-room-task-runs/output.txt @@ -0,0 +1,23 @@ + +>>> $CLI clean-room-task-runs list --help +List notebook task runs. + + List all the historical notebook task runs in a clean room. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + +Usage: + databricks clean-room-task-runs list CLEAN_ROOM_NAME [flags] + +Flags: + -h, --help help for list + --notebook-name string Notebook name. + --page-size int The maximum number of task runs to return. + --page-token string Opaque pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/clean-room-task-runs/clean-room-task-runs/script b/acceptance/help/cmd/workspace/clean-room-task-runs/clean-room-task-runs/script new file mode 100755 index 000000000..ce2bce972 --- /dev/null +++ b/acceptance/help/cmd/workspace/clean-room-task-runs/clean-room-task-runs/script @@ -0,0 +1,2 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI clean-room-task-runs list --help diff --git a/acceptance/help/cmd/workspace/clean-rooms/clean-rooms/output.txt b/acceptance/help/cmd/workspace/clean-rooms/clean-rooms/output.txt new file mode 100644 index 000000000..5616fb1ac --- /dev/null +++ b/acceptance/help/cmd/workspace/clean-rooms/clean-rooms/output.txt @@ -0,0 +1,134 @@ + +>>> $CLI clean-rooms create --help +Create a clean room. + + Create a new clean room with the specified collaborators. This method is + asynchronous; the returned name field inside the clean_room field can be used + to poll the clean room status, using the :method:cleanrooms/get method. When + this method returns, the cluster will be in a PROVISIONING state. The cluster + will be usable once it enters an ACTIVE state. + + The caller must be a metastore admin or have the **CREATE_CLEAN_ROOM** + privilege on the metastore. 
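Because create is asynchronous, a caller polls get until the clean room becomes usable. A rough sketch; the response paths (.clean_room.name on create, .status on get) are assumptions inferred from the description above, and a real create would likely also need a --json body with collaborators:

    # Create the clean room, capture its name, then poll until ACTIVE.
    name=$(databricks clean-rooms create --name my-room -o json | jq -r .clean_room.name)
    until [ "$(databricks clean-rooms get "$name" -o json | jq -r .status)" = "ACTIVE" ]; do
      sleep 30
    done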
+ +Usage: + databricks clean-rooms create [flags] + +Flags: + --comment string + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --name string The name of the clean room. + --owner string This is Databricks username of the owner of the local clean room securable for permission management. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clean-rooms create-output-catalog --help +Create an output catalog. + + Create the output catalog of the clean room. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + +Usage: + databricks clean-rooms create-output-catalog CLEAN_ROOM_NAME [flags] + +Flags: + --catalog-name string The name of the output catalog in UC. + -h, --help help for create-output-catalog + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clean-rooms delete --help +Delete a clean room. + + Delete a clean room. After deletion, the clean room will be removed from the + metastore. If the other collaborators have not deleted the clean room, they + will still have the clean room in their metastore, but it will be in a DELETED + state and no operations other than deletion can be performed on it. + + Arguments: + NAME: Name of the clean room. + +Usage: + databricks clean-rooms delete NAME [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clean-rooms get --help +Get a clean room. + + Get the details of a clean room given its name. + +Usage: + databricks clean-rooms get NAME [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clean-rooms list --help +List clean rooms. + + Get a list of all clean rooms of the metastore. Only clean rooms the caller + has access to are returned. + +Usage: + databricks clean-rooms list [flags] + +Flags: + -h, --help help for list + --page-size int Maximum number of clean rooms to return (i.e., the page length). + --page-token string Opaque pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clean-rooms update --help +Update a clean room. + + Update a clean room. The caller must be the owner of the clean room, have + **MODIFY_CLEAN_ROOM** privilege, or be metastore admin. + + When the caller is a metastore admin, only the __owner__ field can be updated. + + Arguments: + NAME: Name of the clean room. 
+ +Usage: + databricks clean-rooms update NAME [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/clean-rooms/clean-rooms/script b/acceptance/help/cmd/workspace/clean-rooms/clean-rooms/script new file mode 100755 index 000000000..61e2d7561 --- /dev/null +++ b/acceptance/help/cmd/workspace/clean-rooms/clean-rooms/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI clean-rooms create --help +trace $CLI clean-rooms create-output-catalog --help +trace $CLI clean-rooms delete --help +trace $CLI clean-rooms get --help +trace $CLI clean-rooms list --help +trace $CLI clean-rooms update --help diff --git a/acceptance/help/cmd/workspace/cluster-policies/cluster-policies/output.txt b/acceptance/help/cmd/workspace/cluster-policies/cluster-policies/output.txt new file mode 100644 index 000000000..4b22ab087 --- /dev/null +++ b/acceptance/help/cmd/workspace/cluster-policies/cluster-policies/output.txt @@ -0,0 +1,199 @@ + +>>> $CLI cluster-policies create --help +Create a new policy. + + Creates a new policy with prescribed settings. + +Usage: + databricks cluster-policies create [flags] + +Flags: + --definition string Policy definition document expressed in [Databricks Cluster Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html). + --description string Additional human-readable description of the cluster policy. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --max-clusters-per-user int Max number of clusters per user that can be active using this policy. + --name string Cluster Policy name requested by the user. + --policy-family-definition-overrides string Policy definition JSON document expressed in [Databricks Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html). + --policy-family-id string ID of the policy family. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI cluster-policies delete --help +Delete a cluster policy. + + Delete a policy for a cluster. Clusters governed by this policy can still run, + but cannot be edited. + + Arguments: + POLICY_ID: The ID of the policy to delete. + +Usage: + databricks cluster-policies delete POLICY_ID [flags] + +Flags: + -h, --help help for delete + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI cluster-policies edit --help +Update a cluster policy. + + Update an existing policy for cluster. This operation may make some clusters + governed by the previous policy invalid. + + Arguments: + POLICY_ID: The ID of the policy to update. 
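The --definition flag on cluster-policies create above takes a document in the linked Policy Definition Language. A hedged sketch; the policy body is an illustrative example of that language, not something asserted by this change:

    # Create a policy that caps auto-termination at 120 minutes.
    databricks cluster-policies create \
      --name capped-autotermination \
      --definition '{"autotermination_minutes": {"type": "range", "maxValue": 120}}'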
+ +Usage: + databricks cluster-policies edit POLICY_ID [flags] + +Flags: + --definition string Policy definition document expressed in [Databricks Cluster Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html). + --description string Additional human-readable description of the cluster policy. + -h, --help help for edit + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --max-clusters-per-user int Max number of clusters per user that can be active using this policy. + --name string Cluster Policy name requested by the user. + --policy-family-definition-overrides string Policy definition JSON document expressed in [Databricks Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html). + --policy-family-id string ID of the policy family. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI cluster-policies get --help +Get a cluster policy. + + Get a cluster policy entity. Creation and editing is available to admins only. + + Arguments: + POLICY_ID: Canonical unique identifier for the Cluster Policy. + +Usage: + databricks cluster-policies get POLICY_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI cluster-policies get-permission-levels --help +Get cluster policy permission levels. + + Gets the permission levels that a user can have on an object. + + Arguments: + CLUSTER_POLICY_ID: The cluster policy for which to get or manage permissions. + +Usage: + databricks cluster-policies get-permission-levels CLUSTER_POLICY_ID [flags] + +Flags: + -h, --help help for get-permission-levels + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI cluster-policies get-permissions --help +Get cluster policy permissions. + + Gets the permissions of a cluster policy. Cluster policies can inherit + permissions from their root object. + + Arguments: + CLUSTER_POLICY_ID: The cluster policy for which to get or manage permissions. + +Usage: + databricks cluster-policies get-permissions CLUSTER_POLICY_ID [flags] + +Flags: + -h, --help help for get-permissions + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI cluster-policies list --help +List cluster policies. + + Returns a list of policies accessible by the requesting user. + +Usage: + databricks cluster-policies list [flags] + +Flags: + -h, --help help for list + --sort-column ListSortColumn The cluster policy attribute to sort by. Supported values: [POLICY_CREATION_TIME, POLICY_NAME] + --sort-order ListSortOrder The order in which the policies get listed. 
Supported values: [ASC, DESC] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI cluster-policies set-permissions --help +Set cluster policy permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. + + Arguments: + CLUSTER_POLICY_ID: The cluster policy for which to get or manage permissions. + +Usage: + databricks cluster-policies set-permissions CLUSTER_POLICY_ID [flags] + +Flags: + -h, --help help for set-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI cluster-policies update-permissions --help +Update cluster policy permissions. + + Updates the permissions on a cluster policy. Cluster policies can inherit + permissions from their root object. + + Arguments: + CLUSTER_POLICY_ID: The cluster policy for which to get or manage permissions. + +Usage: + databricks cluster-policies update-permissions CLUSTER_POLICY_ID [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/cluster-policies/cluster-policies/script b/acceptance/help/cmd/workspace/cluster-policies/cluster-policies/script new file mode 100755 index 000000000..dea76725b --- /dev/null +++ b/acceptance/help/cmd/workspace/cluster-policies/cluster-policies/script @@ -0,0 +1,10 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI cluster-policies create --help +trace $CLI cluster-policies delete --help +trace $CLI cluster-policies edit --help +trace $CLI cluster-policies get --help +trace $CLI cluster-policies get-permission-levels --help +trace $CLI cluster-policies get-permissions --help +trace $CLI cluster-policies list --help +trace $CLI cluster-policies set-permissions --help +trace $CLI cluster-policies update-permissions --help diff --git a/acceptance/help/cmd/workspace/clusters/clusters/output.txt b/acceptance/help/cmd/workspace/clusters/clusters/output.txt new file mode 100644 index 000000000..a04dff83c --- /dev/null +++ b/acceptance/help/cmd/workspace/clusters/clusters/output.txt @@ -0,0 +1,586 @@ + +>>> $CLI clusters change-owner --help +Change cluster owner. + + Change the owner of the cluster. You must be an admin and the cluster must be + terminated to perform this operation. The service principal application ID can + be supplied as an argument to owner_username. + + Arguments: + CLUSTER_ID: + OWNER_USERNAME: New owner of the cluster_id after this RPC. 
+
+Usage:
+ databricks clusters change-owner CLUSTER_ID OWNER_USERNAME [flags]
+
+Flags:
+ -h, --help help for change-owner
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI clusters create --help
+Create new cluster.
+
+ Creates a new Spark cluster. This method will acquire new instances from the
+ cloud provider if necessary. Note: Databricks may not be able to acquire some
+ of the requested nodes, due to cloud provider limitations (account limits,
+ spot price, etc.) or transient network issues.
+
+ If Databricks acquires at least 85% of the requested on-demand nodes, cluster
+ creation will succeed. Otherwise the cluster will terminate with an
+ informative error message.
+
+ Rather than authoring the cluster's JSON definition from scratch, Databricks
+ recommends filling out the [create compute UI] and then copying the generated
+ JSON definition from the UI.
+
+ [create compute UI]: https://docs.databricks.com/compute/configure.html
+
+ Arguments:
+ SPARK_VERSION: The Spark version of the cluster, e.g. 3.3.x-scala2.11. A list of
+ available Spark versions can be retrieved by using the
+ :method:clusters/sparkVersions API call.
+
+Usage:
+ databricks clusters create SPARK_VERSION [flags]
+
+Flags:
+ --apply-policy-default-values When set to true, fixed and default values from the policy will be used for fields that are omitted.
+ --autotermination-minutes int Automatically terminates the cluster after it is inactive for this time in minutes.
+ --cluster-name string Cluster name requested by the user.
+ --data-security-mode DataSecurityMode Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [
+ DATA_SECURITY_MODE_AUTO,
+ DATA_SECURITY_MODE_DEDICATED,
+ DATA_SECURITY_MODE_STANDARD,
+ LEGACY_PASSTHROUGH,
+ LEGACY_SINGLE_USER,
+ LEGACY_SINGLE_USER_STANDARD,
+ LEGACY_TABLE_ACL,
+ NONE,
+ SINGLE_USER,
+ USER_ISOLATION,
+ ]
+ --driver-instance-pool-id string The optional ID of the instance pool to which the driver of the cluster belongs.
+ --driver-node-type-id string The node type of the Spark driver.
+ --enable-elastic-disk Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.
+ --enable-local-disk-encryption Whether to enable LUKS on cluster VMs' local disks.
+ -h, --help help for create
+ --instance-pool-id string The optional ID of the instance pool to which the cluster belongs.
+ --is-single-node This field can only be used with kind.
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --kind Kind The kind of compute described by this compute specification. Supported values: [CLASSIC_PREVIEW]
+ --no-wait do not wait to reach RUNNING state
+ --node-type-id string This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.
+ --num-workers int Number of worker nodes that this cluster should have.
+ --policy-id string The ID of the cluster policy used to create the cluster if applicable.
+ --runtime-engine RuntimeEngine Determines the cluster's runtime engine, either standard or Photon. Supported values: [NULL, PHOTON, STANDARD]
+ --single-user-name string Single user name if data_security_mode is SINGLE_USER.
+ --timeout duration maximum amount of time to reach RUNNING state (default 20m0s)
+ --use-ml-runtime This field can only be used with kind.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI clusters delete --help
+Terminate cluster.
+
+ Terminates the Spark cluster with the specified ID. The cluster is removed
+ asynchronously. Once the termination has completed, the cluster will be in a
+ TERMINATED state. If the cluster is already in a TERMINATING or
+ TERMINATED state, nothing will happen.
+
+ Arguments:
+ CLUSTER_ID: The cluster to be terminated.
+
+Usage:
+ databricks clusters delete CLUSTER_ID [flags]
+
+Flags:
+ -h, --help help for delete
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --no-wait do not wait to reach TERMINATED state
+ --timeout duration maximum amount of time to reach TERMINATED state (default 20m0s)
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI clusters edit --help
+Update cluster configuration.
+
+ Updates the configuration of a cluster to match the provided attributes and
+ size. A cluster can be updated if it is in a RUNNING or TERMINATED state.
+
+ If a cluster is updated while in a RUNNING state, it will be restarted so
+ that the new attributes can take effect.
+
+ If a cluster is updated while in a TERMINATED state, it will remain
+ TERMINATED. The next time it is started using the clusters/start API, the
+ new attributes will take effect. Any attempt to update a cluster in any other
+ state will be rejected with an INVALID_STATE error code.
+
+ Clusters created by the Databricks Jobs service cannot be edited.
+
+ Arguments:
+ CLUSTER_ID: ID of the cluster
+ SPARK_VERSION: The Spark version of the cluster, e.g. 3.3.x-scala2.11. A list of
+ available Spark versions can be retrieved by using the
+ :method:clusters/sparkVersions API call.
+
+Usage:
+ databricks clusters edit CLUSTER_ID SPARK_VERSION [flags]
+
+Flags:
+ --apply-policy-default-values When set to true, fixed and default values from the policy will be used for fields that are omitted.
+ --autotermination-minutes int Automatically terminates the cluster after it is inactive for this time in minutes.
+ --cluster-name string Cluster name requested by the user.
+ --data-security-mode DataSecurityMode Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [
+ DATA_SECURITY_MODE_AUTO,
+ DATA_SECURITY_MODE_DEDICATED,
+ DATA_SECURITY_MODE_STANDARD,
+ LEGACY_PASSTHROUGH,
+ LEGACY_SINGLE_USER,
+ LEGACY_SINGLE_USER_STANDARD,
+ LEGACY_TABLE_ACL,
+ NONE,
+ SINGLE_USER,
+ USER_ISOLATION,
+ ]
+ --driver-instance-pool-id string The optional ID of the instance pool to which the driver of the cluster belongs.
+ --driver-node-type-id string The node type of the Spark driver.
+ --enable-elastic-disk Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.
+ --enable-local-disk-encryption Whether to enable LUKS on cluster VMs' local disks.
+ -h, --help help for edit
+ --instance-pool-id string The optional ID of the instance pool to which the cluster belongs.
+ --is-single-node This field can only be used with kind.
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --kind Kind The kind of compute described by this compute specification. Supported values: [CLASSIC_PREVIEW]
+ --no-wait do not wait to reach RUNNING state
+ --node-type-id string This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.
+ --num-workers int Number of worker nodes that this cluster should have.
+ --policy-id string The ID of the cluster policy used to create the cluster if applicable.
+ --runtime-engine RuntimeEngine Determines the cluster's runtime engine, either standard or Photon. Supported values: [NULL, PHOTON, STANDARD]
+ --single-user-name string Single user name if data_security_mode is SINGLE_USER.
+ --timeout duration maximum amount of time to reach RUNNING state (default 20m0s)
+ --use-ml-runtime This field can only be used with kind.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI clusters events --help
+List cluster activity events.
+
+ Retrieves a list of events about the activity of a cluster. This API is
+ paginated. If there are more events to read, the response includes all the
+ parameters necessary to request the next page of events.
+
+ Arguments:
+ CLUSTER_ID: The ID of the cluster to retrieve events about.
+
+Usage:
+ databricks clusters events CLUSTER_ID [flags]
+
+Flags:
+ --end-time int The end time in epoch milliseconds.
+ -h, --help help for events
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --limit int The maximum number of events to include in a page of events.
+ --offset int The offset in the result set.
+ --order GetEventsOrder The order to list events in; either "ASC" or "DESC". Supported values: [ASC, DESC]
+ --start-time int The start time in epoch milliseconds.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI clusters get --help
+Get cluster info.
+
+ Retrieves the information for a cluster given its identifier. Clusters can be
+ described while they are running, or up to 60 days after they are terminated.
+
+ Arguments:
+ CLUSTER_ID: The cluster about which to retrieve information.
+
+Usage:
+ databricks clusters get CLUSTER_ID [flags]
+
+Flags:
+ -h, --help help for get
+ --no-wait do not wait to reach RUNNING state
+ --timeout duration maximum amount of time to reach RUNNING state (default 20m0s)
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI clusters get-permission-levels --help
+Get cluster permission levels.
+
+ Gets the permission levels that a user can have on an object.
+
+ Arguments:
+ CLUSTER_ID: The cluster for which to get or manage permissions.
+
+Usage:
+ databricks clusters get-permission-levels CLUSTER_ID [flags]
+
+Flags:
+ -h, --help help for get-permission-levels
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI clusters get-permissions --help
+Get cluster permissions.
+
+ Gets the permissions of a cluster. Clusters can inherit permissions from their
+ root object.
+
+ Arguments:
+ CLUSTER_ID: The cluster for which to get or manage permissions.
+
+Usage:
+ databricks clusters get-permissions CLUSTER_ID [flags]
+
+Flags:
+ -h, --help help for get-permissions
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI clusters list --help
+List clusters.
+
+ Return information about all pinned and active clusters, and all clusters
+ terminated within the last 30 days. Clusters terminated prior to this period
+ are not included.
+
+Usage:
+ databricks clusters list [flags]
+
+Flags:
+ --cluster-sources []string Filter clusters by source
+ --cluster-states []string Filter clusters by states
+ -h, --help help for list
+ --is-pinned Filter clusters by pinned status
+ --page-size int Use this field to specify the maximum number of results to be returned by the server.
+ --page-token string Use next_page_token or prev_page_token returned from the previous request to list the next or previous page of clusters respectively.
+ --policy-id string Filter clusters by policy id
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI clusters list-node-types --help
+List node types.
+
+ Returns a list of supported Spark node types. These node types can be used to
+ launch a cluster.
+
+Usage:
+ databricks clusters list-node-types [flags]
+
+Flags:
+ -h, --help help for list-node-types
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI clusters list-zones --help
+List availability zones.
+
+ Returns a list of availability zones where clusters can be created (for
+ example, us-west-2a). These zones can be used to launch a cluster.
+
+Usage:
+ databricks clusters list-zones [flags]
+
+Flags:
+ -h, --help help for list-zones
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI clusters permanent-delete --help
+Permanently delete cluster.
+
+ Permanently deletes a Spark cluster. This cluster is terminated and resources
+ are asynchronously removed.
+
+ In addition, users will no longer see permanently deleted clusters in the
+ cluster list, and API users can no longer perform any action on permanently
+ deleted clusters.
+
+ Arguments:
+ CLUSTER_ID: The cluster to be deleted.
+ +Usage: + databricks clusters permanent-delete CLUSTER_ID [flags] + +Flags: + -h, --help help for permanent-delete + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clusters pin --help +Pin cluster. + + Pinning a cluster ensures that the cluster will always be returned by the + ListClusters API. Pinning a cluster that is already pinned will have no + effect. This API can only be called by workspace admins. + + Arguments: + CLUSTER_ID: + +Usage: + databricks clusters pin CLUSTER_ID [flags] + +Flags: + -h, --help help for pin + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clusters resize --help +Resize cluster. + + Resizes a cluster to have a desired number of workers. This will fail unless + the cluster is in a RUNNING state. + + Arguments: + CLUSTER_ID: The cluster to be resized. + +Usage: + databricks clusters resize CLUSTER_ID [flags] + +Flags: + -h, --help help for resize + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach RUNNING state + --num-workers int Number of worker nodes that this cluster should have. + --timeout duration maximum amount of time to reach RUNNING state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clusters restart --help +Restart cluster. + + Restarts a Spark cluster with the supplied ID. If the cluster is not currently + in a RUNNING state, nothing will happen. + + Arguments: + CLUSTER_ID: The cluster to be started. + +Usage: + databricks clusters restart CLUSTER_ID [flags] + +Flags: + -h, --help help for restart + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach RUNNING state + --restart-user string . + --timeout duration maximum amount of time to reach RUNNING state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clusters set-permissions --help +Set cluster permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. + + Arguments: + CLUSTER_ID: The cluster for which to get or manage permissions. 
+ +Usage: + databricks clusters set-permissions CLUSTER_ID [flags] + +Flags: + -h, --help help for set-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clusters spark-versions --help +List available Spark versions. + + Returns the list of available Spark versions. These versions can be used to + launch a cluster. + +Usage: + databricks clusters spark-versions [flags] + +Flags: + -h, --help help for spark-versions + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clusters start --help +Start terminated cluster. + + Starts a terminated Spark cluster with the supplied ID. This works similar to + createCluster except: + + * The previous cluster id and attributes are preserved. * The cluster starts + with the last specified cluster size. * If the previous cluster was an + autoscaling cluster, the current cluster starts with the minimum number of + nodes. * If the cluster is not currently in a TERMINATED state, nothing will + happen. * Clusters launched to run a job cannot be started. + + Arguments: + CLUSTER_ID: The cluster to be started. + +Usage: + databricks clusters start CLUSTER_ID [flags] + +Flags: + -h, --help help for start + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach RUNNING state + --timeout duration maximum amount of time to reach RUNNING state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clusters unpin --help +Unpin cluster. + + Unpinning a cluster will allow the cluster to eventually be removed from the + ListClusters API. Unpinning a cluster that is not pinned will have no effect. + This API can only be called by workspace admins. + + Arguments: + CLUSTER_ID: + +Usage: + databricks clusters unpin CLUSTER_ID [flags] + +Flags: + -h, --help help for unpin + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clusters update --help +Update cluster configuration (partial). + + Updates the configuration of a cluster to match the partial set of attributes + and size. Denote which fields to update using the update_mask field in the + request body. A cluster can be updated if it is in a RUNNING or TERMINATED + state. If a cluster is updated while in a RUNNING state, it will be + restarted so that the new attributes can take effect. If a cluster is updated + while in a TERMINATED state, it will remain TERMINATED. The updated + attributes will take effect the next time the cluster is started using the + clusters/start API. Attempts to update a cluster in any other state will be + rejected with an INVALID_STATE error code. 
Clusters created by the + Databricks Jobs service cannot be updated. + + Arguments: + CLUSTER_ID: ID of the cluster. + UPDATE_MASK: Specifies which fields of the cluster will be updated. This is required in + the POST request. The update mask should be supplied as a single string. + To specify multiple fields, separate them with commas (no spaces). To + delete a field from a cluster configuration, add it to the update_mask + string but omit it from the cluster object. + +Usage: + databricks clusters update CLUSTER_ID UPDATE_MASK [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach RUNNING state + --timeout duration maximum amount of time to reach RUNNING state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI clusters update-permissions --help +Update cluster permissions. + + Updates the permissions on a cluster. Clusters can inherit permissions from + their root object. + + Arguments: + CLUSTER_ID: The cluster for which to get or manage permissions. + +Usage: + databricks clusters update-permissions CLUSTER_ID [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/clusters/clusters/script b/acceptance/help/cmd/workspace/clusters/clusters/script new file mode 100755 index 000000000..094855e7e --- /dev/null +++ b/acceptance/help/cmd/workspace/clusters/clusters/script @@ -0,0 +1,22 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
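+# The `trace` helper appears to echo each command before running it (hence the
+# `>>> $CLI ...` markers in the sibling output.txt), so this script snapshots
+# the --help text of all 21 subcommands in the clusters group.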
+trace $CLI clusters change-owner --help +trace $CLI clusters create --help +trace $CLI clusters delete --help +trace $CLI clusters edit --help +trace $CLI clusters events --help +trace $CLI clusters get --help +trace $CLI clusters get-permission-levels --help +trace $CLI clusters get-permissions --help +trace $CLI clusters list --help +trace $CLI clusters list-node-types --help +trace $CLI clusters list-zones --help +trace $CLI clusters permanent-delete --help +trace $CLI clusters pin --help +trace $CLI clusters resize --help +trace $CLI clusters restart --help +trace $CLI clusters set-permissions --help +trace $CLI clusters spark-versions --help +trace $CLI clusters start --help +trace $CLI clusters unpin --help +trace $CLI clusters update --help +trace $CLI clusters update-permissions --help diff --git a/acceptance/help/cmd/workspace/command-execution/command-execution/output.txt b/acceptance/help/cmd/workspace/command-execution/command-execution/output.txt new file mode 100644 index 000000000..d6d66c8c7 --- /dev/null +++ b/acceptance/help/cmd/workspace/command-execution/command-execution/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI command-execution cancel --help +Error: unknown command "command-execution" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/command-execution/command-execution/script b/acceptance/help/cmd/workspace/command-execution/command-execution/script new file mode 100755 index 000000000..5e0bab19c --- /dev/null +++ b/acceptance/help/cmd/workspace/command-execution/command-execution/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI command-execution cancel --help +trace $CLI command-execution command-status --help +trace $CLI command-execution context-status --help +trace $CLI command-execution create --help +trace $CLI command-execution destroy --help +trace $CLI command-execution execute --help diff --git a/acceptance/help/cmd/workspace/compliance-security-profile/compliance-security-profile/output.txt b/acceptance/help/cmd/workspace/compliance-security-profile/compliance-security-profile/output.txt new file mode 100644 index 000000000..b73c205f9 --- /dev/null +++ b/acceptance/help/cmd/workspace/compliance-security-profile/compliance-security-profile/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI compliance-security-profile get --help +Error: unknown command "compliance-security-profile" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/compliance-security-profile/compliance-security-profile/script b/acceptance/help/cmd/workspace/compliance-security-profile/compliance-security-profile/script new file mode 100755 index 000000000..5f9796816 --- /dev/null +++ b/acceptance/help/cmd/workspace/compliance-security-profile/compliance-security-profile/script @@ -0,0 +1,3 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI compliance-security-profile get --help +trace $CLI compliance-security-profile update --help diff --git a/acceptance/help/cmd/workspace/connections/connections/output.txt b/acceptance/help/cmd/workspace/connections/connections/output.txt new file mode 100644 index 000000000..cf0344c97 --- /dev/null +++ b/acceptance/help/cmd/workspace/connections/connections/output.txt @@ -0,0 +1,106 @@ + +>>> $CLI connections create --help +Create a connection. + + Creates a new connection + + Creates a new connection to an external data source. 
It allows users to
+ specify connection details and configurations for interaction with the
+ external server.
+
+Usage:
+ databricks connections create [flags]
+
+Flags:
+ --comment string User-provided free-form text description.
+ -h, --help help for create
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --read-only If the connection is read only.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI connections delete --help
+Delete a connection.
+
+ Deletes the connection that matches the supplied name.
+
+ Arguments:
+ NAME: The name of the connection to be deleted.
+
+Usage:
+ databricks connections delete NAME [flags]
+
+Flags:
+ -h, --help help for delete
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI connections get --help
+Get a connection.
+
+ Gets a connection from its name.
+
+ Arguments:
+ NAME: Name of the connection.
+
+Usage:
+ databricks connections get NAME [flags]
+
+Flags:
+ -h, --help help for get
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI connections list --help
+List connections.
+
+ List all connections.
+
+Usage:
+ databricks connections list [flags]
+
+Flags:
+ -h, --help help for list
+ --max-results int Maximum number of connections to return.
+ --page-token string Opaque pagination token to go to next page based on previous query.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI connections update --help
+Update a connection.
+
+ Updates the connection that matches the supplied name.
+
+ Arguments:
+ NAME: Name of the connection.
+
+Usage:
+ databricks connections update NAME [flags]
+
+Flags:
+ -h, --help help for update
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --new-name string New name for the connection.
+ --owner string Username of current owner of the connection.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/connections/connections/script b/acceptance/help/cmd/workspace/connections/connections/script
new file mode 100755
index 000000000..9bd0e4faf
--- /dev/null
+++ b/acceptance/help/cmd/workspace/connections/connections/script
@@ -0,0 +1,6 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
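+# One trace per connections subcommand (create, delete, get, list, update);
+# the expected help text is captured in the output.txt next to this script.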
+trace $CLI connections create --help +trace $CLI connections delete --help +trace $CLI connections get --help +trace $CLI connections list --help +trace $CLI connections update --help diff --git a/acceptance/help/cmd/workspace/consumer-fulfillments/consumer-fulfillments/output.txt b/acceptance/help/cmd/workspace/consumer-fulfillments/consumer-fulfillments/output.txt new file mode 100644 index 000000000..c44720355 --- /dev/null +++ b/acceptance/help/cmd/workspace/consumer-fulfillments/consumer-fulfillments/output.txt @@ -0,0 +1,42 @@ + +>>> $CLI consumer-fulfillments get --help +Get listing content metadata. + + Get a high level preview of the metadata of listing installable content. + +Usage: + databricks consumer-fulfillments get LISTING_ID [flags] + +Flags: + -h, --help help for get + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI consumer-fulfillments list --help +List all listing fulfillments. + + Get all listings fulfillments associated with a listing. A _fulfillment_ is a + potential installation. Standard installations contain metadata about the + attached share or git repo. Only one of these fields will be present. + Personalized installations contain metadata about the attached share or git + repo, as well as the Delta Sharing recipient type. + +Usage: + databricks consumer-fulfillments list LISTING_ID [flags] + +Flags: + -h, --help help for list + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/consumer-fulfillments/consumer-fulfillments/script b/acceptance/help/cmd/workspace/consumer-fulfillments/consumer-fulfillments/script new file mode 100755 index 000000000..aab97c944 --- /dev/null +++ b/acceptance/help/cmd/workspace/consumer-fulfillments/consumer-fulfillments/script @@ -0,0 +1,3 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI consumer-fulfillments get --help +trace $CLI consumer-fulfillments list --help diff --git a/acceptance/help/cmd/workspace/consumer-installations/consumer-installations/output.txt b/acceptance/help/cmd/workspace/consumer-installations/consumer-installations/output.txt new file mode 100644 index 000000000..17a2ff64a --- /dev/null +++ b/acceptance/help/cmd/workspace/consumer-installations/consumer-installations/output.txt @@ -0,0 +1,99 @@ + +>>> $CLI consumer-installations create --help +Install from a listing. + + Install payload associated with a Databricks Marketplace listing. + +Usage: + databricks consumer-installations create LISTING_ID [flags] + +Flags: + --catalog-name string + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --recipient-type DeltaSharingRecipientType . 
Supported values: [DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS, DELTA_SHARING_RECIPIENT_TYPE_OPEN]
+ --share-name string
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI consumer-installations delete --help
+Uninstall from a listing.
+
+ Uninstall an installation associated with a Databricks Marketplace listing.
+
+Usage:
+ databricks consumer-installations delete LISTING_ID INSTALLATION_ID [flags]
+
+Flags:
+ -h, --help help for delete
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI consumer-installations list --help
+List all installations.
+
+ List all installations across all listings.
+
+Usage:
+ databricks consumer-installations list [flags]
+
+Flags:
+ -h, --help help for list
+ --page-size int
+ --page-token string
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI consumer-installations list-listing-installations --help
+List installations for a listing.
+
+ List all installations for a particular listing.
+
+Usage:
+ databricks consumer-installations list-listing-installations LISTING_ID [flags]
+
+Flags:
+ -h, --help help for list-listing-installations
+ --page-size int
+ --page-token string
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI consumer-installations update --help
+Update an installation.
+
+ This is an update API that will update the part of the fields defined in the
+ installation table as well as interact with external services according to the
+ fields not included in the installation table: 1. the token will be rotated if
+ the rotateToken flag is true 2. the token will be forcibly rotated if the
+ rotateToken flag is true and the tokenInfo field is empty
+
+Usage:
+ databricks consumer-installations update LISTING_ID INSTALLATION_ID [flags]
+
+Flags:
+ -h, --help help for update
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --rotate-token
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/consumer-installations/consumer-installations/script b/acceptance/help/cmd/workspace/consumer-installations/consumer-installations/script
new file mode 100755
index 000000000..700802b84
--- /dev/null
+++ b/acceptance/help/cmd/workspace/consumer-installations/consumer-installations/script
@@ -0,0 +1,6 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
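+# Covers the Marketplace installation lifecycle, including the nested
+# list-listing-installations subcommand.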
+trace $CLI consumer-installations create --help +trace $CLI consumer-installations delete --help +trace $CLI consumer-installations list --help +trace $CLI consumer-installations list-listing-installations --help +trace $CLI consumer-installations update --help diff --git a/acceptance/help/cmd/workspace/consumer-listings/consumer-listings/output.txt b/acceptance/help/cmd/workspace/consumer-listings/consumer-listings/output.txt new file mode 100644 index 000000000..6320aaa52 --- /dev/null +++ b/acceptance/help/cmd/workspace/consumer-listings/consumer-listings/output.txt @@ -0,0 +1,85 @@ + +>>> $CLI consumer-listings batch-get --help +Get one batch of listings. One may specify up to 50 IDs per request. + + Batch get a published listing in the Databricks Marketplace that the consumer + has access to. + +Usage: + databricks consumer-listings batch-get [flags] + +Flags: + -h, --help help for batch-get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI consumer-listings get --help +Get listing. + + Get a published listing in the Databricks Marketplace that the consumer has + access to. + +Usage: + databricks consumer-listings get ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI consumer-listings list --help +List listings. + + List all published listings in the Databricks Marketplace that the consumer + has access to. + +Usage: + databricks consumer-listings list [flags] + +Flags: + -h, --help help for list + --is-free Filters each listing based on if it is free. + --is-private-exchange Filters each listing based on if it is a private exchange. + --is-staff-pick Filters each listing based on whether it is a staff pick. + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI consumer-listings search --help +Search listings. + + Search published listings in the Databricks Marketplace that the consumer has + access to. This query supports a variety of different search parameters and + performs fuzzy matching. + + Arguments: + QUERY: Fuzzy matches query + +Usage: + databricks consumer-listings search QUERY [flags] + +Flags: + -h, --help help for search + --is-free + --is-private-exchange + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/consumer-listings/consumer-listings/script b/acceptance/help/cmd/workspace/consumer-listings/consumer-listings/script new file mode 100755 index 000000000..3eecfd469 --- /dev/null +++ b/acceptance/help/cmd/workspace/consumer-listings/consumer-listings/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
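+# Help snapshots for the listing discovery commands: batch-get, get, list,
+# and search.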
+trace $CLI consumer-listings batch-get --help +trace $CLI consumer-listings get --help +trace $CLI consumer-listings list --help +trace $CLI consumer-listings search --help diff --git a/acceptance/help/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests/output.txt b/acceptance/help/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests/output.txt new file mode 100644 index 000000000..f6f7db268 --- /dev/null +++ b/acceptance/help/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests/output.txt @@ -0,0 +1,61 @@ + +>>> $CLI consumer-personalization-requests create --help +Create a personalization request. + + Create a personalization request for a listing. + +Usage: + databricks consumer-personalization-requests create LISTING_ID [flags] + +Flags: + --comment string + --company string + --first-name string + -h, --help help for create + --is-from-lighthouse + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --last-name string + --recipient-type DeltaSharingRecipientType . Supported values: [DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS, DELTA_SHARING_RECIPIENT_TYPE_OPEN] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI consumer-personalization-requests get --help +Get the personalization request for a listing. + + Get the personalization request for a listing. Each consumer can make at + *most* one personalization request for a listing. + +Usage: + databricks consumer-personalization-requests get LISTING_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI consumer-personalization-requests list --help +List all personalization requests. + + List personalization requests for a consumer across all listings. + +Usage: + databricks consumer-personalization-requests list [flags] + +Flags: + -h, --help help for list + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests/script b/acceptance/help/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests/script new file mode 100755 index 000000000..75bf938d4 --- /dev/null +++ b/acceptance/help/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
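+# Help snapshots for the personalization-request commands: create, get, list.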
+trace $CLI consumer-personalization-requests create --help +trace $CLI consumer-personalization-requests get --help +trace $CLI consumer-personalization-requests list --help diff --git a/acceptance/help/cmd/workspace/consumer-providers/consumer-providers/output.txt b/acceptance/help/cmd/workspace/consumer-providers/consumer-providers/output.txt new file mode 100644 index 000000000..1e1c30a96 --- /dev/null +++ b/acceptance/help/cmd/workspace/consumer-providers/consumer-providers/output.txt @@ -0,0 +1,57 @@ + +>>> $CLI consumer-providers batch-get --help +Get one batch of providers. One may specify up to 50 IDs per request. + + Batch get a provider in the Databricks Marketplace with at least one visible + listing. + +Usage: + databricks consumer-providers batch-get [flags] + +Flags: + -h, --help help for batch-get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI consumer-providers get --help +Get a provider. + + Get a provider in the Databricks Marketplace with at least one visible + listing. + +Usage: + databricks consumer-providers get ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI consumer-providers list --help +List providers. + + List all providers in the Databricks Marketplace with at least one visible + listing. + +Usage: + databricks consumer-providers list [flags] + +Flags: + -h, --help help for list + --is-featured + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/consumer-providers/consumer-providers/script b/acceptance/help/cmd/workspace/consumer-providers/consumer-providers/script new file mode 100755 index 000000000..ca55fbfa9 --- /dev/null +++ b/acceptance/help/cmd/workspace/consumer-providers/consumer-providers/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI consumer-providers batch-get --help +trace $CLI consumer-providers get --help +trace $CLI consumer-providers list --help diff --git a/acceptance/help/cmd/workspace/credentials-manager/credentials-manager/output.txt b/acceptance/help/cmd/workspace/credentials-manager/credentials-manager/output.txt new file mode 100644 index 000000000..fb8967c5c --- /dev/null +++ b/acceptance/help/cmd/workspace/credentials-manager/credentials-manager/output.txt @@ -0,0 +1,19 @@ + +>>> $CLI credentials-manager exchange-token --help +Exchange token. + + Exchange tokens with an Identity Provider to get a new access token. It allows + specifying scopes to determine token permissions. 
+ +Usage: + databricks credentials-manager exchange-token [flags] + +Flags: + -h, --help help for exchange-token + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/credentials-manager/credentials-manager/script b/acceptance/help/cmd/workspace/credentials-manager/credentials-manager/script new file mode 100755 index 000000000..c79ff2eac --- /dev/null +++ b/acceptance/help/cmd/workspace/credentials-manager/credentials-manager/script @@ -0,0 +1,2 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI credentials-manager exchange-token --help diff --git a/acceptance/help/cmd/workspace/credentials/credentials/output.txt b/acceptance/help/cmd/workspace/credentials/credentials/output.txt new file mode 100644 index 000000000..3da26e1fe --- /dev/null +++ b/acceptance/help/cmd/workspace/credentials/credentials/output.txt @@ -0,0 +1,192 @@ + +>>> $CLI credentials create-credential --help +Create a credential. + + Creates a new credential. The type of credential to be created is determined + by the **purpose** field, which should be either **SERVICE** or **STORAGE**. + + The caller must be a metastore admin or have the metastore privilege + **CREATE_STORAGE_CREDENTIAL** for storage credentials, or + **CREATE_SERVICE_CREDENTIAL** for service credentials. + + Arguments: + NAME: The credential name. The name must be unique among storage and service + credentials within the metastore. + +Usage: + databricks credentials create-credential NAME [flags] + +Flags: + --comment string Comment associated with the credential. + -h, --help help for create-credential + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --purpose CredentialPurpose Indicates the purpose of the credential. Supported values: [SERVICE, STORAGE] + --read-only Whether the credential is usable only for read operations. + --skip-validation Optional. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI credentials delete-credential --help +Delete a credential. + + Deletes a service or storage credential from the metastore. The caller must be + an owner of the credential. + + Arguments: + NAME_ARG: Name of the credential. + +Usage: + databricks credentials delete-credential NAME_ARG [flags] + +Flags: + --force Force an update even if there are dependent services (when purpose is **SERVICE**) or dependent external locations and external tables (when purpose is **STORAGE**). + -h, --help help for delete-credential + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI credentials generate-temporary-service-credential --help +Generate a temporary service credential. + + Returns a set of temporary credentials generated using the specified service + credential. The caller must be a metastore admin or have the metastore + privilege **ACCESS** on the service credential. 
+ + Arguments: + CREDENTIAL_NAME: The name of the service credential used to generate a temporary credential + +Usage: + databricks credentials generate-temporary-service-credential CREDENTIAL_NAME [flags] + +Flags: + -h, --help help for generate-temporary-service-credential + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI credentials get-credential --help +Get a credential. + + Gets a service or storage credential from the metastore. The caller must be a + metastore admin, the owner of the credential, or have any permission on the + credential. + + Arguments: + NAME_ARG: Name of the credential. + +Usage: + databricks credentials get-credential NAME_ARG [flags] + +Flags: + -h, --help help for get-credential + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI credentials list-credentials --help +List credentials. + + Gets an array of credentials (as __CredentialInfo__ objects). + + The array is limited to only the credentials that the caller has permission to + access. If the caller is a metastore admin, retrieval of credentials is + unrestricted. There is no guarantee of a specific ordering of the elements in + the array. + +Usage: + databricks credentials list-credentials [flags] + +Flags: + -h, --help help for list-credentials + --max-results int Maximum number of credentials to return. + --page-token string Opaque token to retrieve the next page of results. + --purpose CredentialPurpose Return only credentials for the specified purpose. Supported values: [SERVICE, STORAGE] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI credentials update-credential --help +Update a credential. + + Updates a service or storage credential on the metastore. + + The caller must be the owner of the credential or a metastore admin or have + the MANAGE permission. If the caller is a metastore admin, only the + __owner__ field can be changed. + + Arguments: + NAME_ARG: Name of the credential. + +Usage: + databricks credentials update-credential NAME_ARG [flags] + +Flags: + --comment string Comment associated with the credential. + --force Force an update even if there are dependent services (when purpose is **SERVICE**) or dependent external locations and external tables (when purpose is **STORAGE**). + -h, --help help for update-credential + --isolation-mode IsolationMode Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN] + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --new-name string New name of credential. + --owner string Username of current owner of credential. + --read-only Whether the credential is usable only for read operations. + --skip-validation Supply true to this argument to skip validation of the updated credential. 
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI credentials validate-credential --help +Validate a credential. + + Validates a credential. + + For service credentials (purpose is **SERVICE**), either the + __credential_name__ or the cloud-specific credential must be provided. + + For storage credentials (purpose is **STORAGE**), at least one of + __external_location_name__ and __url__ need to be provided. If only one of + them is provided, it will be used for validation. And if both are provided, + the __url__ will be used for validation, and __external_location_name__ will + be ignored when checking overlapping urls. Either the __credential_name__ or + the cloud-specific credential must be provided. + + The caller must be a metastore admin or the credential owner or have the + required permission on the metastore and the credential (e.g., + **CREATE_EXTERNAL_LOCATION** when purpose is **STORAGE**). + +Usage: + databricks credentials validate-credential [flags] + +Flags: + --credential-name string Required. + --external-location-name string The name of an existing external location to validate. + -h, --help help for validate-credential + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --purpose CredentialPurpose The purpose of the credential. Supported values: [SERVICE, STORAGE] + --read-only Whether the credential is only usable for read operations. + --url string The external location url to validate. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/credentials/credentials/script b/acceptance/help/cmd/workspace/credentials/credentials/script new file mode 100755 index 000000000..c610844b5 --- /dev/null +++ b/acceptance/help/cmd/workspace/credentials/credentials/script @@ -0,0 +1,8 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI credentials create-credential --help +trace $CLI credentials delete-credential --help +trace $CLI credentials generate-temporary-service-credential --help +trace $CLI credentials get-credential --help +trace $CLI credentials list-credentials --help +trace $CLI credentials update-credential --help +trace $CLI credentials validate-credential --help diff --git a/acceptance/help/cmd/workspace/current-user/current-user/output.txt b/acceptance/help/cmd/workspace/current-user/current-user/output.txt new file mode 100644 index 000000000..d3bae59d6 --- /dev/null +++ b/acceptance/help/cmd/workspace/current-user/current-user/output.txt @@ -0,0 +1,17 @@ + +>>> $CLI current-user me --help +Get current user info. + + Get details about the current method caller's identity. 
+ +Usage: + databricks current-user me [flags] + +Flags: + -h, --help help for me + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/current-user/current-user/script b/acceptance/help/cmd/workspace/current-user/current-user/script new file mode 100755 index 000000000..a8c0a9018 --- /dev/null +++ b/acceptance/help/cmd/workspace/current-user/current-user/script @@ -0,0 +1,2 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI current-user me --help diff --git a/acceptance/help/cmd/workspace/dashboard-widgets/dashboard-widgets/output.txt b/acceptance/help/cmd/workspace/dashboard-widgets/dashboard-widgets/output.txt new file mode 100644 index 000000000..7efef8213 --- /dev/null +++ b/acceptance/help/cmd/workspace/dashboard-widgets/dashboard-widgets/output.txt @@ -0,0 +1,53 @@ + +>>> $CLI dashboard-widgets create --help +Add widget to a dashboard. + +Usage: + databricks dashboard-widgets create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI dashboard-widgets delete --help +Remove widget. + + Arguments: + ID: Widget ID returned by :method:dashboardwidgets/create + +Usage: + databricks dashboard-widgets delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI dashboard-widgets update --help +Update existing widget. + + Arguments: + ID: Widget ID returned by :method:dashboardwidgets/create + +Usage: + databricks dashboard-widgets update ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/dashboard-widgets/dashboard-widgets/script b/acceptance/help/cmd/workspace/dashboard-widgets/dashboard-widgets/script new file mode 100755 index 000000000..9434c87d8 --- /dev/null +++ b/acceptance/help/cmd/workspace/dashboard-widgets/dashboard-widgets/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI dashboard-widgets create --help +trace $CLI dashboard-widgets delete --help +trace $CLI dashboard-widgets update --help diff --git a/acceptance/help/cmd/workspace/dashboards/dashboards/output.txt b/acceptance/help/cmd/workspace/dashboards/dashboards/output.txt new file mode 100644 index 000000000..9fbef7205 --- /dev/null +++ b/acceptance/help/cmd/workspace/dashboards/dashboards/output.txt @@ -0,0 +1,116 @@ + +>>> $CLI dashboards create --help +Create a dashboard object. 
+ +Usage: + databricks dashboards create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI dashboards delete --help +Remove a dashboard. + + Moves a dashboard to the trash. Trashed dashboards do not appear in list views + or searches, and cannot be shared. + +Usage: + databricks dashboards delete DASHBOARD_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI dashboards get --help +Retrieve a definition. + + Returns a JSON representation of a dashboard object, including its + visualization and query objects. + +Usage: + databricks dashboards get DASHBOARD_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI dashboards list --help +Get dashboard objects. + + Fetch a paginated list of dashboard objects. + + **Warning**: Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban. + +Usage: + databricks dashboards list [flags] + +Flags: + -h, --help help for list + --order ListOrder Name of dashboard attribute to order by. Supported values: [created_at, name] + --page int Page number to retrieve. + --page-size int Number of dashboards to return per page. + --q string Full text search term. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI dashboards restore --help +Restore a dashboard. + + A restored dashboard appears in list views and searches and can be shared. + +Usage: + databricks dashboards restore DASHBOARD_ID [flags] + +Flags: + -h, --help help for restore + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI dashboards update --help +Change a dashboard definition. + + Modify this dashboard definition. This operation only affects attributes of + the dashboard object. It does not add, modify, or remove widgets. + + **Note**: You cannot undo this operation. + +Usage: + databricks dashboards update DASHBOARD_ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --name string The title of this dashboard that appears in list views and at the top of the dashboard page. + --run-as-role RunAsRole Sets the **Run as** role for the object. 
Supported values: [owner, viewer] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/dashboards/dashboards/script b/acceptance/help/cmd/workspace/dashboards/dashboards/script new file mode 100755 index 000000000..f864e8139 --- /dev/null +++ b/acceptance/help/cmd/workspace/dashboards/dashboards/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI dashboards create --help +trace $CLI dashboards delete --help +trace $CLI dashboards get --help +trace $CLI dashboards list --help +trace $CLI dashboards restore --help +trace $CLI dashboards update --help diff --git a/acceptance/help/cmd/workspace/data-sources/data-sources/output.txt b/acceptance/help/cmd/workspace/data-sources/data-sources/output.txt new file mode 100644 index 000000000..de1d30f79 --- /dev/null +++ b/acceptance/help/cmd/workspace/data-sources/data-sources/output.txt @@ -0,0 +1,24 @@ + +>>> $CLI data-sources list --help +Get a list of SQL warehouses. + + Retrieves a full list of SQL warehouses available in this workspace. All + fields that appear in this API response are enumerated for clarity. However, + you need only a SQL warehouse's id to create new queries against it. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:warehouses/list instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + +Usage: + databricks data-sources list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/data-sources/data-sources/script b/acceptance/help/cmd/workspace/data-sources/data-sources/script new file mode 100755 index 000000000..d05c62413 --- /dev/null +++ b/acceptance/help/cmd/workspace/data-sources/data-sources/script @@ -0,0 +1,2 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI data-sources list --help diff --git a/acceptance/help/cmd/workspace/dbfs/dbfs/output.txt b/acceptance/help/cmd/workspace/dbfs/dbfs/output.txt new file mode 100644 index 000000000..6e016ec7b --- /dev/null +++ b/acceptance/help/cmd/workspace/dbfs/dbfs/output.txt @@ -0,0 +1,9 @@ + +>>> $CLI dbfs add-block --help +Error: unknown command "dbfs" for "databricks" + +Did you mean this? + fs + + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/dbfs/dbfs/script b/acceptance/help/cmd/workspace/dbfs/dbfs/script new file mode 100755 index 000000000..bc9c7e1c6 --- /dev/null +++ b/acceptance/help/cmd/workspace/dbfs/dbfs/script @@ -0,0 +1,11 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI dbfs add-block --help +trace $CLI dbfs close --help +trace $CLI dbfs create --help +trace $CLI dbfs delete --help +trace $CLI dbfs get-status --help +trace $CLI dbfs list --help +trace $CLI dbfs mkdirs --help +trace $CLI dbfs move --help +trace $CLI dbfs put --help +trace $CLI dbfs read --help diff --git a/acceptance/help/cmd/workspace/dbsql-permissions/dbsql-permissions/output.txt b/acceptance/help/cmd/workspace/dbsql-permissions/dbsql-permissions/output.txt new file mode 100644 index 000000000..95f87baa4 --- /dev/null +++ b/acceptance/help/cmd/workspace/dbsql-permissions/dbsql-permissions/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI dbsql-permissions get --help +Error: unknown command "dbsql-permissions" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/dbsql-permissions/dbsql-permissions/script b/acceptance/help/cmd/workspace/dbsql-permissions/dbsql-permissions/script new file mode 100755 index 000000000..0df4f1d64 --- /dev/null +++ b/acceptance/help/cmd/workspace/dbsql-permissions/dbsql-permissions/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI dbsql-permissions get --help +trace $CLI dbsql-permissions set --help +trace $CLI dbsql-permissions transfer-ownership --help diff --git a/acceptance/help/cmd/workspace/default-namespace/default-namespace/output.txt b/acceptance/help/cmd/workspace/default-namespace/default-namespace/output.txt new file mode 100644 index 000000000..fa1878bac --- /dev/null +++ b/acceptance/help/cmd/workspace/default-namespace/default-namespace/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI default-namespace delete --help +Error: unknown command "default-namespace" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/default-namespace/default-namespace/script b/acceptance/help/cmd/workspace/default-namespace/default-namespace/script new file mode 100755 index 000000000..763ae95ce --- /dev/null +++ b/acceptance/help/cmd/workspace/default-namespace/default-namespace/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI default-namespace delete --help +trace $CLI default-namespace get --help +trace $CLI default-namespace update --help diff --git a/acceptance/help/cmd/workspace/disable-legacy-access/disable-legacy-access/output.txt b/acceptance/help/cmd/workspace/disable-legacy-access/disable-legacy-access/output.txt new file mode 100644 index 000000000..c15cd755b --- /dev/null +++ b/acceptance/help/cmd/workspace/disable-legacy-access/disable-legacy-access/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI disable-legacy-access delete --help +Error: unknown command "disable-legacy-access" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/disable-legacy-access/disable-legacy-access/script b/acceptance/help/cmd/workspace/disable-legacy-access/disable-legacy-access/script new file mode 100755 index 000000000..ccc8cd30b --- /dev/null +++ b/acceptance/help/cmd/workspace/disable-legacy-access/disable-legacy-access/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI disable-legacy-access delete --help +trace $CLI disable-legacy-access get --help +trace $CLI disable-legacy-access update --help diff --git a/acceptance/help/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs/output.txt b/acceptance/help/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs/output.txt new file mode 100644 index 000000000..8aae7f35d --- /dev/null +++ b/acceptance/help/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI disable-legacy-dbfs delete --help +Error: unknown command "disable-legacy-dbfs" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs/script b/acceptance/help/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs/script new file mode 100755 index 000000000..28a823fac --- /dev/null +++ b/acceptance/help/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI disable-legacy-dbfs delete --help +trace $CLI disable-legacy-dbfs get --help +trace $CLI disable-legacy-dbfs update --help diff --git a/acceptance/help/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring/output.txt b/acceptance/help/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring/output.txt new file mode 100644 index 000000000..e5d1d56a5 --- /dev/null +++ b/acceptance/help/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI enhanced-security-monitoring get --help +Error: unknown command "enhanced-security-monitoring" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring/script b/acceptance/help/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring/script new file mode 100755 index 000000000..a28072912 --- /dev/null +++ b/acceptance/help/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring/script @@ -0,0 +1,3 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI enhanced-security-monitoring get --help +trace $CLI enhanced-security-monitoring update --help diff --git a/acceptance/help/cmd/workspace/experiments/experiments/output.txt b/acceptance/help/cmd/workspace/experiments/experiments/output.txt new file mode 100644 index 000000000..5149f9098 --- /dev/null +++ b/acceptance/help/cmd/workspace/experiments/experiments/output.txt @@ -0,0 +1,747 @@ + +>>> $CLI experiments create-experiment --help +Create experiment. + + Creates an experiment with a name. Returns the ID of the newly created + experiment. Validates that another experiment with the same name does not + already exist and fails if another experiment with the same name already + exists. + + Throws RESOURCE_ALREADY_EXISTS if a experiment with the given name exists. + + Arguments: + NAME: Experiment name. + +Usage: + databricks experiments create-experiment NAME [flags] + +Flags: + --artifact-location string Location where all artifacts for the experiment are stored. 
+ -h, --help help for create-experiment + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments create-run --help +Create a run. + + Creates a new run within an experiment. A run is usually a single execution of + a machine learning or data ETL pipeline. MLflow uses runs to track the + mlflowParam, mlflowMetric and mlflowRunTag associated with a single + execution. + +Usage: + databricks experiments create-run [flags] + +Flags: + --experiment-id string ID of the associated experiment. + -h, --help help for create-run + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --start-time int Unix timestamp in milliseconds of when the run started. + --user-id string ID of the user executing the run. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments delete-experiment --help +Delete an experiment. + + Marks an experiment and associated metadata, runs, metrics, params, and tags + for deletion. If the experiment uses FileStore, artifacts associated with + experiment are also deleted. + + Arguments: + EXPERIMENT_ID: ID of the associated experiment. + +Usage: + databricks experiments delete-experiment EXPERIMENT_ID [flags] + +Flags: + -h, --help help for delete-experiment + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments delete-run --help +Delete a run. + + Marks a run for deletion. + + Arguments: + RUN_ID: ID of the run to delete. + +Usage: + databricks experiments delete-run RUN_ID [flags] + +Flags: + -h, --help help for delete-run + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments delete-runs --help +Delete runs by creation time. + + Bulk delete runs in an experiment that were created prior to or at the + specified timestamp. Deletes at most max_runs per request. To call this API + from a Databricks Notebook in Python, you can use the client code snippet on + https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete. + + Arguments: + EXPERIMENT_ID: The ID of the experiment containing the runs to delete. + MAX_TIMESTAMP_MILLIS: The maximum creation timestamp in milliseconds since the UNIX epoch for + deleting runs. Only runs created prior to or at this timestamp are + deleted. 
+ +Usage: + databricks experiments delete-runs EXPERIMENT_ID MAX_TIMESTAMP_MILLIS [flags] + +Flags: + -h, --help help for delete-runs + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --max-runs int An optional positive integer indicating the maximum number of runs to delete. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments delete-tag --help +Delete a tag. + + Deletes a tag on a run. Tags are run metadata that can be updated during a run + and after a run completes. + + Arguments: + RUN_ID: ID of the run that the tag was logged under. Must be provided. + KEY: Name of the tag. Maximum size is 255 bytes. Must be provided. + +Usage: + databricks experiments delete-tag RUN_ID KEY [flags] + +Flags: + -h, --help help for delete-tag + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments get-by-name --help +Get metadata. + + Gets metadata for an experiment. + + This endpoint will return deleted experiments, but prefers the active + experiment if an active and deleted experiment share the same name. If + multiple deleted experiments share the same name, the API will return one of + them. + + Throws RESOURCE_DOES_NOT_EXIST if no experiment with the specified name + exists. + + Arguments: + EXPERIMENT_NAME: Name of the associated experiment. + +Usage: + databricks experiments get-by-name EXPERIMENT_NAME [flags] + +Flags: + -h, --help help for get-by-name + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments get-experiment --help +Get an experiment. + + Gets metadata for an experiment. This method works on deleted experiments. + + Arguments: + EXPERIMENT_ID: ID of the associated experiment. + +Usage: + databricks experiments get-experiment EXPERIMENT_ID [flags] + +Flags: + -h, --help help for get-experiment + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments get-history --help +Get history of a given metric within a run. + + Gets a list of all values for the specified metric for a given run. + + Arguments: + METRIC_KEY: Name of the metric. + +Usage: + databricks experiments get-history METRIC_KEY [flags] + +Flags: + -h, --help help for get-history + --max-results int Maximum number of Metric records to return per paginated request. + --page-token string Token indicating the page of metric histories to fetch. + --run-id string ID of the run from which to fetch metric values. + --run-uuid string [Deprecated, use run_id instead] ID of the run from which to fetch metric values. 
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments get-permission-levels --help +Get experiment permission levels. + + Gets the permission levels that a user can have on an object. + + Arguments: + EXPERIMENT_ID: The experiment for which to get or manage permissions. + +Usage: + databricks experiments get-permission-levels EXPERIMENT_ID [flags] + +Flags: + -h, --help help for get-permission-levels + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments get-permissions --help +Get experiment permissions. + + Gets the permissions of an experiment. Experiments can inherit permissions + from their root object. + + Arguments: + EXPERIMENT_ID: The experiment for which to get or manage permissions. + +Usage: + databricks experiments get-permissions EXPERIMENT_ID [flags] + +Flags: + -h, --help help for get-permissions + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments get-run --help +Get a run. + + Gets the metadata, metrics, params, and tags for a run. In the case where + multiple metrics with the same key are logged for a run, return only the value + with the latest timestamp. + + If there are multiple values with the latest timestamp, return the maximum of + these values. + + Arguments: + RUN_ID: ID of the run to fetch. Must be provided. + +Usage: + databricks experiments get-run RUN_ID [flags] + +Flags: + -h, --help help for get-run + --run-uuid string [Deprecated, use run_id instead] ID of the run to fetch. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments list-artifacts --help +Get all artifacts. + + List artifacts for a run. Takes an optional artifact_path prefix. If it is + specified, the response contains only artifacts with the specified prefix. + This API does not support pagination when listing artifacts in UC Volumes. A + maximum of 1000 artifacts will be retrieved for UC Volumes. Please call + /api/2.0/fs/directories{directory_path} for listing artifacts in UC Volumes, + which supports pagination. See [List directory contents | Files + API](/api/workspace/files/listdirectorycontents). + +Usage: + databricks experiments list-artifacts [flags] + +Flags: + -h, --help help for list-artifacts + --page-token string Token indicating the page of artifact results to fetch. + --path string Filter artifacts matching this path (a relative path from the root artifact directory). + --run-id string ID of the run whose artifacts to list. + --run-uuid string [Deprecated, use run_id instead] ID of the run whose artifacts to list. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments list-experiments --help +List experiments. + + Gets a list of all experiments. 
+ +Usage: + databricks experiments list-experiments [flags] + +Flags: + -h, --help help for list-experiments + --max-results int Maximum number of experiments desired. + --page-token string Token indicating the page of experiments to fetch. + --view-type string Qualifier for type of experiments to be returned. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments log-batch --help +Log a batch. + + Logs a batch of metrics, params, and tags for a run. If any data failed to be + persisted, the server will respond with an error (non-200 status code). + + In case of error (due to internal server error or an invalid request), partial + data may be written. + + You can write metrics, params, and tags in interleaving fashion, but within a + given entity type are guaranteed to follow the order specified in the request + body. + + The overwrite behavior for metrics, params, and tags is as follows: + + * Metrics: metric values are never overwritten. Logging a metric (key, value, + timestamp) appends to the set of values for the metric with the provided key. + + * Tags: tag values can be overwritten by successive writes to the same tag + key. That is, if multiple tag values with the same key are provided in the + same API request, the last-provided tag value is written. Logging the same tag + (key, value) is permitted. Specifically, logging a tag is idempotent. + + * Parameters: once written, param values cannot be changed (attempting to + overwrite a param value will result in an error). However, logging the same + param (key, value) is permitted. Specifically, logging a param is idempotent. + + Request Limits ------------------------------- A single JSON-serialized API + request may be up to 1 MB in size and contain: + + * No more than 1000 metrics, params, and tags in total * Up to 1000 metrics * + Up to 100 params * Up to 100 tags + + For example, a valid request might contain 900 metrics, 50 params, and 50 + tags, but logging 900 metrics, 50 params, and 51 tags is invalid. + + The following limits also apply to metric, param, and tag keys and values: + + * Metric keys, param keys, and tag keys can be up to 250 characters in length + * Parameter and tag values can be up to 250 characters in length + +Usage: + databricks experiments log-batch [flags] + +Flags: + -h, --help help for log-batch + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --run-id string ID of the run to log under. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments log-inputs --help +Log inputs to a run. + + **NOTE:** Experimental: This API may change or be removed in a future release + without warning. + +Usage: + databricks experiments log-inputs [flags] + +Flags: + -h, --help help for log-inputs + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --run-id string ID of the run to log under. 
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments log-metric --help +Log a metric. + + Logs a metric for a run. A metric is a key-value pair (string key, float + value) with an associated timestamp. Examples include the various metrics that + represent ML model accuracy. A metric can be logged multiple times. + + Arguments: + KEY: Name of the metric. + VALUE: Double value of the metric being logged. + TIMESTAMP: Unix timestamp in milliseconds at the time metric was logged. + +Usage: + databricks experiments log-metric KEY VALUE TIMESTAMP [flags] + +Flags: + -h, --help help for log-metric + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --run-id string ID of the run under which to log the metric. + --run-uuid string [Deprecated, use run_id instead] ID of the run under which to log the metric. + --step int Step at which to log the metric. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments log-model --help +Log a model. + + **NOTE:** Experimental: This API may change or be removed in a future release + without warning. + +Usage: + databricks experiments log-model [flags] + +Flags: + -h, --help help for log-model + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --model-json string MLmodel file in json format. + --run-id string ID of the run to log under. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments log-param --help +Log a param. + + Logs a param used for a run. A param is a key-value pair (string key, string + value). Examples include hyperparameters used for ML model training and + constant dates and values used in an ETL pipeline. A param can be logged only + once for a run. + + Arguments: + KEY: Name of the param. Maximum size is 255 bytes. + VALUE: String value of the param being logged. Maximum size is 500 bytes. + +Usage: + databricks experiments log-param KEY VALUE [flags] + +Flags: + -h, --help help for log-param + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --run-id string ID of the run under which to log the param. + --run-uuid string [Deprecated, use run_id instead] ID of the run under which to log the param. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments restore-experiment --help +Restores an experiment. + + Restore an experiment marked for deletion. This also restores associated + metadata, runs, metrics, params, and tags. If experiment uses FileStore, + underlying artifacts associated with experiment are also restored. + + Throws RESOURCE_DOES_NOT_EXIST if experiment was never created or was + permanently deleted. + + Arguments: + EXPERIMENT_ID: ID of the associated experiment. 
+ +Usage: + databricks experiments restore-experiment EXPERIMENT_ID [flags] + +Flags: + -h, --help help for restore-experiment + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments restore-run --help +Restore a run. + + Restores a deleted run. + + Arguments: + RUN_ID: ID of the run to restore. + +Usage: + databricks experiments restore-run RUN_ID [flags] + +Flags: + -h, --help help for restore-run + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments restore-runs --help +Restore runs by deletion time. + + Bulk restore runs in an experiment that were deleted no earlier than the + specified timestamp. Restores at most max_runs per request. To call this API + from a Databricks Notebook in Python, you can use the client code snippet on + https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore. + + Arguments: + EXPERIMENT_ID: The ID of the experiment containing the runs to restore. + MIN_TIMESTAMP_MILLIS: The minimum deletion timestamp in milliseconds since the UNIX epoch for + restoring runs. Only runs deleted no earlier than this timestamp are + restored. + +Usage: + databricks experiments restore-runs EXPERIMENT_ID MIN_TIMESTAMP_MILLIS [flags] + +Flags: + -h, --help help for restore-runs + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --max-runs int An optional positive integer indicating the maximum number of runs to restore. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments search-experiments --help +Search experiments. + + Searches for experiments that satisfy specified search criteria. + +Usage: + databricks experiments search-experiments [flags] + +Flags: + --filter string String representing a SQL filter condition (e.g. + -h, --help help for search-experiments + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --max-results int Maximum number of experiments desired. + --page-token string Token indicating the page of experiments to fetch. + --view-type SearchExperimentsViewType Qualifier for type of experiments to be returned. Supported values: [ACTIVE_ONLY, ALL, DELETED_ONLY] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments search-runs --help +Search for runs. + + Searches for runs that satisfy expressions. + + Search expressions can use mlflowMetric and mlflowParam keys.", + +Usage: + databricks experiments search-runs [flags] + +Flags: + --filter string A filter expression over params, metrics, and tags, that allows returning a subset of runs. 
+ -h, --help help for search-runs + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --max-results int Maximum number of runs desired. + --page-token string Token for the current page of runs. + --run-view-type SearchRunsRunViewType Whether to display only active, only deleted, or all runs. Supported values: [ACTIVE_ONLY, ALL, DELETED_ONLY] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments set-experiment-tag --help +Set a tag. + + Sets a tag on an experiment. Experiment tags are metadata that can be updated. + + Arguments: + EXPERIMENT_ID: ID of the experiment under which to log the tag. Must be provided. + KEY: Name of the tag. Maximum size depends on storage backend. All storage + backends are guaranteed to support key values up to 250 bytes in size. + VALUE: String value of the tag being logged. Maximum size depends on storage + backend. All storage backends are guaranteed to support key values up to + 5000 bytes in size. + +Usage: + databricks experiments set-experiment-tag EXPERIMENT_ID KEY VALUE [flags] + +Flags: + -h, --help help for set-experiment-tag + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments set-permissions --help +Set experiment permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. + + Arguments: + EXPERIMENT_ID: The experiment for which to get or manage permissions. + +Usage: + databricks experiments set-permissions EXPERIMENT_ID [flags] + +Flags: + -h, --help help for set-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments set-tag --help +Set a tag. + + Sets a tag on a run. Tags are run metadata that can be updated during a run + and after a run completes. + + Arguments: + KEY: Name of the tag. Maximum size depends on storage backend. All storage + backends are guaranteed to support key values up to 250 bytes in size. + VALUE: String value of the tag being logged. Maximum size depends on storage + backend. All storage backends are guaranteed to support key values up to + 5000 bytes in size. + +Usage: + databricks experiments set-tag KEY VALUE [flags] + +Flags: + -h, --help help for set-tag + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --run-id string ID of the run under which to log the tag. + --run-uuid string [Deprecated, use run_id instead] ID of the run under which to log the tag. 
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments update-experiment --help +Update an experiment. + + Updates experiment metadata. + + Arguments: + EXPERIMENT_ID: ID of the associated experiment. + +Usage: + databricks experiments update-experiment EXPERIMENT_ID [flags] + +Flags: + -h, --help help for update-experiment + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --new-name string If provided, the experiment's name is changed to the new name. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments update-permissions --help +Update experiment permissions. + + Updates the permissions on an experiment. Experiments can inherit permissions + from their root object. + + Arguments: + EXPERIMENT_ID: The experiment for which to get or manage permissions. + +Usage: + databricks experiments update-permissions EXPERIMENT_ID [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI experiments update-run --help +Update a run. + + Updates run metadata. + +Usage: + databricks experiments update-run [flags] + +Flags: + --end-time int Unix timestamp in milliseconds of when the run ended. + -h, --help help for update-run + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --run-id string ID of the run to update. + --run-uuid string [Deprecated, use run_id instead] ID of the run to update. + --status UpdateRunStatus Updated status of the run. Supported values: [FAILED, FINISHED, KILLED, RUNNING, SCHEDULED] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/experiments/experiments/script b/acceptance/help/cmd/workspace/experiments/experiments/script new file mode 100755 index 000000000..d1625cb50 --- /dev/null +++ b/acceptance/help/cmd/workspace/experiments/experiments/script @@ -0,0 +1,31 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI experiments create-experiment --help +trace $CLI experiments create-run --help +trace $CLI experiments delete-experiment --help +trace $CLI experiments delete-run --help +trace $CLI experiments delete-runs --help +trace $CLI experiments delete-tag --help +trace $CLI experiments get-by-name --help +trace $CLI experiments get-experiment --help +trace $CLI experiments get-history --help +trace $CLI experiments get-permission-levels --help +trace $CLI experiments get-permissions --help +trace $CLI experiments get-run --help +trace $CLI experiments list-artifacts --help +trace $CLI experiments list-experiments --help +trace $CLI experiments log-batch --help +trace $CLI experiments log-inputs --help +trace $CLI experiments log-metric --help +trace $CLI experiments log-model --help +trace $CLI experiments log-param --help +trace $CLI experiments restore-experiment --help +trace $CLI experiments restore-run --help +trace $CLI experiments restore-runs --help +trace $CLI experiments search-experiments --help +trace $CLI experiments search-runs --help +trace $CLI experiments set-experiment-tag --help +trace $CLI experiments set-permissions --help +trace $CLI experiments set-tag --help +trace $CLI experiments update-experiment --help +trace $CLI experiments update-permissions --help +trace $CLI experiments update-run --help diff --git a/acceptance/help/cmd/workspace/external-locations/external-locations/output.txt b/acceptance/help/cmd/workspace/external-locations/external-locations/output.txt new file mode 100644 index 000000000..7e85930b6 --- /dev/null +++ b/acceptance/help/cmd/workspace/external-locations/external-locations/output.txt @@ -0,0 +1,132 @@ + +>>> $CLI external-locations create --help +Create an external location. + + Creates a new external location entry in the metastore. The caller must be a + metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both the + metastore and the associated storage credential. + + Arguments: + NAME: Name of the external location. + URL: Path URL of the external location. + CREDENTIAL_NAME: Name of the storage credential used with this location. + +Usage: + databricks external-locations create NAME URL CREDENTIAL_NAME [flags] + +Flags: + --access-point string The AWS access point to use when accesing s3 for this external location. + --comment string User-provided free-form text description. + --fallback Indicates whether fallback mode is enabled for this external location. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --read-only Indicates whether the external location is read-only. + --skip-validation Skips validation of the storage credential associated with the external location. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI external-locations delete --help +Delete an external location. + + Deletes the specified external location from the metastore. The caller must be + the owner of the external location. + + Arguments: + NAME: Name of the external location. + +Usage: + databricks external-locations delete NAME [flags] + +Flags: + --force Force deletion even if there are dependent external tables or mounts. 
+ -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI external-locations get --help +Get an external location. + + Gets an external location from the metastore. The caller must be either a + metastore admin, the owner of the external location, or a user that has some + privilege on the external location. + + Arguments: + NAME: Name of the external location. + +Usage: + databricks external-locations get NAME [flags] + +Flags: + -h, --help help for get + --include-browse Whether to include external locations in the response for which the principal can only access selective metadata for. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI external-locations list --help +List external locations. + + Gets an array of external locations (__ExternalLocationInfo__ objects) from + the metastore. The caller must be a metastore admin, the owner of the external + location, or a user that has some privilege on the external location. There is + no guarantee of a specific ordering of the elements in the array. + +Usage: + databricks external-locations list [flags] + +Flags: + -h, --help help for list + --include-browse Whether to include external locations in the response for which the principal can only access selective metadata for. + --max-results int Maximum number of external locations to return. + --page-token string Opaque pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI external-locations update --help +Update an external location. + + Updates an external location in the metastore. The caller must be the owner of + the external location, or be a metastore admin. In the second case, the admin + can only update the name of the external location. + + Arguments: + NAME: Name of the external location. + +Usage: + databricks external-locations update NAME [flags] + +Flags: + --access-point string The AWS access point to use when accesing s3 for this external location. + --comment string User-provided free-form text description. + --credential-name string Name of the storage credential used with this location. + --fallback Indicates whether fallback mode is enabled for this external location. + --force Force update even if changing url invalidates dependent external tables or mounts. + -h, --help help for update + --isolation-mode IsolationMode . Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN] + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --new-name string New name for the external location. + --owner string The owner of the external location. + --read-only Indicates whether the external location is read-only. + --skip-validation Skips validation of the storage credential associated with the external location. + --url string Path URL of the external location. 
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/external-locations/external-locations/script b/acceptance/help/cmd/workspace/external-locations/external-locations/script new file mode 100755 index 000000000..2587e37d7 --- /dev/null +++ b/acceptance/help/cmd/workspace/external-locations/external-locations/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI external-locations create --help +trace $CLI external-locations delete --help +trace $CLI external-locations get --help +trace $CLI external-locations list --help +trace $CLI external-locations update --help diff --git a/acceptance/help/cmd/workspace/files/files/output.txt b/acceptance/help/cmd/workspace/files/files/output.txt new file mode 100644 index 000000000..2c9329280 --- /dev/null +++ b/acceptance/help/cmd/workspace/files/files/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI files create-directory --help +Error: unknown command "files" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/files/files/script b/acceptance/help/cmd/workspace/files/files/script new file mode 100755 index 000000000..21a800270 --- /dev/null +++ b/acceptance/help/cmd/workspace/files/files/script @@ -0,0 +1,9 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI files create-directory --help +trace $CLI files delete --help +trace $CLI files delete-directory --help +trace $CLI files download --help +trace $CLI files get-directory-metadata --help +trace $CLI files get-metadata --help +trace $CLI files list-directory-contents --help +trace $CLI files upload --help diff --git a/acceptance/help/cmd/workspace/functions/functions/output.txt b/acceptance/help/cmd/workspace/functions/functions/output.txt new file mode 100644 index 000000000..42f6964d5 --- /dev/null +++ b/acceptance/help/cmd/workspace/functions/functions/output.txt @@ -0,0 +1,139 @@ + +>>> $CLI functions create --help +Create a function. + + **WARNING: This API is experimental and will change in future versions** + + Creates a new function + + The user must have the following permissions in order for the function to be + created: - **USE_CATALOG** on the function's parent catalog - **USE_SCHEMA** + and **CREATE_FUNCTION** on the function's parent schema + +Usage: + databricks functions create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI functions delete --help +Delete a function. + + Deletes the function that matches the supplied name. 
For the deletion to + succeed, the user must satisfy one of the following conditions: - Is the owner + of the function's parent catalog - Is the owner of the function's parent + schema and have the **USE_CATALOG** privilege on its parent catalog - Is the + owner of the function itself and have both the **USE_CATALOG** privilege on + its parent catalog and the **USE_SCHEMA** privilege on its parent schema + + Arguments: + NAME: The fully-qualified name of the function (of the form + __catalog_name__.__schema_name__.__function__name__). + +Usage: + databricks functions delete NAME [flags] + +Flags: + --force Force deletion even if the function is notempty. + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI functions get --help +Get a function. + + Gets a function from within a parent catalog and schema. For the fetch to + succeed, the user must satisfy one of the following requirements: - Is a + metastore admin - Is an owner of the function's parent catalog - Have the + **USE_CATALOG** privilege on the function's parent catalog and be the owner of + the function - Have the **USE_CATALOG** privilege on the function's parent + catalog, the **USE_SCHEMA** privilege on the function's parent schema, and the + **EXECUTE** privilege on the function itself + + Arguments: + NAME: The fully-qualified name of the function (of the form + __catalog_name__.__schema_name__.__function__name__). + +Usage: + databricks functions get NAME [flags] + +Flags: + -h, --help help for get + --include-browse Whether to include functions in the response for which the principal can only access selective metadata for. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI functions list --help +List functions. + + List functions within the specified parent catalog and schema. If the user is + a metastore admin, all functions are returned in the output list. Otherwise, + the user must have the **USE_CATALOG** privilege on the catalog and the + **USE_SCHEMA** privilege on the schema, and the output list contains only + functions for which either the user has the **EXECUTE** privilege or the user + is the owner. There is no guarantee of a specific ordering of the elements in + the array. + + Arguments: + CATALOG_NAME: Name of parent catalog for functions of interest. + SCHEMA_NAME: Parent schema of functions. + +Usage: + databricks functions list CATALOG_NAME SCHEMA_NAME [flags] + +Flags: + -h, --help help for list + --include-browse Whether to include functions in the response for which the principal can only access selective metadata for. + --max-results int Maximum number of functions to return. + --page-token string Opaque pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI functions update --help +Update a function. + + Updates the function that matches the supplied name. Only the owner of the + function can be updated. 
If the user is not a metastore admin, the user must + be a member of the group that is the new function owner. - Is a metastore + admin - Is the owner of the function's parent catalog - Is the owner of the + function's parent schema and has the **USE_CATALOG** privilege on its parent + catalog - Is the owner of the function itself and has the **USE_CATALOG** + privilege on its parent catalog as well as the **USE_SCHEMA** privilege on the + function's parent schema. + + Arguments: + NAME: The fully-qualified name of the function (of the form + __catalog_name__.__schema_name__.__function__name__). + +Usage: + databricks functions update NAME [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --owner string Username of current owner of function. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/functions/functions/script b/acceptance/help/cmd/workspace/functions/functions/script new file mode 100755 index 000000000..e54977650 --- /dev/null +++ b/acceptance/help/cmd/workspace/functions/functions/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI functions create --help +trace $CLI functions delete --help +trace $CLI functions get --help +trace $CLI functions list --help +trace $CLI functions update --help diff --git a/acceptance/help/cmd/workspace/genie/genie/output.txt b/acceptance/help/cmd/workspace/genie/genie/output.txt new file mode 100644 index 000000000..fa3878f61 --- /dev/null +++ b/acceptance/help/cmd/workspace/genie/genie/output.txt @@ -0,0 +1,121 @@ + +>>> $CLI genie create-message --help +Create conversation message. + + Create new message in [conversation](:method:genie/startconversation). The AI + response uses all previously created messages in the conversation to respond. + + Arguments: + SPACE_ID: The ID associated with the Genie space where the conversation is started. + CONVERSATION_ID: The ID associated with the conversation. + CONTENT: User message content. + +Usage: + databricks genie create-message SPACE_ID CONVERSATION_ID CONTENT [flags] + +Flags: + -h, --help help for create-message + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach COMPLETED state + --timeout duration maximum amount of time to reach COMPLETED state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI genie execute-message-query --help +Execute SQL query in a conversation message. + + Execute the SQL query in the message. + + Arguments: + SPACE_ID: Genie space ID + CONVERSATION_ID: Conversation ID + MESSAGE_ID: Message ID + +Usage: + databricks genie execute-message-query SPACE_ID CONVERSATION_ID MESSAGE_ID [flags] + +Flags: + -h, --help help for execute-message-query + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI genie get-message --help +Get conversation message. 
+ + Get message from conversation. + + Arguments: + SPACE_ID: The ID associated with the Genie space where the target conversation is + located. + CONVERSATION_ID: The ID associated with the target conversation. + MESSAGE_ID: The ID associated with the target message from the identified + conversation. + +Usage: + databricks genie get-message SPACE_ID CONVERSATION_ID MESSAGE_ID [flags] + +Flags: + -h, --help help for get-message + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI genie get-message-query-result --help +Get conversation message SQL query result. + + Get the result of SQL query if the message has a query attachment. This is + only available if a message has a query attachment and the message status is + EXECUTING_QUERY. + + Arguments: + SPACE_ID: Genie space ID + CONVERSATION_ID: Conversation ID + MESSAGE_ID: Message ID + +Usage: + databricks genie get-message-query-result SPACE_ID CONVERSATION_ID MESSAGE_ID [flags] + +Flags: + -h, --help help for get-message-query-result + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI genie start-conversation --help +Start conversation. + + Start a new conversation. + + Arguments: + SPACE_ID: The ID associated with the Genie space where you want to start a + conversation. + CONTENT: The text of the message that starts the conversation. + +Usage: + databricks genie start-conversation SPACE_ID CONTENT [flags] + +Flags: + -h, --help help for start-conversation + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach COMPLETED state + --timeout duration maximum amount of time to reach COMPLETED state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/genie/genie/script b/acceptance/help/cmd/workspace/genie/genie/script new file mode 100755 index 000000000..55d17fd5e --- /dev/null +++ b/acceptance/help/cmd/workspace/genie/genie/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI genie create-message --help +trace $CLI genie execute-message-query --help +trace $CLI genie get-message --help +trace $CLI genie get-message-query-result --help +trace $CLI genie start-conversation --help diff --git a/acceptance/help/cmd/workspace/git-credentials/git-credentials/output.txt b/acceptance/help/cmd/workspace/git-credentials/git-credentials/output.txt new file mode 100644 index 000000000..af75a0eeb --- /dev/null +++ b/acceptance/help/cmd/workspace/git-credentials/git-credentials/output.txt @@ -0,0 +1,114 @@ + +>>> $CLI git-credentials create --help +Create a credential entry. + + Creates a Git credential entry for the user. Only one Git credential per user + is supported, so any attempts to create credentials if an entry already exists + will fail. Use the PATCH endpoint to update existing credentials, or the + DELETE endpoint to delete existing credentials. + + Arguments: + GIT_PROVIDER: Git provider. This field is case-insensitive. 
The available Git providers + are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, + gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and + awsCodeCommit. + +Usage: + databricks git-credentials create GIT_PROVIDER [flags] + +Flags: + --git-username string The username or email provided with your Git provider account, depending on which provider you are using. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --personal-access-token string The personal access token used to authenticate to the corresponding Git provider. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI git-credentials delete --help +Delete a credential. + + Deletes the specified Git credential. + + Arguments: + CREDENTIAL_ID: The ID for the corresponding credential to access. + +Usage: + databricks git-credentials delete CREDENTIAL_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI git-credentials get --help +Get a credential entry. + + Gets the Git credential with the specified credential ID. + + Arguments: + CREDENTIAL_ID: The ID for the corresponding credential to access. + +Usage: + databricks git-credentials get CREDENTIAL_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI git-credentials list --help +Get Git credentials. + + Lists the calling user's Git credentials. One credential per user is + supported. + +Usage: + databricks git-credentials list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI git-credentials update --help +Update a credential. + + Updates the specified Git credential. + + Arguments: + CREDENTIAL_ID: The ID for the corresponding credential to access. + GIT_PROVIDER: Git provider. This field is case-insensitive. The available Git providers + are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, + gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and + awsCodeCommit. + +Usage: + databricks git-credentials update CREDENTIAL_ID GIT_PROVIDER [flags] + +Flags: + --git-username string The username or email provided with your Git provider account, depending on which provider you are using. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --personal-access-token string The personal access token used to authenticate to the corresponding Git provider. 
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/git-credentials/git-credentials/script b/acceptance/help/cmd/workspace/git-credentials/git-credentials/script new file mode 100755 index 000000000..3ffc1ac2c --- /dev/null +++ b/acceptance/help/cmd/workspace/git-credentials/git-credentials/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI git-credentials create --help +trace $CLI git-credentials delete --help +trace $CLI git-credentials get --help +trace $CLI git-credentials list --help +trace $CLI git-credentials update --help diff --git a/acceptance/help/cmd/workspace/global-init-scripts/global-init-scripts/output.txt b/acceptance/help/cmd/workspace/global-init-scripts/global-init-scripts/output.txt new file mode 100644 index 000000000..cdf533c99 --- /dev/null +++ b/acceptance/help/cmd/workspace/global-init-scripts/global-init-scripts/output.txt @@ -0,0 +1,110 @@ + +>>> $CLI global-init-scripts create --help +Create init script. + + Creates a new global init script in this workspace. + + Arguments: + NAME: The name of the script + SCRIPT: The Base64-encoded content of the script. + +Usage: + databricks global-init-scripts create NAME SCRIPT [flags] + +Flags: + --enabled Specifies whether the script is enabled. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --position int The position of a global init script, where 0 represents the first script to run, 1 is the second script to run, in ascending order. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI global-init-scripts delete --help +Delete init script. + + Deletes a global init script. + + Arguments: + SCRIPT_ID: The ID of the global init script. + +Usage: + databricks global-init-scripts delete SCRIPT_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI global-init-scripts get --help +Get an init script. + + Gets all the details of a script, including its Base64-encoded contents. + + Arguments: + SCRIPT_ID: The ID of the global init script. + +Usage: + databricks global-init-scripts get SCRIPT_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI global-init-scripts list --help +Get init scripts. + + Get a list of all global init scripts for this workspace. This returns all + properties for each script but **not** the script contents. To retrieve the + contents of a script, use the [get a global init + script](:method:globalinitscripts/get) operation. 
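As an illustrative aside, the list and get operations just described are naturally chained; a minimal sketch, with a placeholder script ID:

  databricks global-init-scripts list -o json
  databricks global-init-scripts get <script-id-from-list-output>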
+ +Usage: + databricks global-init-scripts list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI global-init-scripts update --help +Update init script. + + Updates a global init script, specifying only the fields to change. All fields + are optional. Unspecified fields retain their current value. + + Arguments: + SCRIPT_ID: The ID of the global init script. + NAME: The name of the script + SCRIPT: The Base64-encoded content of the script. + +Usage: + databricks global-init-scripts update SCRIPT_ID NAME SCRIPT [flags] + +Flags: + --enabled Specifies whether the script is enabled. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --position int The position of a script, where 0 represents the first script to run, 1 is the second script to run, in ascending order. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/global-init-scripts/global-init-scripts/script b/acceptance/help/cmd/workspace/global-init-scripts/global-init-scripts/script new file mode 100755 index 000000000..7e68133ec --- /dev/null +++ b/acceptance/help/cmd/workspace/global-init-scripts/global-init-scripts/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI global-init-scripts create --help +trace $CLI global-init-scripts delete --help +trace $CLI global-init-scripts get --help +trace $CLI global-init-scripts list --help +trace $CLI global-init-scripts update --help diff --git a/acceptance/help/cmd/workspace/grants/grants/output.txt b/acceptance/help/cmd/workspace/grants/grants/output.txt new file mode 100644 index 000000000..826fbedc3 --- /dev/null +++ b/acceptance/help/cmd/workspace/grants/grants/output.txt @@ -0,0 +1,66 @@ + +>>> $CLI grants get --help +Get permissions. + + Gets the permissions for a securable. + + Arguments: + SECURABLE_TYPE: Type of securable. + FULL_NAME: Full name of securable. + +Usage: + databricks grants get SECURABLE_TYPE FULL_NAME [flags] + +Flags: + -h, --help help for get + --principal string If provided, only the permissions for the specified principal (user or group) are returned. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI grants get-effective --help +Get effective permissions. + + Gets the effective permissions for a securable. + + Arguments: + SECURABLE_TYPE: Type of securable. + FULL_NAME: Full name of securable. + +Usage: + databricks grants get-effective SECURABLE_TYPE FULL_NAME [flags] + +Flags: + -h, --help help for get-effective + --principal string If provided, only the effective permissions for the specified principal (user or group) are returned. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI grants update --help +Update permissions. 
+ + Updates the permissions for a securable. + + Arguments: + SECURABLE_TYPE: Type of securable. + FULL_NAME: Full name of securable. + +Usage: + databricks grants update SECURABLE_TYPE FULL_NAME [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/grants/grants/script b/acceptance/help/cmd/workspace/grants/grants/script new file mode 100755 index 000000000..b5ef14d02 --- /dev/null +++ b/acceptance/help/cmd/workspace/grants/grants/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI grants get --help +trace $CLI grants get-effective --help +trace $CLI grants update --help diff --git a/acceptance/help/cmd/workspace/groups/groups/output.txt b/acceptance/help/cmd/workspace/groups/groups/output.txt new file mode 100644 index 000000000..5c6d5651c --- /dev/null +++ b/acceptance/help/cmd/workspace/groups/groups/output.txt @@ -0,0 +1,131 @@ + +>>> $CLI groups create --help +Create a new group. + + Creates a group in the Databricks workspace with a unique name, using the + supplied group details. + +Usage: + databricks groups create [flags] + +Flags: + --display-name string String that represents a human-readable group name. + --external-id string + -h, --help help for create + --id string Databricks group ID. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI groups delete --help +Delete a group. + + Deletes a group from the Databricks workspace. + + Arguments: + ID: Unique ID for a group in the Databricks workspace. + +Usage: + databricks groups delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI groups get --help +Get group details. + + Gets the information for a specific group in the Databricks workspace. + + Arguments: + ID: Unique ID for a group in the Databricks workspace. + +Usage: + databricks groups get ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI groups list --help +List group details. + + Gets all details of the groups associated with the Databricks workspace. + +Usage: + databricks groups list [flags] + +Flags: + --attributes string Comma-separated list of attributes to return in response. + --count int Desired number of results per page. + --excluded-attributes string Comma-separated list of attributes to exclude in response. + --filter string Query by which the results have to be filtered. + -h, --help help for list + --sort-by string Attribute to sort the results. + --sort-order ListSortOrder The order to sort the results. 
Supported values: [ascending, descending] + --start-index int Specifies the index of the first result. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI groups patch --help +Update group details. + + Partially updates the details of a group. + + Arguments: + ID: Unique ID for a group in the Databricks workspace. + +Usage: + databricks groups patch ID [flags] + +Flags: + -h, --help help for patch + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI groups update --help +Replace a group. + + Updates the details of a group by replacing the entire group entity. + + Arguments: + ID: Databricks group ID + +Usage: + databricks groups update ID [flags] + +Flags: + --display-name string String that represents a human-readable group name. + --external-id string + -h, --help help for update + --id string Databricks group ID. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/groups/groups/script b/acceptance/help/cmd/workspace/groups/groups/script new file mode 100755 index 000000000..dc64ffef6 --- /dev/null +++ b/acceptance/help/cmd/workspace/groups/groups/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI groups create --help +trace $CLI groups delete --help +trace $CLI groups get --help +trace $CLI groups list --help +trace $CLI groups patch --help +trace $CLI groups update --help diff --git a/acceptance/help/cmd/workspace/instance-pools/instance-pools/output.txt b/acceptance/help/cmd/workspace/instance-pools/instance-pools/output.txt new file mode 100644 index 000000000..c4518852a --- /dev/null +++ b/acceptance/help/cmd/workspace/instance-pools/instance-pools/output.txt @@ -0,0 +1,207 @@ + +>>> $CLI instance-pools create --help +Create a new instance pool. + + Creates a new instance pool using idle and ready-to-use cloud instances. + + Arguments: + INSTANCE_POOL_NAME: Pool name requested by the user. Pool name must be unique. Length must be + between 1 and 100 characters. + NODE_TYPE_ID: This field encodes, through a single value, the resources available to + each of the Spark nodes in this cluster. For example, the Spark nodes can + be provisioned and optimized for memory or compute intensive workloads. A + list of available node types can be retrieved by using the + :method:clusters/listNodeTypes API call. + +Usage: + databricks instance-pools create INSTANCE_POOL_NAME NODE_TYPE_ID [flags] + +Flags: + --enable-elastic-disk Autoscaling Local Storage: when enabled, the instances in this pool will dynamically acquire additional disk space when their Spark workers are running low on disk space.
+ -h, --help help for create + --idle-instance-autotermination-minutes int Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --max-capacity int Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances. + --min-idle-instances int Minimum number of idle instances to keep in the instance pool. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI instance-pools delete --help +Delete an instance pool. + + Deletes the instance pool permanently. The idle instances in the pool are + terminated asynchronously. + + Arguments: + INSTANCE_POOL_ID: The instance pool to be terminated. + +Usage: + databricks instance-pools delete INSTANCE_POOL_ID [flags] + +Flags: + -h, --help help for delete + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI instance-pools edit --help +Edit an existing instance pool. + + Modifies the configuration of an existing instance pool. + + Arguments: + INSTANCE_POOL_ID: Instance pool ID + INSTANCE_POOL_NAME: Pool name requested by the user. Pool name must be unique. Length must be + between 1 and 100 characters. + NODE_TYPE_ID: This field encodes, through a single value, the resources available to + each of the Spark nodes in this cluster. For example, the Spark nodes can + be provisioned and optimized for memory or compute intensive workloads. A + list of available node types can be retrieved by using the + :method:clusters/listNodeTypes API call. + +Usage: + databricks instance-pools edit INSTANCE_POOL_ID INSTANCE_POOL_NAME NODE_TYPE_ID [flags] + +Flags: + -h, --help help for edit + --idle-instance-autotermination-minutes int Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --max-capacity int Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances. + --min-idle-instances int Minimum number of idle instances to keep in the instance pool. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI instance-pools get --help +Get instance pool information. + + Retrieve the information for an instance pool based on its identifier. + + Arguments: + INSTANCE_POOL_ID: The canonical unique identifier for the instance pool. 
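A sketch of how the create flags documented above might combine in practice; the pool name and node type are illustrative placeholders, not values from this change:

  databricks instance-pools create demo-pool i3.xlarge --min-idle-instances 1 --max-capacity 10 --idle-instance-autotermination-minutes 20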
+ +Usage: + databricks instance-pools get INSTANCE_POOL_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI instance-pools get-permission-levels --help +Get instance pool permission levels. + + Gets the permission levels that a user can have on an object. + + Arguments: + INSTANCE_POOL_ID: The instance pool for which to get or manage permissions. + +Usage: + databricks instance-pools get-permission-levels INSTANCE_POOL_ID [flags] + +Flags: + -h, --help help for get-permission-levels + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI instance-pools get-permissions --help +Get instance pool permissions. + + Gets the permissions of an instance pool. Instance pools can inherit + permissions from their root object. + + Arguments: + INSTANCE_POOL_ID: The instance pool for which to get or manage permissions. + +Usage: + databricks instance-pools get-permissions INSTANCE_POOL_ID [flags] + +Flags: + -h, --help help for get-permissions + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI instance-pools list --help +List instance pool info. + + Gets a list of instance pools with their statistics. + +Usage: + databricks instance-pools list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI instance-pools set-permissions --help +Set instance pool permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. + + Arguments: + INSTANCE_POOL_ID: The instance pool for which to get or manage permissions. + +Usage: + databricks instance-pools set-permissions INSTANCE_POOL_ID [flags] + +Flags: + -h, --help help for set-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI instance-pools update-permissions --help +Update instance pool permissions. + + Updates the permissions on an instance pool. Instance pools can inherit + permissions from their root object. + + Arguments: + INSTANCE_POOL_ID: The instance pool for which to get or manage permissions. 
+ +Usage: + databricks instance-pools update-permissions INSTANCE_POOL_ID [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/instance-pools/instance-pools/script b/acceptance/help/cmd/workspace/instance-pools/instance-pools/script new file mode 100755 index 000000000..7eb60b6cf --- /dev/null +++ b/acceptance/help/cmd/workspace/instance-pools/instance-pools/script @@ -0,0 +1,10 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI instance-pools create --help +trace $CLI instance-pools delete --help +trace $CLI instance-pools edit --help +trace $CLI instance-pools get --help +trace $CLI instance-pools get-permission-levels --help +trace $CLI instance-pools get-permissions --help +trace $CLI instance-pools list --help +trace $CLI instance-pools set-permissions --help +trace $CLI instance-pools update-permissions --help diff --git a/acceptance/help/cmd/workspace/instance-profiles/instance-profiles/output.txt b/acceptance/help/cmd/workspace/instance-profiles/instance-profiles/output.txt new file mode 100644 index 000000000..eaa6b3946 --- /dev/null +++ b/acceptance/help/cmd/workspace/instance-profiles/instance-profiles/output.txt @@ -0,0 +1,107 @@ + +>>> $CLI instance-profiles add --help +Register an instance profile. + + In the UI, you can select the instance profile when launching clusters. This + API is only available to admin users. + + Arguments: + INSTANCE_PROFILE_ARN: The AWS ARN of the instance profile to register with Databricks. This + field is required. + +Usage: + databricks instance-profiles add INSTANCE_PROFILE_ARN [flags] + +Flags: + -h, --help help for add + --iam-role-arn string The AWS IAM role ARN of the role associated with the instance profile. + --is-meta-instance-profile Boolean flag indicating whether the instance profile should only be used in credential passthrough scenarios. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --skip-validation By default, Databricks validates that it has sufficient permissions to launch instances with the instance profile. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI instance-profiles edit --help +Edit an instance profile. + + The only supported field to change is the optional IAM role ARN associated + with the instance profile. It is required to specify the IAM role ARN if both + of the following are true: + + * Your role name and instance profile name do not match. The name is the part + after the last slash in each ARN. * You want to use the instance profile with + [Databricks SQL Serverless]. + + To understand where these fields are in the AWS console, see [Enable + serverless SQL warehouses]. + + This API is only available to admin users. 
+ + [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html + [Enable serverless SQL warehouses]: https://docs.databricks.com/sql/admin/serverless.html + + Arguments: + INSTANCE_PROFILE_ARN: The AWS ARN of the instance profile to register with Databricks. This + field is required. + +Usage: + databricks instance-profiles edit INSTANCE_PROFILE_ARN [flags] + +Flags: + -h, --help help for edit + --iam-role-arn string The AWS IAM role ARN of the role associated with the instance profile. + --is-meta-instance-profile Boolean flag indicating whether the instance profile should only be used in credential passthrough scenarios. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI instance-profiles list --help +List available instance profiles. + + List the instance profiles that the calling user can use to launch a cluster. + + This API is available to all users. + +Usage: + databricks instance-profiles list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI instance-profiles remove --help +Remove the instance profile. + + Remove the instance profile with the provided ARN. Existing clusters with this + instance profile will continue to function. + + This API is only accessible to admin users. + + Arguments: + INSTANCE_PROFILE_ARN: The ARN of the instance profile to remove. This field is required. + +Usage: + databricks instance-profiles remove INSTANCE_PROFILE_ARN [flags] + +Flags: + -h, --help help for remove + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/instance-profiles/instance-profiles/script b/acceptance/help/cmd/workspace/instance-profiles/instance-profiles/script new file mode 100755 index 000000000..64459a522 --- /dev/null +++ b/acceptance/help/cmd/workspace/instance-profiles/instance-profiles/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI instance-profiles add --help +trace $CLI instance-profiles edit --help +trace $CLI instance-profiles list --help +trace $CLI instance-profiles remove --help diff --git a/acceptance/help/cmd/workspace/ip-access-lists/ip-access-lists/output.txt b/acceptance/help/cmd/workspace/ip-access-lists/ip-access-lists/output.txt new file mode 100644 index 000000000..20f24496d --- /dev/null +++ b/acceptance/help/cmd/workspace/ip-access-lists/ip-access-lists/output.txt @@ -0,0 +1,178 @@ + +>>> $CLI ip-access-lists create --help +Create access list. + + Creates an IP access list for this workspace. + + A list can be an allow list or a block list. See the top of this file for a + description of how the server treats allow lists and block lists at runtime. 
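To make the create call above concrete, a minimal sketch using the --json request-body flag documented below; the field names follow the IP access list request schema and are assumptions here, not part of this diff:

  databricks ip-access-lists create --json '{"label": "office", "list_type": "ALLOW", "ip_addresses": ["192.168.100.0/22"]}'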
+ + When creating or updating an IP access list: + + * For all allow lists and block lists combined, the API supports a maximum of + 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to + exceed that number return error 400 with error_code value QUOTA_EXCEEDED. + * If the new list would block the calling user's current IP, error 400 is + returned with error_code value INVALID_STATE. + + It can take a few minutes for the changes to take effect. **Note**: Your new + IP access list has no effect until you enable the feature. See + :method:workspaceconf/setStatus + + Arguments: + LABEL: Label for the IP access list. This **cannot** be empty. + LIST_TYPE: Type of IP access list. Valid values are as follows and are + case-sensitive: + + * ALLOW: An allow list. Include this IP or range. * BLOCK: A block + list. Exclude this IP or range. IP addresses in the block list are + excluded even if they are included in an allow list. + +Usage: + databricks ip-access-lists create LABEL LIST_TYPE [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI ip-access-lists delete --help +Delete access list. + + Deletes an IP access list, specified by its list ID. + + Arguments: + IP_ACCESS_LIST_ID: The ID for the corresponding IP access list + +Usage: + databricks ip-access-lists delete IP_ACCESS_LIST_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI ip-access-lists get --help +Get access list. + + Gets an IP access list, specified by its list ID. + + Arguments: + IP_ACCESS_LIST_ID: The ID for the corresponding IP access list + +Usage: + databricks ip-access-lists get IP_ACCESS_LIST_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI ip-access-lists list --help +Get access lists. + + Gets all IP access lists for the specified workspace. + +Usage: + databricks ip-access-lists list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI ip-access-lists replace --help +Replace access list. + + Replaces an IP access list, specified by its ID. + + A list can include allow lists and block lists. See the top of this file for a + description of how the server treats allow lists and block lists at run time. + When replacing an IP access list: * For all allow lists and block lists + combined, the API supports a maximum of 1000 IP/CIDR values, where one CIDR + counts as a single value. Attempts to exceed that number return error 400 with + error_code value QUOTA_EXCEEDED. * If the resulting list would block the + calling user's current IP, error 400 is returned with error_code value + INVALID_STATE. 
It can take a few minutes for the changes to take effect. + Note that your resulting IP access list has no effect until you enable the + feature. See :method:workspaceconf/setStatus. + + Arguments: + IP_ACCESS_LIST_ID: The ID for the corresponding IP access list + LABEL: Label for the IP access list. This **cannot** be empty. + LIST_TYPE: Type of IP access list. Valid values are as follows and are + case-sensitive: + + * ALLOW: An allow list. Include this IP or range. * BLOCK: A block + list. Exclude this IP or range. IP addresses in the block list are + excluded even if they are included in an allow list. + ENABLED: Specifies whether this IP access list is enabled. + +Usage: + databricks ip-access-lists replace IP_ACCESS_LIST_ID LABEL LIST_TYPE ENABLED [flags] + +Flags: + -h, --help help for replace + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI ip-access-lists update --help +Update access list. + + Updates an existing IP access list, specified by its ID. + + A list can include allow lists and block lists. See the top of this file for a + description of how the server treats allow lists and block lists at run time. + + When updating an IP access list: + + * For all allow lists and block lists combined, the API supports a maximum of + 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to + exceed that number return error 400 with error_code value QUOTA_EXCEEDED. + * If the updated list would block the calling user's current IP, error 400 is + returned with error_code value INVALID_STATE. + + It can take a few minutes for the changes to take effect. Note that your + resulting IP access list has no effect until you enable the feature. See + :method:workspaceconf/setStatus. + + Arguments: + IP_ACCESS_LIST_ID: The ID for the corresponding IP access list + +Usage: + databricks ip-access-lists update IP_ACCESS_LIST_ID [flags] + +Flags: + --enabled Specifies whether this IP access list is enabled. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --label string Label for the IP access list. + --list-type ListType Type of IP access list. Supported values: [ALLOW, BLOCK] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/ip-access-lists/ip-access-lists/script b/acceptance/help/cmd/workspace/ip-access-lists/ip-access-lists/script new file mode 100755 index 000000000..0c63a49eb --- /dev/null +++ b/acceptance/help/cmd/workspace/ip-access-lists/ip-access-lists/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI ip-access-lists create --help +trace $CLI ip-access-lists delete --help +trace $CLI ip-access-lists get --help +trace $CLI ip-access-lists list --help +trace $CLI ip-access-lists replace --help +trace $CLI ip-access-lists update --help diff --git a/acceptance/help/cmd/workspace/jobs/jobs/output.txt b/acceptance/help/cmd/workspace/jobs/jobs/output.txt new file mode 100644 index 000000000..e33d039fc --- /dev/null +++ b/acceptance/help/cmd/workspace/jobs/jobs/output.txt @@ -0,0 +1,470 @@ + +>>> $CLI jobs cancel-all-runs --help +Cancel all runs of a job. + + Cancels all active runs of a job. The runs are canceled asynchronously, so it + doesn't prevent new runs from being started. + +Usage: + databricks jobs cancel-all-runs [flags] + +Flags: + --all-queued-runs Optional boolean parameter to cancel all queued runs. + -h, --help help for cancel-all-runs + --job-id int The canonical identifier of the job to cancel all runs of. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs cancel-run --help +Cancel a run. + + Cancels a job run or a task run. The run is canceled asynchronously, so it may + still be running when this request completes. + + Arguments: + RUN_ID: This field is required. + +Usage: + databricks jobs cancel-run RUN_ID [flags] + +Flags: + -h, --help help for cancel-run + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach TERMINATED or SKIPPED state + --timeout duration maximum amount of time to reach TERMINATED or SKIPPED state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs create --help +Create a new job. + + Create a new job. + +Usage: + databricks jobs create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs delete --help +Delete a job. + + Deletes a job. + + Arguments: + JOB_ID: The canonical identifier of the job to delete. This field is required. + +Usage: + databricks jobs delete JOB_ID [flags] + +Flags: + -h, --help help for delete + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs delete-run --help +Delete a job run. + + Deletes a non-active run. Returns an error if the run is active. + + Arguments: + RUN_ID: ID of the run to delete. 
+ +Usage: + databricks jobs delete-run RUN_ID [flags] + +Flags: + -h, --help help for delete-run + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs export-run --help +Export and retrieve a job run. + + Export and retrieve the job run task. + + Arguments: + RUN_ID: The canonical identifier for the run. This field is required. + +Usage: + databricks jobs export-run RUN_ID [flags] + +Flags: + -h, --help help for export-run + --views-to-export ViewsToExport Which views to export (CODE, DASHBOARDS, or ALL). Supported values: [ALL, CODE, DASHBOARDS] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs get --help +Get a single job. + + Retrieves the details for a single job. + + In Jobs API 2.2, requests for a single job support pagination of tasks and + job_clusters when either exceeds 100 elements. Use the next_page_token + field to check for more results and pass its value as the page_token in + subsequent requests. Arrays with fewer than 100 elements in a page will be + empty on later pages. + + Arguments: + JOB_ID: The canonical identifier of the job to retrieve information about. This + field is required. + +Usage: + databricks jobs get JOB_ID [flags] + +Flags: + -h, --help help for get + --page-token string Use next_page_token returned from the previous GetJob to request the next page of the job's sub-resources. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs get-permission-levels --help +Get job permission levels. + + Gets the permission levels that a user can have on an object. + + Arguments: + JOB_ID: The job for which to get or manage permissions. + +Usage: + databricks jobs get-permission-levels JOB_ID [flags] + +Flags: + -h, --help help for get-permission-levels + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs get-permissions --help +Get job permissions. + + Gets the permissions of a job. Jobs can inherit permissions from their root + object. + + Arguments: + JOB_ID: The job for which to get or manage permissions. + +Usage: + databricks jobs get-permissions JOB_ID [flags] + +Flags: + -h, --help help for get-permissions + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs get-run --help +Get a single job run. + + Retrieves the metadata of a run. + + In Jobs API 2.2, requests for a single job run support pagination of tasks + and job_clusters when either exceeds 100 elements. Use the next_page_token + field to check for more results and pass its value as the page_token in + subsequent requests. Arrays with fewer than 100 elements in a page will be + empty on later pages. 
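A hedged sketch of the pagination flow just described, assuming an illustrative run ID (9876) and the jq tool for pulling the token out of the JSON response:

  # fetch the first page and capture the continuation token
  token=$(databricks jobs get-run 9876 -o json | jq -r '.next_page_token')
  # request the next page of the run's sub-resources
  databricks jobs get-run 9876 --page-token "$token" -o json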
+ + Arguments: + RUN_ID: The canonical identifier of the run for which to retrieve the metadata. + This field is required. + +Usage: + databricks jobs get-run RUN_ID [flags] + +Flags: + -h, --help help for get-run + --include-history Whether to include the repair history in the response. + --include-resolved-values Whether to include resolved parameter values in the response. + --no-wait do not wait to reach TERMINATED or SKIPPED state + --page-token string Use next_page_token returned from the previous GetRun to request the next page of the run's sub-resources. + --timeout duration maximum amount of time to reach TERMINATED or SKIPPED state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs get-run-output --help +Get the output for a single run. + + Retrieve the output and metadata of a single task run. When a notebook task + returns a value through the dbutils.notebook.exit() call, you can use this + endpoint to retrieve that value. Databricks restricts this API to returning + the first 5 MB of the output. To return a larger result, you can store job + results in a cloud storage service. + + This endpoint validates that the __run_id__ parameter is valid and returns an + HTTP status code 400 if the __run_id__ parameter is invalid. Runs are + automatically removed after 60 days. If you want to reference them beyond + 60 days, you must save old run results before they expire. + + Arguments: + RUN_ID: The canonical identifier for the run. + +Usage: + databricks jobs get-run-output RUN_ID [flags] + +Flags: + -h, --help help for get-run-output + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs list --help +List jobs. + + Retrieves a list of jobs. + +Usage: + databricks jobs list [flags] + +Flags: + --expand-tasks Whether to include task and cluster details in the response. + -h, --help help for list + --limit int The number of jobs to return. + --name string A filter on the list based on the exact (case insensitive) job name. + --offset int The offset of the first job to return, relative to the most recently created job. + --page-token string Use next_page_token or prev_page_token returned from the previous request to list the next or previous page of jobs respectively. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs list-runs --help +List job runs. + + List runs in descending order by start time. + +Usage: + databricks jobs list-runs [flags] + +Flags: + --active-only If active_only is true, only active runs are included in the results; otherwise, lists both active and completed runs. + --completed-only If completed_only is true, only completed runs are included in the results; otherwise, lists both active and completed runs. + --expand-tasks Whether to include task and cluster details in the response. + -h, --help help for list-runs + --job-id int The job for which to list runs. + --limit int The number of runs to return. + --offset int The offset of the first run to return, relative to the most recent run.
+ --page-token string Use next_page_token or prev_page_token returned from the previous request to list the next or previous page of runs respectively. + --run-type RunType The type of runs to return. Supported values: [JOB_RUN, SUBMIT_RUN, WORKFLOW_RUN] + --start-time-from int Show runs that started _at or after_ this value. + --start-time-to int Show runs that started _at or before_ this value. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs repair-run --help +Repair a job run. + + Re-run one or more tasks. Tasks are re-run as part of the original job run. + They use the current job and task settings, and can be viewed in the history + for the original job run. + + Arguments: + RUN_ID: The job run ID of the run to repair. The run must not be in progress. + +Usage: + databricks jobs repair-run RUN_ID [flags] + +Flags: + -h, --help help for repair-run + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --latest-repair-id int The ID of the latest repair. + --no-wait do not wait to reach TERMINATED or SKIPPED state + --rerun-all-failed-tasks If true, repair all failed tasks. + --rerun-dependent-tasks If true, repair all tasks that depend on the tasks in rerun_tasks, even if they were previously successful. + --timeout duration maximum amount of time to reach TERMINATED or SKIPPED state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs reset --help +Update all job settings (reset). + + Overwrite all settings for the given job. Use the [_Update_ + endpoint](:method:jobs/update) to update job settings partially. + +Usage: + databricks jobs reset [flags] + +Flags: + -h, --help help for reset + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs run-now --help +Trigger a new job run. + + Run a job and return the run_id of the triggered run. + + Arguments: + JOB_ID: The ID of the job to be executed + +Usage: + databricks jobs run-now JOB_ID [flags] + +Flags: + -h, --help help for run-now + --idempotency-token string An optional token to guarantee the idempotency of job run requests. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach TERMINATED or SKIPPED state + --timeout duration maximum amount of time to reach TERMINATED or SKIPPED state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs set-permissions --help +Set job permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. + + Arguments: + JOB_ID: The job for which to get or manage permissions. 
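To ground the run-now flags shown above, a hypothetical invocation (the job ID is a placeholder) that triggers a run without waiting for a terminal state:

  databricks jobs run-now 123 --no-wait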
+ +Usage: + databricks jobs set-permissions JOB_ID [flags] + +Flags: + -h, --help help for set-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs submit --help +Create and trigger a one-time run. + + Submit a one-time run. This endpoint allows you to submit a workload directly + without creating a job. Runs submitted using this endpoint don’t display in + the UI. Use the jobs/runs/get API to check the run state after the job is + submitted. + +Usage: + databricks jobs submit [flags] + +Flags: + --budget-policy-id string The user specified id of the budget policy to use for this one-time run. + -h, --help help for submit + --idempotency-token string An optional token that can be used to guarantee the idempotency of job run requests. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach TERMINATED or SKIPPED state + --run-name string An optional name for the run. + --timeout duration maximum amount of time to reach TERMINATED or SKIPPED state (default 20m0s) + --timeout-seconds int An optional timeout applied to each run of this job. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs update --help +Update job settings partially. + + Add, update, or remove specific settings of an existing job. Use the [_Reset_ + endpoint](:method:jobs/reset) to overwrite all job settings. + + Arguments: + JOB_ID: The canonical identifier of the job to update. This field is required. + +Usage: + databricks jobs update JOB_ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI jobs update-permissions --help +Update job permissions. + + Updates the permissions on a job. Jobs can inherit permissions from their root + object. + + Arguments: + JOB_ID: The job for which to get or manage permissions. + +Usage: + databricks jobs update-permissions JOB_ID [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/jobs/jobs/script b/acceptance/help/cmd/workspace/jobs/jobs/script new file mode 100755 index 000000000..85320b1fb --- /dev/null +++ b/acceptance/help/cmd/workspace/jobs/jobs/script @@ -0,0 +1,21 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI jobs cancel-all-runs --help +trace $CLI jobs cancel-run --help +trace $CLI jobs create --help +trace $CLI jobs delete --help +trace $CLI jobs delete-run --help +trace $CLI jobs export-run --help +trace $CLI jobs get --help +trace $CLI jobs get-permission-levels --help +trace $CLI jobs get-permissions --help +trace $CLI jobs get-run --help +trace $CLI jobs get-run-output --help +trace $CLI jobs list --help +trace $CLI jobs list-runs --help +trace $CLI jobs repair-run --help +trace $CLI jobs reset --help +trace $CLI jobs run-now --help +trace $CLI jobs set-permissions --help +trace $CLI jobs submit --help +trace $CLI jobs update --help +trace $CLI jobs update-permissions --help diff --git a/acceptance/help/cmd/workspace/lakeview/lakeview/output.txt b/acceptance/help/cmd/workspace/lakeview/lakeview/output.txt new file mode 100644 index 000000000..56636bbbf --- /dev/null +++ b/acceptance/help/cmd/workspace/lakeview/lakeview/output.txt @@ -0,0 +1,372 @@ + +>>> $CLI lakeview create --help +Create dashboard. + + Create a draft dashboard. + +Usage: + databricks lakeview create [flags] + +Flags: + --display-name string The display name of the dashboard. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --serialized-dashboard string The contents of the dashboard in serialized string form. + --warehouse-id string The warehouse ID used to run the dashboard. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI lakeview create-schedule --help +Create dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. + +Usage: + databricks lakeview create-schedule DASHBOARD_ID [flags] + +Flags: + --display-name string The display name for schedule. + -h, --help help for create-schedule + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --pause-status SchedulePauseStatus The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED] + --warehouse-id string The warehouse id to run the dashboard with for the schedule. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI lakeview create-subscription --help +Create schedule subscription. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs. + +Usage: + databricks lakeview create-subscription DASHBOARD_ID SCHEDULE_ID [flags] + +Flags: + -h, --help help for create-subscription + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI lakeview delete-schedule --help +Delete dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. + SCHEDULE_ID: UUID identifying the schedule. 
+
+Usage:
+  databricks lakeview delete-schedule DASHBOARD_ID SCHEDULE_ID [flags]
+
+Flags:
+  -h, --help   help for delete-schedule
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI lakeview delete-subscription --help
+Delete schedule subscription.
+
+  Arguments:
+    DASHBOARD_ID: UUID identifying the dashboard which the subscription belongs.
+    SCHEDULE_ID: UUID identifying the schedule which the subscription belongs.
+    SUBSCRIPTION_ID: UUID identifying the subscription.
+
+Usage:
+  databricks lakeview delete-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIPTION_ID [flags]
+
+Flags:
+  -h, --help   help for delete-subscription
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI lakeview get --help
+Get dashboard.
+
+  Get a draft dashboard.
+
+  Arguments:
+    DASHBOARD_ID: UUID identifying the dashboard.
+
+Usage:
+  databricks lakeview get DASHBOARD_ID [flags]
+
+Flags:
+  -h, --help   help for get
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI lakeview get-published --help
+Get published dashboard.
+
+  Get the current published dashboard.
+
+  Arguments:
+    DASHBOARD_ID: UUID identifying the published dashboard.
+
+Usage:
+  databricks lakeview get-published DASHBOARD_ID [flags]
+
+Flags:
+  -h, --help   help for get-published
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI lakeview get-schedule --help
+Get dashboard schedule.
+
+  Arguments:
+    DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.
+    SCHEDULE_ID: UUID identifying the schedule.
+
+Usage:
+  databricks lakeview get-schedule DASHBOARD_ID SCHEDULE_ID [flags]
+
+Flags:
+  -h, --help   help for get-schedule
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI lakeview get-subscription --help
+Get schedule subscription.
+
+  Arguments:
+    DASHBOARD_ID: UUID identifying the dashboard which the subscription belongs.
+    SCHEDULE_ID: UUID identifying the schedule which the subscription belongs.
+    SUBSCRIPTION_ID: UUID identifying the subscription.
+
+Usage:
+  databricks lakeview get-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIPTION_ID [flags]
+
+Flags:
+  -h, --help   help for get-subscription
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI lakeview list --help
+List dashboards.
+
+Usage:
+  databricks lakeview list [flags]
+
+Flags:
+  -h, --help                 help for list
+      --page-size int        The number of dashboards to return per page.
+      --show-trashed         The flag to include dashboards located in the trash.
+      --view DashboardView   DASHBOARD_VIEW_BASIC only includes summary metadata from the dashboard.
Supported values: [DASHBOARD_VIEW_BASIC] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI lakeview list-schedules --help +List dashboard schedules. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedules belongs. + +Usage: + databricks lakeview list-schedules DASHBOARD_ID [flags] + +Flags: + -h, --help help for list-schedules + --page-size int The number of schedules to return per page. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI lakeview list-subscriptions --help +List schedule subscriptions. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard which the subscriptions belongs. + SCHEDULE_ID: UUID identifying the schedule which the subscriptions belongs. + +Usage: + databricks lakeview list-subscriptions DASHBOARD_ID SCHEDULE_ID [flags] + +Flags: + -h, --help help for list-subscriptions + --page-size int The number of subscriptions to return per page. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI lakeview migrate --help +Migrate dashboard. + + Migrates a classic SQL dashboard to Lakeview. + + Arguments: + SOURCE_DASHBOARD_ID: UUID of the dashboard to be migrated. + +Usage: + databricks lakeview migrate SOURCE_DASHBOARD_ID [flags] + +Flags: + --display-name string Display name for the new Lakeview dashboard. + -h, --help help for migrate + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --parent-path string The workspace path of the folder to contain the migrated Lakeview dashboard. + --update-parameter-syntax Flag to indicate if mustache parameter syntax ({{ param }}) should be auto-updated to named syntax (:param) when converting datasets in the dashboard. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI lakeview publish --help +Publish dashboard. + + Publish the current draft dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to be published. + +Usage: + databricks lakeview publish DASHBOARD_ID [flags] + +Flags: + --embed-credentials Flag to indicate if the publisher's credentials should be embedded in the published dashboard. + -h, --help help for publish + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --warehouse-id string The ID of the warehouse that can be used to override the warehouse which was set in the draft. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI lakeview trash --help +Trash dashboard. + + Trash a dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard. 
+ +Usage: + databricks lakeview trash DASHBOARD_ID [flags] + +Flags: + -h, --help help for trash + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI lakeview unpublish --help +Unpublish dashboard. + + Unpublish the dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the published dashboard. + +Usage: + databricks lakeview unpublish DASHBOARD_ID [flags] + +Flags: + -h, --help help for unpublish + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI lakeview update --help +Update dashboard. + + Update a draft dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard. + +Usage: + databricks lakeview update DASHBOARD_ID [flags] + +Flags: + --display-name string The display name of the dashboard. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --serialized-dashboard string The contents of the dashboard in serialized string form. + --warehouse-id string The warehouse ID used to run the dashboard. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI lakeview update-schedule --help +Update dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. + SCHEDULE_ID: UUID identifying the schedule. + +Usage: + databricks lakeview update-schedule DASHBOARD_ID SCHEDULE_ID [flags] + +Flags: + --display-name string The display name for schedule. + -h, --help help for update-schedule + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --pause-status SchedulePauseStatus The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED] + --warehouse-id string The warehouse id to run the dashboard with for the schedule. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/lakeview/lakeview/script b/acceptance/help/cmd/workspace/lakeview/lakeview/script new file mode 100755 index 000000000..7a5dd24fc --- /dev/null +++ b/acceptance/help/cmd/workspace/lakeview/lakeview/script @@ -0,0 +1,19 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
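Taken together, the Lakeview help texts above cover the whole draft-to-published lifecycle. A rough sketch of how the traced commands compose in practice (the dashboard ID, warehouse ID, and display name below are placeholders, not values from this change):

    databricks lakeview create --display-name "Sales" --warehouse-id w123
    databricks lakeview publish d456 --embed-credentials
    databricks lakeview unpublish d456
    databricks lakeview trash d456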
+trace $CLI lakeview create --help
+trace $CLI lakeview create-schedule --help
+trace $CLI lakeview create-subscription --help
+trace $CLI lakeview delete-schedule --help
+trace $CLI lakeview delete-subscription --help
+trace $CLI lakeview get --help
+trace $CLI lakeview get-published --help
+trace $CLI lakeview get-schedule --help
+trace $CLI lakeview get-subscription --help
+trace $CLI lakeview list --help
+trace $CLI lakeview list-schedules --help
+trace $CLI lakeview list-subscriptions --help
+trace $CLI lakeview migrate --help
+trace $CLI lakeview publish --help
+trace $CLI lakeview trash --help
+trace $CLI lakeview unpublish --help
+trace $CLI lakeview update --help
+trace $CLI lakeview update-schedule --help
diff --git a/acceptance/help/cmd/workspace/libraries/libraries/output.txt b/acceptance/help/cmd/workspace/libraries/libraries/output.txt
new file mode 100644
index 000000000..0afa1a68b
--- /dev/null
+++ b/acceptance/help/cmd/workspace/libraries/libraries/output.txt
@@ -0,0 +1,83 @@
+
+>>> $CLI libraries all-cluster-statuses --help
+Get all statuses.
+
+  Get the status of all libraries on all clusters. A status is returned for all
+  libraries installed on this cluster via the API or the libraries UI.
+
+Usage:
+  databricks libraries all-cluster-statuses [flags]
+
+Flags:
+  -h, --help   help for all-cluster-statuses
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI libraries cluster-status --help
+Get status.
+
+  Get the status of libraries on a cluster. A status is returned for all
+  libraries installed on this cluster via the API or the libraries UI. The order
+  of returned libraries is as follows: 1. Libraries set to be installed on this
+  cluster, in the order that the libraries were added to the cluster, are
+  returned first. 2. Libraries that were previously requested to be installed on
+  this cluster but are now marked for removal, in no particular order, are
+  returned last.
+
+  Arguments:
+    CLUSTER_ID: Unique identifier of the cluster whose status should be retrieved.
+
+Usage:
+  databricks libraries cluster-status CLUSTER_ID [flags]
+
+Flags:
+  -h, --help   help for cluster-status
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI libraries install --help
+Add a library.
+
+  Add libraries to install on a cluster. The installation is asynchronous; it
+  happens in the background after the completion of this request.
+
+Usage:
+  databricks libraries install [flags]
+
+Flags:
+  -h, --help        help for install
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI libraries uninstall --help
+Uninstall libraries.
+
+  Set libraries to uninstall from a cluster. The libraries won't be uninstalled
+  until the cluster is restarted. A request to uninstall a library that is not
+  currently installed is ignored.
+
+Usage:
+  databricks libraries uninstall [flags]
+
+Flags:
+  -h, --help        help for uninstall
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/libraries/libraries/script b/acceptance/help/cmd/workspace/libraries/libraries/script
new file mode 100755
index 000000000..bab6606d6
--- /dev/null
+++ b/acceptance/help/cmd/workspace/libraries/libraries/script
@@ -0,0 +1,5 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI libraries all-cluster-statuses --help
+trace $CLI libraries cluster-status --help
+trace $CLI libraries install --help
+trace $CLI libraries uninstall --help
diff --git a/acceptance/help/cmd/workspace/metastores/metastores/output.txt b/acceptance/help/cmd/workspace/metastores/metastores/output.txt
new file mode 100644
index 000000000..c1e89382f
--- /dev/null
+++ b/acceptance/help/cmd/workspace/metastores/metastores/output.txt
@@ -0,0 +1,227 @@
+
+>>> $CLI metastores assign --help
+Create an assignment.
+
+  Creates a new metastore assignment. If an assignment for the same
+  __workspace_id__ exists, it will be overwritten by the new __metastore_id__
+  and __default_catalog_name__. The caller must be an account admin.
+
+  Arguments:
+    WORKSPACE_ID: A workspace ID.
+    METASTORE_ID: The unique ID of the metastore.
+    DEFAULT_CATALOG_NAME: The name of the default catalog in the metastore. This field is deprecated.
+      Please use "Default Namespace API" to configure the default catalog for a
+      Databricks workspace.
+
+Usage:
+  databricks metastores assign WORKSPACE_ID METASTORE_ID DEFAULT_CATALOG_NAME [flags]
+
+Flags:
+  -h, --help        help for assign
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI metastores create --help
+Create a metastore.
+
+  Creates a new metastore based on a provided name and optional storage root
+  path. By default (if the __owner__ field is not set), the owner of the new
+  metastore is the user calling the __createMetastore__ API. If the __owner__
+  field is set to the empty string (**""**), the ownership is assigned to the
+  System User instead.
+
+  Arguments:
+    NAME: The user-specified name of the metastore.
+
+Usage:
+  databricks metastores create NAME [flags]
+
+Flags:
+  -h, --help                  help for create
+      --json JSON             either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --region string         Cloud region which the metastore serves (e.g., us-west-2, westus).
+      --storage-root string   The storage root URL for metastore.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI metastores current --help
+Get metastore assignment for workspace.
+
+  Gets the metastore assignment for the workspace being accessed.
+ +Usage: + databricks metastores current [flags] + +Flags: + -h, --help help for current + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI metastores delete --help +Delete a metastore. + + Deletes a metastore. The caller must be a metastore admin. + + Arguments: + ID: Unique ID of the metastore. + +Usage: + databricks metastores delete ID [flags] + +Flags: + --force Force deletion even if the metastore is not empty. + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI metastores get --help +Get a metastore. + + Gets a metastore that matches the supplied ID. The caller must be a metastore + admin to retrieve this info. + + Arguments: + ID: Unique ID of the metastore. + +Usage: + databricks metastores get ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI metastores list --help +List metastores. + + Gets an array of the available metastores (as __MetastoreInfo__ objects). The + caller must be an admin to retrieve this info. There is no guarantee of a + specific ordering of the elements in the array. + +Usage: + databricks metastores list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI metastores summary --help +Get a metastore summary. + + Gets information about a metastore. This summary includes the storage + credential, the cloud vendor, the cloud region, and the global metastore ID. + +Usage: + databricks metastores summary [flags] + +Flags: + -h, --help help for summary + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI metastores unassign --help +Delete an assignment. + + Deletes a metastore assignment. The caller must be an account administrator. + + Arguments: + WORKSPACE_ID: A workspace ID. + METASTORE_ID: Query for the ID of the metastore to delete. + +Usage: + databricks metastores unassign WORKSPACE_ID METASTORE_ID [flags] + +Flags: + -h, --help help for unassign + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI metastores update --help +Update a metastore. + + Updates information for a specific metastore. The caller must be a metastore + admin. If the __owner__ field is set to the empty string (**""**), the + ownership is updated to the System User. + + Arguments: + ID: Unique ID of the metastore. 
+ +Usage: + databricks metastores update ID [flags] + +Flags: + --delta-sharing-organization-name string The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name. + --delta-sharing-recipient-token-lifetime-in-seconds int The lifetime of delta sharing recipient token in seconds. + --delta-sharing-scope UpdateMetastoreDeltaSharingScope The scope of Delta Sharing enabled for the metastore. Supported values: [INTERNAL, INTERNAL_AND_EXTERNAL] + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --new-name string New name for the metastore. + --owner string The owner of the metastore. + --privilege-model-version string Privilege model version of the metastore, of the form major.minor (e.g., 1.0). + --storage-root-credential-id string UUID of storage credential to access the metastore storage_root. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI metastores update-assignment --help +Update an assignment. + + Updates a metastore assignment. This operation can be used to update + __metastore_id__ or __default_catalog_name__ for a specified Workspace, if the + Workspace is already assigned a metastore. The caller must be an account admin + to update __metastore_id__; otherwise, the caller can be a Workspace admin. + + Arguments: + WORKSPACE_ID: A workspace ID. + +Usage: + databricks metastores update-assignment WORKSPACE_ID [flags] + +Flags: + --default-catalog-name string The name of the default catalog in the metastore. + -h, --help help for update-assignment + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --metastore-id string The unique ID of the metastore. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/metastores/metastores/script b/acceptance/help/cmd/workspace/metastores/metastores/script new file mode 100755 index 000000000..8a576dc11 --- /dev/null +++ b/acceptance/help/cmd/workspace/metastores/metastores/script @@ -0,0 +1,11 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI metastores assign --help +trace $CLI metastores create --help +trace $CLI metastores current --help +trace $CLI metastores delete --help +trace $CLI metastores get --help +trace $CLI metastores list --help +trace $CLI metastores summary --help +trace $CLI metastores unassign --help +trace $CLI metastores update --help +trace $CLI metastores update-assignment --help diff --git a/acceptance/help/cmd/workspace/model-registry/model-registry/output.txt b/acceptance/help/cmd/workspace/model-registry/model-registry/output.txt new file mode 100644 index 000000000..e019046ff --- /dev/null +++ b/acceptance/help/cmd/workspace/model-registry/model-registry/output.txt @@ -0,0 +1,874 @@ + +>>> $CLI model-registry approve-transition-request --help +Approve transition request. + + Approves a model version stage transition request. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. + STAGE: Target stage of the transition. 
Valid values are: + + * None: The initial stage of a model version. + + * Staging: Staging or pre-production stage. + + * Production: Production stage. + + * Archived: Archived stage. + ARCHIVE_EXISTING_VERSIONS: Specifies whether to archive all current model versions in the target + stage. + +Usage: + databricks model-registry approve-transition-request NAME VERSION STAGE ARCHIVE_EXISTING_VERSIONS [flags] + +Flags: + --comment string User-provided comment on the action. + -h, --help help for approve-transition-request + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry create-comment --help +Post a comment. + + Posts a comment on a model version. A comment can be submitted either by a + user or programmatically to display relevant information about the model. For + example, test results or deployment errors. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. + COMMENT: User-provided comment on the action. + +Usage: + databricks model-registry create-comment NAME VERSION COMMENT [flags] + +Flags: + -h, --help help for create-comment + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry create-model --help +Create a model. + + Creates a new registered model with the name specified in the request body. + + Throws RESOURCE_ALREADY_EXISTS if a registered model with the given name + exists. + + Arguments: + NAME: Register models under this name + +Usage: + databricks model-registry create-model NAME [flags] + +Flags: + --description string Optional description for registered model. + -h, --help help for create-model + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry create-model-version --help +Create a model version. + + Creates a model version. + + Arguments: + NAME: Register model under this name + SOURCE: URI indicating the location of the model artifacts. + +Usage: + databricks model-registry create-model-version NAME SOURCE [flags] + +Flags: + --description string Optional description for model version. + -h, --help help for create-model-version + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --run-id string MLflow run ID for correlation, if source was generated by an experiment run in MLflow tracking server. + --run-link string MLflow run link - this is the exact link of the run that generated this model version, potentially hosted at another instance of MLflow. 
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry create-transition-request --help +Make a transition request. + + Creates a model version stage transition request. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. + STAGE: Target stage of the transition. Valid values are: + + * None: The initial stage of a model version. + + * Staging: Staging or pre-production stage. + + * Production: Production stage. + + * Archived: Archived stage. + +Usage: + databricks model-registry create-transition-request NAME VERSION STAGE [flags] + +Flags: + --comment string User-provided comment on the action. + -h, --help help for create-transition-request + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry create-webhook --help +Create a webhook. + + **NOTE**: This endpoint is in Public Preview. + + Creates a registry webhook. + +Usage: + databricks model-registry create-webhook [flags] + +Flags: + --description string User-specified description for the webhook. + -h, --help help for create-webhook + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --model-name string Name of the model whose events would trigger this webhook. + --status RegistryWebhookStatus Enable or disable triggering the webhook, or put the webhook into test mode. Supported values: [ACTIVE, DISABLED, TEST_MODE] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry delete-comment --help +Delete a comment. + + Deletes a comment on a model version. + +Usage: + databricks model-registry delete-comment ID [flags] + +Flags: + -h, --help help for delete-comment + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry delete-model --help +Delete a model. + + Deletes a registered model. + + Arguments: + NAME: Registered model unique name identifier. + +Usage: + databricks model-registry delete-model NAME [flags] + +Flags: + -h, --help help for delete-model + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry delete-model-tag --help +Delete a model tag. + + Deletes the tag for a registered model. + + Arguments: + NAME: Name of the registered model that the tag was logged under. + KEY: Name of the tag. The name must be an exact match; wild-card deletion is + not supported. Maximum size is 250 bytes. 
+ +Usage: + databricks model-registry delete-model-tag NAME KEY [flags] + +Flags: + -h, --help help for delete-model-tag + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry delete-model-version --help +Delete a model version. + + Deletes a model version. + + Arguments: + NAME: Name of the registered model + VERSION: Model version number + +Usage: + databricks model-registry delete-model-version NAME VERSION [flags] + +Flags: + -h, --help help for delete-model-version + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry delete-model-version-tag --help +Delete a model version tag. + + Deletes a model version tag. + + Arguments: + NAME: Name of the registered model that the tag was logged under. + VERSION: Model version number that the tag was logged under. + KEY: Name of the tag. The name must be an exact match; wild-card deletion is + not supported. Maximum size is 250 bytes. + +Usage: + databricks model-registry delete-model-version-tag NAME VERSION KEY [flags] + +Flags: + -h, --help help for delete-model-version-tag + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry delete-transition-request --help +Delete a transition request. + + Cancels a model version stage transition request. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. + STAGE: Target stage of the transition request. Valid values are: + + * None: The initial stage of a model version. + + * Staging: Staging or pre-production stage. + + * Production: Production stage. + + * Archived: Archived stage. + CREATOR: Username of the user who created this request. Of the transition requests + matching the specified details, only the one transition created by this + user will be deleted. + +Usage: + databricks model-registry delete-transition-request NAME VERSION STAGE CREATOR [flags] + +Flags: + --comment string User-provided comment on the action. + -h, --help help for delete-transition-request + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry delete-webhook --help +Delete a webhook. + + **NOTE:** This endpoint is in Public Preview. + + Deletes a registry webhook. + +Usage: + databricks model-registry delete-webhook [flags] + +Flags: + -h, --help help for delete-webhook + --id string Webhook ID required to delete a registry webhook. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry get-latest-versions --help +Get the latest version. + + Gets the latest version of a registered model. + + Arguments: + NAME: Registered model unique name identifier. 
+ +Usage: + databricks model-registry get-latest-versions NAME [flags] + +Flags: + -h, --help help for get-latest-versions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry get-model --help +Get model. + + Get the details of a model. This is a Databricks workspace version of the + [MLflow endpoint] that also returns the model's Databricks workspace ID and + the permission level of the requesting user on the model. + + [MLflow endpoint]: https://www.mlflow.org/docs/latest/rest-api.html#get-registeredmodel + + Arguments: + NAME: Registered model unique name identifier. + +Usage: + databricks model-registry get-model NAME [flags] + +Flags: + -h, --help help for get-model + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry get-model-version --help +Get a model version. + + Get a model version. + + Arguments: + NAME: Name of the registered model + VERSION: Model version number + +Usage: + databricks model-registry get-model-version NAME VERSION [flags] + +Flags: + -h, --help help for get-model-version + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry get-model-version-download-uri --help +Get a model version URI. + + Gets a URI to download the model version. + + Arguments: + NAME: Name of the registered model + VERSION: Model version number + +Usage: + databricks model-registry get-model-version-download-uri NAME VERSION [flags] + +Flags: + -h, --help help for get-model-version-download-uri + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry get-permission-levels --help +Get registered model permission levels. + + Gets the permission levels that a user can have on an object. + + Arguments: + REGISTERED_MODEL_ID: The registered model for which to get or manage permissions. + +Usage: + databricks model-registry get-permission-levels REGISTERED_MODEL_ID [flags] + +Flags: + -h, --help help for get-permission-levels + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry get-permissions --help +Get registered model permissions. + + Gets the permissions of a registered model. Registered models can inherit + permissions from their root object. + + Arguments: + REGISTERED_MODEL_ID: The registered model for which to get or manage permissions. 
+ +Usage: + databricks model-registry get-permissions REGISTERED_MODEL_ID [flags] + +Flags: + -h, --help help for get-permissions + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry list-models --help +List models. + + Lists all available registered models, up to the limit specified in + __max_results__. + +Usage: + databricks model-registry list-models [flags] + +Flags: + -h, --help help for list-models + --max-results int Maximum number of registered models desired. + --page-token string Pagination token to go to the next page based on a previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry list-transition-requests --help +List transition requests. + + Gets a list of all open stage transition requests for the model version. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. + +Usage: + databricks model-registry list-transition-requests NAME VERSION [flags] + +Flags: + -h, --help help for list-transition-requests + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry list-webhooks --help +List registry webhooks. + + **NOTE:** This endpoint is in Public Preview. + + Lists all registry webhooks. + +Usage: + databricks model-registry list-webhooks [flags] + +Flags: + -h, --help help for list-webhooks + --model-name string If not specified, all webhooks associated with the specified events are listed, regardless of their associated model. + --page-token string Token indicating the page of artifact results to fetch. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry reject-transition-request --help +Reject a transition request. + + Rejects a model version stage transition request. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. + STAGE: Target stage of the transition. Valid values are: + + * None: The initial stage of a model version. + + * Staging: Staging or pre-production stage. + + * Production: Production stage. + + * Archived: Archived stage. + +Usage: + databricks model-registry reject-transition-request NAME VERSION STAGE [flags] + +Flags: + --comment string User-provided comment on the action. + -h, --help help for reject-transition-request + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry rename-model --help +Rename a model. + + Renames a registered model. + + Arguments: + NAME: Registered model unique name identifier. 
+ +Usage: + databricks model-registry rename-model NAME [flags] + +Flags: + -h, --help help for rename-model + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --new-name string If provided, updates the name for this registered_model. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry search-model-versions --help +Searches model versions. + + Searches for specific model versions based on the supplied __filter__. + +Usage: + databricks model-registry search-model-versions [flags] + +Flags: + --filter string String filter condition, like "name='my-model-name'". + -h, --help help for search-model-versions + --max-results int Maximum number of models desired. + --page-token string Pagination token to go to next page based on previous search query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry search-models --help +Search models. + + Search for registered models based on the specified __filter__. + +Usage: + databricks model-registry search-models [flags] + +Flags: + --filter string String filter condition, like "name LIKE 'my-model-name'". + -h, --help help for search-models + --max-results int Maximum number of models desired. + --page-token string Pagination token to go to the next page based on a previous search query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry set-model-tag --help +Set a tag. + + Sets a tag on a registered model. + + Arguments: + NAME: Unique name of the model. + KEY: Name of the tag. Maximum size depends on storage backend. If a tag with + this name already exists, its preexisting value will be replaced by the + specified value. All storage backends are guaranteed to support key + values up to 250 bytes in size. + VALUE: String value of the tag being logged. Maximum size depends on storage + backend. All storage backends are guaranteed to support key values up to + 5000 bytes in size. + +Usage: + databricks model-registry set-model-tag NAME KEY VALUE [flags] + +Flags: + -h, --help help for set-model-tag + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry set-model-version-tag --help +Set a version tag. + + Sets a model version tag. + + Arguments: + NAME: Unique name of the model. + VERSION: Model version number. + KEY: Name of the tag. Maximum size depends on storage backend. If a tag with + this name already exists, its preexisting value will be replaced by the + specified value. All storage backends are guaranteed to support key + values up to 250 bytes in size. + VALUE: String value of the tag being logged. Maximum size depends on storage + backend. All storage backends are guaranteed to support key values up to + 5000 bytes in size. 
+
+Usage:
+  databricks model-registry set-model-version-tag NAME VERSION KEY VALUE [flags]
+
+Flags:
+  -h, --help        help for set-model-version-tag
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI model-registry set-permissions --help
+Set registered model permissions.
+
+  Sets permissions on an object, replacing existing permissions if they exist.
+  Deletes all direct permissions if none are specified. Objects can inherit
+  permissions from their root object.
+
+  Arguments:
+    REGISTERED_MODEL_ID: The registered model for which to get or manage permissions.
+
+Usage:
+  databricks model-registry set-permissions REGISTERED_MODEL_ID [flags]
+
+Flags:
+  -h, --help        help for set-permissions
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI model-registry test-registry-webhook --help
+Test a webhook.
+
+  **NOTE:** This endpoint is in Public Preview.
+
+  Tests a registry webhook.
+
+  Arguments:
+    ID: Webhook ID
+
+Usage:
+  databricks model-registry test-registry-webhook ID [flags]
+
+Flags:
+      --event RegistryWebhookEvent   If event is specified, the test trigger uses the specified event. Supported values: [
+                                     COMMENT_CREATED,
+                                     MODEL_VERSION_CREATED,
+                                     MODEL_VERSION_TAG_SET,
+                                     MODEL_VERSION_TRANSITIONED_STAGE,
+                                     MODEL_VERSION_TRANSITIONED_TO_ARCHIVED,
+                                     MODEL_VERSION_TRANSITIONED_TO_PRODUCTION,
+                                     MODEL_VERSION_TRANSITIONED_TO_STAGING,
+                                     REGISTERED_MODEL_CREATED,
+                                     TRANSITION_REQUEST_CREATED,
+                                     TRANSITION_REQUEST_TO_ARCHIVED_CREATED,
+                                     TRANSITION_REQUEST_TO_PRODUCTION_CREATED,
+                                     TRANSITION_REQUEST_TO_STAGING_CREATED,
+                                     ]
+  -h, --help        help for test-registry-webhook
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI model-registry transition-stage --help
+Transition a stage.
+
+  Transition a model version's stage. This is a Databricks workspace version of
+  the [MLflow endpoint] that also accepts a comment associated with the
+  transition to be recorded.
+
+  [MLflow endpoint]: https://www.mlflow.org/docs/latest/rest-api.html#transition-modelversion-stage
+
+  Arguments:
+    NAME: Name of the model.
+    VERSION: Version of the model.
+    STAGE: Target stage of the transition. Valid values are:
+
+      * None: The initial stage of a model version.
+
+      * Staging: Staging or pre-production stage.
+
+      * Production: Production stage.
+
+      * Archived: Archived stage.
+    ARCHIVE_EXISTING_VERSIONS: Specifies whether to archive all current model versions in the target
+      stage.
+
+Usage:
+  databricks model-registry transition-stage NAME VERSION STAGE ARCHIVE_EXISTING_VERSIONS [flags]
+
+Flags:
+      --comment string   User-provided comment on the action.
+ -h, --help help for transition-stage + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry update-comment --help +Update a comment. + + Post an edit to a comment on a model version. + + Arguments: + ID: Unique identifier of an activity + COMMENT: User-provided comment on the action. + +Usage: + databricks model-registry update-comment ID COMMENT [flags] + +Flags: + -h, --help help for update-comment + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry update-model --help +Update model. + + Updates a registered model. + + Arguments: + NAME: Registered model unique name identifier. + +Usage: + databricks model-registry update-model NAME [flags] + +Flags: + --description string If provided, updates the description for this registered_model. + -h, --help help for update-model + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry update-model-version --help +Update model version. + + Updates the model version. + + Arguments: + NAME: Name of the registered model + VERSION: Model version number + +Usage: + databricks model-registry update-model-version NAME VERSION [flags] + +Flags: + --description string If provided, updates the description for this registered_model. + -h, --help help for update-model-version + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry update-permissions --help +Update registered model permissions. + + Updates the permissions on a registered model. Registered models can inherit + permissions from their root object. + + Arguments: + REGISTERED_MODEL_ID: The registered model for which to get or manage permissions. + +Usage: + databricks model-registry update-permissions REGISTERED_MODEL_ID [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-registry update-webhook --help +Update a webhook. + + **NOTE:** This endpoint is in Public Preview. + + Updates a registry webhook. + + Arguments: + ID: Webhook ID + +Usage: + databricks model-registry update-webhook ID [flags] + +Flags: + --description string User-specified description for the webhook. 
+ -h, --help help for update-webhook + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --status RegistryWebhookStatus Enable or disable triggering the webhook, or put the webhook into test mode. Supported values: [ACTIVE, DISABLED, TEST_MODE] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/model-registry/model-registry/script b/acceptance/help/cmd/workspace/model-registry/model-registry/script new file mode 100755 index 000000000..e36099657 --- /dev/null +++ b/acceptance/help/cmd/workspace/model-registry/model-registry/script @@ -0,0 +1,37 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI model-registry approve-transition-request --help +trace $CLI model-registry create-comment --help +trace $CLI model-registry create-model --help +trace $CLI model-registry create-model-version --help +trace $CLI model-registry create-transition-request --help +trace $CLI model-registry create-webhook --help +trace $CLI model-registry delete-comment --help +trace $CLI model-registry delete-model --help +trace $CLI model-registry delete-model-tag --help +trace $CLI model-registry delete-model-version --help +trace $CLI model-registry delete-model-version-tag --help +trace $CLI model-registry delete-transition-request --help +trace $CLI model-registry delete-webhook --help +trace $CLI model-registry get-latest-versions --help +trace $CLI model-registry get-model --help +trace $CLI model-registry get-model-version --help +trace $CLI model-registry get-model-version-download-uri --help +trace $CLI model-registry get-permission-levels --help +trace $CLI model-registry get-permissions --help +trace $CLI model-registry list-models --help +trace $CLI model-registry list-transition-requests --help +trace $CLI model-registry list-webhooks --help +trace $CLI model-registry reject-transition-request --help +trace $CLI model-registry rename-model --help +trace $CLI model-registry search-model-versions --help +trace $CLI model-registry search-models --help +trace $CLI model-registry set-model-tag --help +trace $CLI model-registry set-model-version-tag --help +trace $CLI model-registry set-permissions --help +trace $CLI model-registry test-registry-webhook --help +trace $CLI model-registry transition-stage --help +trace $CLI model-registry update-comment --help +trace $CLI model-registry update-model --help +trace $CLI model-registry update-model-version --help +trace $CLI model-registry update-permissions --help +trace $CLI model-registry update-webhook --help diff --git a/acceptance/help/cmd/workspace/model-versions/model-versions/output.txt b/acceptance/help/cmd/workspace/model-versions/model-versions/output.txt new file mode 100644 index 000000000..dcd29b833 --- /dev/null +++ b/acceptance/help/cmd/workspace/model-versions/model-versions/output.txt @@ -0,0 +1,148 @@ + +>>> $CLI model-versions delete --help +Delete a Model Version. + + Deletes a model version from the specified registered model. Any aliases + assigned to the model version will also be deleted. + + The caller must be a metastore admin or an owner of the parent registered + model. 
For the latter case, the caller must also be the owner or have the
+  **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
+  privilege on the parent schema.
+
+  Arguments:
+    FULL_NAME: The three-level (fully qualified) name of the model version
+    VERSION: The integer version number of the model version
+
+Usage:
+  databricks model-versions delete FULL_NAME VERSION [flags]
+
+Flags:
+  -h, --help   help for delete
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI model-versions get --help
+Get a Model Version.
+
+  Get a model version.
+
+  The caller must be a metastore admin or an owner of (or have the **EXECUTE**
+  privilege on) the parent registered model. For the latter case, the caller
+  must also be the owner or have the **USE_CATALOG** privilege on the parent
+  catalog and the **USE_SCHEMA** privilege on the parent schema.
+
+  Arguments:
+    FULL_NAME: The three-level (fully qualified) name of the model version
+    VERSION: The integer version number of the model version
+
+Usage:
+  databricks model-versions get FULL_NAME VERSION [flags]
+
+Flags:
+  -h, --help             help for get
+      --include-aliases   Whether to include aliases associated with the model version in the response.
+      --include-browse    Whether to include model versions in the response for which the principal can only access selective metadata for.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI model-versions get-by-alias --help
+Get Model Version By Alias.
+
+  Get a model version by alias.
+
+  The caller must be a metastore admin or an owner of (or have the **EXECUTE**
+  privilege on) the registered model. For the latter case, the caller must also
+  be the owner or have the **USE_CATALOG** privilege on the parent catalog and
+  the **USE_SCHEMA** privilege on the parent schema.
+
+  Arguments:
+    FULL_NAME: The three-level (fully qualified) name of the registered model
+    ALIAS: The name of the alias
+
+Usage:
+  databricks model-versions get-by-alias FULL_NAME ALIAS [flags]
+
+Flags:
+  -h, --help              help for get-by-alias
+      --include-aliases   Whether to include aliases associated with the model version in the response.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI model-versions list --help
+List Model Versions.
+
+  List model versions. You can list model versions under a particular schema, or
+  list all model versions in the current metastore.
+
+  The returned models are filtered based on the privileges of the calling user.
+  For example, the metastore admin is able to list all the model versions. A
+  regular user needs to be the owner or have the **EXECUTE** privilege on the
+  parent registered model to receive the model versions in the response. For the
+  latter case, the caller must also be the owner or have the **USE_CATALOG**
+  privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent
+  schema.
+
+  There is no guarantee of a specific ordering of the elements in the response.
+  The elements in the response will not contain any aliases or tags.
+ + Arguments: + FULL_NAME: The full three-level name of the registered model under which to list + model versions + +Usage: + databricks model-versions list FULL_NAME [flags] + +Flags: + -h, --help help for list + --include-browse Whether to include model versions in the response for which the principal can only access selective metadata for. + --max-results int Maximum number of model versions to return. + --page-token string Opaque pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI model-versions update --help +Update a Model Version. + + Updates the specified model version. + + The caller must be a metastore admin or an owner of the parent registered + model. For the latter case, the caller must also be the owner or have the + **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + privilege on the parent schema. + + Currently only the comment of the model version can be updated. + + Arguments: + FULL_NAME: The three-level (fully qualified) name of the model version + VERSION: The integer version number of the model version + +Usage: + databricks model-versions update FULL_NAME VERSION [flags] + +Flags: + --comment string The comment attached to the model version. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/model-versions/model-versions/script b/acceptance/help/cmd/workspace/model-versions/model-versions/script new file mode 100755 index 000000000..0270d58cb --- /dev/null +++ b/acceptance/help/cmd/workspace/model-versions/model-versions/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI model-versions delete --help +trace $CLI model-versions get --help +trace $CLI model-versions get-by-alias --help +trace $CLI model-versions list --help +trace $CLI model-versions update --help diff --git a/acceptance/help/cmd/workspace/notification-destinations/notification-destinations/output.txt b/acceptance/help/cmd/workspace/notification-destinations/notification-destinations/output.txt new file mode 100644 index 000000000..fedac0f38 --- /dev/null +++ b/acceptance/help/cmd/workspace/notification-destinations/notification-destinations/output.txt @@ -0,0 +1,95 @@ + +>>> $CLI notification-destinations create --help +Create a notification destination. + + Creates a notification destination. Requires workspace admin permissions. + +Usage: + databricks notification-destinations create [flags] + +Flags: + --display-name string The display name for the notification destination. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI notification-destinations delete --help +Delete a notification destination. + + Deletes a notification destination. 
Requires workspace admin permissions. + +Usage: + databricks notification-destinations delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI notification-destinations get --help +Get a notification destination. + + Gets a notification destination. + +Usage: + databricks notification-destinations get ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI notification-destinations list --help +List notification destinations. + + Lists notification destinations. + +Usage: + databricks notification-destinations list [flags] + +Flags: + -h, --help help for list + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI notification-destinations update --help +Update a notification destination. + + Updates a notification destination. Requires workspace admin permissions. At + least one field is required in the request body. + + Arguments: + ID: UUID identifying notification destination. + +Usage: + databricks notification-destinations update ID [flags] + +Flags: + --display-name string The display name for the notification destination. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/notification-destinations/notification-destinations/script b/acceptance/help/cmd/workspace/notification-destinations/notification-destinations/script new file mode 100755 index 000000000..0177c58ad --- /dev/null +++ b/acceptance/help/cmd/workspace/notification-destinations/notification-destinations/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI notification-destinations create --help +trace $CLI notification-destinations delete --help +trace $CLI notification-destinations get --help +trace $CLI notification-destinations list --help +trace $CLI notification-destinations update --help diff --git a/acceptance/help/cmd/workspace/online-tables/online-tables/output.txt b/acceptance/help/cmd/workspace/online-tables/online-tables/output.txt new file mode 100644 index 000000000..e5bc3e359 --- /dev/null +++ b/acceptance/help/cmd/workspace/online-tables/online-tables/output.txt @@ -0,0 +1,63 @@ + +>>> $CLI online-tables create --help +Create an Online Table. + + Create a new Online Table. + +Usage: + databricks online-tables create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --name string Full three-part (catalog, schema, table) name of the table. 
+ --no-wait do not wait to reach ACTIVE state + --timeout duration maximum amount of time to reach ACTIVE state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI online-tables delete --help +Delete an Online Table. + + Delete an online table. Warning: This will delete all the data in the online + table. If the source Delta table was deleted or modified since this Online + Table was created, this will lose the data forever! + + Arguments: + NAME: Full three-part (catalog, schema, table) name of the table. + +Usage: + databricks online-tables delete NAME [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI online-tables get --help +Get an Online Table. + + Get information about an existing online table and its status. + + Arguments: + NAME: Full three-part (catalog, schema, table) name of the table. + +Usage: + databricks online-tables get NAME [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/online-tables/online-tables/script b/acceptance/help/cmd/workspace/online-tables/online-tables/script new file mode 100755 index 000000000..7fc0e6412 --- /dev/null +++ b/acceptance/help/cmd/workspace/online-tables/online-tables/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI online-tables create --help +trace $CLI online-tables delete --help +trace $CLI online-tables get --help diff --git a/acceptance/help/cmd/workspace/permission-migration/permission-migration/output.txt b/acceptance/help/cmd/workspace/permission-migration/permission-migration/output.txt new file mode 100644 index 000000000..d41420ab1 --- /dev/null +++ b/acceptance/help/cmd/workspace/permission-migration/permission-migration/output.txt @@ -0,0 +1,23 @@ + +>>> $CLI permission-migration migrate-permissions --help +Migrate Permissions. + + Arguments: + WORKSPACE_ID: WorkspaceId of the associated workspace where the permission migration + will occur. + FROM_WORKSPACE_GROUP_NAME: The name of the workspace group that permissions will be migrated from. + TO_ACCOUNT_GROUP_NAME: The name of the account group that permissions will be migrated to. + +Usage: + databricks permission-migration migrate-permissions WORKSPACE_ID FROM_WORKSPACE_GROUP_NAME TO_ACCOUNT_GROUP_NAME [flags] + +Flags: + -h, --help help for migrate-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --size int The maximum number of permissions that will be migrated. 
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/permission-migration/permission-migration/script b/acceptance/help/cmd/workspace/permission-migration/permission-migration/script new file mode 100755 index 000000000..9ad7aaf6a --- /dev/null +++ b/acceptance/help/cmd/workspace/permission-migration/permission-migration/script @@ -0,0 +1,2 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI permission-migration migrate-permissions --help diff --git a/acceptance/help/cmd/workspace/permissions/permissions/output.txt b/acceptance/help/cmd/workspace/permissions/permissions/output.txt new file mode 100644 index 000000000..b3d211ad6 --- /dev/null +++ b/acceptance/help/cmd/workspace/permissions/permissions/output.txt @@ -0,0 +1,102 @@ + +>>> $CLI permissions get --help +Get object permissions. + + Gets the permissions of an object. Objects can inherit permissions from their + parent objects or root object. + + Arguments: + REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts, + authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, + directories, experiments, files, instance-pools, jobs, notebooks, + pipelines, queries, registered-models, repos, serving-endpoints, or + warehouses. + REQUEST_OBJECT_ID: The id of the request object. + +Usage: + databricks permissions get REQUEST_OBJECT_TYPE REQUEST_OBJECT_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI permissions get-permission-levels --help +Get object permission levels. + + Gets the permission levels that a user can have on an object. + + Arguments: + REQUEST_OBJECT_TYPE: + REQUEST_OBJECT_ID: + +Usage: + databricks permissions get-permission-levels REQUEST_OBJECT_TYPE REQUEST_OBJECT_ID [flags] + +Flags: + -h, --help help for get-permission-levels + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI permissions set --help +Set object permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their parent objects or root object. + + Arguments: + REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts, + authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, + directories, experiments, files, instance-pools, jobs, notebooks, + pipelines, queries, registered-models, repos, serving-endpoints, or + warehouses. + REQUEST_OBJECT_ID: The id of the request object. 
+ +Usage: + databricks permissions set REQUEST_OBJECT_TYPE REQUEST_OBJECT_ID [flags] + +Flags: + -h, --help help for set + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI permissions update --help +Update object permissions. + + Updates the permissions on an object. Objects can inherit permissions from + their parent objects or root object. + + Arguments: + REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts, + authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, + directories, experiments, files, instance-pools, jobs, notebooks, + pipelines, queries, registered-models, repos, serving-endpoints, or + warehouses. + REQUEST_OBJECT_ID: The id of the request object. + +Usage: + databricks permissions update REQUEST_OBJECT_TYPE REQUEST_OBJECT_ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/permissions/permissions/script b/acceptance/help/cmd/workspace/permissions/permissions/script new file mode 100755 index 000000000..d401f1cb8 --- /dev/null +++ b/acceptance/help/cmd/workspace/permissions/permissions/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI permissions get --help +trace $CLI permissions get-permission-levels --help +trace $CLI permissions set --help +trace $CLI permissions update --help diff --git a/acceptance/help/cmd/workspace/pipelines/pipelines/output.txt b/acceptance/help/cmd/workspace/pipelines/pipelines/output.txt new file mode 100644 index 000000000..49fe02e40 --- /dev/null +++ b/acceptance/help/cmd/workspace/pipelines/pipelines/output.txt @@ -0,0 +1,308 @@ + +>>> $CLI pipelines create --help +Create a pipeline. + + Creates a new data processing pipeline based on the requested configuration. + If successful, this method returns the ID of the new pipeline. + +Usage: + databricks pipelines create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI pipelines delete --help +Delete a pipeline. + + Deletes a pipeline. + +Usage: + databricks pipelines delete PIPELINE_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI pipelines get --help +Get a pipeline. 
+ +Usage: + databricks pipelines get PIPELINE_ID [flags] + +Flags: + -h, --help help for get + --no-wait do not wait to reach RUNNING state + --timeout duration maximum amount of time to reach RUNNING state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI pipelines get-permission-levels --help +Get pipeline permission levels. + + Gets the permission levels that a user can have on an object. + + Arguments: + PIPELINE_ID: The pipeline for which to get or manage permissions. + +Usage: + databricks pipelines get-permission-levels PIPELINE_ID [flags] + +Flags: + -h, --help help for get-permission-levels + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI pipelines get-permissions --help +Get pipeline permissions. + + Gets the permissions of a pipeline. Pipelines can inherit permissions from + their root object. + + Arguments: + PIPELINE_ID: The pipeline for which to get or manage permissions. + +Usage: + databricks pipelines get-permissions PIPELINE_ID [flags] + +Flags: + -h, --help help for get-permissions + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI pipelines get-update --help +Get a pipeline update. + + Gets an update from an active pipeline. + + Arguments: + PIPELINE_ID: The ID of the pipeline. + UPDATE_ID: The ID of the update. + +Usage: + databricks pipelines get-update PIPELINE_ID UPDATE_ID [flags] + +Flags: + -h, --help help for get-update + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI pipelines list-pipeline-events --help +List pipeline events. + + Retrieves events for a pipeline. + +Usage: + databricks pipelines list-pipeline-events PIPELINE_ID [flags] + +Flags: + --filter string Criteria to select a subset of results, expressed using a SQL-like syntax. + -h, --help help for list-pipeline-events + --max-results int Max number of entries to return in a single page. + --page-token string Page token returned by previous call. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI pipelines list-pipelines --help +List pipelines. + + Lists pipelines defined in the Delta Live Tables system. + +Usage: + databricks pipelines list-pipelines [flags] + +Flags: + --filter string Select a subset of results based on the specified criteria. + -h, --help help for list-pipelines + --max-results int The maximum number of entries to return in a single page. + --page-token string Page token returned by previous call. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI pipelines list-updates --help +List pipeline updates. 
+
+ List updates for an active pipeline.
+
+ Arguments:
+ PIPELINE_ID: The pipeline to return updates for.
+
+Usage:
+ databricks pipelines list-updates PIPELINE_ID [flags]
+
+Flags:
+ -h, --help help for list-updates
+ --max-results int Max number of entries to return in a single page.
+ --page-token string Page token returned by previous call.
+ --until-update-id string If present, returns updates until and including this update_id.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI pipelines set-permissions --help
+Set pipeline permissions.
+
+ Sets permissions on an object, replacing existing permissions if they exist.
+ Deletes all direct permissions if none are specified. Objects can inherit
+ permissions from their root object.
+
+ Arguments:
+ PIPELINE_ID: The pipeline for which to get or manage permissions.
+
+Usage:
+ databricks pipelines set-permissions PIPELINE_ID [flags]
+
+Flags:
+ -h, --help help for set-permissions
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI pipelines start-update --help
+Start a pipeline.
+
+ Starts a new update for the pipeline. If there is already an active update for
+ the pipeline, the request will fail and the active update will remain running.
+
+Usage:
+ databricks pipelines start-update PIPELINE_ID [flags]
+
+Flags:
+ --cause StartUpdateCause . Supported values: [
+ API_CALL,
+ JOB_TASK,
+ RETRY_ON_FAILURE,
+ SCHEMA_CHANGE,
+ SERVICE_UPGRADE,
+ USER_ACTION,
+ ]
+ --full-refresh If true, this update will reset all tables before running.
+ -h, --help help for start-update
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --validate-only If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI pipelines stop --help
+Stop a pipeline.
+
+ Stops the pipeline by canceling the active update. If there is no active
+ update for the pipeline, this request is a no-op.
+
+Usage:
+ databricks pipelines stop PIPELINE_ID [flags]
+
+Flags:
+ -h, --help help for stop
+ --no-wait do not wait to reach IDLE state
+ --timeout duration maximum amount of time to reach IDLE state (default 20m0s)
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI pipelines update --help
+Edit a pipeline.
+
+ Updates a pipeline with the supplied configuration.
+
+ Arguments:
+ PIPELINE_ID: Unique identifier for this pipeline.
+
+Usage:
+ databricks pipelines update PIPELINE_ID [flags]
+
+Flags:
+ --allow-duplicate-names If false, deployment will fail if name has changed and conflicts with the name of another pipeline.
+ --budget-policy-id string Budget policy of this pipeline.
+ --catalog string A catalog in Unity Catalog to publish data from this pipeline to. + --channel string DLT Release Channel that specifies which version to use. + --continuous Whether the pipeline is continuous or triggered. + --development Whether the pipeline is in Development mode. + --edition string Pipeline product edition. + --expected-last-modified int If present, the last-modified time of the pipeline settings before the edit. + -h, --help help for update + --id string Unique identifier for this pipeline. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --name string Friendly identifier for this pipeline. + --photon Whether Photon is enabled for this pipeline. + --pipeline-id string Unique identifier for this pipeline. + --schema string The default schema (database) where tables are read from or published to. + --serverless Whether serverless compute is enabled for this pipeline. + --storage string DBFS root directory for storing checkpoints and tables. + --target string Target schema (database) to add tables in this pipeline to. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + +>>> $CLI pipelines update-permissions --help +Update pipeline permissions. + + Updates the permissions on a pipeline. Pipelines can inherit permissions from + their root object. + + Arguments: + PIPELINE_ID: The pipeline for which to get or manage permissions. + +Usage: + databricks pipelines update-permissions PIPELINE_ID [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/pipelines/pipelines/script b/acceptance/help/cmd/workspace/pipelines/pipelines/script new file mode 100755 index 000000000..1d533d113 --- /dev/null +++ b/acceptance/help/cmd/workspace/pipelines/pipelines/script @@ -0,0 +1,15 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI pipelines create --help +trace $CLI pipelines delete --help +trace $CLI pipelines get --help +trace $CLI pipelines get-permission-levels --help +trace $CLI pipelines get-permissions --help +trace $CLI pipelines get-update --help +trace $CLI pipelines list-pipeline-events --help +trace $CLI pipelines list-pipelines --help +trace $CLI pipelines list-updates --help +trace $CLI pipelines set-permissions --help +trace $CLI pipelines start-update --help +trace $CLI pipelines stop --help +trace $CLI pipelines update --help +trace $CLI pipelines update-permissions --help diff --git a/acceptance/help/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters/output.txt b/acceptance/help/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters/output.txt new file mode 100644 index 000000000..6c6e75817 --- /dev/null +++ b/acceptance/help/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters/output.txt @@ -0,0 +1,79 @@ + +>>> $CLI policy-compliance-for-clusters enforce-compliance --help +Enforce cluster policy compliance. + + Updates a cluster to be compliant with the current version of its policy. 
A + cluster can be updated if it is in a RUNNING or TERMINATED state. + + If a cluster is updated while in a RUNNING state, it will be restarted so + that the new attributes can take effect. + + If a cluster is updated while in a TERMINATED state, it will remain + TERMINATED. The next time the cluster is started, the new attributes will + take effect. + + Clusters created by the Databricks Jobs, DLT, or Models services cannot be + enforced by this API. Instead, use the "Enforce job policy compliance" API to + enforce policy compliance on jobs. + + Arguments: + CLUSTER_ID: The ID of the cluster you want to enforce policy compliance on. + +Usage: + databricks policy-compliance-for-clusters enforce-compliance CLUSTER_ID [flags] + +Flags: + -h, --help help for enforce-compliance + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --validate-only If set, previews the changes that would be made to a cluster to enforce compliance but does not update the cluster. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI policy-compliance-for-clusters get-compliance --help +Get cluster policy compliance. + + Returns the policy compliance status of a cluster. Clusters could be out of + compliance if their policy was updated after the cluster was last edited. + + Arguments: + CLUSTER_ID: The ID of the cluster to get the compliance status + +Usage: + databricks policy-compliance-for-clusters get-compliance CLUSTER_ID [flags] + +Flags: + -h, --help help for get-compliance + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI policy-compliance-for-clusters list-compliance --help +List cluster policy compliance. + + Returns the policy compliance status of all clusters that use a given policy. + Clusters could be out of compliance if their policy was updated after the + cluster was last edited. + + Arguments: + POLICY_ID: Canonical unique identifier for the cluster policy. + +Usage: + databricks policy-compliance-for-clusters list-compliance POLICY_ID [flags] + +Flags: + -h, --help help for list-compliance + --page-size int Use this field to specify the maximum number of results to be returned by the server. + --page-token string A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters/script b/acceptance/help/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters/script new file mode 100755 index 000000000..bd63a5a17 --- /dev/null +++ b/acceptance/help/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI policy-compliance-for-clusters enforce-compliance --help +trace $CLI policy-compliance-for-clusters get-compliance --help +trace $CLI policy-compliance-for-clusters list-compliance --help diff --git a/acceptance/help/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs/output.txt b/acceptance/help/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs/output.txt new file mode 100644 index 000000000..73f2e20f7 --- /dev/null +++ b/acceptance/help/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs/output.txt @@ -0,0 +1,72 @@ + +>>> $CLI policy-compliance-for-jobs enforce-compliance --help +Enforce job policy compliance. + + Updates a job so the job clusters that are created when running the job + (specified in new_cluster) are compliant with the current versions of their + respective cluster policies. All-purpose clusters used in the job will not be + updated. + + Arguments: + JOB_ID: The ID of the job you want to enforce policy compliance on. + +Usage: + databricks policy-compliance-for-jobs enforce-compliance JOB_ID [flags] + +Flags: + -h, --help help for enforce-compliance + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --validate-only If set, previews changes made to the job to comply with its policy, but does not update the job. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI policy-compliance-for-jobs get-compliance --help +Get job policy compliance. + + Returns the policy compliance status of a job. Jobs could be out of compliance + if a cluster policy they use was updated after the job was last edited and + some of its job clusters no longer comply with their updated policies. + + Arguments: + JOB_ID: The ID of the job whose compliance status you are requesting. + +Usage: + databricks policy-compliance-for-jobs get-compliance JOB_ID [flags] + +Flags: + -h, --help help for get-compliance + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI policy-compliance-for-jobs list-compliance --help +List job policy compliance. + + Returns the policy compliance status of all jobs that use a given policy. Jobs + could be out of compliance if a cluster policy they use was updated after the + job was last edited and its job clusters no longer comply with the updated + policy. + + Arguments: + POLICY_ID: Canonical unique identifier for the cluster policy. + +Usage: + databricks policy-compliance-for-jobs list-compliance POLICY_ID [flags] + +Flags: + -h, --help help for list-compliance + --page-size int Use this field to specify the maximum number of results to be returned by the server. + --page-token string A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token. 
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs/script b/acceptance/help/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs/script
new file mode 100755
index 000000000..8941f07d3
--- /dev/null
+++ b/acceptance/help/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs/script
@@ -0,0 +1,4 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI policy-compliance-for-jobs enforce-compliance --help
+trace $CLI policy-compliance-for-jobs get-compliance --help
+trace $CLI policy-compliance-for-jobs list-compliance --help
diff --git a/acceptance/help/cmd/workspace/policy-families/policy-families/output.txt b/acceptance/help/cmd/workspace/policy-families/policy-families/output.txt
new file mode 100644
index 000000000..9cc62ecbe
--- /dev/null
+++ b/acceptance/help/cmd/workspace/policy-families/policy-families/output.txt
@@ -0,0 +1,42 @@
+
+>>> $CLI policy-families get --help
+Get policy family information.
+
+ Retrieve the information for a policy family based on its identifier and
+ version
+
+ Arguments:
+ POLICY_FAMILY_ID: The family ID about which to retrieve information.
+
+Usage:
+ databricks policy-families get POLICY_FAMILY_ID [flags]
+
+Flags:
+ -h, --help help for get
+ --version int The version number for the family to fetch.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI policy-families list --help
+List policy families.
+
+ Returns the list of policy definition types available to use at their latest
+ version. This API is paginated.
+
+Usage:
+ databricks policy-families list [flags]
+
+Flags:
+ -h, --help help for list
+ --max-results int Maximum number of policy families to return.
+ --page-token string A token that can be used to get the next page of results.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/policy-families/policy-families/script b/acceptance/help/cmd/workspace/policy-families/policy-families/script
new file mode 100755
index 000000000..c29505f38
--- /dev/null
+++ b/acceptance/help/cmd/workspace/policy-families/policy-families/script
@@ -0,0 +1,3 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI policy-families get --help
+trace $CLI policy-families list --help
diff --git a/acceptance/help/cmd/workspace/provider-exchange-filters/provider-exchange-filters/output.txt b/acceptance/help/cmd/workspace/provider-exchange-filters/provider-exchange-filters/output.txt
new file mode 100644
index 000000000..1ecf44185
--- /dev/null
+++ b/acceptance/help/cmd/workspace/provider-exchange-filters/provider-exchange-filters/output.txt
@@ -0,0 +1,72 @@
+
+>>> $CLI provider-exchange-filters create --help
+Create a new exchange filter.
+
+ Add an exchange filter.
+ +Usage: + databricks provider-exchange-filters create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-exchange-filters delete --help +Delete an exchange filter. + + Delete an exchange filter + +Usage: + databricks provider-exchange-filters delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-exchange-filters list --help +List exchange filters. + + List exchange filter + +Usage: + databricks provider-exchange-filters list EXCHANGE_ID [flags] + +Flags: + -h, --help help for list + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-exchange-filters update --help +Update exchange filter. + + Update an exchange filter. + +Usage: + databricks provider-exchange-filters update ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/provider-exchange-filters/provider-exchange-filters/script b/acceptance/help/cmd/workspace/provider-exchange-filters/provider-exchange-filters/script new file mode 100755 index 000000000..cb1955348 --- /dev/null +++ b/acceptance/help/cmd/workspace/provider-exchange-filters/provider-exchange-filters/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI provider-exchange-filters create --help +trace $CLI provider-exchange-filters delete --help +trace $CLI provider-exchange-filters list --help +trace $CLI provider-exchange-filters update --help diff --git a/acceptance/help/cmd/workspace/provider-exchanges/provider-exchanges/output.txt b/acceptance/help/cmd/workspace/provider-exchanges/provider-exchanges/output.txt new file mode 100644 index 000000000..a888afa5e --- /dev/null +++ b/acceptance/help/cmd/workspace/provider-exchanges/provider-exchanges/output.txt @@ -0,0 +1,162 @@ + +>>> $CLI provider-exchanges add-listing-to-exchange --help +Add an exchange for listing. + + Associate an exchange with a listing + +Usage: + databricks provider-exchanges add-listing-to-exchange LISTING_ID EXCHANGE_ID [flags] + +Flags: + -h, --help help for add-listing-to-exchange + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-exchanges create --help +Create an exchange. 
+
+ Create an exchange
+
+Usage:
+ databricks provider-exchanges create [flags]
+
+Flags:
+ -h, --help help for create
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI provider-exchanges delete --help
+Delete an exchange.
+
+ This removes a listing from marketplace.
+
+Usage:
+ databricks provider-exchanges delete ID [flags]
+
+Flags:
+ -h, --help help for delete
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI provider-exchanges delete-listing-from-exchange --help
+Remove an exchange for listing.
+
+ Disassociate an exchange from a listing
+
+Usage:
+ databricks provider-exchanges delete-listing-from-exchange ID [flags]
+
+Flags:
+ -h, --help help for delete-listing-from-exchange
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI provider-exchanges get --help
+Get an exchange.
+
+ Get an exchange.
+
+Usage:
+ databricks provider-exchanges get ID [flags]
+
+Flags:
+ -h, --help help for get
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI provider-exchanges list --help
+List exchanges.
+
+ List exchanges visible to provider
+
+Usage:
+ databricks provider-exchanges list [flags]
+
+Flags:
+ -h, --help help for list
+ --page-size int
+ --page-token string
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI provider-exchanges list-exchanges-for-listing --help
+List exchanges for listing.
+
+ List exchanges associated with a listing
+
+Usage:
+ databricks provider-exchanges list-exchanges-for-listing LISTING_ID [flags]
+
+Flags:
+ -h, --help help for list-exchanges-for-listing
+ --page-size int
+ --page-token string
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI provider-exchanges list-listings-for-exchange --help
+List listings for exchange.
+
+ List listings associated with an exchange
+
+Usage:
+ databricks provider-exchanges list-listings-for-exchange EXCHANGE_ID [flags]
+
+Flags:
+ -h, --help help for list-listings-for-exchange
+ --page-size int
+ --page-token string
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI provider-exchanges update --help
+Update exchange.
+ + Update an exchange + +Usage: + databricks provider-exchanges update ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/provider-exchanges/provider-exchanges/script b/acceptance/help/cmd/workspace/provider-exchanges/provider-exchanges/script new file mode 100755 index 000000000..2c6659b5e --- /dev/null +++ b/acceptance/help/cmd/workspace/provider-exchanges/provider-exchanges/script @@ -0,0 +1,10 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI provider-exchanges add-listing-to-exchange --help +trace $CLI provider-exchanges create --help +trace $CLI provider-exchanges delete --help +trace $CLI provider-exchanges delete-listing-from-exchange --help +trace $CLI provider-exchanges get --help +trace $CLI provider-exchanges list --help +trace $CLI provider-exchanges list-exchanges-for-listing --help +trace $CLI provider-exchanges list-listings-for-exchange --help +trace $CLI provider-exchanges update --help diff --git a/acceptance/help/cmd/workspace/provider-files/provider-files/output.txt b/acceptance/help/cmd/workspace/provider-files/provider-files/output.txt new file mode 100644 index 000000000..37954e867 --- /dev/null +++ b/acceptance/help/cmd/workspace/provider-files/provider-files/output.txt @@ -0,0 +1,74 @@ + +>>> $CLI provider-files create --help +Create a file. + + Create a file. Currently, only provider icons and attached notebooks are + supported. + +Usage: + databricks provider-files create [flags] + +Flags: + --display-name string + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-files delete --help +Delete a file. + + Delete a file + +Usage: + databricks provider-files delete FILE_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-files get --help +Get a file. + + Get a file + +Usage: + databricks provider-files get FILE_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-files list --help +List files. + + List files attached to a parent entity. 
+ +Usage: + databricks provider-files list [flags] + +Flags: + -h, --help help for list + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/provider-files/provider-files/script b/acceptance/help/cmd/workspace/provider-files/provider-files/script new file mode 100755 index 000000000..2d2ec7324 --- /dev/null +++ b/acceptance/help/cmd/workspace/provider-files/provider-files/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI provider-files create --help +trace $CLI provider-files delete --help +trace $CLI provider-files get --help +trace $CLI provider-files list --help diff --git a/acceptance/help/cmd/workspace/provider-listings/provider-listings/output.txt b/acceptance/help/cmd/workspace/provider-listings/provider-listings/output.txt new file mode 100644 index 000000000..0ef5a6387 --- /dev/null +++ b/acceptance/help/cmd/workspace/provider-listings/provider-listings/output.txt @@ -0,0 +1,89 @@ + +>>> $CLI provider-listings create --help +Create a listing. + + Create a new listing + +Usage: + databricks provider-listings create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-listings delete --help +Delete a listing. + + Delete a listing + +Usage: + databricks provider-listings delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-listings get --help +Get a listing. + + Get a listing + +Usage: + databricks provider-listings get ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-listings list --help +List listings. + + List listings owned by this provider + +Usage: + databricks provider-listings list [flags] + +Flags: + -h, --help help for list + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-listings update --help +Update listing. 
+ + Update a listing + +Usage: + databricks provider-listings update ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/provider-listings/provider-listings/script b/acceptance/help/cmd/workspace/provider-listings/provider-listings/script new file mode 100755 index 000000000..364223148 --- /dev/null +++ b/acceptance/help/cmd/workspace/provider-listings/provider-listings/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI provider-listings create --help +trace $CLI provider-listings delete --help +trace $CLI provider-listings get --help +trace $CLI provider-listings list --help +trace $CLI provider-listings update --help diff --git a/acceptance/help/cmd/workspace/provider-personalization-requests/provider-personalization-requests/output.txt b/acceptance/help/cmd/workspace/provider-personalization-requests/provider-personalization-requests/output.txt new file mode 100644 index 000000000..040074f55 --- /dev/null +++ b/acceptance/help/cmd/workspace/provider-personalization-requests/provider-personalization-requests/output.txt @@ -0,0 +1,40 @@ + +>>> $CLI provider-personalization-requests list --help +All personalization requests across all listings. + + List personalization requests to this provider. This will return all + personalization requests, regardless of which listing they are for. + +Usage: + databricks provider-personalization-requests list [flags] + +Flags: + -h, --help help for list + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-personalization-requests update --help +Update personalization request status. + + Update personalization request. This method only permits updating the status + of the request. + +Usage: + databricks provider-personalization-requests update LISTING_ID REQUEST_ID STATUS [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --reason string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/provider-personalization-requests/provider-personalization-requests/script b/acceptance/help/cmd/workspace/provider-personalization-requests/provider-personalization-requests/script new file mode 100755 index 000000000..0bb72d32a --- /dev/null +++ b/acceptance/help/cmd/workspace/provider-personalization-requests/provider-personalization-requests/script @@ -0,0 +1,3 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
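+# NOTE: hand-written illustration, not produced by the generator. A plausible
+# real-world invocation; the listing ID, request ID, and status value are
+# assumed placeholders, and --reason is the flag documented in the help above.
+#   $CLI provider-personalization-requests list --page-size 25
+#   $CLI provider-personalization-requests update <listing-id> <request-id> <status> --reason "not applicable"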
+trace $CLI provider-personalization-requests list --help
+trace $CLI provider-personalization-requests update --help
diff --git a/acceptance/help/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards/output.txt b/acceptance/help/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards/output.txt
new file mode 100644
index 000000000..64a9ea940
--- /dev/null
+++ b/acceptance/help/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards/output.txt
@@ -0,0 +1,74 @@
+
+>>> $CLI provider-provider-analytics-dashboards create --help
+Create provider analytics dashboard.
+
+ Create provider analytics dashboard. Returns a Marketplace-specific id. Not to
+ be confused with the Lakeview dashboard id.
+
+Usage:
+ databricks provider-provider-analytics-dashboards create [flags]
+
+Flags:
+ -h, --help help for create
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI provider-provider-analytics-dashboards get --help
+Get provider analytics dashboard.
+
+ Get provider analytics dashboard.
+
+Usage:
+ databricks provider-provider-analytics-dashboards get [flags]
+
+Flags:
+ -h, --help help for get
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI provider-provider-analytics-dashboards get-latest-version --help
+Get latest version of provider analytics dashboard.
+
+ Get latest version of provider analytics dashboard.
+
+Usage:
+ databricks provider-provider-analytics-dashboards get-latest-version [flags]
+
+Flags:
+ -h, --help help for get-latest-version
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI provider-provider-analytics-dashboards update --help
+Update provider analytics dashboard.
+
+ Update provider analytics dashboard.
+
+ Arguments:
+ ID: id is an immutable property and can't be updated.
+
+Usage:
+ databricks provider-provider-analytics-dashboards update ID [flags]
+
+Flags:
+ -h, --help help for update
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --version int The version of the dashboard template to update the user to; currently this is expected to equal the latest version of the dashboard template.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards/script b/acceptance/help/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards/script
new file mode 100755
index 000000000..ff6bcd30a
--- /dev/null
+++ b/acceptance/help/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards/script
@@ -0,0 +1,5 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
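+# NOTE: hand-written illustration, not produced by the generator. A plausible
+# real-world invocation; the dashboard ID and version are assumed placeholders,
+# and --version is the flag documented in the help output above.
+#   $CLI provider-provider-analytics-dashboards get-latest-version
+#   $CLI provider-provider-analytics-dashboards update <dashboard-id> --version 2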
+trace $CLI provider-provider-analytics-dashboards create --help +trace $CLI provider-provider-analytics-dashboards get --help +trace $CLI provider-provider-analytics-dashboards get-latest-version --help +trace $CLI provider-provider-analytics-dashboards update --help diff --git a/acceptance/help/cmd/workspace/provider-providers/provider-providers/output.txt b/acceptance/help/cmd/workspace/provider-providers/provider-providers/output.txt new file mode 100644 index 000000000..36cd62317 --- /dev/null +++ b/acceptance/help/cmd/workspace/provider-providers/provider-providers/output.txt @@ -0,0 +1,89 @@ + +>>> $CLI provider-providers create --help +Create a provider. + + Create a provider + +Usage: + databricks provider-providers create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-providers delete --help +Delete provider. + + Delete provider + +Usage: + databricks provider-providers delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-providers get --help +Get provider. + + Get provider profile + +Usage: + databricks provider-providers get ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-providers list --help +List providers. + + List provider profiles for account. + +Usage: + databricks provider-providers list [flags] + +Flags: + -h, --help help for list + --page-size int + --page-token string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI provider-providers update --help +Update provider. + + Update provider profile + +Usage: + databricks provider-providers update ID [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/provider-providers/provider-providers/script b/acceptance/help/cmd/workspace/provider-providers/provider-providers/script new file mode 100755 index 000000000..57c46d608 --- /dev/null +++ b/acceptance/help/cmd/workspace/provider-providers/provider-providers/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
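+# NOTE: hand-written illustration, not produced by the generator. A plausible
+# real-world invocation; the provider ID is an assumed placeholder, and
+# -o json is the documented global output flag.
+#   $CLI provider-providers get <provider-id> -o json
+#   $CLI provider-providers list --page-size 10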
+trace $CLI provider-providers create --help
+trace $CLI provider-providers delete --help
+trace $CLI provider-providers get --help
+trace $CLI provider-providers list --help
+trace $CLI provider-providers update --help
diff --git a/acceptance/help/cmd/workspace/providers/providers/output.txt b/acceptance/help/cmd/workspace/providers/providers/output.txt
new file mode 100644
index 000000000..ca5b5715a
--- /dev/null
+++ b/acceptance/help/cmd/workspace/providers/providers/output.txt
@@ -0,0 +1,143 @@
+
+>>> $CLI providers create --help
+Create an auth provider.
+
+  Creates a new authentication provider minimally based on a name and
+  authentication type. The caller must be an admin on the metastore.
+
+  Arguments:
+    NAME: The name of the Provider.
+    AUTHENTICATION_TYPE: The delta sharing authentication type.
+
+Usage:
+  databricks providers create NAME AUTHENTICATION_TYPE [flags]
+
+Flags:
+      --comment string                 Description about the provider.
+  -h, --help                           help for create
+      --json JSON                      either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --recipient-profile-str string   This field is required when the __authentication_type__ is **TOKEN**, **OAUTH_CLIENT_CREDENTIALS** or not provided.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI providers delete --help
+Delete a provider.
+
+  Deletes an authentication provider, if the caller is a metastore admin or is
+  the owner of the provider.
+
+  Arguments:
+    NAME: Name of the provider.
+
+Usage:
+  databricks providers delete NAME [flags]
+
+Flags:
+  -h, --help   help for delete
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI providers get --help
+Get a provider.
+
+  Gets a specific authentication provider. The caller must supply the name of
+  the provider, and must either be a metastore admin or the owner of the
+  provider.
+
+  Arguments:
+    NAME: Name of the provider.
+
+Usage:
+  databricks providers get NAME [flags]
+
+Flags:
+  -h, --help   help for get
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI providers list --help
+List providers.
+
+  Gets an array of available authentication providers. The caller must either be
+  a metastore admin or the owner of the providers. Providers not owned by the
+  caller are not included in the response. There is no guarantee of a specific
+  ordering of the elements in the array.
+
+Usage:
+  databricks providers list [flags]
+
+Flags:
+      --data-provider-global-metastore-id string   If not provided, all providers will be returned.
+  -h, --help                                       help for list
+      --max-results int                            Maximum number of providers to return.
+      --page-token string                          Opaque pagination token to go to next page based on previous query.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI providers list-shares --help
+List shares by Provider.
+
+  Gets an array of a specified provider's shares within the metastore where:
+
+  * the caller is a metastore admin, or * the caller is the owner.
+
+  Arguments:
+    NAME: Name of the provider in which to list shares.
+
+Usage:
+  databricks providers list-shares NAME [flags]
+
+Flags:
+  -h, --help                help for list-shares
+      --max-results int     Maximum number of shares to return.
+      --page-token string   Opaque pagination token to go to next page based on previous query.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI providers update --help
+Update a provider.
+
+  Updates the information for an authentication provider, if the caller is a
+  metastore admin or is the owner of the provider. If the update changes the
+  provider name, the caller must be both a metastore admin and the owner of the
+  provider.
+
+  Arguments:
+    NAME: Name of the provider.
+
+Usage:
+  databricks providers update NAME [flags]
+
+Flags:
+      --comment string                 Description about the provider.
+  -h, --help                           help for update
+      --json JSON                      either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --new-name string                New name for the provider.
+      --owner string                   Username of Provider owner.
+      --recipient-profile-str string   This field is required when the __authentication_type__ is **TOKEN**, **OAUTH_CLIENT_CREDENTIALS** or not provided.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/providers/providers/script b/acceptance/help/cmd/workspace/providers/providers/script
new file mode 100755
index 000000000..f34cf01c1
--- /dev/null
+++ b/acceptance/help/cmd/workspace/providers/providers/script
@@ -0,0 +1,7 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI providers create --help
+trace $CLI providers delete --help
+trace $CLI providers get --help
+trace $CLI providers list --help
+trace $CLI providers list-shares --help
+trace $CLI providers update --help
diff --git a/acceptance/help/cmd/workspace/quality-monitors/quality-monitors/output.txt b/acceptance/help/cmd/workspace/quality-monitors/quality-monitors/output.txt
new file mode 100644
index 000000000..f0e56e6fd
--- /dev/null
+++ b/acceptance/help/cmd/workspace/quality-monitors/quality-monitors/output.txt
@@ -0,0 +1,287 @@
+
+>>> $CLI quality-monitors cancel-refresh --help
+Cancel refresh.
+
+  Cancel an active monitor refresh for the given refresh ID.
+
+  The caller must either: 1. be an owner of the table's parent catalog 2. have
+  **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+  parent schema 3. have the following permissions: - **USE_CATALOG** on the
+  table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
+  owner of the table
+
+  Additionally, the call must be made from the workspace where the monitor was
+  created.
+
+  Arguments:
+    TABLE_NAME: Full name of the table.
+    REFRESH_ID: ID of the refresh.
+
+Usage:
+  databricks quality-monitors cancel-refresh TABLE_NAME REFRESH_ID [flags]
+
+Flags:
+  -h, --help   help for cancel-refresh
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI quality-monitors create --help
+Create a table monitor.
+
+  Creates a new monitor for the specified table.
+
+  The caller must either: 1. be an owner of the table's parent catalog, have
+  **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the
+  table 2. have **USE_CATALOG** on the table's parent catalog, be an owner of
+  the table's parent schema, and have **SELECT** access on the table. 3. have
+  the following permissions: - **USE_CATALOG** on the table's parent catalog -
+  **USE_SCHEMA** on the table's parent schema - be an owner of the table.
+
+  Workspace assets, such as the dashboard, will be created in the workspace
+  where this call was made.
+
+  Arguments:
+    TABLE_NAME: Full name of the table.
+    ASSETS_DIR: The directory to store monitoring assets (e.g. dashboard, metric tables).
+    OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.
+
+Usage:
+  databricks quality-monitors create TABLE_NAME ASSETS_DIR OUTPUT_SCHEMA_NAME [flags]
+
+Flags:
+      --baseline-table-name string   Name of the baseline table from which drift metrics are computed.
+  -h, --help                         help for create
+      --json JSON                    either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --skip-builtin-dashboard       Whether to skip creating a default dashboard summarizing data quality metrics.
+      --warehouse-id string          Optional argument to specify the warehouse for dashboard creation.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI quality-monitors delete --help
+Delete a table monitor.
+
+  Deletes a monitor for the specified table.
+
+  The caller must either: 1. be an owner of the table's parent catalog 2. have
+  **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+  parent schema 3. have the following permissions: - **USE_CATALOG** on the
+  table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
+  owner of the table.
+
+  Additionally, the call must be made from the workspace where the monitor was
+  created.
+
+  Note that the metric tables and dashboard will not be deleted as part of this
+  call; those assets must be manually cleaned up (if desired).
+
+  Arguments:
+    TABLE_NAME: Full name of the table.
+
+Usage:
+  databricks quality-monitors delete TABLE_NAME [flags]
+
+Flags:
+  -h, --help   help for delete
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI quality-monitors get --help
+Get a table monitor.
+
+  Gets a monitor for the specified table.
+
+  The caller must either: 1. be an owner of the table's parent catalog 2. have
+  **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+  parent schema. 3. have the following permissions: - **USE_CATALOG** on the
+  table's parent catalog - **USE_SCHEMA** on the table's parent schema -
+  **SELECT** privilege on the table.
+
+  The returned information includes configuration values, as well as information
+  on assets created by the monitor. Some information (e.g., dashboard) may be
+  filtered out if the caller is in a different workspace than where the monitor
+  was created.
+
+  Arguments:
+    TABLE_NAME: Full name of the table.
+
+Usage:
+  databricks quality-monitors get TABLE_NAME [flags]
+
+Flags:
+  -h, --help   help for get
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI quality-monitors get-refresh --help
+Get refresh.
+
+  Gets info about a specific monitor refresh using the given refresh ID.
+
+  The caller must either: 1. be an owner of the table's parent catalog 2. have
+  **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+  parent schema 3. have the following permissions: - **USE_CATALOG** on the
+  table's parent catalog - **USE_SCHEMA** on the table's parent schema -
+  **SELECT** privilege on the table.
+
+  Additionally, the call must be made from the workspace where the monitor was
+  created.
+
+  Arguments:
+    TABLE_NAME: Full name of the table.
+    REFRESH_ID: ID of the refresh.
+
+Usage:
+  databricks quality-monitors get-refresh TABLE_NAME REFRESH_ID [flags]
+
+Flags:
+  -h, --help   help for get-refresh
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI quality-monitors list-refreshes --help
+List refreshes.
+
+  Gets an array containing the history of the most recent refreshes (up to 25)
+  for this table.
+
+  The caller must either: 1. be an owner of the table's parent catalog 2. have
+  **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+  parent schema 3. have the following permissions: - **USE_CATALOG** on the
+  table's parent catalog - **USE_SCHEMA** on the table's parent schema -
+  **SELECT** privilege on the table.
+
+  Additionally, the call must be made from the workspace where the monitor was
+  created.
+
+  Arguments:
+    TABLE_NAME: Full name of the table.
+
+Usage:
+  databricks quality-monitors list-refreshes TABLE_NAME [flags]
+
+Flags:
+  -h, --help   help for list-refreshes
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI quality-monitors regenerate-dashboard --help
+Regenerate a monitoring dashboard.
+
+  Regenerates the monitoring dashboard for the specified table.
+
+  The caller must either: 1. be an owner of the table's parent catalog 2. have
+  **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+  parent schema 3. have the following permissions: - **USE_CATALOG** on the
+  table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
+  owner of the table
+
+  The call must be made from the workspace where the monitor was created. The
+  dashboard will be regenerated in the assets directory that was specified when
+  the monitor was created.
+
+  Arguments:
+    TABLE_NAME: Full name of the table.
+
+Usage:
+  databricks quality-monitors regenerate-dashboard TABLE_NAME [flags]
+
+Flags:
+  -h, --help                   help for regenerate-dashboard
+      --json JSON              either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --warehouse-id string    Optional argument to specify the warehouse for dashboard regeneration.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI quality-monitors run-refresh --help
+Queue a metric refresh for a monitor.
+
+  Queues a metric refresh on the monitor for the specified table. The refresh
+  will execute in the background.
+
+  The caller must either: 1. be an owner of the table's parent catalog 2. have
+  **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+  parent schema 3. have the following permissions: - **USE_CATALOG** on the
+  table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
+  owner of the table
+
+  Additionally, the call must be made from the workspace where the monitor was
+  created.
+
+  Arguments:
+    TABLE_NAME: Full name of the table.
+
+Usage:
+  databricks quality-monitors run-refresh TABLE_NAME [flags]
+
+Flags:
+  -h, --help   help for run-refresh
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI quality-monitors update --help
+Update a table monitor.
+
+  Updates a monitor for the specified table.
+
+  The caller must either: 1. be an owner of the table's parent catalog 2. have
+  **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+  parent schema 3. have the following permissions: - **USE_CATALOG** on the
+  table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
+  owner of the table.
+
+  Additionally, the call must be made from the workspace where the monitor was
+  created, and the caller must be the original creator of the monitor.
+
+  Certain configuration fields, such as output asset identifiers, cannot be
+  updated.
+
+  Arguments:
+    TABLE_NAME: Full name of the table.
+    OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.
+
+Usage:
+  databricks quality-monitors update TABLE_NAME OUTPUT_SCHEMA_NAME [flags]
+
+Flags:
+      --baseline-table-name string   Name of the baseline table from which drift metrics are computed.
+      --dashboard-id string          Id of dashboard that visualizes the computed metrics.
+  -h, --help                         help for update
+      --json JSON                    either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/quality-monitors/quality-monitors/script b/acceptance/help/cmd/workspace/quality-monitors/quality-monitors/script
new file mode 100755
index 000000000..c435d0e7f
--- /dev/null
+++ b/acceptance/help/cmd/workspace/quality-monitors/quality-monitors/script
@@ -0,0 +1,10 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI quality-monitors cancel-refresh --help
+trace $CLI quality-monitors create --help
+trace $CLI quality-monitors delete --help
+trace $CLI quality-monitors get --help
+trace $CLI quality-monitors get-refresh --help
+trace $CLI quality-monitors list-refreshes --help
+trace $CLI quality-monitors regenerate-dashboard --help
+trace $CLI quality-monitors run-refresh --help
+trace $CLI quality-monitors update --help
diff --git a/acceptance/help/cmd/workspace/queries-legacy/queries-legacy/output.txt b/acceptance/help/cmd/workspace/queries-legacy/queries-legacy/output.txt
new file mode 100644
index 000000000..80edeeae9
--- /dev/null
+++ b/acceptance/help/cmd/workspace/queries-legacy/queries-legacy/output.txt
@@ -0,0 +1,161 @@
+
+>>> $CLI queries-legacy create --help
+Create a new query definition.
+
+  Creates a new query definition. Queries created with this endpoint belong to
+  the authenticated user making the request.
+
+  The data_source_id field specifies the ID of the SQL warehouse to run this
+  query against. You can use the Data Sources API to see a complete list of
+  available SQL warehouses. Or you can copy the data_source_id from an
+  existing query.
+
+  **Note**: You cannot add a visualization until you create the query.
+
+  **Note**: A new version of the Databricks SQL API is now available. Please use
+  :method:queries/create instead. [Learn more]
+
+  [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+
+Usage:
+  databricks queries-legacy create [flags]
+
+Flags:
+  -h, --help        help for create
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI queries-legacy delete --help
+Delete a query.
+
+  Moves a query to the trash. Trashed queries immediately disappear from
+  searches and list views, and they cannot be used for alerts. The trash is
+  deleted after 30 days.
+
+  **Note**: A new version of the Databricks SQL API is now available. Please use
+  :method:queries/delete instead. [Learn more]
+
+  [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+
+Usage:
+  databricks queries-legacy delete QUERY_ID [flags]
+
+Flags:
+  -h, --help   help for delete
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI queries-legacy get --help
+Get a query definition.
+
+  Retrieve a query object definition along with contextual permissions
+  information about the currently authenticated user.
+
+  **Note**: A new version of the Databricks SQL API is now available. Please use
+  :method:queries/get instead. [Learn more]
+
+  [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+
+Usage:
+  databricks queries-legacy get QUERY_ID [flags]
+
+Flags:
+  -h, --help   help for get
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI queries-legacy list --help
+Get a list of queries.
+
+  Gets a list of queries. Optionally, this list can be filtered by a search
+  term.
+
+  **Warning**: Calling this API concurrently 10 or more times could result in
+  throttling, service degradation, or a temporary ban.
+
+  **Note**: A new version of the Databricks SQL API is now available. Please use
+  :method:queries/list instead. [Learn more]
+
+  [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+
+Usage:
+  databricks queries-legacy list [flags]
+
+Flags:
+  -h, --help              help for list
+      --order string      Name of query attribute to order by.
+      --page int          Page number to retrieve.
+      --page-size int     Number of queries to return per page.
+      --q string          Full text search term.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI queries-legacy restore --help
+Restore a query.
+
+  Restore a query that has been moved to the trash. A restored query appears in
+  list views and searches. You can use restored queries for alerts.
+
+  **Note**: A new version of the Databricks SQL API is now available. Please see
+  the latest version. [Learn more]
+
+  [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+
+Usage:
+  databricks queries-legacy restore QUERY_ID [flags]
+
+Flags:
+  -h, --help   help for restore
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI queries-legacy update --help
+Change a query definition.
+
+  Modify this query definition.
+
+  **Note**: You cannot undo this operation.
+
+  **Note**: A new version of the Databricks SQL API is now available. Please use
+  :method:queries/update instead. [Learn more]
+
+  [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+
+Usage:
+  databricks queries-legacy update QUERY_ID [flags]
+
+Flags:
+      --data-source-id string    Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID.
+      --description string       General description that conveys additional information about this query such as usage notes.
+  -h, --help                     help for update
+      --json JSON                either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --name string              The title of this query that appears in list views, widget headings, and on the query page.
+      --query string             The text of the query to be run.
+      --run-as-role RunAsRole    Sets the **Run as** role for the object. Supported values: [owner, viewer]
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/queries-legacy/queries-legacy/script b/acceptance/help/cmd/workspace/queries-legacy/queries-legacy/script
new file mode 100755
index 000000000..69026e18d
--- /dev/null
+++ b/acceptance/help/cmd/workspace/queries-legacy/queries-legacy/script
@@ -0,0 +1,7 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI queries-legacy create --help
+trace $CLI queries-legacy delete --help
+trace $CLI queries-legacy get --help
+trace $CLI queries-legacy list --help
+trace $CLI queries-legacy restore --help
+trace $CLI queries-legacy update --help
diff --git a/acceptance/help/cmd/workspace/queries/queries/output.txt b/acceptance/help/cmd/workspace/queries/queries/output.txt
new file mode 100644
index 000000000..15cdc6fba
--- /dev/null
+++ b/acceptance/help/cmd/workspace/queries/queries/output.txt
@@ -0,0 +1,120 @@
+
+>>> $CLI queries create --help
+Create a query.
+
+  Creates a query.
+
+Usage:
+  databricks queries create [flags]
+
+Flags:
+  -h, --help        help for create
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI queries delete --help
+Delete a query.
+
+  Moves a query to the trash. Trashed queries immediately disappear from
+  searches and list views, and cannot be used for alerts. You can restore a
+  trashed query through the UI. A trashed query is permanently deleted after 30
+  days.
+
+Usage:
+  databricks queries delete ID [flags]
+
+Flags:
+  -h, --help   help for delete
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI queries get --help
+Get a query.
+
+  Gets a query.
+
+Usage:
+  databricks queries get ID [flags]
+
+Flags:
+  -h, --help   help for get
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI queries list --help
+List queries.
+
+  Gets a list of queries accessible to the user, ordered by creation time.
+  **Warning:** Calling this API concurrently 10 or more times could result in
+  throttling, service degradation, or a temporary ban.
+
+Usage:
+  databricks queries list [flags]
+
+Flags:
+  -h, --help                help for list
+      --page-size int
+      --page-token string
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI queries list-visualizations --help
+List visualizations on a query.
+
+  Gets a list of visualizations on a query.
+
+Usage:
+  databricks queries list-visualizations ID [flags]
+
+Flags:
+  -h, --help                help for list-visualizations
+      --page-size int
+      --page-token string
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI queries update --help
+Update a query.
+
+  Updates a query.
+
+  Arguments:
+    ID:
+    UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask
+      specifies which fields of the setting payload will be updated. The field
+      mask needs to be supplied as single string. To specify multiple fields in
+      the field mask, use comma as the separator (no space).
+
+Usage:
+  databricks queries update ID UPDATE_MASK [flags]
+
+Flags:
+  -h, --help        help for update
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/queries/queries/script b/acceptance/help/cmd/workspace/queries/queries/script
new file mode 100755
index 000000000..f9acaf362
--- /dev/null
+++ b/acceptance/help/cmd/workspace/queries/queries/script
@@ -0,0 +1,7 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI queries create --help
+trace $CLI queries delete --help
+trace $CLI queries get --help
+trace $CLI queries list --help
+trace $CLI queries list-visualizations --help
+trace $CLI queries update --help
diff --git a/acceptance/help/cmd/workspace/query-history/query-history/output.txt b/acceptance/help/cmd/workspace/query-history/query-history/output.txt
new file mode 100644
index 000000000..f771792b0
--- /dev/null
+++ b/acceptance/help/cmd/workspace/query-history/query-history/output.txt
@@ -0,0 +1,25 @@
+
+>>> $CLI query-history list --help
+List Queries.
+
+  List the history of queries through SQL warehouses, and serverless compute.
+
+  You can filter by user ID, warehouse ID, status, and time range. Most recently
+  started queries are returned first (up to max_results in request). The
+  pagination token returned in response can be used to list subsequent query
+  statuses.
+
+Usage:
+  databricks query-history list [flags]
+
+Flags:
+  -h, --help                help for list
+      --include-metrics     Whether to include the query metrics with each query.
+      --max-results int     Limit the number of results returned in one page.
+      --page-token string   A token that can be used to get the next page of results.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/query-history/query-history/script b/acceptance/help/cmd/workspace/query-history/query-history/script
new file mode 100755
index 000000000..bb787ffc1
--- /dev/null
+++ b/acceptance/help/cmd/workspace/query-history/query-history/script
@@ -0,0 +1,2 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI query-history list --help
diff --git a/acceptance/help/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy/output.txt b/acceptance/help/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy/output.txt
new file mode 100644
index 000000000..b5e02b15e
--- /dev/null
+++ b/acceptance/help/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy/output.txt
@@ -0,0 +1,74 @@
+
+>>> $CLI query-visualizations-legacy create --help
+Add visualization to a query.
+
+  Creates visualization in the query.
+
+  **Note**: A new version of the Databricks SQL API is now available. Please use
+  :method:queryvisualizations/create instead. [Learn more]
+
+  [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+
+Usage:
+  databricks query-visualizations-legacy create [flags]
+
+Flags:
+  -h, --help        help for create
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI query-visualizations-legacy delete --help
+Remove visualization.
+
+  Removes a visualization from the query.
+
+  **Note**: A new version of the Databricks SQL API is now available. Please use
+  :method:queryvisualizations/delete instead. [Learn more]
+
+  [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+
+  Arguments:
+    ID: Widget ID returned by :method:queryvizualisations/create
+
+Usage:
+  databricks query-visualizations-legacy delete ID [flags]
+
+Flags:
+  -h, --help   help for delete
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI query-visualizations-legacy update --help
+Edit existing visualization.
+
+  Updates visualization in the query.
+
+  **Note**: A new version of the Databricks SQL API is now available. Please use
+  :method:queryvisualizations/update instead. [Learn more]
+
+  [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+
+  Arguments:
+    ID: The UUID for this visualization.
+
+Usage:
+  databricks query-visualizations-legacy update ID [flags]
+
+Flags:
+  -h, --help        help for update
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy/script b/acceptance/help/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy/script
new file mode 100755
index 000000000..11f4d1bc5
--- /dev/null
+++ b/acceptance/help/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy/script
@@ -0,0 +1,4 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI query-visualizations-legacy create --help
+trace $CLI query-visualizations-legacy delete --help
+trace $CLI query-visualizations-legacy update --help
diff --git a/acceptance/help/cmd/workspace/query-visualizations/query-visualizations/output.txt b/acceptance/help/cmd/workspace/query-visualizations/query-visualizations/output.txt
new file mode 100644
index 000000000..4d82e7dcb
--- /dev/null
+++ b/acceptance/help/cmd/workspace/query-visualizations/query-visualizations/output.txt
@@ -0,0 +1,60 @@
+
+>>> $CLI query-visualizations create --help
+Add a visualization to a query.
+
+  Adds a visualization to a query.
+
+Usage:
+  databricks query-visualizations create [flags]
+
+Flags:
+  -h, --help        help for create
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI query-visualizations delete --help
+Remove a visualization.
+
+  Removes a visualization.
+
+Usage:
+  databricks query-visualizations delete ID [flags]
+
+Flags:
+  -h, --help   help for delete
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI query-visualizations update --help
+Update a visualization.
+
+  Updates a visualization.
+
+  Arguments:
+    ID:
+    UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask
+      specifies which fields of the setting payload will be updated. The field
+      mask needs to be supplied as single string. To specify multiple fields in
+      the field mask, use comma as the separator (no space).
+
+Usage:
+  databricks query-visualizations update ID UPDATE_MASK [flags]
+
+Flags:
+  -h, --help        help for update
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/query-visualizations/query-visualizations/script b/acceptance/help/cmd/workspace/query-visualizations/query-visualizations/script
new file mode 100755
index 000000000..0141780eb
--- /dev/null
+++ b/acceptance/help/cmd/workspace/query-visualizations/query-visualizations/script
@@ -0,0 +1,4 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI query-visualizations create --help
+trace $CLI query-visualizations delete --help
+trace $CLI query-visualizations update --help
diff --git a/acceptance/help/cmd/workspace/recipient-activation/recipient-activation/output.txt b/acceptance/help/cmd/workspace/recipient-activation/recipient-activation/output.txt
new file mode 100644
index 000000000..0362e9d64
--- /dev/null
+++ b/acceptance/help/cmd/workspace/recipient-activation/recipient-activation/output.txt
@@ -0,0 +1,41 @@
+
+>>> $CLI recipient-activation get-activation-url-info --help
+Get a share activation URL.
+
+  Gets an activation URL for a share.
+
+  Arguments:
+    ACTIVATION_URL: The one time activation url. It also accepts activation token.
+
+Usage:
+  databricks recipient-activation get-activation-url-info ACTIVATION_URL [flags]
+
+Flags:
+  -h, --help   help for get-activation-url-info
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI recipient-activation retrieve-token --help
+Get an access token.
+
+  Retrieve access token with an activation url. This is a public API without any
+  authentication.
+
+  Arguments:
+    ACTIVATION_URL: The one time activation url. It also accepts activation token.
+
+Usage:
+  databricks recipient-activation retrieve-token ACTIVATION_URL [flags]
+
+Flags:
+  -h, --help   help for retrieve-token
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/recipient-activation/recipient-activation/script b/acceptance/help/cmd/workspace/recipient-activation/recipient-activation/script
new file mode 100755
index 000000000..2a4d11f10
--- /dev/null
+++ b/acceptance/help/cmd/workspace/recipient-activation/recipient-activation/script
@@ -0,0 +1,3 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI recipient-activation get-activation-url-info --help
+trace $CLI recipient-activation retrieve-token --help
diff --git a/acceptance/help/cmd/workspace/recipients/recipients/output.txt b/acceptance/help/cmd/workspace/recipients/recipients/output.txt
new file mode 100644
index 000000000..6892408c9
--- /dev/null
+++ b/acceptance/help/cmd/workspace/recipients/recipients/output.txt
@@ -0,0 +1,171 @@
+
+>>> $CLI recipients create --help
+Create a share recipient.
+
+  Creates a new recipient with the delta sharing authentication type in the
+  metastore. The caller must be a metastore admin or have the
+  **CREATE_RECIPIENT** privilege on the metastore.
+
+  Arguments:
+    NAME: Name of Recipient.
+    AUTHENTICATION_TYPE: The delta sharing authentication type.
+
+Usage:
+  databricks recipients create NAME AUTHENTICATION_TYPE [flags]
+
+Flags:
+      --comment string                              Description about the recipient.
+      --data-recipient-global-metastore-id string   The global Unity Catalog metastore id provided by the data recipient.
+      --expiration-time int                         Expiration timestamp of the token, in epoch milliseconds.
+  -h, --help                                        help for create
+      --json JSON                                   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --owner string                                Username of the recipient owner.
+      --sharing-code string                         The one-time sharing code provided by the data recipient.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI recipients delete --help
+Delete a share recipient.
+
+  Deletes the specified recipient from the metastore. The caller must be the
+  owner of the recipient.
+
+  Arguments:
+    NAME: Name of the recipient.
+
+Usage:
+  databricks recipients delete NAME [flags]
+
+Flags:
+  -h, --help   help for delete
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI recipients get --help
+Get a share recipient.
+
+  Gets a share recipient from the metastore if:
+
+  * the caller is the owner of the share recipient, or: * is a metastore admin
+
+  Arguments:
+    NAME: Name of the recipient.
+
+Usage:
+  databricks recipients get NAME [flags]
+
+Flags:
+  -h, --help   help for get
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI recipients list --help
+List share recipients.
+
+  Gets an array of all share recipients within the current metastore where:
+
+  * the caller is a metastore admin, or * the caller is the owner. There is no
+  guarantee of a specific ordering of the elements in the array.
+
+Usage:
+  databricks recipients list [flags]
+
+Flags:
+      --data-recipient-global-metastore-id string   If not provided, all recipients will be returned.
+  -h, --help                                        help for list
+      --max-results int                             Maximum number of recipients to return.
+      --page-token string                           Opaque pagination token to go to next page based on previous query.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI recipients rotate-token --help
+Rotate a token.
+
+  Refreshes the specified recipient's delta sharing authentication token with
+  the provided token info. The caller must be the owner of the recipient.
+
+  Arguments:
+    NAME: The name of the Recipient.
+    EXISTING_TOKEN_EXPIRE_IN_SECONDS: The expiration time of the bearer token in ISO 8601 format. This will set
+      the expiration_time of existing token only to a smaller timestamp, it
+      cannot extend the expiration_time. Use 0 to expire the existing token
+      immediately, negative number will return an error.
+
+Usage:
+  databricks recipients rotate-token NAME EXISTING_TOKEN_EXPIRE_IN_SECONDS [flags]
+
+Flags:
+  -h, --help        help for rotate-token
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI recipients share-permissions --help
+Get recipient share permissions.
+
+  Gets the share permissions for the specified Recipient. The caller must be a
+  metastore admin or the owner of the Recipient.
+
+  Arguments:
+    NAME: The name of the Recipient.
+
+Usage:
+  databricks recipients share-permissions NAME [flags]
+
+Flags:
+  -h, --help                help for share-permissions
+      --max-results int     Maximum number of permissions to return.
+      --page-token string   Opaque pagination token to go to next page based on previous query.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI recipients update --help
+Update a share recipient.
+
+  Updates an existing recipient in the metastore. The caller must be a metastore
+  admin or the owner of the recipient. If the recipient name will be updated,
+  the user must be both a metastore admin and the owner of the recipient.
+
+  Arguments:
+    NAME: Name of the recipient.
+
+Usage:
+  databricks recipients update NAME [flags]
+
+Flags:
+      --comment string          Description about the recipient.
+      --expiration-time int     Expiration timestamp of the token, in epoch milliseconds.
+  -h, --help                    help for update
+      --json JSON               either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --new-name string         New name for the recipient.
+      --owner string            Username of the recipient owner.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/recipients/recipients/script b/acceptance/help/cmd/workspace/recipients/recipients/script
new file mode 100755
index 000000000..71535154f
--- /dev/null
+++ b/acceptance/help/cmd/workspace/recipients/recipients/script
@@ -0,0 +1,8 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI recipients create --help
+trace $CLI recipients delete --help
+trace $CLI recipients get --help
+trace $CLI recipients list --help
+trace $CLI recipients rotate-token --help
+trace $CLI recipients share-permissions --help
+trace $CLI recipients update --help
diff --git a/acceptance/help/cmd/workspace/registered-models/registered-models/output.txt b/acceptance/help/cmd/workspace/registered-models/registered-models/output.txt
new file mode 100644
index 000000000..88fbb4d89
--- /dev/null
+++ b/acceptance/help/cmd/workspace/registered-models/registered-models/output.txt
@@ -0,0 +1,208 @@
+
+>>> $CLI registered-models create --help
+Create a Registered Model.
+
+  Creates a new registered model in Unity Catalog.
+
+  File storage for model versions in the registered model will be located in the
+  default location which is specified by the parent schema, or the parent
+  catalog, or the Metastore.
+
+  For registered model creation to succeed, the user must satisfy the following
+  conditions: - The caller must be a metastore admin, or be the owner of the
+  parent catalog and schema, or have the **USE_CATALOG** privilege on the parent
+  catalog and the **USE_SCHEMA** privilege on the parent schema. - The caller
+  must have the **CREATE MODEL** or **CREATE FUNCTION** privilege on the parent
+  schema.
+
+  Arguments:
+    CATALOG_NAME: The name of the catalog where the schema and the registered model reside
+    SCHEMA_NAME: The name of the schema where the registered model resides
+    NAME: The name of the registered model
+
+Usage:
+  databricks registered-models create CATALOG_NAME SCHEMA_NAME NAME [flags]
+
+Flags:
+      --comment string            The comment attached to the registered model.
+  -h, --help                      help for create
+      --json JSON                 either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --storage-location string   The storage location on the cloud under which model version data files are stored.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI registered-models delete --help
+Delete a Registered Model.
+
+  Deletes a registered model and all its model versions from the specified
+  parent catalog and schema.
+
+  The caller must be a metastore admin or an owner of the registered model. For
+  the latter case, the caller must also be the owner or have the **USE_CATALOG**
+  privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent
+  schema.
+
+  Arguments:
+    FULL_NAME: The three-level (fully qualified) name of the registered model
+
+Usage:
+  databricks registered-models delete FULL_NAME [flags]
+
+Flags:
+  -h, --help   help for delete
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI registered-models delete-alias --help
+Delete a Registered Model Alias.
+
+  Deletes a registered model alias.
+
+  The caller must be a metastore admin or an owner of the registered model. For
+  the latter case, the caller must also be the owner or have the **USE_CATALOG**
+  privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent
+  schema.
+
+  Arguments:
+    FULL_NAME: The three-level (fully qualified) name of the registered model
+    ALIAS: The name of the alias
+
+Usage:
+  databricks registered-models delete-alias FULL_NAME ALIAS [flags]
+
+Flags:
+  -h, --help   help for delete-alias
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI registered-models get --help
+Get a Registered Model.
+
+  Get a registered model.
+
+  The caller must be a metastore admin or an owner of (or have the **EXECUTE**
+  privilege on) the registered model. For the latter case, the caller must also
+  be the owner or have the **USE_CATALOG** privilege on the parent catalog and
+  the **USE_SCHEMA** privilege on the parent schema.
+
+  Arguments:
+    FULL_NAME: The three-level (fully qualified) name of the registered model
+
+Usage:
+  databricks registered-models get FULL_NAME [flags]
+
+Flags:
+  -h, --help             help for get
+      --include-aliases  Whether to include registered model aliases in the response.
+      --include-browse   Whether to include registered models in the response for which the principal can only access selective metadata.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI registered-models list --help
+List Registered Models.
+
+  List registered models. You can list registered models under a particular
+  schema, or list all registered models in the current metastore.
+
+  The returned models are filtered based on the privileges of the calling user.
+  For example, the metastore admin is able to list all the registered models. A
+  regular user needs to be the owner or have the **EXECUTE** privilege on the
+  registered model to receive the registered models in the response. For the
+  latter case, the caller must also be the owner or have the **USE_CATALOG**
+  privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent
+  schema.
+
+  There is no guarantee of a specific ordering of the elements in the response.
+
+Usage:
+  databricks registered-models list [flags]
+
+Flags:
+      --catalog-name string   The identifier of the catalog under which to list registered models.
+  -h, --help                  help for list
+      --include-browse        Whether to include registered models in the response for which the principal can only access selective metadata.
+      --max-results int       Max number of registered models to return.
+      --page-token string     Opaque token to send for the next page of results (pagination).
+      --schema-name string    The identifier of the schema under which to list registered models.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI registered-models set-alias --help
+Set a Registered Model Alias.
+
+  Set an alias on the specified registered model.
+
+  The caller must be a metastore admin or an owner of the registered model. For
+  the latter case, the caller must also be the owner or have the **USE_CATALOG**
+  privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent
+  schema.
+
+  Arguments:
+    FULL_NAME: Full name of the registered model
+    ALIAS: The name of the alias
+    VERSION_NUM: The version number of the model version to which the alias points
+
+Usage:
+  databricks registered-models set-alias FULL_NAME ALIAS VERSION_NUM [flags]
+
+Flags:
+  -h, --help        help for set-alias
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI registered-models update --help
+Update a Registered Model.
+
+  Updates the specified registered model.
+
+  The caller must be a metastore admin or an owner of the registered model. For
+  the latter case, the caller must also be the owner or have the **USE_CATALOG**
+  privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent
+  schema.
+
+  Currently only the name, the owner or the comment of the registered model can
+  be updated.
+
+  Arguments:
+    FULL_NAME: The three-level (fully qualified) name of the registered model
+
+Usage:
+  databricks registered-models update FULL_NAME [flags]
+
+Flags:
+      --comment string    The comment attached to the registered model.
+  -h, --help              help for update
+      --json JSON         either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --new-name string   New name for the registered model.
+      --owner string      The identifier of the user who owns the registered model.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/registered-models/registered-models/script b/acceptance/help/cmd/workspace/registered-models/registered-models/script
new file mode 100755
index 000000000..4d98c52e9
--- /dev/null
+++ b/acceptance/help/cmd/workspace/registered-models/registered-models/script
@@ -0,0 +1,8 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI registered-models create --help
+trace $CLI registered-models delete --help
+trace $CLI registered-models delete-alias --help
+trace $CLI registered-models get --help
+trace $CLI registered-models list --help
+trace $CLI registered-models set-alias --help
+trace $CLI registered-models update --help
diff --git a/acceptance/help/cmd/workspace/repos/repos/output.txt b/acceptance/help/cmd/workspace/repos/repos/output.txt
new file mode 100644
index 000000000..78ef27a16
--- /dev/null
+++ b/acceptance/help/cmd/workspace/repos/repos/output.txt
@@ -0,0 +1,198 @@
+
+>>> $CLI repos create --help
+Create a repo.
+
+  Creates a repo in the workspace and links it to the remote Git repo specified.
+  Note that repos created programmatically must be linked to a remote Git repo,
+  unlike repos created in the browser.
+
+  Arguments:
+    URL: URL of the Git repository to be linked.
+    PROVIDER: Git provider. This field is case-insensitive. The available Git providers
+      are gitHub, bitbucketCloud, gitLab, azureDevOpsServices,
+      gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and
+      awsCodeCommit.
+
+Usage:
+  databricks repos create URL [PROVIDER] [flags]
+
+Flags:
+  -h, --help          help for create
+      --json JSON     either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --path string   Desired path for the repo in the workspace.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI repos delete --help
+Delete a repo.
+
+  Deletes the specified repo.
+
+  Arguments:
+    REPO_ID: The ID for the corresponding repo to delete.
+
+Usage:
+  databricks repos delete REPO_ID_OR_PATH [flags]
+
+Flags:
+  -h, --help   help for delete
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI repos get --help
+Get a repo.
+
+  Returns the repo with the given repo ID.
+
+  Arguments:
+    REPO_ID: ID of the Git folder (repo) object in the workspace.
+
+Usage:
+  databricks repos get REPO_ID_OR_PATH [flags]
+
+Flags:
+  -h, --help   help for get
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI repos get-permission-levels --help
+Get repo permission levels.
+
+  Gets the permission levels that a user can have on an object.
+
+  Arguments:
+    REPO_ID: The repo for which to get or manage permissions.
+
+Usage:
+  databricks repos get-permission-levels REPO_ID [flags]
+
+Flags:
+  -h, --help   help for get-permission-levels
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI repos get-permissions --help
+Get repo permissions.
+
+  Gets the permissions of a repo. Repos can inherit permissions from their root
+  object.
+
+  Arguments:
+    REPO_ID: The repo for which to get or manage permissions.
+
+Usage:
+  databricks repos get-permissions REPO_ID [flags]
+
+Flags:
+  -h, --help   help for get-permissions
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI repos list --help
+Get repos.
+
+  Returns repos that the calling user has Manage permissions on. Use
+  next_page_token to iterate through additional pages.
+
+Usage:
+  databricks repos list [flags]
+
+Flags:
+  -h, --help                     help for list
+      --next-page-token string   Token used to get the next page of results.
+      --path-prefix string       Filters repos that have paths starting with the given path prefix.
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI repos set-permissions --help +Set repo permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. + + Arguments: + REPO_ID: The repo for which to get or manage permissions. + +Usage: + databricks repos set-permissions REPO_ID [flags] + +Flags: + -h, --help help for set-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI repos update --help +Update a repo. + + Updates the repo to a different branch or tag, or updates the repo to the + latest commit on the same branch. + + Arguments: + REPO_ID: ID of the Git folder (repo) object in the workspace. + +Usage: + databricks repos update REPO_ID_OR_PATH [flags] + +Flags: + --branch string Branch that the local version of the repo is checked out to. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --tag string Tag that the local version of the repo is checked out to. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI repos update-permissions --help +Update repo permissions. + + Updates the permissions on a repo. Repos can inherit permissions from their + root object. + + Arguments: + REPO_ID: The repo for which to get or manage permissions. + +Usage: + databricks repos update-permissions REPO_ID [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/repos/repos/script b/acceptance/help/cmd/workspace/repos/repos/script new file mode 100755 index 000000000..b1900c90d --- /dev/null +++ b/acceptance/help/cmd/workspace/repos/repos/script @@ -0,0 +1,10 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI repos create --help +trace $CLI repos delete --help +trace $CLI repos get --help +trace $CLI repos get-permission-levels --help +trace $CLI repos get-permissions --help +trace $CLI repos list --help +trace $CLI repos set-permissions --help +trace $CLI repos update --help +trace $CLI repos update-permissions --help diff --git a/acceptance/help/cmd/workspace/resource-quotas/resource-quotas/output.txt b/acceptance/help/cmd/workspace/resource-quotas/resource-quotas/output.txt new file mode 100644 index 000000000..858a99ef1 --- /dev/null +++ b/acceptance/help/cmd/workspace/resource-quotas/resource-quotas/output.txt @@ -0,0 +1,48 @@ + +>>> $CLI resource-quotas get-quota --help +Get information for a single resource quota. 
+ + The GetQuota API returns usage information for a single resource quota, + defined as a child-parent pair. This API also refreshes the quota count if it + is out of date. Refreshes are triggered asynchronously. The updated count + might not be returned in the first call. + + Arguments: + PARENT_SECURABLE_TYPE: Securable type of the quota parent. + PARENT_FULL_NAME: Full name of the parent resource. Provide the metastore ID if the parent + is a metastore. + QUOTA_NAME: Name of the quota. Follows the pattern of the quota type, with "-quota" + added as a suffix. + +Usage: + databricks resource-quotas get-quota PARENT_SECURABLE_TYPE PARENT_FULL_NAME QUOTA_NAME [flags] + +Flags: + -h, --help help for get-quota + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI resource-quotas list-quotas --help +List all resource quotas under a metastore. + + ListQuotas returns all quota values under the metastore. There are no SLAs on + the freshness of the counts returned. This API does not trigger a refresh of + quota counts. + +Usage: + databricks resource-quotas list-quotas [flags] + +Flags: + -h, --help help for list-quotas + --max-results int The number of quotas to return. + --page-token string Opaque token for the next page of results. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/resource-quotas/resource-quotas/script b/acceptance/help/cmd/workspace/resource-quotas/resource-quotas/script new file mode 100755 index 000000000..a4fd5d51f --- /dev/null +++ b/acceptance/help/cmd/workspace/resource-quotas/resource-quotas/script @@ -0,0 +1,3 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI resource-quotas get-quota --help +trace $CLI resource-quotas list-quotas --help diff --git a/acceptance/help/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins/output.txt b/acceptance/help/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins/output.txt new file mode 100644 index 000000000..fc92bc0fa --- /dev/null +++ b/acceptance/help/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI restrict-workspace-admins delete --help +Error: unknown command "restrict-workspace-admins" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins/script b/acceptance/help/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins/script new file mode 100755 index 000000000..4795a2021 --- /dev/null +++ b/acceptance/help/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI restrict-workspace-admins delete --help +trace $CLI restrict-workspace-admins get --help +trace $CLI restrict-workspace-admins update --help diff --git a/acceptance/help/cmd/workspace/schemas/schemas/output.txt b/acceptance/help/cmd/workspace/schemas/schemas/output.txt new file mode 100644 index 000000000..94a9daf26 --- /dev/null +++ b/acceptance/help/cmd/workspace/schemas/schemas/output.txt @@ -0,0 +1,127 @@ + +>>> $CLI schemas create --help +Create a schema. 
+
+ Creates a new schema for a catalog in the metastore. The caller must be a
+ metastore admin, or have the **CREATE_SCHEMA** privilege in the parent
+ catalog.
+
+ Arguments:
+ NAME: Name of schema, relative to parent catalog.
+ CATALOG_NAME: Name of parent catalog.
+
+Usage:
+ databricks schemas create NAME CATALOG_NAME [flags]
+
+Flags:
+ --comment string User-provided free-form text description.
+ -h, --help help for create
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --storage-root string Storage root URL for managed tables within schema.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI schemas delete --help
+Delete a schema.
+
+ Deletes the specified schema from the parent catalog. The caller must be the
+ owner of the schema or an owner of the parent catalog.
+
+ Arguments:
+ FULL_NAME: Full name of the schema.
+
+Usage:
+ databricks schemas delete FULL_NAME [flags]
+
+Flags:
+ --force Force deletion even if the schema is not empty.
+ -h, --help help for delete
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI schemas get --help
+Get a schema.
+
+ Gets the specified schema within the metastore. The caller must be a metastore
+ admin, the owner of the schema, or a user that has the **USE_SCHEMA**
+ privilege on the schema.
+
+ Arguments:
+ FULL_NAME: Full name of the schema.
+
+Usage:
+ databricks schemas get FULL_NAME [flags]
+
+Flags:
+ -h, --help help for get
+ --include-browse Whether to include schemas in the response for which the principal can only access selective metadata.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI schemas list --help
+List schemas.
+
+ Gets an array of schemas for a catalog in the metastore. If the caller is the
+ metastore admin or the owner of the parent catalog, all schemas for the
+ catalog will be retrieved. Otherwise, only schemas owned by the caller (or for
+ which the caller has the **USE_SCHEMA** privilege) will be retrieved. There is
+ no guarantee of a specific ordering of the elements in the array.
+
+ Arguments:
+ CATALOG_NAME: Parent catalog for schemas of interest.
+
+Usage:
+ databricks schemas list CATALOG_NAME [flags]
+
+Flags:
+ -h, --help help for list
+ --include-browse Whether to include schemas in the response for which the principal can only access selective metadata.
+ --max-results int Maximum number of schemas to return.
+ --page-token string Opaque pagination token to go to next page based on previous query.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI schemas update --help
+Update a schema.
+
+ Updates a schema for a catalog. The caller must be the owner of the schema or
+ a metastore admin. If the caller is a metastore admin, only the __owner__
+ field can be changed in the update.
If the __name__ field must be updated, the + caller must be a metastore admin or have the **CREATE_SCHEMA** privilege on + the parent catalog. + + Arguments: + FULL_NAME: Full name of the schema. + +Usage: + databricks schemas update FULL_NAME [flags] + +Flags: + --comment string User-provided free-form text description. + --enable-predictive-optimization EnablePredictiveOptimization Whether predictive optimization should be enabled for this object and objects under it. Supported values: [DISABLE, ENABLE, INHERIT] + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --new-name string New name for the schema. + --owner string Username of current owner of schema. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/schemas/schemas/script b/acceptance/help/cmd/workspace/schemas/schemas/script new file mode 100755 index 000000000..8549e504e --- /dev/null +++ b/acceptance/help/cmd/workspace/schemas/schemas/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI schemas create --help +trace $CLI schemas delete --help +trace $CLI schemas get --help +trace $CLI schemas list --help +trace $CLI schemas update --help diff --git a/acceptance/help/cmd/workspace/secrets/secrets/output.txt b/acceptance/help/cmd/workspace/secrets/secrets/output.txt new file mode 100644 index 000000000..b5c02a4ae --- /dev/null +++ b/acceptance/help/cmd/workspace/secrets/secrets/output.txt @@ -0,0 +1,317 @@ + +>>> $CLI secrets create-scope --help +Create a new secret scope. + + The scope name must consist of alphanumeric characters, dashes, underscores, + and periods, and may not exceed 128 characters. + + Arguments: + SCOPE: Scope name requested by the user. Scope names are unique. + +Usage: + databricks secrets create-scope SCOPE [flags] + +Flags: + -h, --help help for create-scope + --initial-manage-principal string The principal that is initially granted MANAGE permission to the created scope. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --scope-backend-type ScopeBackendType The backend type the scope will be created with. Supported values: [AZURE_KEYVAULT, DATABRICKS] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI secrets delete-acl --help +Delete an ACL. + + Deletes the given ACL on the given scope. + + Users must have the MANAGE permission to invoke this API. Throws + RESOURCE_DOES_NOT_EXIST if no such secret scope, principal, or ACL exists. + Throws PERMISSION_DENIED if the user does not have permission to make this + API call. + + Arguments: + SCOPE: The name of the scope to remove permissions from. + PRINCIPAL: The principal to remove an existing ACL from. 
+ +Usage: + databricks secrets delete-acl SCOPE PRINCIPAL [flags] + +Flags: + -h, --help help for delete-acl + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI secrets delete-scope --help +Delete a secret scope. + + Deletes a secret scope. + + Throws RESOURCE_DOES_NOT_EXIST if the scope does not exist. Throws + PERMISSION_DENIED if the user does not have permission to make this API + call. + + Arguments: + SCOPE: Name of the scope to delete. + +Usage: + databricks secrets delete-scope SCOPE [flags] + +Flags: + -h, --help help for delete-scope + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI secrets delete-secret --help +Delete a secret. + + Deletes the secret stored in this secret scope. You must have WRITE or + MANAGE permission on the secret scope. + + Throws RESOURCE_DOES_NOT_EXIST if no such secret scope or secret exists. + Throws PERMISSION_DENIED if the user does not have permission to make this + API call. + + Arguments: + SCOPE: The name of the scope that contains the secret to delete. + KEY: Name of the secret to delete. + +Usage: + databricks secrets delete-secret SCOPE KEY [flags] + +Flags: + -h, --help help for delete-secret + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI secrets get-acl --help +Get secret ACL details. + + Gets the details about the given ACL, such as the group and permission. Users + must have the MANAGE permission to invoke this API. + + Throws RESOURCE_DOES_NOT_EXIST if no such secret scope exists. Throws + PERMISSION_DENIED if the user does not have permission to make this API + call. + + Arguments: + SCOPE: The name of the scope to fetch ACL information from. + PRINCIPAL: The principal to fetch ACL information for. + +Usage: + databricks secrets get-acl SCOPE PRINCIPAL [flags] + +Flags: + -h, --help help for get-acl + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI secrets get-secret --help +Get a secret. + + Gets the bytes representation of a secret value for the specified scope and + key. + + Users need the READ permission to make this call. + + Note that the secret value returned is in bytes. The interpretation of the + bytes is determined by the caller in DBUtils and the type the data is decoded + into. + + Throws PERMISSION_DENIED if the user does not have permission to make this + API call. Throws RESOURCE_DOES_NOT_EXIST if no such secret or secret scope + exists. + + Arguments: + SCOPE: The name of the scope to fetch secret information from. + KEY: The key to fetch secret for. 
+ +Usage: + databricks secrets get-secret SCOPE KEY [flags] + +Flags: + -h, --help help for get-secret + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI secrets list-acls --help +Lists ACLs. + + List the ACLs for a given secret scope. Users must have the MANAGE + permission to invoke this API. + + Throws RESOURCE_DOES_NOT_EXIST if no such secret scope exists. Throws + PERMISSION_DENIED if the user does not have permission to make this API + call. + + Arguments: + SCOPE: The name of the scope to fetch ACL information from. + +Usage: + databricks secrets list-acls SCOPE [flags] + +Flags: + -h, --help help for list-acls + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI secrets list-scopes --help +List all scopes. + + Lists all secret scopes available in the workspace. + + Throws PERMISSION_DENIED if the user does not have permission to make this + API call. + +Usage: + databricks secrets list-scopes [flags] + +Flags: + -h, --help help for list-scopes + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI secrets list-secrets --help +List secret keys. + + Lists the secret keys that are stored at this scope. This is a metadata-only + operation; secret data cannot be retrieved using this API. Users need the READ + permission to make this call. + + The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws + RESOURCE_DOES_NOT_EXIST if no such secret scope exists. Throws + PERMISSION_DENIED if the user does not have permission to make this API + call. + + Arguments: + SCOPE: The name of the scope to list secrets within. + +Usage: + databricks secrets list-secrets SCOPE [flags] + +Flags: + -h, --help help for list-secrets + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI secrets put-acl --help +Create/update an ACL. + + Creates or overwrites the Access Control List (ACL) associated with the given + principal (user or group) on the specified scope point. + + In general, a user or group will use the most powerful permission available to + them, and permissions are ordered as follows: + + * MANAGE - Allowed to change ACLs, and read and write to this secret scope. + * WRITE - Allowed to read and write to this secret scope. * READ - Allowed + to read this secret scope and list what secrets are available. + + Note that in general, secret values can only be read from within a command on + a cluster (for example, through a notebook). There is no API to read the + actual secret value material outside of a cluster. However, the user's + permission will be applied based on who is executing the command, and they + must have at least READ permission. + + Users must have the MANAGE permission to invoke this API. + + The principal is a user or group name corresponding to an existing Databricks + principal to be granted or revoked access. + + Throws RESOURCE_DOES_NOT_EXIST if no such secret scope exists. 
Throws
+ RESOURCE_ALREADY_EXISTS if a permission for the principal already exists.
+ Throws INVALID_PARAMETER_VALUE if the permission or principal is invalid.
+ Throws PERMISSION_DENIED if the user does not have permission to make this
+ API call.
+
+ Arguments:
+ SCOPE: The name of the scope to apply permissions to.
+ PRINCIPAL: The principal to which the permission is applied.
+ PERMISSION: The permission level applied to the principal.
+
+Usage:
+ databricks secrets put-acl SCOPE PRINCIPAL PERMISSION [flags]
+
+Flags:
+ -h, --help help for put-acl
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI secrets put-secret --help
+Add a secret.
+
+ Inserts a secret under the provided scope with the given name. If a secret
+ already exists with the same name, this command overwrites the existing
+ secret's value. The server encrypts the secret using the secret scope's
+ encryption settings before storing it.
+
+ You must have WRITE or MANAGE permission on the secret scope. The secret
+ key must consist of alphanumeric characters, dashes, underscores, and periods,
+ and cannot exceed 128 characters. The maximum allowed secret value size is 128
+ KB. The maximum number of secrets in a given scope is 1000.
+
+ The arguments "string-value" or "bytes-value" specify the type of the secret,
+ which will determine the value returned when the secret value is requested.
+
+ You can specify the secret value in one of three ways:
+ * Specify the value as a string using the --string-value flag.
+ * Input the secret when prompted interactively (single-line secrets).
+ * Pass the secret via standard input (multi-line secrets).
+
+Usage:
+ databricks secrets put-secret SCOPE KEY [flags]
+
+Flags:
+ --bytes-value string If specified, value will be stored as bytes.
+ -h, --help help for put-secret
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --string-value string If specified, note that the value will be stored in UTF-8 (MB4) form.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/secrets/secrets/script b/acceptance/help/cmd/workspace/secrets/secrets/script
new file mode 100755
index 000000000..15fb89ab2
--- /dev/null
+++ b/acceptance/help/cmd/workspace/secrets/secrets/script
@@ -0,0 +1,12 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
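+# Editorial usage sketch, not generated output: the put-secret help above
+# documents the --string-value input mode; the scope and key names below are
+# hypothetical examples, not part of this test.
+#   $CLI secrets create-scope my-scope
+#   $CLI secrets put-secret my-scope my-key --string-value "s3cr3t"
+#   $CLI secrets get-secret my-scope my-key
+#   $CLI secrets delete-scope my-scope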
+trace $CLI secrets create-scope --help
+trace $CLI secrets delete-acl --help
+trace $CLI secrets delete-scope --help
+trace $CLI secrets delete-secret --help
+trace $CLI secrets get-acl --help
+trace $CLI secrets get-secret --help
+trace $CLI secrets list-acls --help
+trace $CLI secrets list-scopes --help
+trace $CLI secrets list-secrets --help
+trace $CLI secrets put-acl --help
+trace $CLI secrets put-secret --help
diff --git a/acceptance/help/cmd/workspace/service-principals/service-principals/output.txt b/acceptance/help/cmd/workspace/service-principals/service-principals/output.txt
new file mode 100644
index 000000000..2a1f3e2b1
--- /dev/null
+++ b/acceptance/help/cmd/workspace/service-principals/service-principals/output.txt
@@ -0,0 +1,138 @@
+
+>>> $CLI service-principals create --help
+Create a service principal.
+
+ Creates a new service principal in the Databricks workspace.
+
+Usage:
+ databricks service-principals create [flags]
+
+Flags:
+ --active If this user is active.
+ --application-id string UUID relating to the service principal.
+ --display-name string String that represents a concatenation of given and family names.
+ --external-id string
+ -h, --help help for create
+ --id string Databricks service principal ID.
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI service-principals delete --help
+Delete a service principal.
+
+ Deletes a single service principal in the Databricks workspace.
+
+ Arguments:
+ ID: Unique ID for a service principal in the Databricks workspace.
+
+Usage:
+ databricks service-principals delete ID [flags]
+
+Flags:
+ -h, --help help for delete
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI service-principals get --help
+Get service principal details.
+
+ Gets the details for a single service principal defined in the Databricks
+ workspace.
+
+ Arguments:
+ ID: Unique ID for a service principal in the Databricks workspace.
+
+Usage:
+ databricks service-principals get ID [flags]
+
+Flags:
+ -h, --help help for get
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI service-principals list --help
+List service principals.
+
+ Gets the set of service principals associated with a Databricks workspace.
+
+Usage:
+ databricks service-principals list [flags]
+
+Flags:
+ --attributes string Comma-separated list of attributes to return in response.
+ --count int Desired number of results per page.
+ --excluded-attributes string Comma-separated list of attributes to exclude in response.
+ --filter string Query by which the results have to be filtered.
+ -h, --help help for list
+ --sort-by string Attribute to sort the results.
+ --sort-order ListSortOrder The order to sort the results. Supported values: [ascending, descending]
+ --start-index int Specifies the index of the first result.
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI service-principals patch --help +Update service principal details. + + Partially updates the details of a single service principal in the Databricks + workspace. + + Arguments: + ID: Unique ID for a service principal in the Databricks workspace. + +Usage: + databricks service-principals patch ID [flags] + +Flags: + -h, --help help for patch + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI service-principals update --help +Replace service principal. + + Updates the details of a single service principal. + + This action replaces the existing service principal with the same name. + + Arguments: + ID: Databricks service principal ID. + +Usage: + databricks service-principals update ID [flags] + +Flags: + --active If this user is active. + --application-id string UUID relating to the service principal. + --display-name string String that represents a concatenation of given and family names. + --external-id string + -h, --help help for update + --id string Databricks service principal ID. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/service-principals/service-principals/script b/acceptance/help/cmd/workspace/service-principals/service-principals/script new file mode 100755 index 000000000..57cbf4c04 --- /dev/null +++ b/acceptance/help/cmd/workspace/service-principals/service-principals/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI service-principals create --help +trace $CLI service-principals delete --help +trace $CLI service-principals get --help +trace $CLI service-principals list --help +trace $CLI service-principals patch --help +trace $CLI service-principals update --help diff --git a/acceptance/help/cmd/workspace/serving-endpoints-data-plane/serving-endpoints-data-plane/output.txt b/acceptance/help/cmd/workspace/serving-endpoints-data-plane/serving-endpoints-data-plane/output.txt new file mode 100644 index 000000000..040823128 --- /dev/null +++ b/acceptance/help/cmd/workspace/serving-endpoints-data-plane/serving-endpoints-data-plane/output.txt @@ -0,0 +1,5 @@ + +>>> $CLI serving-endpoints-data-plane query --help +Error: unknown command "serving-endpoints-data-plane" for "databricks" + +Exit code: 1 diff --git a/acceptance/help/cmd/workspace/serving-endpoints-data-plane/serving-endpoints-data-plane/script b/acceptance/help/cmd/workspace/serving-endpoints-data-plane/serving-endpoints-data-plane/script new file mode 100755 index 000000000..667264654 --- /dev/null +++ b/acceptance/help/cmd/workspace/serving-endpoints-data-plane/serving-endpoints-data-plane/script @@ -0,0 +1,2 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+trace $CLI serving-endpoints-data-plane query --help diff --git a/acceptance/help/cmd/workspace/serving-endpoints/serving-endpoints/output.txt b/acceptance/help/cmd/workspace/serving-endpoints/serving-endpoints/output.txt new file mode 100644 index 000000000..683124c60 --- /dev/null +++ b/acceptance/help/cmd/workspace/serving-endpoints/serving-endpoints/output.txt @@ -0,0 +1,394 @@ + +>>> $CLI serving-endpoints build-logs --help +Get build logs for a served model. + + Retrieves the build logs associated with the provided served model. + + Arguments: + NAME: The name of the serving endpoint that the served model belongs to. This + field is required. + SERVED_MODEL_NAME: The name of the served model that build logs will be retrieved for. This + field is required. + +Usage: + databricks serving-endpoints build-logs NAME SERVED_MODEL_NAME [flags] + +Flags: + -h, --help help for build-logs + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI serving-endpoints create --help +Create a new serving endpoint. + + Arguments: + NAME: The name of the serving endpoint. This field is required and must be + unique across a Databricks workspace. An endpoint name can consist of + alphanumeric characters, dashes, and underscores. + +Usage: + databricks serving-endpoints create NAME [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach NOT_UPDATING state + --route-optimized Enable route optimization for the serving endpoint. + --timeout duration maximum amount of time to reach NOT_UPDATING state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI serving-endpoints delete --help +Delete a serving endpoint. + +Usage: + databricks serving-endpoints delete NAME [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI serving-endpoints export-metrics --help +Get metrics of a serving endpoint. + + Retrieves the metrics associated with the provided serving endpoint in either + Prometheus or OpenMetrics exposition format. + + Arguments: + NAME: The name of the serving endpoint to retrieve metrics for. This field is + required. + +Usage: + databricks serving-endpoints export-metrics NAME [flags] + +Flags: + -h, --help help for export-metrics + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI serving-endpoints get --help +Get a single serving endpoint. + + Retrieves the details for a single serving endpoint. + + Arguments: + NAME: The name of the serving endpoint. This field is required. 
+
+Usage:
+ databricks serving-endpoints get NAME [flags]
+
+Flags:
+ -h, --help help for get
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI serving-endpoints get-open-api --help
+Get the schema for a serving endpoint.
+
+ Get the query schema of the serving endpoint in OpenAPI format. The schema
+ contains information for the supported paths, input and output format and
+ datatypes.
+
+ Arguments:
+ NAME: The name of the serving endpoint that the served model belongs to. This
+ field is required.
+
+Usage:
+ databricks serving-endpoints get-open-api NAME [flags]
+
+Flags:
+ -h, --help help for get-open-api
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI serving-endpoints get-permission-levels --help
+Get serving endpoint permission levels.
+
+ Gets the permission levels that a user can have on an object.
+
+ Arguments:
+ SERVING_ENDPOINT_ID: The serving endpoint for which to get or manage permissions.
+
+Usage:
+ databricks serving-endpoints get-permission-levels SERVING_ENDPOINT_ID [flags]
+
+Flags:
+ -h, --help help for get-permission-levels
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI serving-endpoints get-permissions --help
+Get serving endpoint permissions.
+
+ Gets the permissions of a serving endpoint. Serving endpoints can inherit
+ permissions from their root object.
+
+ Arguments:
+ SERVING_ENDPOINT_ID: The serving endpoint for which to get or manage permissions.
+
+Usage:
+ databricks serving-endpoints get-permissions SERVING_ENDPOINT_ID [flags]
+
+Flags:
+ -h, --help help for get-permissions
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI serving-endpoints http-request --help
+Make an external services call using the credentials stored in a UC Connection.
+
+ Arguments:
+ CONNECTION_NAME: The connection name to use. This is required to identify the external
+ connection.
+ METHOD: The HTTP method to use (e.g., 'GET', 'POST').
+ PATH: The relative path for the API endpoint. This is required.
+
+Usage:
+ databricks serving-endpoints http-request CONNECTION_NAME METHOD PATH [flags]
+
+Flags:
+ --headers string Additional headers for the request.
+ -h, --help help for http-request
+ --json string The JSON payload to send in the request body.
+ --params string Query parameters for the request.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI serving-endpoints list --help
+Get all serving endpoints.
+
+Usage:
+ databricks serving-endpoints list [flags]
+
+Flags:
+ -h, --help help for list
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI serving-endpoints logs --help
+Get the latest logs for a served model.
+
+ Retrieves the service logs associated with the provided served model.
+
+ Arguments:
+ NAME: The name of the serving endpoint that the served model belongs to. This
+ field is required.
+ SERVED_MODEL_NAME: The name of the served model that logs will be retrieved for. This field
+ is required.
+
+Usage:
+ databricks serving-endpoints logs NAME SERVED_MODEL_NAME [flags]
+
+Flags:
+ -h, --help help for logs
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI serving-endpoints patch --help
+Update tags of a serving endpoint.
+
+ Used to batch add and delete tags from a serving endpoint with a single API
+ call.
+
+ Arguments:
+ NAME: The name of the serving endpoint whose tags to patch. This field is
+ required.
+
+Usage:
+ databricks serving-endpoints patch NAME [flags]
+
+Flags:
+ -h, --help help for patch
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI serving-endpoints put --help
+Update rate limits of a serving endpoint.
+
+ Used to update the rate limits of a serving endpoint. NOTE: Only foundation
+ model endpoints are currently supported. For external models, use AI Gateway
+ to manage rate limits.
+
+ Arguments:
+ NAME: The name of the serving endpoint whose rate limits are being updated. This
+ field is required.
+
+Usage:
+ databricks serving-endpoints put NAME [flags]
+
+Flags:
+ -h, --help help for put
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI serving-endpoints put-ai-gateway --help
+Update AI Gateway of a serving endpoint.
+
+ Used to update the AI Gateway of a serving endpoint. NOTE: Only external model
+ and provisioned throughput endpoints are currently supported.
+
+ Arguments:
+ NAME: The name of the serving endpoint whose AI Gateway is being updated. This
+ field is required.
+
+Usage:
+ databricks serving-endpoints put-ai-gateway NAME [flags]
+
+Flags:
+ -h, --help help for put-ai-gateway
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI serving-endpoints query --help
+Query a serving endpoint.
+
+ Arguments:
+ NAME: The name of the serving endpoint. This field is required.
+ +Usage: + databricks serving-endpoints query NAME [flags] + +Flags: + -h, --help help for query + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --max-tokens int The max tokens field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. + --n int The n (number of candidates) field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. + --stream The stream field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. + --temperature float The temperature field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI serving-endpoints set-permissions --help +Set serving endpoint permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. + + Arguments: + SERVING_ENDPOINT_ID: The serving endpoint for which to get or manage permissions. + +Usage: + databricks serving-endpoints set-permissions SERVING_ENDPOINT_ID [flags] + +Flags: + -h, --help help for set-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI serving-endpoints update-config --help +Update config of a serving endpoint. + + Updates any combination of the serving endpoint's served entities, the compute + configuration of those served entities, and the endpoint's traffic config. An + endpoint that already has an update in progress can not be updated until the + current update completes or fails. + + Arguments: + NAME: The name of the serving endpoint to update. This field is required. + +Usage: + databricks serving-endpoints update-config NAME [flags] + +Flags: + -h, --help help for update-config + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach NOT_UPDATING state + --timeout duration maximum amount of time to reach NOT_UPDATING state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI serving-endpoints update-permissions --help +Update serving endpoint permissions. + + Updates the permissions on a serving endpoint. Serving endpoints can inherit + permissions from their root object. + + Arguments: + SERVING_ENDPOINT_ID: The serving endpoint for which to get or manage permissions. 
+ +Usage: + databricks serving-endpoints update-permissions SERVING_ENDPOINT_ID [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/serving-endpoints/serving-endpoints/script b/acceptance/help/cmd/workspace/serving-endpoints/serving-endpoints/script new file mode 100755 index 000000000..d74f61ed3 --- /dev/null +++ b/acceptance/help/cmd/workspace/serving-endpoints/serving-endpoints/script @@ -0,0 +1,19 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI serving-endpoints build-logs --help +trace $CLI serving-endpoints create --help +trace $CLI serving-endpoints delete --help +trace $CLI serving-endpoints export-metrics --help +trace $CLI serving-endpoints get --help +trace $CLI serving-endpoints get-open-api --help +trace $CLI serving-endpoints get-permission-levels --help +trace $CLI serving-endpoints get-permissions --help +trace $CLI serving-endpoints http-request --help +trace $CLI serving-endpoints list --help +trace $CLI serving-endpoints logs --help +trace $CLI serving-endpoints patch --help +trace $CLI serving-endpoints put --help +trace $CLI serving-endpoints put-ai-gateway --help +trace $CLI serving-endpoints query --help +trace $CLI serving-endpoints set-permissions --help +trace $CLI serving-endpoints update-config --help +trace $CLI serving-endpoints update-permissions --help diff --git a/acceptance/help/cmd/workspace/settings/settings/output.txt b/acceptance/help/cmd/workspace/settings/settings/output.txt new file mode 100644 index 000000000..7901d464d --- /dev/null +++ b/acceptance/help/cmd/workspace/settings/settings/output.txt @@ -0,0 +1,3 @@ +script: line 65: syntax error near unexpected token `)' + +Exit code: 2 diff --git a/acceptance/help/cmd/workspace/settings/settings/script b/acceptance/help/cmd/workspace/settings/settings/script new file mode 100755 index 000000000..71380e875 --- /dev/null +++ b/acceptance/help/cmd/workspace/settings/settings/script @@ -0,0 +1 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. diff --git a/acceptance/help/cmd/workspace/shares/shares/output.txt b/acceptance/help/cmd/workspace/shares/shares/output.txt new file mode 100644 index 000000000..a15ef25bf --- /dev/null +++ b/acceptance/help/cmd/workspace/shares/shares/output.txt @@ -0,0 +1,180 @@ + +>>> $CLI shares create --help +Create a share. + + Creates a new share for data objects. Data objects can be added after creation + with **update**. The caller must be a metastore admin or have the + **CREATE_SHARE** privilege on the metastore. + + Arguments: + NAME: Name of the share. + +Usage: + databricks shares create NAME [flags] + +Flags: + --comment string User-provided free-form text description. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --storage-root string Storage root URL for the share. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI shares delete --help +Delete a share. 
+ + Deletes a data object share from the metastore. The caller must be an owner of + the share. + + Arguments: + NAME: The name of the share. + +Usage: + databricks shares delete NAME [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI shares get --help +Get a share. + + Gets a data object share from the metastore. The caller must be a metastore + admin or the owner of the share. + + Arguments: + NAME: The name of the share. + +Usage: + databricks shares get NAME [flags] + +Flags: + -h, --help help for get + --include-shared-data Query for data to include in the share. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI shares list --help +List shares. + + Gets an array of data object shares from the metastore. The caller must be a + metastore admin or the owner of the share. There is no guarantee of a specific + ordering of the elements in the array. + +Usage: + databricks shares list [flags] + +Flags: + -h, --help help for list + --max-results int Maximum number of shares to return. + --page-token string Opaque pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI shares share-permissions --help +Get permissions. + + Gets the permissions for a data share from the metastore. The caller must be a + metastore admin or the owner of the share. + + Arguments: + NAME: The name of the share. + +Usage: + databricks shares share-permissions NAME [flags] + +Flags: + -h, --help help for share-permissions + --max-results int Maximum number of permissions to return. + --page-token string Opaque pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI shares update --help +Update a share. + + Updates the share with the changes and data objects in the request. The caller + must be the owner of the share or a metastore admin. + + When the caller is a metastore admin, only the __owner__ field can be updated. + + In the case that the share name is changed, **updateShare** requires that the + caller is both the share owner and a metastore admin. + + If there are notebook files in the share, the __storage_root__ field cannot be + updated. + + For each table that is added through this method, the share owner must also + have **SELECT** privilege on the table. This privilege must be maintained + indefinitely for recipients to be able to access the table. Typically, you + should use a group as the share owner. + + Table removals through **update** do not require additional privileges. + + Arguments: + NAME: The name of the share. + +Usage: + databricks shares update NAME [flags] + +Flags: + --comment string User-provided free-form text description. 
+ -h, --help help for update
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --new-name string New name for the share.
+ --owner string Username of current owner of share.
+ --storage-root string Storage root URL for the share.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
+
+>>> $CLI shares update-permissions --help
+Update permissions.
+
+ Updates the permissions for a data share in the metastore. The caller must be
+ a metastore admin or an owner of the share.
+
+ For new recipient grants, the user must also be the owner of the recipients.
+ Recipient revocations do not require additional privileges.
+
+ Arguments:
+ NAME: The name of the share.
+
+Usage:
+ databricks shares update-permissions NAME [flags]
+
+Flags:
+ -h, --help help for update-permissions
+ --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+ --max-results int Maximum number of permissions to return.
+ --page-token string Opaque pagination token to go to next page based on previous query.
+
+Global Flags:
+ --debug enable debug logging
+ -o, --output type output type: text or json (default text)
+ -p, --profile string ~/.databrickscfg profile
+ -t, --target string bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/shares/shares/script b/acceptance/help/cmd/workspace/shares/shares/script
new file mode 100755
index 000000000..4bef5f573
--- /dev/null
+++ b/acceptance/help/cmd/workspace/shares/shares/script
@@ -0,0 +1,8 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI shares create --help
+trace $CLI shares delete --help
+trace $CLI shares get --help
+trace $CLI shares list --help
+trace $CLI shares share-permissions --help
+trace $CLI shares update --help
+trace $CLI shares update-permissions --help
diff --git a/acceptance/help/cmd/workspace/statement-execution/statement-execution/output.txt b/acceptance/help/cmd/workspace/statement-execution/statement-execution/output.txt
new file mode 100644
index 000000000..bf4f9504b
--- /dev/null
+++ b/acceptance/help/cmd/workspace/statement-execution/statement-execution/output.txt
@@ -0,0 +1,5 @@
+
+>>> $CLI statement-execution cancel-execution --help
+Error: unknown command "statement-execution" for "databricks"
+
+Exit code: 1
diff --git a/acceptance/help/cmd/workspace/statement-execution/statement-execution/script b/acceptance/help/cmd/workspace/statement-execution/statement-execution/script
new file mode 100755
index 000000000..a778d1fc5
--- /dev/null
+++ b/acceptance/help/cmd/workspace/statement-execution/statement-execution/script
@@ -0,0 +1,5 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI statement-execution cancel-execution --help +trace $CLI statement-execution execute-statement --help +trace $CLI statement-execution get-statement --help +trace $CLI statement-execution get-statement-result-chunk-n --help diff --git a/acceptance/help/cmd/workspace/storage-credentials/storage-credentials/output.txt b/acceptance/help/cmd/workspace/storage-credentials/storage-credentials/output.txt new file mode 100644 index 000000000..d5b3783bd --- /dev/null +++ b/acceptance/help/cmd/workspace/storage-credentials/storage-credentials/output.txt @@ -0,0 +1,152 @@ + +>>> $CLI storage-credentials create --help +Create a storage credential. + + Creates a new storage credential. + + Arguments: + NAME: The credential name. The name must be unique within the metastore. + +Usage: + databricks storage-credentials create NAME [flags] + +Flags: + --comment string Comment associated with the credential. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --read-only Whether the storage credential is only usable for read operations. + --skip-validation Supplying true to this argument skips validation of the created credential. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI storage-credentials delete --help +Delete a credential. + + Deletes a storage credential from the metastore. The caller must be an owner + of the storage credential. + + Arguments: + NAME: Name of the storage credential. + +Usage: + databricks storage-credentials delete NAME [flags] + +Flags: + --force Force deletion even if there are dependent external locations or external tables. + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI storage-credentials get --help +Get a credential. + + Gets a storage credential from the metastore. The caller must be a metastore + admin, the owner of the storage credential, or have some permission on the + storage credential. + + Arguments: + NAME: Name of the storage credential. + +Usage: + databricks storage-credentials get NAME [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI storage-credentials list --help +List credentials. + + Gets an array of storage credentials (as __StorageCredentialInfo__ objects). + The array is limited to only those storage credentials the caller has + permission to access. If the caller is a metastore admin, retrieval of + credentials is unrestricted. There is no guarantee of a specific ordering of + the elements in the array. + +Usage: + databricks storage-credentials list [flags] + +Flags: + -h, --help help for list + --max-results int Maximum number of storage credentials to return. + --page-token string Opaque pagination token to go to next page based on previous query. 
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI storage-credentials update --help +Update a credential. + + Updates a storage credential on the metastore. + + Arguments: + NAME: Name of the storage credential. + +Usage: + databricks storage-credentials update NAME [flags] + +Flags: + --comment string Comment associated with the credential. + --force Force update even if there are dependent external locations or external tables. + -h, --help help for update + --isolation-mode IsolationMode . Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN] + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --new-name string New name for the storage credential. + --owner string Username of current owner of credential. + --read-only Whether the storage credential is only usable for read operations. + --skip-validation Supplying true to this argument skips validation of the updated credential. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI storage-credentials validate --help +Validate a storage credential. + + Validates a storage credential. At least one of __external_location_name__ and + __url__ needs to be provided. If only one of them is provided, it will be used + for validation. If both are provided, the __url__ will be used for + validation, and __external_location_name__ will be ignored when checking + overlapping urls. + + Either the __storage_credential_name__ or the cloud-specific credential must + be provided. + + The caller must be a metastore admin or the storage credential owner or have + the **CREATE_EXTERNAL_LOCATION** privilege on the metastore and the storage + credential. + +Usage: + databricks storage-credentials validate [flags] + +Flags: + --external-location-name string The name of an existing external location to validate. + -h, --help help for validate + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --read-only Whether the storage credential is only usable for read operations. + --storage-credential-name string The name of the storage credential to validate. + --url string The external location url to validate. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/storage-credentials/storage-credentials/script b/acceptance/help/cmd/workspace/storage-credentials/storage-credentials/script new file mode 100755 index 000000000..e4bbd5499 --- /dev/null +++ b/acceptance/help/cmd/workspace/storage-credentials/storage-credentials/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
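+# Illustrative sketch only (hypothetical credential and file names, not a +# generated trace): per the help text above, --json accepts either an inline +# JSON string or a @path/to/file.json request body, e.g. +# $CLI storage-credentials create my-credential --json @request-body.json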
+trace $CLI storage-credentials create --help +trace $CLI storage-credentials delete --help +trace $CLI storage-credentials get --help +trace $CLI storage-credentials list --help +trace $CLI storage-credentials update --help +trace $CLI storage-credentials validate --help diff --git a/acceptance/help/cmd/workspace/system-schemas/system-schemas/output.txt b/acceptance/help/cmd/workspace/system-schemas/system-schemas/output.txt new file mode 100644 index 000000000..d50219f67 --- /dev/null +++ b/acceptance/help/cmd/workspace/system-schemas/system-schemas/output.txt @@ -0,0 +1,67 @@ + +>>> $CLI system-schemas disable --help +Disable a system schema. + + Disables the system schema and removes it from the system catalog. The caller + must be an account admin or a metastore admin. + + Arguments: + METASTORE_ID: The metastore ID under which the system schema lives. + SCHEMA_NAME: Full name of the system schema. + +Usage: + databricks system-schemas disable METASTORE_ID SCHEMA_NAME [flags] + +Flags: + -h, --help help for disable + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI system-schemas enable --help +Enable a system schema. + + Enables the system schema and adds it to the system catalog. The caller must + be an account admin or a metastore admin. + + Arguments: + METASTORE_ID: The metastore ID under which the system schema lives. + SCHEMA_NAME: Full name of the system schema. + +Usage: + databricks system-schemas enable METASTORE_ID SCHEMA_NAME [flags] + +Flags: + -h, --help help for enable + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI system-schemas list --help +List system schemas. + + Gets an array of system schemas for a metastore. The caller must be an account + admin or a metastore admin. + + Arguments: + METASTORE_ID: The ID for the metastore in which the system schema resides. + +Usage: + databricks system-schemas list METASTORE_ID [flags] + +Flags: + -h, --help help for list + --max-results int Maximum number of schemas to return. + --page-token string Opaque pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/system-schemas/system-schemas/script b/acceptance/help/cmd/workspace/system-schemas/system-schemas/script new file mode 100755 index 000000000..90321acd0 --- /dev/null +++ b/acceptance/help/cmd/workspace/system-schemas/system-schemas/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI system-schemas disable --help +trace $CLI system-schemas enable --help +trace $CLI system-schemas list --help diff --git a/acceptance/help/cmd/workspace/table-constraints/table-constraints/output.txt b/acceptance/help/cmd/workspace/table-constraints/table-constraints/output.txt new file mode 100644 index 000000000..82f760259 --- /dev/null +++ b/acceptance/help/cmd/workspace/table-constraints/table-constraints/output.txt @@ -0,0 +1,59 @@ + +>>> $CLI table-constraints create --help +Create a table constraint. 
+ + Creates a new table constraint. + + For the table constraint creation to succeed, the user must satisfy both of + these conditions: - the user must have the **USE_CATALOG** privilege on the + table's parent catalog, the **USE_SCHEMA** privilege on the table's parent + schema, and be the owner of the table. - if the new constraint is a + __ForeignKeyConstraint__, the user must have the **USE_CATALOG** privilege on + the referenced parent table's catalog, the **USE_SCHEMA** privilege on the + referenced parent table's schema, and be the owner of the referenced parent + table. + +Usage: + databricks table-constraints create [flags] + +Flags: + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI table-constraints delete --help +Delete a table constraint. + + Deletes a table constraint. + + For the table constraint deletion to succeed, the user must satisfy both of + these conditions: - the user must have the **USE_CATALOG** privilege on the + table's parent catalog, the **USE_SCHEMA** privilege on the table's parent + schema, and be the owner of the table. - if the __cascade__ argument is **true**, + the user must have the following permissions on all of the child tables: the + **USE_CATALOG** privilege on the table's catalog, the **USE_SCHEMA** privilege + on the table's schema, and be the owner of the table. + + Arguments: + FULL_NAME: Full name of the table referenced by the constraint. + CONSTRAINT_NAME: The name of the constraint to delete. + CASCADE: If true, try deleting all child constraints of the current constraint. If + false, reject this operation if the current constraint has any child + constraints. + +Usage: + databricks table-constraints delete FULL_NAME CONSTRAINT_NAME CASCADE [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/table-constraints/table-constraints/script b/acceptance/help/cmd/workspace/table-constraints/table-constraints/script new file mode 100755 index 000000000..a5902b0e4 --- /dev/null +++ b/acceptance/help/cmd/workspace/table-constraints/table-constraints/script @@ -0,0 +1,3 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI table-constraints create --help +trace $CLI table-constraints delete --help diff --git a/acceptance/help/cmd/workspace/tables/tables/output.txt b/acceptance/help/cmd/workspace/tables/tables/output.txt new file mode 100644 index 000000000..0afc4494d --- /dev/null +++ b/acceptance/help/cmd/workspace/tables/tables/output.txt @@ -0,0 +1,175 @@ + +>>> $CLI tables delete --help +Delete a table. + + Deletes a table from the specified parent catalog and schema. The caller must + be the owner of the parent catalog, have the **USE_CATALOG** privilege on the + parent catalog and be the owner of the parent schema, or be the owner of the + table and have the **USE_CATALOG** privilege on the parent catalog and the + **USE_SCHEMA** privilege on the parent schema. + + Arguments: + FULL_NAME: Full name of the table.
+ +Usage: + databricks tables delete FULL_NAME [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI tables exists --help +Get boolean reflecting if table exists. + + Gets if a table exists in the metastore for a specific catalog and schema. The + caller must satisfy one of the following requirements: * Be a metastore admin + * Be the owner of the parent catalog * Be the owner of the parent schema and + have the USE_CATALOG privilege on the parent catalog * Have the + **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + privilege on the parent schema, and either be the table owner or have the + SELECT privilege on the table. * Have BROWSE privilege on the parent catalog * + Have BROWSE privilege on the parent schema. + + Arguments: + FULL_NAME: Full name of the table. + +Usage: + databricks tables exists FULL_NAME [flags] + +Flags: + -h, --help help for exists + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI tables get --help +Get a table. + + Gets a table from the metastore for a specific catalog and schema. The caller + must satisfy one of the following requirements: * Be a metastore admin * Be + the owner of the parent catalog * Be the owner of the parent schema and have + the USE_CATALOG privilege on the parent catalog * Have the **USE_CATALOG** + privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent + schema, and either be the table owner or have the SELECT privilege on the + table. + + Arguments: + FULL_NAME: Full name of the table. + +Usage: + databricks tables get FULL_NAME [flags] + +Flags: + -h, --help help for get + --include-browse Whether to include tables in the response for which the principal can only access selective metadata. + --include-delta-metadata Whether delta metadata should be included in the response. + --include-manifest-capabilities Whether to include a manifest containing capabilities the table has. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI tables list --help +List tables. + + Gets an array of all tables for the current metastore under the parent catalog + and schema. The caller must be a metastore admin or an owner of (or have the + **SELECT** privilege on) the table. For the latter case, the caller must also + be the owner or have the **USE_CATALOG** privilege on the parent catalog and + the **USE_SCHEMA** privilege on the parent schema. There is no guarantee of a + specific ordering of the elements in the array. + + Arguments: + CATALOG_NAME: Name of parent catalog for tables of interest. + SCHEMA_NAME: Parent schema of tables. + +Usage: + databricks tables list CATALOG_NAME SCHEMA_NAME [flags] + +Flags: + -h, --help help for list + --include-browse Whether to include tables in the response for which the principal can only access selective metadata. + --include-delta-metadata Whether delta metadata should be included in the response.
+ --include-manifest-capabilities Whether to include a manifest containing capabilities the table has. + --max-results int Maximum number of tables to return. + --omit-columns Whether to omit the columns of the table from the response or not. + --omit-properties Whether to omit the properties of the table from the response or not. + --omit-username Whether to omit the username of the table (e.g. + --page-token string Opaque token to send for the next page of results (pagination). + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI tables list-summaries --help +List table summaries. + + Gets an array of summaries for tables for a schema and catalog within the + metastore. The table summaries returned are either: + + * summaries for tables (within the current metastore and parent catalog and + schema), when the user is a metastore admin, or: * summaries for tables and + schemas (within the current metastore and parent catalog) for which the user + has ownership or the **SELECT** privilege on the table and ownership or + **USE_SCHEMA** privilege on the schema, provided that the user also has + ownership or the **USE_CATALOG** privilege on the parent catalog. + + There is no guarantee of a specific ordering of the elements in the array. + + Arguments: + CATALOG_NAME: Name of parent catalog for tables of interest. + +Usage: + databricks tables list-summaries CATALOG_NAME [flags] + +Flags: + -h, --help help for list-summaries + --include-manifest-capabilities Whether to include a manifest containing capabilities the table has. + --max-results int Maximum number of summaries for tables to return. + --page-token string Opaque pagination token to go to next page based on previous query. + --schema-name-pattern string A sql LIKE pattern (% and _) for schema names. + --table-name-pattern string A sql LIKE pattern (% and _) for table names. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI tables update --help +Update a table owner. + + Change the owner of the table. The caller must be the owner of the parent + catalog, have the **USE_CATALOG** privilege on the parent catalog and be the + owner of the parent schema, or be the owner of the table and have the + **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + privilege on the parent schema. + + Arguments: + FULL_NAME: Full name of the table. + +Usage: + databricks tables update FULL_NAME [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --owner string + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/tables/tables/script b/acceptance/help/cmd/workspace/tables/tables/script new file mode 100755 index 000000000..4a6974603 --- /dev/null +++ b/acceptance/help/cmd/workspace/tables/tables/script @@ -0,0 +1,7 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
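+# Each trace below echoes the command it runs as a '>>> $CLI ...' marker; the +# captured --help text is recorded in the sibling output.txt snapshot above.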
+trace $CLI tables delete --help +trace $CLI tables exists --help +trace $CLI tables get --help +trace $CLI tables list --help +trace $CLI tables list-summaries --help +trace $CLI tables update --help diff --git a/acceptance/help/cmd/workspace/temporary-table-credentials/temporary-table-credentials/output.txt b/acceptance/help/cmd/workspace/temporary-table-credentials/temporary-table-credentials/output.txt new file mode 100644 index 000000000..2444d4b7b --- /dev/null +++ b/acceptance/help/cmd/workspace/temporary-table-credentials/temporary-table-credentials/output.txt @@ -0,0 +1,23 @@ + +>>> $CLI temporary-table-credentials generate-temporary-table-credentials --help +Generate a temporary table credential. + + Get a short-lived credential for directly accessing the table data on cloud + storage. The metastore must have the external_access_enabled flag set to true + (default false). The caller must have the EXTERNAL_USE_SCHEMA privilege on the + parent schema, and this privilege can only be granted by catalog owners. + +Usage: + databricks temporary-table-credentials generate-temporary-table-credentials [flags] + +Flags: + -h, --help help for generate-temporary-table-credentials + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --operation TableOperation The operation performed against the table data, either READ or READ_WRITE. Supported values: [READ, READ_WRITE] + --table-id string UUID of the table to read or write. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/temporary-table-credentials/temporary-table-credentials/script b/acceptance/help/cmd/workspace/temporary-table-credentials/temporary-table-credentials/script new file mode 100755 index 000000000..8b4b7f6db --- /dev/null +++ b/acceptance/help/cmd/workspace/temporary-table-credentials/temporary-table-credentials/script @@ -0,0 +1,2 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI temporary-table-credentials generate-temporary-table-credentials --help diff --git a/acceptance/help/cmd/workspace/token-management/token-management/output.txt b/acceptance/help/cmd/workspace/token-management/token-management/output.txt new file mode 100644 index 000000000..87c7e4640 --- /dev/null +++ b/acceptance/help/cmd/workspace/token-management/token-management/output.txt @@ -0,0 +1,156 @@ + +>>> $CLI token-management create-obo-token --help +Create on-behalf token. + + Creates a token on behalf of a service principal. + + Arguments: + APPLICATION_ID: Application ID of the service principal. + +Usage: + databricks token-management create-obo-token APPLICATION_ID [flags] + +Flags: + --comment string Comment that describes the purpose of the token. + -h, --help help for create-obo-token + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --lifetime-seconds int The number of seconds before the token expires. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI token-management delete --help +Delete a token. + + Deletes a token, specified by its ID. + + Arguments: + TOKEN_ID: The ID of the token to revoke.
+ +Usage: + databricks token-management delete TOKEN_ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI token-management get --help +Get token info. + + Gets information about a token, specified by its ID. + + Arguments: + TOKEN_ID: The ID of the token to get. + +Usage: + databricks token-management get TOKEN_ID [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI token-management get-permission-levels --help +Get token permission levels. + + Gets the permission levels that a user can have on an object. + +Usage: + databricks token-management get-permission-levels [flags] + +Flags: + -h, --help help for get-permission-levels + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI token-management get-permissions --help +Get token permissions. + + Gets the permissions of all tokens. Tokens can inherit permissions from their + root object. + +Usage: + databricks token-management get-permissions [flags] + +Flags: + -h, --help help for get-permissions + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI token-management list --help +List all tokens. + + Lists all tokens associated with the specified workspace or user. + +Usage: + databricks token-management list [flags] + +Flags: + --created-by-id int User ID of the user that created the token. + --created-by-username string Username of the user that created the token. + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI token-management set-permissions --help +Set token permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. + +Usage: + databricks token-management set-permissions [flags] + +Flags: + -h, --help help for set-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI token-management update-permissions --help +Update token permissions. + + Updates the permissions on all tokens. Tokens can inherit permissions from + their root object. 
+ +Usage: + databricks token-management update-permissions [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/token-management/token-management/script b/acceptance/help/cmd/workspace/token-management/token-management/script new file mode 100755 index 000000000..f6f69eb07 --- /dev/null +++ b/acceptance/help/cmd/workspace/token-management/token-management/script @@ -0,0 +1,9 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI token-management create-obo-token --help +trace $CLI token-management delete --help +trace $CLI token-management get --help +trace $CLI token-management get-permission-levels --help +trace $CLI token-management get-permissions --help +trace $CLI token-management list --help +trace $CLI token-management set-permissions --help +trace $CLI token-management update-permissions --help diff --git a/acceptance/help/cmd/workspace/tokens/tokens/output.txt b/acceptance/help/cmd/workspace/tokens/tokens/output.txt new file mode 100644 index 000000000..b1b8a3f37 --- /dev/null +++ b/acceptance/help/cmd/workspace/tokens/tokens/output.txt @@ -0,0 +1,64 @@ + +>>> $CLI tokens create --help +Create a user token. + + Creates and returns a token for a user. If this call is made through token + authentication, it creates a token with the same client ID as the + authenticated token. If the user's token quota is exceeded, this call returns + an error **QUOTA_EXCEEDED**. + +Usage: + databricks tokens create [flags] + +Flags: + --comment string Optional description to attach to the token. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --lifetime-seconds int The lifetime of the token, in seconds. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI tokens delete --help +Revoke token. + + Revokes an access token. + + If a token with the specified ID is not valid, this call returns an error + **RESOURCE_DOES_NOT_EXIST**. + + Arguments: + TOKEN_ID: The ID of the token to be revoked. + +Usage: + databricks tokens delete TOKEN_ID [flags] + +Flags: + -h, --help help for delete + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI tokens list --help +List tokens. + + Lists all the valid tokens for a user-workspace pair. 
+ +Usage: + databricks tokens list [flags] + +Flags: + -h, --help help for list + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/tokens/tokens/script b/acceptance/help/cmd/workspace/tokens/tokens/script new file mode 100755 index 000000000..dbd034d60 --- /dev/null +++ b/acceptance/help/cmd/workspace/tokens/tokens/script @@ -0,0 +1,4 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI tokens create --help +trace $CLI tokens delete --help +trace $CLI tokens list --help diff --git a/acceptance/help/cmd/workspace/users/users/output.txt b/acceptance/help/cmd/workspace/users/users/output.txt new file mode 100644 index 000000000..cf233d247 --- /dev/null +++ b/acceptance/help/cmd/workspace/users/users/output.txt @@ -0,0 +1,219 @@ + +>>> $CLI users create --help +Create a new user. + + Creates a new user in the Databricks workspace. This new user will also be + added to the Databricks account. + +Usage: + databricks users create [flags] + +Flags: + --active If this user is active. + --display-name string String that represents a concatenation of given and family names. + --external-id string External ID is not currently supported. + -h, --help help for create + --id string Databricks user ID. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --user-name string Email address of the Databricks user. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI users delete --help +Delete a user. + + Deletes a user. Deleting a user from a Databricks workspace also removes + objects associated with the user. + + Arguments: + ID: Unique ID for a user in the Databricks workspace. + +Usage: + databricks users delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI users get --help +Get user details. + + Gets information for a specific user in the Databricks workspace. + + Arguments: + ID: Unique ID for a user in the Databricks workspace. + +Usage: + databricks users get ID [flags] + +Flags: + --attributes string Comma-separated list of attributes to return in response. + --count int Desired number of results per page. + --excluded-attributes string Comma-separated list of attributes to exclude in response. + --filter string Query by which the results have to be filtered. + -h, --help help for get + --sort-by string Attribute to sort the results. + --sort-order GetSortOrder The order to sort the results. Supported values: [ascending, descending] + --start-index int Specifies the index of the first result. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI users get-permission-levels --help +Get password permission levels. + + Gets the permission levels that a user can have on an object.
+ +Usage: + databricks users get-permission-levels [flags] + +Flags: + -h, --help help for get-permission-levels + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI users get-permissions --help +Get password permissions. + + Gets the permissions of all passwords. Passwords can inherit permissions from + their root object. + +Usage: + databricks users get-permissions [flags] + +Flags: + -h, --help help for get-permissions + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI users list --help +List users. + + Gets details for all the users associated with a Databricks workspace. + +Usage: + databricks users list [flags] + +Flags: + --attributes string Comma-separated list of attributes to return in response. + --count int Desired number of results per page. + --excluded-attributes string Comma-separated list of attributes to exclude in response. + --filter string Query by which the results have to be filtered. + -h, --help help for list + --sort-by string Attribute to sort the results. + --sort-order ListSortOrder The order to sort the results. Supported values: [ascending, descending] + --start-index int Specifies the index of the first result. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI users patch --help +Update user details. + + Partially updates a user resource by applying the supplied operations on + specific user attributes. + + Arguments: + ID: Unique ID for a user in the Databricks workspace. + +Usage: + databricks users patch ID [flags] + +Flags: + -h, --help help for patch + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI users set-permissions --help +Set password permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. + +Usage: + databricks users set-permissions [flags] + +Flags: + -h, --help help for set-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI users update --help +Replace a user. + + Replaces a user's information with the data supplied in the request. + + Arguments: + ID: Databricks user ID. This is automatically set by Databricks. Any value + provided by the client will be ignored. + +Usage: + databricks users update ID [flags] + +Flags: + --active If this user is active. + --display-name string String that represents a concatenation of given and family names. + --external-id string External ID is not currently supported.
+ -h, --help help for update + --id string Databricks user ID. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --user-name string Email address of the Databricks user. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI users update-permissions --help +Update password permissions. + + Updates the permissions on all passwords. Passwords can inherit permissions + from their root object. + +Usage: + databricks users update-permissions [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/users/users/script b/acceptance/help/cmd/workspace/users/users/script new file mode 100755 index 000000000..0beb1e202 --- /dev/null +++ b/acceptance/help/cmd/workspace/users/users/script @@ -0,0 +1,11 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI users create --help +trace $CLI users delete --help +trace $CLI users get --help +trace $CLI users get-permission-levels --help +trace $CLI users get-permissions --help +trace $CLI users list --help +trace $CLI users patch --help +trace $CLI users set-permissions --help +trace $CLI users update --help +trace $CLI users update-permissions --help diff --git a/acceptance/help/cmd/workspace/vector-search-endpoints/vector-search-endpoints/output.txt b/acceptance/help/cmd/workspace/vector-search-endpoints/vector-search-endpoints/output.txt new file mode 100644 index 000000000..c0e95f3ac --- /dev/null +++ b/acceptance/help/cmd/workspace/vector-search-endpoints/vector-search-endpoints/output.txt @@ -0,0 +1,76 @@ + +>>> $CLI vector-search-endpoints create-endpoint --help +Create an endpoint. + + Create a new endpoint. + + Arguments: + NAME: Name of endpoint + ENDPOINT_TYPE: Type of endpoint. + +Usage: + databricks vector-search-endpoints create-endpoint NAME ENDPOINT_TYPE [flags] + +Flags: + -h, --help help for create-endpoint + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --no-wait do not wait to reach ONLINE state + --timeout duration maximum amount of time to reach ONLINE state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-endpoints delete-endpoint --help +Delete an endpoint. + + Arguments: + ENDPOINT_NAME: Name of the endpoint + +Usage: + databricks vector-search-endpoints delete-endpoint ENDPOINT_NAME [flags] + +Flags: + -h, --help help for delete-endpoint + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-endpoints get-endpoint --help +Get an endpoint. 
+ + Arguments: + ENDPOINT_NAME: Name of the endpoint + +Usage: + databricks vector-search-endpoints get-endpoint ENDPOINT_NAME [flags] + +Flags: + -h, --help help for get-endpoint + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-endpoints list-endpoints --help +List all endpoints. + +Usage: + databricks vector-search-endpoints list-endpoints [flags] + +Flags: + -h, --help help for list-endpoints + --page-token string Token for pagination. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/vector-search-endpoints/vector-search-endpoints/script b/acceptance/help/cmd/workspace/vector-search-endpoints/vector-search-endpoints/script new file mode 100755 index 000000000..13486ed75 --- /dev/null +++ b/acceptance/help/cmd/workspace/vector-search-endpoints/vector-search-endpoints/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI vector-search-endpoints create-endpoint --help +trace $CLI vector-search-endpoints delete-endpoint --help +trace $CLI vector-search-endpoints get-endpoint --help +trace $CLI vector-search-endpoints list-endpoints --help diff --git a/acceptance/help/cmd/workspace/vector-search-indexes/vector-search-indexes/output.txt b/acceptance/help/cmd/workspace/vector-search-indexes/vector-search-indexes/output.txt new file mode 100644 index 000000000..32bc45ff5 --- /dev/null +++ b/acceptance/help/cmd/workspace/vector-search-indexes/vector-search-indexes/output.txt @@ -0,0 +1,230 @@ + +>>> $CLI vector-search-indexes create-index --help +Create an index. + + Create a new index. + + Arguments: + NAME: Name of the index + ENDPOINT_NAME: Name of the endpoint to be used for serving the index + PRIMARY_KEY: Primary key of the index + INDEX_TYPE: There are 2 types of Vector Search indexes: + + - DELTA_SYNC: An index that automatically syncs with a source Delta + Table, automatically and incrementally updating the index as the + underlying data in the Delta Table changes. - DIRECT_ACCESS: An index + that supports direct read and write of vectors and metadata through our + REST and SDK APIs. With this model, the user manages index updates. + +Usage: + databricks vector-search-indexes create-index NAME ENDPOINT_NAME PRIMARY_KEY INDEX_TYPE [flags] + +Flags: + -h, --help help for create-index + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-indexes delete-data-vector-index --help +Delete data from index. + + Handles the deletion of data from a specified vector index. + + Arguments: + INDEX_NAME: Name of the vector index where data is to be deleted. Must be a Direct + Vector Access Index. 
+ +Usage: + databricks vector-search-indexes delete-data-vector-index INDEX_NAME [flags] + +Flags: + -h, --help help for delete-data-vector-index + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-indexes delete-index --help +Delete an index. + + Delete an index. + + Arguments: + INDEX_NAME: Name of the index + +Usage: + databricks vector-search-indexes delete-index INDEX_NAME [flags] + +Flags: + -h, --help help for delete-index + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-indexes get-index --help +Get an index. + + Get an index. + + Arguments: + INDEX_NAME: Name of the index + +Usage: + databricks vector-search-indexes get-index INDEX_NAME [flags] + +Flags: + -h, --help help for get-index + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-indexes list-indexes --help +List indexes. + + List all indexes in the given endpoint. + + Arguments: + ENDPOINT_NAME: Name of the endpoint + +Usage: + databricks vector-search-indexes list-indexes ENDPOINT_NAME [flags] + +Flags: + -h, --help help for list-indexes + --page-token string Token for pagination. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-indexes query-index --help +Query an index. + + Query the specified vector index. + + Arguments: + INDEX_NAME: Name of the vector index to query. + +Usage: + databricks vector-search-indexes query-index INDEX_NAME [flags] + +Flags: + --filters-json string JSON string representing query filters. + -h, --help help for query-index + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --num-results int Number of results to return. + --query-text string Query text. + --query-type string The query type to use. + --score-threshold float Threshold for the approximate nearest neighbor search. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-indexes query-next-page --help +Query next page. + + Use the next_page_token returned from a previous QueryVectorIndex or + QueryVectorIndexNextPage request to fetch the next page of results. + + Arguments: + INDEX_NAME: Name of the vector index to query. + +Usage: + databricks vector-search-indexes query-next-page INDEX_NAME [flags] + +Flags: + --endpoint-name string Name of the endpoint. + -h, --help help for query-next-page + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --page-token string Page token returned from previous QueryVectorIndex or QueryVectorIndexNextPage API.
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-indexes scan-index --help +Scan an index. + + Scan the specified vector index and return the first num_results entries + after the exclusive primary_key. + + Arguments: + INDEX_NAME: Name of the vector index to scan. + +Usage: + databricks vector-search-indexes scan-index INDEX_NAME [flags] + +Flags: + -h, --help help for scan-index + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --last-primary-key string Primary key of the last entry returned in the previous scan. + --num-results int Number of results to return. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-indexes sync-index --help +Synchronize an index. + + Triggers a synchronization process for a specified vector index. + + Arguments: + INDEX_NAME: Name of the vector index to synchronize. Must be a Delta Sync Index. + +Usage: + databricks vector-search-indexes sync-index INDEX_NAME [flags] + +Flags: + -h, --help help for sync-index + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI vector-search-indexes upsert-data-vector-index --help +Upsert data into an index. + + Handles the upserting of data into a specified vector index. + + Arguments: + INDEX_NAME: Name of the vector index where data is to be upserted. Must be a Direct + Vector Access Index. + INPUTS_JSON: JSON string representing the data to be upserted. + +Usage: + databricks vector-search-indexes upsert-data-vector-index INDEX_NAME INPUTS_JSON [flags] + +Flags: + -h, --help help for upsert-data-vector-index + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/vector-search-indexes/vector-search-indexes/script b/acceptance/help/cmd/workspace/vector-search-indexes/vector-search-indexes/script new file mode 100755 index 000000000..1a7499676 --- /dev/null +++ b/acceptance/help/cmd/workspace/vector-search-indexes/vector-search-indexes/script @@ -0,0 +1,11 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
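+# Illustrative pagination sketch (hypothetical index name and token, not a +# generated trace): query-index returns a next_page_token that query-next-page +# then consumes, e.g. +# $CLI vector-search-indexes query-index my-index --query-text cats --num-results 10 +# $CLI vector-search-indexes query-next-page my-index --page-token <next_page_token>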
+trace $CLI vector-search-indexes create-index --help +trace $CLI vector-search-indexes delete-data-vector-index --help +trace $CLI vector-search-indexes delete-index --help +trace $CLI vector-search-indexes get-index --help +trace $CLI vector-search-indexes list-indexes --help +trace $CLI vector-search-indexes query-index --help +trace $CLI vector-search-indexes query-next-page --help +trace $CLI vector-search-indexes scan-index --help +trace $CLI vector-search-indexes sync-index --help +trace $CLI vector-search-indexes upsert-data-vector-index --help diff --git a/acceptance/help/cmd/workspace/volumes/volumes/output.txt b/acceptance/help/cmd/workspace/volumes/volumes/output.txt new file mode 100644 index 000000000..086e78006 --- /dev/null +++ b/acceptance/help/cmd/workspace/volumes/volumes/output.txt @@ -0,0 +1,158 @@ + +>>> $CLI volumes create --help +Create a Volume. + + Creates a new volume. + + The user can create either an external volume or a managed volume. An + external volume will be created in the specified external location, while a + managed volume will be located in the default location which is specified by + the parent schema, or the parent catalog, or the Metastore. + + For the volume creation to succeed, the user must satisfy the following + conditions: - The caller must be a metastore admin, or be the owner of the + parent catalog and schema, or have the **USE_CATALOG** privilege on the parent + catalog and the **USE_SCHEMA** privilege on the parent schema. - The caller + must have **CREATE VOLUME** privilege on the parent schema. + + For an external volume, the following conditions must also be satisfied: - The caller + must have **CREATE EXTERNAL VOLUME** privilege on the external location. - + There are no other tables, nor volumes existing in the specified storage + location. - The specified storage location is not under the location of other + tables, nor volumes, or catalogs or schemas. + + Arguments: + CATALOG_NAME: The name of the catalog where the schema and the volume are + SCHEMA_NAME: The name of the schema where the volume is + NAME: The name of the volume + VOLUME_TYPE: + +Usage: + databricks volumes create CATALOG_NAME SCHEMA_NAME NAME VOLUME_TYPE [flags] + +Flags: + --comment string The comment attached to the volume. + -h, --help help for create + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --storage-location string The storage location on the cloud. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI volumes delete --help +Delete a Volume. + + Deletes a volume from the specified parent catalog and schema. + + The caller must be a metastore admin or an owner of the volume. For the latter + case, the caller must also be the owner or have the **USE_CATALOG** privilege + on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. + + Arguments: + NAME: The three-level (fully qualified) name of the volume + +Usage: + databricks volumes delete NAME [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI volumes list --help +List Volumes.
+ + Gets an array of volumes for the current metastore under the parent catalog + and schema. + + The returned volumes are filtered based on the privileges of the calling user. + For example, the metastore admin is able to list all the volumes. A regular + user needs to be the owner or have the **READ VOLUME** privilege on the volume + to receive the volumes in the response. For the latter case, the caller must + also be the owner or have the **USE_CATALOG** privilege on the parent catalog + and the **USE_SCHEMA** privilege on the parent schema. + + There is no guarantee of a specific ordering of the elements in the array. + + Arguments: + CATALOG_NAME: The identifier of the catalog + SCHEMA_NAME: The identifier of the schema + +Usage: + databricks volumes list CATALOG_NAME SCHEMA_NAME [flags] + +Flags: + -h, --help help for list + --include-browse Whether to include volumes in the response for which the principal can only access selective metadata. + --max-results int Maximum number of volumes to return (page length). + --page-token string Opaque token returned by a previous request. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI volumes read --help +Get a Volume. + + Gets a volume from the metastore for a specific catalog and schema. + + The caller must be a metastore admin or an owner of (or have the **READ + VOLUME** privilege on) the volume. For the latter case, the caller must also + be the owner or have the **USE_CATALOG** privilege on the parent catalog and + the **USE_SCHEMA** privilege on the parent schema. + + Arguments: + NAME: The three-level (fully qualified) name of the volume + +Usage: + databricks volumes read NAME [flags] + +Flags: + -h, --help help for read + --include-browse Whether to include volumes in the response for which the principal can only access selective metadata. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI volumes update --help +Update a Volume. + + Updates the specified volume under the specified parent catalog and schema. + + The caller must be a metastore admin or an owner of the volume. For the latter + case, the caller must also be the owner or have the **USE_CATALOG** privilege + on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. + + Currently only the name, the owner or the comment of the volume can be + updated. + + Arguments: + NAME: The three-level (fully qualified) name of the volume + +Usage: + databricks volumes update NAME [flags] + +Flags: + --comment string The comment attached to the volume. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --new-name string New name for the volume. + --owner string The identifier of the user who owns the volume.
+ +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/volumes/volumes/script b/acceptance/help/cmd/workspace/volumes/volumes/script new file mode 100755 index 000000000..f171acd27 --- /dev/null +++ b/acceptance/help/cmd/workspace/volumes/volumes/script @@ -0,0 +1,6 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI volumes create --help +trace $CLI volumes delete --help +trace $CLI volumes list --help +trace $CLI volumes read --help +trace $CLI volumes update --help diff --git a/acceptance/help/cmd/workspace/warehouses/warehouses/output.txt b/acceptance/help/cmd/workspace/warehouses/warehouses/output.txt new file mode 100644 index 000000000..b4622fc5e --- /dev/null +++ b/acceptance/help/cmd/workspace/warehouses/warehouses/output.txt @@ -0,0 +1,295 @@ + +>>> $CLI warehouses create --help +Create a warehouse. + + Creates a new SQL warehouse. + +Usage: + databricks warehouses create [flags] + +Flags: + --auto-stop-mins int The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. + --cluster-size string Size of the clusters allocated for this warehouse. + --creator-name string warehouse creator name. + --enable-photon Configures whether the warehouse should use Photon optimized clusters. + --enable-serverless-compute Configures whether the warehouse should use serverless compute. + -h, --help help for create + --instance-profile-arn string Deprecated. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --max-num-clusters int Maximum number of clusters that the autoscaler will create to handle concurrent queries. + --min-num-clusters int Minimum number of available clusters that will be maintained for this SQL warehouse. + --name string Logical name for the cluster. + --no-wait do not wait to reach RUNNING state + --spot-instance-policy SpotInstancePolicy Configures whether the warehouse should use spot instances. Supported values: [COST_OPTIMIZED, POLICY_UNSPECIFIED, RELIABILITY_OPTIMIZED] + --timeout duration maximum amount of time to reach RUNNING state (default 20m0s) + --warehouse-type CreateWarehouseRequestWarehouseType Warehouse type: PRO or CLASSIC. Supported values: [CLASSIC, PRO, TYPE_UNSPECIFIED] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI warehouses delete --help +Delete a warehouse. + + Deletes a SQL warehouse. + + Arguments: + ID: Required. Id of the SQL warehouse. + +Usage: + databricks warehouses delete ID [flags] + +Flags: + -h, --help help for delete + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI warehouses edit --help +Update a warehouse. + + Updates the configuration for a SQL warehouse. + + Arguments: + ID: Required. Id of the warehouse to configure.
+
+Usage:
+  databricks warehouses edit ID [flags]
+
+Flags:
+      --auto-stop-mins int                                 The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.
+      --cluster-size string                                Size of the clusters allocated for this warehouse.
+      --creator-name string                                warehouse creator name.
+      --enable-photon                                      Configures whether the warehouse should use Photon optimized clusters.
+      --enable-serverless-compute                          Configures whether the warehouse should use serverless compute.
+  -h, --help                                               help for edit
+      --instance-profile-arn string                        Deprecated.
+      --json JSON                                          either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --max-num-clusters int                               Maximum number of clusters that the autoscaler will create to handle concurrent queries.
+      --min-num-clusters int                               Minimum number of available clusters that will be maintained for this SQL warehouse.
+      --name string                                        Logical name for the cluster.
+      --no-wait                                            do not wait to reach RUNNING state
+      --spot-instance-policy SpotInstancePolicy            Configures whether the warehouse should use spot instances. Supported values: [COST_OPTIMIZED, POLICY_UNSPECIFIED, RELIABILITY_OPTIMIZED]
+      --timeout duration                                   maximum amount of time to reach RUNNING state (default 20m0s)
+      --warehouse-type EditWarehouseRequestWarehouseType   Warehouse type: PRO or CLASSIC. Supported values: [CLASSIC, PRO, TYPE_UNSPECIFIED]
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI warehouses get --help
+Get warehouse info.
+
+  Gets the information for a single SQL warehouse.
+
+  Arguments:
+    ID: Required. Id of the SQL warehouse.
+
+Usage:
+  databricks warehouses get ID [flags]
+
+Flags:
+  -h, --help               help for get
+      --no-wait            do not wait to reach RUNNING state
+      --timeout duration   maximum amount of time to reach RUNNING state (default 20m0s)
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI warehouses get-permission-levels --help
+Get SQL warehouse permission levels.
+
+  Gets the permission levels that a user can have on an object.
+
+  Arguments:
+    WAREHOUSE_ID: The SQL warehouse for which to get or manage permissions.
+
+Usage:
+  databricks warehouses get-permission-levels WAREHOUSE_ID [flags]
+
+Flags:
+  -h, --help   help for get-permission-levels
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI warehouses get-permissions --help
+Get SQL warehouse permissions.
+
+  Gets the permissions of a SQL warehouse. SQL warehouses can inherit
+  permissions from their root object.
+
+  Arguments:
+    WAREHOUSE_ID: The SQL warehouse for which to get or manage permissions.
+
+Usage:
+  databricks warehouses get-permissions WAREHOUSE_ID [flags]
+
+Flags:
+  -h, --help   help for get-permissions
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI warehouses get-workspace-warehouse-config --help
+Get the workspace configuration.
+ + Gets the workspace level configuration that is shared by all SQL warehouses in + a workspace. + +Usage: + databricks warehouses get-workspace-warehouse-config [flags] + +Flags: + -h, --help help for get-workspace-warehouse-config + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI warehouses list --help +List warehouses. + + Lists all SQL warehouses that a user has manager permissions on. + +Usage: + databricks warehouses list [flags] + +Flags: + -h, --help help for list + --run-as-user-id int Service Principal which will be used to fetch the list of warehouses. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI warehouses set-permissions --help +Set SQL warehouse permissions. + + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. + + Arguments: + WAREHOUSE_ID: The SQL warehouse for which to get or manage permissions. + +Usage: + databricks warehouses set-permissions WAREHOUSE_ID [flags] + +Flags: + -h, --help help for set-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI warehouses set-workspace-warehouse-config --help +Set the workspace configuration. + + Sets the workspace level configuration that is shared by all SQL warehouses in + a workspace. + +Usage: + databricks warehouses set-workspace-warehouse-config [flags] + +Flags: + --google-service-account string GCP only: Google Service Account used to pass to cluster to access Google Cloud Storage. + -h, --help help for set-workspace-warehouse-config + --instance-profile-arn string AWS Only: Instance profile used to pass IAM role to the cluster. + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --security-policy SetWorkspaceWarehouseConfigRequestSecurityPolicy Security policy for warehouses. Supported values: [DATA_ACCESS_CONTROL, NONE, PASSTHROUGH] + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI warehouses start --help +Start a warehouse. + + Starts a SQL warehouse. + + Arguments: + ID: Required. Id of the SQL warehouse. + +Usage: + databricks warehouses start ID [flags] + +Flags: + -h, --help help for start + --no-wait do not wait to reach RUNNING state + --timeout duration maximum amount of time to reach RUNNING state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI warehouses stop --help +Stop a warehouse. + + Stops a SQL warehouse. + + Arguments: + ID: Required. Id of the SQL warehouse. 
+ +Usage: + databricks warehouses stop ID [flags] + +Flags: + -h, --help help for stop + --no-wait do not wait to reach STOPPED state + --timeout duration maximum amount of time to reach STOPPED state (default 20m0s) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI warehouses update-permissions --help +Update SQL warehouse permissions. + + Updates the permissions on a SQL warehouse. SQL warehouses can inherit + permissions from their root object. + + Arguments: + WAREHOUSE_ID: The SQL warehouse for which to get or manage permissions. + +Usage: + databricks warehouses update-permissions WAREHOUSE_ID [flags] + +Flags: + -h, --help help for update-permissions + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/warehouses/warehouses/script b/acceptance/help/cmd/workspace/warehouses/warehouses/script new file mode 100755 index 000000000..8b45478e1 --- /dev/null +++ b/acceptance/help/cmd/workspace/warehouses/warehouses/script @@ -0,0 +1,14 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI warehouses create --help +trace $CLI warehouses delete --help +trace $CLI warehouses edit --help +trace $CLI warehouses get --help +trace $CLI warehouses get-permission-levels --help +trace $CLI warehouses get-permissions --help +trace $CLI warehouses get-workspace-warehouse-config --help +trace $CLI warehouses list --help +trace $CLI warehouses set-permissions --help +trace $CLI warehouses set-workspace-warehouse-config --help +trace $CLI warehouses start --help +trace $CLI warehouses stop --help +trace $CLI warehouses update-permissions --help diff --git a/acceptance/help/cmd/workspace/workspace-bindings/workspace-bindings/output.txt b/acceptance/help/cmd/workspace/workspace-bindings/workspace-bindings/output.txt new file mode 100644 index 000000000..a78d3c640 --- /dev/null +++ b/acceptance/help/cmd/workspace/workspace-bindings/workspace-bindings/output.txt @@ -0,0 +1,90 @@ + +>>> $CLI workspace-bindings get --help +Get catalog workspace bindings. + + Gets workspace bindings of the catalog. The caller must be a metastore admin + or an owner of the catalog. + + Arguments: + NAME: The name of the catalog. + +Usage: + databricks workspace-bindings get NAME [flags] + +Flags: + -h, --help help for get + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI workspace-bindings get-bindings --help +Get securable workspace bindings. + + Gets workspace bindings of the securable. The caller must be a metastore admin + or an owner of the securable. + + Arguments: + SECURABLE_TYPE: The type of the securable to bind to a workspace. + SECURABLE_NAME: The name of the securable. + +Usage: + databricks workspace-bindings get-bindings SECURABLE_TYPE SECURABLE_NAME [flags] + +Flags: + -h, --help help for get-bindings + --max-results int Maximum number of workspace bindings to return. 
+ --page-token string Opaque pagination token to go to next page based on previous query. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI workspace-bindings update --help +Update catalog workspace bindings. + + Updates workspace bindings of the catalog. The caller must be a metastore + admin or an owner of the catalog. + + Arguments: + NAME: The name of the catalog. + +Usage: + databricks workspace-bindings update NAME [flags] + +Flags: + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI workspace-bindings update-bindings --help +Update securable workspace bindings. + + Updates workspace bindings of the securable. The caller must be a metastore + admin or an owner of the securable. + + Arguments: + SECURABLE_TYPE: The type of the securable to bind to a workspace. + SECURABLE_NAME: The name of the securable. + +Usage: + databricks workspace-bindings update-bindings SECURABLE_TYPE SECURABLE_NAME [flags] + +Flags: + -h, --help help for update-bindings + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) diff --git a/acceptance/help/cmd/workspace/workspace-bindings/workspace-bindings/script b/acceptance/help/cmd/workspace/workspace-bindings/workspace-bindings/script new file mode 100755 index 000000000..f8e812806 --- /dev/null +++ b/acceptance/help/cmd/workspace/workspace-bindings/workspace-bindings/script @@ -0,0 +1,5 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +trace $CLI workspace-bindings get --help +trace $CLI workspace-bindings get-bindings --help +trace $CLI workspace-bindings update --help +trace $CLI workspace-bindings update-bindings --help diff --git a/acceptance/help/cmd/workspace/workspace-conf/workspace-conf/output.txt b/acceptance/help/cmd/workspace/workspace-conf/workspace-conf/output.txt new file mode 100644 index 000000000..e87d56f46 --- /dev/null +++ b/acceptance/help/cmd/workspace/workspace-conf/workspace-conf/output.txt @@ -0,0 +1,36 @@ + +>>> $CLI workspace-conf get-status --help +Check configuration status. + + Gets the configuration status for a workspace. + +Usage: + databricks workspace-conf get-status KEYS [flags] + +Flags: + -h, --help help for get-status + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI workspace-conf set-status --help +Enable/disable features. + + Sets the configuration status for a workspace, including enabling or disabling + it. 
+
+Usage:
+  databricks workspace-conf set-status [flags]
+
+Flags:
+  -h, --help        help for set-status
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/workspace-conf/workspace-conf/script b/acceptance/help/cmd/workspace/workspace-conf/workspace-conf/script
new file mode 100755
index 000000000..8e5246284
--- /dev/null
+++ b/acceptance/help/cmd/workspace/workspace-conf/workspace-conf/script
@@ -0,0 +1,3 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI workspace-conf get-status --help
+trace $CLI workspace-conf set-status --help
diff --git a/acceptance/help/cmd/workspace/workspace/workspace/output.txt b/acceptance/help/cmd/workspace/workspace/workspace/output.txt
new file mode 100644
index 000000000..b3754e8bc
--- /dev/null
+++ b/acceptance/help/cmd/workspace/workspace/workspace/output.txt
@@ -0,0 +1,268 @@
+
+>>> $CLI workspace delete --help
+Delete a workspace object.
+
+  Deletes an object or a directory (and optionally recursively deletes all
+  objects in the directory). * If path does not exist, this call returns an
+  error RESOURCE_DOES_NOT_EXIST. * If path is a non-empty directory and
+  recursive is set to false, this call returns an error
+  DIRECTORY_NOT_EMPTY.
+
+  Object deletion cannot be undone and deleting a directory recursively is not
+  atomic.
+
+  Arguments:
+    PATH: The absolute path of the notebook or directory.
+
+Usage:
+  databricks workspace delete PATH [flags]
+
+Flags:
+  -h, --help        help for delete
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+      --recursive   The flag that specifies whether to delete the object recursively.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI workspace export --help
+Export a workspace object.
+
+  Exports an object or the contents of an entire directory.
+
+  If path does not exist, this call returns an error
+  RESOURCE_DOES_NOT_EXIST.
+
+  If the exported data would exceed the size limit, this call returns
+  MAX_NOTEBOOK_SIZE_EXCEEDED. Currently, this API does not support exporting a
+  library.
+
+  Arguments:
+    PATH: The absolute path of the object or directory. Exporting a directory is
+    only supported for the DBC, SOURCE, and AUTO formats.
+
+Usage:
+  databricks workspace export SOURCE_PATH [flags]
+
+Flags:
+      --file string           Path on the local file system to save exported file at.
+      --format ExportFormat   This specifies the format of the exported file. Supported values: [
+                              AUTO,
+                              DBC,
+                              HTML,
+                              JUPYTER,
+                              R_MARKDOWN,
+                              SOURCE,
+                              ]
+  -h, --help                  help for export
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI workspace get-permission-levels --help
+Get workspace object permission levels.
+
+  Gets the permission levels that a user can have on an object.
+
+  Arguments:
+    WORKSPACE_OBJECT_TYPE: The workspace object type for which to get or manage permissions.
+ WORKSPACE_OBJECT_ID: The workspace object for which to get or manage permissions. + +Usage: + databricks workspace get-permission-levels WORKSPACE_OBJECT_TYPE WORKSPACE_OBJECT_ID [flags] + +Flags: + -h, --help help for get-permission-levels + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI workspace get-permissions --help +Get workspace object permissions. + + Gets the permissions of a workspace object. Workspace objects can inherit + permissions from their parent objects or root object. + + Arguments: + WORKSPACE_OBJECT_TYPE: The workspace object type for which to get or manage permissions. + WORKSPACE_OBJECT_ID: The workspace object for which to get or manage permissions. + +Usage: + databricks workspace get-permissions WORKSPACE_OBJECT_TYPE WORKSPACE_OBJECT_ID [flags] + +Flags: + -h, --help help for get-permissions + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI workspace get-status --help +Get status. + + Gets the status of an object or a directory. If path does not exist, this + call returns an error RESOURCE_DOES_NOT_EXIST. + + Arguments: + PATH: The absolute path of the notebook or directory. + +Usage: + databricks workspace get-status PATH [flags] + +Flags: + -h, --help help for get-status + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI workspace import --help +Import a workspace object. + + Imports a workspace object (for example, a notebook or file) or the contents + of an entire directory. If path already exists and overwrite is set to + false, this call returns an error RESOURCE_ALREADY_EXISTS. To import a + directory, you can use either the DBC format or the SOURCE format with the + language field unset. To import a single file as SOURCE, you must set the + language field. + + Arguments: + PATH: The absolute path of the object or directory. Importing a directory is + only supported for the DBC and SOURCE formats. + +Usage: + databricks workspace import TARGET_PATH [flags] + +Flags: + --content string The base64-encoded content. + --file string Path of local file to import + --format ImportFormat This specifies the format of the file to be imported. Supported values: [ + AUTO, + DBC, + HTML, + JUPYTER, + RAW, + R_MARKDOWN, + SOURCE, + ] + -h, --help help for import + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --language Language The language of the object. Supported values: [PYTHON, R, SCALA, SQL] + --overwrite The flag that specifies whether to overwrite existing object. + +Global Flags: + --debug enable debug logging + -o, --output type output type: text or json (default text) + -p, --profile string ~/.databrickscfg profile + -t, --target string bundle target to use (if applicable) + +>>> $CLI workspace list --help +List contents. + + Lists the contents of a directory, or the object if it is not a directory. If + the input path does not exist, this call returns an error + RESOURCE_DOES_NOT_EXIST. + + Arguments: + PATH: The absolute path of the notebook or directory. 
+
+Usage:
+  databricks workspace list PATH [flags]
+
+Flags:
+  -h, --help                           help for list
+      --notebooks-modified-after int   UTC timestamp in milliseconds.
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI workspace mkdirs --help
+Create a directory.
+
+  Creates the specified directory (and necessary parent directories if they do
+  not exist). If there is an object (not a directory) at any prefix of the input
+  path, this call returns an error RESOURCE_ALREADY_EXISTS.
+
+  Note that if this operation fails it may have succeeded in creating some of
+  the necessary parent directories.
+
+  Arguments:
+    PATH: The absolute path of the directory. If the parent directories do not
+    exist, it will also create them. If the directory already exists, this
+    command will do nothing and succeed.
+
+Usage:
+  databricks workspace mkdirs PATH [flags]
+
+Flags:
+  -h, --help        help for mkdirs
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI workspace set-permissions --help
+Set workspace object permissions.
+
+  Sets permissions on an object, replacing existing permissions if they exist.
+  Deletes all direct permissions if none are specified. Objects can inherit
+  permissions from their parent objects or root object.
+
+  Arguments:
+    WORKSPACE_OBJECT_TYPE: The workspace object type for which to get or manage permissions.
+    WORKSPACE_OBJECT_ID: The workspace object for which to get or manage permissions.
+
+Usage:
+  databricks workspace set-permissions WORKSPACE_OBJECT_TYPE WORKSPACE_OBJECT_ID [flags]
+
+Flags:
+  -h, --help        help for set-permissions
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
+
+>>> $CLI workspace update-permissions --help
+Update workspace object permissions.
+
+  Updates the permissions on a workspace object. Workspace objects can inherit
+  permissions from their parent objects or root object.
+
+  Arguments:
+    WORKSPACE_OBJECT_TYPE: The workspace object type for which to get or manage permissions.
+    WORKSPACE_OBJECT_ID: The workspace object for which to get or manage permissions.
+
+Usage:
+  databricks workspace update-permissions WORKSPACE_OBJECT_TYPE WORKSPACE_OBJECT_ID [flags]
+
+Flags:
+  -h, --help        help for update-permissions
+      --json JSON   either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
+
+Global Flags:
+      --debug            enable debug logging
+  -o, --output type      output type: text or json (default text)
+  -p, --profile string   ~/.databrickscfg profile
+  -t, --target string    bundle target to use (if applicable)
diff --git a/acceptance/help/cmd/workspace/workspace/workspace/script b/acceptance/help/cmd/workspace/workspace/workspace/script
new file mode 100755
index 000000000..c1248de1b
--- /dev/null
+++ b/acceptance/help/cmd/workspace/workspace/workspace/script
@@ -0,0 +1,11 @@
+# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+trace $CLI workspace delete --help
+trace $CLI workspace export --help
+trace $CLI workspace get-permission-levels --help
+trace $CLI workspace get-permissions --help
+trace $CLI workspace get-status --help
+trace $CLI workspace import --help
+trace $CLI workspace list --help
+trace $CLI workspace mkdirs --help
+trace $CLI workspace set-permissions --help
+trace $CLI workspace update-permissions --help
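
Note on how these generated scripts pair with their recorded outputs: each
"trace $CLI <group> <subcommand> --help" line in a script corresponds to one
">>> $CLI <group> <subcommand> --help" marker in the sibling output.txt,
followed by the captured help text; the committed output keeps the literal
$CLI placeholder in the marker. The sketch below is a hypothetical stand-in
for the harness's trace helper, written only to illustrate that
script-to-output correspondence; the real helper lives in the acceptance test
runner, and both the function body and the CLI variable here are assumptions.

    #!/bin/sh
    # Hypothetical stand-in for the acceptance harness's trace helper:
    # print a blank line and a ">>> " marker for the command, then run it,
    # so stdout mirrors the layout of the generated output.txt files.
    trace() {
      printf '\n>>> %s\n' "$*"
      "$@"
    }

    CLI=databricks   # assumption: $CLI resolves to the CLI binary under test
    trace $CLI workspace mkdirs --help

Because script and output.txt are regenerated together, a command group
gaining or losing a subcommand shows up as a one-line change in the script and
a corresponding block change in the recorded output.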