From 781688c9cb7699d9c0e1977d2f77334381c65640 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 30 Apr 2024 16:41:24 +0200
Subject: [PATCH] Bump github.com/databricks/databricks-sdk-go from 0.38.0 to
 0.39.0 (#1405)

Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.38.0 to 0.39.0.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.39.0

0.39.0

Note: This release contains breaking changes; please see the API changes below for more details.

API Changes:

OpenAPI SHA: 21f9f1482f9d0d15228da59f2cd9f0863d2a6d55, Date: 2024-04-23

Changelog

Sourced from github.com/databricks/databricks-sdk-go's changelog.

0.39.0

Note: This release contains breaking changes; please see the API changes below for more details.

API Changes:

OpenAPI SHA: 21f9f1482f9d0d15228da59f2cd9f0863d2a6d55, Date: 2024-04-23

Commits
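
One breaking change from this release is visible directly in the diff below: the libraries API request type `compute.ClusterStatusRequest` is renamed to `compute.ClusterStatus`. A minimal sketch of the caller-side change, assuming the `ClusterId` field carries over unchanged (the cluster ID value is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/compute"
)

func main() {
	// Before this bump (v0.38.0) the request type was compute.ClusterStatusRequest;
	// v0.39.0 renames it to compute.ClusterStatus. The cluster ID is a placeholder.
	req := compute.ClusterStatus{ClusterId: "0123-456789-abcdefgh"}
	fmt.Println(req.ClusterId)
}
```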

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.38.0&new-version=0.39.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Andrew Nester
---
 .codegen/_openapi_sha                         |   2 +-
 .codegen/service.go.tmpl                      |   1 +
 bundle/schema/docs/bundle_descriptions.json   | 205 ++++++++++++++++--
 .../esm-enablement-account.go                 |   3 +
 .../automatic-cluster-update.go               |   3 +
 .../csp-enablement/csp-enablement.go          |   3 +
 .../esm-enablement/esm-enablement.go          |   3 +
 cmd/workspace/jobs/jobs.go                    |   1 +
 cmd/workspace/libraries/libraries.go          |  53 ++---
 cmd/workspace/pipelines/pipelines.go          |   2 +
 .../provider-exchanges/provider-exchanges.go  |  22 +-
 .../serving-endpoints/serving-endpoints.go    |  62 ++++++
 go.mod                                        |   2 +-
 go.sum                                        |   4 +-
 14 files changed, 296 insertions(+), 70 deletions(-)

diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha
index 0aa4b102..1f11c17b 100644
--- a/.codegen/_openapi_sha
+++ b/.codegen/_openapi_sha
@@ -1 +1 @@
-94684175b8bd65f8701f89729351f8069e8309c9
\ No newline at end of file
+21f9f1482f9d0d15228da59f2cd9f0863d2a6d55
\ No newline at end of file
diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl
index 6aabb02c..492b2132 100644
--- a/.codegen/service.go.tmpl
+++ b/.codegen/service.go.tmpl
@@ -151,6 +151,7 @@ func new{{.PascalName}}() *cobra.Command {
 		"provider-exchanges delete"
 		"provider-exchanges delete-listing-from-exchange"
 		"provider-exchanges list-exchanges-for-listing"
+		"provider-exchanges list-listings-for-exchange"
 	-}}
 	{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
 	{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}
diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json
index ca889ae5..75499507 100644
--- a/bundle/schema/docs/bundle_descriptions.json
+++ b/bundle/schema/docs/bundle_descriptions.json
@@ -46,6 +46,17 @@
       "properties": {
         "fail_on_active_runs": {
           "description": ""
+        },
+        "lock": {
+          "description": "",
+          "properties": {
+            "enabled": {
+              "description": ""
+            },
+            "force": {
+              "description": ""
+            }
+          }
         }
       }
     },
@@ -76,6 +87,9 @@
         "additionalproperties": {
           "description": ""
         }
+      },
+      "use_legacy_run_as": {
+        "description": ""
       }
     }
   },
@@ -242,7 +256,7 @@
       "description": "",
       "properties": {
         "client": {
-          "description": "*\nUser-friendly name for the client version: “client”: “1”\nThe version is a string, consisting of the major client version"
+          "description": "Client version used by the environment\nThe client is the user-facing environment of the runtime.\nEach client comes with a specific set of pre-installed libraries.\nThe version is a string, consisting of the major client version."
         },
         "dependencies": {
           "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]",
@@ -909,10 +923,10 @@
         }
       },
       "egg": {
-        "description": "URI of the egg to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI."
+ "description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "jar": { - "description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "maven": { "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", @@ -942,8 +956,11 @@ } } }, + "requirements": { + "description": "URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported.\nFor example: `{ \"requirements\": \"/Workspace/path/to/requirements.txt\" }` or `{ \"requirements\" : \"/Volumes/path/to/requirements.txt\" }`" + }, "whl": { - "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"whl\": \"/Workspace/path/to/library.whl\" }`, `{ \"whl\" : \"/Volumes/path/to/library.whl\" }` or\n`{ \"whl\": \"s3://my-bucket/library.whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." } } } @@ -1303,6 +1320,9 @@ }, "source": { "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + }, + "warehouse_id": { + "description": "Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses.\n\nNote that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail." } } }, @@ -1526,7 +1546,7 @@ } }, "file": { - "description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. 
Multiple SQL statements separated by semicolons (;) are not permitted.", + "description": "If file, indicates that this job runs a SQL file in a remote Git repository.", "properties": { "path": { "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." @@ -1562,7 +1582,7 @@ "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -1679,7 +1699,7 @@ } }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -2415,6 +2435,17 @@ "continuous": { "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." }, + "deployment": { + "description": "Deployment type of this pipeline.", + "properties": { + "kind": { + "description": "The deployment method that manages the pipeline." + }, + "metadata_file_path": { + "description": "The path to the file containing metadata about the deployment." + } + } + }, "development": { "description": "Whether the pipeline is in Development mode. Defaults to false." }, @@ -2441,6 +2472,65 @@ "id": { "description": "Unique identifier for this pipeline." }, + "ingestion_definition": { + "description": "The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings.", + "properties": { + "connection_name": { + "description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "ingestion_gateway_id": { + "description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "objects": { + "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", + "items": { + "description": "", + "properties": { + "schema": { + "description": "Select tables from a specific source schema.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store tables." + }, + "destination_schema": { + "description": "Required. Destination schema to store tables in. 
Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists." + }, + "source_catalog": { + "description": "The source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Required. Schema name in the source database." + } + } + }, + "table": { + "description": "Select tables from a specific source table.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store table." + }, + "destination_schema": { + "description": "Required. Destination schema to store table." + }, + "destination_table": { + "description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used." + }, + "source_catalog": { + "description": "Source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Schema name in the source database. Might be optional depending on the type of source." + }, + "source_table": { + "description": "Required. Table name in the source database." + } + } + } + } + } + } + } + }, "libraries": { "description": "Libraries or code needed by this deployment.", "items": { @@ -2682,6 +2772,17 @@ "properties": { "fail_on_active_runs": { "description": "" + }, + "lock": { + "description": "", + "properties": { + "enabled": { + "description": "" + }, + "force": { + "description": "" + } + } } } }, @@ -2878,7 +2979,7 @@ "description": "", "properties": { "client": { - "description": "*\nUser-friendly name for the client version: “client”: “1”\nThe version is a string, consisting of the major client version" + "description": "Client version used by the environment\nThe client is the user-facing environment of the runtime.\nEach client comes with a specific set of pre-installed libraries.\nThe version is a string, consisting of the major client version." }, "dependencies": { "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]", @@ -3545,10 +3646,10 @@ } }, "egg": { - "description": "URI of the egg to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "jar": { - "description": "URI of the jar to be installed. 
Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "maven": { "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", @@ -3578,8 +3679,11 @@ } } }, + "requirements": { + "description": "URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported.\nFor example: `{ \"requirements\": \"/Workspace/path/to/requirements.txt\" }` or `{ \"requirements\" : \"/Volumes/path/to/requirements.txt\" }`" + }, "whl": { - "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"whl\": \"/Workspace/path/to/library.whl\" }`, `{ \"whl\" : \"/Volumes/path/to/library.whl\" }` or\n`{ \"whl\": \"s3://my-bucket/library.whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." } } } @@ -3939,6 +4043,9 @@ }, "source": { "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + }, + "warehouse_id": { + "description": "Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses.\n\nNote that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail." } } }, @@ -4162,7 +4269,7 @@ } }, "file": { - "description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.", + "description": "If file, indicates that this job runs a SQL file in a remote Git repository.", "properties": { "path": { "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." @@ -4198,7 +4305,7 @@ "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." 
}, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -4315,7 +4422,7 @@ } }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -5051,6 +5158,17 @@ "continuous": { "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." }, + "deployment": { + "description": "Deployment type of this pipeline.", + "properties": { + "kind": { + "description": "The deployment method that manages the pipeline." + }, + "metadata_file_path": { + "description": "The path to the file containing metadata about the deployment." + } + } + }, "development": { "description": "Whether the pipeline is in Development mode. Defaults to false." }, @@ -5077,6 +5195,65 @@ "id": { "description": "Unique identifier for this pipeline." }, + "ingestion_definition": { + "description": "The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings.", + "properties": { + "connection_name": { + "description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "ingestion_gateway_id": { + "description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "objects": { + "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", + "items": { + "description": "", + "properties": { + "schema": { + "description": "Select tables from a specific source schema.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store tables." + }, + "destination_schema": { + "description": "Required. Destination schema to store tables in. Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists." + }, + "source_catalog": { + "description": "The source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Required. Schema name in the source database." 
+ } + } + }, + "table": { + "description": "Select tables from a specific source table.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store table." + }, + "destination_schema": { + "description": "Required. Destination schema to store table." + }, + "destination_table": { + "description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used." + }, + "source_catalog": { + "description": "Source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Schema name in the source database. Might be optional depending on the type of source." + }, + "source_table": { + "description": "Required. Table name in the source database." + } + } + } + } + } + } + } + }, "libraries": { "description": "Libraries or code needed by this deployment.", "items": { diff --git a/cmd/account/esm-enablement-account/esm-enablement-account.go b/cmd/account/esm-enablement-account/esm-enablement-account.go index dd407e2e..a2e95ffe 100755 --- a/cmd/account/esm-enablement-account/esm-enablement-account.go +++ b/cmd/account/esm-enablement-account/esm-enablement-account.go @@ -25,6 +25,9 @@ func New() *cobra.Command { setting is disabled for new workspaces. After workspace creation, account admins can enable enhanced security monitoring individually for each workspace.`, + + // This service is being previewed; hide from help output. + Hidden: true, } // Add methods diff --git a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go index 2385195b..681dba7b 100755 --- a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go +++ b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go @@ -22,6 +22,9 @@ func New() *cobra.Command { Short: `Controls whether automatic cluster update is enabled for the current workspace.`, Long: `Controls whether automatic cluster update is enabled for the current workspace. By default, it is turned off.`, + + // This service is being previewed; hide from help output. + Hidden: true, } // Add methods diff --git a/cmd/workspace/csp-enablement/csp-enablement.go b/cmd/workspace/csp-enablement/csp-enablement.go index 31259156..e82fdc2a 100755 --- a/cmd/workspace/csp-enablement/csp-enablement.go +++ b/cmd/workspace/csp-enablement/csp-enablement.go @@ -25,6 +25,9 @@ func New() *cobra.Command { off. This settings can NOT be disabled once it is enabled.`, + + // This service is being previewed; hide from help output. + Hidden: true, } // Add methods diff --git a/cmd/workspace/esm-enablement/esm-enablement.go b/cmd/workspace/esm-enablement/esm-enablement.go index a65fe2f7..784c01f2 100755 --- a/cmd/workspace/esm-enablement/esm-enablement.go +++ b/cmd/workspace/esm-enablement/esm-enablement.go @@ -27,6 +27,9 @@ func New() *cobra.Command { If the compliance security profile is disabled, you can enable or disable this setting and it is not permanent.`, + + // This service is being previewed; hide from help output. 
+		Hidden: true,
 	}
 
 	// Add methods
diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go
index 267dfc73..e31c3f08 100755
--- a/cmd/workspace/jobs/jobs.go
+++ b/cmd/workspace/jobs/jobs.go
@@ -1513,6 +1513,7 @@ func newSubmit() *cobra.Command {
 	// TODO: complex arg: pipeline_task
 	// TODO: complex arg: python_wheel_task
 	// TODO: complex arg: queue
+	// TODO: complex arg: run_as
 	// TODO: complex arg: run_job_task
 	cmd.Flags().StringVar(&submitReq.RunName, "run-name", submitReq.RunName, `An optional name for the run.`)
 	// TODO: complex arg: spark_jar_task
diff --git a/cmd/workspace/libraries/libraries.go b/cmd/workspace/libraries/libraries.go
index e11e5a4c..aed8843d 100755
--- a/cmd/workspace/libraries/libraries.go
+++ b/cmd/workspace/libraries/libraries.go
@@ -25,18 +25,14 @@ func New() *cobra.Command {
 
 	To make third-party or custom code available to notebooks and jobs running
 	on your clusters, you can install a library. Libraries can be written in Python,
-	Java, Scala, and R. You can upload Java, Scala, and Python libraries and point
-	to external packages in PyPI, Maven, and CRAN repositories.
+	Java, Scala, and R. You can upload Python, Java, Scala and R libraries and
+	point to external packages in PyPI, Maven, and CRAN repositories.
 
 	Cluster libraries can be used by all notebooks running on a cluster. You can
 	install a cluster library directly from a public repository such as PyPI or
 	Maven, using a previously installed workspace library, or using an init
 	script.
 
-	When you install a library on a cluster, a notebook already attached to that
-	cluster will not immediately see the new library. You must first detach and
-	then reattach the notebook to the cluster.
-
 	When you uninstall a library from a cluster, the library is removed only when
 	you restart the cluster. Until you restart the cluster, the status of the
 	uninstalled library appears as Uninstall pending restart.`,
@@ -75,9 +71,8 @@ func newAllClusterStatuses() *cobra.Command {
 	cmd.Short = `Get all statuses.`
 	cmd.Long = `Get all statuses.
 
-	Get the status of all libraries on all clusters. A status will be available
-	for all libraries installed on this cluster via the API or the libraries UI as
-	well as libraries set to be installed on all clusters via the libraries UI.`
+	Get the status of all libraries on all clusters. A status is returned for all
+	libraries installed on this cluster via the API or the libraries UI.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -110,13 +105,13 @@
 // Functions can be added from the `init()` function in manually curated files in this directory.
 var clusterStatusOverrides []func(
 	*cobra.Command,
-	*compute.ClusterStatusRequest,
+	*compute.ClusterStatus,
 )
 
 func newClusterStatus() *cobra.Command {
 	cmd := &cobra.Command{}
 
-	var clusterStatusReq compute.ClusterStatusRequest
+	var clusterStatusReq compute.ClusterStatus
 
 	// TODO: short flags
 
@@ -124,21 +119,13 @@ func newClusterStatus() *cobra.Command {
 	cmd.Short = `Get status.`
 	cmd.Long = `Get status.
 
-	Get the status of libraries on a cluster. A status will be available for all
-	libraries installed on this cluster via the API or the libraries UI as well as
-	libraries set to be installed on all clusters via the libraries UI. The order
-	of returned libraries will be as follows.
-
-	1. Libraries set to be installed on this cluster will be returned first.
-	Within this group, the final order will be order in which the libraries were
-	added to the cluster.
-
-	2. Libraries set to be installed on all clusters are returned next. Within
-	this group there is no order guarantee.
-
-	3. Libraries that were previously requested on this cluster or on all
-	clusters, but now marked for removal. Within this group there is no order
-	guarantee.
+	Get the status of libraries on a cluster. A status is returned for all
+	libraries installed on this cluster via the API or the libraries UI. The order
+	of returned libraries is as follows: 1. Libraries set to be installed on this
+	cluster, in the order that the libraries were added to the cluster, are
+	returned first. 2. Libraries that were previously requested to be installed on
+	this cluster or, but are now marked for removal, in no particular order, are
+	returned last.
 
 	Arguments:
 	  CLUSTER_ID: Unique identifier of the cluster whose status should be retrieved.`
 
@@ -195,12 +182,8 @@ func newInstall() *cobra.Command {
 	cmd.Short = `Add a library.`
 	cmd.Long = `Add a library.
 
-	Add libraries to be installed on a cluster. The installation is asynchronous;
-	it happens in the background after the completion of this request.
-
-	**Note**: The actual set of libraries to be installed on a cluster is the
-	union of the libraries specified via this method and the libraries set to be
-	installed on all clusters via the libraries UI.`
+	Add libraries to install on a cluster. The installation is asynchronous; it
+	happens in the background after the completion of this request.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -259,9 +242,9 @@ func newUninstall() *cobra.Command {
 	cmd.Short = `Uninstall libraries.`
 	cmd.Long = `Uninstall libraries.
 
-	Set libraries to be uninstalled on a cluster. The libraries won't be
-	uninstalled until the cluster is restarted. Uninstalling libraries that are
-	not installed on the cluster will have no impact but is not an error.`
+	Set libraries to uninstall from a cluster. The libraries won't be uninstalled
+	until the cluster is restarted. A request to uninstall a library that is not
+	currently installed is ignored.`
 
 	cmd.Annotations = make(map[string]string)
 
diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go
index b7c3235f..5a55fd72 100755
--- a/cmd/workspace/pipelines/pipelines.go
+++ b/cmd/workspace/pipelines/pipelines.go
@@ -940,11 +940,13 @@ func newUpdate() *cobra.Command {
 	// TODO: array: clusters
 	// TODO: map via StringToStringVar: configuration
 	cmd.Flags().BoolVar(&updateReq.Continuous, "continuous", updateReq.Continuous, `Whether the pipeline is continuous or triggered.`)
+	// TODO: complex arg: deployment
 	cmd.Flags().BoolVar(&updateReq.Development, "development", updateReq.Development, `Whether the pipeline is in Development mode.`)
 	cmd.Flags().StringVar(&updateReq.Edition, "edition", updateReq.Edition, `Pipeline product edition.`)
 	cmd.Flags().Int64Var(&updateReq.ExpectedLastModified, "expected-last-modified", updateReq.ExpectedLastModified, `If present, the last-modified time of the pipeline settings before the edit.`)
 	// TODO: complex arg: filters
 	cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Unique identifier for this pipeline.`)
+	// TODO: complex arg: ingestion_definition
 	// TODO: array: libraries
 	cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Friendly identifier for this pipeline.`)
 	// TODO: array: notifications
diff --git a/cmd/workspace/provider-exchanges/provider-exchanges.go b/cmd/workspace/provider-exchanges/provider-exchanges.go
index fe1a9a3d..c9f5818f 100755
--- a/cmd/workspace/provider-exchanges/provider-exchanges.go
+++ b/cmd/workspace/provider-exchanges/provider-exchanges.go
@@ -508,28 +508,16 @@ func newListListingsForExchange() *cobra.Command {
 
 	cmd.Annotations = make(map[string]string)
 
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(1)
+		return check(cmd, args)
+	}
+
 	cmd.PreRunE = root.MustWorkspaceClient
 	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		w := root.WorkspaceClient(ctx)
 
-		if len(args) == 0 {
-			promptSpinner := cmdio.Spinner(ctx)
-			promptSpinner <- "No EXCHANGE_ID argument specified. Loading names for Provider Exchanges drop-down."
-			names, err := w.ProviderExchanges.ExchangeListingExchangeNameToExchangeIdMap(ctx, marketplace.ListExchangesForListingRequest{})
-			close(promptSpinner)
-			if err != nil {
-				return fmt.Errorf("failed to load names for Provider Exchanges drop-down. Please manually specify required arguments. Original error: %w", err)
-			}
-			id, err := cmdio.Select(ctx, names, "")
-			if err != nil {
-				return err
-			}
-			args = append(args, id)
-		}
-		if len(args) != 1 {
-			return fmt.Errorf("expected to have ")
-		}
 		listListingsForExchangeReq.ExchangeId = args[0]
 
 		response := w.ProviderExchanges.ListListingsForExchange(ctx, listListingsForExchangeReq)
diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go
index 6706b99e..dee341ab 100755
--- a/cmd/workspace/serving-endpoints/serving-endpoints.go
+++ b/cmd/workspace/serving-endpoints/serving-endpoints.go
@@ -46,6 +46,7 @@ func New() *cobra.Command {
 	cmd.AddCommand(newDelete())
 	cmd.AddCommand(newExportMetrics())
 	cmd.AddCommand(newGet())
+	cmd.AddCommand(newGetOpenApi())
 	cmd.AddCommand(newGetPermissionLevels())
 	cmd.AddCommand(newGetPermissions())
 	cmd.AddCommand(newList())
@@ -379,6 +380,67 @@ func newGet() *cobra.Command {
 	return cmd
 }
 
+// start get-open-api command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var getOpenApiOverrides []func(
+	*cobra.Command,
+	*serving.GetOpenApiRequest,
+)
+
+func newGetOpenApi() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var getOpenApiReq serving.GetOpenApiRequest
+
+	// TODO: short flags
+
+	cmd.Use = "get-open-api NAME"
+	cmd.Short = `Get the schema for a serving endpoint.`
+	cmd.Long = `Get the schema for a serving endpoint.
+
+  Get the query schema of the serving endpoint in OpenAPI format. The schema
+  contains information for the supported paths, input and output format and
+  datatypes.
+
+  Arguments:
+    NAME: The name of the serving endpoint that the served model belongs to. This
+      field is required.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(1)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		getOpenApiReq.Name = args[0]
+
+		err = w.ServingEndpoints.GetOpenApi(ctx, getOpenApiReq)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range getOpenApiOverrides {
+		fn(cmd, &getOpenApiReq)
+	}
+
+	return cmd
+}
+
 // start get-permission-levels command
 
 // Slice with functions to override default command behavior.
diff --git a/go.mod b/go.mod
index 6a991b0e..7b2d31da 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.21
 require (
 	github.com/Masterminds/semver/v3 v3.2.1 // MIT
 	github.com/briandowns/spinner v1.23.0 // Apache 2.0
-	github.com/databricks/databricks-sdk-go v0.38.0 // Apache 2.0
+	github.com/databricks/databricks-sdk-go v0.39.0 // Apache 2.0
 	github.com/fatih/color v1.16.0 // MIT
 	github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
 	github.com/google/uuid v1.6.0 // BSD-3-Clause
diff --git a/go.sum b/go.sum
index 8fe9109b..5dc02d09 100644
--- a/go.sum
+++ b/go.sum
@@ -30,8 +30,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/databricks/databricks-sdk-go v0.38.0 h1:MQhOCWTkdKItG+n6ZwcXQv9FWBVXq9fax8VSZns2e+0=
-github.com/databricks/databricks-sdk-go v0.38.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM=
+github.com/databricks/databricks-sdk-go v0.39.0 h1:nVnQYkk47SkEsRSXWkn6j7jBOxXgusjoo6xwbaHTGss=
+github.com/databricks/databricks-sdk-go v0.39.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
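
The other user-visible addition from this bump is the `get-open-api` serving-endpoints command wired up above. A minimal sketch of making the same call directly against the SDK, mirroring the generated `RunE` body (the endpoint name `my-endpoint` is a placeholder, and client construction assumes ambient Databricks authentication):

```go
package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/serving"
)

func main() {
	ctx := context.Background()

	// Reads credentials from the environment/config profile, as the CLI does.
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}

	// Same call the generated get-open-api command makes; "my-endpoint"
	// is a placeholder serving endpoint name.
	err = w.ServingEndpoints.GetOpenApi(ctx, serving.GetOpenApiRequest{
		Name: "my-endpoint",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

As generated at this SDK version, the call returns only an error and does not surface the schema body. The same version bump can be reproduced locally with `go get github.com/databricks/databricks-sdk-go@v0.39.0` followed by `go mod tidy`.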