mirror of https://github.com/databricks/cli.git

commit da4b856e57 (parent 73f832b672): regenerate + fixes
@@ -1 +1 @@
-cf9c61453990df0f9453670f2fe68e1b128647a2
+25b2478e5a18c888f0d423249abde5499dc58424

@@ -30,6 +30,8 @@ cmd/account/users/users.go linguist-generated=true
 cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true
 cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true
 cmd/account/workspaces/workspaces.go linguist-generated=true
+cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go linguist-generated=true
+cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go linguist-generated=true
 cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true
 cmd/workspace/alerts/alerts.go linguist-generated=true
 cmd/workspace/apps/apps.go linguist-generated=true

@@ -214,7 +214,7 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 
     // Dashboards: Prefix
     for key, dashboard := range r.Dashboards {
-        if dashboard == nil || dashboard.CreateDashboardRequest == nil {
+        if dashboard == nil || dashboard.Dashboard == nil {
             diags = diags.Extend(diag.Errorf("dashboard %s s is not defined", key))
             continue
         }

@@ -26,13 +26,13 @@ func TestConfigureDashboardDefaultsParentPath(t *testing.T) {
                 "d1": {
                     // Empty string is skipped.
                     // See below for how it is set.
-                    CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+                    Dashboard: &dashboards.Dashboard{
                         ParentPath: "",
                     },
                 },
                 "d2": {
                     // Non-empty string is skipped.
-                    CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+                    Dashboard: &dashboards.Dashboard{
                         ParentPath: "already-set",
                     },
                 },

@@ -89,7 +89,7 @@ func TestInitializeURLs(t *testing.T) {
             Dashboards: map[string]*resources.Dashboard{
                 "dashboard1": {
                     ID: "01ef8d56871e1d50ae30ce7375e42478",
-                    CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+                    Dashboard: &dashboards.Dashboard{
                         DisplayName: "My special dashboard",
                     },
                 },

@@ -126,7 +126,7 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
                 },
                 Dashboards: map[string]*resources.Dashboard{
                     "dashboard1": {
-                        CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+                        Dashboard: &dashboards.Dashboard{
                             DisplayName: "dashboard1",
                         },
                     },

@@ -17,7 +17,7 @@ type Dashboard struct {
     ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
     URL            string         `json:"url,omitempty" bundle:"internal"`
 
-    *dashboards.CreateDashboardRequest
+    *dashboards.Dashboard
 
     // =========================
     // === Additional fields ===
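For orientation (not part of the diff): bundle code now populates the embedded dashboards.Dashboard instead of the removed dashboards.CreateDashboardRequest. A minimal Go sketch, mirroring the updated test fixtures elsewhere in this commit:

    // Sketch only: a dashboard resource built against the new embedded type.
    src := resources.Dashboard{
        Dashboard: &dashboards.Dashboard{
            DisplayName: "my dashboard",
            WarehouseId: "f00dcafe",
            ParentPath:  "/some/path",
        },
    }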

@@ -29,7 +29,7 @@ func mockDashboardBundle(t *testing.T) *bundle.Bundle {
             Resources: config.Resources{
                 Dashboards: map[string]*resources.Dashboard{
                     "dash1": {
-                        CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+                        Dashboard: &dashboards.Dashboard{
                             DisplayName: "My Special Dashboard",
                         },
                     },

@@ -792,7 +792,7 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
             },
             Dashboards: map[string]*resources.Dashboard{
                 "test_dashboard": {
-                    CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+                    Dashboard: &dashboards.Dashboard{
                         DisplayName: "test_dashboard",
                     },
                 },

@@ -951,12 +951,12 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
             },
             Dashboards: map[string]*resources.Dashboard{
                 "test_dashboard": {
-                    CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+                    Dashboard: &dashboards.Dashboard{
                         DisplayName: "test_dashboard",
                     },
                 },
                 "test_dashboard_new": {
-                    CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+                    Dashboard: &dashboards.Dashboard{
                         DisplayName: "test_dashboard_new",
                     },
                 },

@@ -15,7 +15,7 @@ import (
 
 func TestConvertDashboard(t *testing.T) {
     var src = resources.Dashboard{
-        CreateDashboardRequest: &dashboards.CreateDashboardRequest{
+        Dashboard: &dashboards.Dashboard{
             DisplayName: "my dashboard",
             WarehouseId: "f00dcafe",
             ParentPath:  "/some/path",

@@ -185,6 +185,14 @@
       {
         "type": "object",
         "properties": {
+          "create_time": {
+            "description": "The timestamp of when the dashboard was created.",
+            "$ref": "#/$defs/string"
+          },
+          "dashboard_id": {
+            "description": "UUID identifying the dashboard.",
+            "$ref": "#/$defs/string"
+          },
           "display_name": {
             "description": "The display name of the dashboard.",
             "$ref": "#/$defs/string"

@@ -192,13 +200,25 @@
           "embed_credentials": {
             "$ref": "#/$defs/bool"
           },
+          "etag": {
+            "description": "The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard\nhas not been modified since the last read.\nThis field is excluded in List Dashboards responses.",
+            "$ref": "#/$defs/string"
+          },
           "file_path": {
             "$ref": "#/$defs/string"
           },
+          "lifecycle_state": {
+            "description": "The state of the dashboard resource. Used for tracking trashed status.",
+            "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState"
+          },
           "parent_path": {
             "description": "The workspace path of the folder containing the dashboard. Includes leading slash and no\ntrailing slash.\nThis field is excluded in List Dashboards responses.",
             "$ref": "#/$defs/string"
           },
+          "path": {
+            "description": "The workspace path of the dashboard asset, including the file name.\nExported dashboards always have the file extension `.lvdash.json`.\nThis field is excluded in List Dashboards responses.",
+            "$ref": "#/$defs/string"
+          },
           "permissions": {
             "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
           },

@@ -206,15 +226,16 @@
             "description": "The contents of the dashboard in serialized string form.\nThis field is excluded in List Dashboards responses.\nUse the [get dashboard API](https://docs.databricks.com/api/workspace/lakeview/get)\nto retrieve an example response, which includes the `serialized_dashboard` field.\nThis field provides the structure of the JSON string that represents the dashboard's\nlayout and components.",
             "$ref": "#/$defs/interface"
           },
+          "update_time": {
+            "description": "The timestamp of when the dashboard was last updated by the user.\nThis field is excluded in List Dashboards responses.",
+            "$ref": "#/$defs/string"
+          },
           "warehouse_id": {
             "description": "The warehouse ID used to run the dashboard.",
             "$ref": "#/$defs/string"
           }
         },
-        "additionalProperties": false,
-        "required": [
-          "display_name"
-        ]
+        "additionalProperties": false
       },
       {
         "type": "string",

@@ -2325,6 +2346,13 @@
           }
         ]
       },
+      "dashboards.LifecycleState": {
+        "type": "string",
+        "enum": [
+          "ACTIVE",
+          "TRASHED"
+        ]
+      },
       "jobs.Condition": {
         "type": "string",
         "enum": [
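The new dashboards.LifecycleState definition backs the lifecycle_state property added above. A hedged Go sketch of consuming the enum (the constant names are assumed from the SDK's usual generation of the ACTIVE/TRASHED values):

    // Sketch only; assumes the SDK's generated LifecycleState constants.
    if d.LifecycleState == dashboards.LifecycleStateTrashed {
        // Skip dashboards that have been moved to the trash.
    }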

@@ -3102,7 +3130,7 @@
             "$ref": "#/$defs/slice/string"
           },
           "jar_params": {
-            "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.",
+            "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.",
             "$ref": "#/$defs/slice/string"
           },
           "job_id": {

@@ -3436,11 +3464,11 @@
       "type": "object",
       "properties": {
         "condition_task": {
-          "description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.",
+          "description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask"
         },
         "dbt_task": {
-          "description": "If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.",
+          "description": "The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DbtTask"
         },
         "depends_on": {

@@ -3468,7 +3496,7 @@
           "$ref": "#/$defs/string"
         },
         "for_each_task": {
-          "description": "If for_each_task, indicates that this task must execute the nested task within it.",
+          "description": "The task executes a nested task for every input provided when the `for_each_task` field is present.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ForEachTask"
         },
         "health": {

@@ -3495,7 +3523,7 @@
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec"
         },
         "notebook_task": {
-          "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.",
+          "description": "The task runs a notebook when the `notebook_task` field is present.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.NotebookTask"
         },
         "notification_settings": {

@@ -3503,11 +3531,11 @@
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TaskNotificationSettings"
         },
         "pipeline_task": {
-          "description": "If pipeline_task, indicates that this task must execute a Pipeline.",
+          "description": "The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines configured to use triggered more are supported.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PipelineTask"
         },
         "python_wheel_task": {
-          "description": "If python_wheel_task, indicates that this job must execute a PythonWheel.",
+          "description": "The task runs a Python wheel when the `python_wheel_task` field is present.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PythonWheelTask"
         },
         "retry_on_timeout": {

@@ -3519,23 +3547,23 @@
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunIf"
         },
         "run_job_task": {
-          "description": "If run_job_task, indicates that this task must execute another job.",
+          "description": "The task triggers another job when the `run_job_task` field is present.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask"
         },
         "spark_jar_task": {
-          "description": "If spark_jar_task, indicates that this task must run a JAR.",
+          "description": "The task runs a JAR when the `spark_jar_task` field is present.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask"
         },
         "spark_python_task": {
-          "description": "If spark_python_task, indicates that this task must run a Python file.",
+          "description": "The task runs a Python file when the `spark_python_task` field is present.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask"
         },
         "spark_submit_task": {
-          "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.",
+          "description": "(Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask"
         },
         "sql_task": {
-          "description": "If sql_task, indicates that this job must execute a SQL task.",
+          "description": "The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SqlTask"
         },
         "task_key": {

@@ -3821,12 +3849,7 @@
         },
         "status": {
           "description": "Current status of `model_version`",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus",
-          "enum": [
-            "PENDING_REGISTRATION",
-            "FAILED_REGISTRATION",
-            "READY"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus"
         },
         "status_message": {
           "description": "Details on current `status`, if it is pending or failed.",

@@ -3854,7 +3877,13 @@
         ]
       },
       "ml.ModelVersionStatus": {
-        "type": "string"
+        "type": "string",
+        "description": "Current status of `model_version`",
+        "enum": [
+          "PENDING_REGISTRATION",
+          "FAILED_REGISTRATION",
+          "READY"
+        ]
       },
       "ml.ModelVersionTag": {
         "anyOf": [

@@ -4188,11 +4217,7 @@
         },
         "mode": {
           "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode",
-          "enum": [
-            "ENHANCED",
-            "LEGACY"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode"
         }
       },
       "additionalProperties": false,

@@ -4208,7 +4233,12 @@
         ]
       },
       "pipelines.PipelineClusterAutoscaleMode": {
-        "type": "string"
+        "type": "string",
+        "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n",
+        "enum": [
+          "ENHANCED",
+          "LEGACY"
+        ]
       },
       "pipelines.PipelineDeployment": {
         "anyOf": [

@@ -4411,11 +4441,7 @@
         },
         "scd_type": {
           "description": "The SCD type to use to ingest the table.",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType",
-          "enum": [
-            "SCD_TYPE_1",
-            "SCD_TYPE_2"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType"
         },
         "sequence_by": {
           "description": "The column names specifying the logical order of events in the source data. Delta Live Tables uses this sequencing to handle change events that arrive out of order.",

@@ -4431,7 +4457,12 @@
         ]
       },
       "pipelines.TableSpecificConfigScdType": {
-        "type": "string"
+        "type": "string",
+        "description": "The SCD type to use to ingest the table.",
+        "enum": [
+          "SCD_TYPE_1",
+          "SCD_TYPE_2"
+        ]
       },
       "serving.Ai21LabsConfig": {
         "anyOf": [

@@ -4520,11 +4551,7 @@
       "properties": {
         "behavior": {
           "description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior",
-          "enum": [
-            "NONE",
-            "BLOCK"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior"
         }
       },
       "additionalProperties": false,

@@ -4539,7 +4566,12 @@
         ]
       },
       "serving.AiGatewayGuardrailPiiBehaviorBehavior": {
-        "type": "string"
+        "type": "string",
+        "description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.",
+        "enum": [
+          "NONE",
+          "BLOCK"
+        ]
       },
       "serving.AiGatewayGuardrails": {
         "anyOf": [

@@ -4604,18 +4636,11 @@
         },
         "key": {
           "description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey",
-          "enum": [
-            "user",
-            "endpoint"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey"
         },
         "renewal_period": {
           "description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod",
-          "enum": [
-            "minute"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod"
         }
       },
       "additionalProperties": false,

@@ -4631,10 +4656,19 @@
         ]
       },
       "serving.AiGatewayRateLimitKey": {
-        "type": "string"
+        "type": "string",
+        "description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
+        "enum": [
+          "user",
+          "endpoint"
+        ]
       },
       "serving.AiGatewayRateLimitRenewalPeriod": {
-        "type": "string"
+        "type": "string",
+        "description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.",
+        "enum": [
+          "minute"
+        ]
       },
       "serving.AiGatewayUsageTrackingConfig": {
         "anyOf": [

@@ -4681,13 +4715,7 @@
         },
         "bedrock_provider": {
           "description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider",
-          "enum": [
-            "anthropic",
-            "cohere",
-            "ai21labs",
-            "amazon"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider"
         }
       },
       "additionalProperties": false,

@@ -4703,7 +4731,14 @@
         ]
       },
       "serving.AmazonBedrockConfigBedrockProvider": {
-        "type": "string"
+        "type": "string",
+        "description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
+        "enum": [
+          "anthropic",
+          "cohere",
+          "ai21labs",
+          "amazon"
+        ]
       },
       "serving.AnthropicConfig": {
         "anyOf": [

@@ -4910,17 +4945,7 @@
         },
         "provider": {
           "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider",
-          "enum": [
-            "ai21labs",
-            "anthropic",
-            "amazon-bedrock",
-            "cohere",
-            "databricks-model-serving",
-            "google-cloud-vertex-ai",
-            "openai",
-            "palm"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider"
         },
         "task": {
           "description": "The task type of the external model.",

@@ -4941,7 +4966,18 @@
         ]
       },
       "serving.ExternalModelProvider": {
-        "type": "string"
+        "type": "string",
+        "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n",
+        "enum": [
+          "ai21labs",
+          "anthropic",
+          "amazon-bedrock",
+          "cohere",
+          "databricks-model-serving",
+          "google-cloud-vertex-ai",
+          "openai",
+          "palm"
+        ]
       },
       "serving.GoogleCloudVertexAiConfig": {
         "anyOf": [

@@ -5047,18 +5083,11 @@
         },
         "key": {
           "description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey",
-          "enum": [
-            "user",
-            "endpoint"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey"
         },
         "renewal_period": {
           "description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod",
-          "enum": [
-            "minute"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod"
         }
       },
       "additionalProperties": false,

@@ -5074,10 +5103,19 @@
         ]
       },
       "serving.RateLimitKey": {
-        "type": "string"
+        "type": "string",
+        "description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
+        "enum": [
+          "user",
+          "endpoint"
+        ]
       },
       "serving.RateLimitRenewalPeriod": {
-        "type": "string"
+        "type": "string",
+        "description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.",
+        "enum": [
+          "minute"
+        ]
       },
       "serving.Route": {
         "anyOf": [

@@ -5202,23 +5240,11 @@
         },
         "workload_size": {
           "description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize",
-          "enum": [
-            "Small",
-            "Medium",
-            "Large"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize"
         },
         "workload_type": {
           "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n",
-          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType",
-          "enum": [
-            "CPU",
-            "GPU_SMALL",
-            "GPU_MEDIUM",
-            "GPU_LARGE",
-            "MULTIGPU_MEDIUM"
-          ]
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType"
         }
       },
       "additionalProperties": false,

@@ -5235,10 +5261,24 @@
         ]
       },
       "serving.ServedModelInputWorkloadSize": {
-        "type": "string"
+        "type": "string",
+        "description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n",
+        "enum": [
+          "Small",
+          "Medium",
+          "Large"
+        ]
       },
       "serving.ServedModelInputWorkloadType": {
-        "type": "string"
+        "type": "string",
+        "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n",
+        "enum": [
+          "CPU",
+          "GPU_SMALL",
+          "GPU_MEDIUM",
+          "GPU_LARGE",
+          "MULTIGPU_MEDIUM"
+        ]
       },
       "serving.TrafficConfig": {
         "anyOf": [

@@ -191,6 +191,8 @@ func newList() *cobra.Command {
 
     // TODO: short flags
 
+    cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `An opaque page token which was the next_page_token in the response of the previous request to list the secrets for this service principal.`)
+
     cmd.Use = "list SERVICE_PRINCIPAL_ID"
     cmd.Short = `List service principal secrets.`
     cmd.Long = `List service principal secrets.

@@ -81,6 +81,7 @@ func newCreate() *cobra.Command {
     cmd.Flags().StringVar(&createReq.DeploymentName, "deployment-name", createReq.DeploymentName, `The deployment name defines part of the subdomain for the workspace.`)
     // TODO: complex arg: gcp_managed_network_config
     // TODO: complex arg: gke_config
+    cmd.Flags().BoolVar(&createReq.IsNoPublicIpEnabled, "is-no-public-ip-enabled", createReq.IsNoPublicIpEnabled, `Whether no public IP is enabled for the workspace.`)
     cmd.Flags().StringVar(&createReq.Location, "location", createReq.Location, `The Google Cloud region of the workspace data plane in your Google account.`)
     cmd.Flags().StringVar(&createReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", createReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`)
     cmd.Flags().StringVar(&createReq.NetworkId, "network-id", createReq.NetworkId, ``)

cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go (generated, new executable file, 162 lines)
@@ -0,0 +1,162 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package aibi_dashboard_embedding_access_policy
+
+import (
+    "fmt"
+
+    "github.com/databricks/cli/cmd/root"
+    "github.com/databricks/cli/libs/cmdio"
+    "github.com/databricks/cli/libs/flags"
+    "github.com/databricks/databricks-sdk-go/service/settings"
+    "github.com/spf13/cobra"
+)
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var cmdOverrides []func(*cobra.Command)
+
+func New() *cobra.Command {
+    cmd := &cobra.Command{
+        Use:   "aibi-dashboard-embedding-access-policy",
+        Short: `Controls whether AI/BI published dashboard embedding is enabled, conditionally enabled, or disabled at the workspace level.`,
+        Long: `Controls whether AI/BI published dashboard embedding is enabled, conditionally
+  enabled, or disabled at the workspace level. By default, this setting is
+  conditionally enabled (ALLOW_APPROVED_DOMAINS).`,
+    }
+
+    // Add methods
+    cmd.AddCommand(newGet())
+    cmd.AddCommand(newUpdate())
+
+    // Apply optional overrides to this command.
+    for _, fn := range cmdOverrides {
+        fn(cmd)
+    }
+
+    return cmd
+}
+
+// start get command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var getOverrides []func(
+    *cobra.Command,
+    *settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest,
+)
+
+func newGet() *cobra.Command {
+    cmd := &cobra.Command{}
+
+    var getReq settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest
+
+    // TODO: short flags
+
+    cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`)
+
+    cmd.Use = "get"
+    cmd.Short = `Retrieve the AI/BI dashboard embedding access policy.`
+    cmd.Long = `Retrieve the AI/BI dashboard embedding access policy.
+
+  Retrieves the AI/BI dashboard embedding access policy. The default setting is
+  ALLOW_APPROVED_DOMAINS, permitting AI/BI dashboards to be embedded on approved
+  domains.`
+
+    cmd.Annotations = make(map[string]string)
+
+    cmd.Args = func(cmd *cobra.Command, args []string) error {
+        check := root.ExactArgs(0)
+        return check(cmd, args)
+    }
+
+    cmd.PreRunE = root.MustWorkspaceClient
+    cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+        ctx := cmd.Context()
+        w := root.WorkspaceClient(ctx)
+
+        response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Get(ctx, getReq)
+        if err != nil {
+            return err
+        }
+        return cmdio.Render(ctx, response)
+    }
+
+    // Disable completions since they are not applicable.
+    // Can be overridden by manual implementation in `override.go`.
+    cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+    // Apply optional overrides to this command.
+    for _, fn := range getOverrides {
+        fn(cmd, &getReq)
+    }
+
+    return cmd
+}
+
+// start update command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var updateOverrides []func(
+    *cobra.Command,
+    *settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest,
+)
+
+func newUpdate() *cobra.Command {
+    cmd := &cobra.Command{}
+
+    var updateReq settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest
+    var updateJson flags.JsonFlag
+
+    // TODO: short flags
+    cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+    cmd.Use = "update"
+    cmd.Short = `Update the AI/BI dashboard embedding access policy.`
+    cmd.Long = `Update the AI/BI dashboard embedding access policy.
+
+  Updates the AI/BI dashboard embedding access policy at the workspace level.`
+
+    cmd.Annotations = make(map[string]string)
+
+    cmd.PreRunE = root.MustWorkspaceClient
+    cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+        ctx := cmd.Context()
+        w := root.WorkspaceClient(ctx)
+
+        if cmd.Flags().Changed("json") {
+            diags := updateJson.Unmarshal(&updateReq)
+            if diags.HasError() {
+                return diags.Error()
+            }
+            if len(diags) > 0 {
+                err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+                if err != nil {
+                    return err
+                }
+            }
+        } else {
+            return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+        }
+
+        response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Update(ctx, updateReq)
+        if err != nil {
+            return err
+        }
+        return cmdio.Render(ctx, response)
+    }
+
+    // Disable completions since they are not applicable.
+    // Can be overridden by manual implementation in `override.go`.
+    cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+    // Apply optional overrides to this command.
+    for _, fn := range updateOverrides {
+        fn(cmd, &updateReq)
+    }
+
+    return cmd
+}
+
+// end service AibiDashboardEmbeddingAccessPolicy

cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go (generated, new executable file, 162 lines)
@@ -0,0 +1,162 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package aibi_dashboard_embedding_approved_domains
+
+import (
+    "fmt"
+
+    "github.com/databricks/cli/cmd/root"
+    "github.com/databricks/cli/libs/cmdio"
+    "github.com/databricks/cli/libs/flags"
+    "github.com/databricks/databricks-sdk-go/service/settings"
+    "github.com/spf13/cobra"
+)
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var cmdOverrides []func(*cobra.Command)
+
+func New() *cobra.Command {
+    cmd := &cobra.Command{
+        Use:   "aibi-dashboard-embedding-approved-domains",
+        Short: `Controls the list of domains approved to host the embedded AI/BI dashboards.`,
+        Long: `Controls the list of domains approved to host the embedded AI/BI dashboards.
+  The approved domains list can't be mutated when the current access policy is
+  not set to ALLOW_APPROVED_DOMAINS.`,
+    }
+
+    // Add methods
+    cmd.AddCommand(newGet())
+    cmd.AddCommand(newUpdate())
+
+    // Apply optional overrides to this command.
+    for _, fn := range cmdOverrides {
+        fn(cmd)
+    }
+
+    return cmd
+}
+
+// start get command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var getOverrides []func(
+    *cobra.Command,
+    *settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest,
+)
+
+func newGet() *cobra.Command {
+    cmd := &cobra.Command{}
+
+    var getReq settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest
+
+    // TODO: short flags
+
+    cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`)
+
+    cmd.Use = "get"
+    cmd.Short = `Retrieve the list of domains approved to host embedded AI/BI dashboards.`
+    cmd.Long = `Retrieve the list of domains approved to host embedded AI/BI dashboards.
+
+  Retrieves the list of domains approved to host embedded AI/BI dashboards.`
+
+    cmd.Annotations = make(map[string]string)
+
+    cmd.Args = func(cmd *cobra.Command, args []string) error {
+        check := root.ExactArgs(0)
+        return check(cmd, args)
+    }
+
+    cmd.PreRunE = root.MustWorkspaceClient
+    cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+        ctx := cmd.Context()
+        w := root.WorkspaceClient(ctx)
+
+        response, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Get(ctx, getReq)
+        if err != nil {
+            return err
+        }
+        return cmdio.Render(ctx, response)
+    }
+
+    // Disable completions since they are not applicable.
+    // Can be overridden by manual implementation in `override.go`.
+    cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+    // Apply optional overrides to this command.
+    for _, fn := range getOverrides {
+        fn(cmd, &getReq)
+    }
+
+    return cmd
+}
+
+// start update command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var updateOverrides []func(
+    *cobra.Command,
+    *settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest,
+)
+
+func newUpdate() *cobra.Command {
+    cmd := &cobra.Command{}
+
+    var updateReq settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest
+    var updateJson flags.JsonFlag
+
+    // TODO: short flags
+    cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+    cmd.Use = "update"
+    cmd.Short = `Update the list of domains approved to host embedded AI/BI dashboards.`
+    cmd.Long = `Update the list of domains approved to host embedded AI/BI dashboards.
+
+  Updates the list of domains approved to host embedded AI/BI dashboards. This
+  update will fail if the current workspace access policy is not
+  ALLOW_APPROVED_DOMAINS.`
+
+    cmd.Annotations = make(map[string]string)
+
+    cmd.PreRunE = root.MustWorkspaceClient
+    cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+        ctx := cmd.Context()
+        w := root.WorkspaceClient(ctx)
+
+        if cmd.Flags().Changed("json") {
+            diags := updateJson.Unmarshal(&updateReq)
+            if diags.HasError() {
+                return diags.Error()
+            }
+            if len(diags) > 0 {
+                err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+                if err != nil {
+                    return err
+                }
+            }
+        } else {
+            return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+        }
+
+        response, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Update(ctx, updateReq)
+        if err != nil {
+            return err
+        }
+        return cmdio.Render(ctx, response)
+    }
+
+    // Disable completions since they are not applicable.
+    // Can be overridden by manual implementation in `override.go`.
+    cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+    // Apply optional overrides to this command.
+    for _, fn := range updateOverrides {
+        fn(cmd, &updateReq)
+    }
+
+    return cmd
+}
+
+// end service AibiDashboardEmbeddingApprovedDomains
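Together, the two generated files add get/update command pairs for the new workspace settings. A minimal Go sketch of the underlying SDK calls the commands issue (client construction assumed; the request types are those declared in the files above):

    // Sketch only: the same service accessors used by the generated RunE bodies.
    w, err := databricks.NewWorkspaceClient()
    if err != nil {
        return err
    }
    policy, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Get(ctx,
        settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest{})
    if err != nil {
        return err
    }
    domains, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Get(ctx,
        settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest{})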

@@ -77,30 +77,18 @@ func newCreate() *cobra.Command {
     // TODO: short flags
     cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-    cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `The description of the app.`)
-    // TODO: array: resources
+    // TODO: complex arg: app
 
-    cmd.Use = "create NAME"
+    cmd.Use = "create"
     cmd.Short = `Create an app.`
     cmd.Long = `Create an app.
 
-  Creates a new app.
-
-  Arguments:
-    NAME: The name of the app. The name must contain only lowercase alphanumeric
-      characters and hyphens. It must be unique within the workspace.`
+  Creates a new app.`
 
     cmd.Annotations = make(map[string]string)
 
     cmd.Args = func(cmd *cobra.Command, args []string) error {
-        if cmd.Flags().Changed("json") {
-            err := root.ExactArgs(0)(cmd, args)
-            if err != nil {
-                return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input")
-            }
-            return nil
-        }
-        check := root.ExactArgs(1)
+        check := root.ExactArgs(0)
         return check(cmd, args)
     }
 

@@ -121,9 +109,6 @@ func newCreate() *cobra.Command {
                 }
             }
         }
-        if !cmd.Flags().Changed("json") {
-            createReq.Name = args[0]
-        }
 
         wait, err := w.Apps.Create(ctx, createReq)
         if err != nil {

@@ -244,9 +229,7 @@ func newDeploy() *cobra.Command {
     // TODO: short flags
     cmd.Flags().Var(&deployJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-    cmd.Flags().StringVar(&deployReq.DeploymentId, "deployment-id", deployReq.DeploymentId, `The unique id of the deployment.`)
-    cmd.Flags().Var(&deployReq.Mode, "mode", `The mode of which the deployment will manage the source code. Supported values: [AUTO_SYNC, SNAPSHOT]`)
-    cmd.Flags().StringVar(&deployReq.SourceCodePath, "source-code-path", deployReq.SourceCodePath, `The workspace file system path of the source code used to create the app deployment.`)
+    // TODO: complex arg: app_deployment
 
     cmd.Use = "deploy APP_NAME"
     cmd.Short = `Create an app deployment.`

@@ -925,8 +908,7 @@ func newUpdate() *cobra.Command {
     // TODO: short flags
     cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-    cmd.Flags().StringVar(&updateReq.Description, "description", updateReq.Description, `The description of the app.`)
-    // TODO: array: resources
+    // TODO: complex arg: app
 
     cmd.Use = "update NAME"
     cmd.Short = `Update an app.`

@@ -935,8 +917,7 @@ func newUpdate() *cobra.Command {
   Updates the app with the supplied name.
 
   Arguments:
-    NAME: The name of the app. The name must contain only lowercase alphanumeric
-      characters and hyphens. It must be unique within the workspace.`
+    NAME: The name of the app.`
 
     cmd.Annotations = make(map[string]string)
 

@@ -160,13 +160,13 @@ func newCreateMessage() *cobra.Command {
 // Functions can be added from the `init()` function in manually curated files in this directory.
 var executeMessageQueryOverrides []func(
     *cobra.Command,
-    *dashboards.ExecuteMessageQueryRequest,
+    *dashboards.GenieExecuteMessageQueryRequest,
 )
 
 func newExecuteMessageQuery() *cobra.Command {
     cmd := &cobra.Command{}
 
-    var executeMessageQueryReq dashboards.ExecuteMessageQueryRequest
+    var executeMessageQueryReq dashboards.GenieExecuteMessageQueryRequest
 
     // TODO: short flags
 
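The rename tracks the SDK, which now prefixes Genie request types with Genie. A sketch of a call site against the renamed type (the Genie service method and field names are assumptions, not shown in this diff):

    // Sketch only; method and field names assumed from the Genie API shape.
    result, err := w.Genie.ExecuteMessageQuery(ctx, dashboards.GenieExecuteMessageQueryRequest{
        SpaceId:        spaceId,
        ConversationId: conversationId,
        MessageId:      messageId,
    })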

@@ -847,7 +847,7 @@ func newGetRun() *cobra.Command {
 
     cmd.Flags().BoolVar(&getRunReq.IncludeHistory, "include-history", getRunReq.IncludeHistory, `Whether to include the repair history in the response.`)
     cmd.Flags().BoolVar(&getRunReq.IncludeResolvedValues, "include-resolved-values", getRunReq.IncludeResolvedValues, `Whether to include resolved parameter values in the response.`)
-    cmd.Flags().StringVar(&getRunReq.PageToken, "page-token", getRunReq.PageToken, `To list the next page or the previous page of job tasks, set this field to the value of the next_page_token or prev_page_token returned in the GetJob response.`)
+    cmd.Flags().StringVar(&getRunReq.PageToken, "page-token", getRunReq.PageToken, `To list the next page of job tasks, set this field to the value of the next_page_token returned in the GetJob response.`)
 
     cmd.Use = "get-run RUN_ID"
     cmd.Short = `Get a single job run.`

@@ -75,30 +75,18 @@ func newCreate() *cobra.Command {
     // TODO: short flags
     cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-    cmd.Flags().StringVar(&createReq.ParentPath, "parent-path", createReq.ParentPath, `The workspace path of the folder containing the dashboard.`)
-    cmd.Flags().StringVar(&createReq.SerializedDashboard, "serialized-dashboard", createReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`)
-    cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `The warehouse ID used to run the dashboard.`)
+    // TODO: complex arg: dashboard
 
-    cmd.Use = "create DISPLAY_NAME"
+    cmd.Use = "create"
     cmd.Short = `Create dashboard.`
     cmd.Long = `Create dashboard.
 
-  Create a draft dashboard.
-
-  Arguments:
-    DISPLAY_NAME: The display name of the dashboard.`
+  Create a draft dashboard.`
 
     cmd.Annotations = make(map[string]string)
 
     cmd.Args = func(cmd *cobra.Command, args []string) error {
-        if cmd.Flags().Changed("json") {
-            err := root.ExactArgs(0)(cmd, args)
-            if err != nil {
-                return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'display_name' in your JSON input")
-            }
-            return nil
-        }
-        check := root.ExactArgs(1)
+        check := root.ExactArgs(0)
         return check(cmd, args)
     }
 

@@ -119,9 +107,6 @@ func newCreate() *cobra.Command {
                 }
             }
         }
-        if !cmd.Flags().Changed("json") {
-            createReq.DisplayName = args[0]
-        }
 
         response, err := w.Lakeview.Create(ctx, createReq)
         if err != nil {

@@ -160,8 +145,7 @@ func newCreateSchedule() *cobra.Command {
     // TODO: short flags
     cmd.Flags().Var(&createScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-    cmd.Flags().StringVar(&createScheduleReq.DisplayName, "display-name", createScheduleReq.DisplayName, `The display name for schedule.`)
-    cmd.Flags().Var(&createScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`)
+    // TODO: complex arg: schedule
 
     cmd.Use = "create-schedule DASHBOARD_ID"
     cmd.Short = `Create dashboard schedule.`
|
|||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
|
||||
}
|
||||
createScheduleReq.DashboardId = args[0]
|
||||
|
||||
|

@@ -238,6 +220,8 @@ func newCreateSubscription() *cobra.Command {
     // TODO: short flags
     cmd.Flags().Var(&createSubscriptionJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
+    // TODO: complex arg: subscription
+
     cmd.Use = "create-subscription DASHBOARD_ID SCHEDULE_ID"
     cmd.Short = `Create schedule subscription.`
    cmd.Long = `Create schedule subscription.

@@ -272,8 +256,6 @@ func newCreateSubscription() *cobra.Command {
                     return err
                 }
             }
-        } else {
-            return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
         }
         createSubscriptionReq.DashboardId = args[0]
         createSubscriptionReq.ScheduleId = args[1]

@@ -1131,10 +1113,7 @@ func newUpdate() *cobra.Command {
     // TODO: short flags
     cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-    cmd.Flags().StringVar(&updateReq.DisplayName, "display-name", updateReq.DisplayName, `The display name of the dashboard.`)
-    cmd.Flags().StringVar(&updateReq.Etag, "etag", updateReq.Etag, `The etag for the dashboard.`)
-    cmd.Flags().StringVar(&updateReq.SerializedDashboard, "serialized-dashboard", updateReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`)
-    cmd.Flags().StringVar(&updateReq.WarehouseId, "warehouse-id", updateReq.WarehouseId, `The warehouse ID used to run the dashboard.`)
+    // TODO: complex arg: dashboard
 
     cmd.Use = "update DASHBOARD_ID"
     cmd.Short = `Update dashboard.`

@@ -1208,9 +1187,7 @@ func newUpdateSchedule() *cobra.Command {
     // TODO: short flags
     cmd.Flags().Var(&updateScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-    cmd.Flags().StringVar(&updateScheduleReq.DisplayName, "display-name", updateScheduleReq.DisplayName, `The display name for schedule.`)
-    cmd.Flags().StringVar(&updateScheduleReq.Etag, "etag", updateScheduleReq.Etag, `The etag for the schedule.`)
-    cmd.Flags().Var(&updateScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`)
+    // TODO: complex arg: schedule
 
     cmd.Use = "update-schedule DASHBOARD_ID SCHEDULE_ID"
     cmd.Short = `Update dashboard schedule.`

@@ -1246,8 +1223,6 @@ func newUpdateSchedule() *cobra.Command {
                     return err
                 }
             }
-        } else {
-            return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
         }
         updateScheduleReq.DashboardId = args[0]
         updateScheduleReq.ScheduleId = args[1]

@@ -3,6 +3,9 @@
 package online_tables
 
 import (
+    "fmt"
+    "time"
+
     "github.com/databricks/cli/cmd/root"
     "github.com/databricks/cli/libs/cmdio"
     "github.com/databricks/cli/libs/flags"

@@ -54,11 +57,15 @@ func newCreate() *cobra.Command {
     var createReq catalog.CreateOnlineTableRequest
     var createJson flags.JsonFlag
 
+    var createSkipWait bool
+    var createTimeout time.Duration
+
+    cmd.Flags().BoolVar(&createSkipWait, "no-wait", createSkipWait, `do not wait to reach ACTIVE state`)
+    cmd.Flags().DurationVar(&createTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach ACTIVE state`)
     // TODO: short flags
     cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-    cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Full three-part (catalog, schema, table) name of the table.`)
-    // TODO: complex arg: spec
+    // TODO: complex arg: table
 
     cmd.Use = "create"
     cmd.Short = `Create an Online Table.`

@@ -91,11 +98,24 @@ func newCreate() *cobra.Command {
             }
         }
 
-        response, err := w.OnlineTables.Create(ctx, createReq)
+        wait, err := w.OnlineTables.Create(ctx, createReq)
         if err != nil {
             return err
         }
-        return cmdio.Render(ctx, response)
+        if createSkipWait {
+            return cmdio.Render(ctx, wait.Response)
+        }
+        spinner := cmdio.Spinner(ctx)
+        info, err := wait.OnProgress(func(i *catalog.OnlineTable) {
+            status := i.UnityCatalogProvisioningState
+            statusMessage := fmt.Sprintf("current status: %s", status)
+            spinner <- statusMessage
+        }).GetWithTimeout(createTimeout)
+        close(spinner)
+        if err != nil {
+            return err
+        }
+        return cmdio.Render(ctx, info)
     }
 
     // Disable completions since they are not applicable.
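The create command now drives the SDK's long-running-operation waiter rather than returning immediately. The same pattern in isolation, as a Go sketch (names taken from the hunk above; the logging call is illustrative):

    // Sketch only: poll until the online table reaches ACTIVE or the timeout expires.
    wait, err := w.OnlineTables.Create(ctx, createReq)
    if err != nil {
        return err
    }
    info, err := wait.OnProgress(func(t *catalog.OnlineTable) {
        log.Printf("current status: %s", t.UnityCatalogProvisioningState)
    }).GetWithTimeout(20 * time.Minute)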

@@ -5,6 +5,8 @@ package settings
 import (
     "github.com/spf13/cobra"
 
+    aibi_dashboard_embedding_access_policy "github.com/databricks/cli/cmd/workspace/aibi-dashboard-embedding-access-policy"
+    aibi_dashboard_embedding_approved_domains "github.com/databricks/cli/cmd/workspace/aibi-dashboard-embedding-approved-domains"
     automatic_cluster_update "github.com/databricks/cli/cmd/workspace/automatic-cluster-update"
     compliance_security_profile "github.com/databricks/cli/cmd/workspace/compliance-security-profile"
     default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace"

@@ -30,6 +32,8 @@ func New() *cobra.Command {
     }
 
     // Add subservices
+    cmd.AddCommand(aibi_dashboard_embedding_access_policy.New())
+    cmd.AddCommand(aibi_dashboard_embedding_approved_domains.New())
     cmd.AddCommand(automatic_cluster_update.New())
     cmd.AddCommand(compliance_security_profile.New())
     cmd.AddCommand(default_namespace.New())

@@ -47,7 +47,9 @@ func TestAccDashboards(t *testing.T) {
     // Make an out of band modification to the dashboard and confirm that it is detected.
     _, err = wt.W.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{
         DashboardId: oi.ResourceId,
+        Dashboard: &dashboards.Dashboard{
             SerializedDashboard: dashboard.SerializedDashboard,
+        },
     })
     require.NoError(t, err)
 

@@ -30,10 +30,12 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) {
     dir := wt.TemporaryWorkspaceDir("dashboard-assumptions-")
 
     dashboard, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{
+        Dashboard: &dashboards.Dashboard{
             DisplayName:         dashboardName,
             ParentPath:          dir,
             SerializedDashboard: string(dashboardPayload),
             WarehouseId:         warehouseId,
+        },
     })
     require.NoError(t, err)
     t.Logf("Dashboard ID (per Lakeview API): %s", dashboard.DashboardId)

@@ -62,9 +64,11 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) {
     // Try to overwrite the dashboard via the Lakeview API (and expect failure).
     {
         _, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{
+            Dashboard: &dashboards.Dashboard{
                 DisplayName:         dashboardName,
                 ParentPath:          dir,
                 SerializedDashboard: string(dashboardPayload),
+            },
         })
         require.ErrorIs(t, err, apierr.ErrResourceAlreadyExists)
     }