mirror of https://github.com/databricks/cli.git
Bump github.com/databricks/databricks-sdk-go from 0.30.1 to 0.32.0 (#1199)
Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.30.1 to 0.32.0. --------- Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester <andrew.nester@databricks.com>
This commit is contained in:
parent
80670eceed
commit
299e9b56a6
|
@ -1 +1 @@
|
|||
e05401ed5dd4974c5333d737ec308a7d451f749f
|
||||
c40670f5a2055c92cf0a6aac92a5bccebfb80866
|
|
@ -193,7 +193,7 @@
|
|||
"description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.",
|
||||
"properties": {
|
||||
"pause_status": {
|
||||
"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
|
||||
"description": "Whether this trigger is paused or not."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -500,6 +500,12 @@
|
|||
},
|
||||
"local_ssd_count": {
|
||||
"description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type."
|
||||
},
|
||||
"use_preemptible_executors": {
|
||||
"description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead."
|
||||
},
|
||||
"zone_id": {
|
||||
"description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -508,6 +514,14 @@
|
|||
"items": {
|
||||
"description": "",
|
||||
"properties": {
|
||||
"abfss": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`."
|
||||
}
|
||||
}
|
||||
},
|
||||
"dbfs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`",
|
||||
"properties": {
|
||||
|
@ -524,6 +538,14 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"gcs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`"
|
||||
}
|
||||
}
|
||||
},
|
||||
"s3": {
|
||||
"description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.",
|
||||
"properties": {
|
||||
|
@ -703,7 +725,7 @@
|
|||
"description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
|
||||
"properties": {
|
||||
"pause_status": {
|
||||
"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
|
||||
"description": "Whether this trigger is paused or not."
|
||||
},
|
||||
"quartz_cron_expression": {
|
||||
"description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n"
|
||||
|
@ -757,11 +779,14 @@
|
|||
"description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used."
|
||||
},
|
||||
"project_directory": {
|
||||
"description": "Optional (relative) path to the project directory, if no value is provided, the root of the git repository is used."
|
||||
"description": "Path to the project directory. Optional for Git sourced tasks, in which case if no value is provided, the root of the Git repository is used."
|
||||
},
|
||||
"schema": {
|
||||
"description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used."
|
||||
},
|
||||
"source": {
|
||||
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
|
||||
},
|
||||
"warehouse_id": {
|
||||
"description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument."
|
||||
}
|
||||
|
@ -816,6 +841,7 @@
|
|||
"existing_cluster_id": {
|
||||
"description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability."
|
||||
},
|
||||
"for_each_task": null,
|
||||
"health": {
|
||||
"description": "",
|
||||
"properties": {
|
||||
|
@ -1082,6 +1108,12 @@
|
|||
},
|
||||
"local_ssd_count": {
|
||||
"description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type."
|
||||
},
|
||||
"use_preemptible_executors": {
|
||||
"description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead."
|
||||
},
|
||||
"zone_id": {
|
||||
"description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -1090,6 +1122,14 @@
|
|||
"items": {
|
||||
"description": "",
|
||||
"properties": {
|
||||
"abfss": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`."
|
||||
}
|
||||
}
|
||||
},
|
||||
"dbfs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`",
|
||||
"properties": {
|
||||
|
@ -1106,6 +1146,14 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"gcs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`"
|
||||
}
|
||||
}
|
||||
},
|
||||
"s3": {
|
||||
"description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.",
|
||||
"properties": {
|
||||
|
@ -1212,7 +1260,7 @@
|
|||
"description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.",
|
||||
"properties": {
|
||||
"base_parameters": {
|
||||
"description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n",
|
||||
"description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n",
|
||||
"additionalproperties": {
|
||||
"description": ""
|
||||
}
|
||||
|
@ -1303,7 +1351,7 @@
|
|||
"description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail."
|
||||
},
|
||||
"parameters": {
|
||||
"description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n",
|
||||
"description": "Parameters passed to the main method.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
|
||||
"items": {
|
||||
"description": ""
|
||||
}
|
||||
|
@ -1314,7 +1362,7 @@
|
|||
"description": "If spark_python_task, indicates that this task must run a Python file.",
|
||||
"properties": {
|
||||
"parameters": {
|
||||
"description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n",
|
||||
"description": "Command line parameters passed to the Python file.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
|
||||
"items": {
|
||||
"description": ""
|
||||
}
|
||||
|
@ -1331,7 +1379,7 @@
|
|||
"description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n",
|
||||
"properties": {
|
||||
"parameters": {
|
||||
"description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n",
|
||||
"description": "Command-line parameters passed to spark submit.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
|
||||
"items": {
|
||||
"description": ""
|
||||
}
|
||||
|
@ -1398,7 +1446,10 @@
|
|||
"description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.",
|
||||
"properties": {
|
||||
"path": {
|
||||
"description": "Relative path of the SQL file in the remote Git repository."
|
||||
"description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths."
|
||||
},
|
||||
"source": {
|
||||
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -1483,7 +1534,7 @@
|
|||
"description": "An optional timeout applied to each run of this job. A value of `0` means no timeout."
|
||||
},
|
||||
"trigger": {
|
||||
"description": "Trigger settings for the job. Can be used to trigger a run when new files arrive in an external location. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
|
||||
"description": "A configuration to trigger a run when certain conditions are met. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
|
||||
"properties": {
|
||||
"file_arrival": {
|
||||
"description": "File arrival trigger settings.",
|
||||
|
@ -1492,7 +1543,7 @@
|
|||
"description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n"
|
||||
},
|
||||
"url": {
|
||||
"description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location."
|
||||
"description": "The storage location to monitor for file arrivals. The value must point to the root or a subpath of an external location URL or the root or subpath of a Unity Catalog volume."
|
||||
},
|
||||
"wait_after_last_change_seconds": {
|
||||
"description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n"
|
||||
|
@ -1500,7 +1551,27 @@
|
|||
}
|
||||
},
|
||||
"pause_status": {
|
||||
"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
|
||||
"description": "Whether this trigger is paused or not."
|
||||
},
|
||||
"table": {
|
||||
"description": "Table trigger settings.",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"description": "The table(s) condition based on which to trigger a job run."
|
||||
},
|
||||
"min_time_between_triggers_seconds": {
|
||||
"description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.\n"
|
||||
},
|
||||
"table_names": {
|
||||
"description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.",
|
||||
"items": {
|
||||
"description": ""
|
||||
}
|
||||
},
|
||||
"wait_after_last_change_seconds": {
|
||||
"description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds.\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -1969,10 +2040,13 @@
|
|||
"description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
|
||||
"properties": {
|
||||
"max_workers": {
|
||||
"description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`."
|
||||
"description": "The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` must be strictly greater than `min_workers`."
|
||||
},
|
||||
"min_workers": {
|
||||
"description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation."
|
||||
"description": "The minimum number of workers the cluster can scale down to when underutilized.\nIt is also the initial number of workers the cluster will have after creation."
|
||||
},
|
||||
"mode": {
|
||||
"description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -2101,6 +2175,12 @@
|
|||
},
|
||||
"local_ssd_count": {
|
||||
"description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type."
|
||||
},
|
||||
"use_preemptible_executors": {
|
||||
"description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead."
|
||||
},
|
||||
"zone_id": {
|
||||
"description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -2109,6 +2189,14 @@
|
|||
"items": {
|
||||
"description": "",
|
||||
"properties": {
|
||||
"abfss": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`."
|
||||
}
|
||||
}
|
||||
},
|
||||
"dbfs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`",
|
||||
"properties": {
|
||||
|
@ -2125,6 +2213,14 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"gcs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`"
|
||||
}
|
||||
}
|
||||
},
|
||||
"s3": {
|
||||
"description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.",
|
||||
"properties": {
|
||||
|
@ -2629,7 +2725,7 @@
|
|||
"description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.",
|
||||
"properties": {
|
||||
"pause_status": {
|
||||
"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
|
||||
"description": "Whether this trigger is paused or not."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -2936,6 +3032,12 @@
|
|||
},
|
||||
"local_ssd_count": {
|
||||
"description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type."
|
||||
},
|
||||
"use_preemptible_executors": {
|
||||
"description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead."
|
||||
},
|
||||
"zone_id": {
|
||||
"description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -2944,6 +3046,14 @@
|
|||
"items": {
|
||||
"description": "",
|
||||
"properties": {
|
||||
"abfss": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`."
|
||||
}
|
||||
}
|
||||
},
|
||||
"dbfs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`",
|
||||
"properties": {
|
||||
|
@ -2960,6 +3070,14 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"gcs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`"
|
||||
}
|
||||
}
|
||||
},
|
||||
"s3": {
|
||||
"description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.",
|
||||
"properties": {
|
||||
|
@ -3139,7 +3257,7 @@
|
|||
"description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
|
||||
"properties": {
|
||||
"pause_status": {
|
||||
"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
|
||||
"description": "Whether this trigger is paused or not."
|
||||
},
|
||||
"quartz_cron_expression": {
|
||||
"description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n"
|
||||
|
@ -3193,11 +3311,14 @@
|
|||
"description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used."
|
||||
},
|
||||
"project_directory": {
|
||||
"description": "Optional (relative) path to the project directory, if no value is provided, the root of the git repository is used."
|
||||
"description": "Path to the project directory. Optional for Git sourced tasks, in which case if no value is provided, the root of the Git repository is used."
|
||||
},
|
||||
"schema": {
|
||||
"description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used."
|
||||
},
|
||||
"source": {
|
||||
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
|
||||
},
|
||||
"warehouse_id": {
|
||||
"description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument."
|
||||
}
|
||||
|
@ -3252,6 +3373,7 @@
|
|||
"existing_cluster_id": {
|
||||
"description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability."
|
||||
},
|
||||
"for_each_task": null,
|
||||
"health": {
|
||||
"description": "",
|
||||
"properties": {
|
||||
|
@ -3518,6 +3640,12 @@
|
|||
},
|
||||
"local_ssd_count": {
|
||||
"description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type."
|
||||
},
|
||||
"use_preemptible_executors": {
|
||||
"description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead."
|
||||
},
|
||||
"zone_id": {
|
||||
"description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -3526,6 +3654,14 @@
|
|||
"items": {
|
||||
"description": "",
|
||||
"properties": {
|
||||
"abfss": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`."
|
||||
}
|
||||
}
|
||||
},
|
||||
"dbfs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`",
|
||||
"properties": {
|
||||
|
@ -3542,6 +3678,14 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"gcs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`"
|
||||
}
|
||||
}
|
||||
},
|
||||
"s3": {
|
||||
"description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.",
|
||||
"properties": {
|
||||
|
@ -3648,7 +3792,7 @@
|
|||
"description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.",
|
||||
"properties": {
|
||||
"base_parameters": {
|
||||
"description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n",
|
||||
"description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n",
|
||||
"additionalproperties": {
|
||||
"description": ""
|
||||
}
|
||||
|
@ -3739,7 +3883,7 @@
|
|||
"description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail."
|
||||
},
|
||||
"parameters": {
|
||||
"description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n",
|
||||
"description": "Parameters passed to the main method.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
|
||||
"items": {
|
||||
"description": ""
|
||||
}
|
||||
|
@ -3750,7 +3894,7 @@
|
|||
"description": "If spark_python_task, indicates that this task must run a Python file.",
|
||||
"properties": {
|
||||
"parameters": {
|
||||
"description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n",
|
||||
"description": "Command line parameters passed to the Python file.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
|
||||
"items": {
|
||||
"description": ""
|
||||
}
|
||||
|
@ -3767,7 +3911,7 @@
|
|||
"description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n",
|
||||
"properties": {
|
||||
"parameters": {
|
||||
"description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n",
|
||||
"description": "Command-line parameters passed to spark submit.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
|
||||
"items": {
|
||||
"description": ""
|
||||
}
|
||||
|
@ -3834,7 +3978,10 @@
|
|||
"description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.",
|
||||
"properties": {
|
||||
"path": {
|
||||
"description": "Relative path of the SQL file in the remote Git repository."
|
||||
"description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths."
|
||||
},
|
||||
"source": {
|
||||
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -3919,7 +4066,7 @@
|
|||
"description": "An optional timeout applied to each run of this job. A value of `0` means no timeout."
|
||||
},
|
||||
"trigger": {
|
||||
"description": "Trigger settings for the job. Can be used to trigger a run when new files arrive in an external location. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
|
||||
"description": "A configuration to trigger a run when certain conditions are met. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
|
||||
"properties": {
|
||||
"file_arrival": {
|
||||
"description": "File arrival trigger settings.",
|
||||
|
@ -3928,7 +4075,7 @@
|
|||
"description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n"
|
||||
},
|
||||
"url": {
|
||||
"description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location."
|
||||
"description": "The storage location to monitor for file arrivals. The value must point to the root or a subpath of an external location URL or the root or subpath of a Unity Catalog volume."
|
||||
},
|
||||
"wait_after_last_change_seconds": {
|
||||
"description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n"
|
||||
|
@ -3936,7 +4083,27 @@
|
|||
}
|
||||
},
|
||||
"pause_status": {
|
||||
"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
|
||||
"description": "Whether this trigger is paused or not."
|
||||
},
|
||||
"table": {
|
||||
"description": "Table trigger settings.",
|
||||
"properties": {
|
||||
"condition": {
|
||||
"description": "The table(s) condition based on which to trigger a job run."
|
||||
},
|
||||
"min_time_between_triggers_seconds": {
|
||||
"description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.\n"
|
||||
},
|
||||
"table_names": {
|
||||
"description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.",
|
||||
"items": {
|
||||
"description": ""
|
||||
}
|
||||
},
|
||||
"wait_after_last_change_seconds": {
|
||||
"description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds.\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -4405,10 +4572,13 @@
|
|||
"description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
|
||||
"properties": {
|
||||
"max_workers": {
|
||||
"description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`."
|
||||
"description": "The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` must be strictly greater than `min_workers`."
|
||||
},
|
||||
"min_workers": {
|
||||
"description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation."
|
||||
"description": "The minimum number of workers the cluster can scale down to when underutilized.\nIt is also the initial number of workers the cluster will have after creation."
|
||||
},
|
||||
"mode": {
|
||||
"description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -4537,6 +4707,12 @@
|
|||
},
|
||||
"local_ssd_count": {
|
||||
"description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type."
|
||||
},
|
||||
"use_preemptible_executors": {
|
||||
"description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead."
|
||||
},
|
||||
"zone_id": {
|
||||
"description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -4545,6 +4721,14 @@
|
|||
"items": {
|
||||
"description": "",
|
||||
"properties": {
|
||||
"abfss": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`."
|
||||
}
|
||||
}
|
||||
},
|
||||
"dbfs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`",
|
||||
"properties": {
|
||||
|
@ -4561,6 +4745,14 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"gcs": {
|
||||
"description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`",
|
||||
"properties": {
|
||||
"destination": {
|
||||
"description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`"
|
||||
}
|
||||
}
|
||||
},
|
||||
"s3": {
|
||||
"description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.",
|
||||
"properties": {
|
||||
|
|
|
@ -3,6 +3,8 @@
|
|||
package settings
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/cmd/root"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
|
@ -60,25 +62,18 @@ func newDeletePersonalComputeSetting() *cobra.Command {
|
|||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "delete-personal-compute-setting ETAG"
|
||||
cmd.Flags().StringVar(&deletePersonalComputeSettingReq.Etag, "etag", deletePersonalComputeSettingReq.Etag, `etag used for versioning.`)
|
||||
|
||||
cmd.Use = "delete-personal-compute-setting"
|
||||
cmd.Short = `Delete Personal Compute setting.`
|
||||
cmd.Long = `Delete Personal Compute setting.
|
||||
|
||||
Reverts back the Personal Compute setting value to default (ON)
|
||||
|
||||
Arguments:
|
||||
ETAG: etag used for versioning. The response is at least as fresh as the eTag
|
||||
provided. This is used for optimistic concurrency control as a way to help
|
||||
prevent simultaneous writes of a setting overwriting each other. It is
|
||||
strongly suggested that systems make use of the etag in the read -> delete
|
||||
pattern to perform setting deletions in order to avoid race conditions.
|
||||
That is, get an etag from a GET request, and pass it with the DELETE
|
||||
request to identify the rule set version you are deleting.`
|
||||
Reverts back the Personal Compute setting value to default (ON)`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(1)
|
||||
check := cobra.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
|
@ -87,8 +82,6 @@ func newDeletePersonalComputeSetting() *cobra.Command {
|
|||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
deletePersonalComputeSettingReq.Etag = args[0]
|
||||
|
||||
response, err := a.Settings.DeletePersonalComputeSetting(ctx, deletePersonalComputeSettingReq)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -114,41 +107,34 @@ func init() {
|
|||
})
|
||||
}
|
||||
|
||||
// start read-personal-compute-setting command
|
||||
// start get-personal-compute-setting command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var readPersonalComputeSettingOverrides []func(
|
||||
var getPersonalComputeSettingOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.ReadPersonalComputeSettingRequest,
|
||||
*settings.GetPersonalComputeSettingRequest,
|
||||
)
|
||||
|
||||
func newReadPersonalComputeSetting() *cobra.Command {
|
||||
func newGetPersonalComputeSetting() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var readPersonalComputeSettingReq settings.ReadPersonalComputeSettingRequest
|
||||
var getPersonalComputeSettingReq settings.GetPersonalComputeSettingRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "read-personal-compute-setting ETAG"
|
||||
cmd.Flags().StringVar(&getPersonalComputeSettingReq.Etag, "etag", getPersonalComputeSettingReq.Etag, `etag used for versioning.`)
|
||||
|
||||
cmd.Use = "get-personal-compute-setting"
|
||||
cmd.Short = `Get Personal Compute setting.`
|
||||
cmd.Long = `Get Personal Compute setting.
|
||||
|
||||
Gets the value of the Personal Compute setting.
|
||||
|
||||
Arguments:
|
||||
ETAG: etag used for versioning. The response is at least as fresh as the eTag
|
||||
provided. This is used for optimistic concurrency control as a way to help
|
||||
prevent simultaneous writes of a setting overwriting each other. It is
|
||||
strongly suggested that systems make use of the etag in the read -> delete
|
||||
pattern to perform setting deletions in order to avoid race conditions.
|
||||
That is, get an etag from a GET request, and pass it with the DELETE
|
||||
request to identify the rule set version you are deleting.`
|
||||
Gets the value of the Personal Compute setting.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(1)
|
||||
check := cobra.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
|
@ -157,9 +143,7 @@ func newReadPersonalComputeSetting() *cobra.Command {
|
|||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
||||
readPersonalComputeSettingReq.Etag = args[0]
|
||||
|
||||
response, err := a.Settings.ReadPersonalComputeSetting(ctx, readPersonalComputeSettingReq)
|
||||
response, err := a.Settings.GetPersonalComputeSetting(ctx, getPersonalComputeSettingReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -171,8 +155,8 @@ func newReadPersonalComputeSetting() *cobra.Command {
|
|||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range readPersonalComputeSettingOverrides {
|
||||
fn(cmd, &readPersonalComputeSettingReq)
|
||||
for _, fn := range getPersonalComputeSettingOverrides {
|
||||
fn(cmd, &getPersonalComputeSettingReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
|
@ -180,7 +164,7 @@ func newReadPersonalComputeSetting() *cobra.Command {
|
|||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newReadPersonalComputeSetting())
|
||||
cmd.AddCommand(newGetPersonalComputeSetting())
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -202,9 +186,6 @@ func newUpdatePersonalComputeSetting() *cobra.Command {
|
|||
// TODO: short flags
|
||||
cmd.Flags().Var(&updatePersonalComputeSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().BoolVar(&updatePersonalComputeSettingReq.AllowMissing, "allow-missing", updatePersonalComputeSettingReq.AllowMissing, `This should always be set to true for Settings RPCs.`)
|
||||
// TODO: complex arg: setting
|
||||
|
||||
cmd.Use = "update-personal-compute-setting"
|
||||
cmd.Short = `Update Personal Compute setting.`
|
||||
cmd.Long = `Update Personal Compute setting.
|
||||
|
@ -213,11 +194,6 @@ func newUpdatePersonalComputeSetting() *cobra.Command {
|
|||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustAccountClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
|
@ -228,6 +204,8 @@ func newUpdatePersonalComputeSetting() *cobra.Command {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
|
||||
}
|
||||
|
||||
response, err := a.Settings.UpdatePersonalComputeSetting(ctx, updatePersonalComputeSettingReq)
|
||||
|
|
|
@ -127,7 +127,7 @@ func newDelete() *cobra.Command {
|
|||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "delete NAME_ARG"
|
||||
cmd.Use = "delete NAME"
|
||||
cmd.Short = `Delete a clean room.`
|
||||
cmd.Long = `Delete a clean room.
|
||||
|
||||
|
@ -135,7 +135,7 @@ func newDelete() *cobra.Command {
|
|||
owner of the clean room.
|
||||
|
||||
Arguments:
|
||||
NAME_ARG: The name of the clean room.`
|
||||
NAME: The name of the clean room.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
@ -149,7 +149,7 @@ func newDelete() *cobra.Command {
|
|||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
deleteReq.NameArg = args[0]
|
||||
deleteReq.Name = args[0]
|
||||
|
||||
err = w.CleanRooms.Delete(ctx, deleteReq)
|
||||
if err != nil {
|
||||
|
@ -194,7 +194,7 @@ func newGet() *cobra.Command {
|
|||
|
||||
cmd.Flags().BoolVar(&getReq.IncludeRemoteDetails, "include-remote-details", getReq.IncludeRemoteDetails, `Whether to include remote details (central) on the clean room.`)
|
||||
|
||||
cmd.Use = "get NAME_ARG"
|
||||
cmd.Use = "get NAME"
|
||||
cmd.Short = `Get a clean room.`
|
||||
cmd.Long = `Get a clean room.
|
||||
|
||||
|
@ -202,7 +202,7 @@ func newGet() *cobra.Command {
|
|||
metastore admin or the owner of the clean room.
|
||||
|
||||
Arguments:
|
||||
NAME_ARG: The name of the clean room.`
|
||||
NAME: The name of the clean room.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
@ -216,7 +216,7 @@ func newGet() *cobra.Command {
|
|||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
getReq.NameArg = args[0]
|
||||
getReq.Name = args[0]
|
||||
|
||||
response, err := w.CleanRooms.Get(ctx, getReq)
|
||||
if err != nil {
|
||||
|
@ -329,7 +329,7 @@ func newUpdate() *cobra.Command {
|
|||
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
|
||||
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of clean room.`)
|
||||
|
||||
cmd.Use = "update NAME_ARG"
|
||||
cmd.Use = "update NAME"
|
||||
cmd.Short = `Update a clean room.`
|
||||
cmd.Long = `Update a clean room.
|
||||
|
||||
|
@ -349,7 +349,7 @@ func newUpdate() *cobra.Command {
|
|||
Table removals through **update** do not require additional privileges.
|
||||
|
||||
Arguments:
|
||||
NAME_ARG: The name of the clean room.`
|
||||
NAME: The name of the clean room.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
@ -369,7 +369,7 @@ func newUpdate() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
updateReq.NameArg = args[0]
|
||||
updateReq.Name = args[0]
|
||||
|
||||
response, err := w.CleanRooms.Update(ctx, updateReq)
|
||||
if err != nil {
|
||||
|
|
|
@ -134,14 +134,14 @@ func newDelete() *cobra.Command {
|
|||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "delete NAME_ARG"
|
||||
cmd.Use = "delete NAME"
|
||||
cmd.Short = `Delete a connection.`
|
||||
cmd.Long = `Delete a connection.
|
||||
|
||||
Deletes the connection that matches the supplied name.
|
||||
|
||||
Arguments:
|
||||
NAME_ARG: The name of the connection to be deleted.`
|
||||
NAME: The name of the connection to be deleted.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
@ -152,7 +152,7 @@ func newDelete() *cobra.Command {
|
|||
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No NAME_ARG argument specified. Loading names for Connections drop-down."
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Connections drop-down."
|
||||
names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
|
@ -167,7 +167,7 @@ func newDelete() *cobra.Command {
|
|||
if len(args) != 1 {
|
||||
return fmt.Errorf("expected to have the name of the connection to be deleted")
|
||||
}
|
||||
deleteReq.NameArg = args[0]
|
||||
deleteReq.Name = args[0]
|
||||
|
||||
err = w.Connections.Delete(ctx, deleteReq)
|
||||
if err != nil {
|
||||
|
@@ -210,14 +210,14 @@ func newGet() *cobra.Command {

// TODO: short flags

cmd.Use = "get NAME_ARG"
cmd.Use = "get NAME"
cmd.Short = `Get a connection.`
cmd.Long = `Get a connection.

Gets a connection from it's name.

Arguments:
NAME_ARG: Name of the connection.`
NAME: Name of the connection.`

cmd.Annotations = make(map[string]string)

@@ -228,7 +228,7 @@ func newGet() *cobra.Command {

if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No NAME_ARG argument specified. Loading names for Connections drop-down."
promptSpinner <- "No NAME argument specified. Loading names for Connections drop-down."
names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx)
close(promptSpinner)
if err != nil {
@@ -243,7 +243,7 @@ func newGet() *cobra.Command {
if len(args) != 1 {
return fmt.Errorf("expected to have name of the connection")
}
getReq.NameArg = args[0]
getReq.Name = args[0]

response, err := w.Connections.Get(ctx, getReq)
if err != nil {
@@ -336,18 +336,17 @@ func newUpdate() *cobra.Command {
// TODO: short flags
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)

cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of the connection.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the connection.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of the connection.`)

cmd.Use = "update NAME_ARG"
cmd.Use = "update NAME"
cmd.Short = `Update a connection.`
cmd.Long = `Update a connection.

Updates the connection that matches the supplied name.

Arguments:
NAME_ARG: Name of the connection.`
NAME: Name of the connection.`

cmd.Annotations = make(map[string]string)

@@ -369,7 +368,7 @@ func newUpdate() *cobra.Command {
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
updateReq.NameArg = args[0]
updateReq.Name = args[0]

response, err := w.Connections.Update(ctx, updateReq)
if err != nil {
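The connections commands above share one interactive pattern: when the positional NAME argument is omitted, the command loads a name map and prompts with a drop-down. A minimal sketch of that flow, reusing only the cmdio and Connections helpers that appear in this diff (the wrapper function name is mine):

// Sketch: resolve a connection name interactively when no argument is given.
package example

import (
	"context"
	"fmt"

	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/databricks-sdk-go"
)

func resolveConnectionName(ctx context.Context, w *databricks.WorkspaceClient, args []string) (string, error) {
	if len(args) == 1 {
		return args[0], nil
	}
	promptSpinner := cmdio.Spinner(ctx)
	promptSpinner <- "No NAME argument specified. Loading names for Connections drop-down."
	names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx)
	close(promptSpinner)
	if err != nil {
		return "", fmt.Errorf("failed to load names for Connections drop-down: %w", err)
	}
	// cmdio.Select renders the drop-down and returns the chosen entry.
	return cmdio.Select(ctx, names, "Name of the connection")
}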
@@ -61,8 +61,8 @@ func newExchangeToken() *cobra.Command {
cmd.Short = `Exchange token.`
cmd.Long = `Exchange token.

Exchange tokens with an Identity Provider to get a new access token. It
allowes specifying scopes to determine token permissions.`
Exchange tokens with an Identity Provider to get a new access token. It allows
specifying scopes to determine token permissions.`

cmd.Annotations = make(map[string]string)
@@ -276,7 +276,10 @@ func newList() *cobra.Command {
cmd.Short = `Get dashboard objects.`
cmd.Long = `Get dashboard objects.

Fetch a paginated list of dashboard objects.`
Fetch a paginated list of dashboard objects.

### **Warning: Calling this API concurrently 10 or more times could result in
throttling, service degradation, or a temporary ban.**`

cmd.Annotations = make(map[string]string)
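The warning added above is about client-side call volume rather than the API shape. A hedged sketch of one way to respect it from Go, using the rate limiter from golang.org/x/time (already an indirect dependency in the go.mod diff below); the 5-requests-per-second budget is an illustrative choice, not a documented limit:

// Sketch: serialize list calls behind a small rate budget instead of firing
// 10+ concurrent requests, which the description above warns can lead to
// throttling or a temporary ban.
package example

import (
	"context"

	"golang.org/x/time/rate"
)

func listWithBudget(ctx context.Context, pages []func(context.Context) error) error {
	limiter := rate.NewLimiter(rate.Limit(5), 1) // assumption: 5 rps stays comfortably under the threshold
	for _, fetch := range pages {
		if err := limiter.Wait(ctx); err != nil {
			return err
		}
		if err := fetch(ctx); err != nil {
			return err
		}
	}
	return nil
}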
@ -42,6 +42,84 @@ func New() *cobra.Command {
|
|||
return cmd
|
||||
}
|
||||
|
||||
// start cancel-refresh command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var cancelRefreshOverrides []func(
|
||||
*cobra.Command,
|
||||
*catalog.CancelRefreshRequest,
|
||||
)
|
||||
|
||||
func newCancelRefresh() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var cancelRefreshReq catalog.CancelRefreshRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "cancel-refresh FULL_NAME REFRESH_ID"
|
||||
cmd.Short = `Cancel refresh.`
|
||||
cmd.Long = `Cancel refresh.
|
||||
|
||||
Cancel an active monitor refresh for the given refresh ID.
|
||||
|
||||
The caller must either: 1. be an owner of the table's parent catalog 2. have
|
||||
**USE_CATALOG** on the table's parent catalog and be an owner of the table's
|
||||
parent schema 3. have the following permissions: - **USE_CATALOG** on the
|
||||
table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
|
||||
owner of the table
|
||||
|
||||
Additionally, the call must be made from the workspace where the monitor was
|
||||
created.
|
||||
|
||||
Arguments:
|
||||
FULL_NAME: Full name of the table.
|
||||
REFRESH_ID: ID of the refresh.`
|
||||
|
||||
// This command is being previewed; hide from help output.
|
||||
cmd.Hidden = true
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(2)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
cancelRefreshReq.FullName = args[0]
|
||||
cancelRefreshReq.RefreshId = args[1]
|
||||
|
||||
err = w.LakehouseMonitors.CancelRefresh(ctx, cancelRefreshReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range cancelRefreshOverrides {
|
||||
fn(cmd, &cancelRefreshReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newCancelRefresh())
|
||||
})
|
||||
}
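The cancelRefreshOverrides slice above is the hook the generator leaves for hand-written tweaks. A sketch of how a manually curated override.go in the same directory could use it; the package name is an assumption, everything else follows the pattern shown in this diff:

// override.go (manually curated file in the same directory) -- illustrative sketch.
package lakehouse_monitors // assumption: the actual package name of this command directory

import (
	"github.com/databricks/databricks-sdk-go/service/catalog"
	"github.com/spf13/cobra"
)

func init() {
	// Appended here, applied by newCancelRefresh() just before it returns the command.
	cancelRefreshOverrides = append(cancelRefreshOverrides,
		func(cmd *cobra.Command, req *catalog.CancelRefreshRequest) {
			// Example tweak: adjust the help text of the generated command.
			cmd.Short = `Cancel an active monitor refresh.`
		})
}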
|
||||
|
||||
// start create command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
|
@ -302,6 +380,229 @@ func init() {
|
|||
})
|
||||
}
|
||||
|
||||
// start get-refresh command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var getRefreshOverrides []func(
|
||||
*cobra.Command,
|
||||
*catalog.GetRefreshRequest,
|
||||
)
|
||||
|
||||
func newGetRefresh() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var getRefreshReq catalog.GetRefreshRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "get-refresh FULL_NAME REFRESH_ID"
|
||||
cmd.Short = `Get refresh.`
|
||||
cmd.Long = `Get refresh.
|
||||
|
||||
Gets info about a specific monitor refresh using the given refresh ID.
|
||||
|
||||
The caller must either: 1. be an owner of the table's parent catalog 2. have
|
||||
**USE_CATALOG** on the table's parent catalog and be an owner of the table's
|
||||
parent schema 3. have the following permissions: - **USE_CATALOG** on the
|
||||
table's parent catalog - **USE_SCHEMA** on the table's parent schema -
|
||||
**SELECT** privilege on the table.
|
||||
|
||||
Additionally, the call must be made from the workspace where the monitor was
|
||||
created.
|
||||
|
||||
Arguments:
|
||||
FULL_NAME: Full name of the table.
|
||||
REFRESH_ID: ID of the refresh.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(2)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
getRefreshReq.FullName = args[0]
|
||||
getRefreshReq.RefreshId = args[1]
|
||||
|
||||
response, err := w.LakehouseMonitors.GetRefresh(ctx, getRefreshReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range getRefreshOverrides {
|
||||
fn(cmd, &getRefreshReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newGetRefresh())
|
||||
})
|
||||
}
|
||||
|
||||
// start list-refreshes command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var listRefreshesOverrides []func(
|
||||
*cobra.Command,
|
||||
*catalog.ListRefreshesRequest,
|
||||
)
|
||||
|
||||
func newListRefreshes() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var listRefreshesReq catalog.ListRefreshesRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "list-refreshes FULL_NAME"
|
||||
cmd.Short = `List refreshes.`
|
||||
cmd.Long = `List refreshes.
|
||||
|
||||
Gets an array containing the history of the most recent refreshes (up to 25)
|
||||
for this table.
|
||||
|
||||
The caller must either: 1. be an owner of the table's parent catalog 2. have
|
||||
**USE_CATALOG** on the table's parent catalog and be an owner of the table's
|
||||
parent schema 3. have the following permissions: - **USE_CATALOG** on the
|
||||
table's parent catalog - **USE_SCHEMA** on the table's parent schema -
|
||||
**SELECT** privilege on the table.
|
||||
|
||||
Additionally, the call must be made from the workspace where the monitor was
|
||||
created.
|
||||
|
||||
Arguments:
|
||||
FULL_NAME: Full name of the table.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
listRefreshesReq.FullName = args[0]
|
||||
|
||||
response, err := w.LakehouseMonitors.ListRefreshes(ctx, listRefreshesReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range listRefreshesOverrides {
|
||||
fn(cmd, &listRefreshesReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newListRefreshes())
|
||||
})
|
||||
}
|
||||
|
||||
// start run-refresh command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var runRefreshOverrides []func(
|
||||
*cobra.Command,
|
||||
*catalog.RunRefreshRequest,
|
||||
)
|
||||
|
||||
func newRunRefresh() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var runRefreshReq catalog.RunRefreshRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "run-refresh FULL_NAME"
|
||||
cmd.Short = `Queue a metric refresh for a monitor.`
|
||||
cmd.Long = `Queue a metric refresh for a monitor.
|
||||
|
||||
Queues a metric refresh on the monitor for the specified table. The refresh
|
||||
will execute in the background.
|
||||
|
||||
The caller must either: 1. be an owner of the table's parent catalog 2. have
|
||||
**USE_CATALOG** on the table's parent catalog and be an owner of the table's
|
||||
parent schema 3. have the following permissions: - **USE_CATALOG** on the
|
||||
table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
|
||||
owner of the table
|
||||
|
||||
Additionally, the call must be made from the workspace where the monitor was
|
||||
created.
|
||||
|
||||
Arguments:
|
||||
FULL_NAME: Full name of the table.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
runRefreshReq.FullName = args[0]
|
||||
|
||||
response, err := w.LakehouseMonitors.RunRefresh(ctx, runRefreshReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range runRefreshOverrides {
|
||||
fn(cmd, &runRefreshReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newRunRefresh())
|
||||
})
|
||||
}
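Taken together, the new run-refresh and list-refreshes commands amount to a queue-then-inspect flow against the LakehouseMonitors service. A sketch of the same flow through the SDK client these commands wrap, assuming only the request types that appear in this diff:

// Sketch: queue a metric refresh for a table, then read back its refresh history.
package example

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func refreshAndInspect(ctx context.Context, w *databricks.WorkspaceClient, fullName string) error {
	// Queues a refresh; per the description above it executes in the background.
	queued, err := w.LakehouseMonitors.RunRefresh(ctx, catalog.RunRefreshRequest{FullName: fullName})
	if err != nil {
		return err
	}
	fmt.Printf("queued refresh: %+v\n", queued)

	// History of the most recent refreshes (up to 25) for the same table.
	history, err := w.LakehouseMonitors.ListRefreshes(ctx, catalog.ListRefreshesRequest{FullName: fullName})
	if err != nil {
		return err
	}
	fmt.Printf("refresh history: %+v\n", history)
	return nil
}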
|
||||
|
||||
// start update command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
|
|
|
@@ -24,9 +24,6 @@ func New() *cobra.Command {
Annotations: map[string]string{
"package": "dashboards",
},

// This service is being previewed; hide from help output.
Hidden: true,
}

// Apply optional overrides to this command.
@@ -619,7 +619,6 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.DeltaSharingOrganizationName, "delta-sharing-organization-name", updateReq.DeltaSharingOrganizationName, `The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name.`)
cmd.Flags().Int64Var(&updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, "delta-sharing-recipient-token-lifetime-in-seconds", updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, `The lifetime of delta sharing recipient token in seconds.`)
cmd.Flags().Var(&updateReq.DeltaSharingScope, "delta-sharing-scope", `The scope of Delta Sharing enabled for the metastore. Supported values: [INTERNAL, INTERNAL_AND_EXTERNAL]`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The user-specified name of the metastore.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the metastore.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the metastore.`)
cmd.Flags().StringVar(&updateReq.PrivilegeModelVersion, "privilege-model-version", updateReq.PrivilegeModelVersion, `Privilege model version of the metastore, of the form major.minor (e.g., 1.0).`)
@ -705,96 +705,6 @@ func init() {
|
|||
})
|
||||
}
|
||||
|
||||
// start reset command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var resetOverrides []func(
|
||||
*cobra.Command,
|
||||
*pipelines.ResetRequest,
|
||||
)
|
||||
|
||||
func newReset() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var resetReq pipelines.ResetRequest
|
||||
|
||||
var resetSkipWait bool
|
||||
var resetTimeout time.Duration
|
||||
|
||||
cmd.Flags().BoolVar(&resetSkipWait, "no-wait", resetSkipWait, `do not wait to reach RUNNING state`)
|
||||
cmd.Flags().DurationVar(&resetTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach RUNNING state`)
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "reset PIPELINE_ID"
|
||||
cmd.Short = `Reset a pipeline.`
|
||||
cmd.Long = `Reset a pipeline.
|
||||
|
||||
Resets a pipeline.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down."
|
||||
names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args = append(args, id)
|
||||
}
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("expected to have ")
|
||||
}
|
||||
resetReq.PipelineId = args[0]
|
||||
|
||||
wait, err := w.Pipelines.Reset(ctx, resetReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resetSkipWait {
|
||||
return nil
|
||||
}
|
||||
spinner := cmdio.Spinner(ctx)
|
||||
info, err := wait.OnProgress(func(i *pipelines.GetPipelineResponse) {
|
||||
statusMessage := i.Cause
|
||||
spinner <- statusMessage
|
||||
}).GetWithTimeout(resetTimeout)
|
||||
close(spinner)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, info)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range resetOverrides {
|
||||
fn(cmd, &resetReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newReset())
|
||||
})
|
||||
}
|
||||
|
||||
// start set-permissions command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
|
|
|
@@ -587,7 +587,6 @@ func newUpdate() *cobra.Command {
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)

cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the registered model.`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the registered model.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the registered model.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the registered model.`)
@@ -378,7 +378,6 @@ func newUpdate() *cobra.Command {

cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it. Supported values: [DISABLE, ENABLE, INHERIT]`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of schema, relative to parent catalog.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the schema.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of schema.`)
// TODO: map via StringToStringVar: properties
@@ -3,6 +3,8 @@

package settings

import (
"fmt"

"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
@ -44,23 +46,25 @@ func New() *cobra.Command {
|
|||
return cmd
|
||||
}
|
||||
|
||||
// start delete-default-workspace-namespace command
|
||||
// start delete-default-namespace-setting command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var deleteDefaultWorkspaceNamespaceOverrides []func(
|
||||
var deleteDefaultNamespaceSettingOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.DeleteDefaultWorkspaceNamespaceRequest,
|
||||
*settings.DeleteDefaultNamespaceSettingRequest,
|
||||
)
|
||||
|
||||
func newDeleteDefaultWorkspaceNamespace() *cobra.Command {
|
||||
func newDeleteDefaultNamespaceSetting() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var deleteDefaultWorkspaceNamespaceReq settings.DeleteDefaultWorkspaceNamespaceRequest
|
||||
var deleteDefaultNamespaceSettingReq settings.DeleteDefaultNamespaceSettingRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "delete-default-workspace-namespace ETAG"
|
||||
cmd.Flags().StringVar(&deleteDefaultNamespaceSettingReq.Etag, "etag", deleteDefaultNamespaceSettingReq.Etag, `etag used for versioning.`)
|
||||
|
||||
cmd.Use = "delete-default-namespace-setting"
|
||||
cmd.Short = `Delete the default namespace setting.`
|
||||
cmd.Long = `Delete the default namespace setting.
|
||||
|
||||
|
@ -68,159 +72,7 @@ func newDeleteDefaultWorkspaceNamespace() *cobra.Command {
|
|||
be provided in DELETE requests (as a query parameter). The etag can be
|
||||
retrieved by making a GET request before the DELETE request. If the setting is
|
||||
updated/deleted concurrently, DELETE will fail with 409 and the request will
|
||||
need to be retried by using the fresh etag in the 409 response.
|
||||
|
||||
Arguments:
|
||||
ETAG: etag used for versioning. The response is at least as fresh as the eTag
|
||||
provided. This is used for optimistic concurrency control as a way to help
|
||||
prevent simultaneous writes of a setting overwriting each other. It is
|
||||
strongly suggested that systems make use of the etag in the read -> delete
|
||||
pattern to perform setting deletions in order to avoid race conditions.
|
||||
That is, get an etag from a GET request, and pass it with the DELETE
|
||||
request to identify the rule set version you are deleting.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
deleteDefaultWorkspaceNamespaceReq.Etag = args[0]
|
||||
|
||||
response, err := w.Settings.DeleteDefaultWorkspaceNamespace(ctx, deleteDefaultWorkspaceNamespaceReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range deleteDefaultWorkspaceNamespaceOverrides {
|
||||
fn(cmd, &deleteDefaultWorkspaceNamespaceReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newDeleteDefaultWorkspaceNamespace())
|
||||
})
|
||||
}
|
||||
|
||||
// start read-default-workspace-namespace command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var readDefaultWorkspaceNamespaceOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.ReadDefaultWorkspaceNamespaceRequest,
|
||||
)
|
||||
|
||||
func newReadDefaultWorkspaceNamespace() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var readDefaultWorkspaceNamespaceReq settings.ReadDefaultWorkspaceNamespaceRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "read-default-workspace-namespace ETAG"
|
||||
cmd.Short = `Get the default namespace setting.`
|
||||
cmd.Long = `Get the default namespace setting.
|
||||
|
||||
Gets the default namespace setting.
|
||||
|
||||
Arguments:
|
||||
ETAG: etag used for versioning. The response is at least as fresh as the eTag
|
||||
provided. This is used for optimistic concurrency control as a way to help
|
||||
prevent simultaneous writes of a setting overwriting each other. It is
|
||||
strongly suggested that systems make use of the etag in the read -> delete
|
||||
pattern to perform setting deletions in order to avoid race conditions.
|
||||
That is, get an etag from a GET request, and pass it with the DELETE
|
||||
request to identify the rule set version you are deleting.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(1)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
readDefaultWorkspaceNamespaceReq.Etag = args[0]
|
||||
|
||||
response, err := w.Settings.ReadDefaultWorkspaceNamespace(ctx, readDefaultWorkspaceNamespaceReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range readDefaultWorkspaceNamespaceOverrides {
|
||||
fn(cmd, &readDefaultWorkspaceNamespaceReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newReadDefaultWorkspaceNamespace())
|
||||
})
|
||||
}
|
||||
|
||||
// start update-default-workspace-namespace command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var updateDefaultWorkspaceNamespaceOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.UpdateDefaultWorkspaceNamespaceRequest,
|
||||
)
|
||||
|
||||
func newUpdateDefaultWorkspaceNamespace() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var updateDefaultWorkspaceNamespaceReq settings.UpdateDefaultWorkspaceNamespaceRequest
|
||||
var updateDefaultWorkspaceNamespaceJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateDefaultWorkspaceNamespaceJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().BoolVar(&updateDefaultWorkspaceNamespaceReq.AllowMissing, "allow-missing", updateDefaultWorkspaceNamespaceReq.AllowMissing, `This should always be set to true for Settings API.`)
|
||||
cmd.Flags().StringVar(&updateDefaultWorkspaceNamespaceReq.FieldMask, "field-mask", updateDefaultWorkspaceNamespaceReq.FieldMask, `Field mask is required to be passed into the PATCH request.`)
|
||||
// TODO: complex arg: setting
|
||||
|
||||
cmd.Use = "update-default-workspace-namespace"
|
||||
cmd.Short = `Update the default namespace setting.`
|
||||
cmd.Long = `Update the default namespace setting.
|
||||
|
||||
Updates the default namespace setting for the workspace. A fresh etag needs to
|
||||
be provided in PATCH requests (as part of the setting field). The etag can be
|
||||
retrieved by making a GET request before the PATCH request. Note that if the
|
||||
setting does not exist, GET will return a NOT_FOUND error and the etag will be
|
||||
present in the error response, which should be set in the PATCH request. If
|
||||
the setting is updated concurrently, PATCH will fail with 409 and the request
|
||||
will need to be retried by using the fresh etag in the 409 response.`
|
||||
need to be retried by using the fresh etag in the 409 response.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
@ -234,14 +86,7 @@ func newUpdateDefaultWorkspaceNamespace() *cobra.Command {
|
|||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
err = updateDefaultWorkspaceNamespaceJson.Unmarshal(&updateDefaultWorkspaceNamespaceReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
response, err := w.Settings.UpdateDefaultWorkspaceNamespace(ctx, updateDefaultWorkspaceNamespaceReq)
|
||||
response, err := w.Settings.DeleteDefaultNamespaceSetting(ctx, deleteDefaultNamespaceSettingReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -253,8 +98,8 @@ func newUpdateDefaultWorkspaceNamespace() *cobra.Command {
|
|||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range updateDefaultWorkspaceNamespaceOverrides {
|
||||
fn(cmd, &updateDefaultWorkspaceNamespaceReq)
|
||||
for _, fn := range deleteDefaultNamespaceSettingOverrides {
|
||||
fn(cmd, &deleteDefaultNamespaceSettingReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
|
@ -262,7 +107,334 @@ func newUpdateDefaultWorkspaceNamespace() *cobra.Command {
|
|||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newUpdateDefaultWorkspaceNamespace())
|
||||
cmd.AddCommand(newDeleteDefaultNamespaceSetting())
|
||||
})
|
||||
}
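The long description of the delete command spells out an optimistic-concurrency contract: fetch a fresh etag with a GET, pass it to the DELETE, and retry with the etag from a 409 response if the setting changed in between. A sketch of that read -> delete loop using the Settings methods from this diff; the Etag field on the returned setting and the 409 check via apierr are assumptions of mine:

// Sketch: etag-based read -> delete with a retry on concurrent modification.
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/apierr"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func deleteDefaultNamespace(ctx context.Context, w *databricks.WorkspaceClient) error {
	for attempt := 0; attempt < 3; attempt++ {
		// GET first to obtain a fresh etag (assumption: the response carries an Etag field).
		current, err := w.Settings.GetDefaultNamespaceSetting(ctx, settings.GetDefaultNamespaceSettingRequest{})
		if err != nil {
			return err
		}
		_, err = w.Settings.DeleteDefaultNamespaceSetting(ctx, settings.DeleteDefaultNamespaceSettingRequest{
			Etag: current.Etag,
		})
		if err == nil {
			return nil
		}
		if !isConflict(err) {
			return err
		}
		// On 409, loop: the next GET returns the fresh etag to retry with.
	}
	return fmt.Errorf("gave up after repeated etag conflicts")
}

// isConflict reports whether the error is a 409 from the API (assumption: apierr.APIError).
func isConflict(err error) bool {
	var apiErr *apierr.APIError
	return errors.As(err, &apiErr) && apiErr.StatusCode == 409
}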
|
||||
|
||||
// start delete-restrict-workspace-admins-setting command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var deleteRestrictWorkspaceAdminsSettingOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.DeleteRestrictWorkspaceAdminsSettingRequest,
|
||||
)
|
||||
|
||||
func newDeleteRestrictWorkspaceAdminsSetting() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var deleteRestrictWorkspaceAdminsSettingReq settings.DeleteRestrictWorkspaceAdminsSettingRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().StringVar(&deleteRestrictWorkspaceAdminsSettingReq.Etag, "etag", deleteRestrictWorkspaceAdminsSettingReq.Etag, `etag used for versioning.`)
|
||||
|
||||
cmd.Use = "delete-restrict-workspace-admins-setting"
|
||||
cmd.Short = `Delete the restrict workspace admins setting.`
|
||||
cmd.Long = `Delete the restrict workspace admins setting.
|
||||
|
||||
Reverts the restrict workspace admins setting status for the workspace. A
|
||||
fresh etag needs to be provided in DELETE requests (as a query parameter). The
|
||||
etag can be retrieved by making a GET request before the DELETE request. If
|
||||
the setting is updated/deleted concurrently, DELETE will fail with 409 and the
|
||||
request will need to be retried by using the fresh etag in the 409 response.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
response, err := w.Settings.DeleteRestrictWorkspaceAdminsSetting(ctx, deleteRestrictWorkspaceAdminsSettingReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range deleteRestrictWorkspaceAdminsSettingOverrides {
|
||||
fn(cmd, &deleteRestrictWorkspaceAdminsSettingReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newDeleteRestrictWorkspaceAdminsSetting())
|
||||
})
|
||||
}
|
||||
|
||||
// start get-default-namespace-setting command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var getDefaultNamespaceSettingOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.GetDefaultNamespaceSettingRequest,
|
||||
)
|
||||
|
||||
func newGetDefaultNamespaceSetting() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var getDefaultNamespaceSettingReq settings.GetDefaultNamespaceSettingRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().StringVar(&getDefaultNamespaceSettingReq.Etag, "etag", getDefaultNamespaceSettingReq.Etag, `etag used for versioning.`)
|
||||
|
||||
cmd.Use = "get-default-namespace-setting"
|
||||
cmd.Short = `Get the default namespace setting.`
|
||||
cmd.Long = `Get the default namespace setting.
|
||||
|
||||
Gets the default namespace setting.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
response, err := w.Settings.GetDefaultNamespaceSetting(ctx, getDefaultNamespaceSettingReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range getDefaultNamespaceSettingOverrides {
|
||||
fn(cmd, &getDefaultNamespaceSettingReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newGetDefaultNamespaceSetting())
|
||||
})
|
||||
}
|
||||
|
||||
// start get-restrict-workspace-admins-setting command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var getRestrictWorkspaceAdminsSettingOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.GetRestrictWorkspaceAdminsSettingRequest,
|
||||
)
|
||||
|
||||
func newGetRestrictWorkspaceAdminsSetting() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var getRestrictWorkspaceAdminsSettingReq settings.GetRestrictWorkspaceAdminsSettingRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().StringVar(&getRestrictWorkspaceAdminsSettingReq.Etag, "etag", getRestrictWorkspaceAdminsSettingReq.Etag, `etag used for versioning.`)
|
||||
|
||||
cmd.Use = "get-restrict-workspace-admins-setting"
|
||||
cmd.Short = `Get the restrict workspace admins setting.`
|
||||
cmd.Long = `Get the restrict workspace admins setting.
|
||||
|
||||
Gets the restrict workspace admins setting.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(0)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
response, err := w.Settings.GetRestrictWorkspaceAdminsSetting(ctx, getRestrictWorkspaceAdminsSettingReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range getRestrictWorkspaceAdminsSettingOverrides {
|
||||
fn(cmd, &getRestrictWorkspaceAdminsSettingReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newGetRestrictWorkspaceAdminsSetting())
|
||||
})
|
||||
}
|
||||
|
||||
// start update-default-namespace-setting command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var updateDefaultNamespaceSettingOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.UpdateDefaultNamespaceSettingRequest,
|
||||
)
|
||||
|
||||
func newUpdateDefaultNamespaceSetting() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var updateDefaultNamespaceSettingReq settings.UpdateDefaultNamespaceSettingRequest
|
||||
var updateDefaultNamespaceSettingJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateDefaultNamespaceSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Use = "update-default-namespace-setting"
|
||||
cmd.Short = `Update the default namespace setting.`
|
||||
cmd.Long = `Update the default namespace setting.
|
||||
|
||||
Updates the default namespace setting for the workspace. A fresh etag needs to
|
||||
be provided in PATCH requests (as part of the setting field). The etag can be
|
||||
retrieved by making a GET request before the PATCH request. Note that if the
|
||||
setting does not exist, GET will return a NOT_FOUND error and the etag will be
|
||||
present in the error response, which should be set in the PATCH request. If
|
||||
the setting is updated concurrently, PATCH will fail with 409 and the request
|
||||
will need to be retried by using the fresh etag in the 409 response.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
err = updateDefaultNamespaceSettingJson.Unmarshal(&updateDefaultNamespaceSettingReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
|
||||
}
|
||||
|
||||
response, err := w.Settings.UpdateDefaultNamespaceSetting(ctx, updateDefaultNamespaceSettingReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range updateDefaultNamespaceSettingOverrides {
|
||||
fn(cmd, &updateDefaultNamespaceSettingReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newUpdateDefaultNamespaceSetting())
|
||||
})
|
||||
}
|
||||
|
||||
// start update-restrict-workspace-admins-setting command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var updateRestrictWorkspaceAdminsSettingOverrides []func(
|
||||
*cobra.Command,
|
||||
*settings.UpdateRestrictWorkspaceAdminsSettingRequest,
|
||||
)
|
||||
|
||||
func newUpdateRestrictWorkspaceAdminsSetting() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var updateRestrictWorkspaceAdminsSettingReq settings.UpdateRestrictWorkspaceAdminsSettingRequest
|
||||
var updateRestrictWorkspaceAdminsSettingJson flags.JsonFlag
|
||||
|
||||
// TODO: short flags
|
||||
cmd.Flags().Var(&updateRestrictWorkspaceAdminsSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Use = "update-restrict-workspace-admins-setting"
|
||||
cmd.Short = `Update the restrict workspace admins setting.`
|
||||
cmd.Long = `Update the restrict workspace admins setting.
|
||||
|
||||
Updates the restrict workspace admins setting for the workspace. A fresh etag
|
||||
needs to be provided in PATCH requests (as part of the setting field). The
|
||||
etag can be retrieved by making a GET request before the PATCH request. If the
|
||||
setting is updated concurrently, PATCH will fail with 409 and the request will
|
||||
need to be retried by using the fresh etag in the 409 response.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
if cmd.Flags().Changed("json") {
|
||||
err = updateRestrictWorkspaceAdminsSettingJson.Unmarshal(&updateRestrictWorkspaceAdminsSettingReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
|
||||
}
|
||||
|
||||
response, err := w.Settings.UpdateRestrictWorkspaceAdminsSetting(ctx, updateRestrictWorkspaceAdminsSettingReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range updateRestrictWorkspaceAdminsSettingOverrides {
|
||||
fn(cmd, &updateRestrictWorkspaceAdminsSettingReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
|
||||
cmd.AddCommand(newUpdateRestrictWorkspaceAdminsSetting())
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@@ -61,7 +61,7 @@ func newCreateIndex() *cobra.Command {
// TODO: short flags
cmd.Flags().Var(&createIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`)

// TODO: complex arg: delta_sync_vector_index_spec
// TODO: complex arg: delta_sync_index_spec
// TODO: complex arg: direct_access_index_spec
cmd.Flags().StringVar(&createIndexReq.EndpointName, "endpoint-name", createIndexReq.EndpointName, `Name of the endpoint to be used for serving the index.`)
@ -174,7 +174,7 @@ func newDelete() *cobra.Command {
|
|||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "delete FULL_NAME_ARG"
|
||||
cmd.Use = "delete NAME"
|
||||
cmd.Short = `Delete a Volume.`
|
||||
cmd.Long = `Delete a Volume.
|
||||
|
||||
|
@ -185,7 +185,7 @@ func newDelete() *cobra.Command {
|
|||
on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.
|
||||
|
||||
Arguments:
|
||||
FULL_NAME_ARG: The three-level (fully qualified) name of the volume`
|
||||
NAME: The three-level (fully qualified) name of the volume`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
@ -196,7 +196,7 @@ func newDelete() *cobra.Command {
|
|||
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down."
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Volumes drop-down."
|
||||
names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
|
@ -211,7 +211,7 @@ func newDelete() *cobra.Command {
|
|||
if len(args) != 1 {
|
||||
return fmt.Errorf("expected to have the three-level (fully qualified) name of the volume")
|
||||
}
|
||||
deleteReq.FullNameArg = args[0]
|
||||
deleteReq.Name = args[0]
|
||||
|
||||
err = w.Volumes.Delete(ctx, deleteReq)
|
||||
if err != nil {
|
||||
|
@ -254,12 +254,15 @@ func newList() *cobra.Command {
|
|||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of volumes to return (page length).`)
|
||||
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token returned by a previous request.`)
|
||||
|
||||
cmd.Use = "list CATALOG_NAME SCHEMA_NAME"
|
||||
cmd.Short = `List Volumes.`
|
||||
cmd.Long = `List Volumes.
|
||||
|
||||
Gets an array of all volumes for the current metastore under the parent
|
||||
catalog and schema.
|
||||
Gets an array of volumes for the current metastore under the parent catalog
|
||||
and schema.
|
||||
|
||||
The returned volumes are filtered based on the privileges of the calling user.
|
||||
For example, the metastore admin is able to list all the volumes. A regular
|
||||
|
@ -274,9 +277,6 @@ func newList() *cobra.Command {
|
|||
CATALOG_NAME: The identifier of the catalog
|
||||
SCHEMA_NAME: The identifier of the schema`
|
||||
|
||||
// This command is being previewed; hide from help output.
|
||||
cmd.Hidden = true
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
|
@ -333,7 +333,7 @@ func newRead() *cobra.Command {
|
|||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Use = "read FULL_NAME_ARG"
|
||||
cmd.Use = "read NAME"
|
||||
cmd.Short = `Get a Volume.`
|
||||
cmd.Long = `Get a Volume.
|
||||
|
||||
|
@ -345,7 +345,7 @@ func newRead() *cobra.Command {
|
|||
the **USE_SCHEMA** privilege on the parent schema.
|
||||
|
||||
Arguments:
|
||||
FULL_NAME_ARG: The three-level (fully qualified) name of the volume`
|
||||
NAME: The three-level (fully qualified) name of the volume`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
@ -356,7 +356,7 @@ func newRead() *cobra.Command {
|
|||
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down."
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Volumes drop-down."
|
||||
names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
|
@ -371,7 +371,7 @@ func newRead() *cobra.Command {
|
|||
if len(args) != 1 {
|
||||
return fmt.Errorf("expected to have the three-level (fully qualified) name of the volume")
|
||||
}
|
||||
readReq.FullNameArg = args[0]
|
||||
readReq.Name = args[0]
|
||||
|
||||
response, err := w.Volumes.Read(ctx, readReq)
|
||||
if err != nil {
|
||||
|
@ -417,11 +417,10 @@ func newUpdate() *cobra.Command {
|
|||
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the volume.`)
|
||||
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the volume.`)
|
||||
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the volume.`)
|
||||
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the volume.`)
|
||||
|
||||
cmd.Use = "update FULL_NAME_ARG"
|
||||
cmd.Use = "update NAME"
|
||||
cmd.Short = `Update a Volume.`
|
||||
cmd.Long = `Update a Volume.
|
||||
|
||||
|
@ -435,7 +434,7 @@ func newUpdate() *cobra.Command {
|
|||
updated.
|
||||
|
||||
Arguments:
|
||||
FULL_NAME_ARG: The three-level (fully qualified) name of the volume`
|
||||
NAME: The three-level (fully qualified) name of the volume`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
@ -452,7 +451,7 @@ func newUpdate() *cobra.Command {
|
|||
}
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down."
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Volumes drop-down."
|
||||
names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
|
@ -467,7 +466,7 @@ func newUpdate() *cobra.Command {
|
|||
if len(args) != 1 {
|
||||
return fmt.Errorf("expected to have the three-level (fully qualified) name of the volume")
|
||||
}
|
||||
updateReq.FullNameArg = args[0]
|
||||
updateReq.Name = args[0]
|
||||
|
||||
response, err := w.Volumes.Update(ctx, updateReq)
|
||||
if err != nil {
|
||||
|
|
go.mod (20 changed lines)
@@ -4,7 +4,7 @@ go 1.21

require (
github.com/briandowns/spinner v1.23.0 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.30.1 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.32.0 // Apache 2.0
github.com/fatih/color v1.16.0 // MIT
github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
github.com/google/uuid v1.6.0 // BSD-3-Clause
@@ -41,7 +41,7 @@ require (
github.com/cloudflare/circl v1.3.7 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
@@ -55,18 +55,18 @@ require (
github.com/stretchr/objx v0.5.0 // indirect
github.com/zclconf/go-cty v1.14.1 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
go.opentelemetry.io/otel v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/otel/trace v1.21.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect
go.opentelemetry.io/otel v1.22.0 // indirect
go.opentelemetry.io/otel/metric v1.22.0 // indirect
go.opentelemetry.io/otel/trace v1.22.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/net v0.21.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/api v0.154.0 // indirect
google.golang.org/api v0.161.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/grpc v1.59.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect
google.golang.org/grpc v1.60.1 // indirect
google.golang.org/protobuf v1.32.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
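The SDK bump above is the substance of this commit; the command changes earlier in the diff are regenerated from it. A small sketch of a client built directly against the bumped module, calling one of the renamed settings endpoints; it assumes only APIs referenced elsewhere in this diff plus the SDK's own NewWorkspaceClient constructor:

// Sketch: standalone program against databricks-sdk-go v0.32.0.
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func main() {
	ctx := context.Background()
	// Reads authentication from the environment/config, the same client the CLI wraps.
	w := databricks.Must(databricks.NewWorkspaceClient())

	// get-default-namespace-setting is one of the endpoints renamed in this release.
	setting, err := w.Settings.GetDefaultNamespaceSetting(ctx, settings.GetDefaultNamespaceSettingRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("default namespace setting: %+v\n", setting)
}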
@ -28,8 +28,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
|
|||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
github.com/databricks/databricks-sdk-go v0.30.1 h1:ux6I3aHqUH/AOLZEaEHBmwkbHuSAmb+42mTfvh2A7bE=
|
||||
github.com/databricks/databricks-sdk-go v0.30.1/go.mod h1:QB64wT8EmR9T4ZPqeTRKjfIF4tPZuP9M9kM8Hcr019Q=
|
||||
github.com/databricks/databricks-sdk-go v0.32.0 h1:H6SQmfOOXd6x2fOp+zISkcR1nzJ7NTXXmIv8lWyK66Y=
|
||||
github.com/databricks/databricks-sdk-go v0.32.0/go.mod h1:yyXGdhEfXBBsIoTm0mdl8QN0xzCQPUVZTozMM/7wVuI=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
@ -53,8 +53,8 @@ github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgF
|
|||
github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
|
||||
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
|
@ -161,16 +161,16 @@ github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA
|
|||
github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
|
||||
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
|
||||
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
|
||||
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
|
||||
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw=
|
||||
go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
|
||||
go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
|
||||
go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
|
||||
go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
|
||||
go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
|
||||
go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
|
@ -244,8 +244,8 @@ golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
|
|||
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.154.0 h1:X7QkVKZBskztmpPKWQXgjJRPA2dJYrL6r+sYPRLj050=
|
||||
google.golang.org/api v0.154.0/go.mod h1:qhSMkM85hgqiokIYsrRyKxrjfBeIhgl4Z2JmeRkYylc=
|
||||
google.golang.org/api v0.161.0 h1:oYzk/bs26WN10AV7iU7MVJVXBH8oCPS2hHyBiEeFoSU=
google.golang.org/api v0.161.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
@@ -253,15 +253,15 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -273,8 +273,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=