mirror of https://github.com/databricks/cli.git
Bump github.com/databricks/databricks-sdk-go from 0.36.0 to 0.37.0 (#1326)
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.36.0&new-version=0.37.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---

<details>
<summary>Dependabot commands and options</summary>
<br />

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)

</details>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Andrew Nester <andrew.nester@databricks.com>
This commit is contained in: parent c1963ec0df, commit f28a9d7107
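Two API changes in databricks-sdk-go 0.37.0 account for the Go-side edits in the hunks below: `jobs.JobCluster.NewCluster` is now a `compute.ClusterSpec` value rather than a `*compute.ClusterSpec`, and the UI-locked edit mode constant is `jobs.JobEditModeUiLocked` instead of `jobs.JobSettingsEditModeUiLocked`. A minimal sketch of the post-bump shape of a job definition (identifiers taken from the hunks below; the surrounding package and values are illustrative only):

```go
package example

import (
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

// exampleSettings mirrors the struct literals used in the updated tests below.
func exampleSettings() *jobs.JobSettings {
	return &jobs.JobSettings{
		JobClusters: []jobs.JobCluster{
			{
				JobClusterKey: "foo",
				// 0.36.0 took a pointer (NewCluster: &compute.ClusterSpec{...});
				// 0.37.0 takes a value, so the literal drops the leading &.
				NewCluster: compute.ClusterSpec{
					SparkVersion: "13.3.x-scala2.12",
					NodeTypeId:   "i3.xlarge",
					NumWorkers:   2,
				},
			},
		},
		// 0.36.0 name: jobs.JobSettingsEditModeUiLocked
		EditMode: jobs.JobEditModeUiLocked,
		Format:   jobs.FormatMultiTask,
	}
}
```

Because `NewCluster` is no longer a pointer, presence checks move from `cluster.NewCluster != nil` to a field test such as `cluster.NewCluster.SparkVersion != ""`, which is exactly the change in `hasIncompatibleWheelTasks` below.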
@@ -1 +1 @@
-93763b0d7ae908520c229c786fff28b8fd623261
+e316cc3d78d087522a74650e26586088da9ac8cb
@@ -23,7 +23,7 @@ func TestMergeJobClusters(t *testing.T) {
 JobClusters: []jobs.JobCluster{
 {
 JobClusterKey: "foo",
-NewCluster: &compute.ClusterSpec{
+NewCluster: compute.ClusterSpec{
 SparkVersion: "13.3.x-scala2.12",
 NodeTypeId: "i3.xlarge",
 NumWorkers: 2,
@@ -31,13 +31,13 @@ func TestMergeJobClusters(t *testing.T) {
 },
 {
 JobClusterKey: "bar",
-NewCluster: &compute.ClusterSpec{
+NewCluster: compute.ClusterSpec{
 SparkVersion: "10.4.x-scala2.12",
 },
 },
 {
 JobClusterKey: "foo",
-NewCluster: &compute.ClusterSpec{
+NewCluster: compute.ClusterSpec{
 NodeTypeId: "i3.2xlarge",
 NumWorkers: 4,
 },
@@ -79,14 +79,14 @@ func TestMergeJobClustersWithNilKey(t *testing.T) {
 JobSettings: &jobs.JobSettings{
 JobClusters: []jobs.JobCluster{
 {
-NewCluster: &compute.ClusterSpec{
+NewCluster: compute.ClusterSpec{
 SparkVersion: "13.3.x-scala2.12",
 NodeTypeId: "i3.xlarge",
 NumWorkers: 2,
 },
 },
 {
-NewCluster: &compute.ClusterSpec{
+NewCluster: compute.ClusterSpec{
 NodeTypeId: "i3.2xlarge",
 NumWorkers: 4,
 },
@@ -29,7 +29,7 @@ func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnosti
 Kind: jobs.JobDeploymentKindBundle,
 MetadataFilePath: path.Join(b.Config.Workspace.StatePath, MetadataFileName),
 }
-job.JobSettings.EditMode = jobs.JobSettingsEditModeUiLocked
+job.JobSettings.EditMode = jobs.JobEditModeUiLocked
 job.JobSettings.Format = jobs.FormatMultiTask
 }
 
@@ -44,7 +44,7 @@ func TestAnnotateJobsMutator(t *testing.T) {
 MetadataFilePath: "/a/b/c/metadata.json",
 },
 b.Config.Resources.Jobs["my-job-1"].JobSettings.Deployment)
-assert.Equal(t, jobs.JobSettingsEditModeUiLocked, b.Config.Resources.Jobs["my-job-1"].EditMode)
+assert.Equal(t, jobs.JobEditModeUiLocked, b.Config.Resources.Jobs["my-job-1"].EditMode)
 assert.Equal(t, jobs.FormatMultiTask, b.Config.Resources.Jobs["my-job-1"].Format)
 
 assert.Equal(t,
@@ -53,7 +53,7 @@ func TestAnnotateJobsMutator(t *testing.T) {
 MetadataFilePath: "/a/b/c/metadata.json",
 },
 b.Config.Resources.Jobs["my-job-2"].JobSettings.Deployment)
-assert.Equal(t, jobs.JobSettingsEditModeUiLocked, b.Config.Resources.Jobs["my-job-2"].EditMode)
+assert.Equal(t, jobs.JobEditModeUiLocked, b.Config.Resources.Jobs["my-job-2"].EditMode)
 assert.Equal(t, jobs.FormatMultiTask, b.Config.Resources.Jobs["my-job-2"].Format)
 }
 
@@ -29,7 +29,7 @@ func TestBundleToTerraformJob(t *testing.T) {
 JobClusters: []jobs.JobCluster{
 {
 JobClusterKey: "key",
-NewCluster: &compute.ClusterSpec{
+NewCluster: compute.ClusterSpec{
 SparkVersion: "10.4.x-scala2.12",
 },
 },
@@ -21,7 +21,7 @@ func TestConvertJob(t *testing.T) {
 JobClusters: []jobs.JobCluster{
 {
 JobClusterKey: "key",
-NewCluster: &compute.ClusterSpec{
+NewCluster: compute.ClusterSpec{
 SparkVersion: "10.4.x-scala2.12",
 },
 },
@@ -46,7 +46,7 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool {
 if task.JobClusterKey != "" {
 for _, job := range b.Config.Resources.Jobs {
 for _, cluster := range job.JobClusters {
-if task.JobClusterKey == cluster.JobClusterKey && cluster.NewCluster != nil {
+if task.JobClusterKey == cluster.JobClusterKey && cluster.NewCluster.SparkVersion != "" {
 if lowerThanExpectedVersion(ctx, cluster.NewCluster.SparkVersion) {
 return true
 }
@@ -63,13 +63,13 @@ func TestIncompatibleWheelTasksWithJobClusterKey(t *testing.T) {
 JobClusters: []jobs.JobCluster{
 {
 JobClusterKey: "cluster1",
-NewCluster: &compute.ClusterSpec{
+NewCluster: compute.ClusterSpec{
 SparkVersion: "12.2.x-scala2.12",
 },
 },
 {
 JobClusterKey: "cluster2",
-NewCluster: &compute.ClusterSpec{
+NewCluster: compute.ClusterSpec{
 SparkVersion: "13.1.x-scala2.12",
 },
 },
@@ -157,13 +157,13 @@ func TestNoIncompatibleWheelTasks(t *testing.T) {
 JobClusters: []jobs.JobCluster{
 {
 JobClusterKey: "cluster1",
-NewCluster: &compute.ClusterSpec{
+NewCluster: compute.ClusterSpec{
 SparkVersion: "12.2.x-scala2.12",
 },
 },
 {
 JobClusterKey: "cluster2",
-NewCluster: &compute.ClusterSpec{
+NewCluster: compute.ClusterSpec{
 SparkVersion: "13.1.x-scala2.12",
 },
 },
@@ -201,7 +201,7 @@
 "description": "Deployment information for jobs managed by external sources.",
 "properties": {
 "kind": {
-"description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.\n"
+"description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle."
 },
 "metadata_file_path": {
 "description": "Path of the file that contains deployment metadata."
@@ -212,7 +212,7 @@
 "description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding."
 },
 "edit_mode": {
-"description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.\n"
+"description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified."
 },
 "email_notifications": {
 "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.",
@@ -279,7 +279,7 @@
 "description": "The source of the job specification in the remote repository when the job is source controlled.",
 "properties": {
 "dirty_state": {
-"description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced.\n"
+"description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced."
 },
 "import_from_git_branch": {
 "description": "Name of the branch which the job is imported from."
@@ -322,7 +322,7 @@
 "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution."
 },
 "new_cluster": {
-"description": "If new_cluster, a description of a cluster that is created for each task.",
+"description": "If new_cluster, a description of a new cluster that is created for each run.",
 "properties": {
 "apply_policy_default_values": {
 "description": ""
@@ -652,7 +652,7 @@
 }
 },
 "max_concurrent_runs": {
-"description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped."
+"description": "An optional maximum allowed number of concurrent runs of the job.\nSet this value if you want to be able to execute multiple runs of the same job concurrently.\nThis is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs.\nHowever, from then on, new runs are skipped unless there are fewer than 3 active runs.\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped."
 },
 "name": {
 "description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding."
@@ -728,10 +728,10 @@
 "description": "Whether this trigger is paused or not."
 },
 "quartz_cron_expression": {
-"description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n"
+"description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required."
 },
 "timezone_id": {
-"description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone.\nSee [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details.\nThis field is required.\n"
+"description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. This field is required."
 }
 }
 },
@@ -756,7 +756,7 @@
 "description": "The left operand of the condition task. Can be either a string value or a job state or parameter reference."
 },
 "op": {
-"description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.\n"
+"description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison."
 },
 "right": {
 "description": "The right operand of the condition task. Can be either a string value or a job state or parameter reference."
@@ -779,13 +779,13 @@
 "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used."
 },
 "project_directory": {
-"description": "Path to the project directory. Optional for Git sourced tasks, in which case if no value is provided, the root of the Git repository is used."
+"description": "Path to the project directory. Optional for Git sourced tasks, in which\ncase if no value is provided, the root of the Git repository is used."
 },
 "schema": {
 "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used."
 },
 "source": {
-"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
+"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository."
 },
 "warehouse_id": {
 "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument."
@@ -793,7 +793,7 @@
 }
 },
 "depends_on": {
-"description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.\n",
+"description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.",
 "items": {
 "description": "",
 "properties": {
@@ -809,9 +809,15 @@
 "description": {
 "description": "An optional description for this task."
 },
+"disable_auto_optimization": {
+"description": "An option to disable auto optimization in serverless"
+},
 "email_notifications": {
 "description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.",
 "properties": {
+"no_alert_for_skipped_runs": {
+"description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped."
+},
 "on_duration_warning_threshold_exceeded": {
 "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.",
 "items": {
@@ -839,9 +845,11 @@
 }
 },
 "existing_cluster_id": {
-"description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability."
+"description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs.\nWhen running jobs or tasks on an existing cluster, you may need to manually restart\nthe cluster if it stops responding. We suggest running jobs and tasks on new clusters for\ngreater reliability"
+},
+"for_each_task": {
+"description": ""
 },
-"for_each_task": null,
 "health": {
 "description": "",
 "properties": {
@@ -868,7 +876,7 @@
 "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`."
 },
 "libraries": {
-"description": "An optional list of libraries to be installed on the cluster that executes the task. The default value is an empty list.",
+"description": "An optional list of libraries to be installed on the cluster.\nThe default value is an empty list.",
 "items": {
 "description": "",
 "properties": {
@@ -930,7 +938,7 @@
 "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried."
 },
 "new_cluster": {
-"description": "If new_cluster, a description of a cluster that is created for each task.",
+"description": "If new_cluster, a description of a new cluster that is created for each run.",
 "properties": {
 "apply_policy_default_values": {
 "description": ""
@@ -1260,16 +1268,16 @@
 "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.",
 "properties": {
 "base_parameters": {
-"description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n",
+"description": "Base parameters to be used for each run of this job. If the run is initiated by a call to :method:jobs/run\nNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.",
 "additionalproperties": {
 "description": ""
 }
 },
 "notebook_path": {
-"description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n"
+"description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required."
 },
 "source": {
-"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
+"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository."
 }
 }
 },
@@ -1291,7 +1299,7 @@
 "description": "If pipeline_task, indicates that this task must execute a Pipeline.",
 "properties": {
 "full_refresh": {
-"description": "If true, a full refresh will be triggered on the delta live table."
+"description": "If true, triggers a full refresh on the delta live table."
 },
 "pipeline_id": {
 "description": "The full name of the pipeline task to execute."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"retry_on_timeout": {
|
"retry_on_timeout": {
|
||||||
"description": "An optional policy to specify whether to retry a task when it times out."
|
"description": "An optional policy to specify whether to retry a job when it times out. The default behavior\nis to not retry on timeout."
|
||||||
},
|
},
|
||||||
"run_if": {
|
"run_if": {
|
||||||
"description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed\n"
|
"description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed"
|
||||||
},
|
},
|
||||||
"run_job_task": {
|
"run_job_task": {
|
||||||
"description": "If run_job_task, indicates that this task must execute another job.",
|
"description": "If run_job_task, indicates that this task must execute another job.",
|
||||||
"properties": {
|
"properties": {
|
||||||
|
"dbt_commands": {
|
||||||
|
"description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`",
|
||||||
|
"items": {
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"jar_params": {
|
||||||
|
"description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.",
|
||||||
|
"items": {
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
"job_id": {
|
"job_id": {
|
||||||
"description": "ID of the job to trigger."
|
"description": "ID of the job to trigger."
|
||||||
},
|
},
|
||||||
|
@@ -1338,6 +1358,44 @@
 "additionalproperties": {
 "description": ""
 }
+},
+"notebook_params": {
+"description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.",
+"additionalproperties": {
+"description": ""
+}
+},
+"pipeline_params": {
+"description": "",
+"properties": {
+"full_refresh": {
+"description": "If true, triggers a full refresh on the delta live table."
+}
+}
+},
+"python_named_params": {
+"description": "A map from keys to values for jobs with Python wheel task, for example `\"python_named_params\": {\"name\": \"task\", \"data\": \"dbfs:/path/to/data.json\"}`.",
+"additionalproperties": {
+"description": ""
+}
+},
+"python_params": {
+"description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.",
+"items": {
+"description": ""
+}
+},
+"spark_submit_params": {
+"description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.",
+"items": {
+"description": ""
+}
+},
+"sql_params": {
+"description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.",
+"additionalproperties": {
+"description": ""
+}
 }
 }
 },
@@ -1345,13 +1403,13 @@
 "description": "If spark_jar_task, indicates that this task must run a JAR.",
 "properties": {
 "jar_uri": {
-"description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create.\n"
+"description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create."
 },
 "main_class_name": {
 "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail."
 },
 "parameters": {
-"description": "Parameters passed to the main method.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
+"description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.",
 "items": {
 "description": ""
 }
@@ -1362,7 +1420,7 @@
 "description": "If spark_python_task, indicates that this task must run a Python file.",
 "properties": {
 "parameters": {
-"description": "Command line parameters passed to the Python file.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
+"description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.",
 "items": {
 "description": ""
 }
@@ -1371,15 +1429,15 @@
 "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
 },
 "source": {
-"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
+"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository."
 }
 }
 },
 "spark_submit_task": {
-"description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n",
+"description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.",
 "properties": {
 "parameters": {
-"description": "Command-line parameters passed to spark submit.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
+"description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.",
 "items": {
 "description": ""
 }
@@ -1449,7 +1507,7 @@
 "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths."
 },
 "source": {
-"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
+"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository."
 }
 }
 },
@@ -1479,7 +1537,7 @@
 "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout."
 },
 "webhook_notifications": {
-"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
+"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
 "properties": {
 "on_duration_warning_threshold_exceeded": {
 "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@@ -1540,13 +1598,13 @@
 "description": "File arrival trigger settings.",
 "properties": {
 "min_time_between_triggers_seconds": {
-"description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n"
+"description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds"
 },
 "url": {
-"description": "The storage location to monitor for file arrivals. The value must point to the root or a subpath of an external location URL or the root or subpath of a Unity Catalog volume."
+"description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location."
 },
 "wait_after_last_change_seconds": {
-"description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n"
+"description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds."
 }
 }
 },
@@ -1554,13 +1612,13 @@
 "description": "Whether this trigger is paused or not."
 },
 "table": {
-"description": "Table trigger settings.",
+"description": "",
 "properties": {
 "condition": {
 "description": "The table(s) condition based on which to trigger a job run."
 },
 "min_time_between_triggers_seconds": {
-"description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.\n"
+"description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds."
 },
 "table_names": {
 "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.",
@@ -1569,14 +1627,34 @@
 }
 },
 "wait_after_last_change_seconds": {
-"description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds.\n"
+"description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds."
+}
+}
+},
+"table_update": {
+"description": "",
+"properties": {
+"condition": {
+"description": "The table(s) condition based on which to trigger a job run."
+},
+"min_time_between_triggers_seconds": {
+"description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds."
+},
+"table_names": {
+"description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.",
+"items": {
+"description": ""
+}
+},
+"wait_after_last_change_seconds": {
+"description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds."
 }
 }
 }
 }
 },
 "webhook_notifications": {
-"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
+"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
 "properties": {
 "on_duration_warning_threshold_exceeded": {
 "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@@ -1680,16 +1758,8 @@
 }
 }
 },
-"anthropic_config": {
-"description": "Anthropic Config. Only required if the provider is 'anthropic'.",
-"properties": {
-"anthropic_api_key": {
-"description": "The Databricks secret key reference for an Anthropic API key."
-}
-}
-},
-"aws_bedrock_config": {
-"description": "AWS Bedrock Config. Only required if the provider is 'aws-bedrock'.",
+"amazon_bedrock_config": {
+"description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.",
 "properties": {
 "aws_access_key_id": {
 "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services."
@@ -1701,7 +1771,15 @@
 "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services."
 },
 "bedrock_provider": {
-"description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon."
+"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon."
+}
+}
+},
+"anthropic_config": {
+"description": "Anthropic Config. Only required if the provider is 'anthropic'.",
+"properties": {
+"anthropic_api_key": {
+"description": "The Databricks secret key reference for an Anthropic API key."
 }
 }
 },
@@ -1759,7 +1837,7 @@
 }
 },
 "provider": {
-"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n"
+"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n"
 },
 "task": {
 "description": "The task type of the external model."
@@ -2734,7 +2812,7 @@
 "description": "Deployment information for jobs managed by external sources.",
 "properties": {
 "kind": {
-"description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.\n"
+"description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle."
 },
 "metadata_file_path": {
 "description": "Path of the file that contains deployment metadata."
@ -2745,7 +2823,7 @@
|
||||||
"description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding."
|
"description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding."
|
||||||
},
|
},
|
||||||
"edit_mode": {
|
"edit_mode": {
|
||||||
"description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.\n"
|
"description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified."
|
||||||
},
|
},
|
||||||
"email_notifications": {
|
"email_notifications": {
|
||||||
"description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.",
|
"description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.",
|
||||||
|
@ -2812,7 +2890,7 @@
|
||||||
"description": "The source of the job specification in the remote repository when the job is source controlled.",
|
"description": "The source of the job specification in the remote repository when the job is source controlled.",
|
||||||
"properties": {
|
"properties": {
|
||||||
"dirty_state": {
|
"dirty_state": {
|
||||||
"description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced.\n"
|
"description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced."
|
||||||
},
|
},
|
||||||
"import_from_git_branch": {
|
"import_from_git_branch": {
|
||||||
"description": "Name of the branch which the job is imported from."
|
"description": "Name of the branch which the job is imported from."
|
||||||
|
@ -2855,7 +2933,7 @@
|
||||||
"description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution."
|
"description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution."
|
||||||
},
|
},
|
||||||
"new_cluster": {
|
"new_cluster": {
|
||||||
"description": "If new_cluster, a description of a cluster that is created for each task.",
|
"description": "If new_cluster, a description of a new cluster that is created for each run.",
|
||||||
"properties": {
|
"properties": {
|
||||||
"apply_policy_default_values": {
|
"apply_policy_default_values": {
|
||||||
"description": ""
|
"description": ""
|
||||||
|
@ -3185,7 +3263,7 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"max_concurrent_runs": {
|
"max_concurrent_runs": {
|
||||||
"description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped."
|
"description": "An optional maximum allowed number of concurrent runs of the job.\nSet this value if you want to be able to execute multiple runs of the same job concurrently.\nThis is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs.\nHowever, from then on, new runs are skipped unless there are fewer than 3 active runs.\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped."
|
||||||
},
|
},
|
||||||
"name": {
|
"name": {
|
||||||
"description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding."
|
"description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding."
|
||||||
|
@ -3261,10 +3339,10 @@
|
||||||
"description": "Whether this trigger is paused or not."
|
"description": "Whether this trigger is paused or not."
|
||||||
},
|
},
|
||||||
"quartz_cron_expression": {
|
"quartz_cron_expression": {
|
||||||
"description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n"
|
"description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required."
|
||||||
},
|
},
|
||||||
"timezone_id": {
|
"timezone_id": {
|
||||||
"description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone.\nSee [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details.\nThis field is required.\n"
|
"description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. This field is required."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
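Editor's note: the rows above touch the `schedule` block's `quartz_cron_expression` and `timezone_id` descriptions. A small sketch with the SDK's `jobs` package, assuming the `CronSchedule` field names mirror the JSON keys; the cron expression and timezone are illustrative.

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// Sketch of the schedule block described above: a required Quartz cron
	// expression resolved against a required Java timezone ID.
	schedule := jobs.CronSchedule{
		QuartzCronExpression: "0 0 9 * * ?",      // 09:00 every day, Quartz syntax
		TimezoneId:           "Europe/Amsterdam", // Java timezone ID
	}
	fmt.Printf("%+v\n", schedule)
}
```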
|
@ -3289,7 +3367,7 @@
|
||||||
"description": "The left operand of the condition task. Can be either a string value or a job state or parameter reference."
|
"description": "The left operand of the condition task. Can be either a string value or a job state or parameter reference."
|
||||||
},
|
},
|
||||||
"op": {
|
"op": {
|
||||||
"description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.\n"
|
"description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison."
|
||||||
},
|
},
|
||||||
"right": {
|
"right": {
|
||||||
"description": "The right operand of the condition task. Can be either a string value or a job state or parameter reference."
|
"description": "The right operand of the condition task. Can be either a string value or a job state or parameter reference."
|
||||||
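Editor's note: a sketch of the comparison semantics spelled out in the `op` description above, using the SDK's `jobs.ConditionTask` (field names `Left`, `Op`, `Right` are assumed from the JSON keys; the operator strings are assigned directly, which compiles because the SDK uses string-based enum types). The task-value reference in the second literal is illustrative only.

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// EQUAL_TO compares strings, so "12.0" == "12" evaluates to false;
	// GREATER_THAN_OR_EQUAL compares numerically, so "12.0" >= "12" is true.
	stringCompare := jobs.ConditionTask{
		Left:  "12.0",
		Op:    "EQUAL_TO",
		Right: "12",
	}
	numericCompare := jobs.ConditionTask{
		Left:  "{{tasks.upstream.values.row_count}}", // illustrative task value reference
		Op:    "GREATER_THAN_OR_EQUAL",
		Right: "12",
	}
	fmt.Printf("%+v\n%+v\n", stringCompare, numericCompare)
}
```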
|
@ -3312,13 +3390,13 @@
|
||||||
"description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used."
|
"description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used."
|
||||||
},
|
},
|
||||||
"project_directory": {
|
"project_directory": {
|
||||||
"description": "Path to the project directory. Optional for Git sourced tasks, in which case if no value is provided, the root of the Git repository is used."
|
"description": "Path to the project directory. Optional for Git sourced tasks, in which\ncase if no value is provided, the root of the Git repository is used."
|
||||||
},
|
},
|
||||||
"schema": {
|
"schema": {
|
||||||
"description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used."
|
"description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used."
|
||||||
},
|
},
|
||||||
"source": {
|
"source": {
|
||||||
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
|
"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository."
|
||||||
},
|
},
|
||||||
"warehouse_id": {
|
"warehouse_id": {
|
||||||
"description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument."
|
"description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument."
|
||||||
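Editor's note: a sketch of the `dbt_task` fields whose descriptions change in this hunk, using `jobs.DbtTask` with field names assumed to mirror the JSON keys; the warehouse ID and project path are placeholders.

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// Commands run in order; providing a warehouse_id lets Databricks generate
	// the dbt profile and connection details, and schema defaults to `default`.
	task := jobs.DbtTask{
		Commands:         []string{"dbt deps", "dbt seed", "dbt run"},
		ProjectDirectory: "dbt_project",        // relative path for Git-sourced tasks
		Schema:           "analytics",          // illustrative target schema
		WarehouseId:      "<sql-warehouse-id>", // placeholder
	}
	fmt.Printf("%+v\n", task)
}
```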
|
@ -3326,7 +3404,7 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"depends_on": {
|
"depends_on": {
|
||||||
"description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.\n",
|
"description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.",
|
||||||
"items": {
|
"items": {
|
||||||
"description": "",
|
"description": "",
|
||||||
"properties": {
|
"properties": {
|
||||||
|
@ -3342,9 +3420,15 @@
|
||||||
"description": {
|
"description": {
|
||||||
"description": "An optional description for this task."
|
"description": "An optional description for this task."
|
||||||
},
|
},
|
||||||
|
"disable_auto_optimization": {
|
||||||
|
"description": "An option to disable auto optimization in serverless"
|
||||||
|
},
|
||||||
"email_notifications": {
|
"email_notifications": {
|
||||||
"description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.",
|
"description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.",
|
||||||
"properties": {
|
"properties": {
|
||||||
|
"no_alert_for_skipped_runs": {
|
||||||
|
"description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped."
|
||||||
|
},
|
||||||
"on_duration_warning_threshold_exceeded": {
|
"on_duration_warning_threshold_exceeded": {
|
||||||
"description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.",
|
"description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.",
|
||||||
"items": {
|
"items": {
|
||||||
|
@ -3372,9 +3456,11 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"existing_cluster_id": {
|
"existing_cluster_id": {
|
||||||
"description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability."
|
"description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs.\nWhen running jobs or tasks on an existing cluster, you may need to manually restart\nthe cluster if it stops responding. We suggest running jobs and tasks on new clusters for\ngreater reliability"
|
||||||
|
},
|
||||||
|
"for_each_task": {
|
||||||
|
"description": ""
|
||||||
},
|
},
|
||||||
"for_each_task": null,
|
|
||||||
"health": {
|
"health": {
|
||||||
"description": "",
|
"description": "",
|
||||||
"properties": {
|
"properties": {
|
||||||
|
@ -3401,7 +3487,7 @@
|
||||||
"description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`."
|
"description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`."
|
||||||
},
|
},
|
||||||
"libraries": {
|
"libraries": {
|
||||||
"description": "An optional list of libraries to be installed on the cluster that executes the task. The default value is an empty list.",
|
"description": "An optional list of libraries to be installed on the cluster.\nThe default value is an empty list.",
|
||||||
"items": {
|
"items": {
|
||||||
"description": "",
|
"description": "",
|
||||||
"properties": {
|
"properties": {
|
||||||
|
@ -3463,7 +3549,7 @@
|
||||||
"description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried."
|
"description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried."
|
||||||
},
|
},
|
||||||
"new_cluster": {
|
"new_cluster": {
|
||||||
"description": "If new_cluster, a description of a cluster that is created for each task.",
|
"description": "If new_cluster, a description of a new cluster that is created for each run.",
|
||||||
"properties": {
|
"properties": {
|
||||||
"apply_policy_default_values": {
|
"apply_policy_default_values": {
|
||||||
"description": ""
|
"description": ""
|
||||||
|
@ -3793,16 +3879,16 @@
|
||||||
"description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.",
|
"description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.",
|
||||||
"properties": {
|
"properties": {
|
||||||
"base_parameters": {
|
"base_parameters": {
|
||||||
"description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n",
|
"description": "Base parameters to be used for each run of this job. If the run is initiated by a call to :method:jobs/run\nNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.",
|
||||||
"additionalproperties": {
|
"additionalproperties": {
|
||||||
"description": ""
|
"description": ""
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"notebook_path": {
|
"notebook_path": {
|
||||||
"description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n"
|
"description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required."
|
||||||
},
|
},
|
||||||
"source": {
|
"source": {
|
||||||
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
|
"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
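Editor's note: a sketch of the `notebook_task` shape the descriptions above refer to, using `jobs.NotebookTask` (field names assumed from the JSON keys; the notebook path and parameter value are placeholders).

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// base_parameters are merged with run-now parameters (run-now wins on key
	// conflicts) and are read inside the notebook with dbutils.widgets.get.
	task := jobs.NotebookTask{
		NotebookPath: "/Workspace/Users/someone@example.com/etl", // placeholder absolute path
		Source:       "WORKSPACE",
		BaseParameters: map[string]string{
			"run_date": "2024-01-01", // illustrative default, overridable at run-now
		},
	}
	fmt.Printf("%+v\n", task)
}
```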
|
@ -3824,7 +3910,7 @@
|
||||||
"description": "If pipeline_task, indicates that this task must execute a Pipeline.",
|
"description": "If pipeline_task, indicates that this task must execute a Pipeline.",
|
||||||
"properties": {
|
"properties": {
|
||||||
"full_refresh": {
|
"full_refresh": {
|
||||||
"description": "If true, a full refresh will be triggered on the delta live table."
|
"description": "If true, triggers a full refresh on the delta live table."
|
||||||
},
|
},
|
||||||
"pipeline_id": {
|
"pipeline_id": {
|
||||||
"description": "The full name of the pipeline task to execute."
|
"description": "The full name of the pipeline task to execute."
|
||||||
|
@ -3855,14 +3941,26 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"retry_on_timeout": {
|
"retry_on_timeout": {
|
||||||
"description": "An optional policy to specify whether to retry a task when it times out."
|
"description": "An optional policy to specify whether to retry a job when it times out. The default behavior\nis to not retry on timeout."
|
||||||
},
|
},
|
||||||
"run_if": {
|
"run_if": {
|
||||||
"description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed\n"
|
"description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed"
|
||||||
},
|
},
|
||||||
"run_job_task": {
|
"run_job_task": {
|
||||||
"description": "If run_job_task, indicates that this task must execute another job.",
|
"description": "If run_job_task, indicates that this task must execute another job.",
|
||||||
"properties": {
|
"properties": {
|
||||||
|
"dbt_commands": {
|
||||||
|
"description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`",
|
||||||
|
"items": {
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"jar_params": {
|
||||||
|
"description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.",
|
||||||
|
"items": {
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
"job_id": {
|
"job_id": {
|
||||||
"description": "ID of the job to trigger."
|
"description": "ID of the job to trigger."
|
||||||
},
|
},
|
||||||
|
@ -3871,6 +3969,44 @@
|
||||||
"additionalproperties": {
|
"additionalproperties": {
|
||||||
"description": ""
|
"description": ""
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"notebook_params": {
|
||||||
|
"description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.",
|
||||||
|
"additionalproperties": {
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"pipeline_params": {
|
||||||
|
"description": "",
|
||||||
|
"properties": {
|
||||||
|
"full_refresh": {
|
||||||
|
"description": "If true, triggers a full refresh on the delta live table."
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"python_named_params": {
|
||||||
|
"description": "A map from keys to values for jobs with Python wheel task, for example `\"python_named_params\": {\"name\": \"task\", \"data\": \"dbfs:/path/to/data.json\"}`.",
|
||||||
|
"additionalproperties": {
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"python_params": {
|
||||||
|
"description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.",
|
||||||
|
"items": {
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"spark_submit_params": {
|
||||||
|
"description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.",
|
||||||
|
"items": {
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"sql_params": {
|
||||||
|
"description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.",
|
||||||
|
"additionalproperties": {
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
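Editor's note: the rows above document new per-type parameters on `run_job_task` (`dbt_commands`, `jar_params`, `notebook_params`, and so on). A sketch with `jobs.RunJobTask`, assuming the Go field names mirror those keys; the job ID and parameter values are placeholders, and the other parameter families follow the same pattern.

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// A run_job_task that forwards notebook parameters to the child job,
	// matching the notebook_params description above.
	task := jobs.RunJobTask{
		JobId: 123456, // placeholder ID of the job to trigger
		NotebookParams: map[string]string{
			"name": "john doe",
			"age":  "35",
		},
	}
	fmt.Printf("%+v\n", task)
}
```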
|
@ -3878,13 +4014,13 @@
|
||||||
"description": "If spark_jar_task, indicates that this task must run a JAR.",
|
"description": "If spark_jar_task, indicates that this task must run a JAR.",
|
||||||
"properties": {
|
"properties": {
|
||||||
"jar_uri": {
|
"jar_uri": {
|
||||||
"description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create.\n"
|
"description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create."
|
||||||
},
|
},
|
||||||
"main_class_name": {
|
"main_class_name": {
|
||||||
"description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail."
|
"description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail."
|
||||||
},
|
},
|
||||||
"parameters": {
|
"parameters": {
|
||||||
"description": "Parameters passed to the main method.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
|
"description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.",
|
||||||
"items": {
|
"items": {
|
||||||
"description": ""
|
"description": ""
|
||||||
}
|
}
|
||||||
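Editor's note: a sketch of the `spark_jar_task` described above, using `jobs.SparkJarTask` with field names assumed from the JSON keys; the class name is a placeholder, and the `{{job.id}}` parameter variable is taken from the description text.

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// The main class must ship in a JAR attached via the libraries field, and
	// parameters may use task parameter variables such as {{job.id}}.
	task := jobs.SparkJarTask{
		MainClassName: "com.example.etl.Main", // placeholder class name
		Parameters:    []string{"--job-id", "{{job.id}}"},
	}
	fmt.Printf("%+v\n", task)
}
```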
|
@ -3895,7 +4031,7 @@
|
||||||
"description": "If spark_python_task, indicates that this task must run a Python file.",
|
"description": "If spark_python_task, indicates that this task must run a Python file.",
|
||||||
"properties": {
|
"properties": {
|
||||||
"parameters": {
|
"parameters": {
|
||||||
"description": "Command line parameters passed to the Python file.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
|
"description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.",
|
||||||
"items": {
|
"items": {
|
||||||
"description": ""
|
"description": ""
|
||||||
}
|
}
|
||||||
|
@ -3904,15 +4040,15 @@
|
||||||
"description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
|
"description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
|
||||||
},
|
},
|
||||||
"source": {
|
"source": {
|
||||||
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
|
"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"spark_submit_task": {
|
"spark_submit_task": {
|
||||||
"description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n",
|
"description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.",
|
||||||
"properties": {
|
"properties": {
|
||||||
"parameters": {
|
"parameters": {
|
||||||
"description": "Command-line parameters passed to spark submit.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n",
|
"description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.",
|
||||||
"items": {
|
"items": {
|
||||||
"description": ""
|
"description": ""
|
||||||
}
|
}
|
||||||
|
@ -3982,7 +4118,7 @@
|
||||||
"description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths."
|
"description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths."
|
||||||
},
|
},
|
||||||
"source": {
|
"source": {
|
||||||
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
|
"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
@ -4012,7 +4148,7 @@
|
||||||
"description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout."
|
"description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout."
|
||||||
},
|
},
|
||||||
"webhook_notifications": {
|
"webhook_notifications": {
|
||||||
"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
|
"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
|
||||||
"properties": {
|
"properties": {
|
||||||
"on_duration_warning_threshold_exceeded": {
|
"on_duration_warning_threshold_exceeded": {
|
||||||
"description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
|
"description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
|
||||||
|
@ -4073,13 +4209,13 @@
|
||||||
"description": "File arrival trigger settings.",
|
"description": "File arrival trigger settings.",
|
||||||
"properties": {
|
"properties": {
|
||||||
"min_time_between_triggers_seconds": {
|
"min_time_between_triggers_seconds": {
|
||||||
"description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n"
|
"description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds"
|
||||||
},
|
},
|
||||||
"url": {
|
"url": {
|
||||||
"description": "The storage location to monitor for file arrivals. The value must point to the root or a subpath of an external location URL or the root or subpath of a Unity Catalog volume."
|
"description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location."
|
||||||
},
|
},
|
||||||
"wait_after_last_change_seconds": {
|
"wait_after_last_change_seconds": {
|
||||||
"description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n"
|
"description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
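Editor's note: a sketch of the `file_arrival` trigger block described above, expressed as the JSON the schema validates; the storage URL is a placeholder and both timing guards are set to their documented 60-second minimum.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Run the job when files land under a monitored location, waiting for
	// activity to settle before triggering.
	trigger := map[string]any{
		"file_arrival": map[string]any{
			"url":                               "s3://my-bucket/landing/", // placeholder location
			"min_time_between_triggers_seconds": 60,
			"wait_after_last_change_seconds":    60,
		},
		"pause_status": "UNPAUSED",
	}
	out, err := json.MarshalIndent(trigger, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```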
|
@ -4087,13 +4223,13 @@
|
||||||
"description": "Whether this trigger is paused or not."
|
"description": "Whether this trigger is paused or not."
|
||||||
},
|
},
|
||||||
"table": {
|
"table": {
|
||||||
"description": "Table trigger settings.",
|
"description": "",
|
||||||
"properties": {
|
"properties": {
|
||||||
"condition": {
|
"condition": {
|
||||||
"description": "The table(s) condition based on which to trigger a job run."
|
"description": "The table(s) condition based on which to trigger a job run."
|
||||||
},
|
},
|
||||||
"min_time_between_triggers_seconds": {
|
"min_time_between_triggers_seconds": {
|
||||||
"description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.\n"
|
"description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds."
|
||||||
},
|
},
|
||||||
"table_names": {
|
"table_names": {
|
||||||
"description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.",
|
"description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.",
|
||||||
|
@ -4102,14 +4238,34 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"wait_after_last_change_seconds": {
|
"wait_after_last_change_seconds": {
|
||||||
"description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds.\n"
|
"description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds."
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"table_update": {
|
||||||
|
"description": "",
|
||||||
|
"properties": {
|
||||||
|
"condition": {
|
||||||
|
"description": "The table(s) condition based on which to trigger a job run."
|
||||||
|
},
|
||||||
|
"min_time_between_triggers_seconds": {
|
||||||
|
"description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds."
|
||||||
|
},
|
||||||
|
"table_names": {
|
||||||
|
"description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.",
|
||||||
|
"items": {
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"wait_after_last_change_seconds": {
|
||||||
|
"description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"webhook_notifications": {
|
"webhook_notifications": {
|
||||||
"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
|
"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
|
||||||
"properties": {
|
"properties": {
|
||||||
"on_duration_warning_threshold_exceeded": {
|
"on_duration_warning_threshold_exceeded": {
|
||||||
"description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
|
"description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
|
||||||
|
@ -4213,16 +4369,8 @@
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"anthropic_config": {
|
"amazon_bedrock_config": {
|
||||||
"description": "Anthropic Config. Only required if the provider is 'anthropic'.",
|
"description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.",
|
||||||
"properties": {
|
|
||||||
"anthropic_api_key": {
|
|
||||||
"description": "The Databricks secret key reference for an Anthropic API key."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"aws_bedrock_config": {
|
|
||||||
"description": "AWS Bedrock Config. Only required if the provider is 'aws-bedrock'.",
|
|
||||||
"properties": {
|
"properties": {
|
||||||
"aws_access_key_id": {
|
"aws_access_key_id": {
|
||||||
"description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services."
|
"description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services."
|
||||||
|
@ -4234,7 +4382,15 @@
|
||||||
"description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services."
|
"description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services."
|
||||||
},
|
},
|
||||||
"bedrock_provider": {
|
"bedrock_provider": {
|
||||||
"description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon."
|
"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon."
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"anthropic_config": {
|
||||||
|
"description": "Anthropic Config. Only required if the provider is 'anthropic'.",
|
||||||
|
"properties": {
|
||||||
|
"anthropic_api_key": {
|
||||||
|
"description": "The Databricks secret key reference for an Anthropic API key."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
@ -4292,7 +4448,7 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"provider": {
|
"provider": {
|
||||||
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n"
|
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n"
|
||||||
},
|
},
|
||||||
"task": {
|
"task": {
|
||||||
"description": "The task type of the external model."
|
"description": "The task type of the external model."
|
||||||
|
|
|
@ -133,12 +133,12 @@ func TestGenerateJobCommand(t *testing.T) {
|
||||||
Settings: &jobs.JobSettings{
|
Settings: &jobs.JobSettings{
|
||||||
Name: "test-job",
|
Name: "test-job",
|
||||||
JobClusters: []jobs.JobCluster{
|
JobClusters: []jobs.JobCluster{
|
||||||
{NewCluster: &compute.ClusterSpec{
|
{NewCluster: compute.ClusterSpec{
|
||||||
CustomTags: map[string]string{
|
CustomTags: map[string]string{
|
||||||
"Tag1": "24X7-1234",
|
"Tag1": "24X7-1234",
|
||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
{NewCluster: &compute.ClusterSpec{
|
{NewCluster: compute.ClusterSpec{
|
||||||
SparkConf: map[string]string{
|
SparkConf: map[string]string{
|
||||||
"spark.databricks.delta.preview.enabled": "true",
|
"spark.databricks.delta.preview.enabled": "true",
|
||||||
},
|
},
|
||||||
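Editor's note: the test hunk above reflects the SDK 0.37.0 change where `JobCluster.NewCluster` is a struct value rather than a pointer, so the `&` is dropped from the literal. A minimal sketch of the new shape, reusing the same fields that appear in the test; the job name is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// With databricks-sdk-go 0.37.0 the ClusterSpec literal is assigned
	// directly, without the leading & the old pointer field required.
	settings := jobs.JobSettings{
		Name: "example-job", // placeholder name
		JobClusters: []jobs.JobCluster{
			{NewCluster: compute.ClusterSpec{
				CustomTags: map[string]string{"Tag1": "24X7-1234"},
			}},
		},
	}
	fmt.Printf("%+v\n", settings)
}
```

One consequence of the value type: callers check for an unset cluster by comparing against the zero value instead of testing for nil.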
|
|
|
@ -65,7 +65,7 @@ func newCreate() *cobra.Command {
|
||||||
// TODO: short flags
|
// TODO: short flags
|
||||||
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||||
|
|
||||||
cmd.Flags().StringVar(&createReq.GitUsername, "git-username", createReq.GitUsername, `Git username.`)
|
cmd.Flags().StringVar(&createReq.GitUsername, "git-username", createReq.GitUsername, `The username or email provided with your Git provider account, depending on which provider you are using.`)
|
||||||
cmd.Flags().StringVar(&createReq.PersonalAccessToken, "personal-access-token", createReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`)
|
cmd.Flags().StringVar(&createReq.PersonalAccessToken, "personal-access-token", createReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`)
|
||||||
|
|
||||||
cmd.Use = "create GIT_PROVIDER"
|
cmd.Use = "create GIT_PROVIDER"
|
||||||
|
@ -335,7 +335,7 @@ func newUpdate() *cobra.Command {
|
||||||
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||||
|
|
||||||
cmd.Flags().StringVar(&updateReq.GitProvider, "git-provider", updateReq.GitProvider, `Git provider.`)
|
cmd.Flags().StringVar(&updateReq.GitProvider, "git-provider", updateReq.GitProvider, `Git provider.`)
|
||||||
cmd.Flags().StringVar(&updateReq.GitUsername, "git-username", updateReq.GitUsername, `Git username.`)
|
cmd.Flags().StringVar(&updateReq.GitUsername, "git-username", updateReq.GitUsername, `The username or email provided with your Git provider account, depending on which provider you are using.`)
|
||||||
cmd.Flags().StringVar(&updateReq.PersonalAccessToken, "personal-access-token", updateReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`)
|
cmd.Flags().StringVar(&updateReq.PersonalAccessToken, "personal-access-token", updateReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`)
|
||||||
|
|
||||||
cmd.Use = "update CREDENTIAL_ID"
|
cmd.Use = "update CREDENTIAL_ID"
|
||||||
|
|
|
@ -243,13 +243,13 @@ func newDelete() *cobra.Command {
|
||||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||||
var getOverrides []func(
|
var getOverrides []func(
|
||||||
*cobra.Command,
|
*cobra.Command,
|
||||||
*settings.GetIpAccessList,
|
*settings.GetIpAccessListRequest,
|
||||||
)
|
)
|
||||||
|
|
||||||
func newGet() *cobra.Command {
|
func newGet() *cobra.Command {
|
||||||
cmd := &cobra.Command{}
|
cmd := &cobra.Command{}
|
||||||
|
|
||||||
var getReq settings.GetIpAccessList
|
var getReq settings.GetIpAccessListRequest
|
||||||
|
|
||||||
// TODO: short flags
|
// TODO: short flags
|
||||||
|
|
||||||
|
|
|
@ -436,7 +436,7 @@ func newDeleteRun() *cobra.Command {
|
||||||
Deletes a non-active run. Returns an error if the run is active.
|
Deletes a non-active run. Returns an error if the run is active.
|
||||||
|
|
||||||
Arguments:
|
Arguments:
|
||||||
RUN_ID: The canonical identifier of the run for which to retrieve the metadata.`
|
RUN_ID: ID of the run to delete.`
|
||||||
|
|
||||||
cmd.Annotations = make(map[string]string)
|
cmd.Annotations = make(map[string]string)
|
||||||
|
|
||||||
|
@ -470,14 +470,14 @@ func newDeleteRun() *cobra.Command {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
|
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||||
}
|
}
|
||||||
id, err := cmdio.Select(ctx, names, "The canonical identifier of the run for which to retrieve the metadata")
|
id, err := cmdio.Select(ctx, names, "ID of the run to delete")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
args = append(args, id)
|
args = append(args, id)
|
||||||
}
|
}
|
||||||
if len(args) != 1 {
|
if len(args) != 1 {
|
||||||
return fmt.Errorf("expected to have the canonical identifier of the run for which to retrieve the metadata")
|
return fmt.Errorf("expected to have id of the run to delete")
|
||||||
}
|
}
|
||||||
_, err = fmt.Sscan(args[0], &deleteRunReq.RunId)
|
_, err = fmt.Sscan(args[0], &deleteRunReq.RunId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -908,7 +908,7 @@ func newGetRunOutput() *cobra.Command {
|
||||||
60 days, you must save old run results before they expire.
|
60 days, you must save old run results before they expire.
|
||||||
|
|
||||||
Arguments:
|
Arguments:
|
||||||
RUN_ID: The canonical identifier for the run. This field is required.`
|
RUN_ID: The canonical identifier for the run.`
|
||||||
|
|
||||||
cmd.Annotations = make(map[string]string)
|
cmd.Annotations = make(map[string]string)
|
||||||
|
|
||||||
|
@ -1038,8 +1038,8 @@ func newListRuns() *cobra.Command {
|
||||||
cmd.Flags().IntVar(&listRunsReq.Offset, "offset", listRunsReq.Offset, `The offset of the first run to return, relative to the most recent run.`)
|
cmd.Flags().IntVar(&listRunsReq.Offset, "offset", listRunsReq.Offset, `The offset of the first run to return, relative to the most recent run.`)
|
||||||
cmd.Flags().StringVar(&listRunsReq.PageToken, "page-token", listRunsReq.PageToken, `Use next_page_token or prev_page_token returned from the previous request to list the next or previous page of runs respectively.`)
|
cmd.Flags().StringVar(&listRunsReq.PageToken, "page-token", listRunsReq.PageToken, `Use next_page_token or prev_page_token returned from the previous request to list the next or previous page of runs respectively.`)
|
||||||
cmd.Flags().Var(&listRunsReq.RunType, "run-type", `The type of runs to return. Supported values: [JOB_RUN, SUBMIT_RUN, WORKFLOW_RUN]`)
|
cmd.Flags().Var(&listRunsReq.RunType, "run-type", `The type of runs to return. Supported values: [JOB_RUN, SUBMIT_RUN, WORKFLOW_RUN]`)
|
||||||
cmd.Flags().IntVar(&listRunsReq.StartTimeFrom, "start-time-from", listRunsReq.StartTimeFrom, `Show runs that started _at or after_ this value.`)
|
cmd.Flags().Int64Var(&listRunsReq.StartTimeFrom, "start-time-from", listRunsReq.StartTimeFrom, `Show runs that started _at or after_ this value.`)
|
||||||
cmd.Flags().IntVar(&listRunsReq.StartTimeTo, "start-time-to", listRunsReq.StartTimeTo, `Show runs that started _at or before_ this value.`)
|
cmd.Flags().Int64Var(&listRunsReq.StartTimeTo, "start-time-to", listRunsReq.StartTimeTo, `Show runs that started _at or before_ this value.`)
|
||||||
|
|
||||||
cmd.Use = "list-runs"
|
cmd.Use = "list-runs"
|
||||||
cmd.Short = `List job runs.`
|
cmd.Short = `List job runs.`
|
||||||
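Editor's note: the flag wiring above switches `start-time-from` and `start-time-to` to 64-bit values. A sketch of filling the corresponding request fields with epoch-millisecond timestamps via the SDK; the request type name `jobs.ListRunsRequest` is assumed from the generated command.

```go
package main

import (
	"fmt"
	"time"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// start_time_from / start_time_to are epoch milliseconds, which do not fit
	// in 32 bits, hence the Int64Var flag binding in the hunk above.
	now := time.Now()
	req := jobs.ListRunsRequest{
		StartTimeFrom: now.Add(-24 * time.Hour).UnixMilli(),
		StartTimeTo:   now.UnixMilli(),
	}
	fmt.Printf("%+v\n", req)
}
```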
|
@@ -1502,13 +1502,23 @@ func newSubmit() *cobra.Command {
 cmd.Flags().Var(&submitJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 // TODO: array: access_control_list
+// TODO: complex arg: condition_task
+// TODO: complex arg: dbt_task
 // TODO: complex arg: email_notifications
 // TODO: complex arg: git_source
 // TODO: complex arg: health
 cmd.Flags().StringVar(&submitReq.IdempotencyToken, "idempotency-token", submitReq.IdempotencyToken, `An optional token that can be used to guarantee the idempotency of job run requests.`)
+// TODO: complex arg: notebook_task
 // TODO: complex arg: notification_settings
+// TODO: complex arg: pipeline_task
+// TODO: complex arg: python_wheel_task
 // TODO: complex arg: queue
+// TODO: complex arg: run_job_task
 cmd.Flags().StringVar(&submitReq.RunName, "run-name", submitReq.RunName, `An optional name for the run.`)
+// TODO: complex arg: spark_jar_task
+// TODO: complex arg: spark_python_task
+// TODO: complex arg: spark_submit_task
+// TODO: complex arg: sql_task
 // TODO: array: tasks
 cmd.Flags().IntVar(&submitReq.TimeoutSeconds, "timeout-seconds", submitReq.TimeoutSeconds, `An optional timeout applied to each run of this job.`)
 // TODO: complex arg: webhook_notifications
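The newly listed task types (`condition_task`, `dbt_task`, `notebook_task`, and so on) still have no dedicated flags, so on the `submit` command they are supplied through `--json`. For context, a hedged sketch of the same kind of one-off submission made directly against the Go SDK; the `jobs.SubmitRun`/`jobs.SubmitTask` field names are assumed from the SDK and the notebook path is made up:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	ctx := context.Background()
	// Reads auth from the environment / .databrickscfg, as the CLI does.
	w := databricks.Must(databricks.NewWorkspaceClient())

	// A one-off run with a single notebook task; the generated `submit`
	// command accepts the same shape as a JSON request body via --json.
	wait, err := w.Jobs.Submit(ctx, jobs.SubmitRun{
		RunName: "adhoc-example",
		Tasks: []jobs.SubmitTask{{
			TaskKey: "main",
			NotebookTask: &jobs.NotebookTask{
				NotebookPath: "/Users/someone@example.com/example-notebook", // hypothetical path
			},
		}},
	})
	if err != nil {
		panic(err)
	}

	// Block until the run reaches a terminal state.
	run, err := wait.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println("run finished:", run.RunId)
}
```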
@@ -33,8 +33,10 @@ func New() *cobra.Command {
 cmd.AddCommand(newCreate())
 cmd.AddCommand(newGet())
 cmd.AddCommand(newGetPublished())
+cmd.AddCommand(newMigrate())
 cmd.AddCommand(newPublish())
 cmd.AddCommand(newTrash())
+cmd.AddCommand(newUnpublish())
 cmd.AddCommand(newUpdate())

 // Apply optional overrides to this command.
@@ -240,6 +242,87 @@ func newGetPublished() *cobra.Command {
 return cmd
 }

+// start migrate command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var migrateOverrides []func(
+    *cobra.Command,
+    *dashboards.MigrateDashboardRequest,
+)
+
+func newMigrate() *cobra.Command {
+    cmd := &cobra.Command{}
+
+    var migrateReq dashboards.MigrateDashboardRequest
+    var migrateJson flags.JsonFlag
+
+    // TODO: short flags
+    cmd.Flags().Var(&migrateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+    cmd.Flags().StringVar(&migrateReq.DisplayName, "display-name", migrateReq.DisplayName, `Display name for the new Lakeview dashboard.`)
+    cmd.Flags().StringVar(&migrateReq.ParentPath, "parent-path", migrateReq.ParentPath, `The workspace path of the folder to contain the migrated Lakeview dashboard.`)
+
+    cmd.Use = "migrate SOURCE_DASHBOARD_ID"
+    cmd.Short = `Migrate dashboard.`
+    cmd.Long = `Migrate dashboard.
+
+  Migrates a classic SQL dashboard to Lakeview.
+
+  Arguments:
+    SOURCE_DASHBOARD_ID: UUID of the dashboard to be migrated.`
+
+    // This command is being previewed; hide from help output.
+    cmd.Hidden = true
+
+    cmd.Annotations = make(map[string]string)
+
+    cmd.Args = func(cmd *cobra.Command, args []string) error {
+        if cmd.Flags().Changed("json") {
+            err := root.ExactArgs(0)(cmd, args)
+            if err != nil {
+                return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'source_dashboard_id' in your JSON input")
+            }
+            return nil
+        }
+        check := root.ExactArgs(1)
+        return check(cmd, args)
+    }
+
+    cmd.PreRunE = root.MustWorkspaceClient
+    cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+        ctx := cmd.Context()
+        w := root.WorkspaceClient(ctx)
+
+        if cmd.Flags().Changed("json") {
+            err = migrateJson.Unmarshal(&migrateReq)
+            if err != nil {
+                return err
+            }
+        }
+        if !cmd.Flags().Changed("json") {
+            migrateReq.SourceDashboardId = args[0]
+        }
+
+        response, err := w.Lakeview.Migrate(ctx, migrateReq)
+        if err != nil {
+            return err
+        }
+        return cmdio.Render(ctx, response)
+    }
+
+    // Disable completions since they are not applicable.
+    // Can be overridden by manual implementation in `override.go`.
+    cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+    // Apply optional overrides to this command.
+    for _, fn := range migrateOverrides {
+        fn(cmd, &migrateReq)
+    }
+
+    return cmd
+}
+
 // start publish command

 // Slice with functions to override default command behavior.
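The new command is a thin wrapper over `w.Lakeview.Migrate`. A minimal sketch of the same call made directly through the SDK, using the request fields the command exposes as flags and args; the dashboard ID, display name, and parent path are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Same request shape the generated command builds from its flags and argument.
	resp, err := w.Lakeview.Migrate(ctx, dashboards.MigrateDashboardRequest{
		SourceDashboardId: "00000000-0000-0000-0000-000000000000", // placeholder UUID
		DisplayName:       "Migrated dashboard",                   // optional, --display-name
		ParentPath:        "/Workspace/Users/someone@example.com", // optional, --parent-path
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created Lakeview dashboard:", resp.DashboardId)
}
```

From the CLI this is presumably `databricks lakeview migrate <SOURCE_DASHBOARD_ID>`, though the command is hidden while in preview.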
@@ -367,6 +450,67 @@ func newTrash() *cobra.Command {
 return cmd
 }

+// start unpublish command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var unpublishOverrides []func(
+    *cobra.Command,
+    *dashboards.UnpublishDashboardRequest,
+)
+
+func newUnpublish() *cobra.Command {
+    cmd := &cobra.Command{}
+
+    var unpublishReq dashboards.UnpublishDashboardRequest
+
+    // TODO: short flags
+
+    cmd.Use = "unpublish DASHBOARD_ID"
+    cmd.Short = `Unpublish dashboard.`
+    cmd.Long = `Unpublish dashboard.
+
+  Unpublish the dashboard.
+
+  Arguments:
+    DASHBOARD_ID: UUID identifying the dashboard to be published.`
+
+    // This command is being previewed; hide from help output.
+    cmd.Hidden = true
+
+    cmd.Annotations = make(map[string]string)
+
+    cmd.Args = func(cmd *cobra.Command, args []string) error {
+        check := root.ExactArgs(1)
+        return check(cmd, args)
+    }
+
+    cmd.PreRunE = root.MustWorkspaceClient
+    cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+        ctx := cmd.Context()
+        w := root.WorkspaceClient(ctx)
+
+        unpublishReq.DashboardId = args[0]
+
+        err = w.Lakeview.Unpublish(ctx, unpublishReq)
+        if err != nil {
+            return err
+        }
+        return nil
+    }
+
+    // Disable completions since they are not applicable.
+    // Can be overridden by manual implementation in `override.go`.
+    cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+    // Apply optional overrides to this command.
+    for _, fn := range unpublishOverrides {
+        fn(cmd, &unpublishReq)
+    }
+
+    return cmd
+}
+
 // start update command

 // Slice with functions to override default command behavior.
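Likewise, `unpublish` maps its one positional argument onto `UnpublishDashboardRequest.DashboardId` and returns no payload. The equivalent direct SDK call, sketched with a placeholder ID:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Unpublish returns only an error; nil means the published copy was removed.
	err := w.Lakeview.Unpublish(ctx, dashboards.UnpublishDashboardRequest{
		DashboardId: "00000000-0000-0000-0000-000000000000", // placeholder UUID
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("dashboard unpublished")
}
```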
@@ -64,6 +64,9 @@ func New() *cobra.Command {
 For the mapping of the required permissions for specific actions or abilities
 and other important information, see [Access Control].

+ Note that to manage access control on service principals, use **[Account
+ Access Control Proxy](:service:accountaccesscontrolproxy)**.
+
 [Access Control]: https://docs.databricks.com/security/auth-authz/access-control/index.html`,
 GroupID: "iam",
 Annotations: map[string]string{
@@ -112,7 +115,7 @@ func newGet() *cobra.Command {
 REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following:
   authorization, clusters, cluster-policies, directories, experiments,
   files, instance-pools, jobs, notebooks, pipelines, registered-models,
-  repos, serving-endpoints, or sql-warehouses.
+  repos, serving-endpoints, or warehouses.
 REQUEST_OBJECT_ID: The id of the request object.`

 cmd.Annotations = make(map[string]string)
@@ -240,7 +243,7 @@ func newSet() *cobra.Command {
 REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following:
   authorization, clusters, cluster-policies, directories, experiments,
   files, instance-pools, jobs, notebooks, pipelines, registered-models,
-  repos, serving-endpoints, or sql-warehouses.
+  repos, serving-endpoints, or warehouses.
 REQUEST_OBJECT_ID: The id of the request object.`

 cmd.Annotations = make(map[string]string)
@@ -314,7 +317,7 @@ func newUpdate() *cobra.Command {
 REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following:
   authorization, clusters, cluster-policies, directories, experiments,
   files, instance-pools, jobs, notebooks, pipelines, registered-models,
-  repos, serving-endpoints, or sql-warehouses.
+  repos, serving-endpoints, or warehouses.
 REQUEST_OBJECT_ID: The id of the request object.`

 cmd.Annotations = make(map[string]string)
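The help-text change reflects that the request object type for SQL warehouse permissions is `warehouses`, not `sql-warehouses`. A hedged sketch of fetching permissions for that object type through the SDK; the `iam.GetPermissionRequest` and `AccessControlList` field names are assumptions based on my reading of the SDK, and the warehouse ID is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/iam"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Roughly the equivalent of `databricks permissions get warehouses <id>`.
	perms, err := w.Permissions.Get(ctx, iam.GetPermissionRequest{
		RequestObjectType: "warehouses",
		RequestObjectId:   "1234567890abcdef", // placeholder warehouse ID
	})
	if err != nil {
		panic(err)
	}
	for _, acl := range perms.AccessControlList {
		fmt.Println(acl.UserName, acl.GroupName, acl.ServicePrincipalName)
	}
}
```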
go.mod (2 changed lines)

@@ -5,7 +5,7 @@ go 1.21
 require (
 github.com/Masterminds/semver/v3 v3.2.1 // MIT
 github.com/briandowns/spinner v1.23.0 // Apache 2.0
-github.com/databricks/databricks-sdk-go v0.36.0 // Apache 2.0
+github.com/databricks/databricks-sdk-go v0.37.0 // Apache 2.0
 github.com/fatih/color v1.16.0 // MIT
 github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
 github.com/google/uuid v1.6.0 // BSD-3-Clause
@@ -30,8 +30,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/databricks/databricks-sdk-go v0.36.0 h1:QOO9VxBh6JmzzPpCHh0h1f4Ijk+Y3mqBtNN1nzp2Nq8=
-github.com/databricks/databricks-sdk-go v0.36.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM=
+github.com/databricks/databricks-sdk-go v0.37.0 h1:8ej3hNqfyfDNdV5YBjfLbq+p99JLu5NTtzwObbsIhRM=
+github.com/databricks/databricks-sdk-go v0.37.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=