Bump Databricks Go SDK version to v0.29

monalisa 2024-01-11 13:23:20 +05:30
parent f5c46478f4
commit 158cbb9826
25 changed files with 1515 additions and 109 deletions


@@ -1 +1 @@
-63caa3cb0c05045e81d3dcf2451fa990d8670f36
+a7a9dc025bb80303e676bf3708942c6aa06689f1

.gitattributes (vendored, 3 lines changed)

@@ -50,6 +50,7 @@ cmd/workspace/instance-pools/instance-pools.go linguist-generated=true
 cmd/workspace/instance-profiles/instance-profiles.go linguist-generated=true
 cmd/workspace/ip-access-lists/ip-access-lists.go linguist-generated=true
 cmd/workspace/jobs/jobs.go linguist-generated=true
+cmd/workspace/lakeview/lakeview.go linguist-generated=true
 cmd/workspace/libraries/libraries.go linguist-generated=true
 cmd/workspace/metastores/metastores.go linguist-generated=true
 cmd/workspace/model-registry/model-registry.go linguist-generated=true
@@ -78,6 +79,8 @@ cmd/workspace/tables/tables.go linguist-generated=true
 cmd/workspace/token-management/token-management.go linguist-generated=true
 cmd/workspace/tokens/tokens.go linguist-generated=true
 cmd/workspace/users/users.go linguist-generated=true
+cmd/workspace/vector-search-endpoints/vector-search-endpoints.go linguist-generated=true
+cmd/workspace/vector-search-indexes/vector-search-indexes.go linguist-generated=true
 cmd/workspace/volumes/volumes.go linguist-generated=true
 cmd/workspace/warehouses/warehouses.go linguist-generated=true
 cmd/workspace/workspace-bindings/workspace-bindings.go linguist-generated=true


@@ -182,7 +182,7 @@
 "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.",
 "properties": {
 "pause_status": {
-"description": "Indicate whether this schedule is paused or not."
+"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
 }
 }
 },
@@ -692,7 +692,7 @@
 "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
 "properties": {
 "pause_status": {
-"description": "Indicate whether this schedule is paused or not."
+"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
 },
 "quartz_cron_expression": {
 "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n"
@@ -1210,7 +1210,7 @@
 "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n"
 },
 "source": {
-"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
+"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
 }
 }
 },
@@ -1312,7 +1312,7 @@
 "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
 },
 "source": {
-"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
+"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
 }
 }
 },
@@ -1417,7 +1417,7 @@
 "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout."
 },
 "webhook_notifications": {
-"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
+"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
 "properties": {
 "on_duration_warning_threshold_exceeded": {
 "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@@ -1489,12 +1489,12 @@
 }
 },
 "pause_status": {
-"description": "Indicate whether this schedule is paused or not."
+"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
 }
 }
 },
 "webhook_notifications": {
-"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
+"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
 "properties": {
 "on_duration_warning_threshold_exceeded": {
 "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@@ -2541,7 +2541,7 @@
 "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.",
 "properties": {
 "pause_status": {
-"description": "Indicate whether this schedule is paused or not."
+"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
 }
 }
 },
@@ -3051,7 +3051,7 @@
 "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
 "properties": {
 "pause_status": {
-"description": "Indicate whether this schedule is paused or not."
+"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
 },
 "quartz_cron_expression": {
 "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n"
@@ -3569,7 +3569,7 @@
 "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n"
 },
 "source": {
-"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
+"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
 }
 }
 },
@@ -3671,7 +3671,7 @@
 "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
 },
 "source": {
-"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
+"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
 }
 }
 },
@@ -3776,7 +3776,7 @@
 "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout."
 },
 "webhook_notifications": {
-"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
+"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
 "properties": {
 "on_duration_warning_threshold_exceeded": {
 "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@@ -3848,12 +3848,12 @@
 }
 },
 "pause_status": {
-"description": "Indicate whether this schedule is paused or not."
+"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
 }
 }
 },
 "webhook_notifications": {
-"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
+"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
 "properties": {
 "on_duration_warning_threshold_exceeded": {
 "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@@ -4783,6 +4783,44 @@
 },
 "description": {
 "description": ""
+},
+"lookup": {
+"description": "",
+"properties": {
+"alert": {
+"description": ""
+},
+"cluster": {
+"description": ""
+},
+"cluster_policy": {
+"description": ""
+},
+"dashboard": {
+"description": ""
+},
+"instance_pool": {
+"description": ""
+},
+"job": {
+"description": ""
+},
+"metastore": {
+"description": ""
+},
+"pipeline": {
+"description": ""
+},
+"query": {
+"description": ""
+},
+"service_principal": {
+"description": ""
+},
+"warehouse": {
+"description": ""
+}
+}
 }
 }
 }


@@ -74,7 +74,9 @@ func newCreate() *cobra.Command {
 // TODO: short flags
 cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
-cmd.Use = "create"
+// TODO: array: ip_addresses
+cmd.Use = "create LABEL LIST_TYPE"
 cmd.Short = `Create access list.`
 cmd.Long = `Create access list.
@@ -91,10 +93,31 @@ func newCreate() *cobra.Command {
 * If the new list would block the calling user's current IP, error 400 is
 returned with error_code value INVALID_STATE.
-It can take a few minutes for the changes to take effect.`
+It can take a few minutes for the changes to take effect.
+Arguments:
+LABEL: Label for the IP access list. This **cannot** be empty.
+LIST_TYPE: Type of IP access list. Valid values are as follows and are
+case-sensitive:
+* ALLOW: An allow list. Include this IP or range. * BLOCK: A block
+list. Exclude this IP or range. IP addresses in the block list are
+excluded even if they are included in an allow list.`
 cmd.Annotations = make(map[string]string)
+cmd.Args = func(cmd *cobra.Command, args []string) error {
+if cmd.Flags().Changed("json") {
+err := cobra.ExactArgs(0)(cmd, args)
+if err != nil {
+return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'label', 'list_type' in your JSON input")
+}
+return nil
+}
+check := cobra.ExactArgs(2)
+return check(cmd, args)
+}
 cmd.PreRunE = root.MustAccountClient
 cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
 ctx := cmd.Context()
@@ -105,8 +128,15 @@ func newCreate() *cobra.Command {
 if err != nil {
 return err
 }
-} else {
-return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+}
+if !cmd.Flags().Changed("json") {
+createReq.Label = args[0]
+}
+if !cmd.Flags().Changed("json") {
+_, err = fmt.Sscan(args[1], &createReq.ListType)
+if err != nil {
+return fmt.Errorf("invalid LIST_TYPE: %s", args[1])
+}
 }
 response, err := a.IpAccessLists.Create(ctx, createReq)
@@ -157,7 +187,7 @@ func newDelete() *cobra.Command {
 Deletes an IP access list, specified by its list ID.
 Arguments:
-IP_ACCESS_LIST_ID: The ID for the corresponding IP access list.`
+IP_ACCESS_LIST_ID: The ID for the corresponding IP access list`
 cmd.Annotations = make(map[string]string)
@@ -233,7 +263,7 @@ func newGet() *cobra.Command {
 Gets an IP access list, specified by its list ID.
 Arguments:
-IP_ACCESS_LIST_ID: The ID for the corresponding IP access list.`
+IP_ACCESS_LIST_ID: The ID for the corresponding IP access list`
 cmd.Annotations = make(map[string]string)
@@ -352,9 +382,9 @@ func newReplace() *cobra.Command {
 // TODO: short flags
 cmd.Flags().Var(&replaceJson, "json", `either inline JSON string or @path/to/file.json with request body`)
-cmd.Flags().StringVar(&replaceReq.ListId, "list-id", replaceReq.ListId, `Universally unique identifier (UUID) of the IP access list.`)
+// TODO: array: ip_addresses
-cmd.Use = "replace"
+cmd.Use = "replace IP_ACCESS_LIST_ID LABEL LIST_TYPE ENABLED"
 cmd.Short = `Replace access list.`
 cmd.Long = `Replace access list.
@@ -367,10 +397,33 @@ func newReplace() *cobra.Command {
 counts as a single value. Attempts to exceed that number return error 400 with
 error_code value QUOTA_EXCEEDED. * If the resulting list would block the
 calling user's current IP, error 400 is returned with error_code value
-INVALID_STATE. It can take a few minutes for the changes to take effect.`
+INVALID_STATE. It can take a few minutes for the changes to take effect.
+Arguments:
+IP_ACCESS_LIST_ID: The ID for the corresponding IP access list
+LABEL: Label for the IP access list. This **cannot** be empty.
+LIST_TYPE: Type of IP access list. Valid values are as follows and are
+case-sensitive:
+* ALLOW: An allow list. Include this IP or range. * BLOCK: A block
+list. Exclude this IP or range. IP addresses in the block list are
+excluded even if they are included in an allow list.
+ENABLED: Specifies whether this IP access list is enabled.`
 cmd.Annotations = make(map[string]string)
+cmd.Args = func(cmd *cobra.Command, args []string) error {
+if cmd.Flags().Changed("json") {
+err := cobra.ExactArgs(1)(cmd, args)
+if err != nil {
+return fmt.Errorf("when --json flag is specified, provide only IP_ACCESS_LIST_ID as positional arguments. Provide 'label', 'list_type', 'enabled' in your JSON input")
+}
+return nil
+}
+check := cobra.ExactArgs(4)
+return check(cmd, args)
+}
 cmd.PreRunE = root.MustAccountClient
 cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
 ctx := cmd.Context()
@@ -381,8 +434,22 @@ func newReplace() *cobra.Command {
 if err != nil {
 return err
 }
-} else {
-return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+}
+replaceReq.IpAccessListId = args[0]
+if !cmd.Flags().Changed("json") {
+replaceReq.Label = args[1]
+}
+if !cmd.Flags().Changed("json") {
+_, err = fmt.Sscan(args[2], &replaceReq.ListType)
+if err != nil {
+return fmt.Errorf("invalid LIST_TYPE: %s", args[2])
+}
+}
+if !cmd.Flags().Changed("json") {
+_, err = fmt.Sscan(args[3], &replaceReq.Enabled)
+if err != nil {
+return fmt.Errorf("invalid ENABLED: %s", args[3])
+}
 }
 err = a.IpAccessLists.Replace(ctx, replaceReq)
@@ -428,9 +495,12 @@ func newUpdate() *cobra.Command {
 // TODO: short flags
 cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
-cmd.Flags().StringVar(&updateReq.ListId, "list-id", updateReq.ListId, `Universally unique identifier (UUID) of the IP access list.`)
+cmd.Flags().BoolVar(&updateReq.Enabled, "enabled", updateReq.Enabled, `Specifies whether this IP access list is enabled.`)
+// TODO: array: ip_addresses
+cmd.Flags().StringVar(&updateReq.Label, "label", updateReq.Label, `Label for the IP access list.`)
+cmd.Flags().Var(&updateReq.ListType, "list-type", `Type of IP access list. Supported values: [ALLOW, BLOCK]`)
-cmd.Use = "update"
+cmd.Use = "update IP_ACCESS_LIST_ID"
 cmd.Short = `Update access list.`
 cmd.Long = `Update access list.
@@ -447,7 +517,10 @@ func newUpdate() *cobra.Command {
 * If the updated list would block the calling user's current IP, error 400 is
 returned with error_code value INVALID_STATE.
-It can take a few minutes for the changes to take effect.`
+It can take a few minutes for the changes to take effect.
+Arguments:
+IP_ACCESS_LIST_ID: The ID for the corresponding IP access list`
 cmd.Annotations = make(map[string]string)
@@ -461,9 +534,25 @@ func newUpdate() *cobra.Command {
 if err != nil {
 return err
 }
-} else {
-return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
 }
+if len(args) == 0 {
+promptSpinner := cmdio.Spinner(ctx)
+promptSpinner <- "No IP_ACCESS_LIST_ID argument specified. Loading names for Account Ip Access Lists drop-down."
+names, err := a.IpAccessLists.IpAccessListInfoLabelToListIdMap(ctx)
+close(promptSpinner)
+if err != nil {
+return fmt.Errorf("failed to load names for Account Ip Access Lists drop-down. Please manually specify required arguments. Original error: %w", err)
+}
+id, err := cmdio.Select(ctx, names, "The ID for the corresponding IP access list")
+if err != nil {
+return err
+}
+args = append(args, id)
+}
+if len(args) != 1 {
+return fmt.Errorf("expected to have the id for the corresponding ip access list")
+}
+updateReq.IpAccessListId = args[0]
 err = a.IpAccessLists.Update(ctx, updateReq)
 if err != nil {


@@ -260,7 +260,7 @@ func newList() *cobra.Command {
 // TODO: short flags
 cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of clean rooms to return.`)
-cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Pagination token to go to next page based on previous query.`)
+cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)
 cmd.Use = "list"
 cmd.Short = `List clean rooms.`

cmd/workspace/cmd.go (generated, 6 lines changed)

@@ -27,6 +27,7 @@ import (
 instance_profiles "github.com/databricks/cli/cmd/workspace/instance-profiles"
 ip_access_lists "github.com/databricks/cli/cmd/workspace/ip-access-lists"
 jobs "github.com/databricks/cli/cmd/workspace/jobs"
+lakeview "github.com/databricks/cli/cmd/workspace/lakeview"
 libraries "github.com/databricks/cli/cmd/workspace/libraries"
 metastores "github.com/databricks/cli/cmd/workspace/metastores"
 model_registry "github.com/databricks/cli/cmd/workspace/model-registry"
@@ -55,6 +56,8 @@ import (
 token_management "github.com/databricks/cli/cmd/workspace/token-management"
 tokens "github.com/databricks/cli/cmd/workspace/tokens"
 users "github.com/databricks/cli/cmd/workspace/users"
+vector_search_endpoints "github.com/databricks/cli/cmd/workspace/vector-search-endpoints"
+vector_search_indexes "github.com/databricks/cli/cmd/workspace/vector-search-indexes"
 volumes "github.com/databricks/cli/cmd/workspace/volumes"
 warehouses "github.com/databricks/cli/cmd/workspace/warehouses"
 workspace "github.com/databricks/cli/cmd/workspace/workspace"
@@ -90,6 +93,7 @@ func All() []*cobra.Command {
 out = append(out, instance_profiles.New())
 out = append(out, ip_access_lists.New())
 out = append(out, jobs.New())
+out = append(out, lakeview.New())
 out = append(out, libraries.New())
 out = append(out, metastores.New())
 out = append(out, model_registry.New())
@@ -118,6 +122,8 @@ func All() []*cobra.Command {
 out = append(out, token_management.New())
 out = append(out, tokens.New())
 out = append(out, users.New())
+out = append(out, vector_search_endpoints.New())
+out = append(out, vector_search_indexes.New())
 out = append(out, volumes.New())
 out = append(out, warehouses.New())
 out = append(out, workspace.New())


@@ -121,7 +121,10 @@ func newDelete() *cobra.Command {
 cmd.Use = "delete ID"
 cmd.Short = `Remove widget.`
-cmd.Long = `Remove widget.`
+cmd.Long = `Remove widget.
+Arguments:
+ID: Widget ID returned by :method:dashboardwidgets/create`
 cmd.Annotations = make(map[string]string)


@@ -283,27 +283,43 @@ func init() {
 // Functions can be added from the `init()` function in manually curated files in this directory.
 var listOverrides []func(
 *cobra.Command,
+*catalog.ListExternalLocationsRequest,
 )
 func newList() *cobra.Command {
 cmd := &cobra.Command{}
+var listReq catalog.ListExternalLocationsRequest
+// TODO: short flags
+cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of external locations to return.`)
+cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)
 cmd.Use = "list"
 cmd.Short = `List external locations.`
 cmd.Long = `List external locations.
 Gets an array of external locations (__ExternalLocationInfo__ objects) from
 the metastore. The caller must be a metastore admin, the owner of the external
-location, or a user that has some privilege on the external location. There is
-no guarantee of a specific ordering of the elements in the array.`
+location, or a user that has some privilege on the external location. For
+unpaginated request, there is no guarantee of a specific ordering of the
+elements in the array. For paginated request, elements are ordered by their
+name.`
 cmd.Annotations = make(map[string]string)
+cmd.Args = func(cmd *cobra.Command, args []string) error {
+check := cobra.ExactArgs(0)
+return check(cmd, args)
+}
 cmd.PreRunE = root.MustWorkspaceClient
 cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
 ctx := cmd.Context()
 w := root.WorkspaceClient(ctx)
-response, err := w.ExternalLocations.ListAll(ctx)
+response, err := w.ExternalLocations.ListAll(ctx, listReq)
 if err != nil {
 return err
 }
@@ -316,7 +332,7 @@ func newList() *cobra.Command {
 // Apply optional overrides to this command.
 for _, fn := range listOverrides {
-fn(cmd)
+fn(cmd, &listReq)
 }
 return cmd
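
Side note on the SDK bump: the listing call above now goes through a request struct so the new max-results/page-token flags can be forwarded. A minimal, illustrative sketch of the same paginated listing done directly against databricks-sdk-go (the client setup and the MaxResults value are assumptions, not part of this commit):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	// Credentials are resolved from the environment or a configured profile.
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}
	// ListAll now takes a request struct carrying the pagination fields.
	locations, err := w.ExternalLocations.ListAll(ctx, catalog.ListExternalLocationsRequest{
		MaxResults: 50, // illustrative page size
	})
	if err != nil {
		panic(err)
	}
	for _, loc := range locations {
		fmt.Println(loc.Name)
	}
}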


@@ -292,6 +292,9 @@ func newList() *cobra.Command {
 // TODO: short flags
+cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of functions to return.`)
+cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)
 cmd.Use = "list CATALOG_NAME SCHEMA_NAME"
 cmd.Short = `List functions.`
 cmd.Long = `List functions.
@@ -301,8 +304,9 @@ func newList() *cobra.Command {
 the user must have the **USE_CATALOG** privilege on the catalog and the
 **USE_SCHEMA** privilege on the schema, and the output list contains only
 functions for which either the user has the **EXECUTE** privilege or the user
-is the owner. There is no guarantee of a specific ordering of the elements in
-the array.
+is the owner. For unpaginated request, there is no guarantee of a specific
+ordering of the elements in the array. For paginated request, elements are
+ordered by their name.
 Arguments:
 CATALOG_NAME: Name of parent catalog for functions of interest.


@@ -73,7 +73,9 @@ func newCreate() *cobra.Command {
 // TODO: short flags
 cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
-cmd.Use = "create"
+// TODO: array: ip_addresses
+cmd.Use = "create LABEL LIST_TYPE"
 cmd.Short = `Create access list.`
 cmd.Long = `Create access list.
@@ -92,10 +94,31 @@ func newCreate() *cobra.Command {
 It can take a few minutes for the changes to take effect. **Note**: Your new
 IP access list has no effect until you enable the feature. See
-:method:workspaceconf/setStatus`
+:method:workspaceconf/setStatus
+Arguments:
+LABEL: Label for the IP access list. This **cannot** be empty.
+LIST_TYPE: Type of IP access list. Valid values are as follows and are
+case-sensitive:
+* ALLOW: An allow list. Include this IP or range. * BLOCK: A block
+list. Exclude this IP or range. IP addresses in the block list are
+excluded even if they are included in an allow list.`
 cmd.Annotations = make(map[string]string)
+cmd.Args = func(cmd *cobra.Command, args []string) error {
+if cmd.Flags().Changed("json") {
+err := cobra.ExactArgs(0)(cmd, args)
+if err != nil {
+return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'label', 'list_type' in your JSON input")
+}
+return nil
+}
+check := cobra.ExactArgs(2)
+return check(cmd, args)
+}
 cmd.PreRunE = root.MustWorkspaceClient
 cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
 ctx := cmd.Context()
@@ -106,8 +129,15 @@ func newCreate() *cobra.Command {
 if err != nil {
 return err
 }
-} else {
-return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+}
+if !cmd.Flags().Changed("json") {
+createReq.Label = args[0]
+}
+if !cmd.Flags().Changed("json") {
+_, err = fmt.Sscan(args[1], &createReq.ListType)
+if err != nil {
+return fmt.Errorf("invalid LIST_TYPE: %s", args[1])
+}
 }
 response, err := w.IpAccessLists.Create(ctx, createReq)
@@ -158,7 +188,7 @@ func newDelete() *cobra.Command {
 Deletes an IP access list, specified by its list ID.
 Arguments:
-IP_ACCESS_LIST_ID: The ID for the corresponding IP access list to modify.`
+IP_ACCESS_LIST_ID: The ID for the corresponding IP access list`
 cmd.Annotations = make(map[string]string)
@@ -175,14 +205,14 @@ func newDelete() *cobra.Command {
 if err != nil {
 return fmt.Errorf("failed to load names for Ip Access Lists drop-down. Please manually specify required arguments. Original error: %w", err)
 }
-id, err := cmdio.Select(ctx, names, "The ID for the corresponding IP access list to modify")
+id, err := cmdio.Select(ctx, names, "The ID for the corresponding IP access list")
 if err != nil {
 return err
 }
 args = append(args, id)
 }
 if len(args) != 1 {
-return fmt.Errorf("expected to have the id for the corresponding ip access list to modify")
+return fmt.Errorf("expected to have the id for the corresponding ip access list")
 }
 deleteReq.IpAccessListId = args[0]
@@ -234,7 +264,7 @@ func newGet() *cobra.Command {
 Gets an IP access list, specified by its list ID.
 Arguments:
-IP_ACCESS_LIST_ID: The ID for the corresponding IP access list to modify.`
+IP_ACCESS_LIST_ID: The ID for the corresponding IP access list`
 cmd.Annotations = make(map[string]string)
@@ -251,14 +281,14 @@ func newGet() *cobra.Command {
 if err != nil {
 return fmt.Errorf("failed to load names for Ip Access Lists drop-down. Please manually specify required arguments. Original error: %w", err)
 }
-id, err := cmdio.Select(ctx, names, "The ID for the corresponding IP access list to modify")
+id, err := cmdio.Select(ctx, names, "The ID for the corresponding IP access list")
 if err != nil {
 return err
 }
 args = append(args, id)
 }
 if len(args) != 1 {
-return fmt.Errorf("expected to have the id for the corresponding ip access list to modify")
+return fmt.Errorf("expected to have the id for the corresponding ip access list")
 }
 getReq.IpAccessListId = args[0]
@@ -353,9 +383,9 @@ func newReplace() *cobra.Command {
 // TODO: short flags
 cmd.Flags().Var(&replaceJson, "json", `either inline JSON string or @path/to/file.json with request body`)
-cmd.Flags().StringVar(&replaceReq.ListId, "list-id", replaceReq.ListId, `Universally unique identifier (UUID) of the IP access list.`)
+// TODO: array: ip_addresses
-cmd.Use = "replace"
+cmd.Use = "replace IP_ACCESS_LIST_ID LABEL LIST_TYPE ENABLED"
 cmd.Short = `Replace access list.`
 cmd.Long = `Replace access list.
@@ -370,10 +400,33 @@ func newReplace() *cobra.Command {
 calling user's current IP, error 400 is returned with error_code value
 INVALID_STATE. It can take a few minutes for the changes to take effect.
 Note that your resulting IP access list has no effect until you enable the
-feature. See :method:workspaceconf/setStatus.`
+feature. See :method:workspaceconf/setStatus.
+Arguments:
+IP_ACCESS_LIST_ID: The ID for the corresponding IP access list
+LABEL: Label for the IP access list. This **cannot** be empty.
+LIST_TYPE: Type of IP access list. Valid values are as follows and are
+case-sensitive:
+* ALLOW: An allow list. Include this IP or range. * BLOCK: A block
+list. Exclude this IP or range. IP addresses in the block list are
+excluded even if they are included in an allow list.
+ENABLED: Specifies whether this IP access list is enabled.`
 cmd.Annotations = make(map[string]string)
+cmd.Args = func(cmd *cobra.Command, args []string) error {
+if cmd.Flags().Changed("json") {
+err := cobra.ExactArgs(1)(cmd, args)
+if err != nil {
+return fmt.Errorf("when --json flag is specified, provide only IP_ACCESS_LIST_ID as positional arguments. Provide 'label', 'list_type', 'enabled' in your JSON input")
+}
+return nil
+}
+check := cobra.ExactArgs(4)
+return check(cmd, args)
+}
 cmd.PreRunE = root.MustWorkspaceClient
 cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
 ctx := cmd.Context()
@@ -384,8 +437,22 @@ func newReplace() *cobra.Command {
 if err != nil {
 return err
 }
-} else {
-return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+}
+replaceReq.IpAccessListId = args[0]
+if !cmd.Flags().Changed("json") {
+replaceReq.Label = args[1]
+}
+if !cmd.Flags().Changed("json") {
+_, err = fmt.Sscan(args[2], &replaceReq.ListType)
+if err != nil {
+return fmt.Errorf("invalid LIST_TYPE: %s", args[2])
+}
+}
+if !cmd.Flags().Changed("json") {
+_, err = fmt.Sscan(args[3], &replaceReq.Enabled)
+if err != nil {
+return fmt.Errorf("invalid ENABLED: %s", args[3])
+}
 }
 err = w.IpAccessLists.Replace(ctx, replaceReq)
@@ -431,9 +498,12 @@ func newUpdate() *cobra.Command {
 // TODO: short flags
 cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
-cmd.Flags().StringVar(&updateReq.ListId, "list-id", updateReq.ListId, `Universally unique identifier (UUID) of the IP access list.`)
+cmd.Flags().BoolVar(&updateReq.Enabled, "enabled", updateReq.Enabled, `Specifies whether this IP access list is enabled.`)
+// TODO: array: ip_addresses
+cmd.Flags().StringVar(&updateReq.Label, "label", updateReq.Label, `Label for the IP access list.`)
+cmd.Flags().Var(&updateReq.ListType, "list-type", `Type of IP access list. Supported values: [ALLOW, BLOCK]`)
-cmd.Use = "update"
+cmd.Use = "update IP_ACCESS_LIST_ID"
 cmd.Short = `Update access list.`
 cmd.Long = `Update access list.
@@ -452,7 +522,10 @@ func newUpdate() *cobra.Command {
 It can take a few minutes for the changes to take effect. Note that your
 resulting IP access list has no effect until you enable the feature. See
-:method:workspaceconf/setStatus.`
+:method:workspaceconf/setStatus.
+Arguments:
+IP_ACCESS_LIST_ID: The ID for the corresponding IP access list`
 cmd.Annotations = make(map[string]string)
@@ -466,9 +539,25 @@ func newUpdate() *cobra.Command {
 if err != nil {
 return err
 }
-} else {
-return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
 }
+if len(args) == 0 {
+promptSpinner := cmdio.Spinner(ctx)
+promptSpinner <- "No IP_ACCESS_LIST_ID argument specified. Loading names for Ip Access Lists drop-down."
+names, err := w.IpAccessLists.IpAccessListInfoLabelToListIdMap(ctx)
+close(promptSpinner)
+if err != nil {
+return fmt.Errorf("failed to load names for Ip Access Lists drop-down. Please manually specify required arguments. Original error: %w", err)
+}
+id, err := cmdio.Select(ctx, names, "The ID for the corresponding IP access list")
+if err != nil {
+return err
+}
+args = append(args, id)
+}
+if len(args) != 1 {
+return fmt.Errorf("expected to have the id for the corresponding ip access list")
+}
+updateReq.IpAccessListId = args[0]
 err = w.IpAccessLists.Update(ctx, updateReq)
 if err != nil {
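
For orientation, the regenerated create command above now takes LABEL and LIST_TYPE positionally and maps them onto the SDK request it sends. A rough sketch of the equivalent direct SDK call follows; the settings.CreateIpAccessList type and the ListType constants are assumed from the SDK's settings service (they are not shown in this diff), and the label and CIDR values are made up:

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}
	// Roughly what `ip-access-lists create <LABEL> <LIST_TYPE>` does once the
	// ip_addresses array has been supplied through the --json flag.
	resp, err := w.IpAccessLists.Create(ctx, settings.CreateIpAccessList{
		Label:       "office",                     // LABEL positional argument
		ListType:    settings.ListTypeAllow,       // LIST_TYPE positional argument
		IpAddresses: []string{"192.168.100.0/22"}, // still passed via --json in the CLI
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("created IP access list: %+v\n", resp)
}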


@@ -1251,11 +1251,11 @@ func newReset() *cobra.Command {
 cmd.Flags().Var(&resetJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 cmd.Use = "reset"
-cmd.Short = `Overwrite all settings for a job.`
-cmd.Long = `Overwrite all settings for a job.
-Overwrite all settings for the given job. Use the Update endpoint to update
-job settings partially.`
+cmd.Short = `Update all job settings (reset).`
+cmd.Long = `Update all job settings (reset).
+Overwrite all settings for the given job. Use the [_Update_
+endpoint](:method:jobs/update) to update job settings partially.`
 cmd.Annotations = make(map[string]string)
@@ -1638,11 +1638,11 @@ func newUpdate() *cobra.Command {
 // TODO: complex arg: new_settings
 cmd.Use = "update JOB_ID"
-cmd.Short = `Partially update a job.`
-cmd.Long = `Partially update a job.
-Add, update, or remove specific settings of an existing job. Use the ResetJob
-to overwrite all job settings.
+cmd.Short = `Update job settings partially.`
+cmd.Long = `Update job settings partially.
+Add, update, or remove specific settings of an existing job. Use the [_Reset_
+endpoint](:method:jobs/reset) to overwrite all job settings.
 Arguments:
 JOB_ID: The canonical identifier of the job to update. This field is required.`

cmd/workspace/lakeview/lakeview.go (generated, new executable file, 115 lines)

@@ -0,0 +1,115 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package lakeview
import (
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/dashboards"
"github.com/spf13/cobra"
)
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "lakeview",
Short: `These APIs provide specific management operations for Lakeview dashboards.`,
Long: `These APIs provide specific management operations for Lakeview dashboards.
Generic resource management can be done with Workspace API (import, export,
get-status, list, delete).`,
GroupID: "dashboards",
Annotations: map[string]string{
"package": "dashboards",
},
// This service is being previewed; hide from help output.
Hidden: true,
}
// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}
return cmd
}
// start publish command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var publishOverrides []func(
*cobra.Command,
*dashboards.PublishRequest,
)
func newPublish() *cobra.Command {
cmd := &cobra.Command{}
var publishReq dashboards.PublishRequest
var publishJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&publishJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().BoolVar(&publishReq.EmbedCredentials, "embed-credentials", publishReq.EmbedCredentials, `Flag to indicate if the publisher's credentials should be embedded in the published dashboard.`)
cmd.Flags().StringVar(&publishReq.WarehouseId, "warehouse-id", publishReq.WarehouseId, `The ID of the warehouse that can be used to override the warehouse which was set in the draft.`)
cmd.Use = "publish DASHBOARD_ID"
cmd.Short = `Publish dashboard.`
cmd.Long = `Publish dashboard.
Publish the current draft dashboard.
Arguments:
DASHBOARD_ID: UUID identifying the dashboard to be published.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = publishJson.Unmarshal(&publishReq)
if err != nil {
return err
}
}
publishReq.DashboardId = args[0]
err = w.Lakeview.Publish(ctx, publishReq)
if err != nil {
return err
}
return nil
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range publishOverrides {
fn(cmd, &publishReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newPublish())
})
}
// end service Lakeview
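
The new lakeview group currently wraps a single publish call. A bare-bones sketch of the same operation done directly with the SDK is shown below; the dashboard and warehouse IDs are placeholders, not values from this commit:

package main

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}
	// Mirrors `databricks lakeview publish DASHBOARD_ID --embed-credentials --warehouse-id ...`.
	err = w.Lakeview.Publish(ctx, dashboards.PublishRequest{
		DashboardId:      "01ef00000000000000000000000000ab", // placeholder UUID
		EmbedCredentials: true,
		WarehouseId:      "placeholder-warehouse-id",
	})
	if err != nil {
		panic(err)
	}
}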


@@ -277,8 +277,8 @@ func newList() *cobra.Command {
 // TODO: short flags
-cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Max number of model versions to return.`)
-cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token to send for the next page of results (pagination).`)
+cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of model versions to return.`)
+cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)
 cmd.Use = "list FULL_NAME"
 cmd.Short = `List Model Versions.`


@@ -103,8 +103,11 @@ func newGet() *cobra.Command {
 parent objects or root object.
 Arguments:
-REQUEST_OBJECT_TYPE: <needs content>
-REQUEST_OBJECT_ID: `
+REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following:
+authorization, clusters, cluster-policies, directories, experiments,
+files, instance-pools, jobs, notebooks, pipelines, registered-models,
+repos, serving-endpoints, or sql-warehouses.
+REQUEST_OBJECT_ID: The id of the request object.`
 cmd.Annotations = make(map[string]string)
@@ -240,8 +243,11 @@ func newSet() *cobra.Command {
 parent objects or root object.
 Arguments:
-REQUEST_OBJECT_TYPE: <needs content>
-REQUEST_OBJECT_ID: `
+REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following:
+authorization, clusters, cluster-policies, directories, experiments,
+files, instance-pools, jobs, notebooks, pipelines, registered-models,
+repos, serving-endpoints, or sql-warehouses.
+REQUEST_OBJECT_ID: The id of the request object.`
 cmd.Annotations = make(map[string]string)
@@ -317,8 +323,11 @@ func newUpdate() *cobra.Command {
 their parent objects or root object.
 Arguments:
-REQUEST_OBJECT_TYPE: <needs content>
-REQUEST_OBJECT_ID: `
+REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following:
+authorization, clusters, cluster-policies, directories, experiments,
+files, instance-pools, jobs, notebooks, pipelines, registered-models,
+repos, serving-endpoints, or sql-warehouses.
+REQUEST_OBJECT_ID: The id of the request object.`
 cmd.Annotations = make(map[string]string)


@@ -417,7 +417,7 @@ func newUpdate() *cobra.Command {
 // TODO: short flags
 cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
-cmd.Flags().StringVar(&updateReq.DataSourceId, "data-source-id", updateReq.DataSourceId, `Data source ID.`)
+cmd.Flags().StringVar(&updateReq.DataSourceId, "data-source-id", updateReq.DataSourceId, `Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID.`)
 cmd.Flags().StringVar(&updateReq.Description, "description", updateReq.Description, `General description that conveys additional information about this query such as usage notes.`)
 cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The title of this query that appears in list views, widget headings, and on the query page.`)
 // TODO: any: options


@@ -121,7 +121,10 @@ func newDelete() *cobra.Command {
 cmd.Use = "delete ID"
 cmd.Short = `Remove visualization.`
-cmd.Long = `Remove visualization.`
+cmd.Long = `Remove visualization.
+Arguments:
+ID: Widget ID returned by :method:queryvizualisations/create`
 cmd.Annotations = make(map[string]string)


@ -301,6 +301,9 @@ func newList() *cobra.Command {
// TODO: short flags // TODO: short flags
cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of schemas to return.`)
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)
cmd.Use = "list CATALOG_NAME" cmd.Use = "list CATALOG_NAME"
cmd.Short = `List schemas.` cmd.Short = `List schemas.`
cmd.Long = `List schemas. cmd.Long = `List schemas.
@ -308,8 +311,10 @@ func newList() *cobra.Command {
Gets an array of schemas for a catalog in the metastore. If the caller is the
metastore admin or the owner of the parent catalog, all schemas for the
catalog will be retrieved. Otherwise, only schemas owned by the caller (or for
-which the caller has the **USE_SCHEMA** privilege) will be retrieved. There is
-no guarantee of a specific ordering of the elements in the array.
+which the caller has the **USE_SCHEMA** privilege) will be retrieved. For
+unpaginated request, there is no guarantee of a specific ordering of the
+elements in the array. For paginated request, elements are ordered by their
+name.
Arguments:
CATALOG_NAME: Parent catalog for schemas of interest.`
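As a rough sketch of what the new pagination parameters mean for SDK callers (the catalog.ListSchemasRequest type name and the w.Schemas accessor are assumptions based on the SDK's naming conventions; only the MaxResults and PageToken fields and the ordering behavior come from the diff above):

package examples

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

// listSchemas drains all pages of schemas in a catalog. MaxResults bounds the
// page size and PageToken continues a previous query, mirroring the new
// --max-results and --page-token flags above.
func listSchemas(ctx context.Context, w *databricks.WorkspaceClient) error {
	schemas, err := w.Schemas.ListAll(ctx, catalog.ListSchemasRequest{
		CatalogName: "main", // hypothetical catalog name
		MaxResults:  50,
	})
	if err != nil {
		return err
	}
	for _, s := range schemas {
		fmt.Println(s.FullName)
	}
	return nil
}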

View File

@ -973,20 +973,29 @@ func newUpdateConfig() *cobra.Command {
cmd.Flags().Var(&updateConfigJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: complex arg: auto_capture_config
+// TODO: array: served_entities
// TODO: array: served_models
// TODO: complex arg: traffic_config
-cmd.Use = "update-config"
+cmd.Use = "update-config NAME"
cmd.Short = `Update a serving endpoint with a new config.`
cmd.Long = `Update a serving endpoint with a new config.
Updates any combination of the serving endpoint's served entities, the compute
configuration of those served entities, and the endpoint's traffic config. An
endpoint that already has an update in progress can not be updated until the
-current update completes or fails.`
+current update completes or fails.
+Arguments:
+NAME: The name of the serving endpoint to update. This field is required.`
cmd.Annotations = make(map[string]string)
+cmd.Args = func(cmd *cobra.Command, args []string) error {
+check := cobra.ExactArgs(1)
+return check(cmd, args)
+}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
@ -997,9 +1006,8 @@ func newUpdateConfig() *cobra.Command {
if err != nil {
return err
}
-} else {
-return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
+updateConfigReq.Name = args[0]
wait, err := w.ServingEndpoints.UpdateConfig(ctx, updateConfigReq)
if err != nil {
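A hedged sketch of the equivalent SDK call after this change, where the endpoint name is set explicitly on the request rather than being required inside the JSON body. The request type name serving.EndpointCoreConfigInput is an assumption; only the Name field, the UpdateConfig call, and its waiter are shown in the generated code:

package examples

import (
	"context"
	"time"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/serving"
)

// updateEndpointConfig pushes a new config to an existing serving endpoint and
// waits for the update to finish, mirroring the updated CLI flow above.
func updateEndpointConfig(ctx context.Context, w *databricks.WorkspaceClient) error {
	req := serving.EndpointCoreConfigInput{ // type name assumed, not shown in the diff
		Name: "my-endpoint", // now also the NAME positional argument of update-config
		// served_entities / served_models / traffic_config would normally be set here
		// (still TODO complex args in the generated command, usually supplied via --json).
	}
	wait, err := w.ServingEndpoints.UpdateConfig(ctx, req)
	if err != nil {
		return err
	}
	_, err = wait.GetWithTimeout(20 * time.Minute)
	return err
}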

View File

@ -175,7 +175,7 @@ func newDelete() *cobra.Command {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No NAME argument specified. Loading names for Storage Credentials drop-down."
-names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx)
+names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx, catalog.ListStorageCredentialsRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Storage Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
@ -253,7 +253,7 @@ func newGet() *cobra.Command {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No NAME argument specified. Loading names for Storage Credentials drop-down."
-names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx)
+names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx, catalog.ListStorageCredentialsRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Storage Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
@ -300,28 +300,43 @@ func init() {
// Functions can be added from the `init()` function in manually curated files in this directory.
var listOverrides []func(
*cobra.Command,
+*catalog.ListStorageCredentialsRequest,
)
func newList() *cobra.Command {
cmd := &cobra.Command{}
+var listReq catalog.ListStorageCredentialsRequest
+// TODO: short flags
+cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of storage credentials to return.`)
+cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)
cmd.Use = "list"
cmd.Short = `List credentials.`
cmd.Long = `List credentials.
Gets an array of storage credentials (as __StorageCredentialInfo__ objects).
The array is limited to only those storage credentials the caller has
-permission to access. If the caller is a metastore admin, all storage
-credentials will be retrieved. There is no guarantee of a specific ordering of
-the elements in the array.`
+permission to access. If the caller is a metastore admin, retrieval of
+credentials is unrestricted. For unpaginated request, there is no guarantee of
+a specific ordering of the elements in the array. For paginated request,
+elements are ordered by their name.`
cmd.Annotations = make(map[string]string)
+cmd.Args = func(cmd *cobra.Command, args []string) error {
+check := cobra.ExactArgs(0)
+return check(cmd, args)
+}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
-response, err := w.StorageCredentials.ListAll(ctx)
+response, err := w.StorageCredentials.ListAll(ctx, listReq)
if err != nil {
return err
}
@ -334,7 +349,7 @@ func newList() *cobra.Command {
// Apply optional overrides to this command.
for _, fn := range listOverrides {
-fn(cmd)
+fn(cmd, &listReq)
}
return cmd
@ -401,7 +416,7 @@ func newUpdate() *cobra.Command {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No NAME argument specified. Loading names for Storage Credentials drop-down."
-names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx)
+names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx, catalog.ListStorageCredentialsRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Storage Credentials drop-down. Please manually specify required arguments. Original error: %w", err)

View File

@ -222,7 +222,9 @@ func newList() *cobra.Command {
// TODO: short flags
cmd.Flags().BoolVar(&listReq.IncludeDeltaMetadata, "include-delta-metadata", listReq.IncludeDeltaMetadata, `Whether delta metadata should be included in the response.`)
-cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of tables to return (page length).`)
+cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of tables to return.`)
+cmd.Flags().BoolVar(&listReq.OmitColumns, "omit-columns", listReq.OmitColumns, `Whether to omit the columns of the table from the response or not.`)
+cmd.Flags().BoolVar(&listReq.OmitProperties, "omit-properties", listReq.OmitProperties, `Whether to omit the properties of the table from the response or not.`)
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token to send for the next page of results (pagination).`)
cmd.Use = "list CATALOG_NAME SCHEMA_NAME"
@ -296,8 +298,8 @@ func newListSummaries() *cobra.Command {
// TODO: short flags
-cmd.Flags().IntVar(&listSummariesReq.MaxResults, "max-results", listSummariesReq.MaxResults, `Maximum number of tables to return (page length).`)
+cmd.Flags().IntVar(&listSummariesReq.MaxResults, "max-results", listSummariesReq.MaxResults, `Maximum number of summaries for tables to return.`)
-cmd.Flags().StringVar(&listSummariesReq.PageToken, "page-token", listSummariesReq.PageToken, `Opaque token to send for the next page of results (pagination).`)
+cmd.Flags().StringVar(&listSummariesReq.PageToken, "page-token", listSummariesReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)
cmd.Flags().StringVar(&listSummariesReq.SchemaNamePattern, "schema-name-pattern", listSummariesReq.SchemaNamePattern, `A sql LIKE pattern (% and _) for schema names.`)
cmd.Flags().StringVar(&listSummariesReq.TableNamePattern, "table-name-pattern", listSummariesReq.TableNamePattern, `A sql LIKE pattern (% and _) for table names.`)
@ -308,11 +310,11 @@ func newListSummaries() *cobra.Command {
Gets an array of summaries for tables for a schema and catalog within the
metastore. The table summaries returned are either:
-* summaries for all tables (within the current metastore and parent catalog
-and schema), when the user is a metastore admin, or: * summaries for all
-tables and schemas (within the current metastore and parent catalog) for which
-the user has ownership or the **SELECT** privilege on the table and ownership
-or **USE_SCHEMA** privilege on the schema, provided that the user also has
+* summaries for tables (within the current metastore and parent catalog and
+schema), when the user is a metastore admin, or: * summaries for tables and
+schemas (within the current metastore and parent catalog) for which the user
+has ownership or the **SELECT** privilege on the table and ownership or
+**USE_SCHEMA** privilege on the schema, provided that the user also has
ownership or the **USE_CATALOG** privilege on the parent catalog.
There is no guarantee of a specific ordering of the elements in the array.

View File

@ -156,7 +156,7 @@ func newDelete() *cobra.Command {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No TOKEN_ID argument specified. Loading names for Tokens drop-down."
-names, err := w.Tokens.TokenInfoCommentToTokenIdMap(ctx)
+names, err := w.Tokens.PublicTokenInfoCommentToTokenIdMap(ctx)
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Tokens drop-down. Please manually specify required arguments. Original error: %w", err)

View File

@ -0,0 +1,336 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package vector_search_endpoints
import (
"fmt"
"time"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/vectorsearch"
"github.com/spf13/cobra"
)
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "vector-search-endpoints",
Short: `**Endpoint**: Represents the compute resources to host vector search indexes.`,
Long: `**Endpoint**: Represents the compute resources to host vector search indexes.`,
GroupID: "vectorsearch",
Annotations: map[string]string{
"package": "vectorsearch",
},
}
// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}
return cmd
}
// start create-endpoint command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var createEndpointOverrides []func(
*cobra.Command,
*vectorsearch.CreateEndpoint,
)
func newCreateEndpoint() *cobra.Command {
cmd := &cobra.Command{}
var createEndpointReq vectorsearch.CreateEndpoint
var createEndpointJson flags.JsonFlag
var createEndpointSkipWait bool
var createEndpointTimeout time.Duration
cmd.Flags().BoolVar(&createEndpointSkipWait, "no-wait", createEndpointSkipWait, `do not wait to reach ONLINE state`)
cmd.Flags().DurationVar(&createEndpointTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach ONLINE state`)
// TODO: short flags
cmd.Flags().Var(&createEndpointJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Use = "create-endpoint NAME ENDPOINT_TYPE"
cmd.Short = `Create an endpoint.`
cmd.Long = `Create an endpoint.
Create a new endpoint.
Arguments:
NAME: Name of endpoint
ENDPOINT_TYPE: Type of endpoint.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("json") {
err := cobra.ExactArgs(0)(cmd, args)
if err != nil {
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'endpoint_type' in your JSON input")
}
return nil
}
check := cobra.ExactArgs(2)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = createEndpointJson.Unmarshal(&createEndpointReq)
if err != nil {
return err
}
}
if !cmd.Flags().Changed("json") {
createEndpointReq.Name = args[0]
}
if !cmd.Flags().Changed("json") {
_, err = fmt.Sscan(args[1], &createEndpointReq.EndpointType)
if err != nil {
return fmt.Errorf("invalid ENDPOINT_TYPE: %s", args[1])
}
}
wait, err := w.VectorSearchEndpoints.CreateEndpoint(ctx, createEndpointReq)
if err != nil {
return err
}
if createEndpointSkipWait {
return cmdio.Render(ctx, wait.Response)
}
spinner := cmdio.Spinner(ctx)
info, err := wait.OnProgress(func(i *vectorsearch.EndpointInfo) {
if i.EndpointStatus == nil {
return
}
status := i.EndpointStatus.State
statusMessage := fmt.Sprintf("current status: %s", status)
if i.EndpointStatus != nil {
statusMessage = i.EndpointStatus.Message
}
spinner <- statusMessage
}).GetWithTimeout(createEndpointTimeout)
close(spinner)
if err != nil {
return err
}
return cmdio.Render(ctx, info)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range createEndpointOverrides {
fn(cmd, &createEndpointReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newCreateEndpoint())
})
}
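For comparison, the same create-and-wait flow through the Go SDK directly. Everything below except the example endpoint name and the "STANDARD" endpoint type value (assumed to be a valid value) is taken from the generated command above:

package examples

import (
	"context"
	"fmt"
	"time"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/vectorsearch"
)

// createVectorSearchEndpoint provisions an endpoint and blocks until it is ONLINE.
func createVectorSearchEndpoint(ctx context.Context, w *databricks.WorkspaceClient) error {
	req := vectorsearch.CreateEndpoint{Name: "my-endpoint"}
	// EndpointType is an enum; "STANDARD" is an assumed value, parsed the same
	// way the generated command parses its ENDPOINT_TYPE argument.
	if _, err := fmt.Sscan("STANDARD", &req.EndpointType); err != nil {
		return err
	}
	wait, err := w.VectorSearchEndpoints.CreateEndpoint(ctx, req)
	if err != nil {
		return err
	}
	// Same waiter the CLI uses: poll until the endpoint is ONLINE or the timeout expires.
	info, err := wait.GetWithTimeout(20 * time.Minute)
	if err != nil {
		return err
	}
	if info.EndpointStatus != nil {
		fmt.Println(info.EndpointStatus.State)
	}
	return nil
}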
// start delete-endpoint command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var deleteEndpointOverrides []func(
*cobra.Command,
*vectorsearch.DeleteEndpointRequest,
)
func newDeleteEndpoint() *cobra.Command {
cmd := &cobra.Command{}
var deleteEndpointReq vectorsearch.DeleteEndpointRequest
// TODO: short flags
cmd.Use = "delete-endpoint ENDPOINT_NAME NAME"
cmd.Short = `Delete an endpoint.`
cmd.Long = `Delete an endpoint.
Arguments:
ENDPOINT_NAME: Name of the endpoint
NAME: Name of the endpoint to delete`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(2)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
deleteEndpointReq.EndpointName = args[0]
deleteEndpointReq.Name = args[1]
err = w.VectorSearchEndpoints.DeleteEndpoint(ctx, deleteEndpointReq)
if err != nil {
return err
}
return nil
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range deleteEndpointOverrides {
fn(cmd, &deleteEndpointReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newDeleteEndpoint())
})
}
// start get-endpoint command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getEndpointOverrides []func(
*cobra.Command,
*vectorsearch.GetEndpointRequest,
)
func newGetEndpoint() *cobra.Command {
cmd := &cobra.Command{}
var getEndpointReq vectorsearch.GetEndpointRequest
// TODO: short flags
cmd.Use = "get-endpoint ENDPOINT_NAME"
cmd.Short = `Get an endpoint.`
cmd.Long = `Get an endpoint.
Arguments:
ENDPOINT_NAME: Name of the endpoint`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
getEndpointReq.EndpointName = args[0]
response, err := w.VectorSearchEndpoints.GetEndpoint(ctx, getEndpointReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range getEndpointOverrides {
fn(cmd, &getEndpointReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newGetEndpoint())
})
}
// start list-endpoints command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listEndpointsOverrides []func(
*cobra.Command,
*vectorsearch.ListEndpointsRequest,
)
func newListEndpoints() *cobra.Command {
cmd := &cobra.Command{}
var listEndpointsReq vectorsearch.ListEndpointsRequest
// TODO: short flags
cmd.Flags().StringVar(&listEndpointsReq.PageToken, "page-token", listEndpointsReq.PageToken, `Token for pagination.`)
cmd.Use = "list-endpoints"
cmd.Short = `List all endpoints.`
cmd.Long = `List all endpoints.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(0)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.VectorSearchEndpoints.ListEndpointsAll(ctx, listEndpointsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range listEndpointsOverrides {
fn(cmd, &listEndpointsReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newListEndpoints())
})
}
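A corresponding SDK sketch for listing; ListEndpointsAll and vectorsearch.ListEndpointsRequest come from the command above, while the Name field on EndpointInfo is an assumption:

package examples

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/vectorsearch"
)

// listVectorSearchEndpoints drains every page of endpoints and prints their names.
func listVectorSearchEndpoints(ctx context.Context, w *databricks.WorkspaceClient) error {
	endpoints, err := w.VectorSearchEndpoints.ListEndpointsAll(ctx, vectorsearch.ListEndpointsRequest{})
	if err != nil {
		return err
	}
	for _, e := range endpoints {
		fmt.Println(e.Name) // Name assumed to be a field of EndpointInfo
	}
	return nil
}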
// end service VectorSearchEndpoints

View File

@ -0,0 +1,625 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package vector_search_indexes
import (
"fmt"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/vectorsearch"
"github.com/spf13/cobra"
)
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "vector-search-indexes",
Short: `**Index**: An efficient representation of your embedding vectors that supports real-time and efficient approximate nearest neighbor (ANN) search queries.`,
Long: `**Index**: An efficient representation of your embedding vectors that supports
real-time and efficient approximate nearest neighbor (ANN) search queries.
There are 2 types of Vector Search indexes: * **Delta Sync Index**: An index
that automatically syncs with a source Delta Table, automatically and
incrementally updating the index as the underlying data in the Delta Table
changes. * **Direct Vector Access Index**: An index that supports direct read
and write of vectors and metadata through our REST and SDK APIs. With this
model, the user manages index updates.`,
GroupID: "vectorsearch",
Annotations: map[string]string{
"package": "vectorsearch",
},
}
// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}
return cmd
}
// start create-index command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var createIndexOverrides []func(
*cobra.Command,
*vectorsearch.CreateVectorIndexRequest,
)
func newCreateIndex() *cobra.Command {
cmd := &cobra.Command{}
var createIndexReq vectorsearch.CreateVectorIndexRequest
var createIndexJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&createIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: complex arg: delta_sync_vector_index_spec
// TODO: complex arg: direct_access_index_spec
cmd.Flags().StringVar(&createIndexReq.EndpointName, "endpoint-name", createIndexReq.EndpointName, `Name of the endpoint to be used for serving the index.`)
cmd.Use = "create-index NAME PRIMARY_KEY INDEX_TYPE"
cmd.Short = `Create an index.`
cmd.Long = `Create an index.
Create a new index.
Arguments:
NAME: Name of the index
PRIMARY_KEY: Primary key of the index
INDEX_TYPE: There are 2 types of Vector Search indexes:
- DELTA_SYNC: An index that automatically syncs with a source Delta
Table, automatically and incrementally updating the index as the
underlying data in the Delta Table changes. - DIRECT_ACCESS: An index
that supports direct read and write of vectors and metadata through our
REST and SDK APIs. With this model, the user manages index updates.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("json") {
err := cobra.ExactArgs(0)(cmd, args)
if err != nil {
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'primary_key', 'index_type' in your JSON input")
}
return nil
}
check := cobra.ExactArgs(3)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = createIndexJson.Unmarshal(&createIndexReq)
if err != nil {
return err
}
}
if !cmd.Flags().Changed("json") {
createIndexReq.Name = args[0]
}
if !cmd.Flags().Changed("json") {
createIndexReq.PrimaryKey = args[1]
}
if !cmd.Flags().Changed("json") {
_, err = fmt.Sscan(args[2], &createIndexReq.IndexType)
if err != nil {
return fmt.Errorf("invalid INDEX_TYPE: %s", args[2])
}
}
response, err := w.VectorSearchIndexes.CreateIndex(ctx, createIndexReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range createIndexOverrides {
fn(cmd, &createIndexReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newCreateIndex())
})
}
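A minimal SDK sketch of index creation. The CreateIndex call and field names come from the generated command; the "DIRECT_ACCESS" value and the example identifiers are assumptions, and a real request would also carry the delta_sync_vector_index_spec or direct_access_index_spec payload that the command leaves as a TODO (normally supplied via --json):

package examples

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/vectorsearch"
)

// createVectorIndex registers a Direct Vector Access index on an existing endpoint.
func createVectorIndex(ctx context.Context, w *databricks.WorkspaceClient) error {
	req := vectorsearch.CreateVectorIndexRequest{
		Name:         "main.default.my_index", // hypothetical three-level name
		PrimaryKey:   "id",
		EndpointName: "my-endpoint",
	}
	// IndexType is an enum; DIRECT_ACCESS and DELTA_SYNC are the two kinds
	// described in the command help above.
	if _, err := fmt.Sscan("DIRECT_ACCESS", &req.IndexType); err != nil {
		return err
	}
	resp, err := w.VectorSearchIndexes.CreateIndex(ctx, req)
	if err != nil {
		return err
	}
	fmt.Printf("created index: %v\n", resp)
	return nil
}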
// start delete-data-vector-index command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var deleteDataVectorIndexOverrides []func(
*cobra.Command,
*vectorsearch.DeleteDataVectorIndexRequest,
)
func newDeleteDataVectorIndex() *cobra.Command {
cmd := &cobra.Command{}
var deleteDataVectorIndexReq vectorsearch.DeleteDataVectorIndexRequest
var deleteDataVectorIndexJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&deleteDataVectorIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Use = "delete-data-vector-index"
cmd.Short = `Delete data from index.`
cmd.Long = `Delete data from index.
Handles the deletion of data from a specified vector index.`
cmd.Annotations = make(map[string]string)
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = deleteDataVectorIndexJson.Unmarshal(&deleteDataVectorIndexReq)
if err != nil {
return err
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.VectorSearchIndexes.DeleteDataVectorIndex(ctx, deleteDataVectorIndexReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range deleteDataVectorIndexOverrides {
fn(cmd, &deleteDataVectorIndexReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newDeleteDataVectorIndex())
})
}
// start delete-index command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var deleteIndexOverrides []func(
*cobra.Command,
*vectorsearch.DeleteIndexRequest,
)
func newDeleteIndex() *cobra.Command {
cmd := &cobra.Command{}
var deleteIndexReq vectorsearch.DeleteIndexRequest
// TODO: short flags
cmd.Use = "delete-index INDEX_NAME"
cmd.Short = `Delete an index.`
cmd.Long = `Delete an index.
Delete an index.
Arguments:
INDEX_NAME: Name of the index`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
deleteIndexReq.IndexName = args[0]
err = w.VectorSearchIndexes.DeleteIndex(ctx, deleteIndexReq)
if err != nil {
return err
}
return nil
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range deleteIndexOverrides {
fn(cmd, &deleteIndexReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newDeleteIndex())
})
}
// start get-index command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getIndexOverrides []func(
*cobra.Command,
*vectorsearch.GetIndexRequest,
)
func newGetIndex() *cobra.Command {
cmd := &cobra.Command{}
var getIndexReq vectorsearch.GetIndexRequest
// TODO: short flags
cmd.Use = "get-index INDEX_NAME"
cmd.Short = `Get an index.`
cmd.Long = `Get an index.
Get an index.
Arguments:
INDEX_NAME: Name of the index`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
getIndexReq.IndexName = args[0]
response, err := w.VectorSearchIndexes.GetIndex(ctx, getIndexReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range getIndexOverrides {
fn(cmd, &getIndexReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newGetIndex())
})
}
// start list-indexes command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listIndexesOverrides []func(
*cobra.Command,
*vectorsearch.ListIndexesRequest,
)
func newListIndexes() *cobra.Command {
cmd := &cobra.Command{}
var listIndexesReq vectorsearch.ListIndexesRequest
// TODO: short flags
cmd.Flags().StringVar(&listIndexesReq.PageToken, "page-token", listIndexesReq.PageToken, `Token for pagination.`)
cmd.Use = "list-indexes ENDPOINT_NAME"
cmd.Short = `List indexes.`
cmd.Long = `List indexes.
List all indexes in the given endpoint.
Arguments:
ENDPOINT_NAME: Name of the endpoint`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
listIndexesReq.EndpointName = args[0]
response, err := w.VectorSearchIndexes.ListIndexesAll(ctx, listIndexesReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range listIndexesOverrides {
fn(cmd, &listIndexesReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newListIndexes())
})
}
// start query-index command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var queryIndexOverrides []func(
*cobra.Command,
*vectorsearch.QueryVectorIndexRequest,
)
func newQueryIndex() *cobra.Command {
cmd := &cobra.Command{}
var queryIndexReq vectorsearch.QueryVectorIndexRequest
var queryIndexJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&queryIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().StringVar(&queryIndexReq.FiltersJson, "filters-json", queryIndexReq.FiltersJson, `JSON string representing query filters.`)
cmd.Flags().IntVar(&queryIndexReq.NumResults, "num-results", queryIndexReq.NumResults, `Number of results to return.`)
cmd.Flags().StringVar(&queryIndexReq.QueryText, "query-text", queryIndexReq.QueryText, `Query text.`)
// TODO: array: query_vector
cmd.Use = "query-index"
cmd.Short = `Query an index.`
cmd.Long = `Query an index.
Query the specified vector index.`
cmd.Annotations = make(map[string]string)
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = queryIndexJson.Unmarshal(&queryIndexReq)
if err != nil {
return err
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.VectorSearchIndexes.QueryIndex(ctx, queryIndexReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range queryIndexOverrides {
fn(cmd, &queryIndexReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newQueryIndex())
})
}
// start sync-index command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var syncIndexOverrides []func(
*cobra.Command,
*vectorsearch.SyncIndexRequest,
)
func newSyncIndex() *cobra.Command {
cmd := &cobra.Command{}
var syncIndexReq vectorsearch.SyncIndexRequest
// TODO: short flags
cmd.Use = "sync-index INDEX_NAME"
cmd.Short = `Synchronize an index.`
cmd.Long = `Synchronize an index.
Triggers a synchronization process for a specified vector index.
Arguments:
INDEX_NAME: Name of the vector index to synchronize. Must be a Delta Sync Index.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
syncIndexReq.IndexName = args[0]
err = w.VectorSearchIndexes.SyncIndex(ctx, syncIndexReq)
if err != nil {
return err
}
return nil
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range syncIndexOverrides {
fn(cmd, &syncIndexReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newSyncIndex())
})
}
// start upsert-data-vector-index command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var upsertDataVectorIndexOverrides []func(
*cobra.Command,
*vectorsearch.UpsertDataVectorIndexRequest,
)
func newUpsertDataVectorIndex() *cobra.Command {
cmd := &cobra.Command{}
var upsertDataVectorIndexReq vectorsearch.UpsertDataVectorIndexRequest
var upsertDataVectorIndexJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&upsertDataVectorIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Use = "upsert-data-vector-index NAME INPUTS_JSON"
cmd.Short = `Upsert data into an index.`
cmd.Long = `Upsert data into an index.
Handles the upserting of data into a specified vector index.
Arguments:
NAME: Name of the vector index where data is to be upserted. Must be a Direct
Vector Access Index.
INPUTS_JSON: JSON string representing the data to be upserted.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("json") {
err := cobra.ExactArgs(1)(cmd, args)
if err != nil {
return fmt.Errorf("when --json flag is specified, provide only NAME as positional arguments. Provide 'inputs_json' in your JSON input")
}
return nil
}
check := cobra.ExactArgs(2)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = upsertDataVectorIndexJson.Unmarshal(&upsertDataVectorIndexReq)
if err != nil {
return err
}
}
upsertDataVectorIndexReq.Name = args[0]
if !cmd.Flags().Changed("json") {
upsertDataVectorIndexReq.InputsJson = args[1]
}
response, err := w.VectorSearchIndexes.UpsertDataVectorIndex(ctx, upsertDataVectorIndexReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range upsertDataVectorIndexOverrides {
fn(cmd, &upsertDataVectorIndexReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newUpsertDataVectorIndex())
})
}
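Finally, a sketch of the upsert path from Go. Name and InputsJson are the two fields the command maps its positional arguments onto; the JSON column names below are purely illustrative:

package examples

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/vectorsearch"
)

// upsertIntoIndex writes a small batch of rows into a Direct Vector Access index.
func upsertIntoIndex(ctx context.Context, w *databricks.WorkspaceClient) error {
	resp, err := w.VectorSearchIndexes.UpsertDataVectorIndex(ctx, vectorsearch.UpsertDataVectorIndexRequest{
		Name: "main.default.my_index",
		// InputsJson carries the rows as a JSON array; the columns here are made up.
		InputsJson: `[{"id": "1", "text": "hello", "text_vector": [0.1, 0.2, 0.3]}]`,
	})
	if err != nil {
		return err
	}
	fmt.Printf("upsert result: %v\n", resp)
	return nil
}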
// end service VectorSearchIndexes

go.mod
View File

@ -4,7 +4,7 @@ go 1.21
require (
github.com/briandowns/spinner v1.23.0 // Apache 2.0
-github.com/databricks/databricks-sdk-go v0.27.0 // Apache 2.0
+github.com/databricks/databricks-sdk-go v0.29.0 // Apache 2.0
github.com/fatih/color v1.16.0 // MIT
github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
github.com/google/uuid v1.5.0 // BSD-3-Clause
@ -21,9 +21,9 @@ require (
github.com/spf13/cobra v1.8.0 // Apache 2.0
github.com/spf13/pflag v1.0.5 // BSD-3-Clause
github.com/stretchr/testify v1.8.4 // MIT
-golang.org/x/exp v0.0.0-20231006140011-7918f672742d
+golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e
golang.org/x/mod v0.14.0
-golang.org/x/oauth2 v0.15.0
+golang.org/x/oauth2 v0.16.0
golang.org/x/sync v0.6.0
golang.org/x/term v0.16.0
golang.org/x/text v0.14.0
@ -43,6 +43,9 @@ require (
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
+github.com/felixge/httpsnoop v1.0.4 // indirect
+github.com/go-logr/logr v1.4.1 // indirect
+github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-querystring v1.1.0 // indirect
@ -54,14 +57,18 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/zclconf/go-cty v1.14.1 // indirect
go.opencensus.io v0.24.0 // indirect
-golang.org/x/crypto v0.17.0 // indirect
-golang.org/x/net v0.19.0 // indirect
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
+go.opentelemetry.io/otel v1.21.0 // indirect
+go.opentelemetry.io/otel/metric v1.21.0 // indirect
+go.opentelemetry.io/otel/trace v1.21.0 // indirect
+golang.org/x/crypto v0.18.0 // indirect
+golang.org/x/net v0.20.0 // indirect
golang.org/x/sys v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
-google.golang.org/api v0.153.0 // indirect
+google.golang.org/api v0.155.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect
-google.golang.org/grpc v1.59.0 // indirect
+google.golang.org/grpc v1.60.1 // indirect
-google.golang.org/protobuf v1.31.0 // indirect
+google.golang.org/protobuf v1.32.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

go.sum generated
View File

@ -32,6 +32,8 @@ github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53E
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/databricks/databricks-sdk-go v0.27.0 h1:JJ9CxVE7Js08Ug/gafM1gGYx+u/je2g2I4bSYeMPPaY= github.com/databricks/databricks-sdk-go v0.27.0 h1:JJ9CxVE7Js08Ug/gafM1gGYx+u/je2g2I4bSYeMPPaY=
github.com/databricks/databricks-sdk-go v0.27.0/go.mod h1:AGzQDmVUcf/J9ARx2FgObcRI5RO2VZ1jehhxFM6tA60= github.com/databricks/databricks-sdk-go v0.27.0/go.mod h1:AGzQDmVUcf/J9ARx2FgObcRI5RO2VZ1jehhxFM6tA60=
github.com/databricks/databricks-sdk-go v0.29.0 h1:p53y3IvYjNvWve3ALXdsJx67RPk/M4rt0JBgweq5s2Y=
github.com/databricks/databricks-sdk-go v0.29.0/go.mod h1:4Iy1e1XZiMC15BfWMQVrtr6va8wSEkiUXv0ZRMfgo3w=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -44,6 +46,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
@ -52,6 +56,11 @@ github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk=
github.com/go-git/go-git/v5 v5.10.1/go.mod h1:uEuHjxkHap8kAl//V5F/nNWwqIYtP/402ddd05mp0wg= github.com/go-git/go-git/v5 v5.10.1/go.mod h1:uEuHjxkHap8kAl//V5F/nNWwqIYtP/402ddd05mp0wg=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@ -155,6 +164,14 @@ github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA
github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@ -164,9 +181,13 @@ golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2Uz
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e h1:723BNChdd0c2Wk6WOE320qGBiPtYx0F0Bbm1kriShfE=
golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@ -188,9 +209,13 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -249,6 +274,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4= google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4=
google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA=
google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
@ -258,6 +285,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
@ -265,6 +294,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -278,6 +309,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=