Use Go SDK Iterators when listing resources with the CLI (#1202)

## Changes
Currently, when the CLI runs a list API call (like list jobs), it uses
the `List*All` methods from the SDK, which list all resources in the
collection. This is very slow for large collections: if you need to list
all jobs from a workspace that has 10,000+ jobs, you'll be waiting for
at least 100 RPCs to complete before seeing any output.

Instead of using List*All() methods, the SDK recently added an iterator
data structure that allows traversing the collection without needing to
completely list it first. New pages are fetched lazily if the next
requested item belongs to the next page. Using the List() methods that
return these iterators, the CLI can proactively print out some of the
response before the complete collection has been fetched.

This involves a pretty major rewrite of the rendering logic in `cmdio`.
The idea there is to define custom rendering logic based on the type of
the provided resource. There are three renderer interfaces:

1. textRenderer: supports printing something in a textual format (i.e.
not JSON, and not templated).
2. jsonRenderer: supports printing something in a pretty-printed JSON
format.
3. templateRenderer: supports printing something using a text template.

There are also three renderer implementations:

1. readerRenderer: supports printing a reader. This only implements the
textRenderer interface.
2. iteratorRenderer: supports printing a `listing.Iterator` from the Go
SDK. This implements jsonRenderer and templateRenderer, buffering 20
resources at a time before writing them to the output.
3. defaultRenderer: supports printing arbitrary resources (the previous
implementation).

Callers will use either `cmdio.Render()` for rendering individual
resources or an `io.Reader`, or `cmdio.RenderIterator()` for rendering
an iterator. This separate method is needed to safely be able to match on
the type of the iterator, since Go does not allow runtime type matches
on generic types with an existential type parameter.

One other change that needs to happen is to split the templates used for
text representation of list resources into a header template and a row
template. The template is now executed multiple times for List API
calls, but the header should only be printed once. To support this, I
have added `headerTemplate` to `cmdIO`, and I have also changed
`RenderWithTemplate` to include a `headerTemplate` parameter everywhere.

## Tests
- [x] Unit tests for text rendering logic
- [x] Unit test for reflection-based iterator construction.

---------

Co-authored-by: Andrew Nester <andrew.nester@databricks.com>
This commit is contained in:
Miles Yucht 2024-02-21 15:16:36 +01:00 committed by GitHub
parent 5309e0fc2a
commit b65ce75c1f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
89 changed files with 714 additions and 519 deletions

View File

@ -300,16 +300,22 @@ func init() {
// end service {{.Name}}{{end}}
{{- define "method-call" -}}
{{if .Response}}response, err :={{else}}err ={{end}} {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.PascalName}}{{if .Pagination}}All{{end}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}})
{{if .Response -}}
response{{ if not .Pagination}}, err{{end}} :=
{{- else -}}
err =
{{- end}} {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.PascalName}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}})
{{- if not (and .Response .Pagination) }}
if err != nil {
return err
}
{{- end}}
{{ if .Response -}}
{{- if .IsResponseByteStream -}}
defer response.{{.ResponseBodyField.PascalName}}.Close()
return cmdio.RenderReader(ctx, response.{{.ResponseBodyField.PascalName}})
return cmdio.Render{{ if .Pagination}}Iterator{{end}}(ctx, response.{{.ResponseBodyField.PascalName}})
{{- else -}}
return cmdio.Render(ctx, response)
return cmdio.Render{{ if .Pagination}}Iterator{{end}}(ctx, response)
{{- end -}}
{{ else -}}
return nil

View File

@ -1,8 +1,8 @@
{
"description": "Root of the bundle config",
"description": "",
"properties": {
"artifacts": {
"description": "A description of all code artifacts in this bundle.",
"description": "",
"additionalproperties": {
"description": "",
"properties": {
@ -33,7 +33,7 @@
}
},
"bundle": {
"description": "The details for this bundle.",
"description": "",
"properties": {
"compute_id": {
"description": ""
@ -58,7 +58,7 @@
}
},
"name": {
"description": "The name of the bundle."
"description": ""
}
}
},
@ -77,7 +77,7 @@
}
},
"include": {
"description": "A list of glob patterns of files to load and merge into the this configuration. Defaults to no files being included.",
"description": "",
"items": {
"description": ""
}
@ -193,7 +193,7 @@
"description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.",
"properties": {
"pause_status": {
"description": "Whether this trigger is paused or not."
"description": "Indicate whether this schedule is paused or not."
}
}
},
@ -322,7 +322,7 @@
"description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution."
},
"new_cluster": {
"description": "If new_cluster, a description of a cluster that is created for each task.",
"description": "If new_cluster, a description of a cluster that is created for only for this task.",
"properties": {
"apply_policy_default_values": {
"description": ""
@ -725,7 +725,7 @@
"description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
"properties": {
"pause_status": {
"description": "Whether this trigger is paused or not."
"description": "Indicate whether this schedule is paused or not."
},
"quartz_cron_expression": {
"description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n"
@ -785,7 +785,7 @@
"description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used."
},
"source": {
"description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n"
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
},
"warehouse_id": {
"description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument."
@ -930,7 +930,7 @@
"description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried."
},
"new_cluster": {
"description": "If new_cluster, a description of a cluster that is created for each task.",
"description": "If new_cluster, a description of a cluster that is created for only for this task.",
"properties": {
"apply_policy_default_values": {
"description": ""
@ -1269,7 +1269,7 @@
"description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n"
},
"source": {
"description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n"
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
}
}
},
@ -1371,7 +1371,7 @@
"description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
},
"source": {
"description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n"
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
}
}
},
@ -1449,7 +1449,7 @@
"description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths."
},
"source": {
"description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n"
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
}
}
},
@ -1551,7 +1551,7 @@
}
},
"pause_status": {
"description": "Whether this trigger is paused or not."
"description": "Indicate whether this schedule is paused or not."
},
"table": {
"description": "Table trigger settings.",
@ -2535,7 +2535,7 @@
"description": "",
"properties": {
"artifacts": {
"description": "A description of all code artifacts in this bundle.",
"description": "",
"additionalproperties": {
"description": "",
"properties": {
@ -2566,7 +2566,7 @@
}
},
"bundle": {
"description": "The details for this bundle.",
"description": "",
"properties": {
"compute_id": {
"description": ""
@ -2591,7 +2591,7 @@
}
},
"name": {
"description": "The name of the bundle."
"description": ""
}
}
},
@ -2726,7 +2726,7 @@
"description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.",
"properties": {
"pause_status": {
"description": "Whether this trigger is paused or not."
"description": "Indicate whether this schedule is paused or not."
}
}
},
@ -2855,7 +2855,7 @@
"description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution."
},
"new_cluster": {
"description": "If new_cluster, a description of a cluster that is created for each task.",
"description": "If new_cluster, a description of a cluster that is created for only for this task.",
"properties": {
"apply_policy_default_values": {
"description": ""
@ -3258,7 +3258,7 @@
"description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
"properties": {
"pause_status": {
"description": "Whether this trigger is paused or not."
"description": "Indicate whether this schedule is paused or not."
},
"quartz_cron_expression": {
"description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n"
@ -3318,7 +3318,7 @@
"description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used."
},
"source": {
"description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n"
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
},
"warehouse_id": {
"description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument."
@ -3463,7 +3463,7 @@
"description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried."
},
"new_cluster": {
"description": "If new_cluster, a description of a cluster that is created for each task.",
"description": "If new_cluster, a description of a cluster that is created for only for this task.",
"properties": {
"apply_policy_default_values": {
"description": ""
@ -3802,7 +3802,7 @@
"description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n"
},
"source": {
"description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n"
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
}
}
},
@ -3904,7 +3904,7 @@
"description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
},
"source": {
"description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n"
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
}
}
},
@ -3982,7 +3982,7 @@
"description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths."
},
"source": {
"description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n"
"description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n"
}
}
},
@ -4084,7 +4084,7 @@
}
},
"pause_status": {
"description": "Whether this trigger is paused or not."
"description": "Indicate whether this schedule is paused or not."
},
"table": {
"description": "Table trigger settings.",
@ -5115,10 +5115,10 @@
}
},
"workspace": {
"description": "Configures which workspace to connect to and locations for files, state, and similar locations within the workspace file tree.",
"description": "",
"properties": {
"artifact_path": {
"description": "The remote path to synchronize build artifacts to. This defaults to `${workspace.root}/artifacts`"
"description": ""
},
"auth_type": {
"description": ""
@ -5127,10 +5127,10 @@
"description": ""
},
"azure_environment": {
"description": "Azure environment, one of (Public, UsGov, China, Germany)."
"description": ""
},
"azure_login_app_id": {
"description": "Azure Login Application ID."
"description": ""
},
"azure_tenant_id": {
"description": ""
@ -5139,28 +5139,28 @@
"description": ""
},
"azure_workspace_resource_id": {
"description": "Azure Resource Manager ID for Azure Databricks workspace."
"description": ""
},
"client_id": {
"description": ""
},
"file_path": {
"description": "The remote path to synchronize local files artifacts to. This defaults to `${workspace.root}/files`"
"description": ""
},
"google_service_account": {
"description": ""
},
"host": {
"description": "Host url of the workspace."
"description": ""
},
"profile": {
"description": "Connection profile to use. By default profiles are specified in ~/.databrickscfg."
"description": ""
},
"root_path": {
"description": "The base location for synchronizing files, artifacts and state. Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.target}`"
"description": ""
},
"state_path": {
"description": "The remote path to synchronize bundle state to. This defaults to `${workspace.root}/state`"
"description": ""
}
}
}
@ -5220,10 +5220,10 @@
}
},
"workspace": {
"description": "Configures which workspace to connect to and locations for files, state, and similar locations within the workspace file tree.",
"description": "",
"properties": {
"artifact_path": {
"description": "The remote path to synchronize build artifacts to. This defaults to `${workspace.root}/artifacts`"
"description": ""
},
"auth_type": {
"description": ""
@ -5232,10 +5232,10 @@
"description": ""
},
"azure_environment": {
"description": "Azure environment, one of (Public, UsGov, China, Germany)."
"description": ""
},
"azure_login_app_id": {
"description": "Azure Login Application ID."
"description": ""
},
"azure_tenant_id": {
"description": ""
@ -5244,28 +5244,28 @@
"description": ""
},
"azure_workspace_resource_id": {
"description": "Azure Resource Manager ID for Azure Databricks workspace."
"description": ""
},
"client_id": {
"description": ""
},
"file_path": {
"description": "The remote path to synchronize local files artifacts to. This defaults to `${workspace.root}/files`"
"description": ""
},
"google_service_account": {
"description": ""
},
"host": {
"description": "Host url of the workspace."
"description": ""
},
"profile": {
"description": "Connection profile to use. By default profiles are specified in ~/.databrickscfg."
"description": ""
},
"root_path": {
"description": "The base location for synchronizing files, artifacts and state. Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.target}`"
"description": ""
},
"state_path": {
"description": "The remote path to synchronize bundle state to. This defaults to `${workspace.root}/state`"
"description": ""
}
}
}

View File

@ -92,7 +92,7 @@ func newDownload() *cobra.Command {
return err
}
defer response.Contents.Close()
return cmdio.RenderReader(ctx, response.Contents)
return cmdio.Render(ctx, response.Contents)
}
// Disable completions since they are not applicable.

View File

@ -281,11 +281,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
response, err := a.Budgets.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.Budgets.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -262,11 +262,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
response, err := a.CustomAppIntegration.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.CustomAppIntegration.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -314,11 +314,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
a := root.AccountClient(ctx)
response, err := a.Groups.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.Groups.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -339,11 +339,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
response, err := a.IpAccessLists.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.IpAccessLists.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -303,11 +303,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
a := root.AccountClient(ctx)
response, err := a.LogDelivery.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.LogDelivery.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -294,11 +294,8 @@ func newList() *cobra.Command {
listReq.MetastoreId = args[0]
response, err := a.MetastoreAssignments.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.MetastoreAssignments.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -257,11 +257,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
response, err := a.Metastores.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.Metastores.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -546,11 +546,8 @@ func newListNetworkConnectivityConfigurations() *cobra.Command {
ctx := cmd.Context()
a := root.AccountClient(ctx)
response, err := a.NetworkConnectivity.ListNetworkConnectivityConfigurationsAll(ctx, listNetworkConnectivityConfigurationsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.NetworkConnectivity.ListNetworkConnectivityConfigurations(ctx, listNetworkConnectivityConfigurationsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -612,11 +609,8 @@ func newListPrivateEndpointRules() *cobra.Command {
listPrivateEndpointRulesReq.NetworkConnectivityConfigId = args[0]
response, err := a.NetworkConnectivity.ListPrivateEndpointRulesAll(ctx, listPrivateEndpointRulesReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.NetworkConnectivity.ListPrivateEndpointRules(ctx, listPrivateEndpointRulesReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -72,11 +72,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
a := root.AccountClient(ctx)
response, err := a.OAuthPublishedApps.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.OAuthPublishedApps.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -262,11 +262,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
response, err := a.PublishedAppIntegration.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.PublishedAppIntegration.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -226,11 +226,8 @@ func newList() *cobra.Command {
return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0])
}
response, err := a.ServicePrincipalSecrets.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.ServicePrincipalSecrets.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -313,11 +313,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
a := root.AccountClient(ctx)
response, err := a.ServicePrincipals.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.ServicePrincipals.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -329,11 +329,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
a := root.AccountClient(ctx)
response, err := a.Users.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.Users.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -219,11 +219,8 @@ func newList() *cobra.Command {
return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0])
}
response, err := a.WorkspaceAssignment.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := a.WorkspaceAssignment.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -27,7 +27,7 @@ func newCatCommand() *cobra.Command {
if err != nil {
return err
}
return cmdio.RenderReader(ctx, r)
return cmdio.Render(ctx, r)
}
return cmd

View File

@ -107,7 +107,7 @@ func (c *copy) emitFileSkippedEvent(sourcePath, targetPath string) error {
event := newFileSkippedEvent(fullSourcePath, fullTargetPath)
template := "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n"
return cmdio.RenderWithTemplate(c.ctx, event, template)
return cmdio.RenderWithTemplate(c.ctx, event, "", template)
}
func (c *copy) emitFileCopiedEvent(sourcePath, targetPath string) error {
@ -123,7 +123,7 @@ func (c *copy) emitFileCopiedEvent(sourcePath, targetPath string) error {
event := newFileCopiedEvent(fullSourcePath, fullTargetPath)
template := "{{.SourcePath}} -> {{.TargetPath}}\n"
return cmdio.RenderWithTemplate(c.ctx, event, template)
return cmdio.RenderWithTemplate(c.ctx, event, "", template)
}
func newCpCommand() *cobra.Command {

View File

@ -78,12 +78,12 @@ func newLsCommand() *cobra.Command {
// Use template for long mode if the flag is set
if long {
return cmdio.RenderWithTemplate(ctx, jsonDirEntries, cmdio.Heredoc(`
return cmdio.RenderWithTemplate(ctx, jsonDirEntries, "", cmdio.Heredoc(`
{{range .}}{{if .IsDir}}DIRECTORY {{else}}FILE {{end}}{{.Size}} {{.ModTime|pretty_date}} {{.Name}}
{{end}}
`))
}
return cmdio.RenderWithTemplate(ctx, jsonDirEntries, cmdio.Heredoc(`
return cmdio.RenderWithTemplate(ctx, jsonDirEntries, "", cmdio.Heredoc(`
{{range .}}{{.Name}}
{{end}}
`))

View File

@ -87,7 +87,7 @@ func (cp *proxy) renderJsonAsTable(cmd *cobra.Command, args []string, envs map[s
}
// IntelliJ eagerly replaces tabs with spaces, even though we're not asking for it
fixedTemplate := strings.ReplaceAll(cp.TableTemplate, "\\t", "\t")
return cmdio.RenderWithTemplate(ctx, anyVal, fixedTemplate)
return cmdio.RenderWithTemplate(ctx, anyVal, "", fixedTemplate)
}
func (cp *proxy) commandInput(cmd *cobra.Command) ([]string, error) {

View File

@ -38,13 +38,14 @@ func OutputType(cmd *cobra.Command) flags.Output {
}
func (f *outputFlag) initializeIO(cmd *cobra.Command) error {
var template string
var headerTemplate, template string
if cmd.Annotations != nil {
// rely on zeroval being an empty string
template = cmd.Annotations["template"]
headerTemplate = cmd.Annotations["headerTemplate"]
}
cmdIO := cmdio.NewIO(f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), template)
cmdIO := cmdio.NewIO(f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), headerTemplate, template)
ctx := cmdio.InContext(cmd.Context(), cmdIO)
cmd.SetContext(ctx)
return nil

View File

@ -292,11 +292,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Catalogs.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Catalogs.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -6,8 +6,9 @@ import (
)
func listOverride(listCmd *cobra.Command) {
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "Name"}} {{header "Type"}} {{header "Comment"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "Name"}} {{header "Type"}} {{header "Comment"}}
{{range .}}{{.Name|green}} {{blue "%s" .CatalogType}} {{.Comment}}
{{end}}`)
}

View File

@ -282,11 +282,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.CleanRooms.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.CleanRooms.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -603,11 +603,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.ClusterPolicies.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.ClusterPolicies.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -653,11 +653,8 @@ func newEvents() *cobra.Command {
eventsReq.ClusterId = args[0]
}
response, err := w.Clusters.EventsAll(ctx, eventsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Clusters.Events(ctx, eventsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -957,11 +954,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Clusters.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Clusters.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -7,8 +7,9 @@ import (
)
func listOverride(listCmd *cobra.Command, _ *compute.ListClustersRequest) {
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}} {{header "State"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}} {{header "State"}}
{{range .}}{{.ClusterId | green}} {{.ClusterName | cyan}} {{if eq .State "RUNNING"}}{{green "%s" .State}}{{else if eq .State "TERMINATED"}}{{red "%s" .State}}{{else}}{{blue "%s" .State}}{{end}}
{{end}}`)
}

View File

@ -293,11 +293,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Connections.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Connections.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -293,11 +293,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Dashboards.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Dashboards.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -7,8 +7,9 @@ import (
)
func listOverride(listCmd *cobra.Command, _ *sql.ListDashboardsRequest) {
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}}
{{range .}}{{.Id|green}} {{.Name}}
{{end}}`)
}

View File

@ -733,11 +733,8 @@ func newGetHistory() *cobra.Command {
getHistoryReq.MetricKey = args[0]
response, err := w.Experiments.GetHistoryAll(ctx, getHistoryReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Experiments.GetHistory(ctx, getHistoryReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -998,11 +995,8 @@ func newListArtifacts() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Experiments.ListArtifactsAll(ctx, listArtifactsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Experiments.ListArtifacts(ctx, listArtifactsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -1061,11 +1055,8 @@ func newListExperiments() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Experiments.ListExperimentsAll(ctx, listExperimentsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Experiments.ListExperiments(ctx, listExperimentsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -1842,11 +1833,8 @@ func newSearchExperiments() *cobra.Command {
}
}
response, err := w.Experiments.SearchExperimentsAll(ctx, searchExperimentsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Experiments.SearchExperiments(ctx, searchExperimentsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -1919,11 +1907,8 @@ func newSearchRuns() *cobra.Command {
}
}
response, err := w.Experiments.SearchRunsAll(ctx, searchRunsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Experiments.SearchRuns(ctx, searchRunsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -319,11 +319,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.ExternalLocations.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.ExternalLocations.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -7,8 +7,9 @@ import (
)
func listOverride(listCmd *cobra.Command, listReq *catalog.ListExternalLocationsRequest) {
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "Name"}} {{header "Credential"}} {{header "URL"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "Name"}} {{header "Credential"}} {{header "URL"}}
{{range .}}{{.Name|green}} {{.CredentialName|cyan}} {{.Url}}
{{end}}`)
}

View File

@ -327,11 +327,8 @@ func newList() *cobra.Command {
listReq.CatalogName = args[0]
listReq.SchemaName = args[1]
response, err := w.Functions.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Functions.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -311,11 +311,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.GitCredentials.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.GitCredentials.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -309,11 +309,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.GlobalInitScripts.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.GlobalInitScripts.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -314,11 +314,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Groups.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Groups.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -602,11 +602,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.InstancePools.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.InstancePools.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -251,11 +251,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.InstanceProfiles.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.InstanceProfiles.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -340,11 +340,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.IpAccessLists.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.IpAccessLists.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -1042,11 +1042,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Jobs.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Jobs.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -1112,11 +1109,8 @@ func newListRuns() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Jobs.ListRunsAll(ctx, listRunsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Jobs.ListRuns(ctx, listRunsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -13,8 +13,9 @@ func listOverride(listCmd *cobra.Command, listReq *jobs.ListJobsRequest) {
}
func listRunsOverride(listRunsCmd *cobra.Command, listRunsReq *jobs.ListRunsRequest) {
listRunsCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "Job ID"}} {{header "Run ID"}} {{header "Result State"}} URL`)
listRunsCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "Job ID"}} {{header "Run ID"}} {{header "Result State"}} URL
{{range .}}{{green "%d" .JobId}} {{cyan "%d" .RunId}} {{if eq .State.ResultState "SUCCESS"}}{{"SUCCESS"|green}}{{else}}{{red "%s" .State.ResultState}}{{end}} {{.RunPageUrl}}
{{end}}`)
}

View File

@ -157,11 +157,8 @@ func newClusterStatus() *cobra.Command {
clusterStatusReq.ClusterId = args[0]
response, err := w.Libraries.ClusterStatusAll(ctx, clusterStatusReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Libraries.ClusterStatus(ctx, clusterStatusReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -455,11 +455,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Metastores.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Metastores.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -6,8 +6,9 @@ import (
)
func listOverride(listCmd *cobra.Command) {
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}} {{"Region"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}} {{"Region"}}
{{range .}}{{.MetastoreId|green}} {{.Name|cyan}} {{.Region}}
{{end}}`)
}

View File

@ -1128,11 +1128,8 @@ func newGetLatestVersions() *cobra.Command {
getLatestVersionsReq.Name = args[0]
}
response, err := w.ModelRegistry.GetLatestVersionsAll(ctx, getLatestVersionsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.ModelRegistry.GetLatestVersions(ctx, getLatestVersionsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -1520,11 +1517,8 @@ func newListModels() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.ModelRegistry.ListModelsAll(ctx, listModelsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.ModelRegistry.ListModels(ctx, listModelsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -1586,11 +1580,8 @@ func newListTransitionRequests() *cobra.Command {
listTransitionRequestsReq.Name = args[0]
listTransitionRequestsReq.Version = args[1]
response, err := w.ModelRegistry.ListTransitionRequestsAll(ctx, listTransitionRequestsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.ModelRegistry.ListTransitionRequests(ctx, listTransitionRequestsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -1651,11 +1642,8 @@ func newListWebhooks() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.ModelRegistry.ListWebhooksAll(ctx, listWebhooksReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.ModelRegistry.ListWebhooks(ctx, listWebhooksReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -1900,11 +1888,8 @@ func newSearchModelVersions() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.ModelRegistry.SearchModelVersionsAll(ctx, searchModelVersionsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.ModelRegistry.SearchModelVersions(ctx, searchModelVersionsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -1964,11 +1949,8 @@ func newSearchModels() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.ModelRegistry.SearchModelsAll(ctx, searchModelsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.ModelRegistry.SearchModels(ctx, searchModelsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -315,11 +315,8 @@ func newList() *cobra.Command {
listReq.FullName = args[0]
response, err := w.ModelVersions.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.ModelVersions.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -536,11 +536,8 @@ func newListPipelineEvents() *cobra.Command {
}
listPipelineEventsReq.PipelineId = args[0]
response, err := w.Pipelines.ListPipelineEventsAll(ctx, listPipelineEventsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Pipelines.ListPipelineEvents(ctx, listPipelineEventsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -600,11 +597,8 @@ func newListPipelines() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Pipelines.ListPipelinesAll(ctx, listPipelinesReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Pipelines.ListPipelines(ctx, listPipelinesReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -138,11 +138,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.PolicyFamilies.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.PolicyFamilies.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -323,11 +323,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Providers.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Providers.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -401,11 +398,8 @@ func newListShares() *cobra.Command {
}
listSharesReq.Name = args[0]
response, err := w.Providers.ListSharesAll(ctx, listSharesReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Providers.ListShares(ctx, listSharesReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -8,8 +8,9 @@ import (
func listOverride(listCmd *cobra.Command, listReq *sql.ListQueriesRequest) {
// TODO: figure out colored/non-colored headers and colspan shifts
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}} {{header "Author"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}} {{header "Author"}}
{{range .}}{{.Id|green}} {{.Name|cyan}} {{.User.Email|cyan}}
{{end}}`)
}

View File

@ -303,11 +303,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Queries.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Queries.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -73,11 +73,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.QueryHistory.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.QueryHistory.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -342,11 +342,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Recipients.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Recipients.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -450,11 +450,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.RegisteredModels.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.RegisteredModels.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -485,11 +485,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Repos.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Repos.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -7,8 +7,9 @@ import (
)
func listOverride(listCmd *cobra.Command, listReq *catalog.ListSchemasRequest) {
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "Full Name"}} {{header "Owner"}} {{header "Comment"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "Full Name"}} {{header "Owner"}} {{header "Comment"}}
{{range .}}{{.FullName|green}} {{.Owner|cyan}} {{.Comment}}
{{end}}`)
}

View File

@ -333,11 +333,8 @@ func newList() *cobra.Command {
listReq.CatalogName = args[0]
response, err := w.Schemas.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Schemas.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -11,15 +11,17 @@ func cmdOverride(cmd *cobra.Command) {
}
func listScopesOverride(listScopesCmd *cobra.Command) {
listScopesCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "Scope"}} {{header "Backend Type"}}`)
listScopesCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "Scope"}} {{header "Backend Type"}}
{{range .}}{{.Name|green}} {{.BackendType}}
{{end}}`)
}
func listSecretsOverride(listSecretsCommand *cobra.Command, _ *workspace.ListSecretsRequest) {
listSecretsCommand.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "Key"}} {{header "Last Updated Timestamp"}}`)
listSecretsCommand.Annotations["template"] = cmdio.Heredoc(`
{{header "Key"}} {{header "Last Updated Timestamp"}}
{{range .}}{{.Key|green}} {{.LastUpdatedTimestamp}}
{{end}}`)
}

View File

@ -590,11 +590,8 @@ func newListAcls() *cobra.Command {
listAclsReq.Scope = args[0]
response, err := w.Secrets.ListAclsAll(ctx, listAclsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Secrets.ListAcls(ctx, listAclsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -641,11 +638,8 @@ func newListScopes() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Secrets.ListScopesAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Secrets.ListScopes(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -712,11 +706,8 @@ func newListSecrets() *cobra.Command {
listSecretsReq.Scope = args[0]
response, err := w.Secrets.ListSecretsAll(ctx, listSecretsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Secrets.ListSecrets(ctx, listSecretsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -313,11 +313,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.ServicePrincipals.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.ServicePrincipals.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -543,11 +543,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.ServingEndpoints.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.ServingEndpoints.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -281,11 +281,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Shares.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Shares.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -7,8 +7,9 @@ import (
)
func listOverride(listCmd *cobra.Command, listReq *catalog.ListStorageCredentialsRequest) {
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}} {{header "Credentials"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}} {{header "Credentials"}}
{{range .}}{{.Id|green}} {{.Name|cyan}} {{if .AwsIamRole}}{{.AwsIamRole.RoleArn}}{{end}}{{if .AzureServicePrincipal}}{{.AzureServicePrincipal.ApplicationId}}{{end}}{{if .DatabricksGcpServiceAccount}}{{.DatabricksGcpServiceAccount.Email}}{{end}}
{{end}}`)
}

View File

@ -336,11 +336,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.StorageCredentials.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.StorageCredentials.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -216,11 +216,8 @@ func newList() *cobra.Command {
listReq.MetastoreId = args[0]
response, err := w.SystemSchemas.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.SystemSchemas.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -7,8 +7,9 @@ import (
)
func listOverride(listCmd *cobra.Command, listReq *catalog.ListTablesRequest) {
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "Full Name"}} {{header "Table Type"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "Full Name"}} {{header "Table Type"}}
{{range .}}{{.FullName|green}} {{blue "%s" .TableType}}
{{end}}`)
}

View File

@ -342,11 +342,8 @@ func newList() *cobra.Command {
listReq.CatalogName = args[0]
listReq.SchemaName = args[1]
response, err := w.Tables.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Tables.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
@ -433,11 +430,8 @@ func newListSummaries() *cobra.Command {
}
listSummariesReq.CatalogName = args[0]
response, err := w.Tables.ListSummariesAll(ctx, listSummariesReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Tables.ListSummaries(ctx, listSummariesReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -7,8 +7,9 @@ import (
)
func listOverride(listCmd *cobra.Command, listReq *settings.ListTokenManagementRequest) {
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Created By"}} {{header "Comment"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Created By"}} {{header "Comment"}}
{{range .}}{{.TokenId|green}} {{.CreatedByUsername|cyan}} {{.Comment|cyan}}
{{end}}`)
}

View File

@ -422,11 +422,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.TokenManagement.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.TokenManagement.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -6,8 +6,9 @@ import (
)
func listOverride(listCmd *cobra.Command) {
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Expiry time"}} {{header "Comment"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Expiry time"}} {{header "Comment"}}
{{range .}}{{.TokenId|green}} {{cyan "%d" .ExpiryTime}} {{.Comment|cyan}}
{{end}}`)
}

View File

@ -232,11 +232,8 @@ func newList() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Tokens.ListAll(ctx)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Tokens.List(ctx)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -426,11 +426,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Users.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Users.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -308,11 +308,8 @@ func newListEndpoints() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.VectorSearchEndpoints.ListEndpointsAll(ctx, listEndpointsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.VectorSearchEndpoints.ListEndpoints(ctx, listEndpointsReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -389,11 +389,8 @@ func newListIndexes() *cobra.Command {
listIndexesReq.EndpointName = args[0]
response, err := w.VectorSearchIndexes.ListIndexesAll(ctx, listIndexesReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.VectorSearchIndexes.ListIndexes(ctx, listIndexesReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -292,11 +292,8 @@ func newList() *cobra.Command {
listReq.CatalogName = args[0]
listReq.SchemaName = args[1]
response, err := w.Volumes.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Volumes.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -7,8 +7,9 @@ import (
)
func listOverride(listCmd *cobra.Command, listReq *sql.ListWarehousesRequest) {
listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}} {{header "Size"}} {{header "State"}}`)
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Name"}} {{header "Size"}} {{header "State"}}
{{range .}}{{.Id|green}} {{.Name|cyan}} {{.ClusterSize|cyan}} {{if eq .State "RUNNING"}}{{"RUNNING"|green}}{{else if eq .State "STOPPED"}}{{"STOPPED"|red}}{{else}}{{blue "%s" .State}}{{end}}
{{end}}`)
}

View File

@ -661,11 +661,8 @@ func newList() *cobra.Command {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response, err := w.Warehouses.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Warehouses.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -55,7 +55,7 @@ func (opts exportDirOptions) callback(ctx context.Context, workspaceFiler filer.
// If a file exists, and overwrite is not set, we skip exporting the file
if _, err := os.Stat(targetPath); err == nil && !overwrite {
// Log event that this file/directory has been skipped
return cmdio.RenderWithTemplate(ctx, newFileSkippedEvent(relPath, targetPath), "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n")
return cmdio.RenderWithTemplate(ctx, newFileSkippedEvent(relPath, targetPath), "", "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n")
}
// create the file
@ -74,7 +74,7 @@ func (opts exportDirOptions) callback(ctx context.Context, workspaceFiler filer.
if err != nil {
return err
}
return cmdio.RenderWithTemplate(ctx, newFileExportedEvent(sourcePath, targetPath), "{{.SourcePath}} -> {{.TargetPath}}\n")
return cmdio.RenderWithTemplate(ctx, newFileExportedEvent(sourcePath, targetPath), "", "{{.SourcePath}} -> {{.TargetPath}}\n")
}
}

View File

@ -93,14 +93,14 @@ func (opts importDirOptions) callback(ctx context.Context, workspaceFiler filer.
// Emit file skipped event with the appropriate template
fileSkippedEvent := newFileSkippedEvent(localName, path.Join(targetDir, remoteName))
template := "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n"
return cmdio.RenderWithTemplate(ctx, fileSkippedEvent, template)
return cmdio.RenderWithTemplate(ctx, fileSkippedEvent, "", template)
}
if err != nil {
return err
}
}
fileImportedEvent := newFileImportedEvent(localName, path.Join(targetDir, remoteName))
return cmdio.RenderWithTemplate(ctx, fileImportedEvent, "{{.SourcePath}} -> {{.TargetPath}}\n")
return cmdio.RenderWithTemplate(ctx, fileImportedEvent, "", "{{.SourcePath}} -> {{.TargetPath}}\n")
}
}

View File

@ -17,8 +17,9 @@ import (
// listOverride configures text rendering for "workspace list" and defaults
// the listing path to the workspace root ("/") when no path is given.
func listOverride(listCmd *cobra.Command, listReq *workspace.ListWorkspaceRequest) {
	listReq.Path = "/"
	listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Type"}} {{header "Language"}} {{header "Path"}}`)
	// NOTE(review): the row template below still embeds the header line even
	// though headerTemplate is now set separately — confirm this does not
	// print the header twice.
	listCmd.Annotations["template"] = cmdio.Heredoc(`
{{header "ID"}} {{header "Type"}} {{header "Language"}} {{header "Path"}}
{{range .}}{{green "%d" .ObjectId}} {{blue "%s" .ObjectType}} {{cyan "%s" .Language}} {{.Path|cyan}}
{{end}}`)
}

View File

@ -577,11 +577,8 @@ func newList() *cobra.Command {
listReq.Path = args[0]
response, err := w.Workspace.ListAll(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
response := w.Workspace.List(ctx, listReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -25,7 +25,7 @@ func initTestTemplate(t *testing.T, ctx context.Context, templateName string, co
}
ctx = root.SetWorkspaceClient(ctx, nil)
cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "bundles")
cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles")
ctx = cmdio.InContext(ctx, cmd)
err = template.Materialize(ctx, configFilePath, templateRoot, bundleRoot)

View File

@ -24,13 +24,14 @@ type cmdIO struct {
// e.g. if stdout is a terminal
interactive bool
outputFormat flags.Output
headerTemplate string
template string
in io.Reader
out io.Writer
err io.Writer
}
func NewIO(outputFormat flags.Output, in io.Reader, out io.Writer, err io.Writer, template string) *cmdIO {
func NewIO(outputFormat flags.Output, in io.Reader, out io.Writer, err io.Writer, headerTemplate, template string) *cmdIO {
// The check below is similar to color.NoColor but uses the specified err writer.
dumb := os.Getenv("NO_COLOR") != "" || os.Getenv("TERM") == "dumb"
if f, ok := err.(*os.File); ok && !dumb {
@ -39,6 +40,7 @@ func NewIO(outputFormat flags.Output, in io.Reader, out io.Writer, err io.Writer
return &cmdIO{
interactive: !dumb,
outputFormat: outputFormat,
headerTemplate: headerTemplate,
template: template,
in: in,
out: out,
@ -113,48 +115,6 @@ func IsGitBash(ctx context.Context) bool {
return false
}
// Render renders v with the command's configured template.
// (Pre-iterator implementation; shown as removed by this diff.)
func Render(ctx context.Context, v any) error {
	c := fromContext(ctx)
	return RenderWithTemplate(ctx, v, c.template)
}
// RenderWithTemplate renders v as JSON, or via the given template when the
// output format is text and a template is provided.
// (Pre-iterator implementation; shown as removed by this diff.)
func RenderWithTemplate(ctx context.Context, v any, template string) error {
	// TODO: add terminal width & white/dark theme detection
	c := fromContext(ctx)
	switch c.outputFormat {
	case flags.OutputJSON:
		return renderJson(c.out, v)
	case flags.OutputText:
		if template != "" {
			return renderTemplate(c.out, template, v)
		}
		// No template configured: fall back to JSON even in text mode.
		return renderJson(c.out, v)
	default:
		return fmt.Errorf("invalid output format: %s", c.outputFormat)
	}
}
// RenderJson renders v as JSON only when the configured output format is
// JSON; otherwise it is a silent no-op.
// (Pre-iterator implementation; shown as removed by this diff.)
func RenderJson(ctx context.Context, v any) error {
	c := fromContext(ctx)
	if c.outputFormat == flags.OutputJSON {
		return renderJson(c.out, v)
	}
	return nil
}
// RenderReader copies the reader verbatim to the command's output in text
// mode; JSON output is rejected because a raw byte stream has no JSON form.
// (Pre-iterator implementation; shown as removed by this diff.)
func RenderReader(ctx context.Context, r io.Reader) error {
	c := fromContext(ctx)
	switch c.outputFormat {
	case flags.OutputJSON:
		return fmt.Errorf("json output not supported")
	case flags.OutputText:
		_, err := io.Copy(c.out, r)
		return err
	default:
		return fmt.Errorf("invalid output format: %s", c.outputFormat)
	}
}
type Tuple struct{ Name, Id string }
func (c *cmdIO) Select(items []Tuple, label string) (id string, err error) {

View File

@ -2,14 +2,19 @@ package cmdio
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"strings"
"text/tabwriter"
"text/template"
"time"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/listing"
"github.com/fatih/color"
"github.com/nwidger/jsoncolor"
)
@ -46,8 +51,123 @@ func Heredoc(tmpl string) (trimmed string) {
return strings.TrimSpace(trimmed)
}
func renderJson(w io.Writer, v any) error {
pretty, err := fancyJSON(v)
// writeFlusher represents a buffered writer that can be flushed. This is useful when
// buffering writing a large number of resources (such as during a list API).
type writeFlusher interface {
	io.Writer
	Flush() error
}

// jsonRenderer is implemented by values that can print themselves as JSON.
type jsonRenderer interface {
	// Render an object as JSON to the provided writeFlusher.
	renderJson(context.Context, writeFlusher) error
}

// textRenderer is implemented by values that can print themselves as plain text.
type textRenderer interface {
	// Render an object as text to the provided io.Writer.
	renderText(context.Context, io.Writer) error
}

// templateRenderer is implemented by values that can print themselves through
// a text/template.
type templateRenderer interface {
	// Render an object using the provided template and write to the provided tabwriter.Writer.
	renderTemplate(context.Context, *template.Template, *tabwriter.Writer) error
}
// readerRenderer renders the contents of an io.Reader verbatim. It only
// implements textRenderer, so JSON output is rejected for readers.
type readerRenderer struct {
	reader io.Reader
}

// renderText copies the reader's bytes to w unchanged.
func (r readerRenderer) renderText(_ context.Context, w io.Writer) error {
	_, err := io.Copy(w, r.reader)
	return err
}
// defaultIteratorBufferSize is the number of resources buffered before each
// flush to the output when rendering an iterator.
const defaultIteratorBufferSize = 20

// iteratorRenderer renders a listing.Iterator from the Go SDK, emitting
// resources in batches so output appears before the whole collection has
// been fetched.
type iteratorRenderer[T any] struct {
	t listing.Iterator[T]

	// bufferSize overrides the batch size; zero means use the default.
	bufferSize int
}

// getBufferSize returns the configured batch size, falling back to
// defaultIteratorBufferSize when unset. Replaces the previous inline magic
// number with a named constant.
func (ir iteratorRenderer[T]) getBufferSize() int {
	if ir.bufferSize == 0 {
		return defaultIteratorBufferSize
	}
	return ir.bufferSize
}
// renderJson streams the iterator's items as one pretty-printed JSON array.
// The writer is flushed after every getBufferSize() items so partial output
// becomes visible before the entire collection has been fetched.
func (ir iteratorRenderer[T]) renderJson(ctx context.Context, w writeFlusher) error {
	// Iterators are always rendered as a list of resources in JSON.
	_, err := w.Write([]byte("[\n "))
	if err != nil {
		return err
	}
	for i := 0; ir.t.HasNext(ctx); i++ {
		// Comma separator before every element except the first.
		if i != 0 {
			_, err = w.Write([]byte(",\n "))
			if err != nil {
				return err
			}
		}
		n, err := ir.t.Next(ctx)
		if err != nil {
			return err
		}
		// Indent element fields so they line up under the opening bracket.
		res, err := json.MarshalIndent(n, " ", " ")
		if err != nil {
			return err
		}
		_, err = w.Write(res)
		if err != nil {
			return err
		}
		// Flush each full batch so output appears incrementally.
		if (i+1)%ir.getBufferSize() == 0 {
			err = w.Flush()
			if err != nil {
				return err
			}
		}
	}
	_, err = w.Write([]byte("\n]\n"))
	if err != nil {
		return err
	}
	// Final flush covers the trailing partial batch and the closing bracket.
	return w.Flush()
}
// renderTemplate renders the iterator's items through the given template,
// executing and flushing once per full batch of getBufferSize() items so
// output appears incrementally. A trailing partial batch is executed before
// the final flush.
func (ir iteratorRenderer[T]) renderTemplate(ctx context.Context, t *template.Template, w *tabwriter.Writer) error {
	batch := make([]any, 0, ir.getBufferSize())

	// emitBatch renders the current batch, clears it, and pushes the
	// rendered bytes through the tabwriter.
	emitBatch := func() error {
		if err := t.Execute(w, batch); err != nil {
			return err
		}
		batch = batch[:0]
		return w.Flush()
	}

	for ir.t.HasNext(ctx) {
		item, err := ir.t.Next(ctx)
		if err != nil {
			return err
		}
		batch = append(batch, item)
		if len(batch) == cap(batch) {
			if err := emitBatch(); err != nil {
				return err
			}
		}
	}

	// Render whatever is left over from the last (partial) batch.
	if len(batch) > 0 {
		if err := t.Execute(w, batch); err != nil {
			return err
		}
	}
	return w.Flush()
}
// defaultRenderer renders an arbitrary value, either as colorized JSON or
// through a text template (the pre-iterator rendering behavior).
type defaultRenderer struct {
	t any
}
func (d defaultRenderer) renderJson(_ context.Context, w writeFlusher) error {
pretty, err := fancyJSON(d.t)
if err != nil {
return err
}
@ -56,12 +176,126 @@ func renderJson(w io.Writer, v any) error {
return err
}
_, err = w.Write([]byte("\n"))
if err != nil {
return err
}
return w.Flush()
}
func renderTemplate(w io.Writer, tmpl string, v any) error {
// renderTemplate executes the template against the wrapped value and writes
// the result to the tabwriter.
func (d defaultRenderer) renderTemplate(_ context.Context, t *template.Template, w *tabwriter.Writer) error {
	return t.Execute(w, d.t)
}
// newRenderer picks the renderer implementation for t: readers get the
// text-only readerRenderer, everything else the defaultRenderer.
//
// The returned value implements one or more of:
//   - jsonRenderer
//   - textRenderer
//   - templateRenderer
func newRenderer(t any) any {
	switch v := t.(type) {
	case io.Reader:
		return readerRenderer{reader: v}
	default:
		return defaultRenderer{t: v}
	}
}
// newIteratorRenderer wraps an SDK listing iterator in an iteratorRenderer
// with the default buffer size.
func newIteratorRenderer[T any](i listing.Iterator[T]) iteratorRenderer[T] {
	return iteratorRenderer[T]{t: i}
}
type bufferedFlusher struct {
w io.Writer
b *bytes.Buffer
}
func (b bufferedFlusher) Write(bs []byte) (int, error) {
return b.b.Write(bs)
}
func (b bufferedFlusher) Flush() error {
_, err := b.w.Write(b.b.Bytes())
if err != nil {
return err
}
b.b.Reset()
return nil
}
// newBufferedFlusher wraps w in a bufferedFlusher with a fresh buffer.
func newBufferedFlusher(w io.Writer) writeFlusher {
	return bufferedFlusher{
		w: w,
		b: &bytes.Buffer{},
	}
}
// renderWithTemplate dispatches to whichever renderer interface r implements
// that matches the requested output format. JSON output requires a
// jsonRenderer; text output prefers a templateRenderer (when a template is
// configured), then a textRenderer, then falls back to JSON.
func renderWithTemplate(r any, ctx context.Context, outputFormat flags.Output, w io.Writer, headerTemplate, template string) error {
	// TODO: add terminal width & white/dark theme detection
	switch outputFormat {
	case flags.OutputJSON:
		jr, ok := r.(jsonRenderer)
		if !ok {
			return errors.New("json output not supported")
		}
		return jr.renderJson(ctx, newBufferedFlusher(w))
	case flags.OutputText:
		if template != "" {
			if tr, ok := r.(templateRenderer); ok {
				return renderUsingTemplate(ctx, tr, w, headerTemplate, template)
			}
		}
		if tr, ok := r.(textRenderer); ok {
			return tr.renderText(ctx, w)
		}
		if jr, ok := r.(jsonRenderer); ok {
			return jr.renderJson(ctx, newBufferedFlusher(w))
		}
		return errors.New("no renderer defined")
	default:
		return fmt.Errorf("invalid output format: %s", outputFormat)
	}
}
// listingInterface matches any listing.Iterator regardless of its type
// parameter. It exists because Go cannot type-switch on a generic type with
// an open type parameter; the Render* entry points use it to detect
// iterators passed to the wrong function.
type listingInterface interface {
	HasNext(context.Context) bool
}
// Render renders v to the command's output using its configured output
// format and templates. Iterators must use RenderIterator so the streaming
// renderer is used; passing one here is a programming error.
func Render(ctx context.Context, v any) error {
	c := fromContext(ctx)
	if _, ok := v.(listingInterface); ok {
		panic("use RenderIterator instead")
	}
	return renderWithTemplate(newRenderer(v), ctx, c.outputFormat, c.out, c.headerTemplate, c.template)
}
// RenderIterator renders an SDK listing iterator to the command's output,
// fetching pages lazily so output appears before the whole collection has
// been listed.
func RenderIterator[T any](ctx context.Context, i listing.Iterator[T]) error {
	c := fromContext(ctx)
	return renderWithTemplate(newIteratorRenderer(i), ctx, c.outputFormat, c.out, c.headerTemplate, c.template)
}
// RenderWithTemplate renders v with the given header and row templates,
// overriding the templates configured on the command. Iterators must use
// RenderIteratorWithTemplate instead.
func RenderWithTemplate(ctx context.Context, v any, headerTemplate, template string) error {
	c := fromContext(ctx)
	if _, ok := v.(listingInterface); ok {
		panic("use RenderIteratorWithTemplate instead")
	}
	return renderWithTemplate(newRenderer(v), ctx, c.outputFormat, c.out, headerTemplate, template)
}
// RenderIteratorWithTemplate renders an iterator with the given header and
// row templates, overriding the templates configured on the command.
func RenderIteratorWithTemplate[T any](ctx context.Context, i listing.Iterator[T], headerTemplate, template string) error {
	c := fromContext(ctx)
	return renderWithTemplate(newIteratorRenderer(i), ctx, c.outputFormat, c.out, headerTemplate, template)
}
// RenderJson renders v as JSON regardless of the command's configured
// output format. Iterators must use RenderIteratorJson instead.
func RenderJson(ctx context.Context, v any) error {
	c := fromContext(ctx)
	if _, ok := v.(listingInterface); ok {
		panic("use RenderIteratorJson instead")
	}
	return renderWithTemplate(newRenderer(v), ctx, flags.OutputJSON, c.out, c.headerTemplate, c.template)
}
// RenderIteratorJson renders the iterator as JSON regardless of the
// command's configured output format, mirroring RenderJson for iterators.
func RenderIteratorJson[T any](ctx context.Context, i listing.Iterator[T]) error {
	c := fromContext(ctx)
	// Force JSON output to match RenderJson's contract. Previously this
	// passed c.outputFormat, so with text output the function rendered the
	// configured template instead of JSON, contradicting its name.
	return renderWithTemplate(newIteratorRenderer(i), ctx, flags.OutputJSON, c.out, c.headerTemplate, c.template)
}
func renderUsingTemplate(ctx context.Context, r templateRenderer, w io.Writer, headerTmpl, tmpl string) error {
tw := tabwriter.NewWriter(w, 0, 4, 2, ' ', 0)
t, err := template.New("command").Funcs(template.FuncMap{
base := template.New("command").Funcs(template.FuncMap{
// we render colored output if stdout is TTY, otherwise we render text.
// in the future we'll check if we can explicitly check for stderr being
// a TTY
@ -116,11 +350,24 @@ func renderTemplate(w io.Writer, tmpl string, v any) error {
}
return string(out), nil
},
}).Parse(tmpl)
})
if headerTmpl != "" {
headerT, err := base.Parse(headerTmpl)
if err != nil {
return err
}
err = t.Execute(tw, v)
err = headerT.Execute(tw, nil)
if err != nil {
return err
}
tw.Write([]byte("\n"))
// Do not flush here. Instead, allow the first 100 resources to determine the initial spacing of the header columns.
}
t, err := base.Parse(tmpl)
if err != nil {
return err
}
err = r.renderTemplate(ctx, t, tw)
if err != nil {
return err
}

190
libs/cmdio/render_test.go Normal file
View File

@ -0,0 +1,190 @@
package cmdio
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"testing"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/listing"
"github.com/databricks/databricks-sdk-go/service/provisioning"
"github.com/stretchr/testify/assert"
)
// testCase describes a single rendering scenario for TestRender.
type testCase struct {
	name           string       // subtest name
	v              any          // value handed to Render / RenderIterator
	outputFormat   flags.Output // requested output format (text or JSON)
	headerTemplate string       // optional header template
	template       string       // optional row template
	expected       string       // expected rendered output (when no error)
	errMessage     string       // expected error substring (when non-empty)
}
// Two fixed workspaces used as alternating fixtures throughout these tests.
var dummyWorkspace1 = provisioning.Workspace{
	WorkspaceId:   123,
	WorkspaceName: "abc",
}

var dummyWorkspace2 = provisioning.Workspace{
	WorkspaceId:   456,
	WorkspaceName: "def",
}
// dummyIterator is an in-memory listing.Iterator over workspaces, used to
// exercise the iterator rendering paths without any API calls.
type dummyIterator struct {
	items []*provisioning.Workspace
}

// HasNext reports whether any items remain.
func (d *dummyIterator) HasNext(_ context.Context) bool {
	return len(d.items) > 0
}

// Next pops and returns the first remaining item, or an error once the
// iterator is exhausted.
func (d *dummyIterator) Next(ctx context.Context) (*provisioning.Workspace, error) {
	if len(d.items) == 0 {
		return nil, errors.New("no more items")
	}
	var head *provisioning.Workspace
	head, d.items = d.items[0], d.items[1:]
	return head, nil
}
// makeWorkspaces returns count workspaces alternating between
// dummyWorkspace1 and dummyWorkspace2 (1, 2, 1, 2, ...).
func makeWorkspaces(count int) []*provisioning.Workspace {
	pool := []*provisioning.Workspace{&dummyWorkspace1, &dummyWorkspace2}
	res := make([]*provisioning.Workspace, count)
	for i := range res {
		res[i] = pool[i%len(pool)]
	}
	return res
}
// makeIterator returns an in-memory iterator over count alternating
// workspaces (see makeWorkspaces).
func makeIterator(count int) listing.Iterator[*provisioning.Workspace] {
	// makeWorkspaces already returns a freshly allocated slice, so the
	// previous make + append copy into a second slice was redundant.
	return &dummyIterator{
		items: makeWorkspaces(count),
	}
}
// makeBigOutput returns the expected plain-text rendering of count
// workspaces: one "<id> <name>" line per workspace.
func makeBigOutput(count int) string {
	res := bytes.Buffer{}
	for _, ws := range makeWorkspaces(count) {
		// Fprintf writes straight into the buffer, avoiding the previous
		// Sprintf -> []byte -> Write round trip per line.
		fmt.Fprintf(&res, "%d %s\n", ws.WorkspaceId, ws.WorkspaceName)
	}
	return res.String()
}
func must[T any](a T, e error) T {
if e != nil {
panic(e)
}
return a
}
// testCases covers each renderer implementation (default, iterator, reader)
// across text output with/without header and row templates, the JSON
// fallback when no template is configured, and the reader error path.
var testCases = []testCase{
	{
		name:           "Workspace with header and template",
		v:              dummyWorkspace1,
		outputFormat:   flags.OutputText,
		headerTemplate: "id\tname",
		template:       "{{.WorkspaceId}}\t{{.WorkspaceName}}",
		expected: `id name
123 abc`,
	},
	{
		name:         "Workspace with no header and template",
		v:            dummyWorkspace1,
		outputFormat: flags.OutputText,
		template:     "{{.WorkspaceId}}\t{{.WorkspaceName}}",
		expected:     `123 abc`,
	},
	{
		// No template configured: text output falls back to JSON.
		name:         "Workspace with no header and no template",
		v:            dummyWorkspace1,
		outputFormat: flags.OutputText,
		expected: `{
"workspace_id":123,
"workspace_name":"abc"
}
`,
	},
	{
		name:           "Workspace Iterator with header and template",
		v:              makeIterator(2),
		outputFormat:   flags.OutputText,
		headerTemplate: "id\tname",
		template:       "{{range .}}{{.WorkspaceId}}\t{{.WorkspaceName}}\n{{end}}",
		expected: `id name
123 abc
456 def
`,
	},
	{
		name:         "Workspace Iterator with no header and template",
		v:            makeIterator(2),
		outputFormat: flags.OutputText,
		template:     "{{range .}}{{.WorkspaceId}}\t{{.WorkspaceName}}\n{{end}}",
		expected: `123 abc
456 def
`,
	},
	{
		name:         "Workspace Iterator with no header and no template",
		v:            makeIterator(2),
		outputFormat: flags.OutputText,
		expected:     string(must(json.MarshalIndent(makeWorkspaces(2), "", " "))) + "\n",
	},
	{
		// More items than the buffer size: exercises batched template rendering.
		name:           "Big Workspace Iterator with template",
		v:              makeIterator(234),
		outputFormat:   flags.OutputText,
		headerTemplate: "id\tname",
		template:       "{{range .}}{{.WorkspaceId}}\t{{.WorkspaceName}}\n{{end}}",
		expected:       "id name\n" + makeBigOutput(234),
	},
	{
		// More items than the buffer size: exercises batched JSON rendering.
		name:         "Big Workspace Iterator with no template",
		v:            makeIterator(234),
		outputFormat: flags.OutputText,
		expected:     string(must(json.MarshalIndent(makeWorkspaces(234), "", " "))) + "\n",
	},
	{
		name:         "io.Reader",
		v:            strings.NewReader("a test"),
		outputFormat: flags.OutputText,
		expected:     "a test",
	},
	{
		// Renamed from "io.Reader" so the two reader cases have distinct
		// subtest names instead of relying on t.Run's "#01" suffixing.
		name:         "io.Reader with JSON output",
		v:            strings.NewReader("a test"),
		outputFormat: flags.OutputJSON,
		errMessage:   "json output not supported",
	},
}
// TestRender runs every test case through the public entry points —
// RenderIterator for iterator values, Render for everything else — and
// asserts either the rendered output or the expected error.
func TestRender(t *testing.T) {
	for _, c := range testCases {
		t.Run(c.name, func(t *testing.T) {
			output := &bytes.Buffer{}
			// out and err share one buffer; the assertions below only look
			// at the rendered output.
			cmdIO := NewIO(c.outputFormat, nil, output, output, c.headerTemplate, c.template)
			ctx := InContext(context.Background(), cmdIO)
			var err error
			// Go cannot type-switch on a generic interface with an open type
			// parameter, so match the concrete instantiation these tests use.
			if vv, ok := c.v.(listing.Iterator[*provisioning.Workspace]); ok {
				err = RenderIterator(ctx, vv)
			} else {
				err = Render(ctx, c.v)
			}
			if c.errMessage != "" {
				assert.ErrorContains(t, err, c.errMessage)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, c.expected, output.String())
			}
		})
	}
}

View File

@ -115,7 +115,7 @@ func TestFirstCompatibleCluster(t *testing.T) {
w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg)))
ctx := context.Background()
ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "..."))
ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "..."))
clusterID, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1"))
require.NoError(t, err)
require.Equal(t, "bcd-id", clusterID)
@ -162,7 +162,7 @@ func TestNoCompatibleClusters(t *testing.T) {
w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg)))
ctx := context.Background()
ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "..."))
ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "..."))
_, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1"))
require.Equal(t, ErrNoCompatibleClusters, err)
}

View File

@ -111,7 +111,7 @@ func TestWorkspaceHost(t *testing.T) {
func TestWorkspaceHostNotConfigured(t *testing.T) {
ctx := context.Background()
cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "template")
cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "template")
ctx = cmdio.InContext(ctx, cmd)
tmpDir := t.TempDir()