package root

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"os"
	"os/exec"
	"runtime"
	"strings"
	"time"

	"github.com/databricks/cli/internal/build"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/dbr"
	"github.com/databricks/cli/libs/env"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/cli/libs/telemetry"
	"github.com/databricks/cli/libs/telemetry/protos"

	"github.com/spf13/cobra"
)

func New(ctx context.Context) *cobra.Command {
	cmd := &cobra.Command{
		Use:     "databricks",
		Short:   "Databricks CLI",
		Version: build.GetInfo().Version,

		// Cobra prints the usage string to stderr if a command returns an error.
		// This usage string should only be displayed if an invalid combination of flags
		// is specified, and not when runtime errors occur (e.g. resource not found).
		// The usage string is included in [flagErrorFunc] for flag errors only.
		SilenceUsage: true,

		// Silence error printing by cobra. Errors are printed through cmdio.
		SilenceErrors: true,
	}
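
	// Illustrative note (not part of the original setup): without SilenceUsage
	// and SilenceErrors, cobra would print the error followed by the full usage
	// string to stderr for every failing command, e.g.:
	//
	//	Error: unable to locate bundle root: bundle.yml not found
	//	Usage:
	//	  databricks [command]
	//	  ...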

	// Pass the context along through the command during initialization.
	// It will be overwritten when the command is executed.
	cmd.SetContext(ctx)

	// Initialize flags
	logFlags := initLogFlags(cmd)
	progressLoggerFlag := initProgressLoggerFlag(cmd, logFlags)
	outputFlag := initOutputFlag(cmd)
	initProfileFlag(cmd)
	initEnvironmentFlag(cmd)
	initTargetFlag(cmd)

	cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		// Configure default logger.
		ctx, err := logFlags.initializeContext(ctx)
		if err != nil {
			return err
		}

		logger := log.GetLogger(ctx)
		logger.Info("start",
			slog.String("version", build.GetInfo().Version),
			slog.String("args", strings.Join(os.Args, ", ")))
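
		// With --log-level=info and --log-file=stderr, the line above renders
		// roughly as follows (illustrative; the exact attributes depend on the
		// configured slog handler):
		//
		//	time=... level=INFO msg=start version=0.0.0-dev+... args="databricks, version"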

		// Configure progress logger
		ctx, err = progressLoggerFlag.initializeContext(ctx)
		if err != nil {
			return err
		}

		// Set the context so that initializeIO can use the current context.
		cmd.SetContext(ctx)

		// Configure command IO
		err = outputFlag.initializeIO(cmd)
		if err != nil {
			return err
		}

		// Get the context back.
		ctx = cmd.Context()

		// Configure our user agent with the command that's about to be executed.
		ctx = withCommandInUserAgent(ctx, cmd)
		ctx = withCommandExecIdInUserAgent(ctx)
		ctx = withUpstreamInUserAgent(ctx)
		cmd.SetContext(ctx)

		return nil
	}

	cmd.SetFlagErrorFunc(flagErrorFunc)
	cmd.SetVersionTemplate("Databricks CLI v{{.Version}}\n")
	return cmd
}

// Wrap flag errors to include the usage string.
func flagErrorFunc(c *cobra.Command, err error) error {
	return fmt.Errorf("%w\n\n%s", err, c.UsageString())
}
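
// Illustrative example (assuming a typical cobra usage string): a flag error
// such as "unknown flag: --foo" comes back from this function as the original
// error, a blank line, and the usage string:
//
//	unknown flag: --foo
//
//	Usage:
//	  databricks [command]
//	  ...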

// TODO CONTINUE: This setup should mostly work. There are a couple of open questions:
// 4. I can add a waiting mode and print the output from the telemetry-worker
// command in the root.Execute method here to see whether the expected output matches.

// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
//
// TODO: The test runner also relies on this function. Create a separate function to
// avoid logging telemetry in our testcli runner.
func Execute(ctx context.Context, cmd *cobra.Command) error {
	ctx = telemetry.WithNewLogger(ctx)
	ctx = dbr.DetectRuntime(ctx)
	start := time.Now()

	// Run the command
	cmd, cmdErr := cmd.ExecuteContextC(ctx)
	if cmdErr != nil && !errors.Is(cmdErr, ErrAlreadyPrinted) {
		// If cmdio logger initialization succeeded, this logs with the
		// initialized cmdio logger; otherwise with the default cmdio logger.
		cmdio.LogError(cmd.Context(), cmdErr)
	}
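
	// ErrAlreadyPrinted is assumed to be a sentinel error defined elsewhere in
	// this package: errors wrapped with it have already been rendered to the
	// user, so they are deliberately not printed a second time here.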

	// Log exit status and error. We only log if logger initialization
	// succeeded and the logger is stored in the command context.
	if logger, ok := log.FromContext(cmd.Context()); ok {
		if cmdErr == nil {
			logger.Info("completed execution",
				slog.String("exit_code", "0"))
		} else {
			logger.Error("failed execution",
				slog.String("exit_code", "1"),
				slog.String("error", cmdErr.Error()))
		}
	}

	end := time.Now()

	exitCode := 0
	if cmdErr != nil {
		exitCode = 1
	}

	if env.Get(ctx, telemetry.SkipEnvVar) != "true" {
		logTelemetry(ctx, commandString(cmd), start, end, exitCode)
	}

	return cmdErr
}
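
// Telemetry can be skipped for a single invocation by setting the skip
// variable in the environment, e.g. (illustrative; the variable name is the
// one referenced in logTelemetry below):
//
//	DATABRICKS_CLI_SKIP_TELEMETRY=true databricks version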

// TODO: Do not log for integration tests using the CLI.
// TODO: Skip telemetry if the credentials are invalid.
func logTelemetry(ctx context.Context, cmdStr string, start, end time.Time, exitCode int) {
	telemetry.SetExecutionContext(ctx, protos.ExecutionContext{
		CmdExecID:       cmdExecId,
		Version:         build.GetInfo().Version,
		Command:         cmdStr,
		OperatingSystem: runtime.GOOS,
		DbrVersion:      env.Get(ctx, dbr.EnvVarName),
		FromWebTerminal: isWebTerminal(ctx),
		ExecutionTimeMs: end.Sub(start).Milliseconds(),
		ExitCode:        int64(exitCode),
	})
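
	// Illustrative shape of the execution context recorded above, for a failed
	// `bundle deploy` on macOS (field names follow the Go struct above; the
	// wire format may differ):
	//
	//	{Command: "bundle deploy", OperatingSystem: "darwin",
	//	 ExecutionTimeMs: 1234, ExitCode: 1, ...}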

	// TODO: Better check?
	// Do not log telemetry for the telemetry-worker command itself to avoid fork bombs.
	if cmdStr == "telemetry-worker" {
		return
	}

	execPath, err := os.Executable()
	if err != nil {
		log.Debugf(ctx, "failed to get executable path: %s", err)
		return
	}
	telemetryCmd := exec.Command(execPath, "telemetry-worker")

	// TODO: Add a test that ensures that the context key for CLI commands stores a
	// resolved auth configuration.
	// TODO: Add a test that the worker inherits the environment variables from the
	// parent process.
	in := telemetry.WorkerInput{
		AuthConfig: ConfigUsed(ctx),
		Logs:       telemetry.GetLogs(ctx),
	}

	// Nothing to upload; skip spawning the worker.
	if len(in.Logs) == 0 {
		return
	}

	b, err := json.Marshal(in)
	if err != nil {
		log.Debugf(ctx, "failed to marshal telemetry logs: %s", err)
		return
	}

	stdin, err := telemetryCmd.StdinPipe()
	if err != nil {
		log.Debugf(ctx, "failed to create stdin pipe for telemetry worker: %s", err)
		return
	}

	stdout, err := telemetryCmd.StdoutPipe()
	if err != nil {
		log.Debugf(ctx, "failed to create stdout pipe for telemetry worker: %s", err)
		return
	}

	// Set DATABRICKS_CLI_SKIP_TELEMETRY to true to ensure that the telemetry worker
	// command does not accidentally call itself and cause a fork bomb. This can
	// happen if a change starts logging telemetry in the telemetry worker command's
	// code path. Note that the environment must be assigned before Start; changes
	// made after the process has started have no effect.
	telemetryCmd.Env = os.Environ()
	telemetryCmd.Env = append(telemetryCmd.Env, telemetry.SkipEnvVar+"=true")

	err = telemetryCmd.Start()
	if err != nil {
		log.Debugf(ctx, "failed to start telemetry worker: %s", err)
		return
	}
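
	// Because the worker inherits the full parent environment plus the skip
	// flag, a worker that somehow re-enters Execute hits the SkipEnvVar check
	// there and does not spawn another worker.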

	// Send the worker its input on stdin; closing the pipe signals EOF to the worker.
	_, err = stdin.Write(b)
	if err != nil {
		log.Debugf(ctx, "failed to write to telemetry worker: %s", err)
	}

	err = stdin.Close()
	if err != nil {
		log.Debugf(ctx, "failed to close stdin for telemetry worker: %s", err)
	}

	// This is only meant for testing purposes, to do assertions on the output
	// of the telemetry worker command.
	if env.Get(ctx, telemetry.BlockOnUploadEnvVar) == "true" {
		// Read the worker's output before calling Wait: Wait closes the stdout
		// pipe once the process exits, so all reads must complete first.
		out, err := io.ReadAll(stdout)
		if err != nil {
			log.Debugf(ctx, "failed to read telemetry worker output: %s", err)
		}

		err = telemetryCmd.Wait()
		if err != nil {
			log.Debugf(ctx, "failed to wait for telemetry worker: %s", err)
		}

		cmdio.LogString(ctx, "telemetry-worker output:")
		cmdio.LogString(ctx, string(out))
	}
}
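
// Illustrative sketch of the entrypoint wiring described above (the actual
// main package may differ; names here are assumptions):
//
//	func main() {
//		ctx := context.Background()
//		err := root.Execute(ctx, root.New(ctx))
//		if err != nil {
//			os.Exit(1)
//		}
//	}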