databricks-cli/cmd/auth/env.go

package auth

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/fs"
	"net/http"
	"net/url"
	"strings"
"github.com/databricks/cli/libs/databrickscfg/profile"
"github.com/databricks/databricks-sdk-go/config"
"github.com/spf13/cobra"
"gopkg.in/ini.v1"
)
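
// canonicalHost normalizes a host value to the form "https://<host>".
// If url.Parse yields an empty host (for example, when the scheme was
// omitted), the raw input is used as the host portion instead.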
func canonicalHost(host string) (string, error) {
	parsedHost, err := url.Parse(host)
	if err != nil {
		return "", err
	}
	// If the host is empty, assume the scheme wasn't included.
	if parsedHost.Host == "" {
		return fmt.Sprintf("https://%s", host), nil
	}
	return fmt.Sprintf("https://%s", parsedHost.Host), nil
}
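
// ErrNoMatchingProfiles is returned by resolveSection when no profile in
// the config file declares a host matching the configured host.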
var ErrNoMatchingProfiles = errors.New("no matching profiles found")
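
// resolveSection returns the single profile section of the config file whose
// host matches cfg.Host. It returns ErrNoMatchingProfiles when no section
// matches, and an error naming the candidates when more than one does.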
func resolveSection(cfg *config.Config, iniFile *config.File) (*ini.Section, error) {
	var candidates []*ini.Section
	configuredHost, err := canonicalHost(cfg.Host)
	if err != nil {
		return nil, err
	}
	for _, section := range iniFile.Sections() {
		hash := section.KeysHash()
		host, ok := hash["host"]
		if !ok {
			// skip sections that don't set a host
			continue
		}
		canonical, err := canonicalHost(host)
		if err != nil {
			// we're fine with skipping otherwise corrupt profiles
			continue
		}
		if canonical != configuredHost {
			continue
		}
		candidates = append(candidates, section)
	}
	if len(candidates) == 0 {
		return nil, ErrNoMatchingProfiles
	}
	// In real situations we don't expect this to happen often
	// (if at all), hence we don't trim the list.
	if len(candidates) > 1 {
		var profiles []string
		for _, v := range candidates {
			profiles = append(profiles, v.Name())
		}
		return nil, fmt.Errorf("%s match %s in %s",
			strings.Join(profiles, " and "), cfg.Host, cfg.ConfigFile)
	}
	return candidates[0], nil
}
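
// loadFromDatabricksCfg sets cfg.Profile to the name of the profile in the
// user's .databrickscfg whose host matches cfg.Host. A missing config file
// or a host without a matching profile is not an error: cfg is left
// untouched and unified auth handling in the Go SDK takes over.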
func loadFromDatabricksCfg(ctx context.Context, cfg *config.Config) error {
	iniFile, err := profile.DefaultProfiler.Get(ctx)
	if errors.Is(err, fs.ErrNotExist) {
		// it's fine not to have ~/.databrickscfg
		return nil
	}
	if err != nil {
		return err
	}
	profile, err := resolveSection(cfg, iniFile)
	if err == ErrNoMatchingProfiles {
		// it's also fine for Azure CLI or Databricks CLI, which
		// are resolved by unified auth handling in the Go SDK.
		return nil
	}
	if err != nil {
		return err
	}
	cfg.Profile = profile.Name()
	return nil
}
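
// newEnvCommand returns the `databricks auth env` command. It resolves
// credentials for the given --host or --profile and prints the resulting
// authentication-related environment variables as a JSON object of the
// form {"env": {...}} (for example, DATABRICKS_HOST and related variables).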
func newEnvCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "env",
		Short: "Get env",
	}

	var host string
	var profile string
	cmd.Flags().StringVar(&host, "host", host, "Hostname to get auth env for")
	cmd.Flags().StringVar(&profile, "profile", profile, "Profile to get auth env for")

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		cfg := &config.Config{
			Host:    host,
			Profile: profile,
		}
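		// Precedence: an explicit --profile wins; with neither flag set,
		// fall back to the DEFAULT profile; with only --host set, try to
		// find a matching profile in the user's .databrickscfg.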
if profile != "" {
cfg.Profile = profile
} else if cfg.Host == "" {
cfg.Profile = "DEFAULT"
} else if err := loadFromDatabricksCfg(cmd.Context(), cfg); err != nil {
return err
}
// Go SDK is lazy loaded because of Terraform semantics,
// so we're creating a dummy HTTP request as a placeholder
// for headers.
r := &http.Request{Header: http.Header{}}
err := cfg.Authenticate(r.WithContext(cmd.Context()))
if err != nil {
return err
}
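
		// Authenticating forces the lazily-loaded config to resolve, so the
		// attribute values read below reflect the effective configuration.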
		vars := map[string]string{}
		for _, a := range config.ConfigAttributes {
			if a.IsZero(cfg) {
				continue
			}
			envValue := a.GetString(cfg)
			for _, envName := range a.EnvVars {
				vars[envName] = envValue
			}
		}

		raw, err := json.MarshalIndent(map[string]any{
			"env": vars,
		}, "", " ")
		if err != nil {
			return err
		}
		cmd.OutOrStdout().Write(raw)
		return nil
	}

	return cmd
}