package project

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"os"
	"strings"

	"github.com/databricks/cli/cmd/labs/github"
	"github.com/databricks/cli/cmd/labs/unpack"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/databrickscfg/cfgpickers"
	"github.com/databricks/cli/libs/databrickscfg/profile"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/cli/libs/process"
	"github.com/databricks/cli/libs/python"
	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/sql"
	"github.com/fatih/color"
	"github.com/spf13/cobra"
)
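// ownerRWXworldRX is the directory permission mask (rwxr-xr-x) used when the
// installer creates folders on the local filesystem.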
const ownerRWXworldRX = 0o755
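// whTypes is the list of SQL warehouse types a project declares as acceptable
// when it needs a warehouse to run against.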
type whTypes []sql.EndpointInfoWarehouseType
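// hook describes a lifecycle script declared by a labs project, such as an
// install hook. The field names below mirror the YAML tags; the snippet is an
// illustrative sketch only (the `install:` key and all values are hypothetical
// here, and the authoritative schema lives with the Entrypoint/Project
// definitions in this package):
//
//	install:
//	  script: install.py
//	  min_runtime_version: "13.1"
//	  require_databricks_connect: true
//	  warehouse_types:
//	    - PRO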
type hook struct {
	*Entrypoint              `yaml:",inline"`
	Script                   string  `yaml:"script"`
	RequireDatabricksConnect bool    `yaml:"require_databricks_connect,omitempty"`
	MinRuntimeVersion        string  `yaml:"min_runtime_version,omitempty"`
	WarehouseTypes           whTypes `yaml:"warehouse_types,omitempty"`
}
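// RequireRunningCluster reports whether the hook (via its entrypoint) needs a
// running cluster to execute.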
func (h *hook) RequireRunningCluster() bool {
	if h.Entrypoint == nil {
		return false
	}
	return h.Entrypoint.RequireRunningCluster
}
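// HasPython reports whether the hook's script is a Python file.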
func (h *hook) HasPython() bool {
	return strings.HasSuffix(h.Script, ".py")
}
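// runHook executes the hook's script (if any) in the project's effective
// library directory, forwarding the command's standard streams and the
// prepared environment. Python scripts are run with the project's virtual
// environment interpreter.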
func (h *hook) runHook(cmd *cobra.Command) error {
	if h.Script == "" {
		return nil
	}
	ctx := cmd.Context()
	envs, err := h.Prepare(cmd)
	if err != nil {
		return fmt.Errorf("prepare: %w", err)
	}
	libDir := h.EffectiveLibDir()
	args := []string{}
	if strings.HasSuffix(h.Script, ".py") {
		args = append(args, h.virtualEnvPython(ctx))
	}
	return process.Forwarded(ctx,
		append(args, h.Script),
		cmd.InOrStdin(),
		cmd.OutOrStdout(),
		cmd.ErrOrStderr(),
		process.WithDir(libDir),
		process.WithEnvs(envs))
}
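// installer ties a Project to the version being installed and to the Cobra
// command that invoked the installation.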
type installer struct {
	*Project
	version string

	// command instance is used for:
	// - auth profile flag override
	// - standard input, output, and error streams
	cmd *cobra.Command
}
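// Install performs a clean installation of the project: it ensures the local
// folder layout exists, logs in (falling back to an environment-aware config
// when no profiles are configured), downloads the library, sets up a Python
// virtual environment if needed, records the installed version, and finally
// runs the project's install hook.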
func (i *installer) Install(ctx context.Context) error {
	err := i.EnsureFoldersExist()
	if err != nil {
		return fmt.Errorf("folders: %w", err)
	}
	i.folder, err = PathInLabs(ctx, i.Name)
	if err != nil {
		return err
	}
	w, err := i.login(ctx)
	if err != nil && errors.Is(err, profile.ErrNoConfiguration) {
		cfg, err := i.Installer.envAwareConfig(ctx)
		if err != nil {
			return err
		}
		w, err = databricks.NewWorkspaceClient((*databricks.Config)(cfg))
		if err != nil {
			return fmt.Errorf("no ~/.databrickscfg: %w", err)
		}
	} else if err != nil {
		return fmt.Errorf("login: %w", err)
	}
	err = i.downloadLibrary(ctx)
	if err != nil {
		return fmt.Errorf("lib: %w", err)
	}
	err = i.setupPythonVirtualEnvironment(ctx, w)
	if err != nil {
		return fmt.Errorf("python: %w", err)
	}
	err = i.recordVersion(ctx)
	if err != nil {
		return fmt.Errorf("record version: %w", err)
	}
	// TODO: a failing install hook for "clean installations" (not upgrades)
	// should trigger removal of the project, otherwise users end up with
	// misconfigured CLIs
	err = i.runInstallHook(ctx)
	if err != nil {
		return fmt.Errorf("installer: %w", err)
	}
	return nil
}
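// Upgrade refreshes an existing installation: it re-downloads the library,
// records the new version, re-runs the install hook, and reinstalls the
// project's Python dependencies.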
func (i *installer) Upgrade(ctx context.Context) error {
	err := i.downloadLibrary(ctx)
	if err != nil {
		return fmt.Errorf("lib: %w", err)
	}
	err = i.recordVersion(ctx)
	if err != nil {
		return fmt.Errorf("record version: %w", err)
	}
	err = i.runInstallHook(ctx)
	if err != nil {
		return fmt.Errorf("installer: %w", err)
	}
	err = i.installPythonDependencies(ctx, ".")
	if err != nil {
		return fmt.Errorf("python dependencies: %w", err)
	}
	return nil
}
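// warningf prints a yellow-colored warning message to the command's error stream.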
func (i *installer) warningf(text string, v ...any) {
	i.cmd.PrintErrln(color.YellowString(text, v...))
}
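// cleanupLib removes any previously installed copy of the library and
// recreates an empty library directory.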
func (i *installer) cleanupLib(ctx context.Context) error {
	libDir := i.LibDir()
	err := os.RemoveAll(libDir)
	if err != nil {
		return fmt.Errorf("remove all: %w", err)
	}
	return os.MkdirAll(libDir, ownerRWXworldRX)
}
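// recordVersion persists the installed version to the project's version file.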
func (i *installer) recordVersion(ctx context.Context) error {
	return i.writeVersionFile(ctx, i.version)
}
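// login resolves a workspace client for the installation. In non-interactive
// mode it skips prompting entirely; otherwise it validates the login config
// from the meta entrypoint (falling back to an environment-aware config),
// rejects account-level clients for projects without account-level commands,
// asks the user to pick a workspace, and saves the resulting login config.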
func (i *installer) login(ctx context.Context) (*databricks.WorkspaceClient, error) {
	if !cmdio.IsPromptSupported(ctx) {
		log.Debugf(ctx, "Skipping workspace profile prompts in non-interactive mode")
		return nil, nil
	}
	cfg, err := i.metaEntrypoint(ctx).validLogin(i.cmd)
	if errors.Is(err, ErrNoLoginConfig) {
		cfg, err = i.Installer.envAwareConfig(ctx)
		if err != nil {
			return nil, err
		}
	} else if err != nil {
		return nil, fmt.Errorf("valid: %w", err)
	}
	if !i.HasAccountLevelCommands() && cfg.IsAccountClient() {
		return nil, fmt.Errorf("got account-level client, but no account-level commands")
	}
	lc := &loginConfig{Entrypoint: i.Installer.Entrypoint}
	w, err := lc.askWorkspace(ctx, cfg)
	if err != nil {
		return nil, fmt.Errorf("ask for workspace: %w", err)
	}
	err = lc.save(ctx)
	if err != nil {
		return nil, fmt.Errorf("save: %w", err)
	}
	return w, nil
}
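// downloadLibrary cleans up any previous installation and downloads the
// project's library into the library directory. Only GitHub zipballs are
// supported at the moment.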
func (i *installer) downloadLibrary(ctx context.Context) error {
	feedback := cmdio.Spinner(ctx)
	defer close(feedback)
	feedback <- "Cleaning up previous installation if necessary"
	err := i.cleanupLib(ctx)
	if err != nil {
		return fmt.Errorf("cleanup: %w", err)
	}
	libTarget := i.LibDir()
	// we may support wheels, jars, and Go binaries in the future, but those are not zipballs
	if i.IsZipball() {
		feedback <- fmt.Sprintf("Downloading and unpacking zipball for %s", i.version)
		return i.downloadAndUnpackZipball(ctx, libTarget)
	}
	return fmt.Errorf("we only support zipballs for now")
}
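// downloadAndUnpackZipball fetches the release zipball for the project from
// the databrickslabs GitHub organization and unpacks it into the library
// target directory.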
func (i *installer) downloadAndUnpackZipball(ctx context.Context, libTarget string) error {
	raw, err := github.DownloadZipball(ctx, "databrickslabs", i.Name, i.version)
	if err != nil {
		return fmt.Errorf("download zipball from GitHub: %w", err)
	}
	zipball := unpack.GitHubZipball{Reader: bytes.NewBuffer(raw)}
	log.Debugf(ctx, "Unpacking zipball to: %s", libTarget)
	return zipball.UnpackTo(libTarget)
}
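// setupPythonVirtualEnvironment prepares a dedicated virtual environment for a
// Python-based project. It picks an installed interpreter that satisfies the
// project's minimum Python version, creates a venv (roughly `python -m venv
// <venv-path>`), optionally pins databricks-connect to the runtime version of
// the configured cluster, and installs the library's own dependencies.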
func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databricks.WorkspaceClient) error {
	if !i.HasPython() {
		return nil
	}
	feedback := cmdio.Spinner(ctx)
	defer close(feedback)
	feedback <- "Detecting all installed Python interpreters on the system"
	pythonInterpreters, err := python.DetectInterpreters(ctx)
	if err != nil {
		return fmt.Errorf("detect: %w", err)
	}
	py, err := pythonInterpreters.AtLeast(i.MinPython)
	if err != nil {
		return fmt.Errorf("min version: %w", err)
	}
	log.Debugf(ctx, "Detected Python %s at: %s", py.Version, py.Path)
	venvPath := i.virtualEnvPath(ctx)
	log.Debugf(ctx, "Creating Python Virtual Environment at: %s", venvPath)
	feedback <- fmt.Sprintf("Creating Virtual Environment with Python %s", py.Version)
	_, err = process.Background(ctx, []string{py.Path, "-m", "venv", venvPath})
	if err != nil {
		return fmt.Errorf("create venv: %w", err)
	}
	if i.Installer != nil && i.Installer.RequireDatabricksConnect {
		feedback <- "Determining Databricks Connect version"
		cluster, err := w.Clusters.Get(ctx, compute.GetClusterRequest{
			ClusterId: w.Config.ClusterID,
		})
		if err != nil {
			return fmt.Errorf("cluster: %w", err)
		}
		runtimeVersion, ok := cfgpickers.GetRuntimeVersion(*cluster)
		if !ok {
			return fmt.Errorf("unsupported runtime: %s", cluster.SparkVersion)
		}
		feedback <- fmt.Sprintf("Installing Databricks Connect v%s", runtimeVersion)
		pipSpec := fmt.Sprintf("databricks-connect==%s", runtimeVersion)
		err = i.installPythonDependencies(ctx, pipSpec)
		if err != nil {
			return fmt.Errorf("dbconnect: %w", err)
		}
	}
	feedback <- "Installing Python library dependencies"
	return i.installPythonDependencies(ctx, ".")
}
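// installPythonDependencies runs `pip install <spec>` with the virtual
// environment's interpreter inside the library directory. It is a no-op for
// non-Python projects; the combined pip output is buffered and surfaced as a
// warning only if the installation fails.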
func (i *installer) installPythonDependencies(ctx context.Context, spec string) error {
	if !i.IsPythonProject() {
		return nil
	}
	libDir := i.LibDir()
	log.Debugf(ctx, "Installing Python dependencies for: %s", libDir)
	// maybe we'll need to call one of the two scripts:
	// - python3 -m ensurepip --default-pip
	// - curl https://bootstrap.pypa.io/get-pip.py | python3
	var buf bytes.Buffer
	_, err := process.Background(ctx,
		[]string{i.virtualEnvPython(ctx), "-m", "pip", "install", spec},
		process.WithCombinedOutput(&buf),
		process.WithDir(libDir))
	if err != nil {
		i.warningf(buf.String())
		return fmt.Errorf("failed to install dependencies of %s", spec)
	}
	return nil
}
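// runInstallHook runs the project's install hook, if one is declared.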
func (i *installer) runInstallHook(ctx context.Context) error {
	if i.Installer == nil {
		return nil
	}
	if i.Installer.Script == "" {
		return nil
	}
	log.Debugf(ctx, "Launching installer script %s in %s", i.Installer.Script, i.LibDir())
	return i.Installer.runHook(i.cmd)
}