Compare commits


3 Commits

Denis Bilenko 592474880d
Enable 'govet' linter; expand log/diag with non-f functions (#1996)
## Changes
Fix all the issues found by govet and enable the govet linter.

This prompted adding non-formatting variants of the logging functions
(Errorf -> Error).

## Tests
Existing tests.
2024-12-11 16:42:03 +00:00
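For context on the change above, here is a minimal sketch of the pattern the govet printf check flags and how the new non-formatting logging variants avoid it. The import path and the `logRunStatus` wrapper are illustrative assumptions rather than code from this change; only the `log.Info`/`log.Infof` signatures follow the diffs further down this page.

```go
package example

import (
	"context"
	"fmt"

	"github.com/databricks/cli/libs/log" // assumed path of the CLI's logging helpers
)

func logRunStatus(ctx context.Context, runID int64) {
	msg := fmt.Sprintf("run %d finished", runID) // arbitrary text that may contain '%'

	// govet's printf check flags `log.Infof(ctx, msg)`: msg is not a constant
	// format string, so any '%' inside it would be misread as a formatting verb.

	// The non-formatting variant introduced in this change logs the string as-is.
	log.Info(ctx, msg)

	// When formatting is actually needed, keep the format string constant.
	log.Infof(ctx, "run %d finished", runID)
}
```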
Lennart Kats (databricks) 2ee7d56ae6
Show an error when using a cluster override with 'mode: production' (#1994)
## Changes

We should show a warning when using a cluster override with 'mode:
production'. Right now, we inadvertently show an error for this state.
This is a follow-up based on
https://github.com/databricks/cli/pull/1899#discussion_r1877765148.
2024-12-11 14:57:31 +00:00
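As background for the fix above, a minimal sketch of the diagnostic in question, following the `libs/diag` types visible in the diffs below: when `Severity` is omitted it takes its zero value, which the CLI was surfacing as an error, so the mutator now sets `diag.Warning` explicitly. The standalone `clusterOverrideWarning` helper is illustrative only.

```go
package example

import "github.com/databricks/cli/libs/diag"

// clusterOverrideWarning mirrors the diagnostic emitted for a cluster override
// under 'mode: production'. Leaving Severity unset falls back to the zero
// value, which was reported as an error; diag.Warning downgrades it.
func clusterOverrideWarning() diag.Diagnostics {
	return diag.Diagnostics{{
		Summary:  "Setting a cluster override for a target that uses 'mode: production' is not recommended",
		Detail:   "It is recommended to always use the same compute for production target for consistency.",
		Severity: diag.Warning,
	}}
}
```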
Andrew Nester aa0b6080a4
Fixed TestAccBundleDeployUcSchema test (#1997)
## Changes
It was failing because when schema.yml was removed, the `catalog: main`
option was left set in the base pipeline definition in databricks.yml,
which led to an incorrect config (empty target schema). See
https://github.com/databricks/cli/pull/1413

The backend behaviour changed and DLT pipelines stopped accepting empty
targets, leading to an error that was previously ignored.

## Tests
```
--- PASS: TestAccBundleDeployUcSchema (41.75s)
PASS
coverage: 33.3% of statements in ./...
ok      github.com/databricks/cli/internal/bundle       42.210s coverage: 33.3% of statements in ./...
```
2024-12-11 14:00:43 +00:00
15 changed files with 68 additions and 18 deletions

View File

@@ -4,12 +4,17 @@ linters:
   - bodyclose
   - errcheck
   - gosimple
-  #- govet
+  - govet
   - ineffassign
   - staticcheck
   - unused
   - gofmt
 linters-settings:
+  govet:
+    enable-all: true
+    disable:
+      - fieldalignment
+      - shadow
   gofmt:
     rewrite-rules:
       - pattern: 'a[b:len(a)]'

View File

@@ -42,7 +42,7 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
 		return nil
 	}
 
-	log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.BundleRootPath))
+	log.Infof(ctx, "Found Python wheel project at %s", b.BundleRootPath)
 	module := extractModuleName(setupPy)
 
 	if b.Config.Artifacts == nil {

View File

@@ -45,8 +45,9 @@ func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diag
 	if b.Config.Bundle.ClusterId != "" {
 		// Overriding compute via a command-line flag for production works, but is not recommended.
 		diags = diags.Extend(diag.Diagnostics{{
 			Summary:  "Setting a cluster override for a target that uses 'mode: production' is not recommended",
 			Detail:   "It is recommended to always use the same compute for production target for consistency.",
+			Severity: diag.Warning,
 		}})
 	}
 }

View File

@@ -8,6 +8,7 @@ import (
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/mutator"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/stretchr/testify/assert"
@@ -173,6 +174,7 @@ func TestOverrideComputeModeProduction(t *testing.T) {
 	diags := bundle.Apply(context.Background(), b, m)
 	require.Len(t, diags, 1)
 	assert.Equal(t, "Setting a cluster override for a target that uses 'mode: production' is not recommended", diags[0].Summary)
+	assert.Equal(t, diag.Warning, diags[0].Severity)
 	assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
 }

View File

@@ -19,7 +19,7 @@ func (t *mutatorWithError) Name() string {
 func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) diag.Diagnostics {
 	t.applyCalled++
-	return diag.Errorf(t.errorMsg)
+	return diag.Errorf(t.errorMsg) // nolint:govet
 }
 
 func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) {

View File

@@ -2,7 +2,6 @@ package terraform
 import (
 	"context"
-	"fmt"
 	"path/filepath"
 
 	"github.com/databricks/cli/bundle"
@@ -57,7 +56,7 @@ func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		IsEmpty: !notEmpty,
 	}
 
-	log.Debugf(ctx, fmt.Sprintf("Planning complete and persisted at %s\n", planPath))
+	log.Debugf(ctx, "Planning complete and persisted at %s\n", planPath)
 	return nil
 }

View File

@@ -173,7 +173,7 @@ func TestFilerForVolumeNotFoundAndInBundle(t *testing.T) {
 		{
 			Severity:  diag.Error,
 			Summary:   "volume /Volumes/main/my_schema/my_volume does not exist: error from API",
-			Locations: []dyn.Location{{"config.yml", 1, 2}, {"volume.yml", 1, 2}},
+			Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}, {File: "volume.yml", Line: 1, Column: 2}},
 			Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path"), dyn.MustPathFromString("resources.volumes.foo")},
 			Detail: `You are using a volume in your artifact_path that is managed by
 this bundle but which has not been deployed yet. Please first deploy

View File

@@ -143,7 +143,7 @@ func logProgressCallback(ctx context.Context, progressLogger *cmdio.Logger) func
 		progressLogger.Log(event)
 
 		// log progress events in using the default logger
-		log.Infof(ctx, event.String())
+		log.Info(ctx, event.String())
 	}
 }
@@ -203,7 +203,7 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e
 		logDebug(r)
 		logProgress(r)
 	}).GetWithTimeout(jobRunTimeout)
-	if err != nil && runId != nil {
+	if err != nil {
 		r.logFailedTasks(ctx, *runId)
 	}
 	if err != nil {

View File

@@ -37,7 +37,7 @@ func (r *pipelineRunner) logEvent(ctx context.Context, event pipelines.PipelineE
 		}
 	}
 	if logString != "" {
-		log.Errorf(ctx, fmt.Sprintf("[%s] %s", event.EventType, logString))
+		log.Errorf(ctx, "[%s] %s", event.EventType, logString)
 	}
 }
@@ -132,7 +132,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp
 	}
 	for _, event := range events {
 		progressLogger.Log(&event)
-		log.Infof(ctx, event.String())
+		log.Info(ctx, event.String())
 	}
 
 	update, err := w.Pipelines.GetUpdateByPipelineIdAndUpdateId(ctx, pipelineID, updateID)

View File

@@ -190,9 +190,6 @@ func (e *Entrypoint) getLoginConfig(cmd *cobra.Command) (*loginConfig, *config.C
 	if isNoLoginConfig && !e.IsBundleAware {
 		return nil, nil, ErrNoLoginConfig
 	}
-	if !isNoLoginConfig && err != nil {
-		return nil, nil, fmt.Errorf("load: %w", err)
-	}
 	if e.IsAccountLevel {
 		log.Debugf(ctx, "Using account-level login profile: %s", lc.AccountProfile)
 		cfg, err := e.envAwareConfigWithProfile(ctx, lc.AccountProfile)

View File

@@ -15,7 +15,8 @@ import (
 )
 
 func TestEmptyHttpRequest(t *testing.T) {
-	ctx, _ := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	req := emptyHttpRequest(ctx)
 	assert.Equal(t, req.Context(), ctx)
 }

View File

@@ -12,7 +12,6 @@ resources:
       - notebook:
           path: ./nb.sql
     development: true
-    catalog: main
 
 include:
   - "*.yml"

View File

@@ -11,3 +11,4 @@ targets:
       pipelines:
         foo:
           target: ${resources.schemas.bar.id}
+          catalog: main

View File

@@ -31,6 +31,51 @@ func log(logger *slog.Logger, ctx context.Context, level slog.Level, msg string)
 	_ = logger.Handler().Handle(ctx, r)
 }
 
+// Trace logs a string using the context-local or global logger.
+func Trace(ctx context.Context, msg string) {
+	logger := GetLogger(ctx)
+	if !logger.Enabled(ctx, LevelTrace) {
+		return
+	}
+	log(logger, ctx, LevelTrace, msg)
+}
+
+// Debug logs a string using the context-local or global logger.
+func Debug(ctx context.Context, msg string) {
+	logger := GetLogger(ctx)
+	if !logger.Enabled(ctx, LevelDebug) {
+		return
+	}
+	log(logger, ctx, LevelDebug, msg)
+}
+
+// Info logs a string using the context-local or global logger.
+func Info(ctx context.Context, msg string) {
+	logger := GetLogger(ctx)
+	if !logger.Enabled(ctx, LevelInfo) {
+		return
+	}
+	log(logger, ctx, LevelInfo, msg)
+}
+
+// Warn logs a string using the context-local or global logger.
+func Warn(ctx context.Context, msg string) {
+	logger := GetLogger(ctx)
+	if !logger.Enabled(ctx, LevelWarn) {
+		return
+	}
+	log(logger, ctx, LevelWarn, msg)
+}
+
+// Error logs a string using the context-local or global logger.
+func Error(ctx context.Context, msg string) {
+	logger := GetLogger(ctx)
+	if !logger.Enabled(ctx, LevelError) {
+		return
+	}
+	log(logger, ctx, LevelError, msg)
+}
+
 // Tracef logs a formatted string using the context-local or global logger.
 func Tracef(ctx context.Context, format string, v ...any) {
 	logger := GetLogger(ctx)

View File

@@ -310,7 +310,7 @@ func (r *renderer) persistToDisk(ctx context.Context, out filer.Filer) error {
 		if err == nil {
 			return fmt.Errorf("failed to initialize template, one or more files already exist: %s", path)
 		}
-		if err != nil && !errors.Is(err, fs.ErrNotExist) {
+		if !errors.Is(err, fs.ErrNotExist) {
 			return fmt.Errorf("error while verifying file %s does not already exist: %w", path, err)
 		}
 	}