Merge branch 'main' of github.com:databricks/cli into feat/source-linked-deployment-file-path

Ilya Kuznetsov, 2025-01-07 18:54:10 +01:00
commit fcdc3eabd2
No known key found for this signature in database (GPG Key ID: 91F3DDCF5D21CDDF)
170 changed files with 478 additions and 428 deletions

View File

@ -0,0 +1,40 @@
name: "Close Stale Issues"
on:
workflow_dispatch:
schedule:
- cron: "0 0 * * *" # Run at midnight every day
jobs:
cleanup:
name: Stale issue job
runs-on:
group: databricks-deco-testing-runner-group
labels: ubuntu-latest-deco
permissions:
issues: write
contents: read
pull-requests: write
steps:
- uses: actions/stale@v9
with:
stale-issue-message: This issue has not received a response in a while. If you want to keep this issue open, please leave a comment below and auto-close will be canceled.
stale-pr-message: This PR has not received an update in a while. If you want to keep this PR open, please leave a comment below or push a new commit and auto-close will be canceled.
# These labels are required
stale-issue-label: Stale
stale-pr-label: Stale
exempt-issue-labels: No Autoclose
exempt-pr-labels: No Autoclose
# Issue timing
days-before-stale: 30
days-before-close: 7
repo-token: ${{ secrets.GITHUB_TOKEN }}
loglevel: DEBUG
# TODO: Remove dry-run after merge when confirmed it works correctly
dry-run: true

View File

@ -13,10 +13,17 @@ on:
jobs:
comment-on-pr:
runs-on: ubuntu-latest
runs-on:
group: databricks-deco-testing-runner-group
labels: ubuntu-latest-deco
permissions:
pull-requests: write
# Only run this job for PRs from forks.
# Integration tests are not run automatically for PRs from forks.
if: "${{ github.event.pull_request.head.repo.fork }}"
steps:
- uses: actions/checkout@v4
@ -43,7 +50,7 @@ jobs:
run: |
gh pr comment ${{ github.event.pull_request.number }} --body \
"<!-- INTEGRATION_TESTS_MANUAL -->
If integration tests don't run automatically, an authorized user can run them manually by following the instructions below:
An authorized user can trigger integration tests manually by following the instructions below:
Trigger:
[go/deco-tests-run/cli](https://go/deco-tests-run/cli)

View File

@ -17,7 +17,9 @@ jobs:
# * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing.
#
trigger:
runs-on: ubuntu-latest
runs-on:
group: databricks-deco-testing-runner-group
labels: ubuntu-latest-deco
steps:
- name: Auto-approve squashed commit

View File

@ -11,7 +11,10 @@ jobs:
# This workflow triggers the integration test workflow in a different repository.
# It requires secrets from the "test-trigger-is" environment, which are only available to authorized users.
trigger:
runs-on: ubuntu-latest
runs-on:
group: databricks-deco-testing-runner-group
labels: ubuntu-latest-deco
environment: "test-trigger-is"
steps:

View File

@ -5,36 +5,20 @@ on:
types: [opened, synchronize]
jobs:
check-token:
runs-on: ubuntu-latest
environment: "test-trigger-is"
outputs:
has_token: ${{ steps.set-token-status.outputs.has_token }}
steps:
- name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
id: set-token-status
run: |
if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets."
echo "::set-output name=has_token::false"
else
echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets."
echo "::set-output name=has_token::true"
fi
# Trigger for pull requests.
#
# This workflow triggers the integration test workflow in a different repository.
# It requires secrets from the "test-trigger-is" environment, which are only available to authorized users.
# It depends on the "check-token" workflow to confirm access to this environment to avoid failures.
trigger:
runs-on: ubuntu-latest
runs-on:
group: databricks-deco-testing-runner-group
labels: ubuntu-latest-deco
environment: "test-trigger-is"
if: needs.check-token.outputs.has_token == 'true'
needs: check-token
# Only run this job for PRs from branches on the main repository and not from forks.
# Workflows triggered by PRs from forks don't have access to the "test-trigger-is" environment.
if: "${{ !github.event.pull_request.head.repo.fork }}"
steps:
- name: Generate GitHub App Token

View File

@ -13,9 +13,26 @@ on:
# seed the build cache.
branches:
- main
schedule:
- cron: '0 0,12 * * *' # Runs at 00:00 and 12:00 UTC daily
env:
GOTESTSUM_FORMAT: github-actions
jobs:
cleanups:
runs-on:
group: databricks-deco-testing-runner-group
labels: ubuntu-latest-deco
steps:
- name: Clean up cache if running on schedule
if: ${{ github.event_name == 'schedule' }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: gh cache delete --all --repo databricks/cli || true
tests:
needs: cleanups
runs-on: ${{ matrix.os }}
strategy:
@ -58,6 +75,7 @@ jobs:
run: make test
golangci:
needs: cleanups
name: lint
runs-on: ubuntu-latest
steps:
@ -65,6 +83,10 @@ jobs:
- uses: actions/setup-go@v5
with:
go-version: 1.23.4
# Use different schema from regular job, to avoid overwriting the same key
cache-dependency-path: |
go.sum
.golangci.yaml
- name: Run go mod tidy
run: |
go mod tidy
@ -79,6 +101,7 @@ jobs:
args: --timeout=15m
validate-bundle-schema:
needs: cleanups
runs-on: ubuntu-latest
steps:
@ -89,6 +112,10 @@ jobs:
uses: actions/setup-go@v5
with:
go-version: 1.23.4
# Use different schema from regular job, to avoid overwriting the same key
cache-dependency-path: |
go.sum
bundle/internal/schema/*.*
- name: Verify that the schema is up to date
run: |

View File

@ -20,7 +20,10 @@ on:
jobs:
goreleaser:
runs-on: ubuntu-latest
runs-on:
group: databricks-deco-testing-runner-group
labels: ubuntu-latest-deco
steps:
- name: Checkout repository and submodules
uses: actions/checkout@v4

View File

@ -9,9 +9,13 @@ on:
jobs:
goreleaser:
runs-on:
group: databricks-deco-testing-runner-group
labels: ubuntu-latest-deco
outputs:
artifacts: ${{ steps.releaser.outputs.artifacts }}
runs-on: ubuntu-latest
steps:
- name: Checkout repository and submodules
uses: actions/checkout@v4

View File

@ -12,6 +12,9 @@ linters:
- gofumpt
- goimports
- testifylint
- intrange
- mirror
- perfsprint
linters-settings:
govet:
enable-all: true
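
Note on the three linters added above (intrange, mirror, perfsprint): they appear to drive most of the mechanical Go changes in the rest of this diff. perfsprint rewrites fmt.Sprintf/fmt.Errorf calls that need no formatting verbs, intrange rewrites counting loops with Go 1.22's range-over-int, and mirror flags bytes/strings calls that round-trip through a conversion. A minimal, self-contained sketch of the before/after shapes (identifiers below are illustrative, not taken from this repository):

package main

import (
	"errors"
	"fmt"
	"strings"
)

func main() {
	name := "job0"

	// perfsprint: fmt.Sprintf with only a plain %s becomes concatenation,
	// and fmt.Errorf with a constant message becomes errors.New.
	before := fmt.Sprintf("cluster: %s", name)
	after := "cluster: " + name
	errBefore := fmt.Errorf("no valid lookup fields provided")
	errAfter := errors.New("no valid lookup fields provided")

	// intrange: a classic counting loop becomes range over an int (Go 1.22+).
	for i := 0; i < 3; i++ {
		_ = i
	}
	for i := range 3 {
		_ = i
	}

	// mirror: when the value is already a string, call the strings package
	// directly instead of converting to []byte for the bytes package.
	_ = strings.Index(after, "cluster")

	fmt.Println(before == after, errBefore.Error() == errAfter.Error())
}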

View File

@ -2,6 +2,8 @@ default: build
PACKAGES=./libs/... ./internal/... ./cmd/... ./bundle/... .
GOTESTSUM_FORMAT ?= pkgname-and-test-fails
lint:
./lint.sh ./...
@ -9,10 +11,10 @@ lintcheck:
golangci-lint run ./...
test:
gotestsum --format pkgname-and-test-fails --no-summary=skipped -- ${PACKAGES}
gotestsum --format ${GOTESTSUM_FORMAT} --no-summary=skipped -- ${PACKAGES}
cover:
gotestsum --format pkgname-and-test-fails --no-summary=skipped -- -coverprofile=coverage.txt ${PACKAGES}
gotestsum --format ${GOTESTSUM_FORMAT} --no-summary=skipped -- -coverprofile=coverage.txt ${PACKAGES}
showcover:
go tool cover -html=coverage.txt

View File

@ -2,7 +2,6 @@ package artifacts
import (
"context"
"fmt"
"path/filepath"
"testing"
@ -88,16 +87,16 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) {
))
assert.Len(t, diags, 4)
assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("a[.txt")), diags[0].Summary)
assert.Equal(t, filepath.Clean("a[.txt")+": syntax error in pattern", diags[0].Summary)
assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[0].Locations[0].File)
assert.Equal(t, "artifacts.test.files[0].source", diags[0].Paths[0].String())
assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("a[.txt")), diags[1].Summary)
assert.Equal(t, filepath.Clean("a[.txt")+": syntax error in pattern", diags[1].Summary)
assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[1].Locations[0].File)
assert.Equal(t, "artifacts.test.files[1].source", diags[1].Paths[0].String())
assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("../a[.txt")), diags[2].Summary)
assert.Equal(t, filepath.Clean("../a[.txt")+": syntax error in pattern", diags[2].Summary)
assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[2].Locations[0].File)
assert.Equal(t, "artifacts.test.files[2].source", diags[2].Paths[0].String())
assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("subdir/a[.txt")), diags[3].Summary)
assert.Equal(t, filepath.Clean("subdir/a[.txt")+": syntax error in pattern", diags[3].Summary)
assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[3].Locations[0].File)
assert.Equal(t, "artifacts.test.files[3].source", diags[3].Paths[0].String())
}

View File

@ -32,7 +32,7 @@ func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
//)
py := python.GetExecutable()
artifact.BuildCommand = fmt.Sprintf(`%s setup.py bdist_wheel`, py)
artifact.BuildCommand = py + " setup.py bdist_wheel"
return nil
}

View File

@ -8,6 +8,7 @@ package bundle
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
@ -234,7 +235,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
// we call into from this bundle context.
func (b *Bundle) AuthEnv() (map[string]string, error) {
if b.client == nil {
return nil, fmt.Errorf("workspace client not initialized yet")
return nil, errors.New("workspace client not initialized yet")
}
cfg := b.client.Config
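
Side note on the fmt.Errorf → errors.New swaps that start here: they apply only where the message is a fixed string. Calls that interpolate values or wrap another error with %w keep fmt.Errorf, as in the bundle-root validation later in this diff. A hedged sketch (hypothetical function, message borrowed from the hunk above):

package main

import (
	"errors"
	"fmt"
)

var errNotInitialized = errors.New("workspace client not initialized yet")

func authEnv(initialized bool) (map[string]string, error) {
	if !initialized {
		// Constant message: errors.New is enough, no formatting needed.
		return nil, errNotInitialized
	}
	return map[string]string{}, nil
}

func main() {
	if _, err := authEnv(false); err != nil {
		// Wrapping still uses fmt.Errorf so errors.Is can match the sentinel.
		wrapped := fmt.Errorf("resolving auth environment: %w", err)
		fmt.Println(errors.Is(wrapped, errNotInitialized)) // true
	}
}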

View File

@ -2,7 +2,7 @@ package config
import (
"context"
"fmt"
"errors"
"github.com/databricks/cli/libs/exec"
)
@ -37,7 +37,7 @@ type Artifact struct {
func (a *Artifact) Build(ctx context.Context) ([]byte, error) {
if a.BuildCommand == "" {
return nil, fmt.Errorf("no build property defined")
return nil, errors.New("no build property defined")
}
var e *exec.Executor

View File

@ -2,7 +2,6 @@ package mutator
import (
"context"
"fmt"
"path"
"strings"
@ -33,7 +32,7 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.
}
if strings.HasPrefix(root, "~/") {
home := fmt.Sprintf("/Workspace/Users/%s", currentUser.UserName)
home := "/Workspace/Users/" + currentUser.UserName
b.Config.Workspace.RootPath = path.Join(home, root[2:])
}

View File

@ -55,7 +55,7 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di
}
}
return dyn.NewValue(fmt.Sprintf("/Workspace%s", path), v.Locations()), nil
return dyn.NewValue("/Workspace"+path, v.Locations()), nil
})
if err != nil {
return dyn.InvalidValue, err

View File

@ -380,7 +380,7 @@ func TestAllResourcesMocked(t *testing.T) {
b := mockBundle(config.Development)
resources := reflect.ValueOf(b.Config.Resources)
for i := 0; i < resources.NumField(); i++ {
for i := range resources.NumField() {
field := resources.Field(i)
if field.Kind() == reflect.Map {
assert.True(
@ -409,7 +409,7 @@ func TestAllNonUcResourcesAreRenamed(t *testing.T) {
require.NoError(t, diags.Error())
resources := reflect.ValueOf(b.Config.Resources)
for i := 0; i < resources.NumField(); i++ {
for i := range resources.NumField() {
field := resources.Field(i)
if field.Kind() == reflect.Map {
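
The rewritten loops in this test use Go 1.22's range-over-int: the bound expression is evaluated once and i walks 0..n-1, which matches the old counting loop here because NumField() is constant for the type. A self-contained sketch over a hypothetical struct:

package main

import (
	"fmt"
	"reflect"
)

type resources struct {
	Jobs      map[string]string
	Pipelines map[string]string
}

func main() {
	rt := reflect.TypeOf(resources{})

	// Before: for i := 0; i < rt.NumField(); i++ { ... }
	// After: range over the int returned by NumField (Go 1.22+).
	for i := range rt.NumField() {
		fmt.Println(rt.Field(i).Name)
	}
}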

View File

@ -2,6 +2,7 @@ package python
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
@ -319,15 +320,15 @@ func TestCreateOverrideVisitor(t *testing.T) {
updatePath: dyn.MustPathFromString("resources.jobs.job0.name"),
deletePath: dyn.MustPathFromString("resources.jobs.job0.name"),
insertPath: dyn.MustPathFromString("resources.jobs.job0.name"),
deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (delete)"),
insertError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (insert)"),
updateError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (update)"),
deleteError: errors.New("unexpected change at \"resources.jobs.job0.name\" (delete)"),
insertError: errors.New("unexpected change at \"resources.jobs.job0.name\" (insert)"),
updateError: errors.New("unexpected change at \"resources.jobs.job0.name\" (update)"),
},
{
name: "load: can't delete an existing job",
phase: PythonMutatorPhaseLoad,
deletePath: dyn.MustPathFromString("resources.jobs.job0"),
deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"),
deleteError: errors.New("unexpected change at \"resources.jobs.job0\" (delete)"),
},
{
name: "load: can insert 'resources'",
@ -353,9 +354,9 @@ func TestCreateOverrideVisitor(t *testing.T) {
deletePath: dyn.MustPathFromString("include[0]"),
insertPath: dyn.MustPathFromString("include[0]"),
updatePath: dyn.MustPathFromString("include[0]"),
deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"),
insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"),
updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"),
deleteError: errors.New("unexpected change at \"include[0]\" (delete)"),
insertError: errors.New("unexpected change at \"include[0]\" (insert)"),
updateError: errors.New("unexpected change at \"include[0]\" (update)"),
},
{
name: "init: can change an existing job",
@ -371,7 +372,7 @@ func TestCreateOverrideVisitor(t *testing.T) {
name: "init: can't delete an existing job",
phase: PythonMutatorPhaseInit,
deletePath: dyn.MustPathFromString("resources.jobs.job0"),
deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"),
deleteError: errors.New("unexpected change at \"resources.jobs.job0\" (delete)"),
},
{
name: "init: can insert 'resources'",
@ -397,9 +398,9 @@ func TestCreateOverrideVisitor(t *testing.T) {
deletePath: dyn.MustPathFromString("include[0]"),
insertPath: dyn.MustPathFromString("include[0]"),
updatePath: dyn.MustPathFromString("include[0]"),
deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"),
insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"),
updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"),
deleteError: errors.New("unexpected change at \"include[0]\" (delete)"),
insertError: errors.New("unexpected change at \"include[0]\" (insert)"),
updateError: errors.New("unexpected change at \"include[0]\" (update)"),
},
}

View File

@ -2,7 +2,7 @@ package mutator
import (
"context"
"fmt"
"errors"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
@ -74,7 +74,7 @@ func lookupForComplexVariables(v dyn.Value, path dyn.Path, b *bundle.Bundle) (dy
}
if vv.Type == variable.VariableTypeComplex {
return dyn.InvalidValue, fmt.Errorf("complex variables cannot contain references to another complex variables")
return dyn.InvalidValue, errors.New("complex variables cannot contain references to another complex variables")
}
return lookup(v, path, b)
@ -106,7 +106,7 @@ func lookupForVariables(v dyn.Value, path dyn.Path, b *bundle.Bundle) (dyn.Value
}
if vv.Lookup != nil && vv.Lookup.String() != "" {
return dyn.InvalidValue, fmt.Errorf("lookup variables cannot contain references to another lookup variables")
return dyn.InvalidValue, errors.New("lookup variables cannot contain references to another lookup variables")
}
return lookup(v, path, b)

View File

@ -2,7 +2,6 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
@ -45,7 +44,7 @@ func (s *Cluster) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("compute/clusters/%s", s.ID)
baseURL.Path = "compute/clusters/" + s.ID
s.URL = baseURL.String()
}

View File

@ -2,7 +2,6 @@ package resources
import (
"context"
"fmt"
"net/url"
"strconv"
@ -52,7 +51,7 @@ func (j *Job) InitializeURL(baseURL url.URL) {
if j.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("jobs/%s", j.ID)
baseURL.Path = "jobs/" + j.ID
j.URL = baseURL.String()
}

View File

@ -2,7 +2,6 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
@ -47,7 +46,7 @@ func (s *MlflowExperiment) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("ml/experiments/%s", s.ID)
baseURL.Path = "ml/experiments/" + s.ID
s.URL = baseURL.String()
}

View File

@ -2,7 +2,6 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
@ -47,7 +46,7 @@ func (s *MlflowModel) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("ml/models/%s", s.ID)
baseURL.Path = "ml/models/" + s.ID
s.URL = baseURL.String()
}

View File

@ -2,7 +2,6 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
@ -55,7 +54,7 @@ func (s *ModelServingEndpoint) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("ml/endpoints/%s", s.ID)
baseURL.Path = "ml/endpoints/" + s.ID
s.URL = baseURL.String()
}

View File

@ -25,5 +25,5 @@ func (p Permission) String() string {
return fmt.Sprintf("level: %s, group_name: %s", p.Level, p.GroupName)
}
return fmt.Sprintf("level: %s", p.Level)
return "level: " + p.Level
}

View File

@ -2,7 +2,6 @@ package resources
import (
"context"
"fmt"
"net/url"
"github.com/databricks/cli/libs/log"
@ -47,7 +46,7 @@ func (p *Pipeline) InitializeURL(baseURL url.URL) {
if p.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("pipelines/%s", p.ID)
baseURL.Path = "pipelines/" + p.ID
p.URL = baseURL.String()
}

View File

@ -2,7 +2,6 @@ package resources
import (
"context"
"fmt"
"net/url"
"strings"
@ -51,7 +50,7 @@ func (s *QualityMonitor) InitializeURL(baseURL url.URL) {
if s.TableName == "" {
return
}
baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.TableName, ".", "/"))
baseURL.Path = "explore/data/" + strings.ReplaceAll(s.TableName, ".", "/")
s.URL = baseURL.String()
}

View File

@ -2,7 +2,6 @@ package resources
import (
"context"
"fmt"
"net/url"
"strings"
@ -57,7 +56,7 @@ func (s *RegisteredModel) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("explore/data/models/%s", strings.ReplaceAll(s.ID, ".", "/"))
baseURL.Path = "explore/data/models/" + strings.ReplaceAll(s.ID, ".", "/")
s.URL = baseURL.String()
}

View File

@ -2,7 +2,7 @@ package resources
import (
"context"
"fmt"
"errors"
"net/url"
"strings"
@ -26,7 +26,7 @@ type Schema struct {
}
func (s *Schema) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
return false, fmt.Errorf("schema.Exists() is not supported")
return false, errors.New("schema.Exists() is not supported")
}
func (s *Schema) TerraformResourceName() string {
@ -37,7 +37,7 @@ func (s *Schema) InitializeURL(baseURL url.URL) {
if s.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.ID, ".", "/"))
baseURL.Path = "explore/data/" + strings.ReplaceAll(s.ID, ".", "/")
s.URL = baseURL.String()
}

View File

@ -2,7 +2,7 @@ package resources
import (
"context"
"fmt"
"errors"
"net/url"
"strings"
@ -34,7 +34,7 @@ func (v Volume) MarshalJSON() ([]byte, error) {
}
func (v *Volume) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
return false, fmt.Errorf("volume.Exists() is not supported")
return false, errors.New("volume.Exists() is not supported")
}
func (v *Volume) TerraformResourceName() string {
@ -45,7 +45,7 @@ func (v *Volume) InitializeURL(baseURL url.URL) {
if v.ID == "" {
return
}
baseURL.Path = fmt.Sprintf("explore/data/volumes/%s", strings.ReplaceAll(v.ID, ".", "/"))
baseURL.Path = "explore/data/volumes/" + strings.ReplaceAll(v.ID, ".", "/")
v.URL = baseURL.String()
}

View File

@ -33,7 +33,7 @@ func TestCustomMarshallerIsImplemented(t *testing.T) {
r := Resources{}
rt := reflect.TypeOf(r)
for i := 0; i < rt.NumField(); i++ {
for i := range rt.NumField() {
field := rt.Field(i)
// Fields in Resources are expected be of the form map[string]*resourceStruct
@ -75,7 +75,7 @@ func TestResourcesAllResourcesCompleteness(t *testing.T) {
types = append(types, group.Description.PluralName)
}
for i := 0; i < rt.NumField(); i++ {
for i := range rt.NumField() {
field := rt.Field(i)
jsonTag := field.Tag.Get("json")
@ -92,7 +92,7 @@ func TestSupportedResources(t *testing.T) {
actual := SupportedResources()
typ := reflect.TypeOf(Resources{})
for i := 0; i < typ.NumField(); i++ {
for i := range typ.NumField() {
field := typ.Field(i)
jsonTags := strings.Split(field.Tag.Get("json"), ",")
pluralName := jsonTags[0]

View File

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"path"
"strconv"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/libraries"
@ -60,7 +61,7 @@ func checkFolderPermission(ctx context.Context, b bundle.ReadOnlyBundle, folderP
}
objPermissions, err := w.GetPermissions(ctx, workspace.GetWorkspaceObjectPermissionsRequest{
WorkspaceObjectId: fmt.Sprint(obj.ObjectId),
WorkspaceObjectId: strconv.FormatInt(obj.ObjectId, 10),
WorkspaceObjectType: "directories",
})
if err != nil {
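
The fmt.Sprint → strconv.FormatInt change above (repeated in the permissions package further down) formats an int64 object ID without going through fmt's reflection-based path and makes the base explicit. A small sketch with a made-up ID:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	var objectID int64 = 4234567890123

	// fmt.Sprint works, but routes the value through an interface and reflection.
	viaFmt := fmt.Sprint(objectID)

	// strconv.FormatInt is direct and states the base explicitly.
	viaStrconv := strconv.FormatInt(objectID, 10)

	fmt.Println(viaFmt == viaStrconv) // true
}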

View File

@ -2,7 +2,6 @@ package validate
import (
"context"
"fmt"
"sort"
"github.com/databricks/cli/bundle"
@ -102,7 +101,7 @@ func (m *uniqueResourceKeys) Apply(ctx context.Context, b *bundle.Bundle) diag.D
// If there are multiple resources with the same key, report an error.
diags = append(diags, diag.Diagnostic{
Severity: diag.Error,
Summary: fmt.Sprintf("multiple resources have been defined with the same key: %s", k),
Summary: "multiple resources have been defined with the same key: " + k,
Locations: v.locations,
Paths: v.paths,
})

View File

@ -68,7 +68,7 @@ func findVolumeInBundle(r config.Root, catalogName, schemaName, volumeName strin
if v.SchemaName != schemaName && !isSchemaDefinedInBundle {
continue
}
pathString := fmt.Sprintf("resources.volumes.%s", k)
pathString := "resources.volumes." + k
return dyn.MustPathFromString(pathString), r.GetLocations(pathString), true
}
return nil, nil, false

View File

@ -2,7 +2,6 @@ package validate
import (
"context"
"fmt"
"testing"
"github.com/databricks/cli/bundle"
@ -152,7 +151,7 @@ func TestExtractVolumeFromPath(t *testing.T) {
for _, p := range invalidVolumePaths() {
_, _, _, err := extractVolumeFromPath(p)
assert.EqualError(t, err, fmt.Sprintf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p))
assert.EqualError(t, err, "expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got "+p)
}
}
@ -171,7 +170,7 @@ func TestValidateArtifactPathWithInvalidPaths(t *testing.T) {
diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), ValidateArtifactPath())
require.Equal(t, diag.Diagnostics{{
Severity: diag.Error,
Summary: fmt.Sprintf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p),
Summary: "expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got " + p,
Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}},
Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
}}, diags)

View File

@ -2,7 +2,7 @@ package variable
import (
"context"
"fmt"
"errors"
"github.com/databricks/databricks-sdk-go"
)
@ -83,11 +83,11 @@ func (l *Lookup) constructResolver() (resolver, error) {
switch len(resolvers) {
case 0:
return nil, fmt.Errorf("no valid lookup fields provided")
return nil, errors.New("no valid lookup fields provided")
case 1:
return resolvers[0], nil
default:
return nil, fmt.Errorf("exactly one lookup field must be provided")
return nil, errors.New("exactly one lookup field must be provided")
}
}

View File

@ -13,7 +13,7 @@ func TestLookup_Coverage(t *testing.T) {
val := reflect.ValueOf(lookup)
typ := val.Type()
for i := 0; i < val.NumField(); i++ {
for i := range val.NumField() {
field := val.Field(i)
if field.Kind() != reflect.String {
t.Fatalf("Field %s is not a string", typ.Field(i).Name)

View File

@ -2,7 +2,6 @@ package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
@ -16,9 +15,9 @@ func (l resolveAlert) Resolve(ctx context.Context, w *databricks.WorkspaceClient
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
return entity.Id, nil
}
func (l resolveAlert) String() string {
return fmt.Sprintf("alert: %s", l.name)
return "alert: " + l.name
}

View File

@ -42,5 +42,5 @@ func (l resolveCluster) Resolve(ctx context.Context, w *databricks.WorkspaceClie
}
func (l resolveCluster) String() string {
return fmt.Sprintf("cluster: %s", l.name)
return "cluster: " + l.name
}

View File

@ -2,7 +2,6 @@ package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
@ -16,9 +15,9 @@ func (l resolveClusterPolicy) Resolve(ctx context.Context, w *databricks.Workspa
if err != nil {
return "", err
}
return fmt.Sprint(entity.PolicyId), nil
return entity.PolicyId, nil
}
func (l resolveClusterPolicy) String() string {
return fmt.Sprintf("cluster-policy: %s", l.name)
return "cluster-policy: " + l.name
}

View File

@ -2,7 +2,6 @@ package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
@ -16,9 +15,9 @@ func (l resolveDashboard) Resolve(ctx context.Context, w *databricks.WorkspaceCl
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
return entity.Id, nil
}
func (l resolveDashboard) String() string {
return fmt.Sprintf("dashboard: %s", l.name)
return "dashboard: " + l.name
}

View File

@ -2,7 +2,6 @@ package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
@ -16,9 +15,9 @@ func (l resolveInstancePool) Resolve(ctx context.Context, w *databricks.Workspac
if err != nil {
return "", err
}
return fmt.Sprint(entity.InstancePoolId), nil
return entity.InstancePoolId, nil
}
func (l resolveInstancePool) String() string {
return fmt.Sprintf("instance-pool: %s", l.name)
return "instance-pool: " + l.name
}

View File

@ -2,7 +2,7 @@ package variable
import (
"context"
"fmt"
"strconv"
"github.com/databricks/databricks-sdk-go"
)
@ -16,9 +16,9 @@ func (l resolveJob) Resolve(ctx context.Context, w *databricks.WorkspaceClient)
if err != nil {
return "", err
}
return fmt.Sprint(entity.JobId), nil
return strconv.FormatInt(entity.JobId, 10), nil
}
func (l resolveJob) String() string {
return fmt.Sprintf("job: %s", l.name)
return "job: " + l.name
}

View File

@ -2,7 +2,6 @@ package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
@ -16,9 +15,9 @@ func (l resolveMetastore) Resolve(ctx context.Context, w *databricks.WorkspaceCl
if err != nil {
return "", err
}
return fmt.Sprint(entity.MetastoreId), nil
return entity.MetastoreId, nil
}
func (l resolveMetastore) String() string {
return fmt.Sprintf("metastore: %s", l.name)
return "metastore: " + l.name
}

View File

@ -42,5 +42,5 @@ func (l resolveNotificationDestination) Resolve(ctx context.Context, w *databric
}
func (l resolveNotificationDestination) String() string {
return fmt.Sprintf("notification-destination: %s", l.name)
return "notification-destination: " + l.name
}

View File

@ -2,7 +2,7 @@ package variable
import (
"context"
"fmt"
"errors"
"testing"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
@ -35,7 +35,7 @@ func TestResolveNotificationDestination_ResolveError(t *testing.T) {
api := m.GetMockNotificationDestinationsAPI()
api.EXPECT().
ListAll(mock.Anything, mock.Anything).
Return(nil, fmt.Errorf("bad"))
Return(nil, errors.New("bad"))
ctx := context.Background()
l := resolveNotificationDestination{name: "destination"}

View File

@ -2,7 +2,6 @@ package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
@ -16,9 +15,9 @@ func (l resolvePipeline) Resolve(ctx context.Context, w *databricks.WorkspaceCli
if err != nil {
return "", err
}
return fmt.Sprint(entity.PipelineId), nil
return entity.PipelineId, nil
}
func (l resolvePipeline) String() string {
return fmt.Sprintf("pipeline: %s", l.name)
return "pipeline: " + l.name
}

View File

@ -2,7 +2,6 @@ package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
@ -16,9 +15,9 @@ func (l resolveQuery) Resolve(ctx context.Context, w *databricks.WorkspaceClient
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
return entity.Id, nil
}
func (l resolveQuery) String() string {
return fmt.Sprintf("query: %s", l.name)
return "query: " + l.name
}

View File

@ -2,7 +2,6 @@ package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
@ -16,9 +15,9 @@ func (l resolveServicePrincipal) Resolve(ctx context.Context, w *databricks.Work
if err != nil {
return "", err
}
return fmt.Sprint(entity.ApplicationId), nil
return entity.ApplicationId, nil
}
func (l resolveServicePrincipal) String() string {
return fmt.Sprintf("service-principal: %s", l.name)
return "service-principal: " + l.name
}

View File

@ -2,7 +2,6 @@ package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
@ -16,9 +15,9 @@ func (l resolveWarehouse) Resolve(ctx context.Context, w *databricks.WorkspaceCl
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
return entity.Id, nil
}
func (l resolveWarehouse) String() string {
return fmt.Sprintf("warehouse: %s", l.name)
return "warehouse: " + l.name
}

View File

@ -1,6 +1,7 @@
package variable
import (
"errors"
"fmt"
"reflect"
)
@ -68,7 +69,7 @@ func (v *Variable) Set(val VariableValue) error {
switch rv.Kind() {
case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map:
if v.Type != VariableTypeComplex {
return fmt.Errorf("variable type is not complex")
return errors.New("variable type is not complex")
}
}

View File

@ -3,6 +3,7 @@ package deploy
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
@ -95,7 +96,7 @@ func (e *entry) Type() fs.FileMode {
func (e *entry) Info() (fs.FileInfo, error) {
if e.info == nil {
return nil, fmt.Errorf("no info available")
return nil, errors.New("no info available")
}
return e.info, nil
}

View File

@ -72,7 +72,7 @@ func (l *checkDashboardsModifiedRemotely) Apply(ctx context.Context, b *bundle.B
continue
}
path := dyn.MustPathFromString(fmt.Sprintf("resources.dashboards.%s", dashboard.Name))
path := dyn.MustPathFromString("resources.dashboards." + dashboard.Name)
loc := b.Config.GetLocation(path.String())
actual, err := b.WorkspaceClient().Lakeview.GetByDashboardId(ctx, dashboard.ID)
if err != nil {

View File

@ -2,7 +2,7 @@ package terraform
import (
"context"
"fmt"
"errors"
"path/filepath"
"testing"
@ -122,7 +122,7 @@ func TestCheckDashboardsModifiedRemotely_ExistingStateFailureToGet(t *testing.T)
dashboardsAPI := m.GetMockLakeviewAPI()
dashboardsAPI.EXPECT().
GetByDashboardId(mock.Anything, "id1").
Return(nil, fmt.Errorf("failure")).
Return(nil, errors.New("failure")).
Once()
b.SetWorkpaceClient(m.WorkspaceClient)

View File

@ -1261,7 +1261,7 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
func AssertFullResourceCoverage(t *testing.T, config *config.Root) {
resources := reflect.ValueOf(config.Resources)
for i := 0; i < resources.NumField(); i++ {
for i := range resources.NumField() {
field := resources.Field(i)
if field.Kind() == reflect.Map {
assert.True(

View File

@ -7,6 +7,7 @@ import (
"io"
"os"
"path/filepath"
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/cmdio"
@ -67,7 +68,7 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
if changed && !m.opts.AutoApprove {
output := buf.String()
// Remove output starting from Warning until end of output
output = output[:bytes.Index([]byte(output), []byte("Warning:"))]
output = output[:strings.Index(output, "Warning:")]
cmdio.LogString(ctx, output)
if !cmdio.IsPromptSupported(ctx) {
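
The bytes.Index → strings.Index change above searches the string directly instead of copying it into a []byte first; both return -1 when the marker is missing, so the slicing that follows still assumes "Warning:" is present in the output. A minimal sketch:

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	output := "plan summary\nWarning: something changed"

	// Before: convert to []byte just to search.
	i1 := bytes.Index([]byte(output), []byte("Warning:"))

	// After: search the string directly.
	i2 := strings.Index(output, "Warning:")

	fmt.Println(i1 == i2)    // true
	fmt.Println(output[:i2]) // everything before the warning
}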

View File

@ -230,7 +230,7 @@ func setUserAgentExtraEnvVar(environ map[string]string, b *bundle.Bundle) error
// Add "cli" to the user agent in set by the Databricks Terraform provider.
// This will allow us to attribute downstream requests made by the Databricks
// Terraform provider to the CLI.
products := []string{fmt.Sprintf("cli/%s", build.GetInfo().Version)}
products := []string{"cli/" + build.GetInfo().Version}
if experimental := b.Config.Experimental; experimental != nil {
if experimental.PyDABs.Enabled {
products = append(products, "databricks-pydabs/0.0.0")

View File

@ -2,6 +2,7 @@ package terraform
import (
"context"
"errors"
"fmt"
"slices"
@ -58,7 +59,7 @@ func (l *load) validateState(state *resourcesState) error {
}
if len(state.Resources) == 0 && slices.Contains(l.modes, ErrorOnEmptyState) {
return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?")
return errors.New("no deployment state. Did you forget to run 'databricks bundle deploy'?")
}
return nil

View File

@ -71,7 +71,7 @@ func TestStatePushLargeState(t *testing.T) {
b := statePushTestBundle(t)
largeState := map[string]any{}
for i := 0; i < 1000000; i++ {
for i := range 1000000 {
largeState[fmt.Sprintf("field_%d", i)] = i
}

View File

@ -54,7 +54,7 @@ func (p *openapiParser) findRef(typ reflect.Type) (jsonschema.Schema, bool) {
// Check for embedded Databricks Go SDK types.
if typ.Kind() == reflect.Struct {
for i := 0; i < typ.NumField(); i++ {
for i := range typ.NumField() {
if !typ.Field(i).Anonymous {
continue
}

View File

@ -1,7 +1,7 @@
package libraries
import (
"fmt"
"errors"
"github.com/databricks/databricks-sdk-go/service/compute"
)
@ -20,5 +20,5 @@ func libraryPath(library *compute.Library) (string, error) {
return library.Requirements, nil
}
return "", fmt.Errorf("not supported library type")
return "", errors.New("not supported library type")
}

View File

@ -3,6 +3,7 @@ package permissions
import (
"context"
"fmt"
"strconv"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/libraries"
@ -78,7 +79,7 @@ func setPermissions(ctx context.Context, w workspace.WorkspaceInterface, path st
}
_, err = w.SetPermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{
WorkspaceObjectId: fmt.Sprint(obj.ObjectId),
WorkspaceObjectId: strconv.FormatInt(obj.ObjectId, 10),
WorkspaceObjectType: "directories",
AccessControlList: permissions,
})

View File

@ -2,7 +2,7 @@ package phases
import (
"context"
"fmt"
"errors"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/artifacts"
@ -54,7 +54,7 @@ func filterDeleteOrRecreateActions(changes []*tfjson.ResourceChange, resourceTyp
func approvalForDeploy(ctx context.Context, b *bundle.Bundle) (bool, error) {
tf := b.Terraform
if tf == nil {
return false, fmt.Errorf("terraform not initialized")
return false, errors.New("terraform not initialized")
}
// read plan file
@ -111,7 +111,7 @@ is removed from the catalog, but the underlying files are not deleted:`
}
if !cmdio.IsPromptSupported(ctx) {
return false, fmt.Errorf("the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
return false, errors.New("the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
}
cmdio.LogString(ctx, "")

View File

@ -3,7 +3,6 @@ package phases
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/databricks/cli/bundle"
@ -34,7 +33,7 @@ func assertRootPathExists(ctx context.Context, b *bundle.Bundle) (bool, error) {
func approvalForDestroy(ctx context.Context, b *bundle.Bundle) (bool, error) {
tf := b.Terraform
if tf == nil {
return false, fmt.Errorf("terraform not initialized")
return false, errors.New("terraform not initialized")
}
// read plan file
@ -63,7 +62,7 @@ func approvalForDestroy(ctx context.Context, b *bundle.Bundle) (bool, error) {
}
cmdio.LogString(ctx, fmt.Sprintf("All files and directories at the following location will be deleted: %s", b.Config.Workspace.RootPath))
cmdio.LogString(ctx, "All files and directories at the following location will be deleted: "+b.Config.Workspace.RootPath)
cmdio.LogString(ctx, "")
if b.AutoApprove {

View File

@ -2,6 +2,7 @@ package bundle
import (
"context"
"errors"
"fmt"
"os"
@ -21,7 +22,7 @@ func getRootEnv(ctx context.Context) (string, error) {
}
stat, err := os.Stat(path)
if err == nil && !stat.IsDir() {
err = fmt.Errorf("not a directory")
err = errors.New("not a directory")
}
if err != nil {
return "", fmt.Errorf(`invalid bundle root %s="%s": %w`, env.RootVariable, path, err)

View File

@ -3,6 +3,7 @@ package run
import (
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"time"
@ -181,13 +182,13 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e
// callback to log progress events. Called on every poll request
progressLogger, ok := cmdio.FromContext(ctx)
if !ok {
return nil, fmt.Errorf("no progress logger found")
return nil, errors.New("no progress logger found")
}
logProgress := logProgressCallback(ctx, progressLogger)
waiter, err := w.Jobs.RunNow(ctx, *req)
if err != nil {
return nil, fmt.Errorf("cannot start job")
return nil, errors.New("cannot start job")
}
if opts.NoWait {
@ -266,7 +267,7 @@ func (r *jobRunner) convertPythonParams(opts *Options) error {
if len(opts.Job.pythonParams) > 0 {
if _, ok := opts.Job.notebookParams["__python_params"]; ok {
return fmt.Errorf("can't use __python_params as notebook param, the name is reserved for internal use")
return errors.New("can't use __python_params as notebook param, the name is reserved for internal use")
}
p, err := json.Marshal(opts.Job.pythonParams)
if err != nil {

View File

@ -1,7 +1,7 @@
package run
import (
"fmt"
"errors"
"strconv"
"github.com/databricks/cli/bundle/config/resources"
@ -60,16 +60,16 @@ func (o *JobOptions) hasJobParametersConfigured() bool {
// Validate returns if the combination of options is valid.
func (o *JobOptions) Validate(job *resources.Job) error {
if job == nil {
return fmt.Errorf("job not defined")
return errors.New("job not defined")
}
// Ensure mutual exclusion on job parameters and task parameters.
hasJobParams := len(job.Parameters) > 0
if hasJobParams && o.hasTaskParametersConfigured() {
return fmt.Errorf("the job to run defines job parameters; specifying task parameters is not allowed")
return errors.New("the job to run defines job parameters; specifying task parameters is not allowed")
}
if !hasJobParams && o.hasJobParametersConfigured() {
return fmt.Errorf("the job to run does not define job parameters; specifying job parameters is not allowed")
return errors.New("the job to run does not define job parameters; specifying job parameters is not allowed")
}
return nil
@ -80,7 +80,7 @@ func (o *JobOptions) validatePipelineParams() (*jobs.PipelineParams, error) {
return nil, nil
}
defaultErr := fmt.Errorf("job run argument --pipeline-params only supports `full_refresh=<bool>`")
defaultErr := errors.New("job run argument --pipeline-params only supports `full_refresh=<bool>`")
v, ok := o.pipelineParams["full_refresh"]
if !ok {
return nil, defaultErr

View File

@ -47,7 +47,7 @@ func (out *JobOutput) String() (string, error) {
}
result.WriteString("=======\n")
result.WriteString(fmt.Sprintf("Task %s:\n", v.TaskKey))
result.WriteString(fmt.Sprintf("%s\n", taskString))
result.WriteString(taskString + "\n")
}
return result.String(), nil
}

View File

@ -2,7 +2,6 @@ package output
import (
"encoding/json"
"fmt"
"github.com/databricks/databricks-sdk-go/service/jobs"
)
@ -27,7 +26,7 @@ func structToString(val any) (string, error) {
func (out *NotebookOutput) String() (string, error) {
if out.Truncated {
return fmt.Sprintf("%s\n[truncated...]\n", out.Result), nil
return out.Result + "\n[truncated...]\n", nil
}
return out.Result, nil
}
@ -42,7 +41,7 @@ func (out *DbtOutput) String() (string, error) {
// JSON is used because it's a convenient representation.
// If user needs machine parsable output, they can use the --output json
// flag
return fmt.Sprintf("Dbt Task Output:\n%s", outputString), nil
return "Dbt Task Output:\n" + outputString, nil
}
func (out *SqlOutput) String() (string, error) {
@ -55,12 +54,12 @@ func (out *SqlOutput) String() (string, error) {
// JSON is used because it's a convenient representation.
// If user needs machine parsable output, they can use the --output json
// flag
return fmt.Sprintf("SQL Task Output:\n%s", outputString), nil
return "SQL Task Output:\n" + outputString, nil
}
func (out *LogsOutput) String() (string, error) {
if out.LogsTruncated {
return fmt.Sprintf("%s\n[truncated...]\n", out.Logs), nil
return out.Logs + "\n[truncated...]\n", nil
}
return out.Logs, nil
}

View File

@ -2,6 +2,7 @@ package run
import (
"context"
"errors"
"fmt"
"time"
@ -17,7 +18,7 @@ import (
func filterEventsByUpdateId(events []pipelines.PipelineEvent, updateId string) []pipelines.PipelineEvent {
result := []pipelines.PipelineEvent{}
for i := 0; i < len(events); i++ {
for i := range events {
if events[i].Origin.UpdateId == updateId {
result = append(result, events[i])
}
@ -32,8 +33,8 @@ func (r *pipelineRunner) logEvent(ctx context.Context, event pipelines.PipelineE
}
if event.Error != nil && len(event.Error.Exceptions) > 0 {
logString += "trace for most recent exception: \n"
for i := 0; i < len(event.Error.Exceptions); i++ {
logString += fmt.Sprintf("%s\n", event.Error.Exceptions[i].Message)
for i := range len(event.Error.Exceptions) {
logString += event.Error.Exceptions[i].Message + "\n"
}
}
if logString != "" {
@ -107,7 +108,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp
updateTracker := progress.NewUpdateTracker(pipelineID, updateID, w)
progressLogger, ok := cmdio.FromContext(ctx)
if !ok {
return nil, fmt.Errorf("no progress logger found")
return nil, errors.New("no progress logger found")
}
// Log the pipeline update URL as soon as it is available.
@ -144,7 +145,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp
if state == pipelines.UpdateInfoStateCanceled {
log.Infof(ctx, "Update was cancelled!")
return nil, fmt.Errorf("update cancelled")
return nil, errors.New("update cancelled")
}
if state == pipelines.UpdateInfoStateFailed {
log.Infof(ctx, "Update has failed!")
@ -152,7 +153,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp
if err != nil {
return nil, err
}
return nil, fmt.Errorf("update failed")
return nil, errors.New("update failed")
}
if state == pipelines.UpdateInfoStateCompleted {
log.Infof(ctx, "Update has completed successfully!")
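
Both loop rewrites in this hunk drop the manual counter: filterEventsByUpdateId ranges over the slice itself, while the exception logging ranges over len(...) with Go 1.22's range-over-int. A small sketch with made-up event names:

package main

import "fmt"

func main() {
	events := []string{"created", "running", "completed"}

	// Ranging over the slice yields indices 0..len-1 directly.
	for i := range events {
		fmt.Println(i, events[i])
	}

	// Ranging over len(events) (Go 1.22+) is equivalent for index-only loops.
	for i := range len(events) {
		fmt.Println(i, events[i])
	}
}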

View File

@ -33,7 +33,7 @@ func (event *ProgressEvent) String() string {
// construct error string if level=`Error`
if event.Level == pipelines.EventLevelError && event.Error != nil {
for _, exception := range event.Error.Exceptions {
result.WriteString(fmt.Sprintf("\n%s", exception.Message))
result.WriteString("\n" + exception.Message)
}
}
return result.String()

View File

@ -2,7 +2,6 @@ package config_tests
import (
"context"
"fmt"
"testing"
"github.com/databricks/cli/bundle"
@ -219,7 +218,7 @@ func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) {
for _, tc := range tcases {
t.Run(tc.name, func(t *testing.T) {
bundlePath := fmt.Sprintf("./run_as/not_allowed/neither_sp_nor_user/%s", tc.name)
bundlePath := "./run_as/not_allowed/neither_sp_nor_user/" + tc.name
b := load(t, bundlePath)
ctx := context.Background()

View File

@ -2,6 +2,7 @@ package trampoline
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
@ -147,7 +148,7 @@ func (t *pythonTrampoline) GetTemplateData(task *jobs.Task) (map[string]any, err
func (t *pythonTrampoline) generateParameters(task *jobs.PythonWheelTask) (string, error) {
if task.Parameters != nil && task.NamedParameters != nil {
return "", fmt.Errorf("not allowed to pass both paramaters and named_parameters")
return "", errors.New("not allowed to pass both paramaters and named_parameters")
}
params := append([]string{task.PackageName}, task.Parameters...)
for k, v := range task.NamedParameters {

View File

@ -2,7 +2,7 @@ package trampoline
import (
"context"
"fmt"
"errors"
"os"
"path/filepath"
"testing"
@ -30,7 +30,7 @@ func (f *functions) GetTasks(b *bundle.Bundle) []TaskWithJobKey {
func (f *functions) GetTemplateData(task *jobs.Task) (map[string]any, error) {
if task.PythonWheelTask == nil {
return nil, fmt.Errorf("PythonWheelTask cannot be nil")
return nil, errors.New("PythonWheelTask cannot be nil")
}
data := make(map[string]any)

View File

@ -2,7 +2,7 @@ package auth
import (
"context"
"fmt"
"errors"
"github.com/databricks/cli/libs/auth"
"github.com/databricks/cli/libs/cmdio"
@ -36,7 +36,7 @@ GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html`,
func promptForHost(ctx context.Context) (string, error) {
if !cmdio.IsInTTY(ctx) {
return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify a host using --host")
return "", errors.New("the command is being run in a non-interactive environment, please specify a host using --host")
}
prompt := cmdio.Prompt(ctx)
@ -46,7 +46,7 @@ func promptForHost(ctx context.Context) (string, error) {
func promptForAccountID(ctx context.Context) (string, error) {
if !cmdio.IsInTTY(ctx) {
return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify an account ID using --account-id")
return "", errors.New("the command is being run in a non-interactive environment, please specify an account ID using --account-id")
}
prompt := cmdio.Prompt(ctx)

View File

@ -2,7 +2,7 @@ package auth
import (
"context"
"fmt"
"errors"
"testing"
"github.com/databricks/cli/cmd/root"
@ -102,7 +102,7 @@ func TestGetWorkspaceAuthStatusError(t *testing.T) {
"token": "test-token",
"auth_type": "azure-cli",
})
return cfg, false, fmt.Errorf("auth error")
return cfg, false, errors.New("auth error")
})
require.NoError(t, err)
require.NotNil(t, status)
@ -151,7 +151,7 @@ func TestGetWorkspaceAuthStatusSensitive(t *testing.T) {
"token": "test-token",
"auth_type": "azure-cli",
})
return cfg, false, fmt.Errorf("auth error")
return cfg, false, errors.New("auth error")
})
require.NoError(t, err)
require.NotNil(t, status)

View File

@ -23,9 +23,9 @@ func canonicalHost(host string) (string, error) {
}
// If the host is empty, assume the scheme wasn't included.
if parsedHost.Host == "" {
return fmt.Sprintf("https://%s", host), nil
return "https://" + host, nil
}
return fmt.Sprintf("https://%s", parsedHost.Host), nil
return "https://" + parsedHost.Host, nil
}
var ErrNoMatchingProfiles = errors.New("no matching profiles found")
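
For context on the two return branches above: url.Parse leaves Host empty for a bare hostname (the whole value ends up in Path), which is why the empty-Host case falls back to prefixing the raw input. A small sketch with hypothetical host values:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, host := range []string{
		"abc.cloud.databricks.com",         // no scheme: Host is empty, value lands in Path
		"https://abc.cloud.databricks.com", // scheme present: Host is populated
	} {
		u, err := url.Parse(host)
		if err != nil {
			panic(err)
		}
		fmt.Printf("input=%q host=%q path=%q\n", host, u.Host, u.Path)
	}
}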

View File

@ -176,7 +176,7 @@ depends on the existing profiles you have set in your configuration file
func setHostAndAccountId(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error {
// If both [HOST] and --host are provided, return an error.
if len(args) > 0 && persistentAuth.Host != "" {
return fmt.Errorf("please only provide a host as an argument or a flag, not both")
return errors.New("please only provide a host as an argument or a flag, not both")
}
profiler := profile.GetProfiler(ctx)

View File

@ -2,7 +2,7 @@ package bundle
import (
"context"
"fmt"
"errors"
"os"
"github.com/databricks/cli/bundle"
@ -49,16 +49,16 @@ func newDestroyCommand() *cobra.Command {
// we require auto-approve for non tty terminals since interactive consent
// is not possible
if !term.IsTerminal(int(os.Stderr.Fd())) && !autoApprove {
return fmt.Errorf("please specify --auto-approve to skip interactive confirmation checks for non tty consoles")
return errors.New("please specify --auto-approve to skip interactive confirmation checks for non tty consoles")
}
// Check auto-approve is selected for json logging
logger, ok := cmdio.FromContext(ctx)
if !ok {
return fmt.Errorf("progress logger not found")
return errors.New("progress logger not found")
}
if logger.Mode == flags.ModeJson && !autoApprove {
return fmt.Errorf("please specify --auto-approve since selected logging format is json")
return errors.New("please specify --auto-approve since selected logging format is json")
}
diags = bundle.Apply(ctx, b, bundle.Seq(

View File

@ -96,7 +96,7 @@ func (d *dashboard) resolveFromPath(ctx context.Context, b *bundle.Bundle) (stri
return "", diag.Diagnostics{
{
Severity: diag.Error,
Summary: fmt.Sprintf("expected a dashboard, found a %s", found),
Summary: "expected a dashboard, found a " + found,
},
}
}
@ -188,7 +188,7 @@ func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle,
func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, key string) error {
// Save serialized dashboard definition to the dashboard directory.
dashboardBasename := fmt.Sprintf("%s.lvdash.json", key)
dashboardBasename := key + ".lvdash.json"
dashboardPath := filepath.Join(d.dashboardDir, dashboardBasename)
err := d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath)
if err != nil {
@ -215,7 +215,7 @@ func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, das
}
// Save the configuration to the resource directory.
resourcePath := filepath.Join(d.resourceDir, fmt.Sprintf("%s.dashboard.yml", key))
resourcePath := filepath.Join(d.resourceDir, key+".dashboard.yml")
saver := yamlsaver.NewSaverWithStyle(map[string]yaml.Style{
"display_name": yaml.DoubleQuotedStyle,
})

View File

@ -85,8 +85,8 @@ func NewGenerateJobCommand() *cobra.Command {
return err
}
oldFilename := filepath.Join(configDir, fmt.Sprintf("%s.yml", jobKey))
filename := filepath.Join(configDir, fmt.Sprintf("%s.job.yml", jobKey))
oldFilename := filepath.Join(configDir, jobKey+".yml")
filename := filepath.Join(configDir, jobKey+".job.yml")
// User might continuously run generate command to update their bundle jobs with any changes made in Databricks UI.
// Due to changing in the generated file names, we need to first rename existing resource file to the new name.
@ -107,7 +107,7 @@ func NewGenerateJobCommand() *cobra.Command {
return err
}
cmdio.LogString(ctx, fmt.Sprintf("Job configuration successfully saved to %s", filename))
cmdio.LogString(ctx, "Job configuration successfully saved to "+filename)
return nil
}

View File

@ -85,8 +85,8 @@ func NewGeneratePipelineCommand() *cobra.Command {
return err
}
oldFilename := filepath.Join(configDir, fmt.Sprintf("%s.yml", pipelineKey))
filename := filepath.Join(configDir, fmt.Sprintf("%s.pipeline.yml", pipelineKey))
oldFilename := filepath.Join(configDir, pipelineKey+".yml")
filename := filepath.Join(configDir, pipelineKey+".pipeline.yml")
// User might continuously run generate command to update their bundle jobs with any changes made in Databricks UI.
// Due to changing in the generated file names, we need to first rename existing resource file to the new name.
@ -109,7 +109,7 @@ func NewGeneratePipelineCommand() *cobra.Command {
return err
}
cmdio.LogString(ctx, fmt.Sprintf("Pipeline configuration successfully saved to %s", filename))
cmdio.LogString(ctx, "Pipeline configuration successfully saved to "+filename)
return nil
}

View File

@ -126,7 +126,7 @@ func (n *downloader) FlushToDisk(ctx context.Context, force bool) error {
return err
}
cmdio.LogString(errCtx, fmt.Sprintf("File successfully saved to %s", targetPath))
cmdio.LogString(errCtx, "File successfully saved to "+targetPath)
return reader.Close()
})
}

View File

@ -1,7 +1,7 @@
package bundle
import (
"fmt"
"errors"
"github.com/databricks/cli/cmd/root"
"github.com/spf13/cobra"
@ -19,7 +19,7 @@ func newLaunchCommand() *cobra.Command {
}
cmd.RunE = func(cmd *cobra.Command, args []string) error {
return fmt.Errorf("TODO")
return errors.New("TODO")
// contents, err := os.ReadFile(args[0])
// if err != nil {
// return err

View File

@ -44,7 +44,7 @@ func resolveOpenArgument(ctx context.Context, b *bundle.Bundle, args []string) (
}
if len(args) < 1 {
return "", fmt.Errorf("expected a KEY of the resource to open")
return "", errors.New("expected a KEY of the resource to open")
}
return args[0], nil
@ -113,7 +113,7 @@ func newOpenCommand() *cobra.Command {
// Confirm that the resource has a URL.
url := ref.Resource.GetURL()
if url == "" {
return fmt.Errorf("resource does not have a URL associated with it (has it been deployed?)")
return errors.New("resource does not have a URL associated with it (has it been deployed?)")
}
return browser.OpenURL(url)

View File

@ -3,6 +3,7 @@ package bundle
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/databricks/cli/bundle"
@ -48,7 +49,7 @@ func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (s
}
if len(args) < 1 {
return "", nil, fmt.Errorf("expected a KEY of the resource to run")
return "", nil, errors.New("expected a KEY of the resource to run")
}
return args[0], args[1:], nil

View File

@ -1,7 +1,7 @@
package bundle
import (
"fmt"
"errors"
"github.com/spf13/cobra"
)
@ -17,7 +17,7 @@ func newTestCommand() *cobra.Command {
}
cmd.RunE = func(cmd *cobra.Command, args []string) error {
return fmt.Errorf("TODO")
return errors.New("TODO")
// results := project.RunPythonOnDev(cmd.Context(), `return 1`)
// if results.Failed() {
// return results.Err()

View File

@ -2,6 +2,7 @@ package bundle
import (
"encoding/json"
"errors"
"fmt"
"github.com/databricks/cli/bundle"
@ -39,7 +40,7 @@ func newValidateCommand() *cobra.Command {
if err := diags.Error(); err != nil {
return diags.Error()
} else {
return fmt.Errorf("invariant failed: returned bundle is nil")
return errors.New("invariant failed: returned bundle is nil")
}
}

View File

@ -1,6 +1,7 @@
package configure
import (
"errors"
"fmt"
"github.com/databricks/cli/libs/cmdio"
@ -62,12 +63,12 @@ func configureInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config
func configureNonInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config.Config) error {
if cfg.Host == "" {
return fmt.Errorf("host must be set in non-interactive mode")
return errors.New("host must be set in non-interactive mode")
}
// Check presence of cluster ID before reading token to fail fast.
if flags.ConfigureCluster && cfg.ClusterID == "" {
return fmt.Errorf("cluster ID must be set in non-interactive mode")
return errors.New("cluster ID must be set in non-interactive mode")
}
// Read token from stdin if not already set.

View File

@ -1,7 +1,7 @@
package configure
import (
"fmt"
"errors"
"net/url"
)
@ -11,10 +11,10 @@ func validateHost(s string) error {
return err
}
if u.Host == "" || u.Scheme != "https" {
return fmt.Errorf("must start with https://")
return errors.New("must start with https://")
}
if u.Path != "" && u.Path != "/" {
return fmt.Errorf("must use empty path")
return errors.New("must use empty path")
}
return nil
}

View File

@ -12,7 +12,7 @@ import (
const repositoryCacheTTL = 24 * time.Hour
func NewRepositoryCache(org, cacheDir string) *repositoryCache {
filename := fmt.Sprintf("%s-repositories", org)
filename := org + "-repositories"
return &repositoryCache{
cache: localcache.NewLocalCache[Repositories](cacheDir, filename, repositoryCacheTTL),
Org: org,

View File

@ -1,6 +1,7 @@
package labs
import (
"errors"
"fmt"
"github.com/databricks/cli/cmd/labs/project"
@ -49,7 +50,7 @@ func newInstalledCommand() *cobra.Command {
})
}
if len(info.Projects) == 0 {
return fmt.Errorf("no projects installed")
return errors.New("no projects installed")
}
return cmdio.Render(ctx, info)
},

View File

@ -93,7 +93,7 @@ func (r *LocalCache[T]) writeCache(ctx context.Context, data T) (T, error) {
}
func (r *LocalCache[T]) FileName() string {
return filepath.Join(r.dir, fmt.Sprintf("%s.json", r.name))
return filepath.Join(r.dir, r.name+".json")
}
func (r *LocalCache[T]) loadCache() (*cached[T], error) {

View File

@ -3,7 +3,6 @@ package localcache
import (
"context"
"errors"
"fmt"
"net/url"
"runtime"
"testing"
@ -115,7 +114,7 @@ func TestFolderDisappears(t *testing.T) {
func TestRefreshFails(t *testing.T) {
c := NewLocalCache[int64](t.TempDir(), "time", 1*time.Minute)
tick := func() (int64, error) {
return 0, fmt.Errorf("nope")
return 0, errors.New("nope")
}
ctx := context.Background()
_, err := c.Load(ctx, tick)

View File

@ -175,7 +175,7 @@ func (i *installer) login(ctx context.Context) (*databricks.WorkspaceClient, err
return nil, fmt.Errorf("valid: %w", err)
}
if !i.HasAccountLevelCommands() && cfg.IsAccountClient() {
return nil, fmt.Errorf("got account-level client, but no account-level commands")
return nil, errors.New("got account-level client, but no account-level commands")
}
lc := &loginConfig{Entrypoint: i.Installer.Entrypoint}
w, err := lc.askWorkspace(ctx, cfg)
@ -200,10 +200,10 @@ func (i *installer) downloadLibrary(ctx context.Context) error {
libTarget := i.LibDir()
// we may support wheels, jars, and golang binaries. but those are not zipballs
if i.IsZipball() {
feedback <- fmt.Sprintf("Downloading and unpacking zipball for %s", i.version)
feedback <- "Downloading and unpacking zipball for " + i.version
return i.downloadAndUnpackZipball(ctx, libTarget)
}
return fmt.Errorf("we only support zipballs for now")
return errors.New("we only support zipballs for now")
}
func (i *installer) downloadAndUnpackZipball(ctx context.Context, libTarget string) error {
@ -234,7 +234,7 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr
log.Debugf(ctx, "Detected Python %s at: %s", py.Version, py.Path)
venvPath := i.virtualEnvPath(ctx)
log.Debugf(ctx, "Creating Python Virtual Environment at: %s", venvPath)
feedback <- fmt.Sprintf("Creating Virtual Environment with Python %s", py.Version)
feedback <- "Creating Virtual Environment with Python " + py.Version
_, err = process.Background(ctx, []string{py.Path, "-m", "venv", venvPath})
if err != nil {
return fmt.Errorf("create venv: %w", err)
@ -251,8 +251,8 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr
if !ok {
return fmt.Errorf("unsupported runtime: %s", cluster.SparkVersion)
}
feedback <- fmt.Sprintf("Installing Databricks Connect v%s", runtimeVersion)
pipSpec := fmt.Sprintf("databricks-connect==%s", runtimeVersion)
feedback <- "Installing Databricks Connect v" + runtimeVersion
pipSpec := "databricks-connect==" + runtimeVersion
err = i.installPythonDependencies(ctx, pipSpec)
if err != nil {
return fmt.Errorf("dbconnect: %w", err)

View File

@ -1,7 +1,7 @@
package labs
import (
"fmt"
"errors"
"github.com/databricks/cli/cmd/labs/project"
"github.com/databricks/cli/cmd/root"
@ -34,7 +34,7 @@ func newShowCommand() *cobra.Command {
return err
}
if len(installed) == 0 {
return fmt.Errorf("no projects found")
return errors.New("no projects found")
}
name := args[0]
for _, v := range installed {

View File

@ -26,7 +26,7 @@ type ErrNoWorkspaceProfiles struct {
}
func (e ErrNoWorkspaceProfiles) Error() string {
return fmt.Sprintf("%s does not contain workspace profiles; please create one by running 'databricks configure'", e.path)
return e.path + " does not contain workspace profiles; please create one by running 'databricks configure'"
}
type ErrNoAccountProfiles struct {
@ -34,7 +34,7 @@ type ErrNoAccountProfiles struct {
}
func (e ErrNoAccountProfiles) Error() string {
return fmt.Sprintf("%s does not contain account profiles", e.path)
return e.path + " does not contain account profiles"
}
func initProfileFlag(cmd *cobra.Command) {
@ -253,7 +253,7 @@ func AskForWorkspaceProfile(ctx context.Context) (string, error) {
return profiles[0].Name, nil
}
i, _, err := cmdio.RunSelect(ctx, &promptui.Select{
Label: fmt.Sprintf("Workspace profiles defined in %s", path),
Label: "Workspace profiles defined in " + path,
Items: profiles,
Searcher: profiles.SearchCaseInsensitive,
StartInSearchMode: true,
@ -287,7 +287,7 @@ func AskForAccountProfile(ctx context.Context) (string, error) {
return profiles[0].Name, nil
}
i, _, err := cmdio.RunSelect(ctx, &promptui.Select{
Label: fmt.Sprintf("Account profiles defined in %s", path),
Label: "Account profiles defined in " + path,
Items: profiles,
Searcher: profiles.SearchCaseInsensitive,
StartInSearchMode: true,

View File

@ -2,7 +2,7 @@ package root
import (
"context"
"fmt"
"errors"
"os"
"github.com/databricks/cli/libs/cmdio"
@ -37,7 +37,7 @@ func (f *progressLoggerFlag) initializeContext(ctx context.Context) (context.Con
if f.log.level.String() != "disabled" && f.log.file.String() == "stderr" &&
f.ProgressLogFormat == flags.ModeInplace {
return nil, fmt.Errorf("inplace progress logging cannot be used when log-file is stderr")
return nil, errors.New("inplace progress logging cannot be used when log-file is stderr")
}
format := f.ProgressLogFormat

View File

@ -2,7 +2,6 @@ package sync
import (
"context"
"fmt"
"path"
"strings"
@ -52,8 +51,8 @@ func completeRemotePath(
}
prefixes := []string{
path.Clean(fmt.Sprintf("/Users/%s", me.UserName)) + "/",
path.Clean(fmt.Sprintf("/Repos/%s", me.UserName)) + "/",
path.Clean("/Users/"+me.UserName) + "/",
path.Clean("/Repos/"+me.UserName) + "/",
}
validPrefix := false

View File

@ -2,6 +2,7 @@ package sync
import (
"context"
"errors"
"flag"
"fmt"
"io"
@ -29,7 +30,7 @@ type syncFlags struct {
func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b *bundle.Bundle) (*sync.SyncOptions, error) {
if len(args) > 0 {
return nil, fmt.Errorf("SRC and DST are not configurable in the context of a bundle")
return nil, errors.New("SRC and DST are not configurable in the context of a bundle")
}
opts, err := files.GetSyncOptions(cmd.Context(), bundle.ReadOnly(b))

Some files were not shown because too many files have changed in this diff.