Merge remote-tracking branch 'origin' into implement-async-logger

Shreyas Goenka 2025-01-23 14:19:46 +01:00
commit 01d63dd20e
No known key found for this signature in database
GPG Key ID: 92A07DF49CCB0622
56 changed files with 2273 additions and 791 deletions

View File

@ -1 +1 @@
779817ed8d63031f5ea761fbd25ee84f38feec0d
0be1b914249781b5e903b7676fd02255755bc851

View File

@ -109,16 +109,19 @@ var {{.CamelName}}Overrides []func(
{{- end }}
)
{{- $excludeFromJson := list "http-request"}}
func new{{.PascalName}}() *cobra.Command {
cmd := &cobra.Command{}
{{- $canUseJson := and .CanUseJson (not (in $excludeFromJson .KebabName )) -}}
{{- if .Request}}
var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}}
{{- if .RequestBodyField }}
{{.CamelName}}Req.{{.RequestBodyField.PascalName}} = &{{.Service.Package.Name}}.{{.RequestBodyField.Entity.PascalName}}{}
{{- end }}
{{- if .CanUseJson}}
{{- if $canUseJson}}
var {{.CamelName}}Json flags.JsonFlag
{{- end}}
{{- end}}
@ -135,7 +138,7 @@ func new{{.PascalName}}() *cobra.Command {
{{- $request = .RequestBodyField.Entity -}}
{{- end -}}
{{if $request }}// TODO: short flags
{{- if .CanUseJson}}
{{- if $canUseJson}}
cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`)
{{- end}}
{{$method := .}}
@ -177,7 +180,7 @@ func new{{.PascalName}}() *cobra.Command {
{{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}}
{{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt $request) (eq 1 (len $request.RequiredRequestBodyFields)) -}}
{{- $onlyPathArgsRequiredAsPositionalArguments := and $request (eq (len .RequiredPositionalArguments) (len $request.RequiredPathFields)) -}}
{{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson (or $request.HasRequiredRequestBodyFields )) -}}
{{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and $canUseJson (or $request.HasRequiredRequestBodyFields )) -}}
{{- $hasCustomArgHandler := or $hasRequiredArgs $hasDifferentArgsWithJsonFlag -}}
{{- $atleastOneArgumentWithDescription := false -}}
@ -239,7 +242,7 @@ func new{{.PascalName}}() *cobra.Command {
ctx := cmd.Context()
{{if .Service.IsAccounts}}a := root.AccountClient(ctx){{else}}w := root.WorkspaceClient(ctx){{end}}
{{- if .Request }}
{{ if .CanUseJson }}
{{ if $canUseJson }}
if cmd.Flags().Changed("json") {
diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req{{ if .RequestBodyField }}.{{.RequestBodyField.PascalName}}{{ end }})
if diags.HasError() {
@ -255,7 +258,7 @@ func new{{.PascalName}}() *cobra.Command {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}{{- end}}
{{- if $hasPosArgs }}
{{- if and .CanUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }} else {
{{- if and $canUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }} else {
{{- end}}
{{- if $hasIdPrompt}}
if len(args) == 0 {
@ -279,9 +282,9 @@ func new{{.PascalName}}() *cobra.Command {
{{$method := .}}
{{- range $arg, $field := .RequiredPositionalArguments}}
{{- template "args-scan" (dict "Arg" $arg "Field" $field "Method" $method "HasIdPrompt" $hasIdPrompt)}}
{{- template "args-scan" (dict "Arg" $arg "Field" $field "Method" $method "HasIdPrompt" $hasIdPrompt "ExcludeFromJson" $excludeFromJson)}}
{{- end -}}
{{- if and .CanUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }}
{{- if and $canUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }}
}
{{- end}}
@ -392,7 +395,8 @@ func new{{.PascalName}}() *cobra.Command {
{{- $method := .Method -}}
{{- $arg := .Arg -}}
{{- $hasIdPrompt := .HasIdPrompt -}}
{{- $optionalIfJsonIsUsed := and (not $hasIdPrompt) (and $field.IsRequestBodyField $method.CanUseJson) }}
{{ $canUseJson := and $method.CanUseJson (not (in .ExcludeFromJson $method.KebabName)) }}
{{- $optionalIfJsonIsUsed := and (not $hasIdPrompt) (and $field.IsRequestBodyField $canUseJson) }}
{{- if $optionalIfJsonIsUsed }}
if !cmd.Flags().Changed("json") {
{{- end }}

1
.gitattributes vendored
View File

@ -31,6 +31,7 @@ cmd/account/users/users.go linguist-generated=true
cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true
cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true
cmd/account/workspaces/workspaces.go linguist-generated=true
cmd/workspace/access-control/access-control.go linguist-generated=true
cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go linguist-generated=true
cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go linguist-generated=true
cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true

View File

@ -3,12 +3,12 @@ package acceptance_test
import (
"context"
"errors"
"flag"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"slices"
"sort"
@ -23,7 +23,22 @@ import (
"github.com/stretchr/testify/require"
)
var KeepTmp = os.Getenv("KEEP_TMP") != ""
var KeepTmp bool
// In order to debug CLI running under acceptance test, set this to full subtest name, e.g. "bundle/variables/empty"
// Then install your breakpoints and click "debug test" near TestAccept in VSCODE.
// example: var SingleTest = "bundle/variables/empty"
var SingleTest = ""
// If enabled, instead of compiling and running CLI externally, we'll start in-process server that accepts and runs
// CLI commands. The $CLI in test scripts is a helper that just forwards command-line arguments to this server (see bin/callserver.py).
// Also disables parallelism in tests.
var InprocessMode bool
func init() {
flag.BoolVar(&InprocessMode, "inprocess", SingleTest != "", "Run CLI in the same process as test (for debugging)")
flag.BoolVar(&KeepTmp, "keeptmp", false, "Do not delete TMP directory after run")
}
const (
EntryPointScript = "script"
@ -38,6 +53,23 @@ var Scripts = map[string]bool{
}
func TestAccept(t *testing.T) {
testAccept(t, InprocessMode, SingleTest)
}
func TestInprocessMode(t *testing.T) {
if InprocessMode {
t.Skip("Already tested by TestAccept")
}
if runtime.GOOS == "windows" {
// - catalogs A catalog is the first layer of Unity Catalog’s three-level namespace.
// + catalogs A catalog is the first layer of Unity Catalog<6F>s three-level namespace.
t.Skip("Fails on CI on unicode characters")
}
require.NotZero(t, testAccept(t, true, "help"))
}
func testAccept(t *testing.T, InprocessMode bool, singleTest string) int {
repls := testdiff.ReplacementsContext{}
cwd, err := os.Getwd()
require.NoError(t, err)
@ -50,16 +82,22 @@ func TestAccept(t *testing.T) {
t.Logf("Writing coverage to %s", coverDir)
}
execPath := BuildCLI(t, cwd, coverDir)
// $CLI is what test scripts are using
execPath := ""
if InprocessMode {
cmdServer := StartCmdServer(t)
t.Setenv("CMD_SERVER_URL", cmdServer.URL)
execPath = filepath.Join(cwd, "bin", "callserver.py")
} else {
execPath = BuildCLI(t, cwd, coverDir)
}
t.Setenv("CLI", execPath)
repls.Set(execPath, "$CLI")
// Make helper scripts available
t.Setenv("PATH", fmt.Sprintf("%s%c%s", filepath.Join(cwd, "bin"), os.PathListSeparator, os.Getenv("PATH")))
repls := testdiff.ReplacementsContext{}
repls.Set(execPath, "$CLI")
tempHomeDir := t.TempDir()
repls.Set(tempHomeDir, "$TMPHOME")
t.Logf("$TMPHOME=%v", tempHomeDir)
@ -95,13 +133,25 @@ func TestAccept(t *testing.T) {
testDirs := getTests(t)
require.NotEmpty(t, testDirs)
if singleTest != "" {
testDirs = slices.DeleteFunc(testDirs, func(n string) bool {
return n != singleTest
})
require.NotEmpty(t, testDirs, "singleTest=%#v did not match any tests\n%#v", singleTest, testDirs)
}
for _, dir := range testDirs {
testName := strings.ReplaceAll(dir, "\\", "/")
t.Run(testName, func(t *testing.T) {
t.Parallel()
runTest(t, dir, coverDir, repls)
if !InprocessMode {
t.Parallel()
}
runTest(t, dir, coverDir, repls.Clone())
})
}
return len(testDirs)
}
func getTests(t *testing.T) []string {
@ -137,6 +187,13 @@ func runTest(t *testing.T, dir, coverDir string, repls testdiff.ReplacementsCont
tmpDir = t.TempDir()
}
// Converts C:\Users\DENIS~1.BIL -> C:\Users\denis.bilenko
tmpDirEvalled, err1 := filepath.EvalSymlinks(tmpDir)
if err1 == nil && tmpDirEvalled != tmpDir {
repls.SetPathWithParents(tmpDirEvalled, "$TMPDIR")
}
repls.SetPathWithParents(tmpDir, "$TMPDIR")
scriptContents := readMergedScriptContents(t, dir)
testutil.WriteFile(t, filepath.Join(tmpDir, EntryPointScript), scriptContents)
@ -175,8 +232,7 @@ func runTest(t *testing.T, dir, coverDir string, repls testdiff.ReplacementsCont
}
// Make sure there are not unaccounted for new files
files, err := ListDir(t, tmpDir)
require.NoError(t, err)
files := ListDir(t, tmpDir)
for _, relPath := range files {
if _, ok := inputs[relPath]; ok {
continue
@ -393,37 +449,19 @@ func CopyDir(src, dst string, inputs, outputs map[string]bool) error {
})
}
func ListDir(t *testing.T, src string) ([]string, error) {
// exclude folders in .gitignore from comparison
ignored := []string{
"\\.ruff_cache",
"\\.venv",
".*\\.egg-info",
"__pycache__",
// depends on uv version
"uv.lock",
}
func ListDir(t *testing.T, src string) []string {
var files []string
err := filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
// Do not FailNow here.
// The output comparison is happening after this call which includes output.txt which
// includes errors printed by commands which include explanation why a given file cannot be read.
t.Errorf("Error when listing %s: path=%s: %s", src, path, err)
return nil
}
if info.IsDir() {
for _, ignoredFolder := range ignored {
if matched, _ := regexp.MatchString(ignoredFolder, info.Name()); matched {
return filepath.SkipDir
}
}
return nil
} else {
for _, ignoredFolder := range ignored {
if matched, _ := regexp.MatchString(ignoredFolder, info.Name()); matched {
return nil
}
}
}
relPath, err := filepath.Rel(src, path)
@ -434,5 +472,8 @@ func ListDir(t *testing.T, src string) ([]string, error) {
files = append(files, relPath)
return nil
})
return files, err
if err != nil {
t.Errorf("Failed to list %s: %s", src, err)
}
return files
}

31
acceptance/bin/callserver.py Executable file
View File

@ -0,0 +1,31 @@
#!/usr/bin/env python3
import sys
import os
import json
import urllib.request
from urllib.parse import urlencode
env = {}
for key, value in os.environ.items():
if len(value) > 10_000:
sys.stderr.write(f"Dropping key={key} value len={len(value)}\n")
continue
env[key] = value
q = {
"args": " ".join(sys.argv[1:]),
"cwd": os.getcwd(),
"env": json.dumps(env),
}
url = os.environ["CMD_SERVER_URL"] + "/?" + urlencode(q)
if len(url) > 100_000:
sys.exit("url too large")
resp = urllib.request.urlopen(url)
assert resp.status == 200, (resp.status, resp.url, resp.headers)
result = json.load(resp)
sys.stderr.write(result["stderr"])
sys.stdout.write(result["stdout"])
exitcode = int(result["exitcode"])
sys.exit(exitcode)
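
For reference, the same round-trip can be sketched in Go (an illustrative sketch only, not part of this commit; it assumes the query/response format used by callserver.py above and by StartCmdServer later in this diff):

// Minimal Go sketch of the round-trip performed by bin/callserver.py:
// forward args/cwd/env to the in-process command server and relay its result.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"strings"
)

func main() {
	// Same filtering as the script: drop oversized environment values.
	env := map[string]string{}
	for _, kv := range os.Environ() {
		k, v, _ := strings.Cut(kv, "=")
		if len(v) > 10_000 {
			continue
		}
		env[k] = v
	}
	envJSON, _ := json.Marshal(env)
	cwd, _ := os.Getwd()

	q := url.Values{}
	q.Set("args", strings.Join(os.Args[1:], " "))
	q.Set("cwd", cwd)
	q.Set("env", string(envJSON))

	resp, err := http.Get(os.Getenv("CMD_SERVER_URL") + "/?" + q.Encode())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	var result struct {
		Stdout   string `json:"stdout"`
		Stderr   string `json:"stderr"`
		Exitcode int    `json:"exitcode"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Fprint(os.Stderr, result.Stderr)
	fmt.Print(result.Stdout)
	os.Exit(result.Exitcode)
}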

View File

@ -0,0 +1,73 @@
>>> $CLI bundle validate -o json -t development
{
"mode": "development",
"quality_monitors": {
"my_monitor": {
"assets_dir": "/Shared/provider-test/databricks_monitoring/main.test.thing1",
"inference_log": {
"granularities": [
"1 day"
],
"model_id_col": "model_id",
"prediction_col": "prediction",
"problem_type": "PROBLEM_TYPE_REGRESSION",
"timestamp_col": "timestamp"
},
"output_schema_name": "main.dev",
"schedule": null,
"table_name": "main.test.dev"
}
}
}
>>> $CLI bundle validate -o json -t staging
{
"mode": null,
"quality_monitors": {
"my_monitor": {
"assets_dir": "/Shared/provider-test/databricks_monitoring/main.test.thing1",
"inference_log": {
"granularities": [
"1 day"
],
"model_id_col": "model_id",
"prediction_col": "prediction",
"problem_type": "PROBLEM_TYPE_REGRESSION",
"timestamp_col": "timestamp"
},
"output_schema_name": "main.staging",
"schedule": {
"quartz_cron_expression": "0 0 12 * * ?",
"timezone_id": "UTC"
},
"table_name": "main.test.staging"
}
}
}
>>> $CLI bundle validate -o json -t production
{
"mode": null,
"quality_monitors": {
"my_monitor": {
"assets_dir": "/Shared/provider-test/databricks_monitoring/main.test.thing1",
"inference_log": {
"granularities": [
"1 day",
"1 hour"
],
"model_id_col": "model_id_prod",
"prediction_col": "prediction_prod",
"problem_type": "PROBLEM_TYPE_REGRESSION",
"timestamp_col": "timestamp_prod"
},
"output_schema_name": "main.prod",
"schedule": {
"quartz_cron_expression": "0 0 12 * * ?",
"timezone_id": "UTC"
},
"table_name": "main.test.prod"
}
}
}

View File

@ -0,0 +1,3 @@
trace $CLI bundle validate -o json -t development | jq '{ mode: .bundle.mode, quality_monitors: .resources.quality_monitors }'
trace $CLI bundle validate -o json -t staging | jq '{ mode: .bundle.mode, quality_monitors: .resources.quality_monitors }'
trace $CLI bundle validate -o json -t production | jq '{ mode: .bundle.mode, quality_monitors: .resources.quality_monitors }'

View File

@ -0,0 +1,5 @@
bundle:
name: test-bundle
sync:
paths:
- ..

View File

@ -0,0 +1,11 @@
Error: path "$TMPDIR" is not within repository root "$TMPDIR/myrepo"
Name: test-bundle
Target: default
Workspace:
User: $USERNAME
Path: /Workspace/Users/$USERNAME/.bundle/test-bundle/default
Found 1 error
Exit code: 1

View File

@ -0,0 +1,6 @@
# This should error, we do not allow syncroot outside of git repo.
mkdir myrepo
cd myrepo
cp ../databricks.yml .
git-repo-init
$CLI bundle validate | sed 's/\\\\/\//g'

View File

@ -0,0 +1,5 @@
bundle:
name: test-bundle
sync:
paths:
- ..

View File

@ -0,0 +1,7 @@
Name: test-bundle
Target: default
Workspace:
User: $USERNAME
Path: /Workspace/Users/$USERNAME/.bundle/test-bundle/default
Validation OK!

View File

@ -0,0 +1,2 @@
# This should not error, syncroot can be outside bundle root.
$CLI bundle validate

View File

@ -10,3 +10,5 @@ cat databricks.yml | grep -v databricks_cli_version > databricks.yml.new
mv databricks.yml.new databricks.yml
trace $CLI bundle validate -t dev --output json | jq ".resources"
rm -fr .venv resources/__pycache__ uv.lock my_jobs_as_code.egg-info

View File

@ -0,0 +1,24 @@
workspace:
profile: profile_name
root_path: ${var.workspace_root}/path/to/root
variables:
workspace_root:
description: "root directory in the Databricks workspace to store the asset bundle and associated artifacts"
default: /Users/${workspace.current_user.userName}
targets:
dev:
default: true
prod:
variables:
workspace_root: /Shared
resources:
jobs:
my_job:
tasks:
- existing_cluster_id: 500
python_wheel_task:
named_parameters:
conf-file: "${workspace.file_path}/path/to/config.yaml"

View File

@ -0,0 +1,67 @@
/Workspace should be prepended on all paths, but it is not the case:
{
"bundle": {
"environment": "dev",
"git": {
"bundle_root_path": ".",
"inferred": true
},
"target": "dev",
"terraform": {
"exec_path": "$TMPHOME"
}
},
"resources": {
"jobs": {
"my_job": {
"deployment": {
"kind": "BUNDLE",
"metadata_file_path": "/Users/$USERNAME/path/to/root/state/metadata.json"
},
"edit_mode": "UI_LOCKED",
"format": "MULTI_TASK",
"permissions": [],
"queue": {
"enabled": true
},
"tags": {},
"tasks": [
{
"existing_cluster_id": "500",
"python_wheel_task": {
"named_parameters": {
"conf-file": "/Users/$USERNAME/path/to/root/files/path/to/config.yaml"
}
},
"task_key": ""
}
]
}
}
},
"sync": {
"paths": [
"."
]
},
"targets": null,
"variables": {
"workspace_root": {
"default": "/Users/$USERNAME",
"description": "root directory in the Databricks workspace to store the asset bundle and associated artifacts",
"value": "/Users/$USERNAME"
}
},
"workspace": {
"artifact_path": "/Users/$USERNAME/path/to/root/artifacts",
"current_user": {
"short_name": "$USERNAME",
"userName": "$USERNAME"
},
"file_path": "/Users/$USERNAME/path/to/root/files",
"profile": "profile_name",
"resource_path": "/Users/$USERNAME/path/to/root/resources",
"root_path": "/Users/$USERNAME/path/to/root",
"state_path": "/Users/$USERNAME/path/to/root/state"
}
}

View File

@ -0,0 +1,2 @@
echo /Workspace should be prepended on all paths, but it is not the case: #2181
$CLI bundle validate -o json

View File

@ -0,0 +1,73 @@
package acceptance_test
import (
"encoding/json"
"net/http"
"os"
"strings"
"testing"
"github.com/databricks/cli/internal/testcli"
"github.com/stretchr/testify/require"
)
func StartCmdServer(t *testing.T) *TestServer {
server := StartServer(t)
server.Handle("/", func(r *http.Request) (any, error) {
q := r.URL.Query()
args := strings.Split(q.Get("args"), " ")
var env map[string]string
require.NoError(t, json.Unmarshal([]byte(q.Get("env")), &env))
for key, val := range env {
defer Setenv(t, key, val)()
}
defer Chdir(t, q.Get("cwd"))()
c := testcli.NewRunner(t, r.Context(), args...)
c.Verbose = false
stdout, stderr, err := c.Run()
result := map[string]any{
"stdout": stdout.String(),
"stderr": stderr.String(),
}
exitcode := 0
if err != nil {
exitcode = 1
}
result["exitcode"] = exitcode
return result, nil
})
return server
}
// Chdir variant that is intended to be used with defer so that it can switch back before function ends.
// This is unlike testutil.Chdir which switches back only when tests end.
func Chdir(t *testing.T, cwd string) func() {
require.NotEmpty(t, cwd)
prevDir, err := os.Getwd()
require.NoError(t, err)
err = os.Chdir(cwd)
require.NoError(t, err)
return func() {
_ = os.Chdir(prevDir)
}
}
// Setenv variant that is intended to be used with defer so that it can switch back before function ends.
// This is unlike t.Setenv which switches back only when tests end.
func Setenv(t *testing.T, key, value string) func() {
prevVal, exists := os.LookupEnv(key)
require.NoError(t, os.Setenv(key, value))
return func() {
if exists {
_ = os.Setenv(key, prevVal)
} else {
_ = os.Unsetenv(key)
}
}
}

View File

@ -34,7 +34,7 @@ trace() {
git-repo-init() {
git init -qb main
git config --global core.autocrlf false
git config core.autocrlf false
git config user.name "Tester"
git config user.email "tester@databricks.com"
git add databricks.yml

View File

@ -17,6 +17,7 @@ import (
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/env"
"github.com/databricks/cli/bundle/metadata"
"github.com/databricks/cli/libs/auth"
"github.com/databricks/cli/libs/fileset"
"github.com/databricks/cli/libs/locker"
"github.com/databricks/cli/libs/log"
@ -24,7 +25,6 @@ import (
"github.com/databricks/cli/libs/terraform"
"github.com/databricks/cli/libs/vfs"
"github.com/databricks/databricks-sdk-go"
sdkconfig "github.com/databricks/databricks-sdk-go/config"
"github.com/hashicorp/terraform-exec/tfexec"
)
@ -246,21 +246,5 @@ func (b *Bundle) AuthEnv() (map[string]string, error) {
}
cfg := b.client.Config
out := make(map[string]string)
for _, attr := range sdkconfig.ConfigAttributes {
// Ignore profile so that downstream tools don't try and reload
// the profile even though we know the current configuration is valid.
if attr.Name == "profile" {
continue
}
if len(attr.EnvVars) == 0 {
continue
}
if attr.IsZero(cfg) {
continue
}
out[attr.EnvVars[0]] = attr.GetString(cfg)
}
return out, nil
return auth.Env(cfg), nil
}

View File

@ -32,7 +32,7 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
}
if info.WorktreeRoot == "" {
b.WorktreeRoot = b.BundleRoot
b.WorktreeRoot = b.SyncRoot
} else {
b.WorktreeRoot = vfs.MustNew(info.WorktreeRoot)
}

View File

@ -9,6 +9,7 @@ import (
"github.com/databricks/cli/libs/dyn"
)
// pythonDiagnostic is a single entry in diagnostics.json
type pythonDiagnostic struct {
Severity pythonSeverity `json:"severity"`
Summary string `json:"summary"`

View File

@ -0,0 +1,194 @@
package python
import (
"encoding/json"
"fmt"
"io"
"path/filepath"
"github.com/databricks/cli/libs/dyn"
)
// generatedFileName is used as the virtual file name for YAML generated by Python code.
//
// mergePythonLocations replaces dyn.Location with generatedFileName with locations loaded
// from locations.json
const generatedFileName = "__generated_by_python__.yml"
// pythonLocations is data structure for efficient location lookup for a given path
//
// Locations form a tree, and we assign locations of the closest ancestor to each dyn.Value based on its path.
// We implement it as a trie (prefix tree) where keys are components of the path. With that, lookups are O(n)
// where n is the number of components in the path.
//
// For example, with locations.json:
//
// {"path": "resources.jobs.job_0", "file": "resources/job_0.py", "line": 3, "column": 5}
// {"path": "resources.jobs.job_0.tasks[0].task_key", "file": "resources/job_0.py", "line": 10, "column": 5}
// {"path": "resources.jobs.job_1", "file": "resources/job_1.py", "line": 5, "column": 7}
//
// - resources.jobs.job_0.tasks[0].task_key is located at job_0.py:10:5
//
// - resources.jobs.job_0.tasks[0].email_notifications is located at job_0.py:3:5,
// because we use the location of the job as the most precise approximation.
//
// See pythonLocationEntry for the structure of a single entry in locations.json
type pythonLocations struct {
// descendants referenced by index, e.g. '.foo'
keys map[string]*pythonLocations
// descendants referenced by key, e.g. '[0]'
indexes map[int]*pythonLocations
// location for the current node if it exists
location dyn.Location
// if true, location is present
exists bool
}
// pythonLocationEntry is a single entry in locations.json
type pythonLocationEntry struct {
Path string `json:"path"`
File string `json:"file"`
Line int `json:"line"`
Column int `json:"column"`
}
// mergePythonLocations applies locations from Python mutator into given dyn.Value
//
// The primary use-case is to merge locations.json with output.json, so that any
// validation errors will point to Python source code instead of generated YAML.
func mergePythonLocations(value dyn.Value, locations *pythonLocations) (dyn.Value, error) {
return dyn.Walk(value, func(path dyn.Path, value dyn.Value) (dyn.Value, error) {
newLocation, ok := findPythonLocation(locations, path)
if !ok {
return value, nil
}
// The first item in the list is the "last" location used for error reporting
//
// Loaded YAML uses virtual file path as location, we remove any of such references,
// because they should use 'newLocation' instead.
//
// We preserve any previous non-virtual locations in case when Python function modified
// resource defined in YAML.
newLocations := append(
[]dyn.Location{newLocation},
removeVirtualLocations(value.Locations())...,
)
return value.WithLocations(newLocations), nil
})
}
func removeVirtualLocations(locations []dyn.Location) []dyn.Location {
var newLocations []dyn.Location
for _, location := range locations {
if filepath.Base(location.File) == generatedFileName {
continue
}
newLocations = append(newLocations, location)
}
return newLocations
}
// parsePythonLocations parses locations.json from the Python mutator.
//
// locations file is newline-separated JSON objects with pythonLocationEntry structure.
func parsePythonLocations(input io.Reader) (*pythonLocations, error) {
decoder := json.NewDecoder(input)
locations := newPythonLocations()
for decoder.More() {
var entry pythonLocationEntry
err := decoder.Decode(&entry)
if err != nil {
return nil, fmt.Errorf("failed to parse python location: %s", err)
}
path, err := dyn.NewPathFromString(entry.Path)
if err != nil {
return nil, fmt.Errorf("failed to parse python location: %s", err)
}
location := dyn.Location{
File: entry.File,
Line: entry.Line,
Column: entry.Column,
}
putPythonLocation(locations, path, location)
}
return locations, nil
}
// putPythonLocation puts the location to the trie for the given path
func putPythonLocation(trie *pythonLocations, path dyn.Path, location dyn.Location) {
currentNode := trie
for _, component := range path {
if key := component.Key(); key != "" {
if _, ok := currentNode.keys[key]; !ok {
currentNode.keys[key] = newPythonLocations()
}
currentNode = currentNode.keys[key]
} else {
index := component.Index()
if _, ok := currentNode.indexes[index]; !ok {
currentNode.indexes[index] = newPythonLocations()
}
currentNode = currentNode.indexes[index]
}
}
currentNode.location = location
currentNode.exists = true
}
// newPythonLocations creates a new trie node
func newPythonLocations() *pythonLocations {
return &pythonLocations{
keys: make(map[string]*pythonLocations),
indexes: make(map[int]*pythonLocations),
}
}
// findPythonLocation finds the location or closest ancestor location in the trie for the given path
// if no ancestor or exact location is found, false is returned.
func findPythonLocation(locations *pythonLocations, path dyn.Path) (dyn.Location, bool) {
currentNode := locations
lastLocation := locations.location
exists := locations.exists
for _, component := range path {
if key := component.Key(); key != "" {
if _, ok := currentNode.keys[key]; !ok {
break
}
currentNode = currentNode.keys[key]
} else {
index := component.Index()
if _, ok := currentNode.indexes[index]; !ok {
break
}
currentNode = currentNode.indexes[index]
}
if currentNode.exists {
lastLocation = currentNode.location
exists = true
}
}
return lastLocation, exists
}
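
To make the lookup semantics concrete, here is a hedged usage sketch in the style of the tests below; it instantiates the example from the pythonLocations doc comment and is not part of this commit:

func TestFindLocation_docCommentExample(t *testing.T) {
	// Assumes the imports of the test file below (testing, dyn, dynassert).
	locations := newPythonLocations()
	putPythonLocation(locations,
		dyn.MustPathFromString("resources.jobs.job_0"),
		dyn.Location{File: "resources/job_0.py", Line: 3, Column: 5})

	// No entry exists for the full path, so the closest ancestor's location is returned.
	loc, ok := findPythonLocation(locations,
		dyn.MustPathFromString("resources.jobs.job_0.tasks[0].email_notifications"))
	assert.True(t, ok)
	assert.Equal(t, dyn.Location{File: "resources/job_0.py", Line: 3, Column: 5}, loc)
}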

View File

@ -0,0 +1,179 @@
package python
import (
"bytes"
"path/filepath"
"testing"
"github.com/databricks/cli/libs/diag"
"github.com/stretchr/testify/require"
"github.com/databricks/cli/libs/dyn"
assert "github.com/databricks/cli/libs/dyn/dynassert"
)
func TestMergeLocations(t *testing.T) {
pythonLocation := dyn.Location{File: "foo.py", Line: 1, Column: 1}
generatedLocation := dyn.Location{File: generatedFileName, Line: 1, Column: 1}
yamlLocation := dyn.Location{File: "foo.yml", Line: 1, Column: 1}
locations := newPythonLocations()
putPythonLocation(locations, dyn.MustPathFromString("foo"), pythonLocation)
input := dyn.NewValue(
map[string]dyn.Value{
"foo": dyn.NewValue(
map[string]dyn.Value{
"baz": dyn.NewValue("baz", []dyn.Location{yamlLocation}),
"qux": dyn.NewValue("baz", []dyn.Location{generatedLocation, yamlLocation}),
},
[]dyn.Location{},
),
"bar": dyn.NewValue("baz", []dyn.Location{generatedLocation}),
},
[]dyn.Location{yamlLocation},
)
expected := dyn.NewValue(
map[string]dyn.Value{
"foo": dyn.NewValue(
map[string]dyn.Value{
// pythonLocation is appended to the beginning of the list if absent
"baz": dyn.NewValue("baz", []dyn.Location{pythonLocation, yamlLocation}),
// generatedLocation is replaced by pythonLocation
"qux": dyn.NewValue("baz", []dyn.Location{pythonLocation, yamlLocation}),
},
[]dyn.Location{pythonLocation},
),
// if location is unknown, we keep it as-is
"bar": dyn.NewValue("baz", []dyn.Location{generatedLocation}),
},
[]dyn.Location{yamlLocation},
)
actual, err := mergePythonLocations(input, locations)
assert.NoError(t, err)
assert.Equal(t, expected, actual)
}
func TestFindLocation(t *testing.T) {
location0 := dyn.Location{File: "foo.py", Line: 1, Column: 1}
location1 := dyn.Location{File: "foo.py", Line: 2, Column: 1}
locations := newPythonLocations()
putPythonLocation(locations, dyn.MustPathFromString("foo"), location0)
putPythonLocation(locations, dyn.MustPathFromString("foo.bar"), location1)
actual, exists := findPythonLocation(locations, dyn.MustPathFromString("foo.bar"))
assert.True(t, exists)
assert.Equal(t, location1, actual)
}
func TestFindLocation_indexPathComponent(t *testing.T) {
location0 := dyn.Location{File: "foo.py", Line: 1, Column: 1}
location1 := dyn.Location{File: "foo.py", Line: 2, Column: 1}
location2 := dyn.Location{File: "foo.py", Line: 3, Column: 1}
locations := newPythonLocations()
putPythonLocation(locations, dyn.MustPathFromString("foo"), location0)
putPythonLocation(locations, dyn.MustPathFromString("foo.bar"), location1)
putPythonLocation(locations, dyn.MustPathFromString("foo.bar[0]"), location2)
actual, exists := findPythonLocation(locations, dyn.MustPathFromString("foo.bar[0]"))
assert.True(t, exists)
assert.Equal(t, location2, actual)
}
func TestFindLocation_closestAncestorLocation(t *testing.T) {
location0 := dyn.Location{File: "foo.py", Line: 1, Column: 1}
location1 := dyn.Location{File: "foo.py", Line: 2, Column: 1}
locations := newPythonLocations()
putPythonLocation(locations, dyn.MustPathFromString("foo"), location0)
putPythonLocation(locations, dyn.MustPathFromString("foo.bar"), location1)
actual, exists := findPythonLocation(locations, dyn.MustPathFromString("foo.bar.baz"))
assert.True(t, exists)
assert.Equal(t, location1, actual)
}
func TestFindLocation_unknownLocation(t *testing.T) {
location0 := dyn.Location{File: "foo.py", Line: 1, Column: 1}
location1 := dyn.Location{File: "foo.py", Line: 2, Column: 1}
locations := newPythonLocations()
putPythonLocation(locations, dyn.MustPathFromString("foo"), location0)
putPythonLocation(locations, dyn.MustPathFromString("foo.bar"), location1)
_, exists := findPythonLocation(locations, dyn.MustPathFromString("bar"))
assert.False(t, exists)
}
func TestLoadOutput(t *testing.T) {
location := dyn.Location{File: "my_job.py", Line: 1, Column: 1}
bundleRoot := t.TempDir()
output := `{
"resources": {
"jobs": {
"my_job": {
"name": "my_job",
"tasks": [
{
"task_key": "my_task",
"notebook_task": {
"notebook_path": "my_notebook"
}
}
]
}
}
}
}`
locations := newPythonLocations()
putPythonLocation(
locations,
dyn.MustPathFromString("resources.jobs.my_job"),
location,
)
value, diags := loadOutput(
bundleRoot,
bytes.NewReader([]byte(output)),
locations,
)
assert.Equal(t, diag.Diagnostics{}, diags)
name, err := dyn.Get(value, "resources.jobs.my_job.name")
require.NoError(t, err)
require.Equal(t, []dyn.Location{location}, name.Locations())
// until we implement path normalization, we have to keep locations of values
// that change semantic depending on their location
//
// note: it's important to have absolute path including 'bundleRoot'
// because mutator pipeline already has expanded locations into absolute path
notebookPath, err := dyn.Get(value, "resources.jobs.my_job.tasks[0].notebook_task.notebook_path")
require.NoError(t, err)
require.Len(t, notebookPath.Locations(), 1)
require.Equal(t, filepath.Join(bundleRoot, generatedFileName), notebookPath.Locations()[0].File)
}
func TestParsePythonLocations(t *testing.T) {
expected := dyn.Location{File: "foo.py", Line: 1, Column: 2}
input := `{"path": "foo", "file": "foo.py", "line": 1, "column": 2}`
reader := bytes.NewReader([]byte(input))
locations, err := parsePythonLocations(reader)
assert.NoError(t, err)
assert.True(t, locations.keys["foo"].exists)
assert.Equal(t, expected, locations.keys["foo"].location)
}

View File

@ -7,11 +7,14 @@ import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"reflect"
"strings"
"github.com/databricks/cli/bundle/config/mutator/paths"
"github.com/databricks/databricks-sdk-go/logger"
"github.com/fatih/color"
@ -124,6 +127,15 @@ type opts struct {
enabled bool
venvPath string
loadLocations bool
}
type runPythonMutatorOpts struct {
cacheDir string
bundleRootPath string
pythonPath string
loadLocations bool
}
// getOpts adapts deprecated PyDABs and upcoming Python configuration
@ -148,8 +160,9 @@ func getOpts(b *bundle.Bundle, phase phase) (opts, error) {
// don't execute for phases for 'python' section
if phase == PythonMutatorPhaseInit || phase == PythonMutatorPhaseLoad {
return opts{
enabled: true,
venvPath: experimental.PyDABs.VEnvPath,
enabled: true,
venvPath: experimental.PyDABs.VEnvPath,
loadLocations: false, // not supported in PyDABs
}, nil
} else {
return opts{}, nil
@ -158,8 +171,9 @@ func getOpts(b *bundle.Bundle, phase phase) (opts, error) {
// don't execute for phases for 'pydabs' section
if phase == PythonMutatorPhaseLoadResources || phase == PythonMutatorPhaseApplyMutators {
return opts{
enabled: true,
venvPath: experimental.Python.VEnvPath,
enabled: true,
venvPath: experimental.Python.VEnvPath,
loadLocations: true,
}, nil
} else {
return opts{}, nil
@ -194,7 +208,12 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
return dyn.InvalidValue, fmt.Errorf("failed to create cache dir: %w", err)
}
rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.BundleRootPath, pythonPath, leftRoot)
rightRoot, diags := m.runPythonMutator(ctx, leftRoot, runPythonMutatorOpts{
cacheDir: cacheDir,
bundleRootPath: b.BundleRootPath,
pythonPath: pythonPath,
loadLocations: opts.loadLocations,
})
mutateDiags = diags
if diags.HasError() {
return dyn.InvalidValue, mutateDiagsHasError
@ -238,13 +257,14 @@ func createCacheDir(ctx context.Context) (string, error) {
return os.MkdirTemp("", "-python")
}
func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) {
inputPath := filepath.Join(cacheDir, "input.json")
outputPath := filepath.Join(cacheDir, "output.json")
diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json")
func (m *pythonMutator) runPythonMutator(ctx context.Context, root dyn.Value, opts runPythonMutatorOpts) (dyn.Value, diag.Diagnostics) {
inputPath := filepath.Join(opts.cacheDir, "input.json")
outputPath := filepath.Join(opts.cacheDir, "output.json")
diagnosticsPath := filepath.Join(opts.cacheDir, "diagnostics.json")
locationsPath := filepath.Join(opts.cacheDir, "locations.json")
args := []string{
pythonPath,
opts.pythonPath,
"-m",
"databricks.bundles.build",
"--phase",
@ -257,6 +277,10 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath
diagnosticsPath,
}
if opts.loadLocations {
args = append(args, "--locations", locationsPath)
}
if err := writeInputFile(inputPath, root); err != nil {
return dyn.InvalidValue, diag.Errorf("failed to write input file: %s", err)
}
@ -271,7 +295,7 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath
_, processErr := process.Background(
ctx,
args,
process.WithDir(rootPath),
process.WithDir(opts.bundleRootPath),
process.WithStderrWriter(stderrWriter),
process.WithStdoutWriter(stdoutWriter),
)
@ -307,7 +331,12 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath
return dyn.InvalidValue, diag.Errorf("failed to load diagnostics: %s", pythonDiagnosticsErr)
}
output, outputDiags := loadOutputFile(rootPath, outputPath)
locations, err := loadLocationsFile(locationsPath)
if err != nil {
return dyn.InvalidValue, diag.Errorf("failed to load locations: %s", err)
}
output, outputDiags := loadOutputFile(opts.bundleRootPath, outputPath, locations)
pythonDiagnostics = pythonDiagnostics.Extend(outputDiags)
// we pass through pythonDiagnostic because it contains warnings
@ -351,7 +380,21 @@ func writeInputFile(inputPath string, input dyn.Value) error {
return os.WriteFile(inputPath, rootConfigJson, 0o600)
}
func loadOutputFile(rootPath, outputPath string) (dyn.Value, diag.Diagnostics) {
// loadLocationsFile loads locations.json containing source locations for generated YAML.
func loadLocationsFile(locationsPath string) (*pythonLocations, error) {
locationsFile, err := os.Open(locationsPath)
if errors.Is(err, fs.ErrNotExist) {
return newPythonLocations(), nil
} else if err != nil {
return nil, fmt.Errorf("failed to open locations file: %w", err)
}
defer locationsFile.Close()
return parsePythonLocations(locationsFile)
}
func loadOutputFile(rootPath, outputPath string, locations *pythonLocations) (dyn.Value, diag.Diagnostics) {
outputFile, err := os.Open(outputPath)
if err != nil {
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to open output file: %w", err))
@ -359,15 +402,19 @@ func loadOutputFile(rootPath, outputPath string) (dyn.Value, diag.Diagnostics) {
defer outputFile.Close()
return loadOutput(rootPath, outputFile, locations)
}
func loadOutput(rootPath string, outputFile io.Reader, locations *pythonLocations) (dyn.Value, diag.Diagnostics) {
// we need absolute path because later parts of pipeline assume all paths are absolute
// and this file will be used as location to resolve relative paths.
//
// virtualPath has to stay in rootPath, because locations outside root path are not allowed:
// virtualPath has to stay in bundleRootPath, because locations outside root path are not allowed:
//
// Error: path /var/folders/.../python/dist/*.whl is not contained in bundle root path
//
// for that, we pass virtualPath instead of outputPath as file location
virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_python__.yml"))
virtualPath, err := filepath.Abs(filepath.Join(rootPath, generatedFileName))
if err != nil {
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to get absolute path: %w", err))
}
@ -377,7 +424,29 @@ func loadOutputFile(rootPath, outputPath string) (dyn.Value, diag.Diagnostics) {
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to parse output file: %w", err))
}
return strictNormalize(config.Root{}, generated)
// paths are resolved relative to locations of their values, if we change location
// we have to update each path, until we simplify that, we don't update locations
// for such values, so we don't change how paths are resolved
//
// we can remove this once we:
// - add variable interpolation before and after PythonMutator
// - implement path normalization (aka path normal form)
_, err = paths.VisitJobPaths(generated, func(p dyn.Path, kind paths.PathKind, v dyn.Value) (dyn.Value, error) {
putPythonLocation(locations, p, v.Location())
return v, nil
})
if err != nil {
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to update locations: %w", err))
}
// generated has dyn.Location as if it comes from generated YAML file
// earlier we loaded locations.json with source locations in Python code
generatedWithLocations, err := mergePythonLocations(generated, locations)
if err != nil {
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to update locations: %w", err))
}
return strictNormalize(config.Root{}, generatedWithLocations)
}
func strictNormalize(dst any, generated dyn.Value) (dyn.Value, diag.Diagnostics) {

View File

@ -7,7 +7,6 @@ import (
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"testing"
@ -93,6 +92,8 @@ func TestPythonMutator_loadResources(t *testing.T) {
}
}`,
`{"severity": "warning", "summary": "job doesn't have any tasks", "location": {"file": "src/examples/file.py", "line": 10, "column": 5}}`,
`{"path": "resources.jobs.job0", "file": "src/examples/job0.py", "line": 3, "column": 5}
{"path": "resources.jobs.job1", "file": "src/examples/job1.py", "line": 5, "column": 7}`,
)
mutator := PythonMutator(PythonMutatorPhaseLoadResources)
@ -110,6 +111,25 @@ func TestPythonMutator_loadResources(t *testing.T) {
assert.Equal(t, "job_1", job1.Name)
}
// output of locations.json should be applied to underlying dyn.Value
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
name1, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job1.name"))
if err != nil {
return dyn.InvalidValue, err
}
assert.Equal(t, []dyn.Location{
{
File: "src/examples/job1.py",
Line: 5,
Column: 7,
},
}, name1.Locations())
return v, nil
})
assert.NoError(t, err)
assert.Equal(t, 1, len(diags))
assert.Equal(t, "job doesn't have any tasks", diags[0].Summary)
assert.Equal(t, []dyn.Location{
@ -157,7 +177,7 @@ func TestPythonMutator_loadResources_disallowed(t *testing.T) {
}
}
}
}`, "")
}`, "", "")
mutator := PythonMutator(PythonMutatorPhaseLoadResources)
diag := bundle.Apply(ctx, b, mutator)
@ -202,7 +222,7 @@ func TestPythonMutator_applyMutators(t *testing.T) {
}
}
}
}`, "")
}`, "", "")
mutator := PythonMutator(PythonMutatorPhaseApplyMutators)
diag := bundle.Apply(ctx, b, mutator)
@ -224,7 +244,7 @@ func TestPythonMutator_applyMutators(t *testing.T) {
description, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.description"))
require.NoError(t, err)
expectedVirtualPath, err := filepath.Abs("__generated_by_python__.yml")
expectedVirtualPath, err := filepath.Abs(generatedFileName)
require.NoError(t, err)
assert.Equal(t, expectedVirtualPath, description.Location().File)
@ -263,7 +283,7 @@ func TestPythonMutator_badOutput(t *testing.T) {
}
}
}
}`, "")
}`, "", "")
mutator := PythonMutator(PythonMutatorPhaseLoadResources)
diag := bundle.Apply(ctx, b, mutator)
@ -312,7 +332,7 @@ func TestGetOps_Python(t *testing.T) {
}, PythonMutatorPhaseLoadResources)
assert.NoError(t, err)
assert.Equal(t, opts{venvPath: ".venv", enabled: true}, actual)
assert.Equal(t, opts{venvPath: ".venv", enabled: true, loadLocations: true}, actual)
}
func TestGetOps_PyDABs(t *testing.T) {
@ -328,7 +348,7 @@ func TestGetOps_PyDABs(t *testing.T) {
}, PythonMutatorPhaseInit)
assert.NoError(t, err)
assert.Equal(t, opts{venvPath: ".venv", enabled: true}, actual)
assert.Equal(t, opts{venvPath: ".venv", enabled: true, loadLocations: false}, actual)
}
func TestGetOps_empty(t *testing.T) {
@ -661,7 +681,7 @@ or activate the environment before running CLI commands:
assert.Equal(t, expected, out)
}
func withProcessStub(t *testing.T, args []string, output, diagnostics string) context.Context {
func withProcessStub(t *testing.T, args []string, output, diagnostics, locations string) context.Context {
ctx := context.Background()
ctx, stub := process.WithStub(ctx)
@ -673,32 +693,51 @@ func withProcessStub(t *testing.T, args []string, output, diagnostics string) co
inputPath := filepath.Join(cacheDir, "input.json")
outputPath := filepath.Join(cacheDir, "output.json")
locationsPath := filepath.Join(cacheDir, "locations.json")
diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json")
args = append(args, "--input", inputPath)
args = append(args, "--output", outputPath)
args = append(args, "--diagnostics", diagnosticsPath)
stub.WithCallback(func(actual *exec.Cmd) error {
_, err := os.Stat(inputPath)
assert.NoError(t, err)
if reflect.DeepEqual(actual.Args, args) {
err := os.WriteFile(outputPath, []byte(output), 0o600)
require.NoError(t, err)
actualInputPath := getArg(actual.Args, "--input")
actualOutputPath := getArg(actual.Args, "--output")
actualDiagnosticsPath := getArg(actual.Args, "--diagnostics")
actualLocationsPath := getArg(actual.Args, "--locations")
err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0o600)
require.NoError(t, err)
require.Equal(t, inputPath, actualInputPath)
require.Equal(t, outputPath, actualOutputPath)
require.Equal(t, diagnosticsPath, actualDiagnosticsPath)
return nil
} else {
return fmt.Errorf("unexpected command: %v", actual.Args)
// locations is an optional argument
if locations != "" {
require.Equal(t, locationsPath, actualLocationsPath)
err = os.WriteFile(locationsPath, []byte(locations), 0o600)
require.NoError(t, err)
}
err = os.WriteFile(outputPath, []byte(output), 0o600)
require.NoError(t, err)
err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0o600)
require.NoError(t, err)
return nil
})
return ctx
}
func getArg(args []string, name string) string {
for i := range args {
if args[i] == name {
return args[i+1]
}
}
return ""
}
func loadYaml(name, content string) *bundle.Bundle {
v, diag := config.LoadFromBytes(name, []byte(content))

View File

@ -388,14 +388,6 @@ func (r *Root) MergeTargetOverrides(name string) error {
return err
}
// If the branch was overridden, we need to clear the inferred flag.
if branch := v.Get("branch"); branch.Kind() != dyn.KindInvalid {
out, err = dyn.SetByPath(out, dyn.NewPath(dyn.Key("inferred")), dyn.V(false))
if err != nil {
return err
}
}
// Set the merged value.
root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("git")), out)
if err != nil {

View File

@ -419,7 +419,7 @@ func TestBundleToTerraformModelServing(t *testing.T) {
src := resources.ModelServingEndpoint{
CreateServingEndpoint: &serving.CreateServingEndpoint{
Name: "name",
Config: serving.EndpointCoreConfigInput{
Config: &serving.EndpointCoreConfigInput{
ServedModels: []serving.ServedModelInput{
{
ModelName: "model_name",
@ -474,7 +474,7 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) {
// and as such observed the `omitempty` tag.
// The new method leverages [dyn.Value] where any field that is not
// explicitly set is not part of the value.
Config: serving.EndpointCoreConfigInput{
Config: &serving.EndpointCoreConfigInput{
ServedModels: []serving.ServedModelInput{
{
ModelName: "model_name",

View File

@ -54,7 +54,7 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con
return tf.ExecPath, nil
}
binDir, err := b.CacheDir(context.Background(), "bin")
binDir, err := b.CacheDir(ctx, "bin")
if err != nil {
return "", err
}
@ -88,41 +88,35 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con
return tf.ExecPath, nil
}
// This function inherits some environment variables for Terraform CLI.
func inheritEnvVars(ctx context.Context, environ map[string]string) error {
var envCopy = []string{
// Include $HOME in set of environment variables to pass along.
home, ok := env.Lookup(ctx, "HOME")
if ok {
environ["HOME"] = home
}
"HOME",
// Include $USERPROFILE in set of environment variables to pass along.
// This variable is used by Azure CLI on Windows to find stored credentials and metadata
userProfile, ok := env.Lookup(ctx, "USERPROFILE")
if ok {
environ["USERPROFILE"] = userProfile
}
"USERPROFILE",
// Include $PATH in set of environment variables to pass along.
// This is necessary to ensure that our Terraform provider can use the
// same auxiliary programs (e.g. `az`, or `gcloud`) as the CLI.
path, ok := env.Lookup(ctx, "PATH")
if ok {
environ["PATH"] = path
}
"PATH",
// Include $AZURE_CONFIG_FILE in set of environment variables to pass along.
// This is set in Azure DevOps by the AzureCLI@2 task.
azureConfigFile, ok := env.Lookup(ctx, "AZURE_CONFIG_FILE")
if ok {
environ["AZURE_CONFIG_FILE"] = azureConfigFile
}
"AZURE_CONFIG_FILE",
// Include $TF_CLI_CONFIG_FILE to override terraform provider in development.
// See: https://developer.hashicorp.com/terraform/cli/config/config-file#explicit-installation-method-configuration
devConfigFile, ok := env.Lookup(ctx, "TF_CLI_CONFIG_FILE")
if ok {
environ["TF_CLI_CONFIG_FILE"] = devConfigFile
"TF_CLI_CONFIG_FILE",
}
// This function inherits some environment variables for Terraform CLI.
func inheritEnvVars(ctx context.Context, environ map[string]string) error {
for _, key := range envCopy {
value, ok := env.Lookup(ctx, key)
if ok {
environ[key] = value
}
}
// Map $DATABRICKS_TF_CLI_CONFIG_FILE to $TF_CLI_CONFIG_FILE

View File

@ -17,7 +17,7 @@ func TestConvertModelServingEndpoint(t *testing.T) {
src := resources.ModelServingEndpoint{
CreateServingEndpoint: &serving.CreateServingEndpoint{
Name: "name",
Config: serving.EndpointCoreConfigInput{
Config: &serving.EndpointCoreConfigInput{
ServedModels: []serving.ServedModelInput{
{
ModelName: "model_name",

View File

@ -353,12 +353,12 @@ github.com/databricks/cli/bundle/config/resources.MlflowModel:
github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint:
"ai_gateway":
"description": |-
The AI Gateway configuration for the serving endpoint. NOTE: only external model endpoints are supported as of now.
The AI Gateway configuration for the serving endpoint. NOTE: Only external model and provisioned throughput endpoints are currently supported.
"config":
"description": |-
The core config of the serving endpoint.
"name":
"description": |
"description": |-
The name of the serving endpoint. This field is required and must be unique across a Databricks workspace.
An endpoint name can consist of alphanumeric characters, dashes, and underscores.
"rate_limits":
@ -1974,6 +1974,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask:
Parameters passed to the main method.
Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.
"run_as_repl":
"description": |-
Deprecated. A value of `false` is no longer supported.
github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask:
"parameters":
"description": |-
@ -2684,27 +2687,36 @@ github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScd
github.com/databricks/databricks-sdk-go/service/serving.Ai21LabsConfig:
"ai21labs_api_key":
"description": |-
The Databricks secret key reference for an AI21 Labs API key. If you prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`.
The Databricks secret key reference for an AI21 Labs API key. If you
prefer to paste your API key directly, see `ai21labs_api_key_plaintext`.
You must provide an API key using one of the following fields:
`ai21labs_api_key` or `ai21labs_api_key_plaintext`.
"ai21labs_api_key_plaintext":
"description": |-
An AI21 Labs API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `ai21labs_api_key`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`.
An AI21 Labs API key provided as a plaintext string. If you prefer to
reference your key using Databricks Secrets, see `ai21labs_api_key`. You
must provide an API key using one of the following fields:
`ai21labs_api_key` or `ai21labs_api_key_plaintext`.
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayConfig:
"guardrails":
"description": |-
Configuration for AI Guardrails to prevent unwanted data and unsafe data in requests and responses.
"inference_table_config":
"description": |-
Configuration for payload logging using inference tables. Use these tables to monitor and audit data being sent to and received from model APIs and to improve model quality.
Configuration for payload logging using inference tables.
Use these tables to monitor and audit data being sent to and received from model APIs and to improve model quality.
"rate_limits":
"description": |-
Configuration for rate limits which can be set to limit endpoint traffic.
"usage_tracking_config":
"description": |-
Configuration to enable usage tracking using system tables. These tables allow you to monitor operational usage on endpoints and their associated costs.
Configuration to enable usage tracking using system tables.
These tables allow you to monitor operational usage on endpoints and their associated costs.
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailParameters:
"invalid_keywords":
"description": |-
List of invalid keywords. AI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content.
List of invalid keywords.
AI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content.
"pii":
"description": |-
Configuration for guardrail PII filter.
@ -2713,15 +2725,14 @@ github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailParame
Indicates whether the safety filter is enabled.
"valid_topics":
"description": |-
The list of allowed topics. Given a chat request, this guardrail flags the request if its topic is not in the allowed topics.
The list of allowed topics.
Given a chat request, this guardrail flags the request if its topic is not in the allowed topics.
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehavior:
"behavior":
"description": |-
Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.
Configuration for input guardrail filters.
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior:
"_":
"description": |-
Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.
"enum":
- |-
NONE
@ -2737,30 +2748,32 @@ github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrails:
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayInferenceTableConfig:
"catalog_name":
"description": |-
The name of the catalog in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the catalog name.
The name of the catalog in Unity Catalog. Required when enabling inference tables.
NOTE: On update, you have to disable inference table first in order to change the catalog name.
"enabled":
"description": |-
Indicates whether the inference table is enabled.
"schema_name":
"description": |-
The name of the schema in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the schema name.
The name of the schema in Unity Catalog. Required when enabling inference tables.
NOTE: On update, you have to disable inference table first in order to change the schema name.
"table_name_prefix":
"description": |-
The prefix of the table in Unity Catalog. NOTE: On update, you have to disable inference table first in order to change the prefix name.
The prefix of the table in Unity Catalog.
NOTE: On update, you have to disable inference table first in order to change the prefix name.
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimit:
"calls":
"description": |-
Used to specify how many calls are allowed for a key within the renewal_period.
"key":
"description": |-
Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.
Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported,
with 'endpoint' being the default if not specified.
"renewal_period":
"description": |-
Renewal period field for a rate limit. Currently, only 'minute' is supported.
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey:
"_":
"description": |-
Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.
"enum":
- |-
user
@ -2768,8 +2781,6 @@ github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey:
endpoint
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod:
"_":
"description": |-
Renewal period field for a rate limit. Currently, only 'minute' is supported.
"enum":
- |-
minute
@ -2780,26 +2791,43 @@ github.com/databricks/databricks-sdk-go/service/serving.AiGatewayUsageTrackingCo
github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfig:
"aws_access_key_id":
"description": |-
The Databricks secret key reference for an AWS access key ID with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`.
The Databricks secret key reference for an AWS access key ID with
permissions to interact with Bedrock services. If you prefer to paste
your API key directly, see `aws_access_key_id_plaintext`. You must provide an API
key using one of the following fields: `aws_access_key_id` or
`aws_access_key_id_plaintext`.
"aws_access_key_id_plaintext":
"description": |-
An AWS access key ID with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`.
An AWS access key ID with permissions to interact with Bedrock services
provided as a plaintext string. If you prefer to reference your key using
Databricks Secrets, see `aws_access_key_id`. You must provide an API key
using one of the following fields: `aws_access_key_id` or
`aws_access_key_id_plaintext`.
"aws_region":
"description": |-
The AWS region to use. Bedrock has to be enabled there.
"aws_secret_access_key":
"description": |-
The Databricks secret key reference for an AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_secret_access_key_plaintext`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`.
The Databricks secret key reference for an AWS secret access key paired
with the access key ID, with permissions to interact with Bedrock
services. If you prefer to paste your API key directly, see
`aws_secret_access_key_plaintext`. You must provide an API key using one
of the following fields: `aws_secret_access_key` or
`aws_secret_access_key_plaintext`.
"aws_secret_access_key_plaintext":
"description": |-
An AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_secret_access_key`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`.
An AWS secret access key paired with the access key ID, with permissions
to interact with Bedrock services provided as a plaintext string. If you
prefer to reference your key using Databricks Secrets, see
`aws_secret_access_key`. You must provide an API key using one of the
following fields: `aws_secret_access_key` or
`aws_secret_access_key_plaintext`.
"bedrock_provider":
"description": |-
The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.
The underlying provider in Amazon Bedrock. Supported values (case
insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.
github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider:
"_":
"description": |-
The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.
"enum":
- |-
anthropic
@ -2812,10 +2840,16 @@ github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedro
github.com/databricks/databricks-sdk-go/service/serving.AnthropicConfig:
"anthropic_api_key":
"description": |-
The Databricks secret key reference for an Anthropic API key. If you prefer to paste your API key directly, see `anthropic_api_key_plaintext`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`.
The Databricks secret key reference for an Anthropic API key. If you
prefer to paste your API key directly, see `anthropic_api_key_plaintext`.
You must provide an API key using one of the following fields:
`anthropic_api_key` or `anthropic_api_key_plaintext`.
"anthropic_api_key_plaintext":
"description": |-
The Anthropic API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `anthropic_api_key`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`.
The Anthropic API key provided as a plaintext string. If you prefer to
reference your key using Databricks Secrets, see `anthropic_api_key`. You
must provide an API key using one of the following fields:
`anthropic_api_key` or `anthropic_api_key_plaintext`.
github.com/databricks/databricks-sdk-go/service/serving.AutoCaptureConfigInput:
"catalog_name":
"description": |-
@ -2831,42 +2865,58 @@ github.com/databricks/databricks-sdk-go/service/serving.AutoCaptureConfigInput:
The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if the inference table is already enabled.
github.com/databricks/databricks-sdk-go/service/serving.CohereConfig:
"cohere_api_base":
"description": "This is an optional field to provide a customized base URL for the Cohere API. \nIf left unspecified, the standard Cohere base URL is used.\n"
"description": |-
This is an optional field to provide a customized base URL for the Cohere
API. If left unspecified, the standard Cohere base URL is used.
"cohere_api_key":
"description": |-
The Databricks secret key reference for a Cohere API key. If you prefer to paste your API key directly, see `cohere_api_key_plaintext`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`.
The Databricks secret key reference for a Cohere API key. If you prefer
to paste your API key directly, see `cohere_api_key_plaintext`. You must
provide an API key using one of the following fields: `cohere_api_key` or
`cohere_api_key_plaintext`.
"cohere_api_key_plaintext":
"description": |-
The Cohere API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `cohere_api_key`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`.
The Cohere API key provided as a plaintext string. If you prefer to
reference your key using Databricks Secrets, see `cohere_api_key`. You
must provide an API key using one of the following fields:
`cohere_api_key` or `cohere_api_key_plaintext`.
github.com/databricks/databricks-sdk-go/service/serving.DatabricksModelServingConfig:
"databricks_api_token":
"description": |
The Databricks secret key reference for a Databricks API token that corresponds to a user or service
principal with Can Query access to the model serving endpoint pointed to by this external model.
If you prefer to paste your API key directly, see `databricks_api_token_plaintext`.
You must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.
"description": |-
The Databricks secret key reference for a Databricks API token that
corresponds to a user or service principal with Can Query access to the
model serving endpoint pointed to by this external model. If you prefer
to paste your API key directly, see `databricks_api_token_plaintext`. You
must provide an API key using one of the following fields:
`databricks_api_token` or `databricks_api_token_plaintext`.
"databricks_api_token_plaintext":
"description": |
The Databricks API token that corresponds to a user or service
principal with Can Query access to the model serving endpoint pointed to by this external model provided as a plaintext string.
If you prefer to reference your key using Databricks Secrets, see `databricks_api_token`.
You must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.
"description": |-
The Databricks API token that corresponds to a user or service principal
with Can Query access to the model serving endpoint pointed to by this
external model provided as a plaintext string. If you prefer to reference
your key using Databricks Secrets, see `databricks_api_token`. You must
provide an API key using one of the following fields:
`databricks_api_token` or `databricks_api_token_plaintext`.
"databricks_workspace_url":
"description": |
The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.
"description": |-
The URL of the Databricks workspace containing the model serving endpoint
pointed to by this external model.
github.com/databricks/databricks-sdk-go/service/serving.EndpointCoreConfigInput:
"auto_capture_config":
"description": |-
Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.
Note: this field is deprecated for creating new provisioned throughput endpoints,
or updating existing provisioned throughput endpoints that never have inference table configured;
in these cases please use AI Gateway to manage inference tables.
"served_entities":
"description": |-
A list of served entities for the endpoint to serve. A serving endpoint can have up to 15 served entities.
The list of served entities under the serving endpoint config.
"served_models":
"description": |-
(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 15 served models.
(Deprecated, use served_entities instead) The list of served models under the serving endpoint config.
"traffic_config":
"description": |-
The traffic config defining how invocations to the serving endpoint should be routed.
The traffic configuration associated with the serving endpoint config.
github.com/databricks/databricks-sdk-go/service/serving.EndpointTag:
"key":
"description": |-
@ -2903,17 +2953,13 @@ github.com/databricks/databricks-sdk-go/service/serving.ExternalModel:
"description": |-
PaLM Config. Only required if the provider is 'palm'.
"provider":
"description": |
The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',
'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.",
"description": |-
The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.
"task":
"description": |-
The task type of the external model.
github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider:
"_":
"description": |
The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',
'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.",
"enum":
- |-
ai21labs
@ -2934,70 +2980,114 @@ github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider:
github.com/databricks/databricks-sdk-go/service/serving.GoogleCloudVertexAiConfig:
"private_key":
"description": |-
The Databricks secret key reference for a private key for the service account which has access to the Google Cloud Vertex AI Service. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to paste your API key directly, see `private_key_plaintext`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`
The Databricks secret key reference for a private key for the service
account which has access to the Google Cloud Vertex AI Service. See [Best
practices for managing service account keys]. If you prefer to paste your
API key directly, see `private_key_plaintext`. You must provide an API
key using one of the following fields: `private_key` or
`private_key_plaintext`
[Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys
"private_key_plaintext":
"description": |-
The private key for the service account which has access to the Google Cloud Vertex AI Service provided as a plaintext secret. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to reference your key using Databricks Secrets, see `private_key`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`.
The private key for the service account which has access to the Google
Cloud Vertex AI Service provided as a plaintext secret. See [Best
practices for managing service account keys]. If you prefer to reference
your key using Databricks Secrets, see `private_key`. You must provide an
API key using one of the following fields: `private_key` or
`private_key_plaintext`.
[Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys
"project_id":
"description": |-
This is the Google Cloud project id that the service account is associated with.
This is the Google Cloud project id that the service account is
associated with.
"region":
"description": |-
This is the region for the Google Cloud Vertex AI Service. See [supported regions](https://cloud.google.com/vertex-ai/docs/general/locations) for more details. Some models are only available in specific regions.
This is the region for the Google Cloud Vertex AI Service. See [supported
regions] for more details. Some models are only available in specific
regions.
[supported regions]: https://cloud.google.com/vertex-ai/docs/general/locations
github.com/databricks/databricks-sdk-go/service/serving.OpenAiConfig:
"_":
"description": |-
Configs needed to create an OpenAI model route.
"microsoft_entra_client_id":
"description": |
This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.
"description": |-
This field is only required for Azure AD OpenAI and is the Microsoft
Entra Client ID.
"microsoft_entra_client_secret":
"description": |
The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication.
If you prefer to paste your client secret directly, see `microsoft_entra_client_secret_plaintext`.
You must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.
"description": |-
The Databricks secret key reference for a client secret used for
Microsoft Entra ID authentication. If you prefer to paste your client
secret directly, see `microsoft_entra_client_secret_plaintext`. You must
provide an API key using one of the following fields:
`microsoft_entra_client_secret` or
`microsoft_entra_client_secret_plaintext`.
"microsoft_entra_client_secret_plaintext":
"description": |
The client secret used for Microsoft Entra ID authentication provided as a plaintext string.
If you prefer to reference your key using Databricks Secrets, see `microsoft_entra_client_secret`.
You must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.
"description": |-
The client secret used for Microsoft Entra ID authentication provided as
a plaintext string. If you prefer to reference your key using Databricks
Secrets, see `microsoft_entra_client_secret`. You must provide an API key
using one of the following fields: `microsoft_entra_client_secret` or
`microsoft_entra_client_secret_plaintext`.
"microsoft_entra_tenant_id":
"description": |
This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.
"description": |-
This field is only required for Azure AD OpenAI and is the Microsoft
Entra Tenant ID.
"openai_api_base":
"description": |
This is a field to provide a customized base URl for the OpenAI API.
For Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service
provided by Azure.
For other OpenAI API types, this field is optional, and if left unspecified, the standard OpenAI base URL is used.
"description": |-
This is a field to provide a customized base URl for the OpenAI API. For
Azure OpenAI, this field is required, and is the base URL for the Azure
OpenAI API service provided by Azure. For other OpenAI API types, this
field is optional, and if left unspecified, the standard OpenAI base URL
is used.
"openai_api_key":
"description": |-
The Databricks secret key reference for an OpenAI API key using the OpenAI or Azure service. If you prefer to paste your API key directly, see `openai_api_key_plaintext`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`.
The Databricks secret key reference for an OpenAI API key using the
OpenAI or Azure service. If you prefer to paste your API key directly,
see `openai_api_key_plaintext`. You must provide an API key using one of
the following fields: `openai_api_key` or `openai_api_key_plaintext`.
"openai_api_key_plaintext":
"description": |-
The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `openai_api_key`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`.
The OpenAI API key using the OpenAI or Azure service provided as a
plaintext string. If you prefer to reference your key using Databricks
Secrets, see `openai_api_key`. You must provide an API key using one of
the following fields: `openai_api_key` or `openai_api_key_plaintext`.
"openai_api_type":
"description": |
This is an optional field to specify the type of OpenAI API to use.
For Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security
access validation protocol. For access token validation, use azure. For authentication using Azure Active
"description": |-
This is an optional field to specify the type of OpenAI API to use. For
Azure OpenAI, this field is required, and adjust this parameter to
represent the preferred security access validation protocol. For access
token validation, use azure. For authentication using Azure Active
Directory (Azure AD) use, azuread.
"openai_api_version":
"description": |
This is an optional field to specify the OpenAI API version.
For Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to
utilize, specified by a date.
"description": |-
This is an optional field to specify the OpenAI API version. For Azure
OpenAI, this field is required, and is the version of the Azure OpenAI
service to utilize, specified by a date.
"openai_deployment_name":
"description": |
This field is only required for Azure OpenAI and is the name of the deployment resource for the
Azure OpenAI service.
"description": |-
This field is only required for Azure OpenAI and is the name of the
deployment resource for the Azure OpenAI service.
"openai_organization":
"description": |
This is an optional field to specify the organization in OpenAI or Azure OpenAI.
"description": |-
This is an optional field to specify the organization in OpenAI or Azure
OpenAI.
github.com/databricks/databricks-sdk-go/service/serving.PaLmConfig:
"palm_api_key":
"description": |-
The Databricks secret key reference for a PaLM API key. If you prefer to paste your API key directly, see `palm_api_key_plaintext`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`.
The Databricks secret key reference for a PaLM API key. If you prefer to
paste your API key directly, see `palm_api_key_plaintext`. You must
provide an API key using one of the following fields: `palm_api_key` or
`palm_api_key_plaintext`.
"palm_api_key_plaintext":
"description": |-
The PaLM API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `palm_api_key`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`.
The PaLM API key provided as a plaintext string. If you prefer to
reference your key using Databricks Secrets, see `palm_api_key`. You must
provide an API key using one of the following fields: `palm_api_key` or
`palm_api_key_plaintext`.
github.com/databricks/databricks-sdk-go/service/serving.RateLimit:
"calls":
"description": |-
@ -3010,8 +3100,6 @@ github.com/databricks/databricks-sdk-go/service/serving.RateLimit:
Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.
github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey:
"_":
"description": |-
Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.
"enum":
- |-
user
@ -3019,8 +3107,6 @@ github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey:
endpoint
github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod:
"_":
"description": |-
Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.
"enum":
- |-
minute
@ -3033,21 +3119,15 @@ github.com/databricks/databricks-sdk-go/service/serving.Route:
The percentage of endpoint traffic to send to this route. It must be an integer between 0 and 100 inclusive.
github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput:
"entity_name":
"description": |
The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC),
or a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of
__catalog_name__.__schema_name__.__model_name__.
"entity_version":
"description": |-
The version of the model in Databricks Model Registry to be served or empty if the entity is a FEATURE_SPEC.
The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC), or a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of **catalog_name.schema_name.model_name**.
"entity_version": {}
"environment_vars":
"description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity.\nNote: this is an experimental feature and subject to change. \nExample entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`"
"description": |-
An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. Example entity environment variables that refer to Databricks secrets: `{"OPENAI_API_KEY": "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": "{{secrets/my_scope2/my_key2}}"}`
"external_model":
"description": |
The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)
can be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model,
it cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later.
The task type of all external models within an endpoint must be the same.
"description": |-
The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled) can be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model, it cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later. The task type of all external models within an endpoint must be the same.
"instance_profile_arn":
"description": |-
ARN of the instance profile that the served entity uses to access AWS resources.
@ -3058,68 +3138,46 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput:
"description": |-
The minimum tokens per second that the endpoint can scale down to.
"name":
"description": |
The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores.
If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other
entities, it defaults to <entity-name>-<entity-version>.
"description": |-
The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.
"scale_to_zero_enabled":
"description": |-
Whether the compute resources for the served entity should scale down to zero.
"workload_size":
"description": |
The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between.
A single unit of provisioned concurrency can process one request at a time.
Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.
"description": |-
The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.
"workload_type":
"description": |
The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is
"CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.
See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
"description": |-
The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput:
"environment_vars":
"description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this model.\nNote: this is an experimental feature and subject to change. \nExample model environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`"
"description": |-
An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. Example entity environment variables that refer to Databricks secrets: `{"OPENAI_API_KEY": "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": "{{secrets/my_scope2/my_key2}}"}`
"instance_profile_arn":
"description": |-
ARN of the instance profile that the served model will use to access AWS resources.
ARN of the instance profile that the served entity uses to access AWS resources.
"max_provisioned_throughput":
"description": |-
The maximum tokens per second that the endpoint can scale up to.
"min_provisioned_throughput":
"description": |-
The minimum tokens per second that the endpoint can scale down to.
"model_name":
"description": |
The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model,
in the form of __catalog_name__.__schema_name__.__model_name__.
"model_version":
"description": |-
The version of the model in Databricks Model Registry or Unity Catalog to be served.
"model_name": {}
"model_version": {}
"name":
"description": |
The name of a served model. It must be unique across an endpoint. If not specified, this field will default to <model-name>-<model-version>.
A served model name can consist of alphanumeric characters, dashes, and underscores.
"description": |-
The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.
"scale_to_zero_enabled":
"description": |-
Whether the compute resources for the served model should scale down to zero.
Whether the compute resources for the served entity should scale down to zero.
"workload_size":
"description": |
The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.
A single unit of provisioned concurrency can process one request at a time.
Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.
"description": |-
The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.
"workload_type":
"description": |
The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is
"CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.
See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
"description": |-
The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize:
"_":
"description": |
The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.
A single unit of provisioned concurrency can process one request at a time.
Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.
"enum":
- |-
Small
@ -3129,17 +3187,26 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkload
Large
github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType:
"_":
"description": |
The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is
"CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.
See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
"enum":
- |-
CPU
- |-
GPU_MEDIUM
- |-
GPU_SMALL
- |-
GPU_LARGE
- |-
MULTIGPU_MEDIUM
github.com/databricks/databricks-sdk-go/service/serving.ServingModelWorkloadType:
"_":
"enum":
- |-
CPU
- |-
GPU_MEDIUM
- |-
GPU_SMALL
- |-
GPU_LARGE
- |-
MULTIGPU_MEDIUM

View File

@ -197,3 +197,14 @@ github.com/databricks/databricks-sdk-go/service/pipelines.PipelineTrigger:
"manual":
"description": |-
PLACEHOLDER
github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput:
"entity_version":
"description": |-
PLACEHOLDER
github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput:
"model_name":
"description": |-
PLACEHOLDER
"model_version":
"description": |-
PLACEHOLDER

View File

@ -40,6 +40,19 @@ func addInterpolationPatterns(typ reflect.Type, s jsonschema.Schema) jsonschema.
}
}
// Allows using variables in enum fields
if s.Type == jsonschema.StringType && s.Enum != nil {
return jsonschema.Schema{
OneOf: []jsonschema.Schema{
s,
{
Type: jsonschema.StringType,
Pattern: interpolationPattern("var"),
},
},
}
}
switch s.Type {
case jsonschema.ArrayType, jsonschema.ObjectType:
// arrays and objects can have complex variable values specified.
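In effect, every string schema that declares an enum is now published as a oneOf: the first branch keeps the original enum, the second accepts a ${var.…} reference. A minimal, self-contained sketch of that shape under assumed names (the local schema type, field names, and the exact pattern are illustrative, not the CLI's real ones):
package sketch
// wrapEnum mirrors the transformation added above using a local, simplified schema
// type: a string schema that declares an enum is republished as a oneOf of the
// original schema and a string schema whose pattern admits ${var.*} references.
type schema struct {
	Type    string   `json:"type,omitempty"`
	Enum    []string `json:"enum,omitempty"`
	Pattern string   `json:"pattern,omitempty"`
	OneOf   []schema `json:"oneOf,omitempty"`
}
func wrapEnum(s schema) schema {
	if s.Type != "string" || s.Enum == nil {
		return s
	}
	return schema{OneOf: []schema{
		s,
		// Illustrative stand-in for interpolationPattern("var"); the CLI defines the
		// real regular expression elsewhere in this file.
		{Type: "string", Pattern: `\$\{var\.[A-Za-z0-9_.\[\]-]+\}`},
	}}
}
This is also why the schema test further down asserts on providers.OneOf[0].Enum instead of providers.Enum, and why the bundle fixture below can set pause_status to ${var.schedule_status}.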
@ -159,6 +172,15 @@ func generateSchema(workdir, outputFile string) {
a.addAnnotations,
addInterpolationPatterns,
})
// AdditionalProperties is set to an empty schema to allow non-typed keys used as yaml-anchors
// Example:
// some_anchor: &some_anchor
// file_path: /some/path/
// workspace:
// <<: *some_anchor
s.AdditionalProperties = jsonschema.Schema{}
if err != nil {
log.Fatal(err)
}

View File

@ -1 +0,0 @@
unknown: value

View File

@ -13,6 +13,8 @@ variables:
simplevar:
default: true
description: "simplevar description"
schedule_status:
default: "PAUSED"
complexvar:
default:
@ -42,6 +44,8 @@ resources:
dependencies:
- python=3.7
client: "myclient"
trigger:
pause_status: ${var.schedule_status}
tags:
foo: bar
bar: baz

View File

@ -0,0 +1,11 @@
tags: &job-tags
environment: "some_environment"
resources:
jobs:
db1:
tags:
<<: *job-tags
db2:
tags:
<<: *job-tags

View File

@ -59,8 +59,8 @@ func TestJsonSchema(t *testing.T) {
}
providers := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.GitProvider")
assert.Contains(t, providers.Enum, "gitHub")
assert.Contains(t, providers.Enum, "bitbucketCloud")
assert.Contains(t, providers.Enum, "gitHubEnterprise")
assert.Contains(t, providers.Enum, "bitbucketServer")
assert.Contains(t, providers.OneOf[0].Enum, "gitHub")
assert.Contains(t, providers.OneOf[0].Enum, "bitbucketCloud")
assert.Contains(t, providers.OneOf[0].Enum, "gitHubEnterprise")
assert.Contains(t, providers.OneOf[0].Enum, "bitbucketServer")
}

File diff suppressed because it is too large

View File

@ -1,59 +0,0 @@
package config_tests
import (
"testing"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/stretchr/testify/assert"
)
func assertExpectedMonitor(t *testing.T, p *resources.QualityMonitor) {
assert.Equal(t, "timestamp", p.InferenceLog.TimestampCol)
assert.Equal(t, "prediction", p.InferenceLog.PredictionCol)
assert.Equal(t, "model_id", p.InferenceLog.ModelIdCol)
assert.Equal(t, catalog.MonitorInferenceLogProblemType("PROBLEM_TYPE_REGRESSION"), p.InferenceLog.ProblemType)
}
func TestMonitorTableNames(t *testing.T) {
b := loadTarget(t, "./quality_monitor", "development")
assert.Len(t, b.Config.Resources.QualityMonitors, 1)
assert.Equal(t, config.Development, b.Config.Bundle.Mode)
p := b.Config.Resources.QualityMonitors["my_monitor"]
assert.Equal(t, "main.test.dev", p.TableName)
assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir)
assert.Equal(t, "main.dev", p.OutputSchemaName)
assertExpectedMonitor(t, p)
}
func TestMonitorStaging(t *testing.T) {
b := loadTarget(t, "./quality_monitor", "staging")
assert.Len(t, b.Config.Resources.QualityMonitors, 1)
p := b.Config.Resources.QualityMonitors["my_monitor"]
assert.Equal(t, "main.test.staging", p.TableName)
assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir)
assert.Equal(t, "main.staging", p.OutputSchemaName)
assertExpectedMonitor(t, p)
}
func TestMonitorProduction(t *testing.T) {
b := loadTarget(t, "./quality_monitor", "production")
assert.Len(t, b.Config.Resources.QualityMonitors, 1)
p := b.Config.Resources.QualityMonitors["my_monitor"]
assert.Equal(t, "main.test.prod", p.TableName)
assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir)
assert.Equal(t, "main.prod", p.OutputSchemaName)
inferenceLog := p.InferenceLog
assert.Equal(t, []string{"1 day", "1 hour"}, inferenceLog.Granularities)
assert.Equal(t, "timestamp_prod", p.InferenceLog.TimestampCol)
assert.Equal(t, "prediction_prod", p.InferenceLog.PredictionCol)
assert.Equal(t, "model_id_prod", p.InferenceLog.ModelIdCol)
assert.Equal(t, catalog.MonitorInferenceLogProblemType("PROBLEM_TYPE_REGRESSION"), p.InferenceLog.ProblemType)
}

View File

@ -307,6 +307,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: array: redirect_urls
// TODO: array: scopes
// TODO: complex arg: token_access_policy
cmd.Use = "update INTEGRATION_ID"

View File

@ -62,7 +62,7 @@ func makeCommand(method string) *cobra.Command {
var response any
headers := map[string]string{"Content-Type": "application/json"}
err = api.Do(cmd.Context(), method, path, headers, request, &response)
err = api.Do(cmd.Context(), method, path, headers, nil, request, &response)
if err != nil {
return err
}
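The extra nil threaded through call sites like this one corresponds to the query-parameters argument introduced by the SDK bump; the widened apiClient interface shown further down in this diff spells out the new shape. A minimal, self-contained sketch of the post-upgrade call (the helper name and its path handling are illustrative):
package sketch
import (
	"context"
	"net/http"
)
// apiClient mirrors the widened interface used elsewhere in this change: a
// queryParams map now sits between the headers and the request body.
type apiClient interface {
	Do(ctx context.Context, method, path string,
		headers map[string]string, queryParams map[string]any, request, response any,
		visitors ...func(*http.Request) error) error
}
// getStatus is a hypothetical helper showing the new call shape; passing nil for the
// query parameters preserves the behavior of the existing call sites in this diff.
func getStatus(ctx context.Context, c apiClient, path string) (map[string]any, error) {
	var response map[string]any
	err := c.Do(ctx, http.MethodGet, path, nil, nil, nil, &response)
	return response, err
}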

109
cmd/workspace/access-control/access-control.go generated Executable file
View File

@ -0,0 +1,109 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package access_control
import (
"fmt"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/spf13/cobra"
)
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "access-control",
Short: `Rule based Access Control for Databricks Resources.`,
Long: `Rule based Access Control for Databricks Resources.`,
GroupID: "iam",
Annotations: map[string]string{
"package": "iam",
},
// This service is being previewed; hide from help output.
Hidden: true,
}
// Add methods
cmd.AddCommand(newCheckPolicy())
// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}
return cmd
}
// start check-policy command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var checkPolicyOverrides []func(
*cobra.Command,
*iam.CheckPolicyRequest,
)
func newCheckPolicy() *cobra.Command {
cmd := &cobra.Command{}
var checkPolicyReq iam.CheckPolicyRequest
var checkPolicyJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&checkPolicyJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: complex arg: resource_info
cmd.Use = "check-policy"
cmd.Short = `Check access policy to a resource.`
cmd.Long = `Check access policy to a resource.`
cmd.Annotations = make(map[string]string)
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
diags := checkPolicyJson.Unmarshal(&checkPolicyReq)
if diags.HasError() {
return diags.Error()
}
if len(diags) > 0 {
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
if err != nil {
return err
}
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.AccessControl.CheckPolicy(ctx, checkPolicyReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range checkPolicyOverrides {
fn(cmd, &checkPolicyReq)
}
return cmd
}
// end service AccessControl
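As the comments in this generated file note, behavior can be adjusted from hand-written files in the same directory. A hypothetical override.go (the file name, registration, and help text are illustrative, not part of this change) could refine the generated check-policy command like so:
package access_control
import (
	"github.com/databricks/databricks-sdk-go/service/iam"
	"github.com/spf13/cobra"
)
// init registers an override that tweaks the generated check-policy command,
// for example to adjust its short help text.
func init() {
	checkPolicyOverrides = append(checkPolicyOverrides, func(cmd *cobra.Command, _ *iam.CheckPolicyRequest) {
		cmd.Short = `Check whether a principal is authorized to act on a resource.`
	})
}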

2
cmd/workspace/cmd.go generated
View File

@ -3,6 +3,7 @@
package workspace
import (
access_control "github.com/databricks/cli/cmd/workspace/access-control"
alerts "github.com/databricks/cli/cmd/workspace/alerts"
alerts_legacy "github.com/databricks/cli/cmd/workspace/alerts-legacy"
apps "github.com/databricks/cli/cmd/workspace/apps"
@ -96,6 +97,7 @@ import (
func All() []*cobra.Command {
var out []*cobra.Command
out = append(out, access_control.New())
out = append(out, alerts.New())
out = append(out, alerts_legacy.New())
out = append(out, apps.New())

View File

@ -64,7 +64,7 @@ func newCreate() *cobra.Command {
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `Description about the provider.`)
cmd.Flags().StringVar(&createReq.RecipientProfileStr, "recipient-profile-str", createReq.RecipientProfileStr, `This field is required when the __authentication_type__ is **TOKEN** or not provided.`)
cmd.Flags().StringVar(&createReq.RecipientProfileStr, "recipient-profile-str", createReq.RecipientProfileStr, `This field is required when the __authentication_type__ is **TOKEN**, **OAUTH_CLIENT_CREDENTIALS** or not provided.`)
cmd.Use = "create NAME AUTHENTICATION_TYPE"
cmd.Short = `Create an auth provider.`
@ -430,7 +430,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Description about the provider.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the provider.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of Provider owner.`)
cmd.Flags().StringVar(&updateReq.RecipientProfileStr, "recipient-profile-str", updateReq.RecipientProfileStr, `This field is required when the __authentication_type__ is **TOKEN** or not provided.`)
cmd.Flags().StringVar(&updateReq.RecipientProfileStr, "recipient-profile-str", updateReq.RecipientProfileStr, `This field is required when the __authentication_type__ is **TOKEN**, **OAUTH_CLIENT_CREDENTIALS** or not provided.`)
cmd.Use = "update NAME"
cmd.Short = `Update a provider.`

View File

@ -91,7 +91,7 @@ func newCreate() *cobra.Command {
cmd.Long = `Create a share recipient.
Creates a new recipient with the delta sharing authentication type in the
metastore. The caller must be a metastore admin or has the
metastore. The caller must be a metastore admin or have the
**CREATE_RECIPIENT** privilege on the metastore.
Arguments:
@ -186,28 +186,16 @@ func newDelete() *cobra.Command {
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No NAME argument specified. Loading names for Recipients drop-down."
names, err := w.Recipients.RecipientInfoNameToMetastoreIdMap(ctx, sharing.ListRecipientsRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "Name of the recipient")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have name of the recipient")
}
deleteReq.Name = args[0]
err = w.Recipients.Delete(ctx, deleteReq)
@ -258,28 +246,16 @@ func newGet() *cobra.Command {
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No NAME argument specified. Loading names for Recipients drop-down."
names, err := w.Recipients.RecipientInfoNameToMetastoreIdMap(ctx, sharing.ListRecipientsRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "Name of the recipient")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have name of the recipient")
}
getReq.Name = args[0]
response, err := w.Recipients.Get(ctx, getReq)
@ -384,7 +360,7 @@ func newRotateToken() *cobra.Command {
the provided token info. The caller must be the owner of the recipient.
Arguments:
NAME: The name of the recipient.
NAME: The name of the Recipient.
EXISTING_TOKEN_EXPIRE_IN_SECONDS: The expiration time of the bearer token in ISO 8601 format. This will set
the expiration_time of existing token only to a smaller timestamp, it
cannot extend the expiration_time. Use 0 to expire the existing token
@ -479,28 +455,16 @@ func newSharePermissions() *cobra.Command {
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No NAME argument specified. Loading names for Recipients drop-down."
names, err := w.Recipients.RecipientInfoNameToMetastoreIdMap(ctx, sharing.ListRecipientsRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The name of the Recipient")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have the name of the recipient")
}
sharePermissionsReq.Name = args[0]
response, err := w.Recipients.SharePermissions(ctx, sharePermissionsReq)
@ -560,6 +524,11 @@ func newUpdate() *cobra.Command {
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
@ -577,30 +546,13 @@ func newUpdate() *cobra.Command {
}
}
}
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No NAME argument specified. Loading names for Recipients drop-down."
names, err := w.Recipients.RecipientInfoNameToMetastoreIdMap(ctx, sharing.ListRecipientsRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "Name of the recipient")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have name of the recipient")
}
updateReq.Name = args[0]
err = w.Recipients.Update(ctx, updateReq)
response, err := w.Recipients.Update(ctx, updateReq)
if err != nil {
return err
}
return nil
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.

View File

@ -49,6 +49,7 @@ func New() *cobra.Command {
cmd.AddCommand(newGetOpenApi())
cmd.AddCommand(newGetPermissionLevels())
cmd.AddCommand(newGetPermissions())
cmd.AddCommand(newHttpRequest())
cmd.AddCommand(newList())
cmd.AddCommand(newLogs())
cmd.AddCommand(newPatch())
@ -153,16 +154,34 @@ func newCreate() *cobra.Command {
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: complex arg: ai_gateway
// TODO: complex arg: config
// TODO: array: rate_limits
cmd.Flags().BoolVar(&createReq.RouteOptimized, "route-optimized", createReq.RouteOptimized, `Enable route optimization for the serving endpoint.`)
// TODO: array: tags
cmd.Use = "create"
cmd.Use = "create NAME"
cmd.Short = `Create a new serving endpoint.`
cmd.Long = `Create a new serving endpoint.`
cmd.Long = `Create a new serving endpoint.
Arguments:
NAME: The name of the serving endpoint. This field is required and must be
unique across a Databricks workspace. An endpoint name can consist of
alphanumeric characters, dashes, and underscores.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("json") {
err := root.ExactArgs(0)(cmd, args)
if err != nil {
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input")
}
return nil
}
check := root.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
@ -179,8 +198,9 @@ func newCreate() *cobra.Command {
return err
}
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
if !cmd.Flags().Changed("json") {
createReq.Name = args[0]
}
wait, err := w.ServingEndpoints.Create(ctx, createReq)
@ -233,10 +253,7 @@ func newDelete() *cobra.Command {
cmd.Use = "delete NAME"
cmd.Short = `Delete a serving endpoint.`
cmd.Long = `Delete a serving endpoint.
Arguments:
NAME: The name of the serving endpoint. This field is required.`
cmd.Long = `Delete a serving endpoint.`
cmd.Annotations = make(map[string]string)
@ -432,11 +449,12 @@ func newGetOpenApi() *cobra.Command {
getOpenApiReq.Name = args[0]
err = w.ServingEndpoints.GetOpenApi(ctx, getOpenApiReq)
response, err := w.ServingEndpoints.GetOpenApi(ctx, getOpenApiReq)
if err != nil {
return err
}
return nil
defer response.Contents.Close()
return cmdio.Render(ctx, response.Contents)
}
// Disable completions since they are not applicable.
@ -568,6 +586,77 @@ func newGetPermissions() *cobra.Command {
return cmd
}
// start http-request command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var httpRequestOverrides []func(
*cobra.Command,
*serving.ExternalFunctionRequest,
)
func newHttpRequest() *cobra.Command {
cmd := &cobra.Command{}
var httpRequestReq serving.ExternalFunctionRequest
// TODO: short flags
cmd.Flags().StringVar(&httpRequestReq.Headers, "headers", httpRequestReq.Headers, `Additional headers for the request.`)
cmd.Flags().StringVar(&httpRequestReq.Json, "json", httpRequestReq.Json, `The JSON payload to send in the request body.`)
cmd.Flags().StringVar(&httpRequestReq.Params, "params", httpRequestReq.Params, `Query parameters for the request.`)
cmd.Use = "http-request CONNECTION_NAME METHOD PATH"
cmd.Short = `Make external services call using the credentials stored in UC Connection.`
cmd.Long = `Make external services call using the credentials stored in UC Connection.
Arguments:
CONNECTION_NAME: The connection name to use. This is required to identify the external
connection.
METHOD: The HTTP method to use (e.g., 'GET', 'POST').
PATH: The relative path for the API endpoint. This is required.`
// This command is being previewed; hide from help output.
cmd.Hidden = true
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(3)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
httpRequestReq.ConnectionName = args[0]
_, err = fmt.Sscan(args[1], &httpRequestReq.Method)
if err != nil {
return fmt.Errorf("invalid METHOD: %s", args[1])
}
httpRequestReq.Path = args[2]
response, err := w.ServingEndpoints.HttpRequest(ctx, httpRequestReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range httpRequestOverrides {
fn(cmd, &httpRequestReq)
}
return cmd
}
// start list command
// Slice with functions to override default command behavior.
@ -849,7 +938,7 @@ func newPutAiGateway() *cobra.Command {
cmd.Long = `Update AI Gateway of a serving endpoint.
Used to update the AI Gateway of a serving endpoint. NOTE: Only external model
endpoints are currently supported.
and provisioned throughput endpoints are currently supported.
Arguments:
NAME: The name of the serving endpoint whose AI Gateway is being updated. This

2
go.mod
View File

@ -7,7 +7,7 @@ toolchain go1.23.4
require (
github.com/Masterminds/semver/v3 v3.3.1 // MIT
github.com/briandowns/spinner v1.23.1 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.55.0 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.56.0 // Apache 2.0
github.com/fatih/color v1.18.0 // MIT
github.com/google/uuid v1.6.0 // BSD-3-Clause
github.com/hashicorp/go-version v1.7.0 // MPL 2.0

4
go.sum generated
View File

@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/databricks/databricks-sdk-go v0.55.0 h1:ReziD6spzTDltM0ml80LggKo27F3oUjgTinCFDJDnak=
github.com/databricks/databricks-sdk-go v0.55.0/go.mod h1:JpLizplEs+up9/Z4Xf2x++o3sM9eTTWFGzIXAptKJzI=
github.com/databricks/databricks-sdk-go v0.56.0 h1:8BsqjrSLbm2ET+/SLCN8qD+v+HFvs891dzi1OaiyRfc=
github.com/databricks/databricks-sdk-go v0.56.0/go.mod h1:JpLizplEs+up9/Z4Xf2x++o3sM9eTTWFGzIXAptKJzI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

View File

@ -158,7 +158,7 @@ func (a *syncTest) remoteFileContent(ctx context.Context, relativePath, expected
var res []byte
a.c.Eventually(func() bool {
err = apiClient.Do(ctx, http.MethodGet, urlPath, nil, nil, &res)
err = apiClient.Do(ctx, http.MethodGet, urlPath, nil, nil, nil, &res)
require.NoError(a.t, err)
actualContent := string(res)
return actualContent == expectedContent

42
libs/auth/env_test.go Normal file
View File

@ -0,0 +1,42 @@
package auth
import (
"testing"
"github.com/databricks/databricks-sdk-go/config"
"github.com/stretchr/testify/assert"
)
func TestAuthEnv(t *testing.T) {
in := &config.Config{
Profile: "thisshouldbeignored",
Host: "https://test.com",
Token: "test-token",
Password: "test-password",
MetadataServiceURL: "http://somurl.com",
AzureUseMSI: true,
AzureTenantID: "test-tenant-id",
AzureClientID: "test-client-id",
AzureClientSecret: "test-client-secret",
ActionsIDTokenRequestToken: "test-actions-id-token-request-token",
}
expected := map[string]string{
"DATABRICKS_HOST": "https://test.com",
"DATABRICKS_TOKEN": "test-token",
"DATABRICKS_PASSWORD": "test-password",
"DATABRICKS_METADATA_SERVICE_URL": "http://somurl.com",
"ARM_USE_MSI": "true",
"ARM_TENANT_ID": "test-tenant-id",
"ARM_CLIENT_ID": "test-client-id",
"ARM_CLIENT_SECRET": "test-client-secret",
"ACTIONS_ID_TOKEN_REQUEST_TOKEN": "test-actions-id-token-request-token",
}
out := Env(in)
assert.Equal(t, expected, out)
}

View File

@ -148,7 +148,7 @@ func (w *FilesClient) Write(ctx context.Context, name string, reader io.Reader,
overwrite := slices.Contains(mode, OverwriteIfExists)
urlPath = fmt.Sprintf("%s?overwrite=%t", urlPath, overwrite)
headers := map[string]string{"Content-Type": "application/octet-stream"}
err = w.apiClient.Do(ctx, http.MethodPut, urlPath, headers, reader, nil)
err = w.apiClient.Do(ctx, http.MethodPut, urlPath, headers, nil, reader, nil)
// Return early on success.
if err == nil {
@ -176,7 +176,7 @@ func (w *FilesClient) Read(ctx context.Context, name string) (io.ReadCloser, err
}
var reader io.ReadCloser
err = w.apiClient.Do(ctx, http.MethodGet, urlPath, nil, nil, &reader)
err = w.apiClient.Do(ctx, http.MethodGet, urlPath, nil, nil, nil, &reader)
// Return early on success.
if err == nil {

View File

@ -106,7 +106,7 @@ func (info *wsfsFileInfo) MarshalJSON() ([]byte, error) {
// as an interface to allow for mocking in tests.
type apiClient interface {
Do(ctx context.Context, method, path string,
headers map[string]string, request, response any,
headers map[string]string, queryParams map[string]any, request, response any,
visitors ...func(*http.Request) error) error
}
@ -156,7 +156,7 @@ func (w *WorkspaceFilesClient) Write(ctx context.Context, name string, reader io
return err
}
err = w.apiClient.Do(ctx, http.MethodPost, urlPath, nil, body, nil)
err = w.apiClient.Do(ctx, http.MethodPost, urlPath, nil, nil, body, nil)
// Return early on success.
if err == nil {
@ -341,6 +341,7 @@ func (w *WorkspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileIn
http.MethodGet,
"/api/2.0/workspace/get-status",
nil,
nil,
map[string]string{
"path": absPath,
"return_export_info": "true",

View File

@ -17,7 +17,7 @@ type mockApiClient struct {
}
func (m *mockApiClient) Do(ctx context.Context, method, path string,
headers map[string]string, request, response any,
headers map[string]string, queryParams map[string]any, request, response any,
visitors ...func(*http.Request) error,
) error {
args := m.Called(ctx, method, path, headers, request, response, visitors)

View File

@ -66,6 +66,7 @@ func fetchRepositoryInfoAPI(ctx context.Context, path string, w *databricks.Work
http.MethodGet,
apiEndpoint,
nil,
nil,
map[string]string{
"path": path,
"return_git_info": "true",

View File

@ -3,7 +3,10 @@ package testdiff
import (
"encoding/json"
"fmt"
"path/filepath"
"regexp"
"runtime"
"slices"
"strings"
"github.com/databricks/cli/internal/testutil"
@ -31,6 +34,10 @@ type ReplacementsContext struct {
Repls []Replacement
}
func (r *ReplacementsContext) Clone() ReplacementsContext {
return ReplacementsContext{Repls: slices.Clone(r.Repls)}
}
func (r *ReplacementsContext) Replace(s string) string {
// QQQ Should probably only replace whole words
for _, repl := range r.Repls {
@ -69,13 +76,48 @@ func (r *ReplacementsContext) Set(old, new string) {
if err == nil {
encodedOld, err := json.Marshal(old)
if err == nil {
r.appendLiteral(string(encodedOld), string(encodedNew))
r.appendLiteral(trimQuotes(string(encodedOld)), trimQuotes(string(encodedNew)))
}
}
r.appendLiteral(old, new)
}
func trimQuotes(s string) string {
if len(s) > 0 && s[0] == '"' {
s = s[1:]
}
if len(s) > 0 && s[len(s)-1] == '"' {
s = s[:len(s)-1]
}
return s
}
func (r *ReplacementsContext) SetPath(old, new string) {
r.Set(old, new)
if runtime.GOOS != "windows" {
return
}
// Support both forward and backward slashes
m1 := strings.ReplaceAll(old, "\\", "/")
if m1 != old {
r.Set(m1, new)
}
m2 := strings.ReplaceAll(old, "/", "\\")
if m2 != old && m2 != m1 {
r.Set(m2, new)
}
}
func (r *ReplacementsContext) SetPathWithParents(old, new string) {
r.SetPath(old, new)
r.SetPath(filepath.Dir(old), new+"_PARENT")
r.SetPath(filepath.Dir(filepath.Dir(old)), new+"_GPARENT")
}
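// The helpers above are easiest to see together; the function below is a hypothetical
// illustration (its name and the replacement token are not part of this change) of
// cloning a replacement set, registering a temporary directory plus its parent
// directories, and normalizing raw output before comparison. On Windows, SetPath also
// registers the forward- and backslash variants of the path.
func exampleNormalizeTmpDir(repls ReplacementsContext, tmpDir, rawOutput string) string {
	local := repls.Clone()
	local.SetPathWithParents(tmpDir, "[TMPDIR]")
	return local.Replace(rawOutput)
}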
func PrepareReplacementsWorkspaceClient(t testutil.TestingT, r *ReplacementsContext, w *databricks.WorkspaceClient) {
t.Helper()
// in some clouds (gcp) w.Config.Host includes "https://" prefix in others it's really just a host (azure)