mirror of https://github.com/databricks/cli.git

Merge remote-tracking branch 'origin' into test-protos

commit 809423de3f

@@ -1 +1 @@
-779817ed8d63031f5ea761fbd25ee84f38feec0d
+0be1b914249781b5e903b7676fd02255755bc851
@@ -109,16 +109,19 @@ var {{.CamelName}}Overrides []func(
{{- end }}
)

{{- $excludeFromJson := list "http-request"}}

func new{{.PascalName}}() *cobra.Command {
cmd := &cobra.Command{}

{{- $canUseJson := and .CanUseJson (not (in $excludeFromJson .KebabName )) -}}
{{- if .Request}}

var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}}
{{- if .RequestBodyField }}
{{.CamelName}}Req.{{.RequestBodyField.PascalName}} = &{{.Service.Package.Name}}.{{.RequestBodyField.Entity.PascalName}}{}
{{- end }}
{{- if .CanUseJson}}
{{- if $canUseJson}}
var {{.CamelName}}Json flags.JsonFlag
{{- end}}
{{- end}}

@@ -135,7 +138,7 @@ func new{{.PascalName}}() *cobra.Command {
{{- $request = .RequestBodyField.Entity -}}
{{- end -}}
{{if $request }}// TODO: short flags
{{- if .CanUseJson}}
{{- if $canUseJson}}
cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`)
{{- end}}
{{$method := .}}

@@ -177,7 +180,7 @@ func new{{.PascalName}}() *cobra.Command {
{{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}}
{{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt $request) (eq 1 (len $request.RequiredRequestBodyFields)) -}}
{{- $onlyPathArgsRequiredAsPositionalArguments := and $request (eq (len .RequiredPositionalArguments) (len $request.RequiredPathFields)) -}}
{{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson (or $request.HasRequiredRequestBodyFields )) -}}
{{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and $canUseJson (or $request.HasRequiredRequestBodyFields )) -}}
{{- $hasCustomArgHandler := or $hasRequiredArgs $hasDifferentArgsWithJsonFlag -}}

{{- $atleastOneArgumentWithDescription := false -}}

@@ -239,7 +242,7 @@ func new{{.PascalName}}() *cobra.Command {
ctx := cmd.Context()
{{if .Service.IsAccounts}}a := root.AccountClient(ctx){{else}}w := root.WorkspaceClient(ctx){{end}}
{{- if .Request }}
{{ if .CanUseJson }}
{{ if $canUseJson }}
if cmd.Flags().Changed("json") {
diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req{{ if .RequestBodyField }}.{{.RequestBodyField.PascalName}}{{ end }})
if diags.HasError() {

@@ -255,7 +258,7 @@ func new{{.PascalName}}() *cobra.Command {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}{{- end}}
{{- if $hasPosArgs }}
{{- if and .CanUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }} else {
{{- if and $canUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }} else {
{{- end}}
{{- if $hasIdPrompt}}
if len(args) == 0 {

@@ -279,9 +282,9 @@ func new{{.PascalName}}() *cobra.Command {

{{$method := .}}
{{- range $arg, $field := .RequiredPositionalArguments}}
{{- template "args-scan" (dict "Arg" $arg "Field" $field "Method" $method "HasIdPrompt" $hasIdPrompt)}}
{{- template "args-scan" (dict "Arg" $arg "Field" $field "Method" $method "HasIdPrompt" $hasIdPrompt "ExcludeFromJson" $excludeFromJson)}}
{{- end -}}
{{- if and .CanUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }}
{{- if and $canUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }}
}
{{- end}}

@@ -392,7 +395,8 @@ func new{{.PascalName}}() *cobra.Command {
{{- $method := .Method -}}
{{- $arg := .Arg -}}
{{- $hasIdPrompt := .HasIdPrompt -}}
{{- $optionalIfJsonIsUsed := and (not $hasIdPrompt) (and $field.IsRequestBodyField $method.CanUseJson) }}
{{ $canUseJson := and $method.CanUseJson (not (in .ExcludeFromJson $method.KebabName)) }}
{{- $optionalIfJsonIsUsed := and (not $hasIdPrompt) (and $field.IsRequestBodyField $canUseJson) }}
{{- if $optionalIfJsonIsUsed }}
if !cmd.Flags().Changed("json") {
{{- end }}
@@ -31,6 +31,7 @@ cmd/account/users/users.go linguist-generated=true
cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true
cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true
cmd/account/workspaces/workspaces.go linguist-generated=true
cmd/workspace/access-control/access-control.go linguist-generated=true
cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go linguist-generated=true
cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go linguist-generated=true
cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true
@@ -10,19 +10,65 @@ on:
jobs:
publish-to-winget-pkgs:
runs-on:
group: databricks-protected-runner-group
labels: windows-server-latest
group: databricks-deco-testing-runner-group
labels: ubuntu-latest-deco

environment: release

steps:
- uses: vedantmgoyal2009/winget-releaser@93fd8b606a1672ec3e5c6c3bb19426be68d1a8b0 # v2
with:
identifier: Databricks.DatabricksCLI
installers-regex: 'windows_.*-signed\.zip$' # Only signed Windows releases
token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }}
fork-user: eng-dev-ecosystem-bot
- name: Checkout repository and submodules
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

# Use the tag from the input, or the ref name if the input is not provided.
# The ref name is equal to the tag name when this workflow is triggered by the "sign-cli" command.
release-tag: ${{ inputs.tag || github.ref_name }}
# When updating the version of komac, make sure to update the checksum in the next step.
# Find both at https://github.com/russellbanks/Komac/releases.
- name: Download komac binary
run: |
curl -s -L -o $RUNNER_TEMP/komac-2.9.0-x86_64-unknown-linux-gnu.tar.gz https://github.com/russellbanks/Komac/releases/download/v2.9.0/komac-2.9.0-x86_64-unknown-linux-gnu.tar.gz

- name: Verify komac binary
run: |
echo "d07a12831ad5418fee715488542a98ce3c0e591d05c850dd149fe78432be8c4c $RUNNER_TEMP/komac-2.9.0-x86_64-unknown-linux-gnu.tar.gz" | sha256sum -c -

- name: Untar komac binary to temporary path
run: |
mkdir -p $RUNNER_TEMP/komac
tar -xzf $RUNNER_TEMP/komac-2.9.0-x86_64-unknown-linux-gnu.tar.gz -C $RUNNER_TEMP/komac

- name: Add komac to PATH
run: echo "$RUNNER_TEMP/komac" >> $GITHUB_PATH

- name: Confirm komac version
run: komac --version

# Use the tag from the input, or the ref name if the input is not provided.
# The ref name is equal to the tag name when this workflow is triggered by the "sign-cli" command.
- name: Strip "v" prefix from version
id: strip_version
run: echo "version=$(echo ${{ inputs.tag || github.ref_name }} | sed 's/^v//')" >> "$GITHUB_OUTPUT"

- name: Get URLs of signed Windows binaries
id: get_windows_urls
run: |
urls=$(
gh api https://api.github.com/repos/databricks/cli/releases/tags/${{ inputs.tag || github.ref_name }} | \
jq -r .assets[].browser_download_url | \
grep -E '_windows_.*-signed\.zip$' | \
tr '\n' ' '
)
if [ -z "$urls" ]; then
echo "No signed Windows binaries found" >&2
exit 1
fi
echo "urls=$urls" >> "$GITHUB_OUTPUT"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

- name: Publish to Winget
run: |
komac update Databricks.DatabricksCLI \
--version ${{ steps.strip_version.outputs.version }} \
--submit \
--urls ${{ steps.get_windows_urls.outputs.urls }} \
env:
KOMAC_FORK_OWNER: eng-dev-ecosystem-bot
GITHUB_TOKEN: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }}
@@ -60,12 +60,6 @@ jobs:
- name: Install uv
uses: astral-sh/setup-uv@887a942a15af3a7626099df99e897a18d9e5ab3a # v5.1.0

- name: Run ruff
uses: astral-sh/ruff-action@31a518504640beb4897d0b9f9e50a2a9196e75ba # v3.0.1
with:
version: "0.9.1"
args: "format --check"

- name: Set go env
run: |
echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV

@@ -80,7 +74,7 @@ jobs:
- name: Run tests with coverage
run: make cover

golangci:
linters:
needs: cleanups
name: lint
runs-on: ubuntu-latest

@@ -105,6 +99,11 @@ jobs:
with:
version: v1.63.4
args: --timeout=15m
- name: Run ruff
uses: astral-sh/ruff-action@31a518504640beb4897d0b9f9e50a2a9196e75ba # v3.0.1
with:
version: "0.9.1"
args: "format --check"

validate-bundle-schema:
needs: cleanups
CHANGELOG.md
@@ -1,5 +1,25 @@
# Version changelog

## [Release] Release v0.239.1

CLI:
* Added text output templates for apps list and list-deployments ([#2175](https://github.com/databricks/cli/pull/2175)).
* Fix duplicate "apps" entry in help output ([#2191](https://github.com/databricks/cli/pull/2191)).

Bundles:
* Allow yaml-anchors in schema ([#2200](https://github.com/databricks/cli/pull/2200)).
* Show an error when non-yaml files used in include section ([#2201](https://github.com/databricks/cli/pull/2201)).
* Set WorktreeRoot to sync root outside git repo ([#2197](https://github.com/databricks/cli/pull/2197)).
* fix: Detailed message for using source-linked deployment with file_path specified ([#2119](https://github.com/databricks/cli/pull/2119)).
* Allow using variables in enum fields ([#2199](https://github.com/databricks/cli/pull/2199)).
* Add experimental-jobs-as-code template ([#2177](https://github.com/databricks/cli/pull/2177)).
* Reading variables from file ([#2171](https://github.com/databricks/cli/pull/2171)).
* Fixed an apps message order and added output test ([#2174](https://github.com/databricks/cli/pull/2174)).
* Default to forward slash-separated paths for path translation ([#2145](https://github.com/databricks/cli/pull/2145)).
* Include a materialized copy of built-in templates ([#2146](https://github.com/databricks/cli/pull/2146)).

## [Release] Release v0.239.0

### New feature announcement
NOTICE
@@ -105,3 +105,7 @@ License - https://github.com/wI2L/jsondiff/blob/master/LICENSE
https://github.com/hexops/gotextdiff
Copyright (c) 2009 The Go Authors. All rights reserved.
License - https://github.com/hexops/gotextdiff/blob/main/LICENSE

https://github.com/BurntSushi/toml
Copyright (c) 2013 TOML authors
https://github.com/BurntSushi/toml/blob/master/COPYING
@@ -17,3 +17,5 @@ For more complex tests one can also use:
- `errcode` helper: if the command fails with non-zero code, it appends `Exit code: N` to the output but returns success to caller (bash), allowing continuation of script.
- `trace` helper: prints the arguments before executing the command.
- custom output files: redirect output to custom file (it must start with `out`), e.g. `$CLI bundle validate > out.txt 2> out.error.txt`.

See [selftest](./selftest) for a toy test.
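A minimal sketch of how these helpers compose inside a test script, based on the selftest and git-permerror scripts included later in this commit (the exact commands are illustrative, not prescriptive):

# Print the command before running it; stdout/stderr are captured into the output file.
trace $CLI bundle validate

# Tolerate a non-zero exit and record it as "Exit code: N" instead of aborting the script.
errcode trace $CLI bundle validate -o json | jq .bundle.git

# Anything redirected to a file whose name starts with "out" is compared against the checked-in copy.
echo HELLO > out.hello.txt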
@ -9,17 +9,18 @@ import (
|
|||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/databricks/cli/internal/testutil"
|
||||
"github.com/databricks/cli/libs/env"
|
||||
"github.com/databricks/cli/libs/testdiff"
|
||||
"github.com/databricks/cli/libs/testserver"
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
@ -28,8 +29,8 @@ var KeepTmp bool
|
|||
|
||||
// In order to debug CLI running under acceptance test, set this to full subtest name, e.g. "bundle/variables/empty"
|
||||
// Then install your breakpoints and click "debug test" near TestAccept in VSCODE.
|
||||
// example: var singleTest = "bundle/variables/empty"
|
||||
var singleTest = ""
|
||||
// example: var SingleTest = "bundle/variables/empty"
|
||||
var SingleTest = ""
|
||||
|
||||
// If enabled, instead of compiling and running CLI externally, we'll start in-process server that accepts and runs
|
||||
// CLI commands. The $CLI in test scripts is a helper that just forwards command-line arguments to this server (see bin/callserver.py).
|
||||
|
@ -37,7 +38,7 @@ var singleTest = ""
|
|||
var InprocessMode bool
|
||||
|
||||
func init() {
|
||||
flag.BoolVar(&InprocessMode, "inprocess", singleTest != "", "Run CLI in the same process as test (for debugging)")
|
||||
flag.BoolVar(&InprocessMode, "inprocess", SingleTest != "", "Run CLI in the same process as test (for debugging)")
|
||||
flag.BoolVar(&KeepTmp, "keeptmp", false, "Do not delete TMP directory after run")
|
||||
}
|
||||
|
||||
|
@ -45,6 +46,7 @@ const (
|
|||
EntryPointScript = "script"
|
||||
CleanupScript = "script.cleanup"
|
||||
PrepareScript = "script.prepare"
|
||||
MaxFileSize = 100_000
|
||||
)
|
||||
|
||||
var Scripts = map[string]bool{
|
||||
|
@ -54,19 +56,14 @@ var Scripts = map[string]bool{
|
|||
}
|
||||
|
||||
func TestAccept(t *testing.T) {
|
||||
testAccept(t, InprocessMode, "")
|
||||
testAccept(t, InprocessMode, SingleTest)
|
||||
}
|
||||
|
||||
func TestInprocessMode(t *testing.T) {
|
||||
if InprocessMode {
|
||||
t.Skip("Already tested by TestAccept")
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
// - catalogs A catalog is the first layer of Unity Catalog’s three-level namespace.
|
||||
// + catalogs A catalog is the first layer of Unity Catalog<6F>s three-level namespace.
|
||||
t.Skip("Fails on CI on unicode characters")
|
||||
}
|
||||
require.NotZero(t, testAccept(t, true, "help"))
|
||||
require.Equal(t, 1, testAccept(t, true, "selftest"))
|
||||
}
|
||||
|
||||
func testAccept(t *testing.T, InprocessMode bool, singleTest string) int {
|
||||
|
@ -94,23 +91,24 @@ func testAccept(t *testing.T, InprocessMode bool, singleTest string) int {
|
|||
}
|
||||
|
||||
t.Setenv("CLI", execPath)
|
||||
repls.Set(execPath, "$CLI")
|
||||
repls.SetPath(execPath, "$CLI")
|
||||
|
||||
// Make helper scripts available
|
||||
t.Setenv("PATH", fmt.Sprintf("%s%c%s", filepath.Join(cwd, "bin"), os.PathListSeparator, os.Getenv("PATH")))
|
||||
|
||||
tempHomeDir := t.TempDir()
|
||||
repls.Set(tempHomeDir, "$TMPHOME")
|
||||
repls.SetPath(tempHomeDir, "$TMPHOME")
|
||||
t.Logf("$TMPHOME=%v", tempHomeDir)
|
||||
|
||||
// Prevent CLI from downloading terraform in each test:
|
||||
t.Setenv("DATABRICKS_TF_EXEC_PATH", tempHomeDir)
|
||||
// Make use of uv cache; since we set HomeEnvVar to temporary directory, it is not picked up automatically
|
||||
uvCache := getUVDefaultCacheDir(t)
|
||||
t.Setenv("UV_CACHE_DIR", uvCache)
|
||||
|
||||
ctx := context.Background()
|
||||
cloudEnv := os.Getenv("CLOUD_ENV")
|
||||
|
||||
if cloudEnv == "" {
|
||||
server := StartServer(t)
|
||||
server := testserver.New(t)
|
||||
AddHandlers(server)
|
||||
// Redirect API access to local server:
|
||||
t.Setenv("DATABRICKS_HOST", server.URL)
|
||||
|
@ -119,6 +117,9 @@ func testAccept(t *testing.T, InprocessMode bool, singleTest string) int {
|
|||
homeDir := t.TempDir()
|
||||
// Do not read user's ~/.databrickscfg
|
||||
t.Setenv(env.HomeEnvVar(), homeDir)
|
||||
|
||||
// Prevent CLI from downloading terraform in each test:
|
||||
t.Setenv("DATABRICKS_TF_EXEC_PATH", tempHomeDir)
|
||||
}
|
||||
|
||||
workspaceClient, err := databricks.NewWorkspaceClient()
|
||||
|
@ -130,6 +131,7 @@ func testAccept(t *testing.T, InprocessMode bool, singleTest string) int {
|
|||
testdiff.PrepareReplacementsUser(t, &repls, *user)
|
||||
testdiff.PrepareReplacementsWorkspaceClient(t, &repls, workspaceClient)
|
||||
testdiff.PrepareReplacementsUUID(t, &repls)
|
||||
testdiff.PrepareReplacementsDevVersion(t, &repls)
|
||||
|
||||
testDirs := getTests(t)
|
||||
require.NotEmpty(t, testDirs)
|
||||
|
@ -176,6 +178,13 @@ func getTests(t *testing.T) []string {
|
|||
}
|
||||
|
||||
func runTest(t *testing.T, dir, coverDir string, repls testdiff.ReplacementsContext) {
|
||||
config, configPath := LoadConfig(t, dir)
|
||||
|
||||
isEnabled, isPresent := config.GOOS[runtime.GOOS]
|
||||
if isPresent && !isEnabled {
|
||||
t.Skipf("Disabled via GOOS.%s setting in %s", runtime.GOOS, configPath)
|
||||
}
|
||||
|
||||
var tmpDir string
|
||||
var err error
|
||||
if KeepTmp {
|
||||
|
@ -188,12 +197,8 @@ func runTest(t *testing.T, dir, coverDir string, repls testdiff.ReplacementsCont
|
|||
tmpDir = t.TempDir()
|
||||
}
|
||||
|
||||
// Converts C:\Users\DENIS~1.BIL -> C:\Users\denis.bilenko
|
||||
tmpDirEvalled, err1 := filepath.EvalSymlinks(tmpDir)
|
||||
if err1 == nil && tmpDirEvalled != tmpDir {
|
||||
repls.SetPathWithParents(tmpDirEvalled, "$TMPDIR")
|
||||
}
|
||||
repls.SetPathWithParents(tmpDir, "$TMPDIR")
|
||||
repls.Repls = append(repls.Repls, config.Repls...)
|
||||
|
||||
scriptContents := readMergedScriptContents(t, dir)
|
||||
testutil.WriteFile(t, filepath.Join(tmpDir, EntryPointScript), scriptContents)
|
||||
|
@ -227,14 +232,15 @@ func runTest(t *testing.T, dir, coverDir string, repls testdiff.ReplacementsCont
|
|||
formatOutput(out, err)
|
||||
require.NoError(t, out.Close())
|
||||
|
||||
printedRepls := false
|
||||
|
||||
// Compare expected outputs
|
||||
for relPath := range outputs {
|
||||
doComparison(t, repls, dir, tmpDir, relPath)
|
||||
doComparison(t, repls, dir, tmpDir, relPath, &printedRepls)
|
||||
}
|
||||
|
||||
// Make sure there are not unaccounted for new files
|
||||
files, err := ListDir(t, tmpDir)
|
||||
require.NoError(t, err)
|
||||
files := ListDir(t, tmpDir)
|
||||
for _, relPath := range files {
|
||||
if _, ok := inputs[relPath]; ok {
|
||||
continue
|
||||
|
@ -242,26 +248,27 @@ func runTest(t *testing.T, dir, coverDir string, repls testdiff.ReplacementsCont
|
|||
if _, ok := outputs[relPath]; ok {
|
||||
continue
|
||||
}
|
||||
t.Errorf("Unexpected output: %s", relPath)
|
||||
if strings.HasPrefix(relPath, "out") {
|
||||
// We have a new file starting with "out"
|
||||
// Show the contents & support overwrite mode for it:
|
||||
doComparison(t, repls, dir, tmpDir, relPath)
|
||||
doComparison(t, repls, dir, tmpDir, relPath, &printedRepls)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doComparison(t *testing.T, repls testdiff.ReplacementsContext, dirRef, dirNew, relPath string) {
|
||||
func doComparison(t *testing.T, repls testdiff.ReplacementsContext, dirRef, dirNew, relPath string, printedRepls *bool) {
|
||||
pathRef := filepath.Join(dirRef, relPath)
|
||||
pathNew := filepath.Join(dirNew, relPath)
|
||||
bufRef, okRef := readIfExists(t, pathRef)
|
||||
bufNew, okNew := readIfExists(t, pathNew)
|
||||
bufRef, okRef := tryReading(t, pathRef)
|
||||
bufNew, okNew := tryReading(t, pathNew)
|
||||
if !okRef && !okNew {
|
||||
t.Errorf("Both files are missing: %s, %s", pathRef, pathNew)
|
||||
t.Errorf("Both files are missing or have errors: %s, %s", pathRef, pathNew)
|
||||
return
|
||||
}
|
||||
|
||||
valueRef := testdiff.NormalizeNewlines(string(bufRef))
|
||||
valueNew := testdiff.NormalizeNewlines(string(bufNew))
|
||||
valueRef := testdiff.NormalizeNewlines(bufRef)
|
||||
valueNew := testdiff.NormalizeNewlines(bufNew)
|
||||
|
||||
// Apply replacements to the new value only.
|
||||
// The reference value is stored after applying replacements.
|
||||
|
@ -295,6 +302,15 @@ func doComparison(t *testing.T, repls testdiff.ReplacementsContext, dirRef, dirN
|
|||
t.Logf("Overwriting existing output file: %s", relPath)
|
||||
testutil.WriteFile(t, pathRef, valueNew)
|
||||
}
|
||||
|
||||
if !equal && printedRepls != nil && !*printedRepls {
|
||||
*printedRepls = true
|
||||
var items []string
|
||||
for _, item := range repls.Repls {
|
||||
items = append(items, fmt.Sprintf("REPL %s => %s", item.Old, item.New))
|
||||
}
|
||||
t.Log("Available replacements:\n" + strings.Join(items, "\n"))
|
||||
}
|
||||
}
|
||||
|
||||
// Returns combined script.prepare (root) + script.prepare (parent) + ... + script + ... + script.cleanup (parent) + ...
|
||||
|
@ -310,14 +326,14 @@ func readMergedScriptContents(t *testing.T, dir string) string {
|
|||
cleanups := []string{}
|
||||
|
||||
for {
|
||||
x, ok := readIfExists(t, filepath.Join(dir, CleanupScript))
|
||||
x, ok := tryReading(t, filepath.Join(dir, CleanupScript))
|
||||
if ok {
|
||||
cleanups = append(cleanups, string(x))
|
||||
cleanups = append(cleanups, x)
|
||||
}
|
||||
|
||||
x, ok = readIfExists(t, filepath.Join(dir, PrepareScript))
|
||||
x, ok = tryReading(t, filepath.Join(dir, PrepareScript))
|
||||
if ok {
|
||||
prepares = append(prepares, string(x))
|
||||
prepares = append(prepares, x)
|
||||
}
|
||||
|
||||
if dir == "" || dir == "." {
|
||||
|
@ -404,16 +420,33 @@ func formatOutput(w io.Writer, err error) {
|
|||
}
|
||||
}
|
||||
|
||||
func readIfExists(t *testing.T, path string) ([]byte, bool) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err == nil {
|
||||
return data, true
|
||||
func tryReading(t *testing.T, path string) (string, bool) {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
t.Errorf("%s: %s", path, err)
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
t.Fatalf("%s: %s", path, err)
|
||||
if info.Size() > MaxFileSize {
|
||||
t.Errorf("%s: ignoring, too large: %d", path, info.Size())
|
||||
return "", false
|
||||
}
|
||||
return []byte{}, false
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
// already checked ErrNotExist above
|
||||
t.Errorf("%s: %s", path, err)
|
||||
return "", false
|
||||
}
|
||||
|
||||
if !utf8.Valid(data) {
|
||||
t.Errorf("%s: not valid utf-8", path)
|
||||
return "", false
|
||||
}
|
||||
|
||||
return string(data), true
|
||||
}
|
||||
|
||||
func CopyDir(src, dst string, inputs, outputs map[string]bool) error {
|
||||
|
@ -451,37 +484,19 @@ func CopyDir(src, dst string, inputs, outputs map[string]bool) error {
|
|||
})
|
||||
}
|
||||
|
||||
func ListDir(t *testing.T, src string) ([]string, error) {
|
||||
// exclude folders in .gitignore from comparison
|
||||
ignored := []string{
|
||||
"\\.ruff_cache",
|
||||
"\\.venv",
|
||||
".*\\.egg-info",
|
||||
"__pycache__",
|
||||
// depends on uv version
|
||||
"uv.lock",
|
||||
}
|
||||
|
||||
func ListDir(t *testing.T, src string) []string {
|
||||
var files []string
|
||||
err := filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
// Do not FailNow here.
|
||||
// The output comparison is happening after this call which includes output.txt which
|
||||
// includes errors printed by commands which include explanation why a given file cannot be read.
|
||||
t.Errorf("Error when listing %s: path=%s: %s", src, path, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
for _, ignoredFolder := range ignored {
|
||||
if matched, _ := regexp.MatchString(ignoredFolder, info.Name()); matched {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
} else {
|
||||
for _, ignoredFolder := range ignored {
|
||||
if matched, _ := regexp.MatchString(ignoredFolder, info.Name()); matched {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
relPath, err := filepath.Rel(src, path)
|
||||
|
@ -492,5 +507,21 @@ func ListDir(t *testing.T, src string) ([]string, error) {
|
|||
files = append(files, relPath)
|
||||
return nil
|
||||
})
|
||||
return files, err
|
||||
if err != nil {
|
||||
t.Errorf("Failed to list %s: %s", src, err)
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
func getUVDefaultCacheDir(t *testing.T) string {
|
||||
// According to uv docs https://docs.astral.sh/uv/concepts/cache/#caching-in-continuous-integration
|
||||
// the default cache directory is
|
||||
// "A system-appropriate cache directory, e.g., $XDG_CACHE_HOME/uv or $HOME/.cache/uv on Unix and %LOCALAPPDATA%\uv\cache on Windows"
|
||||
cacheDir, err := os.UserCacheDir()
|
||||
require.NoError(t, err)
|
||||
if runtime.GOOS == "windows" {
|
||||
return cacheDir + "\\uv\\cache"
|
||||
} else {
|
||||
return cacheDir + "/uv"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
bundle:
|
||||
name: git-permerror
|
|
@ -0,0 +1,78 @@
|
|||
=== No permission to access .git. Badness: inferred flag is set to true even though we did not infer branch. bundle_root_path is not correct in subdir case.
|
||||
|
||||
>>> chmod 000 .git
|
||||
|
||||
>>> $CLI bundle validate
|
||||
Error: unable to load repository specific gitconfig: open config: permission denied
|
||||
|
||||
Name: git-permerror
|
||||
Target: default
|
||||
Workspace:
|
||||
User: $USERNAME
|
||||
Path: /Workspace/Users/$USERNAME/.bundle/git-permerror/default
|
||||
|
||||
Found 1 error
|
||||
|
||||
Exit code: 1
|
||||
|
||||
>>> $CLI bundle validate -o json
|
||||
Error: unable to load repository specific gitconfig: open config: permission denied
|
||||
|
||||
|
||||
Exit code: 1
|
||||
{
|
||||
"bundle_root_path": ".",
|
||||
"inferred": true
|
||||
}
|
||||
|
||||
>>> withdir subdir/a/b $CLI bundle validate -o json
|
||||
Error: unable to load repository specific gitconfig: open config: permission denied
|
||||
|
||||
|
||||
Exit code: 1
|
||||
{
|
||||
"bundle_root_path": ".",
|
||||
"inferred": true
|
||||
}
|
||||
|
||||
|
||||
=== No permissions to read .git/HEAD. Badness: warning is not shown. inferred is incorrectly set to true. bundle_root_path is not correct in subdir case.
|
||||
|
||||
>>> chmod 000 .git/HEAD
|
||||
|
||||
>>> $CLI bundle validate -o json
|
||||
{
|
||||
"bundle_root_path": ".",
|
||||
"inferred": true
|
||||
}
|
||||
|
||||
>>> withdir subdir/a/b $CLI bundle validate -o json
|
||||
{
|
||||
"bundle_root_path": ".",
|
||||
"inferred": true
|
||||
}
|
||||
|
||||
|
||||
=== No permissions to read .git/config. Badness: inferred is incorretly set to true. bundle_root_path is not correct is subdir case.
|
||||
|
||||
>>> chmod 000 .git/config
|
||||
|
||||
>>> $CLI bundle validate -o json
|
||||
Error: unable to load repository specific gitconfig: open config: permission denied
|
||||
|
||||
|
||||
Exit code: 1
|
||||
{
|
||||
"bundle_root_path": ".",
|
||||
"inferred": true
|
||||
}
|
||||
|
||||
>>> withdir subdir/a/b $CLI bundle validate -o json
|
||||
Error: unable to load repository specific gitconfig: open config: permission denied
|
||||
|
||||
|
||||
Exit code: 1
|
||||
{
|
||||
"bundle_root_path": ".",
|
||||
"inferred": true
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
mkdir myrepo
|
||||
cd myrepo
|
||||
cp ../databricks.yml .
|
||||
git-repo-init
|
||||
mkdir -p subdir/a/b
|
||||
|
||||
printf "=== No permission to access .git. Badness: inferred flag is set to true even though we did not infer branch. bundle_root_path is not correct in subdir case.\n"
|
||||
trace chmod 000 .git
|
||||
errcode trace $CLI bundle validate
|
||||
errcode trace $CLI bundle validate -o json | jq .bundle.git
|
||||
errcode trace withdir subdir/a/b $CLI bundle validate -o json | jq .bundle.git
|
||||
|
||||
printf "\n\n=== No permissions to read .git/HEAD. Badness: warning is not shown. inferred is incorrectly set to true. bundle_root_path is not correct in subdir case.\n"
|
||||
chmod 700 .git
|
||||
trace chmod 000 .git/HEAD
|
||||
errcode trace $CLI bundle validate -o json | jq .bundle.git
|
||||
errcode trace withdir subdir/a/b $CLI bundle validate -o json | jq .bundle.git
|
||||
|
||||
printf "\n\n=== No permissions to read .git/config. Badness: inferred is incorretly set to true. bundle_root_path is not correct is subdir case.\n"
|
||||
chmod 666 .git/HEAD
|
||||
trace chmod 000 .git/config
|
||||
errcode trace $CLI bundle validate -o json | jq .bundle.git
|
||||
errcode trace withdir subdir/a/b $CLI bundle validate -o json | jq .bundle.git
|
||||
|
||||
cd ..
|
||||
rm -fr myrepo
|
|
@ -0,0 +1,5 @@
|
|||
Badness = "Warning logs not shown; inferred flag is set to true incorrect; bundle_root_path is not correct"
|
||||
|
||||
[GOOS]
|
||||
# This test relies on chmod which does not work on Windows
|
||||
windows = false
|
|
@ -0,0 +1,6 @@
|
|||
bundle:
|
||||
name: non_yaml_in_includes
|
||||
|
||||
include:
|
||||
- test.py
|
||||
- resources/*.yml
|
|
@ -0,0 +1,10 @@
|
|||
Error: Files in the 'include' configuration section must be YAML files.
|
||||
in databricks.yml:5:4
|
||||
|
||||
The file test.py in the 'include' configuration section is not a YAML file, and only YAML files are supported. To include files to sync, specify them in the 'sync.include' configuration section instead.
|
||||
|
||||
Name: non_yaml_in_includes
|
||||
|
||||
Found 1 error
|
||||
|
||||
Exit code: 1
|
|
@ -0,0 +1 @@
|
|||
$CLI bundle validate
|
|
@ -0,0 +1 @@
|
|||
print("Hello world")
|
|
@ -1,8 +1,6 @@
|
|||
|
||||
>>> $CLI bundle validate -t development -o json
|
||||
|
||||
Exit code: 0
|
||||
|
||||
>>> $CLI bundle validate -t error
|
||||
Error: notebook this value is overridden not found. Local notebook references are expected
|
||||
to contain one of the following file extensions: [.py, .r, .scala, .sql, .ipynb]
|
||||
|
|
|
@ -1,8 +1,6 @@
|
|||
|
||||
>>> $CLI bundle validate -t development -o json
|
||||
|
||||
Exit code: 0
|
||||
|
||||
>>> $CLI bundle validate -t error
|
||||
Error: notebook this value is overridden not found. Local notebook references are expected
|
||||
to contain one of the following file extensions: [.py, .r, .scala, .sql, .ipynb]
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
bundle:
|
||||
name: scripts
|
||||
|
||||
experimental:
|
||||
scripts:
|
||||
preinit: "python3 ./myscript.py $EXITCODE preinit"
|
||||
postinit: "python3 ./myscript.py 0 postinit"
|
||||
prebuild: "python3 ./myscript.py 0 prebuild"
|
||||
postbuild: "python3 ./myscript.py 0 postbuild"
|
||||
predeploy: "python3 ./myscript.py 0 predeploy"
|
||||
postdeploy: "python3 ./myscript.py 0 postdeploy"
|
|
@ -0,0 +1,8 @@
|
|||
import sys
|
||||
|
||||
info = " ".join(sys.argv[1:])
|
||||
sys.stderr.write(f"from myscript.py {info}: hello stderr!\n")
|
||||
sys.stdout.write(f"from myscript.py {info}: hello stdout!\n")
|
||||
|
||||
exitcode = int(sys.argv[1])
|
||||
sys.exit(exitcode)
|
|
@ -0,0 +1,52 @@
|
|||
|
||||
>>> EXITCODE=0 errcode $CLI bundle validate
|
||||
Executing 'preinit' script
|
||||
from myscript.py 0 preinit: hello stdout!
|
||||
from myscript.py 0 preinit: hello stderr!
|
||||
Executing 'postinit' script
|
||||
from myscript.py 0 postinit: hello stdout!
|
||||
from myscript.py 0 postinit: hello stderr!
|
||||
Name: scripts
|
||||
Target: default
|
||||
Workspace:
|
||||
User: $USERNAME
|
||||
Path: /Workspace/Users/$USERNAME/.bundle/scripts/default
|
||||
|
||||
Validation OK!
|
||||
|
||||
>>> EXITCODE=1 errcode $CLI bundle validate
|
||||
Executing 'preinit' script
|
||||
from myscript.py 1 preinit: hello stdout!
|
||||
from myscript.py 1 preinit: hello stderr!
|
||||
Error: failed to execute script: exit status 1
|
||||
|
||||
Name: scripts
|
||||
|
||||
Found 1 error
|
||||
|
||||
Exit code: 1
|
||||
|
||||
>>> EXITCODE=0 errcode $CLI bundle deploy
|
||||
Executing 'preinit' script
|
||||
from myscript.py 0 preinit: hello stdout!
|
||||
from myscript.py 0 preinit: hello stderr!
|
||||
Executing 'postinit' script
|
||||
from myscript.py 0 postinit: hello stdout!
|
||||
from myscript.py 0 postinit: hello stderr!
|
||||
Executing 'prebuild' script
|
||||
from myscript.py 0 prebuild: hello stdout!
|
||||
from myscript.py 0 prebuild: hello stderr!
|
||||
Executing 'postbuild' script
|
||||
from myscript.py 0 postbuild: hello stdout!
|
||||
from myscript.py 0 postbuild: hello stderr!
|
||||
Executing 'predeploy' script
|
||||
from myscript.py 0 predeploy: hello stdout!
|
||||
from myscript.py 0 predeploy: hello stderr!
|
||||
Error: unable to deploy to /Workspace/Users/$USERNAME/.bundle/scripts/default/state as $USERNAME.
|
||||
Please make sure the current user or one of their groups is listed under the permissions of this bundle.
|
||||
For assistance, contact the owners of this project.
|
||||
They may need to redeploy the bundle to apply the new permissions.
|
||||
Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions.
|
||||
|
||||
|
||||
Exit code: 1
|
|
@ -0,0 +1,3 @@
|
|||
trace EXITCODE=0 errcode $CLI bundle validate
|
||||
trace EXITCODE=1 errcode $CLI bundle validate
|
||||
trace EXITCODE=0 errcode $CLI bundle deploy
|
|
@ -3,4 +3,6 @@ mkdir myrepo
|
|||
cd myrepo
|
||||
cp ../databricks.yml .
|
||||
git-repo-init
|
||||
$CLI bundle validate | sed 's/\\\\/\//g'
|
||||
errcode $CLI bundle validate
|
||||
cd ..
|
||||
rm -fr myrepo
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
[[Repls]]
|
||||
Old = '\\\\myrepo'
|
||||
New = '/myrepo'
|
|
@ -3,7 +3,7 @@
|
|||
# See https://docs.databricks.com/dev-tools/bundles/index.html for documentation.
|
||||
bundle:
|
||||
name: my_dbt_sql
|
||||
uuid: <UUID>
|
||||
uuid: [UUID]
|
||||
|
||||
include:
|
||||
- resources/*.yml
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# See https://docs.databricks.com/dev-tools/bundles/index.html for documentation.
|
||||
bundle:
|
||||
name: my_default_python
|
||||
uuid: <UUID>
|
||||
uuid: [UUID]
|
||||
|
||||
include:
|
||||
- resources/*.yml
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
"rowLimit": 10000
|
||||
},
|
||||
"inputWidgets": {},
|
||||
"nuid": "<UUID>",
|
||||
"nuid": "[UUID]",
|
||||
"showTitle": false,
|
||||
"title": ""
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
"application/vnd.databricks.v1+cell": {
|
||||
"cellMetadata": {},
|
||||
"inputWidgets": {},
|
||||
"nuid": "<UUID>",
|
||||
"nuid": "[UUID]",
|
||||
"showTitle": false,
|
||||
"title": ""
|
||||
}
|
||||
|
@ -24,7 +24,7 @@
|
|||
"application/vnd.databricks.v1+cell": {
|
||||
"cellMetadata": {},
|
||||
"inputWidgets": {},
|
||||
"nuid": "<UUID>",
|
||||
"nuid": "[UUID]",
|
||||
"showTitle": false,
|
||||
"title": ""
|
||||
}
|
||||
|
@ -47,7 +47,7 @@
|
|||
"application/vnd.databricks.v1+cell": {
|
||||
"cellMetadata": {},
|
||||
"inputWidgets": {},
|
||||
"nuid": "<UUID>",
|
||||
"nuid": "[UUID]",
|
||||
"showTitle": false,
|
||||
"title": ""
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
"application/vnd.databricks.v1+cell": {
|
||||
"cellMetadata": {},
|
||||
"inputWidgets": {},
|
||||
"nuid": "<UUID>",
|
||||
"nuid": "[UUID]",
|
||||
"showTitle": false,
|
||||
"title": ""
|
||||
}
|
||||
|
@ -37,7 +37,7 @@
|
|||
"rowLimit": 10000
|
||||
},
|
||||
"inputWidgets": {},
|
||||
"nuid": "<UUID>",
|
||||
"nuid": "[UUID]",
|
||||
"showTitle": false,
|
||||
"title": ""
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# See https://docs.databricks.com/dev-tools/bundles/index.html for documentation.
|
||||
bundle:
|
||||
name: my_default_sql
|
||||
uuid: <UUID>
|
||||
uuid: [UUID]
|
||||
|
||||
include:
|
||||
- resources/*.yml
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
"application/vnd.databricks.v1+cell": {
|
||||
"cellMetadata": {},
|
||||
"inputWidgets": {},
|
||||
"nuid": "<UUID>",
|
||||
"nuid": "[UUID]",
|
||||
"showTitle": false,
|
||||
"title": ""
|
||||
}
|
||||
|
|
|
@ -10,6 +10,8 @@ Please refer to the README.md file for "getting started" instructions.
|
|||
See also the documentation at https://docs.databricks.com/dev-tools/bundles/index.html.
|
||||
|
||||
>>> $CLI bundle validate -t dev --output json
|
||||
Warning: Ignoring Databricks CLI version constraint for development build. Required: >= 0.238.0, current: $DEV_VERSION
|
||||
|
||||
{
|
||||
"jobs": {
|
||||
"my_jobs_as_code_job": {
|
||||
|
|
|
@ -2,7 +2,8 @@
|
|||
# See https://docs.databricks.com/dev-tools/bundles/index.html for documentation.
|
||||
bundle:
|
||||
name: my_jobs_as_code
|
||||
uuid: <UUID>
|
||||
uuid: [UUID]
|
||||
databricks_cli_version: ">= 0.238.0"
|
||||
|
||||
experimental:
|
||||
python:
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
"application/vnd.databricks.v1+cell": {
|
||||
"cellMetadata": {},
|
||||
"inputWidgets": {},
|
||||
"nuid": "<UUID>",
|
||||
"nuid": "[UUID]",
|
||||
"showTitle": false,
|
||||
"title": ""
|
||||
}
|
||||
|
@ -37,7 +37,7 @@
|
|||
"rowLimit": 10000
|
||||
},
|
||||
"inputWidgets": {},
|
||||
"nuid": "<UUID>",
|
||||
"nuid": "[UUID]",
|
||||
"showTitle": false,
|
||||
"title": ""
|
||||
}
|
||||
|
|
|
@ -3,10 +3,8 @@ trace $CLI bundle init experimental-jobs-as-code --config-file ./input.json --ou
|
|||
cd output/my_jobs_as_code
|
||||
|
||||
# silence uv output because it's non-deterministic
|
||||
uv sync 2> /dev/null
|
||||
|
||||
# remove version constraint because it always creates a warning on dev builds
|
||||
cat databricks.yml | grep -v databricks_cli_version > databricks.yml.new
|
||||
mv databricks.yml.new databricks.yml
|
||||
uv sync -q
|
||||
|
||||
trace $CLI bundle validate -t dev --output json | jq ".resources"
|
||||
|
||||
rm -fr .venv resources/__pycache__ uv.lock my_jobs_as_code.egg-info
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
Error: not a bundle template: expected to find a template schema file at databricks_template_schema.json
|
||||
|
||||
Exit code: 1
|
|
@ -0,0 +1,2 @@
|
|||
export NO_COLOR=1
|
||||
$CLI bundle init /DOES/NOT/EXIST
|
|
@ -0,0 +1 @@
|
|||
Badness = 'The error message should include full path: "expected to find a template schema file at databricks_template_schema.json"'
|
|
@ -0,0 +1,5 @@
|
|||
Error: git clone failed: git clone https://invalid-domain-123.databricks.com/hello/world $TMPDIR_GPARENT/world-123456 --no-tags --depth=1: exit status 128. Cloning into '$TMPDIR_GPARENT/world-123456'...
|
||||
fatal: unable to access 'https://invalid-domain-123.databricks.com/hello/world/': Could not resolve host: invalid-domain-123.databricks.com
|
||||
|
||||
|
||||
Exit code: 1
|
|
@ -0,0 +1,2 @@
|
|||
export NO_COLOR=1
|
||||
$CLI bundle init https://invalid-domain-123.databricks.com/hello/world
|
|
@ -0,0 +1,7 @@
|
|||
[[Repls]]
|
||||
Old = '\\'
|
||||
New = '/'
|
||||
|
||||
[[Repls]]
|
||||
Old = '/world-[0-9]+'
|
||||
New = '/world-123456'
|
|
@ -1,7 +1,5 @@
|
|||
|
||||
>>> errcode $CLI bundle validate --var a=one -o json
|
||||
|
||||
Exit code: 0
|
||||
{
|
||||
"a": {
|
||||
"default": "hello",
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
Error: no value assigned to required variable a. Assignment can be done through the "--var" flag or by setting the BUNDLE_VAR_a environment variable
|
||||
Error: no value assigned to required variable a. Assignment can be done using "--var", by setting the BUNDLE_VAR_a environment variable, or in .databricks/bundle/<target>/variable-overrides.json file
|
||||
|
||||
Name: empty${var.a}
|
||||
Target: default
|
||||
|
|
|
@ -18,12 +18,13 @@ variables:
|
|||
description: variable with lookup
|
||||
lookup:
|
||||
cluster_policy: wrong-cluster-policy
|
||||
|
||||
result:
|
||||
default: ${var.a} ${var.b}
|
||||
|
||||
bundle:
|
||||
name: test bundle
|
||||
|
||||
workspace:
|
||||
profile: ${var.a} ${var.b}
|
||||
|
||||
targets:
|
||||
env-with-single-variable-override:
|
||||
variables:
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
"prod-a env-var-b"
|
||||
|
||||
>>> errcode $CLI bundle validate -t env-missing-a-required-variable-assignment
|
||||
Error: no value assigned to required variable b. Assignment can be done through the "--var" flag or by setting the BUNDLE_VAR_b environment variable
|
||||
Error: no value assigned to required variable b. Assignment can be done using "--var", by setting the BUNDLE_VAR_b environment variable, or in .databricks/bundle/<target>/variable-overrides.json file
|
||||
|
||||
Name: test bundle
|
||||
Target: env-missing-a-required-variable-assignment
|
||||
|
@ -36,5 +36,6 @@ Exit code: 1
|
|||
"b": "prod-b",
|
||||
"d": "4321",
|
||||
"e": "1234",
|
||||
"f": "9876"
|
||||
"f": "9876",
|
||||
"result": "default-a prod-b"
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
trace $CLI bundle validate -t env-with-single-variable-override -o json | jq .workspace.profile
|
||||
trace $CLI bundle validate -t env-with-two-variable-overrides -o json | jq .workspace.profile
|
||||
trace BUNDLE_VAR_b=env-var-b $CLI bundle validate -t env-with-two-variable-overrides -o json | jq .workspace.profile
|
||||
trace $CLI bundle validate -t env-with-single-variable-override -o json | jq .variables.result.value
|
||||
trace $CLI bundle validate -t env-with-two-variable-overrides -o json | jq .variables.result.value
|
||||
trace BUNDLE_VAR_b=env-var-b $CLI bundle validate -t env-with-two-variable-overrides -o json | jq .variables.result.value
|
||||
trace errcode $CLI bundle validate -t env-missing-a-required-variable-assignment
|
||||
trace errcode $CLI bundle validate -t env-using-an-undefined-variable
|
||||
trace $CLI bundle validate -t env-overrides-lookup -o json | jq '.variables | map_values(.value)'
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"cluster_key": {
|
||||
"node_type_id": "Standard_DS3_v2"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
"cluster": {
|
||||
"node_type_id": "Standard_DS3_v2"
|
||||
},
|
||||
"cluster_key": "mlops_stacks-cluster",
|
||||
"cluster_workers": 2
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
foo
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"cluster": "mlops_stacks-cluster"
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"cluster_key": "mlops_stacks-cluster-from-file"
|
||||
}
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"cluster_key": "mlops_stacks-cluster",
|
||||
"cluster_workers": 2
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
[
|
||||
"foo"
|
||||
]
|
|
@ -0,0 +1 @@
|
|||
!.databricks
|
|
@ -0,0 +1,53 @@
|
|||
bundle:
|
||||
name: TestResolveVariablesFromFile
|
||||
|
||||
variables:
|
||||
cluster:
|
||||
type: "complex"
|
||||
cluster_key:
|
||||
cluster_workers:
|
||||
|
||||
resources:
|
||||
jobs:
|
||||
job1:
|
||||
job_clusters:
|
||||
- job_cluster_key: ${var.cluster_key}
|
||||
new_cluster:
|
||||
node_type_id: "${var.cluster.node_type_id}"
|
||||
num_workers: ${var.cluster_workers}
|
||||
|
||||
targets:
|
||||
default:
|
||||
default: true
|
||||
variables:
|
||||
cluster_workers: 1
|
||||
cluster:
|
||||
node_type_id: "default"
|
||||
cluster_key: "default"
|
||||
|
||||
without_defaults:
|
||||
|
||||
complex_to_string:
|
||||
variables:
|
||||
cluster_workers: 1
|
||||
cluster:
|
||||
node_type_id: "default"
|
||||
cluster_key: "default"
|
||||
|
||||
string_to_complex:
|
||||
variables:
|
||||
cluster_workers: 1
|
||||
cluster:
|
||||
node_type_id: "default"
|
||||
cluster_key: "default"
|
||||
|
||||
wrong_file_structure:
|
||||
|
||||
invalid_json:
|
||||
|
||||
with_value:
|
||||
variables:
|
||||
cluster_workers: 1
|
||||
cluster:
|
||||
node_type_id: "default"
|
||||
cluster_key: cluster_key_value
|
|
@ -0,0 +1,82 @@
|
|||
|
||||
=== variable file
|
||||
>>> $CLI bundle validate -o json
|
||||
{
|
||||
"job_cluster_key": "mlops_stacks-cluster",
|
||||
"new_cluster": {
|
||||
"node_type_id": "Standard_DS3_v2",
|
||||
"num_workers": 2
|
||||
}
|
||||
}
|
||||
|
||||
=== variable file and variable flag
|
||||
>>> $CLI bundle validate -o json --var=cluster_key=mlops_stacks-cluster-overriden
|
||||
{
|
||||
"job_cluster_key": "mlops_stacks-cluster-overriden",
|
||||
"new_cluster": {
|
||||
"node_type_id": "Standard_DS3_v2",
|
||||
"num_workers": 2
|
||||
}
|
||||
}
|
||||
|
||||
=== variable file and environment variable
|
||||
>>> BUNDLE_VAR_cluster_key=mlops_stacks-cluster-overriden $CLI bundle validate -o json
|
||||
{
|
||||
"job_cluster_key": "mlops_stacks-cluster-overriden",
|
||||
"new_cluster": {
|
||||
"node_type_id": "Standard_DS3_v2",
|
||||
"num_workers": 2
|
||||
}
|
||||
}
|
||||
|
||||
=== variable has value in config file
|
||||
>>> $CLI bundle validate -o json --target with_value
|
||||
{
|
||||
"job_cluster_key": "mlops_stacks-cluster-from-file",
|
||||
"new_cluster": {
|
||||
"node_type_id": "default",
|
||||
"num_workers": 1
|
||||
}
|
||||
}
|
||||
|
||||
=== file has variable that is complex but default is string
|
||||
>>> errcode $CLI bundle validate -o json --target complex_to_string
|
||||
Error: variable cluster_key is not of type complex, but the value in the variable file is a complex type
|
||||
|
||||
|
||||
Exit code: 1
|
||||
{
|
||||
"job_cluster_key": "${var.cluster_key}",
|
||||
"new_cluster": {
|
||||
"node_type_id": "${var.cluster.node_type_id}",
|
||||
"num_workers": "${var.cluster_workers}"
|
||||
}
|
||||
}
|
||||
|
||||
=== file has variable that is string but default is complex
|
||||
>>> errcode $CLI bundle validate -o json --target string_to_complex
|
||||
Error: variable cluster is of type complex, but the value in the variable file is not a complex type
|
||||
|
||||
|
||||
Exit code: 1
|
||||
{
|
||||
"job_cluster_key": "${var.cluster_key}",
|
||||
"new_cluster": {
|
||||
"node_type_id": "${var.cluster.node_type_id}",
|
||||
"num_workers": "${var.cluster_workers}"
|
||||
}
|
||||
}
|
||||
|
||||
=== variable is required but it's not provided in the file
|
||||
>>> errcode $CLI bundle validate -o json --target without_defaults
|
||||
Error: no value assigned to required variable cluster. Assignment can be done using "--var", by setting the BUNDLE_VAR_cluster environment variable, or in .databricks/bundle/<target>/variable-overrides.json file
|
||||
|
||||
|
||||
Exit code: 1
|
||||
{
|
||||
"job_cluster_key": "${var.cluster_key}",
|
||||
"new_cluster": {
|
||||
"node_type_id": "${var.cluster.node_type_id}",
|
||||
"num_workers": "${var.cluster_workers}"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,30 @@
|
|||
cluster_expr=".resources.jobs.job1.job_clusters[0]"
|
||||
|
||||
# defaults from variable file, see .databricks/bundle/<target>/variable-overrides.json
|
||||
|
||||
title "variable file"
|
||||
trace $CLI bundle validate -o json | jq $cluster_expr
|
||||
|
||||
title "variable file and variable flag"
|
||||
trace $CLI bundle validate -o json --var="cluster_key=mlops_stacks-cluster-overriden" | jq $cluster_expr
|
||||
|
||||
title "variable file and environment variable"
|
||||
trace BUNDLE_VAR_cluster_key=mlops_stacks-cluster-overriden $CLI bundle validate -o json | jq $cluster_expr
|
||||
|
||||
title "variable has value in config file"
|
||||
trace $CLI bundle validate -o json --target with_value | jq $cluster_expr
|
||||
|
||||
# title "file cannot be parsed"
|
||||
# trace errcode $CLI bundle validate -o json --target invalid_json | jq $cluster_expr
|
||||
|
||||
# title "file has wrong structure"
|
||||
# trace errcode $CLI bundle validate -o json --target wrong_file_structure | jq $cluster_expr
|
||||
|
||||
title "file has variable that is complex but default is string"
|
||||
trace errcode $CLI bundle validate -o json --target complex_to_string | jq $cluster_expr
|
||||
|
||||
title "file has variable that is string but default is complex"
|
||||
trace errcode $CLI bundle validate -o json --target string_to_complex | jq $cluster_expr
|
||||
|
||||
title "variable is required but it's not provided in the file"
|
||||
trace errcode $CLI bundle validate -o json --target without_defaults | jq $cluster_expr
|
|
@ -3,7 +3,7 @@
|
|||
"abc def"
|
||||
|
||||
>>> errcode $CLI bundle validate
|
||||
Error: no value assigned to required variable b. Assignment can be done through the "--var" flag or by setting the BUNDLE_VAR_b environment variable
|
||||
Error: no value assigned to required variable b. Assignment can be done using "--var", by setting the BUNDLE_VAR_b environment variable, or in .databricks/bundle/<target>/variable-overrides.json file
|
||||
|
||||
Name: ${var.a} ${var.b}
|
||||
Target: default
|
||||
|
|
|
@ -8,10 +8,11 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/databricks/cli/internal/testcli"
|
||||
"github.com/databricks/cli/libs/testserver"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func StartCmdServer(t *testing.T) *TestServer {
|
||||
func StartCmdServer(t *testing.T) *testserver.Server {
|
||||
server := StartServer(t)
|
||||
server.Handle("/", func(r *http.Request) (any, error) {
|
||||
q := r.URL.Query()
|
||||
|
|
|
@ -0,0 +1,104 @@
|
|||
package acceptance_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/databricks/cli/libs/testdiff"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const configFilename = "test.toml"
|
||||
|
||||
var (
|
||||
configCache map[string]TestConfig
|
||||
configMutex sync.Mutex
|
||||
)
|
||||
|
||||
type TestConfig struct {
|
||||
// Place to describe what's wrong with this test. Does not affect how the test is run.
|
||||
Badness string
|
||||
|
||||
// Which OSes the test is enabled on. Each string is compared against runtime.GOOS.
|
||||
// If absent, default to true.
|
||||
GOOS map[string]bool
|
||||
|
||||
// List of additional replacements to apply on this test.
|
||||
// Old is a regexp, New is a replacement expression.
|
||||
Repls []testdiff.Replacement
|
||||
}
|
||||
|
||||
// FindConfig finds the closest config file.
|
||||
func FindConfig(t *testing.T, dir string) (string, bool) {
|
||||
shared := false
|
||||
for {
|
||||
path := filepath.Join(dir, configFilename)
|
||||
_, err := os.Stat(path)
|
||||
|
||||
if err == nil {
|
||||
return path, shared
|
||||
}
|
||||
|
||||
shared = true
|
||||
|
||||
if dir == "" || dir == "." {
|
||||
break
|
||||
}
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
dir = filepath.Dir(dir)
|
||||
continue
|
||||
}
|
||||
|
||||
t.Fatalf("Error while reading %s: %s", path, err)
|
||||
}
|
||||
|
||||
t.Fatal("Config not found: " + configFilename)
|
||||
return "", shared
|
||||
}
|
||||
|
||||
// LoadConfig loads the config file. Non-leaf configs are cached.
|
||||
func LoadConfig(t *testing.T, dir string) (TestConfig, string) {
|
||||
path, leafConfig := FindConfig(t, dir)
|
||||
|
||||
if leafConfig {
|
||||
return DoLoadConfig(t, path), path
|
||||
}
|
||||
|
||||
configMutex.Lock()
|
||||
defer configMutex.Unlock()
|
||||
|
||||
if configCache == nil {
|
||||
configCache = make(map[string]TestConfig)
|
||||
}
|
||||
|
||||
result, ok := configCache[path]
|
||||
if ok {
|
||||
return result, path
|
||||
}
|
||||
|
||||
result = DoLoadConfig(t, path)
|
||||
configCache[path] = result
|
||||
return result, path
|
||||
}
|
||||
|
||||
func DoLoadConfig(t *testing.T, path string) TestConfig {
|
||||
bytes, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read config: %s", err)
|
||||
}
|
||||
|
||||
var config TestConfig
|
||||
meta, err := toml.Decode(string(bytes), &config)
|
||||
require.NoError(t, err)
|
||||
|
||||
keys := meta.Undecoded()
|
||||
if len(keys) > 0 {
|
||||
t.Fatalf("Undecoded keys in %s: %#v", path, keys)
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
|
@@ -6,7 +6,9 @@ errcode() {
local exit_code=$?
# Re-enable 'set -e' if it was previously set
set -e
>&2 printf "\nExit code: $exit_code\n"
if [ $exit_code -ne 0 ]; then
>&2 printf "\nExit code: $exit_code\n"
fi
}

trace() {

@@ -37,6 +39,23 @@ git-repo-init() {
git config core.autocrlf false
git config user.name "Tester"
git config user.email "tester@databricks.com"
git config core.hooksPath no-hooks
git add databricks.yml
git commit -qm 'Add databricks.yml'
}

title() {
local label="$1"
printf "\n=== %s" "$label"
}

withdir() {
local dir="$1"
shift
local orig_dir="$(pwd)"
cd "$dir" || return $?
"$@"
local exit_code=$?
cd "$orig_dir" || return $?
return $exit_code
}
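A hedged usage sketch for the new title and withdir helpers, mirroring how the variable-file and git-permerror test scripts in this commit call them (the directory, command, and jq filter are copied from those scripts, not part of the helper definitions):

# Print a section banner into the captured output.
title "variable file"

# Run a single command from a subdirectory, then return to the original working directory.
errcode trace withdir subdir/a/b $CLI bundle validate -o json | jq .bundle.git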
@ -0,0 +1 @@
|
|||
HELLO
|
|
@ -0,0 +1,39 @@
|
|||
=== Capturing STDERR
|
||||
>>> python3 -c import sys; sys.stderr.write("STDERR\n")
|
||||
STDERR
|
||||
|
||||
=== Capturing STDOUT
|
||||
>>> python3 -c import sys; sys.stderr.write("STDOUT\n")
|
||||
STDOUT
|
||||
|
||||
=== Capturing exit code
|
||||
>>> errcode python3 -c raise SystemExit(5)
|
||||
|
||||
Exit code: 5
|
||||
|
||||
=== Capturing exit code (alt)
|
||||
>>> python3 -c raise SystemExit(7)
|
||||
|
||||
Exit code: 7
|
||||
|
||||
=== Capturing pwd
|
||||
>>> python3 -c import os; print(os.getcwd())
|
||||
$TMPDIR
|
||||
|
||||
=== Capturing subdir
|
||||
>>> mkdir -p subdir/a/b/c
|
||||
|
||||
>>> withdir subdir/a/b/c python3 -c import os; print(os.getcwd())
|
||||
$TMPDIR/subdir/a/b/c
|
||||
|
||||
=== Custom output files - everything starting with out is captured and compared
|
||||
>>> echo HELLO
|
||||
|
||||
=== Custom regex can be specified in [[Repl]] section
|
||||
1234
|
||||
CUSTOM_NUMBER_REGEX
|
||||
123456
|
||||
|
||||
=== Testing --version
|
||||
>>> $CLI --version
|
||||
Databricks CLI v$DEV_VERSION
|
|
@ -0,0 +1,29 @@
|
|||
printf "=== Capturing STDERR"
|
||||
trace python3 -c 'import sys; sys.stderr.write("STDERR\n")'
|
||||
|
||||
printf "\n=== Capturing STDOUT"
|
||||
trace python3 -c 'import sys; sys.stderr.write("STDOUT\n")'
|
||||
|
||||
printf "\n=== Capturing exit code"
|
||||
trace errcode python3 -c 'raise SystemExit(5)'
|
||||
|
||||
printf "\n=== Capturing exit code (alt)"
|
||||
errcode trace python3 -c 'raise SystemExit(7)'
|
||||
|
||||
printf "\n=== Capturing pwd"
|
||||
trace python3 -c 'import os; print(os.getcwd())'
|
||||
|
||||
printf "\n=== Capturing subdir"
|
||||
trace mkdir -p subdir/a/b/c
|
||||
trace withdir subdir/a/b/c python3 -c 'import os; print(os.getcwd())'
|
||||
|
||||
printf "\n=== Custom output files - everything starting with out is captured and compared"
|
||||
trace echo HELLO > out.hello.txt
|
||||
|
||||
printf "\n=== Custom regex can be specified in [[Repl]] section\n"
|
||||
echo 1234
|
||||
echo 12345
|
||||
echo 123456
|
||||
|
||||
printf "\n=== Testing --version"
|
||||
trace $CLI --version
|
|
@ -0,0 +1,20 @@
|
|||
# Badness = "Brief description of what's wrong with the test output, if anything"
|
||||
|
||||
#[GOOS]
|
||||
# Disable on Windows
|
||||
#windows = false
|
||||
|
||||
# Disable on Mac
|
||||
#mac = false
|
||||
|
||||
# Disable on Linux
|
||||
#linux = false
|
||||
|
||||
[[Repls]]
|
||||
Old = '\b[0-9]{5}\b'
|
||||
New = "CUSTOM_NUMBER_REGEX"
|
||||
|
||||
[[Repls]]
|
||||
# Fix path with reverse slashes in the output for Windows.
|
||||
Old = '\$TMPDIR\\subdir\\a\\b\\c'
|
||||
New = '$$TMPDIR/subdir/a/b/c'
|
|
@ -1,74 +1,26 @@
|
|||
package acceptance_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/libs/testserver"
|
||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
"github.com/databricks/databricks-sdk-go/service/iam"
|
||||
"github.com/databricks/databricks-sdk-go/service/workspace"
|
||||
)
|
||||
|
||||
type TestServer struct {
|
||||
*httptest.Server
|
||||
Mux *http.ServeMux
|
||||
}
|
||||
|
||||
type HandlerFunc func(r *http.Request) (any, error)
|
||||
|
||||
func NewTestServer() *TestServer {
|
||||
mux := http.NewServeMux()
|
||||
server := httptest.NewServer(mux)
|
||||
|
||||
return &TestServer{
|
||||
Server: server,
|
||||
Mux: mux,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *TestServer) Handle(pattern string, handler HandlerFunc) {
|
||||
s.Mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
|
||||
resp, err := handler(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
var respBytes []byte
|
||||
|
||||
respString, ok := resp.(string)
|
||||
if ok {
|
||||
respBytes = []byte(respString)
|
||||
} else {
|
||||
respBytes, err = json.MarshalIndent(resp, "", " ")
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := w.Write(respBytes); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func StartServer(t *testing.T) *TestServer {
|
||||
server := NewTestServer()
|
||||
func StartServer(t *testing.T) *testserver.Server {
|
||||
server := testserver.New(t)
|
||||
t.Cleanup(func() {
|
||||
server.Close()
|
||||
})
|
||||
return server
|
||||
}
|
||||
|
||||
func AddHandlers(server *TestServer) {
|
||||
server.Handle("/api/2.0/policies/clusters/list", func(r *http.Request) (any, error) {
|
||||
func AddHandlers(server *testserver.Server) {
|
||||
server.Handle("GET /api/2.0/policies/clusters/list", func(r *http.Request) (any, error) {
|
||||
return compute.ListPoliciesResponse{
|
||||
Policies: []compute.Policy{
|
||||
{
|
||||
|
@ -83,7 +35,7 @@ func AddHandlers(server *TestServer) {
|
|||
}, nil
|
||||
})
|
||||
|
||||
server.Handle("/api/2.0/instance-pools/list", func(r *http.Request) (any, error) {
|
||||
server.Handle("GET /api/2.0/instance-pools/list", func(r *http.Request) (any, error) {
|
||||
return compute.ListInstancePools{
|
||||
InstancePools: []compute.InstancePoolAndStats{
|
||||
{
|
||||
|
@ -94,7 +46,7 @@ func AddHandlers(server *TestServer) {
|
|||
}, nil
|
||||
})
|
||||
|
||||
server.Handle("/api/2.1/clusters/list", func(r *http.Request) (any, error) {
|
||||
server.Handle("GET /api/2.1/clusters/list", func(r *http.Request) (any, error) {
|
||||
return compute.ListClustersResponse{
|
||||
Clusters: []compute.ClusterDetails{
|
||||
{
|
||||
|
@ -109,13 +61,13 @@ func AddHandlers(server *TestServer) {
|
|||
}, nil
|
||||
})
|
||||
|
||||
server.Handle("/api/2.0/preview/scim/v2/Me", func(r *http.Request) (any, error) {
|
||||
server.Handle("GET /api/2.0/preview/scim/v2/Me", func(r *http.Request) (any, error) {
|
||||
return iam.User{
|
||||
UserName: "tester@databricks.com",
|
||||
}, nil
|
||||
})
|
||||
|
||||
server.Handle("/api/2.0/workspace/get-status", func(r *http.Request) (any, error) {
|
||||
server.Handle("GET /api/2.0/workspace/get-status", func(r *http.Request) (any, error) {
|
||||
return workspace.ObjectInfo{
|
||||
ObjectId: 1001,
|
||||
ObjectType: "DIRECTORY",
|
||||
|
@ -124,13 +76,13 @@ func AddHandlers(server *TestServer) {
|
|||
}, nil
|
||||
})
|
||||
|
||||
server.Handle("/api/2.1/unity-catalog/current-metastore-assignment", func(r *http.Request) (any, error) {
|
||||
server.Handle("GET /api/2.1/unity-catalog/current-metastore-assignment", func(r *http.Request) (any, error) {
|
||||
return catalog.MetastoreAssignment{
|
||||
DefaultCatalogName: "main",
|
||||
}, nil
|
||||
})
|
||||
|
||||
server.Handle("/api/2.0/permissions/directories/1001", func(r *http.Request) (any, error) {
|
||||
server.Handle("GET /api/2.0/permissions/directories/1001", func(r *http.Request) (any, error) {
|
||||
return workspace.WorkspaceObjectPermissions{
|
||||
ObjectId: "1001",
|
||||
ObjectType: "DIRECTORY",
|
||||
|
@ -146,4 +98,8 @@ func AddHandlers(server *TestServer) {
|
|||
},
|
||||
}, nil
|
||||
})
|
||||
|
||||
server.Handle("POST /api/2.0/workspace/mkdirs", func(r *http.Request) (any, error) {
|
||||
return "{}", nil
|
||||
})
|
||||
}
|
||||
|
|
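The handler patterns above now carry an HTTP method prefix ("GET /api/...", "POST /api/..."). That syntax relies on the method-aware routing that net/http's ServeMux gained in Go 1.22: a request to a matching path with a different method is rejected with 405. A standalone sketch of the behavior (independent of the testserver package):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	mux := http.NewServeMux()

	// Go 1.22+: only GET requests to this path reach the handler; other methods get 405 Method Not Allowed.
	mux.HandleFunc("GET /api/2.0/preview/scim/v2/Me", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"userName": "tester@databricks.com"}`)
	})

	srv := httptest.NewServer(mux)
	defer srv.Close()

	get, err := http.Get(srv.URL + "/api/2.0/preview/scim/v2/Me")
	if err != nil {
		panic(err)
	}
	post, err := http.Post(srv.URL+"/api/2.0/preview/scim/v2/Me", "application/json", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(get.StatusCode, post.StatusCode) // 200 405
}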
|
@ -0,0 +1,2 @@
|
|||
# If neither the test directory nor any of its parents has a test.toml, this file serves as the fallback configuration.
|
||||
# The configurations are not merged across parents; the closest one is used fully.
|
|
@ -1,29 +0,0 @@
|
|||
package apps
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
)
|
||||
|
||||
type slowDeployMessage struct{}
|
||||
|
||||
// TODO: remove when the no_compute option becomes available in the TF provider and is used in DABs
|
||||
// See https://github.com/databricks/cli/pull/2144
|
||||
func (v *slowDeployMessage) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
if len(b.Config.Resources.Apps) > 0 {
|
||||
cmdio.LogString(ctx, "Note: Databricks apps included in this bundle may increase initial deployment time due to compute provisioning.")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *slowDeployMessage) Name() string {
|
||||
return "apps.SlowDeployMessage"
|
||||
}
|
||||
|
||||
func SlowDeployMessage() bundle.Mutator {
|
||||
return &slowDeployMessage{}
|
||||
}
|
|
@ -72,6 +72,7 @@ type Bundle struct {
|
|||
// It can be initialized on demand after loading the configuration.
|
||||
clientOnce sync.Once
|
||||
client *databricks.WorkspaceClient
|
||||
clientErr error
|
||||
|
||||
// Files that are synced to the workspace.file_path
|
||||
Files []fileset.File
|
||||
|
@ -134,23 +135,25 @@ func TryLoad(ctx context.Context) (*Bundle, error) {
|
|||
return Load(ctx, root)
|
||||
}
|
||||
|
||||
func (b *Bundle) InitializeWorkspaceClient() (*databricks.WorkspaceClient, error) {
|
||||
client, err := b.Config.Workspace.Client()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot resolve bundle auth configuration: %w", err)
|
||||
}
|
||||
return client, nil
|
||||
func (b *Bundle) WorkspaceClientE() (*databricks.WorkspaceClient, error) {
|
||||
b.clientOnce.Do(func() {
|
||||
var err error
|
||||
b.client, err = b.Config.Workspace.Client()
|
||||
if err != nil {
|
||||
b.clientErr = fmt.Errorf("cannot resolve bundle auth configuration: %w", err)
|
||||
}
|
||||
})
|
||||
|
||||
return b.client, b.clientErr
|
||||
}
|
||||
|
||||
func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient {
|
||||
b.clientOnce.Do(func() {
|
||||
var err error
|
||||
b.client, err = b.InitializeWorkspaceClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
})
|
||||
return b.client
|
||||
client, err := b.WorkspaceClientE()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
// SetWorkpaceClient sets the workspace client for this bundle.
|
||||
|
|
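The refactor above folds client construction into a sync.Once so that the first caller pays the cost and every later caller receives the same client or the same cached error; WorkspaceClient keeps the panicking behavior for call sites that cannot return an error. A reduced sketch of the pattern, with the bundle types simplified to keep it self-contained:

package main

import (
	"fmt"
	"sync"
)

type client struct{ host string }

type bundle struct {
	once      sync.Once
	client    *client
	clientErr error
}

// clientE resolves the client at most once and caches both the value and the error.
func (b *bundle) clientE() (*client, error) {
	b.once.Do(func() {
		// Stand-in for resolving auth configuration, which may fail.
		b.client, b.clientErr = &client{host: "https://example.invalid"}, nil
	})
	return b.client, b.clientErr
}

// mustClient mirrors the panicking accessor kept for legacy call sites.
func (b *bundle) mustClient() *client {
	c, err := b.clientE()
	if err != nil {
		panic(err)
	}
	return c
}

func main() {
	b := &bundle{}
	fmt.Println(b.mustClient().host) // same *client instance on every call
}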
|
@ -2,6 +2,7 @@ package loader
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
|
@ -36,6 +37,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
|
|||
// Maintain list of files in order of files being loaded.
|
||||
// This is stored in the bundle configuration for observability.
|
||||
var files []string
|
||||
var diags diag.Diagnostics
|
||||
|
||||
// For each glob, find all files to load.
|
||||
// Ordering of the list of globs is maintained in the output.
|
||||
|
@ -60,7 +62,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
|
|||
|
||||
// Filter matches to ones we haven't seen yet.
|
||||
var includes []string
|
||||
for _, match := range matches {
|
||||
for i, match := range matches {
|
||||
rel, err := filepath.Rel(b.BundleRootPath, match)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
|
@ -69,9 +71,22 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
|
|||
continue
|
||||
}
|
||||
seen[rel] = true
|
||||
if filepath.Ext(rel) != ".yaml" && filepath.Ext(rel) != ".yml" {
|
||||
diags = diags.Append(diag.Diagnostic{
|
||||
Severity: diag.Error,
|
||||
Summary: "Files in the 'include' configuration section must be YAML files.",
|
||||
Detail: fmt.Sprintf("The file %s in the 'include' configuration section is not a YAML file, and only YAML files are supported. To include files to sync, specify them in the 'sync.include' configuration section instead.", rel),
|
||||
Locations: b.Config.GetLocations(fmt.Sprintf("include[%d]", i)),
|
||||
})
|
||||
continue
|
||||
}
|
||||
includes = append(includes, rel)
|
||||
}
|
||||
|
||||
if len(diags) > 0 {
|
||||
return diags
|
||||
}
|
||||
|
||||
// Add matches to list of mutators to return.
|
||||
slices.Sort(includes)
|
||||
files = append(files, includes...)
|
||||
|
|
|
@ -1,26 +0,0 @@
|
|||
package mutator
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
)
|
||||
|
||||
type initializeWorkspaceClient struct{}
|
||||
|
||||
func InitializeWorkspaceClient() bundle.Mutator {
|
||||
return &initializeWorkspaceClient{}
|
||||
}
|
||||
|
||||
func (m *initializeWorkspaceClient) Name() string {
|
||||
return "InitializeWorkspaceClient"
|
||||
}
|
||||
|
||||
// Apply initializes the workspace client for the bundle. We do this here so
|
||||
// downstream calls to b.WorkspaceClient() do not panic if there's an error in the
|
||||
// auth configuration.
|
||||
func (m *initializeWorkspaceClient) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
_, err := b.InitializeWorkspaceClient()
|
||||
return diag.FromErr(err)
|
||||
}
|
|
@ -9,6 +9,7 @@ import (
|
|||
"github.com/databricks/cli/libs/dyn"
|
||||
)
|
||||
|
||||
// pythonDiagnostic is a single entry in diagnostics.json
|
||||
type pythonDiagnostic struct {
|
||||
Severity pythonSeverity `json:"severity"`
|
||||
Summary string `json:"summary"`
|
||||
|
|
|
@ -0,0 +1,194 @@
|
|||
package python
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
)
|
||||
|
||||
// generatedFileName is used as the virtual file name for YAML generated by Python code.
|
||||
//
|
||||
// mergePythonLocations replaces any dyn.Location that points at generatedFileName with locations loaded
|
||||
// from locations.json
|
||||
const generatedFileName = "__generated_by_python__.yml"
|
||||
|
||||
// pythonLocations is data structure for efficient location lookup for a given path
|
||||
//
|
||||
// Locations form a tree, and we assign locations of the closest ancestor to each dyn.Value based on its path.
|
||||
// We implement it as a trie (prefix tree) where keys are components of the path. With that, lookups are O(n)
|
||||
// where n is the number of components in the path.
|
||||
//
|
||||
// For example, with locations.json:
|
||||
//
|
||||
// {"path": "resources.jobs.job_0", "file": "resources/job_0.py", "line": 3, "column": 5}
|
||||
// {"path": "resources.jobs.job_0.tasks[0].task_key", "file": "resources/job_0.py", "line": 10, "column": 5}
|
||||
// {"path": "resources.jobs.job_1", "file": "resources/job_1.py", "line": 5, "column": 7}
|
||||
//
|
||||
// - resources.jobs.job_0.tasks[0].task_key is located at job_0.py:10:5
|
||||
//
|
||||
// - resources.jobs.job_0.tasks[0].email_notifications is located at job_0.py:3:5,
|
||||
// because we use the location of the job as the most precise approximation.
|
||||
//
|
||||
// See pythonLocationEntry for the structure of a single entry in locations.json
|
||||
type pythonLocations struct {
|
||||
// descendants referenced by key, e.g. '.foo'
|
||||
keys map[string]*pythonLocations
|
||||
|
||||
// descendants referenced by index, e.g. '[0]'
|
||||
indexes map[int]*pythonLocations
|
||||
|
||||
// location for the current node if it exists
|
||||
location dyn.Location
|
||||
|
||||
// if true, location is present
|
||||
exists bool
|
||||
}
|
||||
|
||||
// pythonLocationEntry is a single entry in locations.json
|
||||
type pythonLocationEntry struct {
|
||||
Path string `json:"path"`
|
||||
File string `json:"file"`
|
||||
Line int `json:"line"`
|
||||
Column int `json:"column"`
|
||||
}
|
||||
|
||||
// mergePythonLocations applies locations from Python mutator into given dyn.Value
|
||||
//
|
||||
// The primary use-case is to merge locations.json with output.json, so that any
|
||||
// validation errors will point to Python source code instead of generated YAML.
|
||||
func mergePythonLocations(value dyn.Value, locations *pythonLocations) (dyn.Value, error) {
|
||||
return dyn.Walk(value, func(path dyn.Path, value dyn.Value) (dyn.Value, error) {
|
||||
newLocation, ok := findPythonLocation(locations, path)
|
||||
if !ok {
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// The first item in the list is the "last" location used for error reporting
|
||||
//
|
||||
// Loaded YAML uses the virtual file path as its location; we remove any such references,
|
||||
// because they should use 'newLocation' instead.
|
||||
//
|
||||
// We preserve any previous non-virtual locations for the case where a Python function modified
|
||||
// a resource defined in YAML.
|
||||
newLocations := append(
|
||||
[]dyn.Location{newLocation},
|
||||
removeVirtualLocations(value.Locations())...,
|
||||
)
|
||||
|
||||
return value.WithLocations(newLocations), nil
|
||||
})
|
||||
}
|
||||
|
||||
func removeVirtualLocations(locations []dyn.Location) []dyn.Location {
|
||||
var newLocations []dyn.Location
|
||||
|
||||
for _, location := range locations {
|
||||
if filepath.Base(location.File) == generatedFileName {
|
||||
continue
|
||||
}
|
||||
|
||||
newLocations = append(newLocations, location)
|
||||
}
|
||||
|
||||
return newLocations
|
||||
}
|
||||
|
||||
// parsePythonLocations parses locations.json from the Python mutator.
|
||||
//
|
||||
// The locations file consists of newline-separated JSON objects, each with the pythonLocationEntry structure.
|
||||
func parsePythonLocations(input io.Reader) (*pythonLocations, error) {
|
||||
decoder := json.NewDecoder(input)
|
||||
locations := newPythonLocations()
|
||||
|
||||
for decoder.More() {
|
||||
var entry pythonLocationEntry
|
||||
|
||||
err := decoder.Decode(&entry)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse python location: %s", err)
|
||||
}
|
||||
|
||||
path, err := dyn.NewPathFromString(entry.Path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse python location: %s", err)
|
||||
}
|
||||
|
||||
location := dyn.Location{
|
||||
File: entry.File,
|
||||
Line: entry.Line,
|
||||
Column: entry.Column,
|
||||
}
|
||||
|
||||
putPythonLocation(locations, path, location)
|
||||
}
|
||||
|
||||
return locations, nil
|
||||
}
|
||||
|
||||
// putPythonLocation puts the location to the trie for the given path
|
||||
func putPythonLocation(trie *pythonLocations, path dyn.Path, location dyn.Location) {
|
||||
currentNode := trie
|
||||
|
||||
for _, component := range path {
|
||||
if key := component.Key(); key != "" {
|
||||
if _, ok := currentNode.keys[key]; !ok {
|
||||
currentNode.keys[key] = newPythonLocations()
|
||||
}
|
||||
|
||||
currentNode = currentNode.keys[key]
|
||||
} else {
|
||||
index := component.Index()
|
||||
if _, ok := currentNode.indexes[index]; !ok {
|
||||
currentNode.indexes[index] = newPythonLocations()
|
||||
}
|
||||
|
||||
currentNode = currentNode.indexes[index]
|
||||
}
|
||||
}
|
||||
|
||||
currentNode.location = location
|
||||
currentNode.exists = true
|
||||
}
|
||||
|
||||
// newPythonLocations creates a new trie node
|
||||
func newPythonLocations() *pythonLocations {
|
||||
return &pythonLocations{
|
||||
keys: make(map[string]*pythonLocations),
|
||||
indexes: make(map[int]*pythonLocations),
|
||||
}
|
||||
}
|
||||
|
||||
// findPythonLocation finds the location or closest ancestor location in the trie for the given path
|
||||
// If neither an exact nor an ancestor location is found, false is returned.
|
||||
func findPythonLocation(locations *pythonLocations, path dyn.Path) (dyn.Location, bool) {
|
||||
currentNode := locations
|
||||
lastLocation := locations.location
|
||||
exists := locations.exists
|
||||
|
||||
for _, component := range path {
|
||||
if key := component.Key(); key != "" {
|
||||
if _, ok := currentNode.keys[key]; !ok {
|
||||
break
|
||||
}
|
||||
|
||||
currentNode = currentNode.keys[key]
|
||||
} else {
|
||||
index := component.Index()
|
||||
if _, ok := currentNode.indexes[index]; !ok {
|
||||
break
|
||||
}
|
||||
|
||||
currentNode = currentNode.indexes[index]
|
||||
}
|
||||
|
||||
if currentNode.exists {
|
||||
lastLocation = currentNode.location
|
||||
exists = true
|
||||
}
|
||||
}
|
||||
|
||||
return lastLocation, exists
|
||||
}
|
|
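A compact usage sketch of the closest-ancestor lookup documented above, written as if it lived inside this package (it assumes the helpers and imports of this file; the demo function name is made up, and the tests in the next hunk assert the same behavior):

func demoClosestAncestorLookup() {
	locations := newPythonLocations()
	putPythonLocation(locations,
		dyn.MustPathFromString("resources.jobs.job_0"),
		dyn.Location{File: "resources/job_0.py", Line: 3, Column: 5})
	putPythonLocation(locations,
		dyn.MustPathFromString("resources.jobs.job_0.tasks[0].task_key"),
		dyn.Location{File: "resources/job_0.py", Line: 10, Column: 5})

	// Exact node: task_key carries its own location.
	loc, ok := findPythonLocation(locations, dyn.MustPathFromString("resources.jobs.job_0.tasks[0].task_key"))
	fmt.Println(ok, loc.File, loc.Line, loc.Column) // true resources/job_0.py 10 5

	// No exact node: fall back to the closest ancestor, the job itself.
	loc, ok = findPythonLocation(locations, dyn.MustPathFromString("resources.jobs.job_0.tasks[0].email_notifications"))
	fmt.Println(ok, loc.File, loc.Line, loc.Column) // true resources/job_0.py 3 5
}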
@ -0,0 +1,179 @@
|
|||
package python
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
assert "github.com/databricks/cli/libs/dyn/dynassert"
|
||||
)
|
||||
|
||||
func TestMergeLocations(t *testing.T) {
|
||||
pythonLocation := dyn.Location{File: "foo.py", Line: 1, Column: 1}
|
||||
generatedLocation := dyn.Location{File: generatedFileName, Line: 1, Column: 1}
|
||||
yamlLocation := dyn.Location{File: "foo.yml", Line: 1, Column: 1}
|
||||
|
||||
locations := newPythonLocations()
|
||||
putPythonLocation(locations, dyn.MustPathFromString("foo"), pythonLocation)
|
||||
|
||||
input := dyn.NewValue(
|
||||
map[string]dyn.Value{
|
||||
"foo": dyn.NewValue(
|
||||
map[string]dyn.Value{
|
||||
"baz": dyn.NewValue("baz", []dyn.Location{yamlLocation}),
|
||||
"qux": dyn.NewValue("baz", []dyn.Location{generatedLocation, yamlLocation}),
|
||||
},
|
||||
[]dyn.Location{},
|
||||
),
|
||||
"bar": dyn.NewValue("baz", []dyn.Location{generatedLocation}),
|
||||
},
|
||||
[]dyn.Location{yamlLocation},
|
||||
)
|
||||
|
||||
expected := dyn.NewValue(
|
||||
map[string]dyn.Value{
|
||||
"foo": dyn.NewValue(
|
||||
map[string]dyn.Value{
|
||||
// pythonLocation is prepended to the list if absent
|
||||
"baz": dyn.NewValue("baz", []dyn.Location{pythonLocation, yamlLocation}),
|
||||
// generatedLocation is replaced by pythonLocation
|
||||
"qux": dyn.NewValue("baz", []dyn.Location{pythonLocation, yamlLocation}),
|
||||
},
|
||||
[]dyn.Location{pythonLocation},
|
||||
),
|
||||
// if location is unknown, we keep it as-is
|
||||
"bar": dyn.NewValue("baz", []dyn.Location{generatedLocation}),
|
||||
},
|
||||
[]dyn.Location{yamlLocation},
|
||||
)
|
||||
|
||||
actual, err := mergePythonLocations(input, locations)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestFindLocation(t *testing.T) {
|
||||
location0 := dyn.Location{File: "foo.py", Line: 1, Column: 1}
|
||||
location1 := dyn.Location{File: "foo.py", Line: 2, Column: 1}
|
||||
|
||||
locations := newPythonLocations()
|
||||
putPythonLocation(locations, dyn.MustPathFromString("foo"), location0)
|
||||
putPythonLocation(locations, dyn.MustPathFromString("foo.bar"), location1)
|
||||
|
||||
actual, exists := findPythonLocation(locations, dyn.MustPathFromString("foo.bar"))
|
||||
|
||||
assert.True(t, exists)
|
||||
assert.Equal(t, location1, actual)
|
||||
}
|
||||
|
||||
func TestFindLocation_indexPathComponent(t *testing.T) {
|
||||
location0 := dyn.Location{File: "foo.py", Line: 1, Column: 1}
|
||||
location1 := dyn.Location{File: "foo.py", Line: 2, Column: 1}
|
||||
location2 := dyn.Location{File: "foo.py", Line: 3, Column: 1}
|
||||
|
||||
locations := newPythonLocations()
|
||||
putPythonLocation(locations, dyn.MustPathFromString("foo"), location0)
|
||||
putPythonLocation(locations, dyn.MustPathFromString("foo.bar"), location1)
|
||||
putPythonLocation(locations, dyn.MustPathFromString("foo.bar[0]"), location2)
|
||||
|
||||
actual, exists := findPythonLocation(locations, dyn.MustPathFromString("foo.bar[0]"))
|
||||
|
||||
assert.True(t, exists)
|
||||
assert.Equal(t, location2, actual)
|
||||
}
|
||||
|
||||
func TestFindLocation_closestAncestorLocation(t *testing.T) {
|
||||
location0 := dyn.Location{File: "foo.py", Line: 1, Column: 1}
|
||||
location1 := dyn.Location{File: "foo.py", Line: 2, Column: 1}
|
||||
|
||||
locations := newPythonLocations()
|
||||
putPythonLocation(locations, dyn.MustPathFromString("foo"), location0)
|
||||
putPythonLocation(locations, dyn.MustPathFromString("foo.bar"), location1)
|
||||
|
||||
actual, exists := findPythonLocation(locations, dyn.MustPathFromString("foo.bar.baz"))
|
||||
|
||||
assert.True(t, exists)
|
||||
assert.Equal(t, location1, actual)
|
||||
}
|
||||
|
||||
func TestFindLocation_unknownLocation(t *testing.T) {
|
||||
location0 := dyn.Location{File: "foo.py", Line: 1, Column: 1}
|
||||
location1 := dyn.Location{File: "foo.py", Line: 2, Column: 1}
|
||||
|
||||
locations := newPythonLocations()
|
||||
putPythonLocation(locations, dyn.MustPathFromString("foo"), location0)
|
||||
putPythonLocation(locations, dyn.MustPathFromString("foo.bar"), location1)
|
||||
|
||||
_, exists := findPythonLocation(locations, dyn.MustPathFromString("bar"))
|
||||
|
||||
assert.False(t, exists)
|
||||
}
|
||||
|
||||
func TestLoadOutput(t *testing.T) {
|
||||
location := dyn.Location{File: "my_job.py", Line: 1, Column: 1}
|
||||
bundleRoot := t.TempDir()
|
||||
output := `{
|
||||
"resources": {
|
||||
"jobs": {
|
||||
"my_job": {
|
||||
"name": "my_job",
|
||||
"tasks": [
|
||||
{
|
||||
"task_key": "my_task",
|
||||
"notebook_task": {
|
||||
"notebook_path": "my_notebook"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
locations := newPythonLocations()
|
||||
putPythonLocation(
|
||||
locations,
|
||||
dyn.MustPathFromString("resources.jobs.my_job"),
|
||||
location,
|
||||
)
|
||||
|
||||
value, diags := loadOutput(
|
||||
bundleRoot,
|
||||
bytes.NewReader([]byte(output)),
|
||||
locations,
|
||||
)
|
||||
|
||||
assert.Equal(t, diag.Diagnostics{}, diags)
|
||||
|
||||
name, err := dyn.Get(value, "resources.jobs.my_job.name")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []dyn.Location{location}, name.Locations())
|
||||
|
||||
// until we implement path normalization, we have to keep locations of values
|
||||
// whose semantics change depending on their location
|
||||
//
|
||||
// note: it's important to use an absolute path that includes 'bundleRoot'
|
||||
// because the mutator pipeline has already expanded locations into absolute paths
|
||||
notebookPath, err := dyn.Get(value, "resources.jobs.my_job.tasks[0].notebook_task.notebook_path")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, notebookPath.Locations(), 1)
|
||||
require.Equal(t, filepath.Join(bundleRoot, generatedFileName), notebookPath.Locations()[0].File)
|
||||
}
|
||||
|
||||
func TestParsePythonLocations(t *testing.T) {
|
||||
expected := dyn.Location{File: "foo.py", Line: 1, Column: 2}
|
||||
|
||||
input := `{"path": "foo", "file": "foo.py", "line": 1, "column": 2}`
|
||||
reader := bytes.NewReader([]byte(input))
|
||||
locations, err := parsePythonLocations(reader)
|
||||
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.True(t, locations.keys["foo"].exists)
|
||||
assert.Equal(t, expected, locations.keys["foo"].location)
|
||||
}
|
|
@ -7,11 +7,14 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/mutator/paths"
|
||||
|
||||
"github.com/databricks/databricks-sdk-go/logger"
|
||||
"github.com/fatih/color"
|
||||
|
||||
|
@ -124,6 +127,15 @@ type opts struct {
|
|||
enabled bool
|
||||
|
||||
venvPath string
|
||||
|
||||
loadLocations bool
|
||||
}
|
||||
|
||||
type runPythonMutatorOpts struct {
|
||||
cacheDir string
|
||||
bundleRootPath string
|
||||
pythonPath string
|
||||
loadLocations bool
|
||||
}
|
||||
|
||||
// getOpts adapts deprecated PyDABs and upcoming Python configuration
|
||||
|
@ -148,8 +160,9 @@ func getOpts(b *bundle.Bundle, phase phase) (opts, error) {
|
|||
// don't execute for phases for 'python' section
|
||||
if phase == PythonMutatorPhaseInit || phase == PythonMutatorPhaseLoad {
|
||||
return opts{
|
||||
enabled: true,
|
||||
venvPath: experimental.PyDABs.VEnvPath,
|
||||
enabled: true,
|
||||
venvPath: experimental.PyDABs.VEnvPath,
|
||||
loadLocations: false, // not supported in PyDABs
|
||||
}, nil
|
||||
} else {
|
||||
return opts{}, nil
|
||||
|
@ -158,8 +171,9 @@ func getOpts(b *bundle.Bundle, phase phase) (opts, error) {
|
|||
// don't execute for phases for 'pydabs' section
|
||||
if phase == PythonMutatorPhaseLoadResources || phase == PythonMutatorPhaseApplyMutators {
|
||||
return opts{
|
||||
enabled: true,
|
||||
venvPath: experimental.Python.VEnvPath,
|
||||
enabled: true,
|
||||
venvPath: experimental.Python.VEnvPath,
|
||||
loadLocations: true,
|
||||
}, nil
|
||||
} else {
|
||||
return opts{}, nil
|
||||
|
@ -194,7 +208,12 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
|
|||
return dyn.InvalidValue, fmt.Errorf("failed to create cache dir: %w", err)
|
||||
}
|
||||
|
||||
rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.BundleRootPath, pythonPath, leftRoot)
|
||||
rightRoot, diags := m.runPythonMutator(ctx, leftRoot, runPythonMutatorOpts{
|
||||
cacheDir: cacheDir,
|
||||
bundleRootPath: b.BundleRootPath,
|
||||
pythonPath: pythonPath,
|
||||
loadLocations: opts.loadLocations,
|
||||
})
|
||||
mutateDiags = diags
|
||||
if diags.HasError() {
|
||||
return dyn.InvalidValue, mutateDiagsHasError
|
||||
|
@ -238,13 +257,14 @@ func createCacheDir(ctx context.Context) (string, error) {
|
|||
return os.MkdirTemp("", "-python")
|
||||
}
|
||||
|
||||
func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) {
|
||||
inputPath := filepath.Join(cacheDir, "input.json")
|
||||
outputPath := filepath.Join(cacheDir, "output.json")
|
||||
diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json")
|
||||
func (m *pythonMutator) runPythonMutator(ctx context.Context, root dyn.Value, opts runPythonMutatorOpts) (dyn.Value, diag.Diagnostics) {
|
||||
inputPath := filepath.Join(opts.cacheDir, "input.json")
|
||||
outputPath := filepath.Join(opts.cacheDir, "output.json")
|
||||
diagnosticsPath := filepath.Join(opts.cacheDir, "diagnostics.json")
|
||||
locationsPath := filepath.Join(opts.cacheDir, "locations.json")
|
||||
|
||||
args := []string{
|
||||
pythonPath,
|
||||
opts.pythonPath,
|
||||
"-m",
|
||||
"databricks.bundles.build",
|
||||
"--phase",
|
||||
|
@ -257,6 +277,10 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath
|
|||
diagnosticsPath,
|
||||
}
|
||||
|
||||
if opts.loadLocations {
|
||||
args = append(args, "--locations", locationsPath)
|
||||
}
|
||||
|
||||
if err := writeInputFile(inputPath, root); err != nil {
|
||||
return dyn.InvalidValue, diag.Errorf("failed to write input file: %s", err)
|
||||
}
|
||||
|
@ -271,7 +295,7 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath
|
|||
_, processErr := process.Background(
|
||||
ctx,
|
||||
args,
|
||||
process.WithDir(rootPath),
|
||||
process.WithDir(opts.bundleRootPath),
|
||||
process.WithStderrWriter(stderrWriter),
|
||||
process.WithStdoutWriter(stdoutWriter),
|
||||
)
|
||||
|
@ -307,7 +331,12 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath
|
|||
return dyn.InvalidValue, diag.Errorf("failed to load diagnostics: %s", pythonDiagnosticsErr)
|
||||
}
|
||||
|
||||
output, outputDiags := loadOutputFile(rootPath, outputPath)
|
||||
locations, err := loadLocationsFile(locationsPath)
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, diag.Errorf("failed to load locations: %s", err)
|
||||
}
|
||||
|
||||
output, outputDiags := loadOutputFile(opts.bundleRootPath, outputPath, locations)
|
||||
pythonDiagnostics = pythonDiagnostics.Extend(outputDiags)
|
||||
|
||||
// we pass through pythonDiagnostic because it contains warnings
|
||||
|
@ -351,7 +380,21 @@ func writeInputFile(inputPath string, input dyn.Value) error {
|
|||
return os.WriteFile(inputPath, rootConfigJson, 0o600)
|
||||
}
|
||||
|
||||
func loadOutputFile(rootPath, outputPath string) (dyn.Value, diag.Diagnostics) {
|
||||
// loadLocationsFile loads locations.json containing source locations for generated YAML.
|
||||
func loadLocationsFile(locationsPath string) (*pythonLocations, error) {
|
||||
locationsFile, err := os.Open(locationsPath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return newPythonLocations(), nil
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("failed to open locations file: %w", err)
|
||||
}
|
||||
|
||||
defer locationsFile.Close()
|
||||
|
||||
return parsePythonLocations(locationsFile)
|
||||
}
|
||||
|
||||
func loadOutputFile(rootPath, outputPath string, locations *pythonLocations) (dyn.Value, diag.Diagnostics) {
|
||||
outputFile, err := os.Open(outputPath)
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to open output file: %w", err))
|
||||
|
@ -359,15 +402,19 @@ func loadOutputFile(rootPath, outputPath string) (dyn.Value, diag.Diagnostics) {
|
|||
|
||||
defer outputFile.Close()
|
||||
|
||||
return loadOutput(rootPath, outputFile, locations)
|
||||
}
|
||||
|
||||
func loadOutput(rootPath string, outputFile io.Reader, locations *pythonLocations) (dyn.Value, diag.Diagnostics) {
|
||||
// we need absolute path because later parts of pipeline assume all paths are absolute
|
||||
// and this file will be used as location to resolve relative paths.
|
||||
//
|
||||
// virtualPath has to stay in rootPath, because locations outside root path are not allowed:
|
||||
// virtualPath has to stay in bundleRootPath, because locations outside root path are not allowed:
|
||||
//
|
||||
// Error: path /var/folders/.../python/dist/*.whl is not contained in bundle root path
|
||||
//
|
||||
// for that, we pass virtualPath instead of outputPath as file location
|
||||
virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_python__.yml"))
|
||||
virtualPath, err := filepath.Abs(filepath.Join(rootPath, generatedFileName))
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to get absolute path: %w", err))
|
||||
}
|
||||
|
@ -377,7 +424,29 @@ func loadOutputFile(rootPath, outputPath string) (dyn.Value, diag.Diagnostics) {
|
|||
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to parse output file: %w", err))
|
||||
}
|
||||
|
||||
return strictNormalize(config.Root{}, generated)
|
||||
// paths are resolved relative to the locations of their values; if we change a location,
|
||||
// we have to update each path. Until we simplify that, we don't update locations
|
||||
// for such values, so we don't change how paths are resolved
|
||||
//
|
||||
// we can remove this once we:
|
||||
// - add variable interpolation before and after PythonMutator
|
||||
// - implement path normalization (aka path normal form)
|
||||
_, err = paths.VisitJobPaths(generated, func(p dyn.Path, kind paths.PathKind, v dyn.Value) (dyn.Value, error) {
|
||||
putPythonLocation(locations, p, v.Location())
|
||||
return v, nil
|
||||
})
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to update locations: %w", err))
|
||||
}
|
||||
|
||||
// 'generated' has dyn.Location values as if they came from the generated YAML file;
|
||||
// earlier we loaded locations.json with source locations in Python code
|
||||
generatedWithLocations, err := mergePythonLocations(generated, locations)
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to update locations: %w", err))
|
||||
}
|
||||
|
||||
return strictNormalize(config.Root{}, generatedWithLocations)
|
||||
}
|
||||
|
||||
func strictNormalize(dst any, generated dyn.Value) (dyn.Value, diag.Diagnostics) {
|
||||
|
|
|
@ -7,7 +7,6 @@ import (
|
|||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
|
@ -93,6 +92,8 @@ func TestPythonMutator_loadResources(t *testing.T) {
|
|||
}
|
||||
}`,
|
||||
`{"severity": "warning", "summary": "job doesn't have any tasks", "location": {"file": "src/examples/file.py", "line": 10, "column": 5}}`,
|
||||
`{"path": "resources.jobs.job0", "file": "src/examples/job0.py", "line": 3, "column": 5}
|
||||
{"path": "resources.jobs.job1", "file": "src/examples/job1.py", "line": 5, "column": 7}`,
|
||||
)
|
||||
|
||||
mutator := PythonMutator(PythonMutatorPhaseLoadResources)
|
||||
|
@ -110,6 +111,25 @@ func TestPythonMutator_loadResources(t *testing.T) {
|
|||
assert.Equal(t, "job_1", job1.Name)
|
||||
}
|
||||
|
||||
// output of locations.json should be applied to underlying dyn.Value
|
||||
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||
name1, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job1.name"))
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, err
|
||||
}
|
||||
|
||||
assert.Equal(t, []dyn.Location{
|
||||
{
|
||||
File: "src/examples/job1.py",
|
||||
Line: 5,
|
||||
Column: 7,
|
||||
},
|
||||
}, name1.Locations())
|
||||
|
||||
return v, nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 1, len(diags))
|
||||
assert.Equal(t, "job doesn't have any tasks", diags[0].Summary)
|
||||
assert.Equal(t, []dyn.Location{
|
||||
|
@ -157,7 +177,7 @@ func TestPythonMutator_loadResources_disallowed(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
}`, "")
|
||||
}`, "", "")
|
||||
|
||||
mutator := PythonMutator(PythonMutatorPhaseLoadResources)
|
||||
diag := bundle.Apply(ctx, b, mutator)
|
||||
|
@ -202,7 +222,7 @@ func TestPythonMutator_applyMutators(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
}`, "")
|
||||
}`, "", "")
|
||||
|
||||
mutator := PythonMutator(PythonMutatorPhaseApplyMutators)
|
||||
diag := bundle.Apply(ctx, b, mutator)
|
||||
|
@ -224,7 +244,7 @@ func TestPythonMutator_applyMutators(t *testing.T) {
|
|||
description, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.description"))
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedVirtualPath, err := filepath.Abs("__generated_by_python__.yml")
|
||||
expectedVirtualPath, err := filepath.Abs(generatedFileName)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedVirtualPath, description.Location().File)
|
||||
|
||||
|
@ -263,7 +283,7 @@ func TestPythonMutator_badOutput(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
}`, "")
|
||||
}`, "", "")
|
||||
|
||||
mutator := PythonMutator(PythonMutatorPhaseLoadResources)
|
||||
diag := bundle.Apply(ctx, b, mutator)
|
||||
|
@ -312,7 +332,7 @@ func TestGetOps_Python(t *testing.T) {
|
|||
}, PythonMutatorPhaseLoadResources)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, opts{venvPath: ".venv", enabled: true}, actual)
|
||||
assert.Equal(t, opts{venvPath: ".venv", enabled: true, loadLocations: true}, actual)
|
||||
}
|
||||
|
||||
func TestGetOps_PyDABs(t *testing.T) {
|
||||
|
@ -328,7 +348,7 @@ func TestGetOps_PyDABs(t *testing.T) {
|
|||
}, PythonMutatorPhaseInit)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, opts{venvPath: ".venv", enabled: true}, actual)
|
||||
assert.Equal(t, opts{venvPath: ".venv", enabled: true, loadLocations: false}, actual)
|
||||
}
|
||||
|
||||
func TestGetOps_empty(t *testing.T) {
|
||||
|
@ -661,7 +681,7 @@ or activate the environment before running CLI commands:
|
|||
assert.Equal(t, expected, out)
|
||||
}
|
||||
|
||||
func withProcessStub(t *testing.T, args []string, output, diagnostics string) context.Context {
|
||||
func withProcessStub(t *testing.T, args []string, output, diagnostics, locations string) context.Context {
|
||||
ctx := context.Background()
|
||||
ctx, stub := process.WithStub(ctx)
|
||||
|
||||
|
@ -673,32 +693,51 @@ func withProcessStub(t *testing.T, args []string, output, diagnostics string) co
|
|||
|
||||
inputPath := filepath.Join(cacheDir, "input.json")
|
||||
outputPath := filepath.Join(cacheDir, "output.json")
|
||||
locationsPath := filepath.Join(cacheDir, "locations.json")
|
||||
diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json")
|
||||
|
||||
args = append(args, "--input", inputPath)
|
||||
args = append(args, "--output", outputPath)
|
||||
args = append(args, "--diagnostics", diagnosticsPath)
|
||||
|
||||
stub.WithCallback(func(actual *exec.Cmd) error {
|
||||
_, err := os.Stat(inputPath)
|
||||
assert.NoError(t, err)
|
||||
|
||||
if reflect.DeepEqual(actual.Args, args) {
|
||||
err := os.WriteFile(outputPath, []byte(output), 0o600)
|
||||
require.NoError(t, err)
|
||||
actualInputPath := getArg(actual.Args, "--input")
|
||||
actualOutputPath := getArg(actual.Args, "--output")
|
||||
actualDiagnosticsPath := getArg(actual.Args, "--diagnostics")
|
||||
actualLocationsPath := getArg(actual.Args, "--locations")
|
||||
|
||||
err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0o600)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, inputPath, actualInputPath)
|
||||
require.Equal(t, outputPath, actualOutputPath)
|
||||
require.Equal(t, diagnosticsPath, actualDiagnosticsPath)
|
||||
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("unexpected command: %v", actual.Args)
|
||||
// locations is an optional argument
|
||||
if locations != "" {
|
||||
require.Equal(t, locationsPath, actualLocationsPath)
|
||||
|
||||
err = os.WriteFile(locationsPath, []byte(locations), 0o600)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err = os.WriteFile(outputPath, []byte(output), 0o600)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0o600)
|
||||
require.NoError(t, err)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
func getArg(args []string, name string) string {
|
||||
for i := range args {
|
||||
if args[i] == name {
|
||||
return args[i+1]
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func loadYaml(name, content string) *bundle.Bundle {
|
||||
v, diag := config.LoadFromBytes(name, []byte(content))
|
||||
|
||||
|
|
|
@ -3,11 +3,14 @@ package mutator
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config/variable"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/cli/libs/dyn/jsonloader"
|
||||
"github.com/databricks/cli/libs/env"
|
||||
)
|
||||
|
||||
|
@ -23,7 +26,11 @@ func (m *setVariables) Name() string {
|
|||
return "SetVariables"
|
||||
}
|
||||
|
||||
func setVariable(ctx context.Context, v dyn.Value, variable *variable.Variable, name string) (dyn.Value, error) {
|
||||
func getDefaultVariableFilePath(target string) string {
|
||||
return ".databricks/bundle/" + target + "/variable-overrides.json"
|
||||
}
|
||||
|
||||
func setVariable(ctx context.Context, v dyn.Value, variable *variable.Variable, name string, fileDefault dyn.Value) (dyn.Value, error) {
|
||||
// case: variable already has value initialized, so skip
|
||||
if variable.HasValue() {
|
||||
return v, nil
|
||||
|
@ -49,6 +56,26 @@ func setVariable(ctx context.Context, v dyn.Value, variable *variable.Variable,
|
|||
return v, nil
|
||||
}
|
||||
|
||||
// case: Set the variable to the default value from the variable file
|
||||
if fileDefault.Kind() != dyn.KindInvalid && fileDefault.Kind() != dyn.KindNil {
|
||||
hasComplexType := variable.IsComplex()
|
||||
hasComplexValue := fileDefault.Kind() == dyn.KindMap || fileDefault.Kind() == dyn.KindSequence
|
||||
|
||||
if hasComplexType && !hasComplexValue {
|
||||
return dyn.InvalidValue, fmt.Errorf(`variable %s is of type complex, but the value in the variable file is not a complex type`, name)
|
||||
}
|
||||
if !hasComplexType && hasComplexValue {
|
||||
return dyn.InvalidValue, fmt.Errorf(`variable %s is not of type complex, but the value in the variable file is a complex type`, name)
|
||||
}
|
||||
|
||||
v, err := dyn.Set(v, "value", fileDefault)
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, fmt.Errorf(`failed to assign default value from variable file to variable %s with error: %v`, name, err)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// case: Set the variable to its default value
|
||||
if variable.HasDefault() {
|
||||
vDefault, err := dyn.Get(v, "default")
|
||||
|
@ -64,10 +91,43 @@ func setVariable(ctx context.Context, v dyn.Value, variable *variable.Variable,
|
|||
}
|
||||
|
||||
// We should have had a value to set for the variable at this point.
|
||||
return dyn.InvalidValue, fmt.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name)
|
||||
return dyn.InvalidValue, fmt.Errorf(`no value assigned to required variable %s. Assignment can be done using "--var", by setting the %s environment variable, or in %s file`, name, bundleVarPrefix+name, getDefaultVariableFilePath("<target>"))
|
||||
}
|
||||
|
||||
func readVariablesFromFile(b *bundle.Bundle) (dyn.Value, diag.Diagnostics) {
|
||||
var diags diag.Diagnostics
|
||||
|
||||
filePath := filepath.Join(b.BundleRootPath, getDefaultVariableFilePath(b.Config.Bundle.Target))
|
||||
if _, err := os.Stat(filePath); err != nil {
|
||||
return dyn.InvalidValue, nil
|
||||
}
|
||||
|
||||
f, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to read variables file: %w", err))
|
||||
}
|
||||
|
||||
val, err := jsonloader.LoadJSON(f, filePath)
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to parse variables file %s: %w", filePath, err))
|
||||
}
|
||||
|
||||
if val.Kind() != dyn.KindMap {
|
||||
return dyn.InvalidValue, diags.Append(diag.Diagnostic{
|
||||
Severity: diag.Error,
|
||||
Summary: fmt.Sprintf("failed to parse variables file %s: invalid format", filePath),
|
||||
Detail: "Variables file must be a JSON object with the following format:\n{\"var1\": \"value1\", \"var2\": \"value2\"}",
|
||||
})
|
||||
}
|
||||
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
defaults, diags := readVariablesFromFile(b)
|
||||
if diags.HasError() {
|
||||
return diags
|
||||
}
|
||||
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||
return dyn.Map(v, "variables", dyn.Foreach(func(p dyn.Path, variable dyn.Value) (dyn.Value, error) {
|
||||
name := p[1].Key()
|
||||
|
@ -76,9 +136,10 @@ func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
|
|||
return dyn.InvalidValue, fmt.Errorf(`variable "%s" is not defined`, name)
|
||||
}
|
||||
|
||||
return setVariable(ctx, variable, v, name)
|
||||
fileDefault, _ := dyn.Get(defaults, name)
|
||||
return setVariable(ctx, variable, v, name, fileDefault)
|
||||
}))
|
||||
})
|
||||
|
||||
return diag.FromErr(err)
|
||||
return diags.Extend(diag.FromErr(err))
|
||||
}
|
||||
|
|
|
@ -25,7 +25,7 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) {
|
|||
v, err := convert.FromTyped(variable, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
v, err = setVariable(context.Background(), v, &variable, "foo")
|
||||
v, err = setVariable(context.Background(), v, &variable, "foo", dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = convert.ToTyped(&variable, v)
|
||||
|
@ -43,7 +43,7 @@ func TestSetVariableUsingDefaultValue(t *testing.T) {
|
|||
v, err := convert.FromTyped(variable, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
v, err = setVariable(context.Background(), v, &variable, "foo")
|
||||
v, err = setVariable(context.Background(), v, &variable, "foo", dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = convert.ToTyped(&variable, v)
|
||||
|
@ -65,7 +65,7 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) {
|
|||
v, err := convert.FromTyped(variable, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
v, err = setVariable(context.Background(), v, &variable, "foo")
|
||||
v, err = setVariable(context.Background(), v, &variable, "foo", dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = convert.ToTyped(&variable, v)
|
||||
|
@ -90,7 +90,7 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) {
|
|||
v, err := convert.FromTyped(variable, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
v, err = setVariable(context.Background(), v, &variable, "foo")
|
||||
v, err = setVariable(context.Background(), v, &variable, "foo", dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = convert.ToTyped(&variable, v)
|
||||
|
@ -107,8 +107,8 @@ func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) {
|
|||
v, err := convert.FromTyped(variable, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = setVariable(context.Background(), v, &variable, "foo")
|
||||
assert.ErrorContains(t, err, "no value assigned to required variable foo. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable")
|
||||
_, err = setVariable(context.Background(), v, &variable, "foo", dyn.NilValue)
|
||||
assert.ErrorContains(t, err, "no value assigned to required variable foo. Assignment can be done using \"--var\", by setting the BUNDLE_VAR_foo environment variable, or in .databricks/bundle/<target>/variable-overrides.json file")
|
||||
}
|
||||
|
||||
func TestSetVariablesMutator(t *testing.T) {
|
||||
|
@ -157,6 +157,6 @@ func TestSetComplexVariablesViaEnvVariablesIsNotAllowed(t *testing.T) {
|
|||
v, err := convert.FromTyped(variable, dyn.NilValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = setVariable(context.Background(), v, &variable, "foo")
|
||||
_, err = setVariable(context.Background(), v, &variable, "foo", dyn.NilValue)
|
||||
assert.ErrorContains(t, err, "setting via environment variables (BUNDLE_VAR_foo) is not supported for complex variable foo")
|
||||
}
|
||||
|
|
|
@ -36,11 +36,12 @@ type Variable struct {
|
|||
// This field stores the resolved value for the variable. Variables are
|
||||
// resolved in the following priority order (from highest to lowest)
|
||||
//
|
||||
// 1. Command line flag. For example: `--var="foo=bar"`
|
||||
// 2. Target variable. eg: BUNDLE_VAR_foo=bar
|
||||
// 3. Default value as defined in the applicable environments block
|
||||
// 4. Default value defined in variable definition
|
||||
// 5. Throw error, since if no default value is defined, then the variable
|
||||
// 1. Command line flag `--var="foo=bar"`
|
||||
// 2. Environment variable. eg: BUNDLE_VAR_foo=bar
|
||||
// 3. Load defaults from .databricks/bundle/<target>/variable-overrides.json
|
||||
// 4. Default value as defined in the applicable targets block
|
||||
// 5. Default value defined in variable definition
|
||||
// 6. Throw error, since if no default value is defined, then the variable
|
||||
// is required
|
||||
Value VariableValue `json:"value,omitempty" bundle:"readonly"`
|
||||
|
||||
|
|
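Item 3 in the priority list above reads defaults from .databricks/bundle/<target>/variable-overrides.json, a flat JSON object mapping variable names to values: complex variables take objects or arrays, simple variables take scalars, and a mismatch is rejected by setVariable. A minimal sketch of that shape check using only encoding/json (the variable names and values are made up for illustration; the real mutator goes through dyn/jsonloader as shown earlier):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical contents of .databricks/bundle/<target>/variable-overrides.json.
	raw := []byte(`{
		"warehouse_id": "abc123",
		"cluster": {"spark_version": "15.4.x-scala2.12", "num_workers": 2}
	}`)

	var overrides map[string]any
	if err := json.Unmarshal(raw, &overrides); err != nil {
		panic(err) // the mutator reports "invalid format" when the file is not a JSON object
	}

	for name, value := range overrides {
		switch value.(type) {
		case map[string]any, []any:
			fmt.Printf("%s: complex value, only valid for a complex variable\n", name)
		default:
			fmt.Printf("%s: scalar value, only valid for a simple variable\n", name)
		}
	}
}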
|
@ -419,7 +419,7 @@ func TestBundleToTerraformModelServing(t *testing.T) {
|
|||
src := resources.ModelServingEndpoint{
|
||||
CreateServingEndpoint: &serving.CreateServingEndpoint{
|
||||
Name: "name",
|
||||
Config: serving.EndpointCoreConfigInput{
|
||||
Config: &serving.EndpointCoreConfigInput{
|
||||
ServedModels: []serving.ServedModelInput{
|
||||
{
|
||||
ModelName: "model_name",
|
||||
|
@ -474,7 +474,7 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) {
|
|||
// and as such observed the `omitempty` tag.
|
||||
// The new method leverages [dyn.Value] where any field that is not
|
||||
// explicitly set is not part of the value.
|
||||
Config: serving.EndpointCoreConfigInput{
|
||||
Config: &serving.EndpointCoreConfigInput{
|
||||
ServedModels: []serving.ServedModelInput{
|
||||
{
|
||||
ModelName: "model_name",
|
||||
|
|
|
@ -54,7 +54,7 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con
|
|||
return tf.ExecPath, nil
|
||||
}
|
||||
|
||||
binDir, err := b.CacheDir(context.Background(), "bin")
|
||||
binDir, err := b.CacheDir(ctx, "bin")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -108,6 +108,14 @@ var envCopy = []string{
|
|||
// Include $TF_CLI_CONFIG_FILE to override terraform provider in development.
|
||||
// See: https://developer.hashicorp.com/terraform/cli/config/config-file#explicit-installation-method-configuration
|
||||
"TF_CLI_CONFIG_FILE",
|
||||
|
||||
// Include $USE_SDK_V2_RESOURCES and $USE_SDK_V2_DATA_SOURCES, these are used to switch back from plugin framework to SDKv2.
|
||||
// This is used for mitigation issues with resource migrated to plugin framework, as recommended here:
|
||||
// https://registry.terraform.io/providers/databricks/databricks/latest/docs/guides/troubleshooting#plugin-framework-migration-problems
|
||||
// It is currently a workaround for deploying quality_monitors
|
||||
// https://github.com/databricks/terraform-provider-databricks/issues/4229#issuecomment-2520344690
|
||||
"USE_SDK_V2_RESOURCES",
|
||||
"USE_SDK_V2_DATA_SOURCES",
|
||||
}
|
||||
|
||||
// This function inherits some environment variables for Terraform CLI.
|
||||
|
|
|
@ -38,6 +38,12 @@ func (appConverter) Convert(ctx context.Context, key string, vin dyn.Value, out
|
|||
return err
|
||||
}
|
||||
|
||||
// We always set no_compute to true so that DABs does not wait for app compute to start when the app is created.
|
||||
vout, err = dyn.Set(vout, "no_compute", dyn.V(true))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the converted resource to the output.
|
||||
out.App[key] = vout.AsAny()
|
||||
|
||||
|
|
|
@ -63,6 +63,7 @@ func TestConvertApp(t *testing.T) {
|
|||
assert.Equal(t, map[string]any{
|
||||
"description": "app description",
|
||||
"name": "app_id",
|
||||
"no_compute": true,
|
||||
"resources": []any{
|
||||
map[string]any{
|
||||
"name": "job1",
|
||||
|
@ -136,6 +137,7 @@ func TestConvertAppWithNoDescription(t *testing.T) {
|
|||
assert.Equal(t, map[string]any{
|
||||
"name": "app_id",
|
||||
"description": "", // Due to Apps API always returning a description field, we set it in the output as well to avoid permanent TF drift
|
||||
"no_compute": true,
|
||||
"resources": []any{
|
||||
map[string]any{
|
||||
"name": "job1",
|
||||
|
|
|
@ -17,7 +17,7 @@ func TestConvertModelServingEndpoint(t *testing.T) {
|
|||
src := resources.ModelServingEndpoint{
|
||||
CreateServingEndpoint: &serving.CreateServingEndpoint{
|
||||
Name: "name",
|
||||
Config: serving.EndpointCoreConfigInput{
|
||||
Config: &serving.EndpointCoreConfigInput{
|
||||
ServedModels: []serving.ServedModelInput{
|
||||
{
|
||||
ModelName: "model_name",
|
||||
|
|
|
@ -353,12 +353,12 @@ github.com/databricks/cli/bundle/config/resources.MlflowModel:
|
|||
github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint:
|
||||
"ai_gateway":
|
||||
"description": |-
|
||||
The AI Gateway configuration for the serving endpoint. NOTE: only external model endpoints are supported as of now.
|
||||
The AI Gateway configuration for the serving endpoint. NOTE: Only external model and provisioned throughput endpoints are currently supported.
|
||||
"config":
|
||||
"description": |-
|
||||
The core config of the serving endpoint.
|
||||
"name":
|
||||
"description": |
|
||||
"description": |-
|
||||
The name of the serving endpoint. This field is required and must be unique across a Databricks workspace.
|
||||
An endpoint name can consist of alphanumeric characters, dashes, and underscores.
|
||||
"rate_limits":
|
||||
|
@ -1974,6 +1974,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask:
|
|||
Parameters passed to the main method.
|
||||
|
||||
Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.
|
||||
"run_as_repl":
|
||||
"description": |-
|
||||
Deprecated. A value of `false` is no longer supported.
|
||||
github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask:
|
||||
"parameters":
|
||||
"description": |-
|
||||
|
@ -2684,27 +2687,36 @@ github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScd
|
|||
github.com/databricks/databricks-sdk-go/service/serving.Ai21LabsConfig:
|
||||
"ai21labs_api_key":
|
||||
"description": |-
|
||||
The Databricks secret key reference for an AI21 Labs API key. If you prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`.
|
||||
The Databricks secret key reference for an AI21 Labs API key. If you
|
||||
prefer to paste your API key directly, see `ai21labs_api_key_plaintext`.
|
||||
You must provide an API key using one of the following fields:
|
||||
`ai21labs_api_key` or `ai21labs_api_key_plaintext`.
|
||||
"ai21labs_api_key_plaintext":
|
||||
"description": |-
|
||||
An AI21 Labs API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `ai21labs_api_key`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`.
|
||||
An AI21 Labs API key provided as a plaintext string. If you prefer to
|
||||
reference your key using Databricks Secrets, see `ai21labs_api_key`. You
|
||||
must provide an API key using one of the following fields:
|
||||
`ai21labs_api_key` or `ai21labs_api_key_plaintext`.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayConfig:
|
||||
"guardrails":
|
||||
"description": |-
|
||||
Configuration for AI Guardrails to prevent unwanted data and unsafe data in requests and responses.
|
||||
"inference_table_config":
|
||||
"description": |-
|
||||
Configuration for payload logging using inference tables. Use these tables to monitor and audit data being sent to and received from model APIs and to improve model quality.
|
||||
Configuration for payload logging using inference tables.
|
||||
Use these tables to monitor and audit data being sent to and received from model APIs and to improve model quality.
|
||||
"rate_limits":
|
||||
"description": |-
|
||||
Configuration for rate limits which can be set to limit endpoint traffic.
|
||||
"usage_tracking_config":
|
||||
"description": |-
|
||||
Configuration to enable usage tracking using system tables. These tables allow you to monitor operational usage on endpoints and their associated costs.
|
||||
Configuration to enable usage tracking using system tables.
|
||||
These tables allow you to monitor operational usage on endpoints and their associated costs.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailParameters:
|
||||
"invalid_keywords":
|
||||
"description": |-
|
||||
List of invalid keywords. AI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content.
|
||||
List of invalid keywords.
|
||||
AI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content.
|
||||
"pii":
|
||||
"description": |-
|
||||
Configuration for guardrail PII filter.
|
||||
|
@ -2713,15 +2725,14 @@ github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailParame
|
|||
Indicates whether the safety filter is enabled.
|
||||
"valid_topics":
|
||||
"description": |-
|
||||
The list of allowed topics. Given a chat request, this guardrail flags the request if its topic is not in the allowed topics.
|
||||
The list of allowed topics.
|
||||
Given a chat request, this guardrail flags the request if its topic is not in the allowed topics.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehavior:
|
||||
"behavior":
|
||||
"description": |-
|
||||
Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.
|
||||
Configuration for input guardrail filters.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior:
|
||||
"_":
|
||||
"description": |-
|
||||
Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.
|
||||
"enum":
|
||||
- |-
|
||||
NONE
|
||||
|
@ -2737,30 +2748,32 @@ github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrails:
|
|||
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayInferenceTableConfig:
|
||||
"catalog_name":
|
||||
"description": |-
|
||||
The name of the catalog in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the catalog name.
|
||||
The name of the catalog in Unity Catalog. Required when enabling inference tables.
|
||||
NOTE: On update, you have to disable inference table first in order to change the catalog name.
|
||||
"enabled":
|
||||
"description": |-
|
||||
Indicates whether the inference table is enabled.
|
||||
"schema_name":
|
||||
"description": |-
|
||||
The name of the schema in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the schema name.
|
||||
The name of the schema in Unity Catalog. Required when enabling inference tables.
|
||||
NOTE: On update, you have to disable inference table first in order to change the schema name.
|
||||
"table_name_prefix":
|
||||
"description": |-
|
||||
The prefix of the table in Unity Catalog. NOTE: On update, you have to disable inference table first in order to change the prefix name.
|
||||
The prefix of the table in Unity Catalog.
|
||||
NOTE: On update, you have to disable inference table first in order to change the prefix name.
|
||||
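A minimal sketch of the inference table block using only the fields annotated in this hunk; catalog, schema, and prefix values are illustrative.

inference_table_config:
  enabled: true
  catalog_name: main                 # disable the table first to change this on update
  schema_name: ai_gateway_logs
  table_name_prefix: chat_endpoint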
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimit:
|
||||
"calls":
|
||||
"description": |-
|
||||
Used to specify how many calls are allowed for a key within the renewal_period.
|
||||
"key":
|
||||
"description": |-
|
||||
Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.
|
||||
Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported,
|
||||
with 'endpoint' being the default if not specified.
|
||||
"renewal_period":
|
||||
"description": |-
|
||||
Renewal period field for a rate limit. Currently, only 'minute' is supported.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey:
|
||||
"_":
|
||||
"description": |-
|
||||
Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.
|
||||
"enum":
|
||||
- |-
|
||||
user
|
||||
|
@ -2768,8 +2781,6 @@ github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey:
|
|||
endpoint
|
||||
github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod:
|
||||
"_":
|
||||
"description": |-
|
||||
Renewal period field for a rate limit. Currently, only 'minute' is supported.
|
||||
"enum":
|
||||
- |-
|
||||
minute
|
||||
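Putting the rate-limit and usage-tracking annotations together, a hedged sketch of an ai_gateway block; the `enabled` flag under usage_tracking_config is an assumption, since this hunk only describes the config object itself.

ai_gateway:
  rate_limits:
    - calls: 100                     # calls allowed per key within the renewal period
      key: user                      # 'user' or 'endpoint' (default: endpoint)
      renewal_period: minute         # only 'minute' is currently supported
  usage_tracking_config:
    enabled: true                    # assumed field name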
|
@ -2780,26 +2791,43 @@ github.com/databricks/databricks-sdk-go/service/serving.AiGatewayUsageTrackingCo
|
|||
github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfig:
|
||||
"aws_access_key_id":
|
||||
"description": |-
|
||||
The Databricks secret key reference for an AWS access key ID with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`.
|
||||
The Databricks secret key reference for an AWS access key ID with
|
||||
permissions to interact with Bedrock services. If you prefer to paste
|
||||
your API key directly, see `aws_access_key_id_plaintext`. You must provide an API
|
||||
key using one of the following fields: `aws_access_key_id` or
|
||||
`aws_access_key_id_plaintext`.
|
||||
"aws_access_key_id_plaintext":
|
||||
"description": |-
|
||||
An AWS access key ID with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`.
|
||||
An AWS access key ID with permissions to interact with Bedrock services
|
||||
provided as a plaintext string. If you prefer to reference your key using
|
||||
Databricks Secrets, see `aws_access_key_id`. You must provide an API key
|
||||
using one of the following fields: `aws_access_key_id` or
|
||||
`aws_access_key_id_plaintext`.
|
||||
"aws_region":
|
||||
"description": |-
|
||||
The AWS region to use. Bedrock has to be enabled there.
|
||||
"aws_secret_access_key":
|
||||
"description": |-
|
||||
The Databricks secret key reference for an AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_secret_access_key_plaintext`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`.
|
||||
The Databricks secret key reference for an AWS secret access key paired
|
||||
with the access key ID, with permissions to interact with Bedrock
|
||||
services. If you prefer to paste your API key directly, see
|
||||
`aws_secret_access_key_plaintext`. You must provide an API key using one
|
||||
of the following fields: `aws_secret_access_key` or
|
||||
`aws_secret_access_key_plaintext`.
|
||||
"aws_secret_access_key_plaintext":
|
||||
"description": |-
|
||||
An AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_secret_access_key`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`.
|
||||
An AWS secret access key paired with the access key ID, with permissions
|
||||
to interact with Bedrock services provided as a plaintext string. If you
|
||||
prefer to reference your key using Databricks Secrets, see
|
||||
`aws_secret_access_key`. You must provide an API key using one of the
|
||||
following fields: `aws_secret_access_key` or
|
||||
`aws_secret_access_key_plaintext`.
|
||||
"bedrock_provider":
|
||||
"description": |-
|
||||
The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.
|
||||
The underlying provider in Amazon Bedrock. Supported values (case
|
||||
insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.
|
||||
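An illustrative amazon_bedrock_config built from the fields above; the secret scope and key names are made up, and the `{{secrets/...}}` reference style follows the example shown later in this diff.

amazon_bedrock_config:
  aws_region: us-east-1
  aws_access_key_id: "{{secrets/my_scope/bedrock_access_key_id}}"
  aws_secret_access_key: "{{secrets/my_scope/bedrock_secret_access_key}}"
  bedrock_provider: anthropic        # anthropic, cohere, ai21labs, or amazon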
github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider:
|
||||
"_":
|
||||
"description": |-
|
||||
The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.
|
||||
"enum":
|
||||
- |-
|
||||
anthropic
|
||||
|
@ -2812,10 +2840,16 @@ github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedro
|
|||
github.com/databricks/databricks-sdk-go/service/serving.AnthropicConfig:
|
||||
"anthropic_api_key":
|
||||
"description": |-
|
||||
The Databricks secret key reference for an Anthropic API key. If you prefer to paste your API key directly, see `anthropic_api_key_plaintext`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`.
|
||||
The Databricks secret key reference for an Anthropic API key. If you
|
||||
prefer to paste your API key directly, see `anthropic_api_key_plaintext`.
|
||||
You must provide an API key using one of the following fields:
|
||||
`anthropic_api_key` or `anthropic_api_key_plaintext`.
|
||||
"anthropic_api_key_plaintext":
|
||||
"description": |-
|
||||
The Anthropic API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `anthropic_api_key`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`.
|
||||
The Anthropic API key provided as a plaintext string. If you prefer to
|
||||
reference your key using Databricks Secrets, see `anthropic_api_key`. You
|
||||
must provide an API key using one of the following fields:
|
||||
`anthropic_api_key` or `anthropic_api_key_plaintext`.
|
||||
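The same secret-reference-or-plaintext pattern applies to the Anthropic config; a short sketch with an assumed secret scope and key.

anthropic_config:
  anthropic_api_key: "{{secrets/my_scope/anthropic_api_key}}"
  # alternatively (not recommended outside local testing):
  # anthropic_api_key_plaintext: "sk-ant-..."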
github.com/databricks/databricks-sdk-go/service/serving.AutoCaptureConfigInput:
|
||||
"catalog_name":
|
||||
"description": |-
|
||||
|
@ -2831,42 +2865,58 @@ github.com/databricks/databricks-sdk-go/service/serving.AutoCaptureConfigInput:
|
|||
The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if the inference table is already enabled.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.CohereConfig:
|
||||
"cohere_api_base":
|
||||
"description": "This is an optional field to provide a customized base URL for the Cohere API. \nIf left unspecified, the standard Cohere base URL is used.\n"
|
||||
"description": |-
|
||||
This is an optional field to provide a customized base URL for the Cohere
|
||||
API. If left unspecified, the standard Cohere base URL is used.
|
||||
"cohere_api_key":
|
||||
"description": |-
|
||||
The Databricks secret key reference for a Cohere API key. If you prefer to paste your API key directly, see `cohere_api_key_plaintext`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`.
|
||||
The Databricks secret key reference for a Cohere API key. If you prefer
|
||||
to paste your API key directly, see `cohere_api_key_plaintext`. You must
|
||||
provide an API key using one of the following fields: `cohere_api_key` or
|
||||
`cohere_api_key_plaintext`.
|
||||
"cohere_api_key_plaintext":
|
||||
"description": |-
|
||||
The Cohere API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `cohere_api_key`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`.
|
||||
The Cohere API key provided as a plaintext string. If you prefer to
|
||||
reference your key using Databricks Secrets, see `cohere_api_key`. You
|
||||
must provide an API key using one of the following fields:
|
||||
`cohere_api_key` or `cohere_api_key_plaintext`.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.DatabricksModelServingConfig:
|
||||
"databricks_api_token":
|
||||
"description": |
|
||||
The Databricks secret key reference for a Databricks API token that corresponds to a user or service
|
||||
principal with Can Query access to the model serving endpoint pointed to by this external model.
|
||||
If you prefer to paste your API key directly, see `databricks_api_token_plaintext`.
|
||||
You must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.
|
||||
"description": |-
|
||||
The Databricks secret key reference for a Databricks API token that
|
||||
corresponds to a user or service principal with Can Query access to the
|
||||
model serving endpoint pointed to by this external model. If you prefer
|
||||
to paste your API key directly, see `databricks_api_token_plaintext`. You
|
||||
must provide an API key using one of the following fields:
|
||||
`databricks_api_token` or `databricks_api_token_plaintext`.
|
||||
"databricks_api_token_plaintext":
|
||||
"description": |
|
||||
The Databricks API token that corresponds to a user or service
|
||||
principal with Can Query access to the model serving endpoint pointed to by this external model provided as a plaintext string.
|
||||
If you prefer to reference your key using Databricks Secrets, see `databricks_api_token`.
|
||||
You must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.
|
||||
"description": |-
|
||||
The Databricks API token that corresponds to a user or service principal
|
||||
with Can Query access to the model serving endpoint pointed to by this
|
||||
external model provided as a plaintext string. If you prefer to reference
|
||||
your key using Databricks Secrets, see `databricks_api_token`. You must
|
||||
provide an API key using one of the following fields:
|
||||
`databricks_api_token` or `databricks_api_token_plaintext`.
|
||||
"databricks_workspace_url":
|
||||
"description": |
|
||||
The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.
|
||||
"description": |-
|
||||
The URL of the Databricks workspace containing the model serving endpoint
|
||||
pointed to by this external model.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.EndpointCoreConfigInput:
|
||||
"auto_capture_config":
|
||||
"description": |-
|
||||
Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.
|
||||
Note: this field is deprecated for creating new provisioned throughput endpoints,
|
||||
or updating existing provisioned throughput endpoints that never have inference table configured;
|
||||
in these cases please use AI Gateway to manage inference tables.
|
||||
"served_entities":
|
||||
"description": |-
|
||||
A list of served entities for the endpoint to serve. A serving endpoint can have up to 15 served entities.
|
||||
The list of served entities under the serving endpoint config.
|
||||
"served_models":
|
||||
"description": |-
|
||||
(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 15 served models.
|
||||
(Deprecated, use served_entities instead) The list of served models under the serving endpoint config.
|
||||
"traffic_config":
|
||||
"description": |-
|
||||
The traffic config defining how invocations to the serving endpoint should be routed.
|
||||
The traffic configuration associated with the serving endpoint config.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.EndpointTag:
|
||||
"key":
|
||||
"description": |-
|
||||
|
@ -2903,17 +2953,13 @@ github.com/databricks/databricks-sdk-go/service/serving.ExternalModel:
|
|||
"description": |-
|
||||
PaLM Config. Only required if the provider is 'palm'.
|
||||
"provider":
|
||||
"description": |
|
||||
The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',
|
||||
'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.",
|
||||
"description": |-
|
||||
The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.
|
||||
"task":
|
||||
"description": |-
|
||||
The task type of the external model.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider:
|
||||
"_":
|
||||
"description": |
|
||||
The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',
|
||||
'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.",
|
||||
"enum":
|
||||
- |-
|
||||
ai21labs
|
||||
|
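A hedged sketch of how a provider config hangs off an external model entry; `provider` and `task` are annotated above, the `name` field is referenced later in this diff (external_model.name), and the concrete values are illustrative only.

external_model:
  provider: anthropic
  name: claude-3-5-sonnet            # illustrative model name
  task: llm/v1/chat                  # illustrative task type
  anthropic_config:
    anthropic_api_key: "{{secrets/my_scope/anthropic_api_key}}"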
@ -2934,70 +2980,114 @@ github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider:
|
|||
github.com/databricks/databricks-sdk-go/service/serving.GoogleCloudVertexAiConfig:
|
||||
"private_key":
|
||||
"description": |-
|
||||
The Databricks secret key reference for a private key for the service account which has access to the Google Cloud Vertex AI Service. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to paste your API key directly, see `private_key_plaintext`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`
|
||||
The Databricks secret key reference for a private key for the service
|
||||
account which has access to the Google Cloud Vertex AI Service. See [Best
|
||||
practices for managing service account keys]. If you prefer to paste your
|
||||
API key directly, see `private_key_plaintext`. You must provide an API
|
||||
key using one of the following fields: `private_key` or
|
||||
`private_key_plaintext`
|
||||
|
||||
[Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys
|
||||
"private_key_plaintext":
|
||||
"description": |-
|
||||
The private key for the service account which has access to the Google Cloud Vertex AI Service provided as a plaintext secret. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to reference your key using Databricks Secrets, see `private_key`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`.
|
||||
The private key for the service account which has access to the Google
|
||||
Cloud Vertex AI Service provided as a plaintext secret. See [Best
|
||||
practices for managing service account keys]. If you prefer to reference
|
||||
your key using Databricks Secrets, see `private_key`. You must provide an
|
||||
API key using one of the following fields: `private_key` or
|
||||
`private_key_plaintext`.
|
||||
|
||||
[Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys
|
||||
"project_id":
|
||||
"description": |-
|
||||
This is the Google Cloud project id that the service account is associated with.
|
||||
This is the Google Cloud project id that the service account is
|
||||
associated with.
|
||||
"region":
|
||||
"description": |-
|
||||
This is the region for the Google Cloud Vertex AI Service. See [supported regions](https://cloud.google.com/vertex-ai/docs/general/locations) for more details. Some models are only available in specific regions.
|
||||
This is the region for the Google Cloud Vertex AI Service. See [supported
|
||||
regions] for more details. Some models are only available in specific
|
||||
regions.
|
||||
|
||||
[supported regions]: https://cloud.google.com/vertex-ai/docs/general/locations
|
||||
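A sketch of the Vertex AI config fields above; the project, region, and secret reference are illustrative.

google_cloud_vertex_ai_config:
  project_id: my-gcp-project
  region: us-central1                # see the supported-regions link above
  private_key: "{{secrets/my_scope/vertex_service_account_key}}"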
github.com/databricks/databricks-sdk-go/service/serving.OpenAiConfig:
|
||||
"_":
|
||||
"description": |-
|
||||
Configs needed to create an OpenAI model route.
|
||||
"microsoft_entra_client_id":
|
||||
"description": |
|
||||
This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.
|
||||
"description": |-
|
||||
This field is only required for Azure AD OpenAI and is the Microsoft
|
||||
Entra Client ID.
|
||||
"microsoft_entra_client_secret":
|
||||
"description": |
|
||||
The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication.
|
||||
If you prefer to paste your client secret directly, see `microsoft_entra_client_secret_plaintext`.
|
||||
You must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.
|
||||
"description": |-
|
||||
The Databricks secret key reference for a client secret used for
|
||||
Microsoft Entra ID authentication. If you prefer to paste your client
|
||||
secret directly, see `microsoft_entra_client_secret_plaintext`. You must
|
||||
provide an API key using one of the following fields:
|
||||
`microsoft_entra_client_secret` or
|
||||
`microsoft_entra_client_secret_plaintext`.
|
||||
"microsoft_entra_client_secret_plaintext":
|
||||
"description": |
|
||||
The client secret used for Microsoft Entra ID authentication provided as a plaintext string.
|
||||
If you prefer to reference your key using Databricks Secrets, see `microsoft_entra_client_secret`.
|
||||
You must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.
|
||||
"description": |-
|
||||
The client secret used for Microsoft Entra ID authentication provided as
|
||||
a plaintext string. If you prefer to reference your key using Databricks
|
||||
Secrets, see `microsoft_entra_client_secret`. You must provide an API key
|
||||
using one of the following fields: `microsoft_entra_client_secret` or
|
||||
`microsoft_entra_client_secret_plaintext`.
|
||||
"microsoft_entra_tenant_id":
|
||||
"description": |
|
||||
This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.
|
||||
"description": |-
|
||||
This field is only required for Azure AD OpenAI and is the Microsoft
|
||||
Entra Tenant ID.
|
||||
"openai_api_base":
|
||||
"description": |
|
||||
This is a field to provide a customized base URl for the OpenAI API.
|
||||
For Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service
|
||||
provided by Azure.
|
||||
For other OpenAI API types, this field is optional, and if left unspecified, the standard OpenAI base URL is used.
|
||||
"description": |-
|
||||
This is a field to provide a customized base URL for the OpenAI API. For
|
||||
Azure OpenAI, this field is required, and is the base URL for the Azure
|
||||
OpenAI API service provided by Azure. For other OpenAI API types, this
|
||||
field is optional, and if left unspecified, the standard OpenAI base URL
|
||||
is used.
|
||||
"openai_api_key":
|
||||
"description": |-
|
||||
The Databricks secret key reference for an OpenAI API key using the OpenAI or Azure service. If you prefer to paste your API key directly, see `openai_api_key_plaintext`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`.
|
||||
The Databricks secret key reference for an OpenAI API key using the
|
||||
OpenAI or Azure service. If you prefer to paste your API key directly,
|
||||
see `openai_api_key_plaintext`. You must provide an API key using one of
|
||||
the following fields: `openai_api_key` or `openai_api_key_plaintext`.
|
||||
"openai_api_key_plaintext":
|
||||
"description": |-
|
||||
The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `openai_api_key`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`.
|
||||
The OpenAI API key using the OpenAI or Azure service provided as a
|
||||
plaintext string. If you prefer to reference your key using Databricks
|
||||
Secrets, see `openai_api_key`. You must provide an API key using one of
|
||||
the following fields: `openai_api_key` or `openai_api_key_plaintext`.
|
||||
"openai_api_type":
|
||||
"description": |
|
||||
This is an optional field to specify the type of OpenAI API to use.
|
||||
For Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security
|
||||
access validation protocol. For access token validation, use azure. For authentication using Azure Active
|
||||
"description": |-
|
||||
This is an optional field to specify the type of OpenAI API to use. For
|
||||
Azure OpenAI, this field is required, and adjust this parameter to
|
||||
represent the preferred security access validation protocol. For access
|
||||
token validation, use azure. For authentication using Azure Active
|
||||
Directory (Azure AD), use azuread.
|
||||
"openai_api_version":
|
||||
"description": |
|
||||
This is an optional field to specify the OpenAI API version.
|
||||
For Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to
|
||||
utilize, specified by a date.
|
||||
"description": |-
|
||||
This is an optional field to specify the OpenAI API version. For Azure
|
||||
OpenAI, this field is required, and is the version of the Azure OpenAI
|
||||
service to utilize, specified by a date.
|
||||
"openai_deployment_name":
|
||||
"description": |
|
||||
This field is only required for Azure OpenAI and is the name of the deployment resource for the
|
||||
Azure OpenAI service.
|
||||
"description": |-
|
||||
This field is only required for Azure OpenAI and is the name of the
|
||||
deployment resource for the Azure OpenAI service.
|
||||
"openai_organization":
|
||||
"description": |
|
||||
This is an optional field to specify the organization in OpenAI or Azure OpenAI.
|
||||
"description": |-
|
||||
This is an optional field to specify the organization in OpenAI or Azure
|
||||
OpenAI.
|
||||
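An illustrative Azure-flavored openai_config combining the fields above; every value here is a placeholder, not taken from this commit.

openai_config:
  openai_api_type: azure                                   # or azuread for Entra ID auth
  openai_api_base: https://my-resource.openai.azure.com    # required for Azure OpenAI
  openai_api_version: "2024-02-01"                         # date-based version, required for Azure
  openai_deployment_name: my-deployment
  openai_api_key: "{{secrets/my_scope/azure_openai_api_key}}"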
github.com/databricks/databricks-sdk-go/service/serving.PaLmConfig:
|
||||
"palm_api_key":
|
||||
"description": |-
|
||||
The Databricks secret key reference for a PaLM API key. If you prefer to paste your API key directly, see `palm_api_key_plaintext`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`.
|
||||
The Databricks secret key reference for a PaLM API key. If you prefer to
|
||||
paste your API key directly, see `palm_api_key_plaintext`. You must
|
||||
provide an API key using one of the following fields: `palm_api_key` or
|
||||
`palm_api_key_plaintext`.
|
||||
"palm_api_key_plaintext":
|
||||
"description": |-
|
||||
The PaLM API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `palm_api_key`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`.
|
||||
The PaLM API key provided as a plaintext string. If you prefer to
|
||||
reference your key using Databricks Secrets, see `palm_api_key`. You must
|
||||
provide an API key using one of the following fields: `palm_api_key` or
|
||||
`palm_api_key_plaintext`.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.RateLimit:
|
||||
"calls":
|
||||
"description": |-
|
||||
|
@ -3010,8 +3100,6 @@ github.com/databricks/databricks-sdk-go/service/serving.RateLimit:
|
|||
Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey:
|
||||
"_":
|
||||
"description": |-
|
||||
Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.
|
||||
"enum":
|
||||
- |-
|
||||
user
|
||||
|
@ -3019,8 +3107,6 @@ github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey:
|
|||
endpoint
|
||||
github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod:
|
||||
"_":
|
||||
"description": |-
|
||||
Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.
|
||||
"enum":
|
||||
- |-
|
||||
minute
|
||||
|
@ -3033,21 +3119,15 @@ github.com/databricks/databricks-sdk-go/service/serving.Route:
|
|||
The percentage of endpoint traffic to send to this route. It must be an integer between 0 and 100 inclusive.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput:
|
||||
"entity_name":
|
||||
"description": |
|
||||
The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC),
|
||||
or a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of
|
||||
__catalog_name__.__schema_name__.__model_name__.
|
||||
"entity_version":
|
||||
"description": |-
|
||||
The version of the model in Databricks Model Registry to be served or empty if the entity is a FEATURE_SPEC.
|
||||
The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC), or a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of **catalog_name.schema_name.model_name**.
|
||||
"entity_version": {}
|
||||
"environment_vars":
|
||||
"description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity.\nNote: this is an experimental feature and subject to change. \nExample entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`"
|
||||
"description": |-
|
||||
An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. Example entity environment variables that refer to Databricks secrets: `{"OPENAI_API_KEY": "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": "{{secrets/my_scope2/my_key2}}"}`
|
||||
"external_model":
|
||||
"description": |
|
||||
The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)
|
||||
can be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model,
|
||||
it cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later.
|
||||
The task type of all external models within an endpoint must be the same.
|
||||
"description": |-
|
||||
The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled) can be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model, it cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later. The task type of all external models within an endpoint must be the same.
|
||||
"instance_profile_arn":
|
||||
"description": |-
|
||||
ARN of the instance profile that the served entity uses to access AWS resources.
|
||||
|
@ -3058,68 +3138,46 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput:
|
|||
"description": |-
|
||||
The minimum tokens per second that the endpoint can scale down to.
|
||||
"name":
|
||||
"description": |
|
||||
The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores.
|
||||
If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other
|
||||
entities, it defaults to <entity-name>-<entity-version>.
|
||||
"description": |-
|
||||
The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.
|
||||
"scale_to_zero_enabled":
|
||||
"description": |-
|
||||
Whether the compute resources for the served entity should scale down to zero.
|
||||
"workload_size":
|
||||
"description": |
|
||||
The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between.
|
||||
A single unit of provisioned concurrency can process one request at a time.
|
||||
Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
|
||||
If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.
|
||||
"description": |-
|
||||
The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.
|
||||
"workload_type":
|
||||
"description": |
|
||||
The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is
|
||||
"CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.
|
||||
See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
|
||||
"description": |-
|
||||
The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
|
||||
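A sketch of a single served entity using the ServedEntityInput fields annotated above; the UC model name, version, and secret are illustrative.

served_entities:
  - entity_name: main.my_schema.my_model       # UC full name: catalog.schema.model
    entity_version: "3"
    workload_size: Small
    workload_type: CPU
    scale_to_zero_enabled: true
    environment_vars:
      OPENAI_API_KEY: "{{secrets/my_scope/my_key}}"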
github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput:
|
||||
"environment_vars":
|
||||
"description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this model.\nNote: this is an experimental feature and subject to change. \nExample model environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`"
|
||||
"description": |-
|
||||
An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. Example entity environment variables that refer to Databricks secrets: `{"OPENAI_API_KEY": "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": "{{secrets/my_scope2/my_key2}}"}`
|
||||
"instance_profile_arn":
|
||||
"description": |-
|
||||
ARN of the instance profile that the served model will use to access AWS resources.
|
||||
ARN of the instance profile that the served entity uses to access AWS resources.
|
||||
"max_provisioned_throughput":
|
||||
"description": |-
|
||||
The maximum tokens per second that the endpoint can scale up to.
|
||||
"min_provisioned_throughput":
|
||||
"description": |-
|
||||
The minimum tokens per second that the endpoint can scale down to.
|
||||
"model_name":
|
||||
"description": |
|
||||
The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model,
|
||||
in the form of __catalog_name__.__schema_name__.__model_name__.
|
||||
"model_version":
|
||||
"description": |-
|
||||
The version of the model in Databricks Model Registry or Unity Catalog to be served.
|
||||
"model_name": {}
|
||||
"model_version": {}
|
||||
"name":
|
||||
"description": |
|
||||
The name of a served model. It must be unique across an endpoint. If not specified, this field will default to <model-name>-<model-version>.
|
||||
A served model name can consist of alphanumeric characters, dashes, and underscores.
|
||||
"description": |-
|
||||
The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.
|
||||
"scale_to_zero_enabled":
|
||||
"description": |-
|
||||
Whether the compute resources for the served model should scale down to zero.
|
||||
Whether the compute resources for the served entity should scale down to zero.
|
||||
"workload_size":
|
||||
"description": |
|
||||
The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.
|
||||
A single unit of provisioned concurrency can process one request at a time.
|
||||
Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
|
||||
If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.
|
||||
"description": |-
|
||||
The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.
|
||||
"workload_type":
|
||||
"description": |
|
||||
The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is
|
||||
"CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.
|
||||
See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
|
||||
"description": |-
|
||||
The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
|
||||
github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize:
|
||||
"_":
|
||||
"description": |
|
||||
The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.
|
||||
A single unit of provisioned concurrency can process one request at a time.
|
||||
Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
|
||||
If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.
|
||||
"enum":
|
||||
- |-
|
||||
Small
|
||||
|
@ -3129,17 +3187,26 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkload
|
|||
Large
|
||||
github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType:
|
||||
"_":
|
||||
"description": |
|
||||
The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is
|
||||
"CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.
|
||||
See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
|
||||
"enum":
|
||||
- |-
|
||||
CPU
|
||||
- |-
|
||||
GPU_MEDIUM
|
||||
- |-
|
||||
GPU_SMALL
|
||||
- |-
|
||||
GPU_LARGE
|
||||
- |-
|
||||
MULTIGPU_MEDIUM
|
||||
github.com/databricks/databricks-sdk-go/service/serving.ServingModelWorkloadType:
|
||||
"_":
|
||||
"enum":
|
||||
- |-
|
||||
CPU
|
||||
- |-
|
||||
GPU_MEDIUM
|
||||
- |-
|
||||
GPU_SMALL
|
||||
- |-
|
||||
GPU_LARGE
|
||||
- |-
|
||||
|
|
|
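For the older served_models shape annotated above, an equivalent hedged sketch; values are illustrative and the GPU workload type is just one of the enum values listed.

served_models:                                   # deprecated in favor of served_entities
  - model_name: main.my_schema.my_model          # registry name or UC full name
    model_version: "3"
    workload_size: Medium
    workload_type: GPU_SMALL
    scale_to_zero_enabled: false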
@ -197,3 +197,14 @@ github.com/databricks/databricks-sdk-go/service/pipelines.PipelineTrigger:
|
|||
"manual":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput:
|
||||
"entity_version":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput:
|
||||
"model_name":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
"model_version":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
|
|
|
@ -172,6 +172,15 @@ func generateSchema(workdir, outputFile string) {
|
|||
a.addAnnotations,
|
||||
addInterpolationPatterns,
|
||||
})
|
||||
|
||||
// AdditionalProperties is set to an empty schema to allow non-typed keys used as yaml-anchors
|
||||
// Example:
|
||||
// some_anchor: &some_anchor
|
||||
// file_path: /some/path/
|
||||
// workspace:
|
||||
// <<: *some_anchor
|
||||
s.AdditionalProperties = jsonschema.Schema{}
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
unknown: value
|
|
@ -0,0 +1,11 @@
|
|||
tags: &job-tags
|
||||
environment: "some_environment"
|
||||
|
||||
resources:
|
||||
jobs:
|
||||
db1:
|
||||
tags:
|
||||
<<: *job-tags
|
||||
db2:
|
||||
tags:
|
||||
<<: *job-tags
|
|
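For clarity, after standard YAML merge-key expansion the fixture above is equivalent to:

resources:
  jobs:
    db1:
      tags:
        environment: "some_environment"
    db2:
      tags:
        environment: "some_environment"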
@ -1,3 +1,3 @@
|
|||
package schema
|
||||
|
||||
const ProviderVersion = "1.63.0"
|
||||
const ProviderVersion = "1.64.1"
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
package schema
|
||||
|
||||
type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii struct {
|
||||
Behavior string `json:"behavior"`
|
||||
Behavior string `json:"behavior,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInput struct {
|
||||
|
@ -14,7 +14,7 @@ type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInput struct {
|
|||
}
|
||||
|
||||
type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii struct {
|
||||
Behavior string `json:"behavior"`
|
||||
Behavior string `json:"behavior,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutput struct {
|
||||
|
@ -87,8 +87,8 @@ type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelDatabri
|
|||
type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelGoogleCloudVertexAiConfig struct {
|
||||
PrivateKey string `json:"private_key,omitempty"`
|
||||
PrivateKeyPlaintext string `json:"private_key_plaintext,omitempty"`
|
||||
ProjectId string `json:"project_id,omitempty"`
|
||||
Region string `json:"region,omitempty"`
|
||||
ProjectId string `json:"project_id"`
|
||||
Region string `json:"region"`
|
||||
}
|
||||
|
||||
type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelOpenaiConfig struct {
|
||||
|
|
|
@ -91,6 +91,7 @@ type ResourceApp struct {
|
|||
DefaultSourceCodePath string `json:"default_source_code_path,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Name string `json:"name"`
|
||||
NoCompute bool `json:"no_compute,omitempty"`
|
||||
PendingDeployment *ResourceAppPendingDeployment `json:"pending_deployment,omitempty"`
|
||||
Resources []ResourceAppResources `json:"resources,omitempty"`
|
||||
ServicePrincipalClientId string `json:"service_principal_client_id,omitempty"`
|
||||
|
|
|
@ -904,6 +904,7 @@ type ResourceJobTaskForEachTaskTaskSparkJarTask struct {
|
|||
JarUri string `json:"jar_uri,omitempty"`
|
||||
MainClassName string `json:"main_class_name,omitempty"`
|
||||
Parameters []string `json:"parameters,omitempty"`
|
||||
RunAsRepl bool `json:"run_as_repl,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceJobTaskForEachTaskTaskSparkPythonTask struct {
|
||||
|
@ -1299,6 +1300,7 @@ type ResourceJobTaskSparkJarTask struct {
|
|||
JarUri string `json:"jar_uri,omitempty"`
|
||||
MainClassName string `json:"main_class_name,omitempty"`
|
||||
Parameters []string `json:"parameters,omitempty"`
|
||||
RunAsRepl bool `json:"run_as_repl,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceJobTaskSparkPythonTask struct {
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
package schema
|
||||
|
||||
type ResourceModelServingAiGatewayGuardrailsInputPii struct {
|
||||
Behavior string `json:"behavior"`
|
||||
Behavior string `json:"behavior,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceModelServingAiGatewayGuardrailsInput struct {
|
||||
|
@ -14,7 +14,7 @@ type ResourceModelServingAiGatewayGuardrailsInput struct {
|
|||
}
|
||||
|
||||
type ResourceModelServingAiGatewayGuardrailsOutputPii struct {
|
||||
Behavior string `json:"behavior"`
|
||||
Behavior string `json:"behavior,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceModelServingAiGatewayGuardrailsOutput struct {
|
||||
|
@ -94,8 +94,8 @@ type ResourceModelServingConfigServedEntitiesExternalModelDatabricksModelServing
|
|||
type ResourceModelServingConfigServedEntitiesExternalModelGoogleCloudVertexAiConfig struct {
|
||||
PrivateKey string `json:"private_key,omitempty"`
|
||||
PrivateKeyPlaintext string `json:"private_key_plaintext,omitempty"`
|
||||
ProjectId string `json:"project_id,omitempty"`
|
||||
Region string `json:"region,omitempty"`
|
||||
ProjectId string `json:"project_id"`
|
||||
Region string `json:"region"`
|
||||
}
|
||||
|
||||
type ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig struct {
|
||||
|
|
|
@ -29,6 +29,7 @@ type ResourceRecipient struct {
|
|||
CreatedAt int `json:"created_at,omitempty"`
|
||||
CreatedBy string `json:"created_by,omitempty"`
|
||||
DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"`
|
||||
ExpirationTime int `json:"expiration_time,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
MetastoreId string `json:"metastore_id,omitempty"`
|
||||
Name string `json:"name"`
|
||||
|
|
|
@ -21,7 +21,7 @@ type Root struct {
|
|||
|
||||
const ProviderHost = "registry.terraform.io"
|
||||
const ProviderSource = "databricks/databricks"
|
||||
const ProviderVersion = "1.63.0"
|
||||
const ProviderVersion = "1.64.1"
|
||||
|
||||
func NewRoot() *Root {
|
||||
return &Root{
|
||||
|
|
|
@ -130,7 +130,6 @@ func Deploy(outputHandler sync.OutputHandler) bundle.Mutator {
|
|||
// mutators need informed consent if they are potentially destructive.
|
||||
deployCore := bundle.Defer(
|
||||
bundle.Seq(
|
||||
apps.SlowDeployMessage(),
|
||||
bundle.LogString("Deploying resources..."),
|
||||
terraform.Apply(),
|
||||
),
|
||||
|
|
|
@ -34,7 +34,6 @@ func Initialize() bundle.Mutator {
|
|||
// If it is an ancestor, this updates all paths to be relative to the sync root path.
|
||||
mutator.SyncInferRoot(),
|
||||
|
||||
mutator.InitializeWorkspaceClient(),
|
||||
mutator.PopulateCurrentUser(),
|
||||
mutator.LoadGitDetails(),
|
||||
|
||||
|
|
|
@ -546,7 +546,7 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"ai_gateway": {
|
||||
"description": "The AI Gateway configuration for the serving endpoint. NOTE: only external model endpoints are supported as of now.",
|
||||
"description": "The AI Gateway configuration for the serving endpoint. NOTE: Only external model and provisioned throughput endpoints are currently supported.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayConfig"
|
||||
},
|
||||
"config": {
|
||||
|
@ -554,7 +554,7 @@
|
|||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.EndpointCoreConfigInput"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the serving endpoint. This field is required and must be unique across a Databricks workspace.\nAn endpoint name can consist of alphanumeric characters, dashes, and underscores.\n",
|
||||
"description": "The name of the serving endpoint. This field is required and must be unique across a Databricks workspace.\nAn endpoint name can consist of alphanumeric characters, dashes, and underscores.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"permissions": {
|
||||
|
@ -575,7 +575,6 @@
|
|||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"config",
|
||||
"name"
|
||||
]
|
||||
},
|
||||
|
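Tying the schema fragments together, a hedged bundle snippet for a serving endpoint resource; the `model_serving_endpoints` resource key and every value below are assumptions for illustration, not taken from this diff.

resources:
  model_serving_endpoints:                       # assumed resource key
    my_endpoint:
      name: my-endpoint                          # required; unique within the workspace
      config:                                    # required
        served_entities:
          - entity_name: main.my_schema.my_model
            entity_version: "1"
            workload_size: Small
            scale_to_zero_enabled: true
      ai_gateway:
        usage_tracking_config:
          enabled: true                          # assumed field name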
@ -4142,6 +4141,10 @@
|
|||
"parameters": {
|
||||
"description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.",
|
||||
"$ref": "#/$defs/slice/string"
|
||||
},
|
||||
"run_as_repl": {
|
||||
"description": "Deprecated. A value of `false` is no longer supported.",
|
||||
"$ref": "#/$defs/bool"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
|
@ -5502,11 +5505,11 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"ai21labs_api_key": {
|
||||
"description": "The Databricks secret key reference for an AI21 Labs API key. If you prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`.",
|
||||
"description": "The Databricks secret key reference for an AI21 Labs API key. If you\nprefer to paste your API key directly, see `ai21labs_api_key_plaintext`.\nYou must provide an API key using one of the following fields:\n`ai21labs_api_key` or `ai21labs_api_key_plaintext`.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"ai21labs_api_key_plaintext": {
|
||||
"description": "An AI21 Labs API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `ai21labs_api_key`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`.",
|
||||
"description": "An AI21 Labs API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `ai21labs_api_key`. You\nmust provide an API key using one of the following fields:\n`ai21labs_api_key` or `ai21labs_api_key_plaintext`.",
|
||||
"$ref": "#/$defs/string"
|
||||
}
|
||||
},
|
||||
|
@ -5528,7 +5531,7 @@
|
|||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrails"
|
||||
},
|
||||
"inference_table_config": {
|
||||
"description": "Configuration for payload logging using inference tables. Use these tables to monitor and audit data being sent to and received from model APIs and to improve model quality.",
|
||||
"description": "Configuration for payload logging using inference tables.\nUse these tables to monitor and audit data being sent to and received from model APIs and to improve model quality.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayInferenceTableConfig"
|
||||
},
|
||||
"rate_limits": {
|
||||
|
@ -5536,7 +5539,7 @@
|
|||
"$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimit"
|
||||
},
|
||||
"usage_tracking_config": {
|
||||
"description": "Configuration to enable usage tracking using system tables. These tables allow you to monitor operational usage on endpoints and their associated costs.",
|
||||
"description": "Configuration to enable usage tracking using system tables.\nThese tables allow you to monitor operational usage on endpoints and their associated costs.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayUsageTrackingConfig"
|
||||
}
|
||||
},
|
||||
|
@ -5554,7 +5557,7 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"invalid_keywords": {
|
||||
"description": "List of invalid keywords. AI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content.",
|
||||
"description": "List of invalid keywords.\nAI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content.",
|
||||
"$ref": "#/$defs/slice/string"
|
||||
},
|
||||
"pii": {
|
||||
|
@ -5566,7 +5569,7 @@
|
|||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"valid_topics": {
|
||||
"description": "The list of allowed topics. Given a chat request, this guardrail flags the request if its topic is not in the allowed topics.",
|
||||
"description": "The list of allowed topics.\nGiven a chat request, this guardrail flags the request if its topic is not in the allowed topics.",
|
||||
"$ref": "#/$defs/slice/string"
|
||||
}
|
||||
},
|
||||
|
@ -5584,14 +5587,11 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"behavior": {
|
||||
"description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.",
|
||||
"description": "Configuration for input guardrail filters.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"behavior"
|
||||
]
|
||||
"additionalProperties": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
|
@ -5603,7 +5603,6 @@
|
|||
"oneOf": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.",
|
||||
"enum": [
|
||||
"NONE",
|
||||
"BLOCK"
|
||||
|
@ -5643,7 +5642,7 @@
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"catalog_name": {
|
||||
"description": "The name of the catalog in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the catalog name.",
|
||||
"description": "The name of the catalog in Unity Catalog. Required when enabling inference tables.\nNOTE: On update, you have to disable inference table first in order to change the catalog name.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"enabled": {
|
||||
|
@ -5651,11 +5650,11 @@
|
|||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"schema_name": {
|
||||
"description": "The name of the schema in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the schema name.",
|
||||
"description": "The name of the schema in Unity Catalog. Required when enabling inference tables.\nNOTE: On update, you have to disable inference table first in order to change the schema name.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"table_name_prefix": {
|
||||
"description": "The prefix of the table in Unity Catalog. NOTE: On update, you have to disable inference table first in order to change the prefix name.",
|
||||
"description": "The prefix of the table in Unity Catalog.\nNOTE: On update, you have to disable inference table first in order to change the prefix name.",
|
||||
"$ref": "#/$defs/string"
|
||||
}
|
||||
},
|
||||
|
@ -5674,10 +5673,10 @@
|
|||
"properties": {
|
||||
"calls": {
|
||||
"description": "Used to specify how many calls are allowed for a key within the renewal_period.",
|
||||
"$ref": "#/$defs/int"
|
||||
"$ref": "#/$defs/int64"
|
||||
},
|
||||
"key": {
|
||||
"description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
|
||||
"description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported,\nwith 'endpoint' being the default if not specified.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey"
|
||||
},
|
||||
"renewal_period": {
|
||||
|
@ -5701,7 +5700,6 @@
|
|||
"oneOf": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
|
||||
"enum": [
|
||||
"user",
|
||||
"endpoint"
|
||||
|
@ -5717,7 +5715,6 @@
|
|||
"oneOf": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.",
|
||||
"enum": [
|
||||
"minute"
|
||||
]
|
||||
|
@@ -5752,11 +5749,11 @@
 "type": "object",
 "properties": {
 "aws_access_key_id": {
-"description": "The Databricks secret key reference for an AWS access key ID with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`.",
+"description": "The Databricks secret key reference for an AWS access key ID with\npermissions to interact with Bedrock services. If you prefer to paste\nyour API key directly, see `aws_access_key_id_plaintext`. You must provide an API\nkey using one of the following fields: `aws_access_key_id` or\n`aws_access_key_id_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "aws_access_key_id_plaintext": {
-"description": "An AWS access key ID with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`.",
+"description": "An AWS access key ID with permissions to interact with Bedrock services\nprovided as a plaintext string. If you prefer to reference your key using\nDatabricks Secrets, see `aws_access_key_id`. You must provide an API key\nusing one of the following fields: `aws_access_key_id` or\n`aws_access_key_id_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "aws_region": {
@@ -5764,15 +5761,15 @@
 "$ref": "#/$defs/string"
 },
 "aws_secret_access_key": {
-"description": "The Databricks secret key reference for an AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_secret_access_key_plaintext`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`.",
+"description": "The Databricks secret key reference for an AWS secret access key paired\nwith the access key ID, with permissions to interact with Bedrock\nservices. If you prefer to paste your API key directly, see\n`aws_secret_access_key_plaintext`. You must provide an API key using one\nof the following fields: `aws_secret_access_key` or\n`aws_secret_access_key_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "aws_secret_access_key_plaintext": {
-"description": "An AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_secret_access_key`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`.",
+"description": "An AWS secret access key paired with the access key ID, with permissions\nto interact with Bedrock services provided as a plaintext string. If you\nprefer to reference your key using Databricks Secrets, see\n`aws_secret_access_key`. You must provide an API key using one of the\nfollowing fields: `aws_secret_access_key` or\n`aws_secret_access_key_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "bedrock_provider": {
-"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
+"description": "The underlying provider in Amazon Bedrock. Supported values (case\ninsensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
 "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider"
 }
 },
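
As a sketch of how these fields fit together, an Amazon Bedrock config object matching this schema could look as follows; the secret scope and key names are hypothetical, and the `{{secrets/...}}` reference style follows the example given for environment variables elsewhere in this schema:

    {
      "aws_access_key_id": "{{secrets/my_scope/bedrock_access_key_id}}",
      "aws_secret_access_key": "{{secrets/my_scope/bedrock_secret_access_key}}",
      "aws_region": "us-east-1",
      "bedrock_provider": "anthropic"
    }
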
@@ -5792,7 +5789,6 @@
 "oneOf": [
 {
 "type": "string",
-"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
 "enum": [
 "anthropic",
 "cohere",
@@ -5812,11 +5808,11 @@
 "type": "object",
 "properties": {
 "anthropic_api_key": {
-"description": "The Databricks secret key reference for an Anthropic API key. If you prefer to paste your API key directly, see `anthropic_api_key_plaintext`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`.",
+"description": "The Databricks secret key reference for an Anthropic API key. If you\nprefer to paste your API key directly, see `anthropic_api_key_plaintext`.\nYou must provide an API key using one of the following fields:\n`anthropic_api_key` or `anthropic_api_key_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "anthropic_api_key_plaintext": {
-"description": "The Anthropic API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `anthropic_api_key`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`.",
+"description": "The Anthropic API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `anthropic_api_key`. You\nmust provide an API key using one of the following fields:\n`anthropic_api_key` or `anthropic_api_key_plaintext`.",
 "$ref": "#/$defs/string"
 }
 },
@@ -5864,15 +5860,15 @@
 "type": "object",
 "properties": {
 "cohere_api_base": {
-"description": "This is an optional field to provide a customized base URL for the Cohere API. \nIf left unspecified, the standard Cohere base URL is used.\n",
+"description": "This is an optional field to provide a customized base URL for the Cohere\nAPI. If left unspecified, the standard Cohere base URL is used.",
 "$ref": "#/$defs/string"
 },
 "cohere_api_key": {
-"description": "The Databricks secret key reference for a Cohere API key. If you prefer to paste your API key directly, see `cohere_api_key_plaintext`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`.",
+"description": "The Databricks secret key reference for a Cohere API key. If you prefer\nto paste your API key directly, see `cohere_api_key_plaintext`. You must\nprovide an API key using one of the following fields: `cohere_api_key` or\n`cohere_api_key_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "cohere_api_key_plaintext": {
-"description": "The Cohere API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `cohere_api_key`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`.",
+"description": "The Cohere API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `cohere_api_key`. You\nmust provide an API key using one of the following fields:\n`cohere_api_key` or `cohere_api_key_plaintext`.",
 "$ref": "#/$defs/string"
 }
 },
@@ -5890,15 +5886,15 @@
 "type": "object",
 "properties": {
 "databricks_api_token": {
-"description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\nIf you prefer to paste your API key directly, see `databricks_api_token_plaintext`.\nYou must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.\n",
+"description": "The Databricks secret key reference for a Databricks API token that\ncorresponds to a user or service principal with Can Query access to the\nmodel serving endpoint pointed to by this external model. If you prefer\nto paste your API key directly, see `databricks_api_token_plaintext`. You\nmust provide an API key using one of the following fields:\n`databricks_api_token` or `databricks_api_token_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "databricks_api_token_plaintext": {
-"description": "The Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `databricks_api_token`.\nYou must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.\n",
+"description": "The Databricks API token that corresponds to a user or service principal\nwith Can Query access to the model serving endpoint pointed to by this\nexternal model provided as a plaintext string. If you prefer to reference\nyour key using Databricks Secrets, see `databricks_api_token`. You must\nprovide an API key using one of the following fields:\n`databricks_api_token` or `databricks_api_token_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "databricks_workspace_url": {
-"description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n",
+"description": "The URL of the Databricks workspace containing the model serving endpoint\npointed to by this external model.",
 "$ref": "#/$defs/string"
 }
 },
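
A minimal Databricks model serving config object under this schema might look like the following; the workspace URL and secret reference are placeholders:

    {
      "databricks_api_token": "{{secrets/my_scope/databricks_token}}",
      "databricks_workspace_url": "https://my-workspace.cloud.databricks.com"
    }
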
@@ -5919,19 +5915,19 @@
 "type": "object",
 "properties": {
 "auto_capture_config": {
-"description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.",
+"description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.\nNote: this field is deprecated for creating new provisioned throughput endpoints,\nor updating existing provisioned throughput endpoints that never have inference table configured;\nin these cases please use AI Gateway to manage inference tables.",
 "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AutoCaptureConfigInput"
 },
 "served_entities": {
-"description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 15 served entities.",
+"description": "The list of served entities under the serving endpoint config.",
 "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput"
 },
 "served_models": {
-"description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 15 served models.",
+"description": "(Deprecated, use served_entities instead) The list of served models under the serving endpoint config.",
 "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput"
 },
 "traffic_config": {
-"description": "The traffic config defining how invocations to the serving endpoint should be routed.",
+"description": "The traffic configuration associated with the serving endpoint config.",
 "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.TrafficConfig"
 }
 },
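
Putting the pieces together, a serving endpoint config block that satisfies this schema could be sketched as follows; the names and sizes are illustrative, and the served-entity fields used here are the ones defined later in this schema:

    {
      "served_entities": [
        {
          "entity_name": "my_catalog.my_schema.my_model",
          "entity_version": "1",
          "workload_size": "Small",
          "scale_to_zero_enabled": true
        }
      ]
    }
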
@@ -6010,7 +6006,7 @@
 "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.PaLmConfig"
 },
 "provider": {
-"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n",
+"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.",
 "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider"
 },
 "task": {
@@ -6035,7 +6031,6 @@
 "oneOf": [
 {
 "type": "string",
-"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n",
 "enum": [
 "ai21labs",
 "anthropic",
@@ -6059,23 +6054,27 @@
 "type": "object",
 "properties": {
 "private_key": {
-"description": "The Databricks secret key reference for a private key for the service account which has access to the Google Cloud Vertex AI Service. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to paste your API key directly, see `private_key_plaintext`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`",
+"description": "The Databricks secret key reference for a private key for the service\naccount which has access to the Google Cloud Vertex AI Service. See [Best\npractices for managing service account keys]. If you prefer to paste your\nAPI key directly, see `private_key_plaintext`. You must provide an API\nkey using one of the following fields: `private_key` or\n`private_key_plaintext`\n\n[Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys",
 "$ref": "#/$defs/string"
 },
 "private_key_plaintext": {
-"description": "The private key for the service account which has access to the Google Cloud Vertex AI Service provided as a plaintext secret. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to reference your key using Databricks Secrets, see `private_key`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`.",
+"description": "The private key for the service account which has access to the Google\nCloud Vertex AI Service provided as a plaintext secret. See [Best\npractices for managing service account keys]. If you prefer to reference\nyour key using Databricks Secrets, see `private_key`. You must provide an\nAPI key using one of the following fields: `private_key` or\n`private_key_plaintext`.\n\n[Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys",
 "$ref": "#/$defs/string"
 },
 "project_id": {
-"description": "This is the Google Cloud project id that the service account is associated with.",
+"description": "This is the Google Cloud project id that the service account is\nassociated with.",
 "$ref": "#/$defs/string"
 },
 "region": {
-"description": "This is the region for the Google Cloud Vertex AI Service. See [supported regions](https://cloud.google.com/vertex-ai/docs/general/locations) for more details. Some models are only available in specific regions.",
+"description": "This is the region for the Google Cloud Vertex AI Service. See [supported\nregions] for more details. Some models are only available in specific\nregions.\n\n[supported regions]: https://cloud.google.com/vertex-ai/docs/general/locations",
 "$ref": "#/$defs/string"
 }
 },
-"additionalProperties": false
+"additionalProperties": false,
+"required": [
+"project_id",
+"region"
+]
 },
 {
 "type": "string",
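
With `project_id` and `region` now required, a conforming Google Cloud Vertex AI config object might look like this; the project, region, and secret names are placeholders:

    {
      "project_id": "my-gcp-project",
      "region": "us-central1",
      "private_key": "{{secrets/my_scope/vertex_private_key}}"
    }
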
@@ -6087,49 +6086,50 @@
 "oneOf": [
 {
 "type": "object",
+"description": "Configs needed to create an OpenAI model route.",
 "properties": {
 "microsoft_entra_client_id": {
-"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n",
+"description": "This field is only required for Azure AD OpenAI and is the Microsoft\nEntra Client ID.",
 "$ref": "#/$defs/string"
 },
 "microsoft_entra_client_secret": {
-"description": "The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication.\nIf you prefer to paste your client secret directly, see `microsoft_entra_client_secret_plaintext`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n",
+"description": "The Databricks secret key reference for a client secret used for\nMicrosoft Entra ID authentication. If you prefer to paste your client\nsecret directly, see `microsoft_entra_client_secret_plaintext`. You must\nprovide an API key using one of the following fields:\n`microsoft_entra_client_secret` or\n`microsoft_entra_client_secret_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "microsoft_entra_client_secret_plaintext": {
-"description": "The client secret used for Microsoft Entra ID authentication provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `microsoft_entra_client_secret`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n",
+"description": "The client secret used for Microsoft Entra ID authentication provided as\na plaintext string. If you prefer to reference your key using Databricks\nSecrets, see `microsoft_entra_client_secret`. You must provide an API key\nusing one of the following fields: `microsoft_entra_client_secret` or\n`microsoft_entra_client_secret_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "microsoft_entra_tenant_id": {
-"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n",
+"description": "This field is only required for Azure AD OpenAI and is the Microsoft\nEntra Tenant ID.",
 "$ref": "#/$defs/string"
 },
 "openai_api_base": {
-"description": "This is a field to provide a customized base URl for the OpenAI API.\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\nFor other OpenAI API types, this field is optional, and if left unspecified, the standard OpenAI base URL is used.\n",
+"description": "This is a field to provide a customized base URl for the OpenAI API. For\nAzure OpenAI, this field is required, and is the base URL for the Azure\nOpenAI API service provided by Azure. For other OpenAI API types, this\nfield is optional, and if left unspecified, the standard OpenAI base URL\nis used.",
 "$ref": "#/$defs/string"
 },
 "openai_api_key": {
-"description": "The Databricks secret key reference for an OpenAI API key using the OpenAI or Azure service. If you prefer to paste your API key directly, see `openai_api_key_plaintext`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`.",
+"description": "The Databricks secret key reference for an OpenAI API key using the\nOpenAI or Azure service. If you prefer to paste your API key directly,\nsee `openai_api_key_plaintext`. You must provide an API key using one of\nthe following fields: `openai_api_key` or `openai_api_key_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "openai_api_key_plaintext": {
-"description": "The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `openai_api_key`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`.",
+"description": "The OpenAI API key using the OpenAI or Azure service provided as a\nplaintext string. If you prefer to reference your key using Databricks\nSecrets, see `openai_api_key`. You must provide an API key using one of\nthe following fields: `openai_api_key` or `openai_api_key_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "openai_api_type": {
-"description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n",
+"description": "This is an optional field to specify the type of OpenAI API to use. For\nAzure OpenAI, this field is required, and adjust this parameter to\nrepresent the preferred security access validation protocol. For access\ntoken validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.",
 "$ref": "#/$defs/string"
 },
 "openai_api_version": {
-"description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n",
+"description": "This is an optional field to specify the OpenAI API version. For Azure\nOpenAI, this field is required, and is the version of the Azure OpenAI\nservice to utilize, specified by a date.",
 "$ref": "#/$defs/string"
 },
 "openai_deployment_name": {
-"description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n",
+"description": "This field is only required for Azure OpenAI and is the name of the\ndeployment resource for the Azure OpenAI service.",
 "$ref": "#/$defs/string"
 },
 "openai_organization": {
-"description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n",
+"description": "This is an optional field to specify the organization in OpenAI or Azure\nOpenAI.",
 "$ref": "#/$defs/string"
 }
 },
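
For the Azure OpenAI case described above, an OpenAI config object could be sketched as follows; the resource URL, deployment name, API version date, and secret reference are illustrative placeholders:

    {
      "openai_api_type": "azure",
      "openai_api_base": "https://my-resource.openai.azure.com",
      "openai_api_version": "2023-05-15",
      "openai_deployment_name": "my-gpt-deployment",
      "openai_api_key": "{{secrets/my_scope/azure_openai_key}}"
    }
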
@@ -6147,11 +6147,11 @@
 "type": "object",
 "properties": {
 "palm_api_key": {
-"description": "The Databricks secret key reference for a PaLM API key. If you prefer to paste your API key directly, see `palm_api_key_plaintext`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`.",
+"description": "The Databricks secret key reference for a PaLM API key. If you prefer to\npaste your API key directly, see `palm_api_key_plaintext`. You must\nprovide an API key using one of the following fields: `palm_api_key` or\n`palm_api_key_plaintext`.",
 "$ref": "#/$defs/string"
 },
 "palm_api_key_plaintext": {
-"description": "The PaLM API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `palm_api_key`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`.",
+"description": "The PaLM API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `palm_api_key`. You must\nprovide an API key using one of the following fields: `palm_api_key` or\n`palm_api_key_plaintext`.",
 "$ref": "#/$defs/string"
 }
 },
@@ -6170,7 +6170,7 @@
 "properties": {
 "calls": {
 "description": "Used to specify how many calls are allowed for a key within the renewal_period.",
-"$ref": "#/$defs/int"
+"$ref": "#/$defs/int64"
 },
 "key": {
 "description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
@@ -6197,7 +6197,6 @@
 "oneOf": [
 {
 "type": "string",
-"description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
 "enum": [
 "user",
 "endpoint"
@@ -6213,7 +6212,6 @@
 "oneOf": [
 {
 "type": "string",
-"description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.",
 "enum": [
 "minute"
 ]
@@ -6256,19 +6254,18 @@
 "type": "object",
 "properties": {
 "entity_name": {
-"description": "The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC),\nor a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of\n__catalog_name__.__schema_name__.__model_name__.\n",
+"description": "The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC), or a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of **catalog_name.schema_name.model_name**.",
 "$ref": "#/$defs/string"
 },
 "entity_version": {
-"description": "The version of the model in Databricks Model Registry to be served or empty if the entity is a FEATURE_SPEC.",
 "$ref": "#/$defs/string"
 },
 "environment_vars": {
-"description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity.\nNote: this is an experimental feature and subject to change. \nExample entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`",
+"description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. Example entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`",
 "$ref": "#/$defs/map/string"
 },
 "external_model": {
-"description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model,\nit cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later.\nThe task type of all external models within an endpoint must be the same.\n",
+"description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled) can be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model, it cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later. The task type of all external models within an endpoint must be the same.",
 "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModel"
 },
 "instance_profile_arn": {
@@ -6284,7 +6281,7 @@
 "$ref": "#/$defs/int"
 },
 "name": {
-"description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores.\nIf not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other\nentities, it defaults to \u003centity-name\u003e-\u003centity-version\u003e.\n",
+"description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.",
 "$ref": "#/$defs/string"
 },
 "scale_to_zero_enabled": {
@@ -6292,12 +6289,12 @@
 "$ref": "#/$defs/bool"
 },
 "workload_size": {
-"description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.\n",
+"description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.",
 "$ref": "#/$defs/string"
 },
 "workload_type": {
-"description": "The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n",
-"$ref": "#/$defs/string"
+"description": "The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is \"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).",
+"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServingModelWorkloadType"
 }
 },
 "additionalProperties": false
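
A served entity that opts into GPU compute under this schema might be written as follows; the entity name and version are placeholders, and the workload values come from the enums shown in the surrounding hunks:

    {
      "entity_name": "my_catalog.my_schema.my_model",
      "entity_version": "2",
      "workload_size": "Medium",
      "workload_type": "GPU_SMALL",
      "scale_to_zero_enabled": false
    }
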
@@ -6314,11 +6311,11 @@
 "type": "object",
 "properties": {
 "environment_vars": {
-"description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this model.\nNote: this is an experimental feature and subject to change. \nExample model environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`",
+"description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. Example entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`",
 "$ref": "#/$defs/map/string"
 },
 "instance_profile_arn": {
-"description": "ARN of the instance profile that the served model will use to access AWS resources.",
+"description": "ARN of the instance profile that the served entity uses to access AWS resources.",
 "$ref": "#/$defs/string"
 },
 "max_provisioned_throughput": {
@@ -6330,27 +6327,25 @@
 "$ref": "#/$defs/int"
 },
 "model_name": {
-"description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model,\nin the form of __catalog_name__.__schema_name__.__model_name__.\n",
 "$ref": "#/$defs/string"
 },
 "model_version": {
-"description": "The version of the model in Databricks Model Registry or Unity Catalog to be served.",
 "$ref": "#/$defs/string"
 },
 "name": {
-"description": "The name of a served model. It must be unique across an endpoint. If not specified, this field will default to \u003cmodel-name\u003e-\u003cmodel-version\u003e.\nA served model name can consist of alphanumeric characters, dashes, and underscores.\n",
+"description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.",
 "$ref": "#/$defs/string"
 },
 "scale_to_zero_enabled": {
-"description": "Whether the compute resources for the served model should scale down to zero.",
+"description": "Whether the compute resources for the served entity should scale down to zero.",
 "$ref": "#/$defs/bool"
 },
 "workload_size": {
-"description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n",
+"description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.",
 "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize"
 },
 "workload_type": {
-"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n",
+"description": "The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is \"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).",
 "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType"
 }
 },
@@ -6371,7 +6366,6 @@
 "oneOf": [
 {
 "type": "string",
-"description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n",
 "enum": [
 "Small",
 "Medium",
@@ -6388,11 +6382,28 @@
 "oneOf": [
 {
 "type": "string",
-"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n",
 "enum": [
 "CPU",
-"GPU_SMALL",
 "GPU_MEDIUM",
+"GPU_SMALL",
+"GPU_LARGE",
+"MULTIGPU_MEDIUM"
+]
+},
+{
+"type": "string",
+"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+}
+]
+},
+"serving.ServingModelWorkloadType": {
+"oneOf": [
+{
+"type": "string",
+"enum": [
+"CPU",
+"GPU_MEDIUM",
+"GPU_SMALL",
 "GPU_LARGE",
 "MULTIGPU_MEDIUM"
 ]
@@ -7269,5 +7280,5 @@
 "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Workspace"
 }
 },
-"additionalProperties": false
+"additionalProperties": {}
 }