mirror of https://github.com/databricks/cli.git

Merge branch 'main' into denik/wheel-patch

Commit a100671b8b
@@ -0,0 +1,3 @@
+command:
+ - python
+ - app.py
@@ -0,0 +1,8 @@
+bundle:
+  name: apps_yaml
+
+resources:
+  apps:
+    myapp:
+      name: myapp
+      source_code_path: ./app
@@ -0,0 +1,5 @@
+{
+  "method": "POST",
+  "path": "/api/2.0/workspace-files/import-file/Workspace/Users/[USERNAME]/.bundle/apps_yaml/default/files/app/app.yml",
+  "raw_body": "command:\n - python\n - app.py\n"
+}
@@ -0,0 +1,15 @@
+
+>>> [CLI] bundle validate
+Name: apps_yaml
+Target: default
+Workspace:
+  User: [USERNAME]
+  Path: /Workspace/Users/[USERNAME]/.bundle/apps_yaml/default
+
+Validation OK!
+
+>>> [CLI] bundle deploy
+Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/apps_yaml/default/files...
+Deploying resources...
+Updating deployment state...
+Deployment complete!
@@ -0,0 +1,4 @@
+trace $CLI bundle validate
+trace $CLI bundle deploy
+jq 'select(.path == "/api/2.0/workspace-files/import-file/Workspace/Users/[USERNAME]/.bundle/apps_yaml/default/files/app/app.yml")' out.requests.txt | sed 's/\\r//g' > out.app.yml.txt
+rm out.requests.txt
@@ -0,0 +1 @@
+print("Hello world!")
@@ -0,0 +1,12 @@
+bundle:
+  name: apps_config_section
+
+resources:
+  apps:
+    myapp:
+      name: myapp
+      source_code_path: ./app
+      config:
+        command:
+          - python
+          - app.py
@@ -0,0 +1,5 @@
+{
+  "method": "POST",
+  "path": "/api/2.0/workspace-files/import-file/Workspace/Users/[USERNAME]/.bundle/apps_config_section/default/files/app/app.yml",
+  "raw_body": "command:\n - python\n - app.py\n"
+}
@@ -0,0 +1,23 @@
+
+>>> [CLI] bundle validate
+Warning: App config section detected
+
+remove 'config' from app resource 'myapp' section and use app.yml file in the root of this app instead
+
+Name: apps_config_section
+Target: default
+Workspace:
+  User: [USERNAME]
+  Path: /Workspace/Users/[USERNAME]/.bundle/apps_config_section/default
+
+Found 1 warning
+
+>>> [CLI] bundle deploy
+Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/apps_config_section/default/files...
+Deploying resources...
+Updating deployment state...
+Deployment complete!
+Warning: App config section detected
+
+remove 'config' from app resource 'myapp' section and use app.yml file in the root of this app instead
+
@@ -0,0 +1,4 @@
+trace $CLI bundle validate
+trace $CLI bundle deploy
+jq 'select(.path == "/api/2.0/workspace-files/import-file/Workspace/Users/[USERNAME]/.bundle/apps_config_section/default/files/app/app.yml")' out.requests.txt > out.app.yml.txt
+rm out.requests.txt
@@ -0,0 +1,26 @@
+Cloud = false
+RecordRequests = true
+
+Ignore = [
+  '.databricks',
+]
+
+[[Server]]
+Pattern = "POST /api/2.0/apps"
+
+[[Server]]
+Pattern = "GET /api/2.0/apps/myapp"
+Response.Body = '''
+{
+  "name": "myapp",
+  "description": "",
+  "compute_status": {
+    "state": "ACTIVE",
+    "message": "App compute is active."
+  },
+  "app_status": {
+    "state": "RUNNING",
+    "message": "Application is running."
+  }
+}
+'''
@@ -0,0 +1,21 @@
+bundle:
+  name: state
+
+resources:
+  jobs:
+    test:
+      name: "test"
+      tasks:
+        - task_key: "test-task"
+          spark_python_task:
+            python_file: ./test.py
+          new_cluster:
+            spark_version: 15.4.x-scala2.12
+            node_type_id: i3.xlarge
+            data_security_mode: SINGLE_USER
+            num_workers: 0
+            spark_conf:
+              spark.master: "local[*, 4]"
+              spark.databricks.cluster.profile: singleNode
+            custom_tags:
+              ResourceClass: SingleNode
@@ -0,0 +1,4 @@
+{
+  "method": "GET",
+  "path": "/api/2.0/workspace-files/Workspace/Users/[USERNAME]/.bundle/state/default/state/terraform.tfstate"
+}
@@ -0,0 +1,12 @@
+
+>>> [CLI] bundle deploy
+Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/state/default/files...
+Deploying resources...
+Updating deployment state...
+Deployment complete!
+
+>>> [CLI] bundle deploy
+Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/state/default/files...
+Deploying resources...
+Updating deployment state...
+Deployment complete!
@@ -0,0 +1,4 @@
+trace $CLI bundle deploy
+trace $CLI bundle deploy # We do 2 deploys because only the 2nd deploy pulls state from remote after the 1st has created it
+jq 'select(.path == "/api/2.0/workspace-files/Workspace/Users/[USERNAME]/.bundle/state/default/state/terraform.tfstate")' out.requests.txt > out.state.txt
+rm out.requests.txt
@@ -0,0 +1 @@
+print("Hello world!")
@@ -0,0 +1,2 @@
+Cloud = false
+RecordRequests = true
@@ -111,6 +111,11 @@ func AddHandlers(server *testserver.Server) {
         return ""
     })

+    server.Handle("GET", "/api/2.0/workspace-files/{path:.*}", func(req testserver.Request) any {
+        path := req.Vars["path"]
+        return req.Workspace.WorkspaceFilesExportFile(path)
+    })
+
    server.Handle("GET", "/api/2.1/unity-catalog/current-metastore-assignment", func(req testserver.Request) any {
        return testMetastore
    })
@@ -3,8 +3,6 @@ package apps
 import (
     "context"
     "fmt"
-    "path"
-    "strings"

     "github.com/databricks/cli/bundle"
     "github.com/databricks/cli/libs/diag"

@@ -14,7 +12,6 @@ type validate struct{}

 func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
     var diags diag.Diagnostics
-    possibleConfigFiles := []string{"app.yml", "app.yaml"}
     usedSourceCodePaths := make(map[string]string)

     for key, app := range b.Config.Resources.Apps {

@@ -28,16 +25,12 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
         }
         usedSourceCodePaths[app.SourceCodePath] = key

-        for _, configFile := range possibleConfigFiles {
-            appPath := strings.TrimPrefix(app.SourceCodePath, b.Config.Workspace.FilePath)
-            cf := path.Join(appPath, configFile)
-            if _, err := b.SyncRoot.Stat(cf); err == nil {
-                diags = append(diags, diag.Diagnostic{
-                    Severity: diag.Error,
-                    Summary:  configFile + " detected",
-                    Detail:   fmt.Sprintf("remove %s and use 'config' property for app resource '%s' instead", cf, app.Name),
-                })
-            }
-        }
+        if app.Config != nil {
+            diags = append(diags, diag.Diagnostic{
+                Severity: diag.Warning,
+                Summary:  "App config section detected",
+                Detail:   fmt.Sprintf("remove 'config' from app resource '%s' section and use app.yml file in the root of this app instead", key),
+            })
+        }
     }

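The nil check above assumes the `config:` block from databricks.yml is decoded onto the app resource itself. A minimal sketch of that assumed shape (not part of this diff; the field tags are illustrative):

package resources

import "github.com/databricks/databricks-sdk-go/service/apps"

// Sketch of the assumed resource shape: the `config:` mapping from
// databricks.yml lands in Config, so the validator only needs a nil check
// instead of probing for app.yml files on disk.
type App struct {
    *apps.App

    SourceCodePath string         `json:"source_code_path"`
    Config         map[string]any `json:"config,omitempty"`
}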
@@ -17,46 +17,6 @@ import (
     "github.com/stretchr/testify/require"
 )

-func TestAppsValidate(t *testing.T) {
-    tmpDir := t.TempDir()
-    testutil.Touch(t, tmpDir, "app1", "app.yml")
-    testutil.Touch(t, tmpDir, "app2", "app.py")
-
-    b := &bundle.Bundle{
-        BundleRootPath: tmpDir,
-        SyncRootPath:   tmpDir,
-        SyncRoot:       vfs.MustNew(tmpDir),
-        Config: config.Root{
-            Workspace: config.Workspace{
-                FilePath: "/foo/bar/",
-            },
-            Resources: config.Resources{
-                Apps: map[string]*resources.App{
-                    "app1": {
-                        App: &apps.App{
-                            Name: "app1",
-                        },
-                        SourceCodePath: "./app1",
-                    },
-                    "app2": {
-                        App: &apps.App{
-                            Name: "app2",
-                        },
-                        SourceCodePath: "./app2",
-                    },
-                },
-            },
-        },
-    }
-
-    bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})
-
-    diags := bundle.ApplySeq(context.Background(), b, mutator.TranslatePaths(), Validate())
-    require.Len(t, diags, 1)
-    require.Equal(t, "app.yml detected", diags[0].Summary)
-    require.Contains(t, diags[0].Detail, "app.yml and use 'config' property for app resource")
-}
-
 func TestAppsValidateSameSourcePath(t *testing.T) {
     tmpDir := t.TempDir()
     testutil.Touch(t, tmpDir, "app1", "app.py")
@@ -112,6 +112,12 @@ func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error)
         }
     }

+    for k := range r.Schemas {
+        if k == key {
+            found = append(found, r.Schemas[k])
+        }
+    }
+
     if len(found) == 0 {
         return nil, fmt.Errorf("no such resource: %s", key)
     }
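With schemas added to this lookup, `bundle deployment bind` can resolve a schema by its config key. A hedged sketch of the call site, assuming the receiver lives in the bundle config package (names are illustrative):

// lookupForBind is an illustrative helper (not part of this diff) showing the
// resolution that the bind command relies on.
func lookupForBind(r *config.Resources, key string) (config.ConfigResource, error) {
    // A schema key like "schema1" now resolves too; an unknown key still
    // fails with "no such resource: <key>".
    return r.FindResourceByConfigKey(key)
}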
@@ -6,6 +6,10 @@ import (
     "net/url"
     "strings"

+    "github.com/databricks/databricks-sdk-go/apierr"
+
+    "github.com/databricks/cli/libs/log"
+
     "github.com/databricks/databricks-sdk-go"
     "github.com/databricks/databricks-sdk-go/marshal"
     "github.com/databricks/databricks-sdk-go/service/catalog"

@@ -25,8 +29,23 @@ type Schema struct {
     URL string `json:"url,omitempty" bundle:"internal"`
 }

-func (s *Schema) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
-    return false, errors.New("schema.Exists() is not supported")
+func (s *Schema) Exists(ctx context.Context, w *databricks.WorkspaceClient, fullName string) (bool, error) {
+    log.Tracef(ctx, "Checking if schema with fullName=%s exists", fullName)
+
+    _, err := w.Schemas.GetByFullName(ctx, fullName)
+    if err != nil {
+        log.Debugf(ctx, "schema with full name %s does not exist: %v", fullName, err)
+
+        var aerr *apierr.APIError
+        if errors.As(err, &aerr) {
+            if aerr.StatusCode == 404 {
+                return false, nil
+            }
+        }
+
+        return false, err
+    }
+    return true, nil
 }

 func (s *Schema) TerraformResourceName() string {
@@ -0,0 +1,26 @@
+package resources
+
+import (
+    "context"
+    "testing"
+
+    "github.com/databricks/databricks-sdk-go/apierr"
+    "github.com/databricks/databricks-sdk-go/experimental/mocks"
+    "github.com/stretchr/testify/mock"
+    "github.com/stretchr/testify/require"
+)
+
+func TestSchemaNotFound(t *testing.T) {
+    ctx := context.Background()
+
+    m := mocks.NewMockWorkspaceClient(t)
+    m.GetMockSchemasAPI().On("GetByFullName", mock.Anything, "non-existent-schema").Return(nil, &apierr.APIError{
+        StatusCode: 404,
+    })
+
+    s := &Schema{}
+    exists, err := s.Exists(ctx, m.WorkspaceClient, "non-existent-schema")
+
+    require.Falsef(t, exists, "Exists should return false when getting a 404 response from Workspace")
+    require.NoErrorf(t, err, "Exists should not return an error when getting a 404 response from Workspace")
+}
@@ -1,14 +1,94 @@
 package deploy

 import (
+    "bytes"
+    "context"
+    "fmt"
+    "io"
+    "io/fs"
+    "net/http"
+    "net/url"
+    "strings"
+
     "github.com/databricks/cli/bundle"
     "github.com/databricks/cli/libs/filer"
+    "github.com/databricks/databricks-sdk-go/client"
 )

 // FilerFactory is a function that returns a filer.Filer.
 type FilerFactory func(b *bundle.Bundle) (filer.Filer, error)

-// StateFiler returns a filer.Filer that can be used to read/write state files.
-func StateFiler(b *bundle.Bundle) (filer.Filer, error) {
-    return filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath)
+type stateFiler struct {
+    filer filer.Filer
+
+    apiClient *client.DatabricksClient
+    root      filer.WorkspaceRootPath
+}
+
+func (s stateFiler) Delete(ctx context.Context, path string, mode ...filer.DeleteMode) error {
+    return s.filer.Delete(ctx, path, mode...)
+}
+
+// Mkdir implements filer.Filer.
+func (s stateFiler) Mkdir(ctx context.Context, path string) error {
+    return s.filer.Mkdir(ctx, path)
+}
+
+func (s stateFiler) Read(ctx context.Context, path string) (io.ReadCloser, error) {
+    absPath, err := s.root.Join(path)
+    if err != nil {
+        return nil, err
+    }
+
+    stat, err := s.Stat(ctx, path)
+    if err != nil {
+        return nil, err
+    }
+    if stat.IsDir() {
+        return nil, fmt.Errorf("not a file: %s", absPath)
+    }
+
+    var buf bytes.Buffer
+    urlPath := "/api/2.0/workspace-files/" + url.PathEscape(strings.TrimLeft(absPath, "/"))
+    err = s.apiClient.Do(ctx, http.MethodGet, urlPath, nil, nil, nil, &buf)
+    if err != nil {
+        return nil, err
+    }
+
+    return io.NopCloser(&buf), nil
+}
+
+func (s stateFiler) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) {
+    return s.filer.ReadDir(ctx, path)
+}
+
+func (s stateFiler) Stat(ctx context.Context, name string) (fs.FileInfo, error) {
+    return s.filer.Stat(ctx, name)
+}
+
+func (s stateFiler) Write(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode) error {
+    return s.filer.Write(ctx, path, reader, mode...)
+}
+
+// StateFiler returns a filer.Filer that can be used to read/write state files.
+// We use a custom workspace filer which uses the workspace-files API to read state files.
+// This API has a limit higher than 10 MB and allows exporting large state files.
+// We don't use the same API for read elsewhere because it doesn't correctly get the file
+// content for notebooks and returns a "File Not Found" error instead.
+func StateFiler(b *bundle.Bundle) (filer.Filer, error) {
+    f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath)
+    if err != nil {
+        return nil, err
+    }
+
+    apiClient, err := client.New(b.WorkspaceClient().Config)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create API client: %w", err)
+    }
+
+    return stateFiler{
+        filer:     f,
+        root:      filer.NewWorkspaceRootPath(b.Config.Workspace.StatePath),
+        apiClient: apiClient,
+    }, nil
 }
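The new type is a decorator over the standard workspace filer: every method delegates except Read, which calls the workspace-files endpoint directly. A hedged caller sketch (not part of this diff; function and file names are illustrative):

package deploy

import (
    "context"
    "io"

    "github.com/databricks/cli/bundle"
)

// pullState shows the intended use: Read goes through the workspace-files
// API, so state files above the 10 MB export limit still download, while
// Write keeps using the regular workspace filer.
func pullState(ctx context.Context, b *bundle.Bundle) ([]byte, error) {
    f, err := StateFiler(b)
    if err != nil {
        return nil, err
    }
    r, err := f.Read(ctx, "terraform.tfstate") // resolved under b.Config.Workspace.StatePath
    if err != nil {
        return nil, err
    }
    defer r.Close()
    return io.ReadAll(r)
}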
@@ -10,8 +10,6 @@ import (
     "github.com/databricks/cli/libs/log"
 )

-const MaxStateFileSize = 10 * 1024 * 1024 // 10MB
-
 type statePush struct {
     filerFactory FilerFactory
 }

@@ -37,17 +35,6 @@ func (s *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
     }
     defer local.Close()

-    if !b.Config.Bundle.Force {
-        state, err := local.Stat()
-        if err != nil {
-            return diag.FromErr(err)
-        }
-
-        if state.Size() > MaxStateFileSize {
-            return diag.Errorf("Deployment state file size exceeds the maximum allowed size of %d bytes. Please reduce the number of resources in your bundle, split your bundle into multiple or re-run the command with --force flag.", MaxStateFileSize)
-        }
-    }
-
     log.Infof(ctx, "Writing local deployment state file to remote state directory")
     err = f.Write(ctx, DeploymentStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists)
     if err != nil {
@@ -47,17 +47,6 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
     }
     defer local.Close()

-    if !b.Config.Bundle.Force {
-        state, err := local.Stat()
-        if err != nil {
-            return diag.FromErr(err)
-        }
-
-        if state.Size() > deploy.MaxStateFileSize {
-            return diag.Errorf("Terraform state file size exceeds the maximum allowed size of %d bytes. Please reduce the number of resources in your bundle, split your bundle into multiple or re-run the command with --force flag", deploy.MaxStateFileSize)
-        }
-    }
-
     // Upload state file from local cache directory to filer.
     cmdio.LogString(ctx, "Updating deployment state...")
     log.Infof(ctx, "Writing local state file to remote state directory")
@@ -3,7 +3,6 @@ package terraform
 import (
     "context"
     "encoding/json"
-    "fmt"
     "io"
     "testing"


@@ -60,29 +59,3 @@ func TestStatePush(t *testing.T) {
     diags := bundle.Apply(ctx, b, m)
     assert.NoError(t, diags.Error())
 }
-
-func TestStatePushLargeState(t *testing.T) {
-    mock := mockfiler.NewMockFiler(t)
-    m := &statePush{
-        identityFiler(mock),
-    }
-
-    ctx := context.Background()
-    b := statePushTestBundle(t)
-
-    largeState := map[string]any{}
-    for i := range 1000000 {
-        largeState[fmt.Sprintf("field_%d", i)] = i
-    }
-
-    // Write a stale local state file.
-    writeLocalState(t, ctx, b, largeState)
-    diags := bundle.Apply(ctx, b, m)
-    assert.ErrorContains(t, diags.Error(), "Terraform state file size exceeds the maximum allowed size of 10485760 bytes. Please reduce the number of resources in your bundle, split your bundle into multiple or re-run the command with --force flag")
-
-    // Force the write.
-    b = statePushTestBundle(t)
-    b.Config.Bundle.Force = true
-    diags = bundle.Apply(ctx, b, m)
-    assert.NoError(t, diags.Error())
-}
-
@@ -13,8 +13,10 @@ import (
 type checkForSameNameLibraries struct{}

 var patterns = []dyn.Pattern{
-    taskLibrariesPattern.Append(dyn.AnyIndex(), dyn.AnyKey()),
-    forEachTaskLibrariesPattern.Append(dyn.AnyIndex(), dyn.AnyKey()),
+    taskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("whl")),
+    taskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("jar")),
+    forEachTaskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("whl")),
+    forEachTaskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("jar")),
     envDepsPattern.Append(dyn.AnyIndex()),
 }

@@ -15,8 +15,62 @@ import (
     "github.com/google/uuid"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
+
+    "github.com/databricks/databricks-sdk-go/service/catalog"
 )

+func TestBindSchemaToExistingSchema(t *testing.T) {
+    ctx, wt := acc.UcWorkspaceTest(t)
+
+    // create a pre-defined schema:
+    uniqueId := uuid.New().String()
+    predefinedSchema, err := wt.W.Schemas.Create(ctx, catalog.CreateSchema{
+        CatalogName: "main",
+        Name:        "test-schema-" + uniqueId,
+    })
+    require.NoError(t, err)
+    t.Cleanup(func() {
+        err := wt.W.Schemas.DeleteByFullName(ctx, predefinedSchema.FullName)
+        require.NoError(t, err)
+    })
+
+    // setup the bundle:
+    bundleRoot := initTestTemplate(t, ctx, "uc_schema_only", map[string]any{
+        "unique_id": uniqueId,
+    })
+    ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
+
+    // run the bind command:
+    c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "schema1", predefinedSchema.FullName, "--auto-approve")
+    _, _, err = c.Run()
+    require.NoError(t, err)
+
+    // deploy the bundle:
+    deployBundle(t, ctx, bundleRoot)
+
+    // Check that predefinedSchema is updated with config from bundle
+    w, err := databricks.NewWorkspaceClient()
+    require.NoError(t, err)
+
+    updatedSchema, err := w.Schemas.GetByFullName(ctx, predefinedSchema.FullName)
+    require.NoError(t, err)
+    require.Equal(t, updatedSchema.SchemaId, predefinedSchema.SchemaId)
+    require.Equal(t, "This schema was created from DABs", updatedSchema.Comment)
+
+    // unbind the schema:
+    c = testcli.NewRunner(t, ctx, "bundle", "deployment", "unbind", "schema1")
+    _, _, err = c.Run()
+    require.NoError(t, err)
+
+    // destroy the bundle:
+    destroyBundle(t, ctx, bundleRoot)
+
+    // Check that schema is unbound and exists after bundle is destroyed
+    postDestroySchema, err := w.Schemas.GetByFullName(ctx, predefinedSchema.FullName)
+    require.NoError(t, err)
+    require.Equal(t, postDestroySchema.SchemaId, predefinedSchema.SchemaId)
+}
+
 func TestBindJobToExistingJob(t *testing.T) {
     ctx, wt := acc.WorkspaceTest(t)
     gt := &generateJobTest{T: wt, w: wt.W}
@@ -0,0 +1,8 @@
+{
+  "properties": {
+    "unique_id": {
+      "type": "string",
+      "description": "Unique ID for the schema name"
+    }
+  }
+}
@@ -0,0 +1,13 @@
+bundle:
+  name: uc-schema-only
+
+workspace:
+  root_path: "~/.bundle/{{.unique_id}}"
+
+resources:
+  schemas:
+    schema1:
+      name: test-schema-{{.unique_id}}
+      catalog_name: main
+      comment: This schema was created from DABs
+
@@ -31,8 +31,8 @@ var pythonVersionsShort = []string{
 }

 var extraInstalls = map[string][]string{
-    "3.12": {"setuptools"},
-    "3.13": {"setuptools"},
+    "3.12": {"setuptools==75.8.2"},
+    "3.13": {"setuptools==75.8.2"},
 }

 func TestDefaultPython(t *testing.T) {
@@ -2,3 +2,7 @@ Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/$UNIQUE_PRJ/files...
 Deploying resources...
 Updating deployment state...
 Deployment complete!
+Warning: App config section detected
+
+remove 'config' from app resource 'test_app' section and use app.yml file in the root of this app instead
+
@@ -1,7 +1,11 @@
+Warning: App config section detected
+
+remove 'config' from app resource 'test_app' section and use app.yml file in the root of this app instead
+
 Name: basic
 Target: default
 Workspace:
   User: [USERNAME]
   Path: /Workspace/Users/[USERNAME]/.bundle/$UNIQUE_PRJ

-Validation OK!
+Found 1 warning
@@ -2,10 +2,15 @@ package auth_test

 import (
     "context"
+    "path/filepath"
     "regexp"
     "strings"
     "testing"

+    "github.com/databricks/cli/internal/testutil"
+    "github.com/databricks/cli/libs/databrickscfg"
+    "github.com/databricks/databricks-sdk-go/config"
+
     "github.com/databricks/cli/internal/testcli"
     "github.com/databricks/databricks-sdk-go"
     "github.com/stretchr/testify/require"

@@ -34,8 +39,19 @@ func TestAuthDescribeSuccess(t *testing.T) {
 }

 func TestAuthDescribeFailure(t *testing.T) {
-    t.Skipf("Skipping because of https://github.com/databricks/cli/issues/2010")
+    testutil.CleanupEnvironment(t)
+
+    // set up a custom config file:
+    home := t.TempDir()
+    cfg := &config.Config{
+        ConfigFile: filepath.Join(home, "customcfg"),
+        Profile:    "profile1",
+    }
+    err := databrickscfg.SaveToProfile(context.Background(), cfg)
+    require.NoError(t, err)
+    t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg"))
+
+    // run the command:
     ctx := context.Background()
     stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "auth", "describe", "--profile", "nonexistent")
     outStr := stdout.String()

@@ -44,10 +60,5 @@ func TestAuthDescribeFailure(t *testing.T) {
     require.Contains(t, outStr, "Unable to authenticate: resolve")
     require.Contains(t, outStr, "has no nonexistent profile configured")
     require.Contains(t, outStr, "Current configuration:")
-
-    w, err := databricks.NewWorkspaceClient(&databricks.Config{})
-    require.NoError(t, err)
-
-    require.Contains(t, outStr, "✓ host: "+w.Config.Host)
     require.Contains(t, outStr, "✓ profile: nonexistent (from --profile flag)")
 }
@@ -13,19 +13,11 @@ import (
 // The original environment is restored upon test completion.
 // Note: use of this function is incompatible with parallel execution.
 func CleanupEnvironment(t TestingT) {
-    // Restore environment when test finishes.
-    environ := os.Environ()
-    t.Cleanup(func() {
-        // Restore original environment.
-        for _, kv := range environ {
-            kvs := strings.SplitN(kv, "=", 2)
-            os.Setenv(kvs[0], kvs[1])
-        }
-    })
-
     path := os.Getenv("PATH")
     pwd := os.Getenv("PWD")
-    os.Clearenv()
+
+    // Clear all environment variables.
+    NullEnvironment(t)

     // We use t.Setenv instead of os.Setenv because the former actively
     // prevents a test being run with t.Parallel. Modifying the environment

@@ -38,6 +30,23 @@ func CleanupEnvironment(t TestingT) {
     }
 }

+// NullEnvironment sets up an empty environment with absolutely no environment variables set.
+// The original environment is restored upon test completion.
+// Note: use of this function is incompatible with parallel execution.
+func NullEnvironment(t TestingT) {
+    // Restore environment when test finishes.
+    environ := os.Environ()
+    t.Cleanup(func() {
+        // Restore original environment.
+        for _, kv := range environ {
+            kvs := strings.SplitN(kv, "=", 2)
+            os.Setenv(kvs[0], kvs[1])
+        }
+    })
+
+    os.Clearenv()
+}
+
 // Changes into specified directory for the duration of the test.
 // Returns the current working directory.
 func Chdir(t TestingT, dir string) string {
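A short sketch of the extracted helper used on its own (not part of this diff; the test name is illustrative, and the import path follows the internal/testutil usage seen in the auth test above):

package testutil_test

import (
    "os"
    "testing"

    "github.com/databricks/cli/internal/testutil"
)

func TestRunsWithEmptyEnvironment(t *testing.T) {
    // Clear every environment variable; t.Cleanup restores the original set
    // when the test finishes.
    testutil.NullEnvironment(t)

    if v, ok := os.LookupEnv("PATH"); ok {
        t.Fatalf("expected empty environment, found PATH=%q", v)
    }
}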
@@ -1,6 +1,13 @@
 package auth

-import "github.com/databricks/databricks-sdk-go/config"
+import (
+    "fmt"
+    "os"
+    "slices"
+    "strings"
+
+    "github.com/databricks/databricks-sdk-go/config"
+)

 // Env generates the authentication environment variables we need to set for
 // downstream applications from the CLI to work correctly.

@@ -38,3 +45,71 @@ func GetEnvFor(name string) (string, bool) {
     return "", false
 }
+
+// envVars returns the list of environment variables that the SDK reads to configure
+// authentication.
+// This is useful for spawning subprocesses since you can unset all auth environment
+// variables to clean up the environment before configuring authentication for the
+// child process.
+func envVars() []string {
+    out := []string{}
+
+    for _, attr := range config.ConfigAttributes {
+        if len(attr.EnvVars) == 0 {
+            continue
+        }
+
+        out = append(out, attr.EnvVars...)
+    }
+
+    return out
+}
+
+// ProcessEnv generates the environment variables that should be set to authenticate
+// downstream processes to use the same auth credentials as in cfg.
+func ProcessEnv(cfg *config.Config) []string {
+    // We want child processes to inherit environment variables like $HOME or $HTTPS_PROXY
+    // because they influence auth resolution.
+    base := os.Environ()
+
+    out := []string{}
+    authEnvVars := envVars()
+
+    // Remove any existing auth environment variables. This is done because
+    // the CLI offers multiple modalities of configuring authentication like
+    // `--profile` or `DATABRICKS_CONFIG_PROFILE` or `profile: <profile>` in the
+    // bundle config file.
+    //
+    // Each of these modalities has different priorities and thus we don't want
+    // any auth configuration to piggyback into the child process environment.
+    //
+    // This is a precaution to avoid conflicting auth configurations being passed
+    // to the child telemetry process.
+    //
+    // Normally this should be unnecessary because the SDK should error if multiple
+    // authentication methods have been configured. But there is no harm in doing this
+    // as a precaution.
+    for _, v := range base {
+        k, _, found := strings.Cut(v, "=")
+        if !found {
+            continue
+        }
+        if slices.Contains(authEnvVars, k) {
+            continue
+        }
+        out = append(out, v)
+    }
+
+    // Now add the necessary authentication environment variables.
+    newEnv := Env(cfg)
+    for k, v := range newEnv {
+        out = append(out, fmt.Sprintf("%s=%s", k, v))
+    }
+
+    // Sort the environment variables so that the output is deterministic.
+    // Keeping the output deterministic helps with reproducibility and keeping the
+    // behavior consistent in case there are any issues.
+    slices.Sort(out)
+
+    return out
+}
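A hedged sketch of the intended consumer: spawning a child process whose only auth-related variables are the ones derived from cfg. The import path and the command name are assumptions for illustration:

package main

import (
    "context"
    "os/exec"

    "github.com/databricks/cli/libs/auth"
    "github.com/databricks/databricks-sdk-go/config"
)

// runChild launches a subprocess with a scrubbed environment: the parent's
// variables minus every SDK auth variable, plus those generated from cfg.
func runChild(ctx context.Context, cfg *config.Config) error {
    cmd := exec.CommandContext(ctx, "child-process") // placeholder command name
    cmd.Env = auth.ProcessEnv(cfg)
    return cmd.Run()
}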
@@ -79,3 +79,51 @@ func TestGetEnvFor(t *testing.T) {
     assert.False(t, ok)
     assert.Empty(t, out)
 }
+
+func TestAuthEnvVars(t *testing.T) {
+    // Few common environment variables that we expect the SDK to support.
+    contains := []string{
+        // Generic attributes.
+        "DATABRICKS_HOST",
+        "DATABRICKS_CONFIG_PROFILE",
+        "DATABRICKS_AUTH_TYPE",
+        "DATABRICKS_METADATA_SERVICE_URL",
+        "DATABRICKS_CONFIG_FILE",
+
+        // OAuth specific attributes.
+        "DATABRICKS_CLIENT_ID",
+        "DATABRICKS_CLIENT_SECRET",
+        "DATABRICKS_CLI_PATH",
+
+        // Google specific attributes.
+        "DATABRICKS_GOOGLE_SERVICE_ACCOUNT",
+        "GOOGLE_CREDENTIALS",
+
+        // Personal access token specific attributes.
+        "DATABRICKS_TOKEN",
+
+        // Databricks password specific attributes.
+        "DATABRICKS_USERNAME",
+        "DATABRICKS_PASSWORD",
+
+        // Account authentication attributes.
+        "DATABRICKS_ACCOUNT_ID",
+
+        // Azure attributes
+        "DATABRICKS_AZURE_RESOURCE_ID",
+        "ARM_USE_MSI",
+        "ARM_CLIENT_SECRET",
+        "ARM_CLIENT_ID",
+        "ARM_TENANT_ID",
+        "ARM_ENVIRONMENT",
+
+        // Github attributes
+        "ACTIONS_ID_TOKEN_REQUEST_URL",
+        "ACTIONS_ID_TOKEN_REQUEST_TOKEN",
+    }
+
+    out := envVars()
+    for _, v := range contains {
+        assert.Contains(t, out, v)
+    }
+}
@@ -83,6 +83,13 @@ func (s *FakeWorkspace) WorkspaceFilesImportFile(path string, body []byte) {
     s.files[path] = body
 }

+func (s *FakeWorkspace) WorkspaceFilesExportFile(path string) []byte {
+    if !strings.HasPrefix(path, "/") {
+        path = "/" + path
+    }
+    return s.files[path]
+}
+
 func (s *FakeWorkspace) JobsCreate(request jobs.CreateJob) Response {
     jobId := s.nextJobId
     s.nextJobId++