mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'origin/main' into dashboards
Commit: 3be89dfc59
@@ -0,0 +1,60 @@
name: integration

on:

  pull_request:
    types: [opened, synchronize]

  merge_group:


jobs:
  trigger-tests:
    if: github.event_name == 'pull_request'
    runs-on: ubuntu-latest
    environment: "test-trigger-is"

    steps:
      - uses: actions/checkout@v4

      - name: Generate GitHub App Token
        id: generate-token
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}
          private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }}
          owner: ${{ secrets.ORG_NAME }}
          repositories: ${{ secrets.REPO_NAME }}

      - name: Trigger Workflow in Another Repo
        env:
          GH_TOKEN: ${{ steps.generate-token.outputs.token }}
        run: |
          gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{ secrets.REPO_NAME }} \
            --ref main \
            -f pull_request_number=${{ github.event.pull_request.number }} \
            -f commit_sha=${{ github.event.pull_request.head.sha }}


  # Statuses and checks apply to specific commits (by hash).
  # Enforcement of required checks is done both at the PR level and the merge queue level.
  # In case of multiple commits in a single PR, the hash of the squashed commit
  # will not match the one for the latest (approved) commit in the PR.
  # We auto-approve the check for the merge queue for two reasons:
  # * The queue times out due to the duration of the tests.
  # * It avoids running the integration tests twice, since they were already run at the tip of the branch before squashing.
  auto-approve:
    if: github.event_name == 'merge_group'
    runs-on: ubuntu-latest
    steps:
      - name: Mark Check
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        shell: bash
        run: |
          gh api -X POST -H "Accept: application/vnd.github+json" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/${{ github.repository }}/statuses/${{ github.sha }} \
            -f 'state=success' \
            -f 'context=Integration Tests Check'
@@ -1,5 +1,7 @@
package resources

import "fmt"

// Permission holds the permission level setting for a single principal.
// Multiple of these can be defined on any resource.
type Permission struct {
@@ -9,3 +11,19 @@ type Permission struct {
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	GroupName            string `json:"group_name,omitempty"`
}

func (p Permission) String() string {
	if p.UserName != "" {
		return fmt.Sprintf("level: %s, user_name: %s", p.Level, p.UserName)
	}

	if p.ServicePrincipalName != "" {
		return fmt.Sprintf("level: %s, service_principal_name: %s", p.Level, p.ServicePrincipalName)
	}

	if p.GroupName != "" {
		return fmt.Sprintf("level: %s, group_name: %s", p.Level, p.GroupName)
	}

	return fmt.Sprintf("level: %s", p.Level)
}
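As a side note (not part of the diff): the String() method above gives permissions a stable human-readable form that the validation diagnostics later in this change embed verbatim. A small illustrative sketch, with made-up principal names, assuming the package lives at github.com/databricks/cli/bundle/config/resources as the other imports in this diff suggest:

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/bundle/config/resources"
)

func main() {
	// A user-scoped permission renders its user_name.
	fmt.Println(resources.Permission{Level: "CAN_MANAGE", UserName: "foo@bar.com"})
	// Output: level: CAN_MANAGE, user_name: foo@bar.com

	// A group-scoped permission renders its group_name instead ("data-eng" is illustrative).
	fmt.Println(resources.Permission{Level: "CAN_VIEW", GroupName: "data-eng"})
	// Output: level: CAN_VIEW, group_name: data-eng
}
```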
@@ -0,0 +1,126 @@
package validate

import (
	"context"
	"fmt"
	"path"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/libraries"
	"github.com/databricks/cli/bundle/permissions"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/databricks-sdk-go/apierr"
	"github.com/databricks/databricks-sdk-go/service/workspace"
	"golang.org/x/sync/errgroup"
)

type folderPermissions struct {
}

// Apply implements bundle.ReadOnlyMutator.
func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) diag.Diagnostics {
	if len(b.Config().Permissions) == 0 {
		return nil
	}

	rootPath := b.Config().Workspace.RootPath
	paths := []string{}
	if !libraries.IsVolumesPath(rootPath) && !libraries.IsWorkspaceSharedPath(rootPath) {
		paths = append(paths, rootPath)
	}

	if !strings.HasSuffix(rootPath, "/") {
		rootPath += "/"
	}

	for _, p := range []string{
		b.Config().Workspace.ArtifactPath,
		b.Config().Workspace.FilePath,
		b.Config().Workspace.StatePath,
		b.Config().Workspace.ResourcePath,
	} {
		if libraries.IsWorkspaceSharedPath(p) || libraries.IsVolumesPath(p) {
			continue
		}

		if strings.HasPrefix(p, rootPath) {
			continue
		}

		paths = append(paths, p)
	}

	var diags diag.Diagnostics
	g, ctx := errgroup.WithContext(ctx)
	results := make([]diag.Diagnostics, len(paths))
	for i, p := range paths {
		g.Go(func() error {
			results[i] = checkFolderPermission(ctx, b, p)
			return nil
		})
	}

	if err := g.Wait(); err != nil {
		return diag.FromErr(err)
	}

	for _, r := range results {
		diags = diags.Extend(r)
	}

	return diags
}

func checkFolderPermission(ctx context.Context, b bundle.ReadOnlyBundle, folderPath string) diag.Diagnostics {
	w := b.WorkspaceClient().Workspace
	obj, err := getClosestExistingObject(ctx, w, folderPath)
	if err != nil {
		return diag.FromErr(err)
	}

	objPermissions, err := w.GetPermissions(ctx, workspace.GetWorkspaceObjectPermissionsRequest{
		WorkspaceObjectId:   fmt.Sprint(obj.ObjectId),
		WorkspaceObjectType: "directories",
	})
	if err != nil {
		return diag.FromErr(err)
	}

	p := permissions.ObjectAclToResourcePermissions(folderPath, objPermissions.AccessControlList)
	return p.Compare(b.Config().Permissions)
}

func getClosestExistingObject(ctx context.Context, w workspace.WorkspaceInterface, folderPath string) (*workspace.ObjectInfo, error) {
	for {
		obj, err := w.GetStatusByPath(ctx, folderPath)
		if err == nil {
			return obj, nil
		}

		if !apierr.IsMissing(err) {
			return nil, err
		}

		parent := path.Dir(folderPath)
		// If the parent is the same as the current folder, we have reached the root.
		if folderPath == parent {
			break
		}

		folderPath = parent
	}

	return nil, fmt.Errorf("folder %s and its parent folders do not exist", folderPath)
}

// Name implements bundle.ReadOnlyMutator.
func (f *folderPermissions) Name() string {
	return "validate:folder_permissions"
}

// ValidateFolderPermissions validates that the permissions for the folders in the workspace file system match
// the permissions in the top-level permissions section of the bundle.
func ValidateFolderPermissions() bundle.ReadOnlyMutator {
	return &folderPermissions{}
}
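Not part of the diff: a standalone sketch of the parent walk in getClosestExistingObject above. It mirrors only the path.Dir loop (no workspace API calls) and shows the sequence of paths that get probed before the function gives up at the root:

```go
package main

import (
	"fmt"
	"path"
)

// candidatePaths yields the folder itself and then each parent, stopping once
// path.Dir no longer changes the path (i.e. the filesystem root was reached).
func candidatePaths(folderPath string) []string {
	var out []string
	for {
		out = append(out, folderPath)
		parent := path.Dir(folderPath)
		if parent == folderPath {
			break
		}
		folderPath = parent
	}
	return out
}

func main() {
	fmt.Println(candidatePaths("/Workspace/Users/foo@bar.com/artifacts"))
	// Output:
	// [/Workspace/Users/foo@bar.com/artifacts /Workspace/Users/foo@bar.com /Workspace/Users /Workspace /]
}
```

This matches the mock expectations in the test file that follows: each candidate is checked with GetStatusByPath until one exists.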
@@ -0,0 +1,208 @@
package validate

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/bundle/permissions"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/databricks-sdk-go/apierr"
	"github.com/databricks/databricks-sdk-go/experimental/mocks"
	"github.com/databricks/databricks-sdk-go/service/workspace"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestFolderPermissionsInheritedWhenRootPathDoesNotExist(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				RootPath:     "/Workspace/Users/foo@bar.com",
				ArtifactPath: "/Workspace/Users/otherfoo@bar.com/artifacts",
				FilePath:     "/Workspace/Users/foo@bar.com/files",
				StatePath:    "/Workspace/Users/foo@bar.com/state",
				ResourcePath: "/Workspace/Users/foo@bar.com/resources",
			},
			Permissions: []resources.Permission{
				{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
			},
		},
	}
	m := mocks.NewMockWorkspaceClient(t)
	api := m.GetMockWorkspaceAPI()
	api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com/artifacts").Return(nil, &apierr.APIError{
		StatusCode: 404,
		ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
	})
	api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/otherfoo@bar.com").Return(nil, &apierr.APIError{
		StatusCode: 404,
		ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
	})
	api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(nil, &apierr.APIError{
		StatusCode: 404,
		ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
	})
	api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users").Return(nil, &apierr.APIError{
		StatusCode: 404,
		ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
	})
	api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace").Return(&workspace.ObjectInfo{
		ObjectId: 1234,
	}, nil)

	api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
		WorkspaceObjectId:   "1234",
		WorkspaceObjectType: "directories",
	}).Return(&workspace.WorkspaceObjectPermissions{
		ObjectId: "1234",
		AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
			{
				UserName: "foo@bar.com",
				AllPermissions: []workspace.WorkspaceObjectPermission{
					{PermissionLevel: "CAN_MANAGE"},
				},
			},
		},
	}, nil)

	b.SetWorkpaceClient(m.WorkspaceClient)
	rb := bundle.ReadOnly(b)

	diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
	require.Empty(t, diags)
}

func TestValidateFolderPermissionsFailsOnMissingBundlePermission(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				RootPath:     "/Workspace/Users/foo@bar.com",
				ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts",
				FilePath:     "/Workspace/Users/foo@bar.com/files",
				StatePath:    "/Workspace/Users/foo@bar.com/state",
				ResourcePath: "/Workspace/Users/foo@bar.com/resources",
			},
			Permissions: []resources.Permission{
				{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
			},
		},
	}
	m := mocks.NewMockWorkspaceClient(t)
	api := m.GetMockWorkspaceAPI()
	api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{
		ObjectId: 1234,
	}, nil)

	api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
		WorkspaceObjectId:   "1234",
		WorkspaceObjectType: "directories",
	}).Return(&workspace.WorkspaceObjectPermissions{
		ObjectId: "1234",
		AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
			{
				UserName: "foo@bar.com",
				AllPermissions: []workspace.WorkspaceObjectPermission{
					{PermissionLevel: "CAN_MANAGE"},
				},
			},
			{
				UserName: "foo2@bar.com",
				AllPermissions: []workspace.WorkspaceObjectPermission{
					{PermissionLevel: "CAN_MANAGE"},
				},
			},
		},
	}, nil)

	b.SetWorkpaceClient(m.WorkspaceClient)
	rb := bundle.ReadOnly(b)

	diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
	require.Len(t, diags, 1)
	require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary)
	require.Equal(t, diag.Warning, diags[0].Severity)
	require.Equal(t, "The following permissions apply to the workspace folder at \"/Workspace/Users/foo@bar.com\" but are not configured in the bundle:\n- level: CAN_MANAGE, user_name: foo2@bar.com\n", diags[0].Detail)
}

func TestValidateFolderPermissionsFailsOnPermissionMismatch(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				RootPath:     "/Workspace/Users/foo@bar.com",
				ArtifactPath: "/Workspace/Users/foo@bar.com/artifacts",
				FilePath:     "/Workspace/Users/foo@bar.com/files",
				StatePath:    "/Workspace/Users/foo@bar.com/state",
				ResourcePath: "/Workspace/Users/foo@bar.com/resources",
			},
			Permissions: []resources.Permission{
				{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
			},
		},
	}
	m := mocks.NewMockWorkspaceClient(t)
	api := m.GetMockWorkspaceAPI()
	api.EXPECT().GetStatusByPath(mock.Anything, "/Workspace/Users/foo@bar.com").Return(&workspace.ObjectInfo{
		ObjectId: 1234,
	}, nil)

	api.EXPECT().GetPermissions(mock.Anything, workspace.GetWorkspaceObjectPermissionsRequest{
		WorkspaceObjectId:   "1234",
		WorkspaceObjectType: "directories",
	}).Return(&workspace.WorkspaceObjectPermissions{
		ObjectId: "1234",
		AccessControlList: []workspace.WorkspaceObjectAccessControlResponse{
			{
				UserName: "foo2@bar.com",
				AllPermissions: []workspace.WorkspaceObjectPermission{
					{PermissionLevel: "CAN_MANAGE"},
				},
			},
		},
	}, nil)

	b.SetWorkpaceClient(m.WorkspaceClient)
	rb := bundle.ReadOnly(b)

	diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
	require.Len(t, diags, 1)
	require.Equal(t, "untracked permissions apply to target workspace path", diags[0].Summary)
	require.Equal(t, diag.Warning, diags[0].Severity)
}

func TestValidateFolderPermissionsFailsOnNoRootFolder(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				RootPath:     "/NotExisting",
				ArtifactPath: "/NotExisting/artifacts",
				FilePath:     "/NotExisting/files",
				StatePath:    "/NotExisting/state",
				ResourcePath: "/NotExisting/resources",
			},
			Permissions: []resources.Permission{
				{Level: permissions.CAN_MANAGE, UserName: "foo@bar.com"},
			},
		},
	}
	m := mocks.NewMockWorkspaceClient(t)
	api := m.GetMockWorkspaceAPI()
	api.EXPECT().GetStatusByPath(mock.Anything, "/NotExisting").Return(nil, &apierr.APIError{
		StatusCode: 404,
		ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
	})
	api.EXPECT().GetStatusByPath(mock.Anything, "/").Return(nil, &apierr.APIError{
		StatusCode: 404,
		ErrorCode:  "RESOURCE_DOES_NOT_EXIST",
	})

	b.SetWorkpaceClient(m.WorkspaceClient)
	rb := bundle.ReadOnly(b)

	diags := bundle.ApplyReadOnly(context.Background(), rb, ValidateFolderPermissions())
	require.Len(t, diags, 1)
	require.Equal(t, "folder / and its parent folders do not exist", diags[0].Summary)
	require.Equal(t, diag.Error, diags[0].Severity)
}
@@ -35,6 +35,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
		FilesToSync(),
		ValidateSyncPatterns(),
		JobTaskClusterSpec(),
		ValidateFolderPermissions(),
	))
}
@@ -36,3 +36,12 @@ func IsWorkspaceLibrary(library *compute.Library) bool {

	return IsWorkspacePath(path)
}

// IsVolumesPath returns true if the specified path indicates that it is a Volumes path.
func IsVolumesPath(path string) bool {
	return strings.HasPrefix(path, "/Volumes/")
}

func IsWorkspaceSharedPath(path string) bool {
	return strings.HasPrefix(path, "/Workspace/Shared/")
}
@@ -31,3 +31,13 @@ func TestIsWorkspaceLibrary(t *testing.T) {
	// Empty.
	assert.False(t, IsWorkspaceLibrary(&compute.Library{}))
}

func TestIsVolumesPath(t *testing.T) {
	// Absolute paths with particular prefixes.
	assert.True(t, IsVolumesPath("/Volumes/path/to/package"))

	// Relative paths.
	assert.False(t, IsVolumesPath("myfile.txt"))
	assert.False(t, IsVolumesPath("./myfile.txt"))
	assert.False(t, IsVolumesPath("../myfile.txt"))
}
@@ -3,9 +3,9 @@ package permissions
import (
	"context"
	"fmt"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/libraries"
	"github.com/databricks/cli/libs/diag"
)

@@ -21,17 +21,13 @@ func (*validateSharedRootPermissions) Name() string {
}

func (*validateSharedRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	if isWorkspaceSharedRoot(b.Config.Workspace.RootPath) {
	if libraries.IsWorkspaceSharedPath(b.Config.Workspace.RootPath) {
		return isUsersGroupPermissionSet(b)
	}

	return nil
}

func isWorkspaceSharedRoot(path string) bool {
	return strings.HasPrefix(path, "/Workspace/Shared/")
}

// isUsersGroupPermissionSet checks that top-level permissions set for bundle contain group_name: users with CAN_MANAGE permission.
func isUsersGroupPermissionSet(b *bundle.Bundle) diag.Diagnostics {
	var diags diag.Diagnostics
@@ -0,0 +1,89 @@
package permissions

import (
	"fmt"
	"strings"

	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/databricks-sdk-go/service/workspace"
)

type WorkspacePathPermissions struct {
	Path        string
	Permissions []resources.Permission
}

func ObjectAclToResourcePermissions(path string, acl []workspace.WorkspaceObjectAccessControlResponse) *WorkspacePathPermissions {
	permissions := make([]resources.Permission, 0)
	for _, a := range acl {
		// Skip the admins group because it is added to all resources by default.
		if a.GroupName == "admins" {
			continue
		}

		for _, pl := range a.AllPermissions {
			permissions = append(permissions, resources.Permission{
				Level:                convertWorkspaceObjectPermissionLevel(pl.PermissionLevel),
				GroupName:            a.GroupName,
				UserName:             a.UserName,
				ServicePrincipalName: a.ServicePrincipalName,
			})
		}
	}

	return &WorkspacePathPermissions{Permissions: permissions, Path: path}
}

func (p WorkspacePathPermissions) Compare(perms []resources.Permission) diag.Diagnostics {
	var diags diag.Diagnostics

	// Check the permissions in the workspace and see if they are all set in the bundle.
	ok, missing := containsAll(p.Permissions, perms)
	if !ok {
		diags = diags.Append(diag.Diagnostic{
			Severity: diag.Warning,
			Summary:  "untracked permissions apply to target workspace path",
			Detail:   fmt.Sprintf("The following permissions apply to the workspace folder at %q but are not configured in the bundle:\n%s", p.Path, toString(missing)),
		})
	}

	return diags
}

// containsAll checks that all permissions in permA are also present in permB.
func containsAll(permA []resources.Permission, permB []resources.Permission) (bool, []resources.Permission) {
	missing := make([]resources.Permission, 0)
	for _, a := range permA {
		found := false
		for _, b := range permB {
			if a == b {
				found = true
				break
			}
		}
		if !found {
			missing = append(missing, a)
		}
	}
	return len(missing) == 0, missing
}

// convertWorkspaceObjectPermissionLevel converts matching object permission levels to bundle ones.
// If there is no matching permission level, it returns the permission level as is, for example CAN_EDIT.
func convertWorkspaceObjectPermissionLevel(level workspace.WorkspaceObjectPermissionLevel) string {
	switch level {
	case workspace.WorkspaceObjectPermissionLevelCanRead:
		return CAN_VIEW
	default:
		return string(level)
	}
}

func toString(p []resources.Permission) string {
	var sb strings.Builder
	for _, perm := range p {
		sb.WriteString(fmt.Sprintf("- %s\n", perm.String()))
	}
	return sb.String()
}
@@ -0,0 +1,121 @@
package permissions

import (
	"testing"

	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/databricks-sdk-go/service/workspace"
	"github.com/stretchr/testify/require"
)

func TestWorkspacePathPermissionsCompare(t *testing.T) {
	testCases := []struct {
		perms    []resources.Permission
		acl      []workspace.WorkspaceObjectAccessControlResponse
		expected diag.Diagnostics
	}{
		{
			perms: []resources.Permission{
				{Level: CAN_MANAGE, UserName: "foo@bar.com"},
			},
			acl: []workspace.WorkspaceObjectAccessControlResponse{
				{
					UserName: "foo@bar.com",
					AllPermissions: []workspace.WorkspaceObjectPermission{
						{PermissionLevel: "CAN_MANAGE"},
					},
				},
			},
			expected: nil,
		},
		{
			perms: []resources.Permission{
				{Level: CAN_MANAGE, UserName: "foo@bar.com"},
			},
			acl: []workspace.WorkspaceObjectAccessControlResponse{
				{
					UserName: "foo@bar.com",
					AllPermissions: []workspace.WorkspaceObjectPermission{
						{PermissionLevel: "CAN_MANAGE"},
					},
				},
				{
					GroupName: "admins",
					AllPermissions: []workspace.WorkspaceObjectPermission{
						{PermissionLevel: "CAN_MANAGE"},
					},
				},
			},
			expected: nil,
		},
		{
			perms: []resources.Permission{
				{Level: CAN_VIEW, UserName: "foo@bar.com"},
				{Level: CAN_MANAGE, ServicePrincipalName: "sp.com"},
			},
			acl: []workspace.WorkspaceObjectAccessControlResponse{
				{
					UserName: "foo@bar.com",
					AllPermissions: []workspace.WorkspaceObjectPermission{
						{PermissionLevel: "CAN_READ"},
					},
				},
			},
			expected: nil,
		},
		{
			perms: []resources.Permission{
				{Level: CAN_MANAGE, UserName: "foo@bar.com"},
			},
			acl: []workspace.WorkspaceObjectAccessControlResponse{
				{
					UserName: "foo@bar.com",
					AllPermissions: []workspace.WorkspaceObjectPermission{
						{PermissionLevel: "CAN_MANAGE"},
					},
				},
				{
					GroupName: "foo",
					AllPermissions: []workspace.WorkspaceObjectPermission{
						{PermissionLevel: "CAN_MANAGE"},
					},
				},
			},
			expected: diag.Diagnostics{
				{
					Severity: diag.Warning,
					Summary:  "untracked permissions apply to target workspace path",
					Detail:   "The following permissions apply to the workspace folder at \"path\" but are not configured in the bundle:\n- level: CAN_MANAGE, group_name: foo\n",
				},
			},
		},
		{
			perms: []resources.Permission{
				{Level: CAN_MANAGE, UserName: "foo@bar.com"},
			},
			acl: []workspace.WorkspaceObjectAccessControlResponse{
				{
					UserName: "foo2@bar.com",
					AllPermissions: []workspace.WorkspaceObjectPermission{
						{PermissionLevel: "CAN_MANAGE"},
					},
				},
			},
			expected: diag.Diagnostics{
				{
					Severity: diag.Warning,
					Summary:  "untracked permissions apply to target workspace path",
					Detail:   "The following permissions apply to the workspace folder at \"path\" but are not configured in the bundle:\n- level: CAN_MANAGE, user_name: foo2@bar.com\n",
				},
			},
		},
	}

	for _, tc := range testCases {
		wp := ObjectAclToResourcePermissions("path", tc.acl)
		diags := wp.Compare(tc.perms)
		require.Equal(t, tc.expected, diags)
	}
}
@@ -34,7 +34,7 @@ func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error {
	permissions := make([]workspace.WorkspaceObjectAccessControlRequest, 0)

	for _, p := range b.Config.Permissions {
		level, err := getWorkspaceObjectPermissionLevel(p.Level)
		level, err := GetWorkspaceObjectPermissionLevel(p.Level)
		if err != nil {
			return err
		}

@@ -65,7 +65,7 @@ func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error {
	return err
}

func getWorkspaceObjectPermissionLevel(bundlePermission string) (workspace.WorkspaceObjectPermissionLevel, error) {
func GetWorkspaceObjectPermissionLevel(bundlePermission string) (workspace.WorkspaceObjectPermissionLevel, error) {
	switch bundlePermission {
	case CAN_MANAGE:
		return workspace.WorkspaceObjectPermissionLevelCanManage, nil
@@ -0,0 +1,17 @@
package resources

import "github.com/databricks/cli/bundle"

// Completions returns the same as [References] except
// that every key maps directly to a single reference.
func Completions(b *bundle.Bundle, filters ...Filter) map[string]Reference {
	out := make(map[string]Reference)
	keyOnlyRefs, _ := References(b, filters...)
	for k, refs := range keyOnlyRefs {
		if len(refs) != 1 {
			continue
		}
		out[k] = refs[0]
	}
	return out
}
@@ -0,0 +1,58 @@
package resources

import (
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/stretchr/testify/assert"
)

func TestCompletions_SkipDuplicates(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"foo": {},
					"bar": {},
				},
				Pipelines: map[string]*resources.Pipeline{
					"foo": {},
				},
			},
		},
	}

	// Test that this skips duplicates and only includes unambiguous completions.
	out := Completions(b)
	if assert.Len(t, out, 1) {
		assert.Contains(t, out, "bar")
	}
}

func TestCompletions_Filter(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"foo": {},
				},
				Pipelines: map[string]*resources.Pipeline{
					"bar": {},
				},
			},
		},
	}

	includeJobs := func(ref Reference) bool {
		_, ok := ref.Resource.(*resources.Job)
		return ok
	}

	// Test that this does not include the pipeline.
	out := Completions(b, includeJobs)
	if assert.Len(t, out, 1) {
		assert.Contains(t, out, "foo")
	}
}
@@ -0,0 +1,98 @@
package resources

import (
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
)

// Reference is a reference to a resource.
// It includes the resource type description and a reference to the resource itself.
type Reference struct {
	// Key is the unique key of the resource, e.g. "my_job".
	Key string

	// KeyWithType is the unique key of the resource, including the resource type, e.g. "jobs.my_job".
	KeyWithType string

	// Description is the resource type description.
	Description config.ResourceDescription

	// Resource is the resource itself.
	Resource config.ConfigResource
}

// Map is the core type for resource lookup and completion.
type Map map[string][]Reference

// Filter defines the function signature for filtering resources.
type Filter func(Reference) bool

// includeReference checks if the specified reference passes all filters.
// If the list of filters is empty, the reference is always included.
func includeReference(filters []Filter, ref Reference) bool {
	for _, filter := range filters {
		if !filter(ref) {
			return false
		}
	}
	return true
}

// References returns maps of resource keys to a slice of [Reference].
//
// The first map is indexed by the resource key only.
// The second map is indexed by the resource type name and its key.
//
// While the return types allow for multiple resources to share the same key,
// this is confirmed not to happen in the [validate.UniqueResourceKeys] mutator.
func References(b *bundle.Bundle, filters ...Filter) (Map, Map) {
	keyOnly := make(Map)
	keyWithType := make(Map)

	// Collect map of resource references indexed by their keys.
	for _, group := range b.Config.Resources.AllResources() {
		for k, v := range group.Resources {
			ref := Reference{
				Key:         k,
				KeyWithType: fmt.Sprintf("%s.%s", group.Description.PluralName, k),
				Description: group.Description,
				Resource:    v,
			}

			// Skip resources that do not pass all filters.
			if !includeReference(filters, ref) {
				continue
			}

			keyOnly[ref.Key] = append(keyOnly[ref.Key], ref)
			keyWithType[ref.KeyWithType] = append(keyWithType[ref.KeyWithType], ref)
		}
	}

	return keyOnly, keyWithType
}

// Lookup returns the resource with the specified key.
// If the key maps to more than one resource, an error is returned.
// If the key does not map to any resource, an error is returned.
func Lookup(b *bundle.Bundle, key string, filters ...Filter) (Reference, error) {
	keyOnlyRefs, keyWithTypeRefs := References(b, filters...)
	refs, ok := keyOnlyRefs[key]
	if !ok {
		refs, ok = keyWithTypeRefs[key]
		if !ok {
			return Reference{}, fmt.Errorf("resource with key %q not found", key)
		}
	}

	switch {
	case len(refs) == 1:
		return refs[0], nil
	case len(refs) > 1:
		return Reference{}, fmt.Errorf("multiple resources with key %q found", key)
	default:
		panic("unreachable")
	}
}
@@ -0,0 +1,117 @@
package resources

import (
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestLookup_EmptyBundle(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{},
		},
	}

	_, err := Lookup(b, "foo")
	require.Error(t, err)
	assert.ErrorContains(t, err, "resource with key \"foo\" not found")
}

func TestLookup_NotFound(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"foo": {},
					"bar": {},
				},
			},
		},
	}

	_, err := Lookup(b, "qux")
	require.Error(t, err)
	assert.ErrorContains(t, err, `resource with key "qux" not found`)
}

func TestLookup_MultipleFound(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"foo": {},
				},
				Pipelines: map[string]*resources.Pipeline{
					"foo": {},
				},
			},
		},
	}

	_, err := Lookup(b, "foo")
	require.Error(t, err)
	assert.ErrorContains(t, err, `multiple resources with key "foo" found`)
}

func TestLookup_Nominal(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"foo": {
						JobSettings: &jobs.JobSettings{
							Name: "Foo job",
						},
					},
				},
			},
		},
	}

	// Lookup by key only.
	out, err := Lookup(b, "foo")
	if assert.NoError(t, err) {
		assert.Equal(t, "Foo job", out.Resource.GetName())
	}

	// Lookup by type and key.
	out, err = Lookup(b, "jobs.foo")
	if assert.NoError(t, err) {
		assert.Equal(t, "Foo job", out.Resource.GetName())
	}
}

func TestLookup_NominalWithFilters(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"foo": {},
				},
				Pipelines: map[string]*resources.Pipeline{
					"bar": {},
				},
			},
		},
	}

	includeJobs := func(ref Reference) bool {
		_, ok := ref.Resource.(*resources.Job)
		return ok
	}

	// This should succeed because the filter includes jobs.
	_, err := Lookup(b, "foo", includeJobs)
	require.NoError(t, err)

	// This should fail because the filter excludes pipelines.
	_, err = Lookup(b, "bar", includeJobs)
	require.Error(t, err)
	assert.ErrorContains(t, err, `resource with key "bar" not found`)
}
@@ -1,69 +0,0 @@
package run

import (
	"fmt"

	"github.com/databricks/cli/bundle"
	"golang.org/x/exp/maps"
)

// RunnerLookup maps identifiers to a list of workloads that match that identifier.
// The list can have more than 1 entry if resources of different types use the
// same key. When this happens, the user should disambiguate between them.
type RunnerLookup map[string][]Runner

// ResourceKeys computes a map with
func ResourceKeys(b *bundle.Bundle) (keyOnly RunnerLookup, keyWithType RunnerLookup) {
	keyOnly = make(RunnerLookup)
	keyWithType = make(RunnerLookup)

	r := b.Config.Resources
	for k, v := range r.Jobs {
		kt := fmt.Sprintf("jobs.%s", k)
		w := jobRunner{key: key(kt), bundle: b, job: v}
		keyOnly[k] = append(keyOnly[k], &w)
		keyWithType[kt] = append(keyWithType[kt], &w)
	}
	for k, v := range r.Pipelines {
		kt := fmt.Sprintf("pipelines.%s", k)
		w := pipelineRunner{key: key(kt), bundle: b, pipeline: v}
		keyOnly[k] = append(keyOnly[k], &w)
		keyWithType[kt] = append(keyWithType[kt], &w)
	}
	return
}

// ResourceCompletionMap returns a map of resource keys to their respective names.
func ResourceCompletionMap(b *bundle.Bundle) map[string]string {
	out := make(map[string]string)
	keyOnly, keyWithType := ResourceKeys(b)

	// Keep track of resources we have seen by their fully qualified key.
	seen := make(map[string]bool)

	// First add resources that can be identified by key alone.
	for k, v := range keyOnly {
		// Invariant: len(v) >= 1. See [ResourceKeys].
		if len(v) == 1 {
			seen[v[0].Key()] = true
			out[k] = v[0].Name()
		}
	}

	// Then add resources that can only be identified by their type and key.
	for k, v := range keyWithType {
		// Invariant: len(v) == 1. See [ResourceKeys].
		_, ok := seen[v[0].Key()]
		if ok {
			continue
		}
		out[k] = v[0].Name()
	}

	return out
}

// ResourceCompletions returns a list of keys that unambiguously reference resources in the bundle.
func ResourceCompletions(b *bundle.Bundle) []string {
	return maps.Keys(ResourceCompletionMap(b))
}
@ -1,25 +0,0 @@
|
|||
package run
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestResourceCompletionsUnique(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"foo": {},
|
||||
"bar": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
assert.ElementsMatch(t, []string{"foo", "bar"}, ResourceCompletions(b))
|
||||
}
|
|
@@ -3,9 +3,10 @@ package run
import (
	"context"
	"fmt"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/resources"
	refs "github.com/databricks/cli/bundle/resources"
	"github.com/databricks/cli/bundle/run/output"
)

@@ -38,34 +39,24 @@ type Runner interface {
	argsHandler
}

// Find locates a runner matching the specified argument.
//
// Its behavior is as follows:
// 1. Try to find a resource with <key> identical to the argument.
// 2. Try to find a resource with <type>.<key> identical to the argument.
//
// If an argument resolves to multiple resources, it returns an error.
func Find(b *bundle.Bundle, arg string) (Runner, error) {
	keyOnly, keyWithType := ResourceKeys(b)
	if len(keyWithType) == 0 {
		return nil, fmt.Errorf("bundle defines no resources")
// IsRunnable returns a filter that only allows runnable resources.
func IsRunnable(ref refs.Reference) bool {
	switch ref.Resource.(type) {
	case *resources.Job, *resources.Pipeline:
		return true
	default:
		return false
	}
}

// ToRunner converts a resource reference to a runnable resource.
func ToRunner(b *bundle.Bundle, ref refs.Reference) (Runner, error) {
	switch resource := ref.Resource.(type) {
	case *resources.Job:
		return &jobRunner{key: key(ref.KeyWithType), bundle: b, job: resource}, nil
	case *resources.Pipeline:
		return &pipelineRunner{key: key(ref.KeyWithType), bundle: b, pipeline: resource}, nil
	default:
		return nil, fmt.Errorf("unsupported resource type: %T", resource)
	}

	runners, ok := keyOnly[arg]
	if !ok {
		runners, ok = keyWithType[arg]
		if !ok {
			return nil, fmt.Errorf("no such resource: %s", arg)
		}
	}

	if len(runners) != 1 {
		var keys []string
		for _, runner := range runners {
			keys = append(keys, runner.Key())
		}
		return nil, fmt.Errorf("ambiguous: %s (can resolve to all of %s)", arg, strings.Join(keys, ", "))
	}

	return runners[0], nil
}

@@ -3,82 +3,14 @@ package run
import (
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	refs "github.com/databricks/cli/bundle/resources"
	"github.com/stretchr/testify/assert"
)

func TestFindNoResources(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{},
		},
	}

	_, err := Find(b, "foo")
	assert.ErrorContains(t, err, "bundle defines no resources")
}

func TestFindSingleArg(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"foo": {},
				},
			},
		},
	}

	_, err := Find(b, "foo")
	assert.NoError(t, err)
}

func TestFindSingleArgNotFound(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"foo": {},
				},
			},
		},
	}

	_, err := Find(b, "bar")
	assert.ErrorContains(t, err, "no such resource: bar")
}

func TestFindSingleArgAmbiguous(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"key": {},
				},
				Pipelines: map[string]*resources.Pipeline{
					"key": {},
				},
			},
		},
	}

	_, err := Find(b, "key")
	assert.ErrorContains(t, err, "ambiguous: ")
}

func TestFindSingleArgWithType(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"key": {},
				},
			},
		},
	}

	_, err := Find(b, "jobs.key")
	assert.NoError(t, err)
func TestRunner_IsRunnable(t *testing.T) {
	assert.True(t, IsRunnable(refs.Reference{Resource: &resources.Job{}}))
	assert.True(t, IsRunnable(refs.Reference{Resource: &resources.Pipeline{}}))
	assert.False(t, IsRunnable(refs.Reference{Resource: &resources.MlflowModel{}}))
	assert.False(t, IsRunnable(refs.Reference{Resource: &resources.MlflowExperiment{}}))
}

@@ -27,5 +27,6 @@ func New() *cobra.Command {
	cmd.AddCommand(newGenerateCommand())
	cmd.AddCommand(newDebugCommand())
	cmd.AddCommand(deployment.NewDeploymentCommand())
	cmd.AddCommand(newOpenCommand())
	return cmd
}

@@ -0,0 +1,144 @@
package bundle

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/databricks/cli/bundle/deploy/terraform"
	"github.com/databricks/cli/bundle/phases"
	"github.com/databricks/cli/bundle/resources"
	"github.com/databricks/cli/cmd/bundle/utils"
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/spf13/cobra"
	"golang.org/x/exp/maps"

	"github.com/pkg/browser"
)

func promptOpenArgument(ctx context.Context, b *bundle.Bundle) (string, error) {
	// Compute map of "Human readable name of resource" -> "resource key".
	inv := make(map[string]string)
	for k, ref := range resources.Completions(b) {
		title := fmt.Sprintf("%s: %s", ref.Description.SingularTitle, ref.Resource.GetName())
		inv[title] = k
	}

	key, err := cmdio.Select(ctx, inv, "Resource to open")
	if err != nil {
		return "", err
	}

	return key, nil
}

func resolveOpenArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, error) {
	// If no arguments are specified, prompt the user to select the resource to open.
	if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
		return promptOpenArgument(ctx, b)
	}

	if len(args) < 1 {
		return "", fmt.Errorf("expected a KEY of the resource to open")
	}

	return args[0], nil
}

func newOpenCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "open",
		Short: "Open a resource in the browser",
		Args:  root.MaximumNArgs(1),
	}

	var forcePull bool
	cmd.Flags().BoolVar(&forcePull, "force-pull", false, "Skip local cache and load the state from the remote workspace")

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()
		b, diags := utils.ConfigureBundleWithVariables(cmd)
		if err := diags.Error(); err != nil {
			return diags.Error()
		}

		diags = bundle.Apply(ctx, b, phases.Initialize())
		if err := diags.Error(); err != nil {
			return err
		}

		arg, err := resolveOpenArgument(ctx, b, args)
		if err != nil {
			return err
		}

		cacheDir, err := terraform.Dir(ctx, b)
		if err != nil {
			return err
		}
		_, stateFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformStateFileName))
		_, configFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformConfigFileName))
		noCache := errors.Is(stateFileErr, os.ErrNotExist) || errors.Is(configFileErr, os.ErrNotExist)

		if forcePull || noCache {
			diags = bundle.Apply(ctx, b, bundle.Seq(
				terraform.StatePull(),
				terraform.Interpolate(),
				terraform.Write(),
			))
			if err := diags.Error(); err != nil {
				return err
			}
		}

		diags = bundle.Apply(ctx, b, bundle.Seq(
			terraform.Load(),
			mutator.InitializeURLs(),
		))
		if err := diags.Error(); err != nil {
			return err
		}

		// Locate resource to open.
		ref, err := resources.Lookup(b, arg)
		if err != nil {
			return err
		}

		// Confirm that the resource has a URL.
		url := ref.Resource.GetURL()
		if url == "" {
			return fmt.Errorf("resource does not have a URL associated with it (has it been deployed?)")
		}

		return browser.OpenURL(url)
	}

	cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		b, diags := root.MustConfigureBundle(cmd)
		if err := diags.Error(); err != nil {
			cobra.CompErrorln(err.Error())
			return nil, cobra.ShellCompDirectiveError
		}

		// No completion in the context of a bundle.
		// Source and destination paths are taken from bundle configuration.
		if b == nil {
			return nil, cobra.ShellCompDirectiveNoFileComp
		}

		if len(args) == 0 {
			completions := resources.Completions(b)
			return maps.Keys(completions), cobra.ShellCompDirectiveNoFileComp
		} else {
			return nil, cobra.ShellCompDirectiveNoFileComp
		}
	}

	return cmd
}
@@ -1,12 +1,14 @@
package bundle

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/deploy/terraform"
	"github.com/databricks/cli/bundle/phases"
	"github.com/databricks/cli/bundle/resources"
	"github.com/databricks/cli/bundle/run"
	"github.com/databricks/cli/bundle/run/output"
	"github.com/databricks/cli/cmd/bundle/utils"
|
|||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/flags"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
func promptRunArgument(ctx context.Context, b *bundle.Bundle) (string, error) {
|
||||
// Compute map of "Human readable name of resource" -> "resource key".
|
||||
inv := make(map[string]string)
|
||||
for k, ref := range resources.Completions(b, run.IsRunnable) {
|
||||
title := fmt.Sprintf("%s: %s", ref.Description.SingularTitle, ref.Resource.GetName())
|
||||
inv[title] = k
|
||||
}
|
||||
|
||||
key, err := cmdio.Select(ctx, inv, "Resource to run")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return key, nil
|
||||
}
|
||||
|
||||
func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, error) {
|
||||
// If no arguments are specified, prompt the user to select something to run.
|
||||
if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
|
||||
return promptRunArgument(ctx, b)
|
||||
}
|
||||
|
||||
if len(args) < 1 {
|
||||
return "", fmt.Errorf("expected a KEY of the resource to run")
|
||||
}
|
||||
|
||||
return args[0], nil
|
||||
}
|
||||
|
||||
func keyToRunner(b *bundle.Bundle, arg string) (run.Runner, error) {
|
||||
// Locate the resource to run.
|
||||
ref, err := resources.Lookup(b, arg, run.IsRunnable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert the resource to a runnable resource.
|
||||
runner, err := run.ToRunner(b, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return runner, nil
|
||||
}
|
||||
|
||||
func newRunCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "run [flags] KEY",
|
||||
|
@ -61,23 +109,10 @@ task or a Python wheel task, the second example applies.
|
|||
return err
|
||||
}
|
||||
|
||||
// If no arguments are specified, prompt the user to select something to run.
|
||||
if len(args) == 0 && cmdio.IsPromptSupported(ctx) {
|
||||
// Invert completions from KEY -> NAME, to NAME -> KEY.
|
||||
inv := make(map[string]string)
|
||||
for k, v := range run.ResourceCompletionMap(b) {
|
||||
inv[v] = k
|
||||
}
|
||||
id, err := cmdio.Select(ctx, inv, "Resource to run")
|
||||
arg, err := resolveRunArgument(ctx, b, args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args = append(args, id)
|
||||
}
|
||||
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("expected a KEY of the resource to run")
|
||||
}
|
||||
|
||||
diags = bundle.Apply(ctx, b, bundle.Seq(
|
||||
terraform.Interpolate(),
|
||||
|
@ -89,7 +124,7 @@ task or a Python wheel task, the second example applies.
|
|||
return err
|
||||
}
|
||||
|
||||
runner, err := run.Find(b, args[0])
|
||||
runner, err := keyToRunner(b, arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -146,10 +181,11 @@ task or a Python wheel task, the second example applies.
|
|||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
return run.ResourceCompletions(b), cobra.ShellCompDirectiveNoFileComp
|
||||
completions := resources.Completions(b, run.IsRunnable)
|
||||
return maps.Keys(completions), cobra.ShellCompDirectiveNoFileComp
|
||||
} else {
|
||||
// If we know the resource to run, we can complete additional positional arguments.
|
||||
runner, err := run.Find(b, args[0])
|
||||
runner, err := keyToRunner(b, args[0])
|
||||
if err != nil {
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
|
|
@@ -6,6 +6,7 @@ import (
	"fmt"
	"io"
	"os/exec"
	"sync"
)

type execOption func(context.Context, *exec.Cmd) error
|
|||
}
|
||||
}
|
||||
|
||||
// safeWriter is a writer that is safe to use concurrently.
|
||||
// It serializes writes to the underlying writer.
|
||||
type safeWriter struct {
|
||||
w io.Writer
|
||||
m sync.Mutex
|
||||
}
|
||||
|
||||
func (s *safeWriter) Write(p []byte) (n int, err error) {
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
return s.w.Write(p)
|
||||
}
|
||||
|
||||
func WithCombinedOutput(buf *bytes.Buffer) execOption {
|
||||
sw := &safeWriter{w: buf}
|
||||
return func(_ context.Context, c *exec.Cmd) error {
|
||||
c.Stdout = io.MultiWriter(buf, c.Stdout)
|
||||
c.Stderr = io.MultiWriter(buf, c.Stderr)
|
||||
c.Stdout = io.MultiWriter(sw, c.Stdout)
|
||||
c.Stderr = io.MultiWriter(sw, c.Stderr)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
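A closing note on the exec change above (not part of the diff): a bytes.Buffer behind io.MultiWriter is not safe for concurrent writes, which is why WithCombinedOutput now routes both stdout and stderr through safeWriter. A minimal standalone sketch of the same pattern, with illustrative data:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

// safeWriter serializes writes to the wrapped writer, mirroring the type added above.
type safeWriter struct {
	w io.Writer
	m sync.Mutex
}

func (s *safeWriter) Write(p []byte) (int, error) {
	s.m.Lock()
	defer s.m.Unlock()
	return s.w.Write(p)
}

func main() {
	var buf bytes.Buffer
	sw := &safeWriter{w: &buf}

	// Two goroutines writing concurrently, as the stdout and stderr pipes of a
	// child process would; the mutex keeps each write atomic within the buffer.
	var wg sync.WaitGroup
	for _, line := range []string{"stdout line\n", "stderr line\n"} {
		wg.Add(1)
		go func(s string) {
			defer wg.Done()
			io.WriteString(sw, s)
		}(line)
	}
	wg.Wait()

	fmt.Print(buf.String()) // both lines, in some order, never torn mid-line
}
```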