package project

import (
	"context"
	"fmt"
	"sync"

	"github.com/databricks/databricks-sdk-go/service/clusters"
	"github.com/databricks/databricks-sdk-go/service/commands"
	"github.com/databricks/databricks-sdk-go/workspaces"
	"github.com/databrickslabs/terraform-provider-databricks/common"
	"github.com/databrickslabs/terraform-provider-databricks/scim"
)

// Current CLI application state - figure out
var Current inner

type inner struct {
	mu   sync.Mutex
	once sync.Once

	project *Project
	wsc     *workspaces.WorkspacesClient
	client  *common.DatabricksClient
	me      *scim.User
}
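// init lazily sets up the shared application state exactly once: it creates
// the Databricks clients, loads the project configuration, and configures the
// client from the project's CLI profile. Any failure is treated as fatal and
// panics.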
func (i *inner) init() {
	i.mu.Lock()
	defer i.mu.Unlock()
	i.once.Do(func() {
		client := &common.DatabricksClient{}
		i.client = client
		i.wsc = workspaces.New()
		prj, err := loadProjectConf()
		if err != nil {
			panic(err)
		}
		client.Profile = prj.Profile // Databricks CLI profile
		err = client.Configure()
		if err != nil {
			panic(err)
		}
		i.project = &prj
	})
}
func (i *inner) Client() *common.DatabricksClient {
	i.init()
	return i.client
}

func (i *inner) Project() *Project {
	i.init()
	return i.project
}

// Make sure to initialize the workspaces client on project init
func (i *inner) WorkspacesClient() *workspaces.WorkspacesClient {
	i.init()
	return i.wsc
}
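
// A minimal usage sketch (illustrative only, assuming a valid project
// configuration and Databricks CLI profile are present): the accessors above
// initialize the shared state on first use.
//
//	ctx := context.Background()
//	w := Current.WorkspacesClient()
//	clustersList, err := w.Clusters.List(ctx, clusters.ListRequest{})
//	if err != nil {
//		panic(err)
//	}
//	fmt.Printf("found %d clusters\n", len(clustersList.Clusters))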

// We can replace this with go sdk once https://github.com/databricks/databricks-sdk-go/issues/56 is fixed
func (i *inner) Me() *scim.User {
	// Initialize (and grab the client) before taking the lock: init() acquires
	// the same mutex, so calling i.Client() while holding it would deadlock.
	client := i.Client()
	i.mu.Lock()
	defer i.mu.Unlock()
	if i.me != nil {
		return i.me
	}
	me, err := scim.NewUsersAPI(context.Background(), client).Me()
	if err != nil {
		panic(err)
	}
	i.me = &me
	return &me
}
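
// DeploymentIsolationPrefix returns the prefix under which this project's
// resources are deployed: the bare project name for no isolation, or
// "<name>/<user>" for soft isolation (for example, a project "sandbox" and
// user "jane@example.com" would yield "sandbox/jane@example.com"; values are
// illustrative).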
func (i *inner) DeploymentIsolationPrefix() string {
	if i.project.Isolation == None {
		return i.project.Name
	}
	if i.project.Isolation == Soft {
		me := i.Me()
		return fmt.Sprintf("%s/%s", i.project.Name, me.UserName)
	}
	panic(fmt.Errorf("unknown project isolation: %s", i.project.Isolation))
}
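
// getClusterIdFromClusterName resolves a cluster ID by listing all clusters in
// the workspace and returning the first one whose name matches clusterName.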
func getClusterIdFromClusterName(ctx context.Context,
	wsc *workspaces.WorkspacesClient,
	clusterName string,
) (clusterId string, err error) {
	clusterId = ""
	clustersList, err := wsc.Clusters.List(ctx, clusters.ListRequest{})
	if err != nil {
		return
	}
	for _, cluster := range clustersList.Clusters {
		if cluster.ClusterName == clusterName {
			clusterId = cluster.ClusterId
			return
		}
	}
	err = fmt.Errorf("could not find cluster with name: %s", clusterName)
	return
}

// Old version of getting development cluster details with isolation implemented.
// Kept just for reference. Remove once isolation is implemented properly
/*
func (i *inner) DevelopmentCluster(ctx context.Context) (cluster clusters.ClusterInfo, err error) {
	api := clusters.NewClustersAPI(ctx, i.Client()) // TODO: rewrite with normal SDK
	if i.project.DevCluster == nil {
		i.project.DevCluster = &clusters.Cluster{}
	}
	dc := i.project.DevCluster
	if i.project.Isolation == Soft {
		if i.project.IsDevClusterJustReference() {
			err = fmt.Errorf("projects with soft isolation cannot have named clusters")
			return
		}
		dc.ClusterName = fmt.Sprintf("dev/%s", i.DeploymentIsolationPrefix())
	}
	if dc.ClusterName == "" {
		err = fmt.Errorf("please either pick `isolation: soft` or specify a shared cluster name")
		return
	}
	return api.GetOrCreateRunningCluster(dc.ClusterName, *dc)
}

func runCommandOnDev(ctx context.Context, language, command string) common.CommandResults {
	cluster, err := Current.DevelopmentCluster(ctx)
	exec := Current.Client().CommandExecutor(ctx)
	if err != nil {
		return common.CommandResults{
			ResultType: "error",
			Summary:    err.Error(),
		}
	}
	return exec.Execute(cluster.ClusterID, language, command)
}

func RunPythonOnDev(ctx context.Context, command string) common.CommandResults {
	return runCommandOnDev(ctx, "python", command)
}
*/

// TODO: Add safe access to i.project and i.project.DevCluster that returns
// errors if the fields are not defined properly
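// GetDevelopmentClusterId returns the development cluster ID from the project
// configuration, resolving it from the cluster name when only a name is set.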
func (i *inner) GetDevelopmentClusterId(ctx context.Context) (clusterId string, err error) {
	i.init()
	clusterId = i.project.DevCluster.ClusterId
	clusterName := i.project.DevCluster.ClusterName
	if clusterId != "" {
		return
	} else if clusterName != "" {
		// Add workspaces client on init
		return getClusterIdFromClusterName(ctx, i.wsc, clusterName)
	} else {
		// TODO: Add the project config file location to the error message
		err = fmt.Errorf("please define either development cluster's cluster_id or cluster_name in your project config")
		return
	}
}
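
// runCommandOnDev executes a command on the project's development cluster,
// returning a CommandResults with ResultType "error" when the cluster cannot
// be resolved.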
func runCommandOnDev(ctx context.Context, language, command string) commands.CommandResults {
	clusterId, err := Current.GetDevelopmentClusterId(ctx)
	if err != nil {
		return commands.CommandResults{
			ResultType: "error",
			Summary:    err.Error(),
		}
	}
	return Current.wsc.Commands.Execute(ctx, clusterId, language, command)
}
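
// RunPythonOnDev runs a snippet of Python code on the development cluster.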
func RunPythonOnDev(ctx context.Context, command string) commands.CommandResults {
	return runCommandOnDev(ctx, "python", command)
}
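
// A minimal usage sketch (illustrative only; the snippet and error handling
// below are assumptions, not part of this package):
//
//	res := RunPythonOnDev(ctx, `print("hello")`)
//	if res.ResultType == "error" {
//		panic(res.Summary)
//	}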