mirror of https://github.com/databricks/cli.git
wip
parent 961ec74c7a
commit df8d39bf16
@@ -43,7 +43,10 @@ var deployCmd = &cobra.Command{
			return err
		}

		env := "dev"
		env := cmd.Flag("environment").Value.String()
		if env == "" {
			env = "development"
		}

		var out = map[string]interface{}{
			"terraform": map[string]interface{}{
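For reference, a minimal standalone sketch of the flag fallback above. It assumes the deploy command registers an --environment flag the same way the sync command does later in this diff; that registration is not part of this hunk.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "deploy",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Read the --environment flag; fall back to "development" when unset.
			env := cmd.Flag("environment").Value.String()
			if env == "" {
				env = "development"
			}
			fmt.Println("deploying environment:", env)
			return nil
		},
	}
	// Hypothetical registration, mirroring the sync command's flag definition.
	cmd.Flags().StringP("environment", "e", "", "Environment to use")
	cmd.SetArgs([]string{"--environment", "staging"})
	_ = cmd.Execute()
}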
@@ -113,12 +116,19 @@ var deployCmd = &cobra.Command{
				panic(err)
			}

			for _, job := range out["resource"].(map[string]interface{})["databricks_job"].(map[string]interface{}) {
				spark_python_task := job.(map[string]interface{})["spark_python_task"].(map[string]interface{})
				python_file := spark_python_task["python_file"].(string)
				spark_python_task["python_file"] = filepath.Join(prj.Environment().Workspace.Root, python_file)
			}
		}

		// Perform any string interpolation / string templating

		// TODO Make sure dist/env exists...

		os.MkdirAll(filepath.Join(prj.Root(), "dist", env), 0755)

		f, err := os.Create(filepath.Join(prj.Root(), "dist", env, "main.tf.json"))
		if err != nil {
			return err
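To illustrate the rewrite above, a small self-contained sketch that prefixes a job's python_file with the environment's workspace root. The resource names and the workspace root value are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"
	"path/filepath"
)

func main() {
	// Hypothetical in-memory resource map shaped like the generated Terraform JSON.
	out := map[string]interface{}{
		"resource": map[string]interface{}{
			"databricks_job": map[string]interface{}{
				"my_first_job": map[string]interface{}{
					"spark_python_task": map[string]interface{}{
						"python_file": "./my_first_job.py",
					},
				},
			},
		},
	}

	workspaceRoot := "/Repos/someone@example.com/demo/development" // assumed value

	for _, job := range out["resource"].(map[string]interface{})["databricks_job"].(map[string]interface{}) {
		task := job.(map[string]interface{})["spark_python_task"].(map[string]interface{})
		task["python_file"] = filepath.Join(workspaceRoot, task["python_file"].(string))
	}

	b, _ := json.MarshalIndent(out, "", "  ")
	fmt.Println(string(b)) // python_file now points into the workspace root
}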
@@ -143,7 +153,7 @@ var deployCmd = &cobra.Command{
		runtf := true
		if runtf {
			execPath := "/opt/homebrew/bin/terraform"
			log.Printf("[INFO] tf exec path: %s", execPath)
			// log.Printf("[INFO] tf exec path: %s", execPath)

			workingDir := filepath.Join(prj.Root(), "dist", env)
			tf, err := tfexec.NewTerraform(workingDir, execPath)
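For context, a sketch of how a terraform-exec handle like the one created above is typically driven. The init and apply calls are an assumption about the surrounding code, not lines from this commit, and the working directory value is illustrative.

package main

import (
	"context"
	"log"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	ctx := context.Background()
	workingDir := "dist/development"          // assumed layout produced by the deploy command
	execPath := "/opt/homebrew/bin/terraform" // hard-coded path, as in the hunk above

	tf, err := tfexec.NewTerraform(workingDir, execPath)
	if err != nil {
		log.Fatalf("failed to create terraform handle: %s", err)
	}
	// Initialize the working directory, then apply the generated main.tf.json.
	if err := tf.Init(ctx); err != nil {
		log.Fatalf("terraform init failed: %s", err)
	}
	if err := tf.Apply(ctx); err != nil {
		log.Fatalf("terraform apply failed: %s", err)
	}
}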
@@ -1,11 +1,7 @@
package sync

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"time"
@@ -23,54 +19,54 @@ const SyncSnapshotFile = "repo_snapshot.json"
const BricksDir = ".bricks"

func (s *snapshot) storeSnapshot(root string) error {
	// create snapshot file
	configDir := filepath.Join(root, BricksDir)
	if _, err := os.Stat(configDir); os.IsNotExist(err) {
		err = os.Mkdir(configDir, os.ModeDir|os.ModePerm)
		if err != nil {
			return fmt.Errorf("failed to create config directory: %s", err)
		}
	}
	persistedSnapshotPath := filepath.Join(configDir, SyncSnapshotFile)
	f, err := os.OpenFile(persistedSnapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
	if err != nil {
		return fmt.Errorf("failed to create/open persisted sync snapshot file: %s", err)
	}
	defer f.Close()
	// // create snapshot file
	// configDir := filepath.Join(root, BricksDir)
	// if _, err := os.Stat(configDir); os.IsNotExist(err) {
	// 	err = os.Mkdir(configDir, os.ModeDir|os.ModePerm)
	// 	if err != nil {
	// 		return fmt.Errorf("failed to create config directory: %s", err)
	// 	}
	// }
	// persistedSnapshotPath := filepath.Join(configDir, SyncSnapshotFile)
	// f, err := os.OpenFile(persistedSnapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
	// if err != nil {
	// 	return fmt.Errorf("failed to create/open persisted sync snapshot file: %s", err)
	// }
	// defer f.Close()

	// persist snapshot to disk
	bytes, err := json.MarshalIndent(s, "", " ")
	if err != nil {
		return fmt.Errorf("failed to json marshal in-memory snapshot: %s", err)
	}
	_, err = f.Write(bytes)
	if err != nil {
		return fmt.Errorf("failed to write sync snapshot to disk: %s", err)
	}
	// // persist snapshot to disk
	// bytes, err := json.MarshalIndent(s, "", " ")
	// if err != nil {
	// 	return fmt.Errorf("failed to json marshal in-memory snapshot: %s", err)
	// }
	// _, err = f.Write(bytes)
	// if err != nil {
	// 	return fmt.Errorf("failed to write sync snapshot to disk: %s", err)
	// }
	return nil
}

func (s *snapshot) loadSnapshot(root string) error {
	persistedSnapshotPath := filepath.Join(root, BricksDir, SyncSnapshotFile)
	if _, err := os.Stat(persistedSnapshotPath); os.IsNotExist(err) {
		return nil
	}
	// persistedSnapshotPath := filepath.Join(root, BricksDir, SyncSnapshotFile)
	// if _, err := os.Stat(persistedSnapshotPath); os.IsNotExist(err) {
	// 	return nil
	// }

	f, err := os.Open(persistedSnapshotPath)
	if err != nil {
		return fmt.Errorf("failed to open persisted sync snapshot file: %s", err)
	}
	defer f.Close()
	// f, err := os.Open(persistedSnapshotPath)
	// if err != nil {
	// 	return fmt.Errorf("failed to open persisted sync snapshot file: %s", err)
	// }
	// defer f.Close()

	bytes, err := io.ReadAll(f)
	if err != nil {
		// clean up these error messages a bit
		return fmt.Errorf("failed to read sync snapshot from disk: %s", err)
	}
	err = json.Unmarshal(bytes, s)
	if err != nil {
		return fmt.Errorf("failed to json unmarshal persisted snapshot: %s", err)
	}
	// bytes, err := io.ReadAll(f)
	// if err != nil {
	// 	// clean up these error messages a bit
	// 	return fmt.Errorf("failed to read sync snapshot from disk: %s", err)
	// }
	// err = json.Unmarshal(bytes, s)
	// if err != nil {
	// 	return fmt.Errorf("failed to json unmarshal persisted snapshot: %s", err)
	// }
	return nil
}
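For orientation, a standalone sketch of the persistence round trip implemented above, using a stand-in snapshot type; the real snapshot struct and its fields are not shown in this diff.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"
)

const BricksDir = ".bricks"
const SyncSnapshotFile = "repo_snapshot.json"

// snapshot is a stand-in for the real type in this package; the field below
// is illustrative only and not taken from the commit.
type snapshot struct {
	Files map[string]string `json:"files"`
}

func main() {
	root, err := os.MkdirTemp("", "sync-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(root)

	// Store: write <root>/.bricks/repo_snapshot.json, the layout used above.
	s := &snapshot{Files: map[string]string{"my_first_job.py": "2022-10-01T00:00:00Z"}}
	configDir := filepath.Join(root, BricksDir)
	if err := os.MkdirAll(configDir, os.ModePerm); err != nil {
		log.Fatal(err)
	}
	data, err := json.MarshalIndent(s, "", " ")
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(configDir, SyncSnapshotFile), data, 0755); err != nil {
		log.Fatal(err)
	}

	// Load: read it back into an empty snapshot.
	var loaded snapshot
	raw, err := os.ReadFile(filepath.Join(configDir, SyncSnapshotFile))
	if err != nil {
		log.Fatal(err)
	}
	if err := json.Unmarshal(raw, &loaded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(loaded.Files)
}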
@@ -2,7 +2,6 @@ package sync

import (
	"fmt"
	"log"
	"time"

	"github.com/databricks/bricks/cmd/root"
@@ -22,6 +21,10 @@ var syncCmd = &cobra.Command{
		prj := project.Get(ctx)
		wsc := prj.WorkspacesClient()

		if prj.Environment().Workspace.Root != "" {
			*remotePath = prj.Environment().Workspace.Root
		}

		if *remotePath == "" {
			me, err := prj.Me()
			if err != nil {
@@ -33,15 +36,15 @@ var syncCmd = &cobra.Command{
			}
			*remotePath = fmt.Sprintf("/Repos/%s/%s", me.UserName, repositoryName)
		}

		log.Printf("[INFO] Remote file sync location: %v", *remotePath)
		repoExists, err := git.RepoExists(*remotePath, ctx, wsc)
		if err != nil {
			return err
		}
		if !repoExists {
			return fmt.Errorf("repo not found, please ensure %s exists", *remotePath)
		}
		var err error
		// log.Printf("[INFO] Remote file sync location: %v", *remotePath)
		// repoExists, err := git.RepoExists(*remotePath, ctx, wsc)
		// if err != nil {
		// 	return err
		// }
		// if !repoExists {
		// 	return fmt.Errorf("repo not found, please ensure %s exists", *remotePath)
		// }

		root := prj.Root()
		fileSet := git.NewFileSet(root)
@@ -64,5 +67,4 @@ func init() {
	interval = syncCmd.Flags().Duration("interval", 1*time.Second, "project files polling interval")
	remotePath = syncCmd.Flags().String("remote-path", "", "remote path to store repo in. eg: /Repos/me@example.com/test-repo")
	syncCmd.Flags().StringP("environment", "e", "", "Environment to use")

}
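The remote-path resolution in the hunks above can be summarized as a small helper; the sketch below is hypothetical and stubs out the --remote-path flag value and the current-user lookup.

package main

import "fmt"

// resolveRemotePath mirrors the precedence in the sync command: an explicit
// workspace root from the environment config wins, then any value already set
// via the --remote-path flag, and finally a default under /Repos/<user>/<repository>.
func resolveRemotePath(workspaceRoot, flagValue, userName, repositoryName string) string {
	if workspaceRoot != "" {
		return workspaceRoot
	}
	if flagValue != "" {
		return flagValue
	}
	return fmt.Sprintf("/Repos/%s/%s", userName, repositoryName)
}

func main() {
	fmt.Println(resolveRemotePath("", "", "someone@example.com", "bricks-demos"))
	fmt.Println(resolveRemotePath("/Repos/someone@example.com/bricks-demos/development", "", "someone@example.com", "bricks-demos"))
}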
File diff suppressed because it is too large.
@@ -0,0 +1,2 @@
/dist
repo_snapshot.json
@@ -0,0 +1,17 @@
name: example-project

environments:
  development:
    workspace:
      profile: e2-dogfood
      root: /Repos/pieter.noordhuis@databricks.com/bricks-demos/development

  staging:
    workspace:
      profile: e2-dogfood
      root: /Repos/pieter.noordhuis@databricks.com/bricks-demos/staging

  production:
    workspace:
      profile: e2-dogfood
      root: /Repos/pieter.noordhuis@databricks.com/bricks-demos/production
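For reference, a sketch of reading one environment's workspace block from a file shaped like the one above, using gopkg.in/yaml.v3 and structs modeled on the Workspace type changed later in this diff. The yaml tags and the loader itself are assumptions; the project's real loader is not shown here.

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

// Workspace mirrors the fields added in this commit; the yaml tags are assumptions.
type Workspace struct {
	Profile string `yaml:"profile"`
	Root    string `yaml:"root"`
}

type Environment struct {
	Workspace Workspace `yaml:"workspace"`
}

type Config struct {
	Name         string                 `yaml:"name"`
	Environments map[string]Environment `yaml:"environments"`
}

func main() {
	raw := []byte(`
name: example-project
environments:
  development:
    workspace:
      profile: e2-dogfood
      root: /Repos/someone@example.com/bricks-demos/development
`)
	var cfg Config
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Environments["development"].Workspace.Root)
}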
@@ -0,0 +1,7 @@
import sys

print("hello world!")
print(sys.argv)

for x in spark.range(10).collect():
    print(x)
@@ -0,0 +1,45 @@
# yaml-language-server: $schema=./.bricks/schema.json

resources:
  job:
    my_first_job:
      spark_python_task:
        python_file: "./my_first_job.py"
        parameters:
          - "hello"
          - "world"
      tags:
        key: value


environments:
  development:
    resources:
      job:
        my_first_job:
          name: "[development] my first job"

          # Use autoscaling on e2-dogfood.
          existing_cluster_id: "0923-164208-meows279"

  staging:
    resources:
      job:
        my_first_job:
          name: "[staging] my first job"
          new_cluster:
            spark_version: 10.4.x-scala2.12
            node_type_id: "i3.xlarge"
            num_workers: 2

  production:
    resources:
      job:
        my_first_job:
          name: "[production] my first job"
          new_cluster:
            spark_version: 10.4.x-scala2.12
            node_type_id: "i3.xlarge"
            autoscale:
              min_workers: 4
              max_workers: 8
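The per-environment blocks above override the base job definition. The exact merge semantics are not shown in this diff; conceptually it is a recursive map merge, sketched below with illustrative values.

package main

import (
	"encoding/json"
	"fmt"
)

// merge recursively overlays src on top of dst; nested maps are merged,
// everything else is replaced. This is an illustration, not the tool's code.
func merge(dst, src map[string]interface{}) map[string]interface{} {
	out := map[string]interface{}{}
	for k, v := range dst {
		out[k] = v
	}
	for k, v := range src {
		if sv, ok := v.(map[string]interface{}); ok {
			if dv, ok := out[k].(map[string]interface{}); ok {
				out[k] = merge(dv, sv)
				continue
			}
		}
		out[k] = v
	}
	return out
}

func main() {
	base := map[string]interface{}{
		"my_first_job": map[string]interface{}{
			"spark_python_task": map[string]interface{}{"python_file": "./my_first_job.py"},
		},
	}
	development := map[string]interface{}{
		"my_first_job": map[string]interface{}{
			"name":                "[development] my first job",
			"existing_cluster_id": "0923-164208-meows279",
		},
	}
	b, _ := json.MarshalIndent(merge(base, development), "", "  ")
	fmt.Println(string(b))
}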
@@ -13,6 +13,7 @@ const defaultEnvironment = "development"
// Workspace defines configurables at the workspace level.
type Workspace struct {
	Profile string `json:"profile,omitempty"`
	Root    string `json:"root,omitempty"`
}

// Environment defines all configurables for a single environment.