Merge remote-tracking branch 'origin' into add-bundle-init-event

Shreyas Goenka 2025-01-03 16:19:21 +05:30
commit aca49b2285
No known key found for this signature in database
GPG Key ID: 92A07DF49CCB0622
24 changed files with 42 additions and 41 deletions

View File

@@ -12,6 +12,8 @@ linters:
   - gofumpt
   - goimports
   - testifylint
+  - intrange
+  - mirror
 linters-settings:
   govet:
     enable-all: true
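
The two linters added here drive most of this commit: intrange flags counter loops that can be rewritten with Go 1.22's range-over-integer form, and mirror flags []byte/string round-trips where a string-native variant of the same function exists. A minimal standalone sketch of the loop rewrite (illustration only, not code from this repository):

package main

import "fmt"

func main() {
	// Since Go 1.22, range accepts an integer n and yields
	// i = 0, 1, ..., n-1, equivalent to the classic counter loop.
	// The bound can be any int expression: a literal, len(xs),
	// or a call like typ.NumField().
	for i := range 3 {
		fmt.Println("range form:", i)
	}
	for i := 0; i < 3; i++ {
		fmt.Println("classic form:", i)
	}
	// When the index is unused, it can be dropped entirely.
	for range 3 {
		fmt.Println("tick")
	}
}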

View File

@@ -382,7 +382,7 @@ func TestAllResourcesMocked(t *testing.T) {
b := mockBundle(config.Development)
resources := reflect.ValueOf(b.Config.Resources)
- for i := 0; i < resources.NumField(); i++ {
+ for i := range resources.NumField() {
field := resources.Field(i)
if field.Kind() == reflect.Map {
assert.True(
@@ -411,7 +411,7 @@ func TestAllNonUcResourcesAreRenamed(t *testing.T) {
require.NoError(t, diags.Error())
resources := reflect.ValueOf(b.Config.Resources)
- for i := 0; i < resources.NumField(); i++ {
+ for i := range resources.NumField() {
field := resources.Field(i)
if field.Kind() == reflect.Map {

View File

@@ -33,7 +33,7 @@ func TestCustomMarshallerIsImplemented(t *testing.T) {
r := Resources{}
rt := reflect.TypeOf(r)
- for i := 0; i < rt.NumField(); i++ {
+ for i := range rt.NumField() {
field := rt.Field(i)
// Fields in Resources are expected to be of the form map[string]*resourceStruct
@@ -75,7 +75,7 @@ func TestResourcesAllResourcesCompleteness(t *testing.T) {
types = append(types, group.Description.PluralName)
}
- for i := 0; i < rt.NumField(); i++ {
+ for i := range rt.NumField() {
field := rt.Field(i)
jsonTag := field.Tag.Get("json")
@@ -92,7 +92,7 @@ func TestSupportedResources(t *testing.T) {
actual := SupportedResources()
typ := reflect.TypeOf(Resources{})
- for i := 0; i < typ.NumField(); i++ {
+ for i := range typ.NumField() {
field := typ.Field(i)
jsonTags := strings.Split(field.Tag.Get("json"), ",")
pluralName := jsonTags[0]

View File

@@ -13,7 +13,7 @@ func TestLookup_Coverage(t *testing.T) {
val := reflect.ValueOf(lookup)
typ := val.Type()
- for i := 0; i < val.NumField(); i++ {
+ for i := range val.NumField() {
field := val.Field(i)
if field.Kind() != reflect.String {
t.Fatalf("Field %s is not a string", typ.Field(i).Name)

View File

@@ -1261,7 +1261,7 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
func AssertFullResourceCoverage(t *testing.T, config *config.Root) {
resources := reflect.ValueOf(config.Resources)
- for i := 0; i < resources.NumField(); i++ {
+ for i := range resources.NumField() {
field := resources.Field(i)
if field.Kind() == reflect.Map {
assert.True(

View File

@@ -7,6 +7,7 @@ import (
"io"
"os"
"path/filepath"
+ "strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/cmdio"
@@ -67,7 +68,7 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
if changed && !m.opts.AutoApprove {
output := buf.String()
// Remove output starting from Warning until end of output
- output = output[:bytes.Index([]byte(output), []byte("Warning:"))]
+ output = output[:strings.Index(output, "Warning:")]
cmdio.LogString(ctx, output)
if !cmdio.IsPromptSupported(ctx) {
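
This is the mirror rewrite in action: strings.Index searches the string directly instead of converting it to []byte for bytes.Index. One observation (not addressed by this commit): both the old and the new form slice with the raw return value, and Index returns -1 when "Warning:" is absent, which would panic at runtime. A guarded variant might look like the hypothetical helper below (sketch only, not code from this repository):

package main

import (
	"fmt"
	"strings"
)

// trimFromMarker is a hypothetical helper: it drops everything from
// the first occurrence of marker onward, and returns the input
// unchanged when the marker is absent (strings.Index returns -1).
func trimFromMarker(s, marker string) string {
	if i := strings.Index(s, marker); i >= 0 {
		return s[:i]
	}
	return s
}

func main() {
	fmt.Println(trimFromMarker("plan output Warning: drift", "Warning:")) // "plan output "
	fmt.Println(trimFromMarker("no marker here", "Warning:"))             // unchanged
}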

View File

@@ -71,7 +71,7 @@ func TestStatePushLargeState(t *testing.T) {
b := statePushTestBundle(t)
largeState := map[string]any{}
- for i := 0; i < 1000000; i++ {
+ for i := range 1000000 {
largeState[fmt.Sprintf("field_%d", i)] = i
}

View File

@@ -54,7 +54,7 @@ func (p *openapiParser) findRef(typ reflect.Type) (jsonschema.Schema, bool) {
// Check for embedded Databricks Go SDK types.
if typ.Kind() == reflect.Struct {
- for i := 0; i < typ.NumField(); i++ {
+ for i := range typ.NumField() {
if !typ.Field(i).Anonymous {
continue
}

View File

@@ -17,7 +17,7 @@ import (
func filterEventsByUpdateId(events []pipelines.PipelineEvent, updateId string) []pipelines.PipelineEvent {
result := []pipelines.PipelineEvent{}
- for i := 0; i < len(events); i++ {
+ for i := range events {
if events[i].Origin.UpdateId == updateId {
result = append(result, events[i])
}
@@ -32,7 +32,7 @@ func (r *pipelineRunner) logEvent(ctx context.Context, event pipelines.PipelineE
}
if event.Error != nil && len(event.Error.Exceptions) > 0 {
logString += "trace for most recent exception: \n"
- for i := 0; i < len(event.Error.Exceptions); i++ {
+ for i := range len(event.Error.Exceptions) {
logString += fmt.Sprintf("%s\n", event.Error.Exceptions[i].Message)
}
}
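
Note the two spellings above: ranging over the slice itself (for i := range events) and ranging over its length (for i := range len(event.Error.Exceptions)). Both yield the same indices in Go 1.22; the slice form is the more idiomatic of the two. A small standalone comparison (illustration only, not code from this repository):

package main

import "fmt"

func main() {
	xs := []string{"a", "b", "c"}

	// Ranging over a slice with a single variable yields indices.
	for i := range xs {
		fmt.Println(i, xs[i])
	}

	// Ranging over len(xs), an int, yields the same indices; it is
	// what a mechanical rewrite of `for i := 0; i < len(xs); i++`
	// produces, but reads less naturally than ranging over xs.
	for i := range len(xs) {
		fmt.Println(i, xs[i])
	}
}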

View File

@@ -40,8 +40,6 @@ func TestBundleInitErrorOnUnknownFields(t *testing.T) {
// make changes that can break the MLOps Stacks DAB. In which case we should
// skip this test until the MLOps Stacks DAB is updated to work again.
func TestBundleInitOnMlopsStacks(t *testing.T) {
- testutil.SkipUntil(t, "2025-01-09")
ctx, wt := acc.WorkspaceTest(t)
w := wt.W

View File

@@ -60,13 +60,13 @@ func TestLock(t *testing.T) {
lockerErrs := make([]error, numConcurrentLocks)
lockers := make([]*lockpkg.Locker, numConcurrentLocks)
- for i := 0; i < numConcurrentLocks; i++ {
+ for i := range numConcurrentLocks {
lockers[i], err = lockpkg.CreateLocker("humpty.dumpty@databricks.com", remoteProjectRoot, wsc)
require.NoError(t, err)
}
var wg sync.WaitGroup
- for i := 0; i < numConcurrentLocks; i++ {
+ for i := range numConcurrentLocks {
wg.Add(1)
currentIndex := i
go func() {
@@ -80,7 +80,7 @@ func TestLock(t *testing.T) {
countActive := 0
indexOfActiveLocker := 0
indexOfAnInactiveLocker := -1
- for i := 0; i < numConcurrentLocks; i++ {
+ for i := range numConcurrentLocks {
if lockers[i].Active {
countActive += 1
assert.NoError(t, lockerErrs[i])
@@ -102,7 +102,7 @@ func TestLock(t *testing.T) {
assert.True(t, remoteLocker.AcquisitionTime.Equal(lockers[indexOfActiveLocker].State.AcquisitionTime), "remote locker acquisition time does not match active locker")
// test all other locks (inactive ones) do not match the remote lock and Unlock fails
- for i := 0; i < numConcurrentLocks; i++ {
+ for i := range numConcurrentLocks {
if i == indexOfActiveLocker {
continue
}
@@ -112,7 +112,7 @@ func TestLock(t *testing.T) {
}
// test inactive locks fail to write a file
- for i := 0; i < numConcurrentLocks; i++ {
+ for i := range numConcurrentLocks {
if i == indexOfActiveLocker {
continue
}
@@ -140,7 +140,7 @@ func TestLock(t *testing.T) {
assert.Equal(t, "Shah Rukh", res["name"])
// inactive locker file reads fail
- for i := 0; i < numConcurrentLocks; i++ {
+ for i := range numConcurrentLocks {
if i == indexOfActiveLocker {
continue
}

View File

@@ -266,7 +266,7 @@ func prepareRepoFiles(t *testing.T) *testFiles {
func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask {
tasks := make([]jobs.SubmitTask, 0)
- for i := 0; i < len(versions); i++ {
+ for i := range versions {
task := jobs.SubmitTask{
TaskKey: fmt.Sprintf("notebook_%s", strings.ReplaceAll(versions[i], ".", "_")),
NotebookTask: &jobs.NotebookTask{
@@ -287,7 +287,7 @@ func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId st
func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask {
tasks := make([]jobs.SubmitTask, 0)
- for i := 0; i < len(versions); i++ {
+ for i := range versions {
task := jobs.SubmitTask{
TaskKey: fmt.Sprintf("spark_%s", strings.ReplaceAll(versions[i], ".", "_")),
SparkPythonTask: &jobs.SparkPythonTask{
@@ -308,7 +308,7 @@ func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId
func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId string) []jobs.SubmitTask {
tasks := make([]jobs.SubmitTask, 0)
- for i := 0; i < len(versions); i++ {
+ for i := range versions {
task := jobs.SubmitTask{
TaskKey: fmt.Sprintf("whl_%s", strings.ReplaceAll(versions[i], ".", "_")),
PythonWheelTask: &jobs.PythonWheelTask{

View File

@@ -39,7 +39,7 @@ func Heredoc(tmpl string) (trimmed string) {
break
}
}
- for i := 0; i < len(lines); i++ {
+ for i := range lines {
if lines[i] == "" || strings.TrimSpace(lines[i]) == "" {
continue
}

View File

@@ -55,7 +55,7 @@ func (d *dummyIterator) Next(ctx context.Context) (*provisioning.Workspace, erro
func makeWorkspaces(count int) []*provisioning.Workspace {
res := make([]*provisioning.Workspace, 0, count)
next := []*provisioning.Workspace{&dummyWorkspace1, &dummyWorkspace2}
- for i := 0; i < count; i++ {
+ for range count {
n := next[0]
next = append(next[1:], n)
res = append(res, n)
@@ -74,7 +74,7 @@ func makeIterator(count int) listing.Iterator[*provisioning.Workspace] {
func makeBigOutput(count int) string {
res := bytes.Buffer{}
for _, ws := range makeWorkspaces(count) {
- res.Write([]byte(fmt.Sprintf("%d %s\n", ws.WorkspaceId, ws.WorkspaceName)))
+ res.WriteString(fmt.Sprintf("%d %s\n", ws.WorkspaceId, ws.WorkspaceName))
}
return res.String()
}
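
Another mirror fix: bytes.Buffer already has WriteString, so the string can be written directly instead of being copied into a fresh []byte first. A standalone sketch of three equivalent ways to append formatted text to a buffer (illustration only, not code from this repository):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	// What mirror flags: converting a string to []byte just to call Write.
	buf.Write([]byte(fmt.Sprintf("%d %s\n", 1, "one")))

	// The suggested form: WriteString takes the string as-is.
	buf.WriteString(fmt.Sprintf("%d %s\n", 2, "two"))

	// An alternative (not what this commit does): Fprintf formats
	// straight into the buffer via its io.Writer implementation.
	fmt.Fprintf(&buf, "%d %s\n", 3, "three")

	fmt.Print(buf.String())
}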

View File

@@ -209,7 +209,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) {
}
out := make([]dyn.Value, src.Len())
- for i := 0; i < src.Len(); i++ {
+ for i := range src.Len() {
v := src.Index(i)
refv := ref.Index(i)

View File

@@ -65,7 +65,7 @@ func buildStructInfo(typ reflect.Type) structInfo {
}
nf := styp.NumField()
- for j := 0; j < nf; j++ {
+ for j := range nf {
sf := styp.Field(j)
// Recurse into anonymous fields.

View File

@@ -185,7 +185,7 @@ func TestMappingClone(t *testing.T) {
func TestMappingMerge(t *testing.T) {
var m1 dyn.Mapping
- for i := 0; i < 10; i++ {
+ for i := range 10 {
err := m1.Set(dyn.V(fmt.Sprintf("%d", i)), dyn.V(i))
require.NoError(t, err)
}

View File

@@ -165,7 +165,7 @@ func overrideSequence(basePath dyn.Path, left, right []dyn.Value, visitor Overri
minLen := min(len(left), len(right))
var values []dyn.Value
- for i := 0; i < minLen; i++ {
+ for i := range minLen {
path := basePath.Append(dyn.Index(i))
merged, err := override(path, left[i], right[i], visitor)
if err != nil {

View File

@@ -65,7 +65,7 @@ func (p Path) Equal(q Path) bool {
if pl != ql {
return false
}
- for i := 0; i < pl; i++ {
+ for i := range pl {
if p[i] != q[i] {
return false
}
@@ -81,7 +81,7 @@ func (p Path) HasPrefix(q Path) bool {
if pl < ql {
return false
}
- for i := 0; i < ql; i++ {
+ for i := range ql {
if p[i] != q[i] {
return false
}

View File

@@ -87,12 +87,12 @@ func TestMapFuncOnMapWithEmptySequence(t *testing.T) {
dyn.V([]dyn.Value{dyn.V(42)}),
}
- for i := 0; i < len(variants); i++ {
+ for i := range variants {
vin := dyn.V(map[string]dyn.Value{
"key": variants[i],
})
- for j := 0; j < len(variants); j++ {
+ for j := range variants {
vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("key")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
return variants[j], nil
})
@@ -153,12 +153,12 @@ func TestMapFuncOnSequenceWithEmptySequence(t *testing.T) {
dyn.V([]dyn.Value{dyn.V(42)}),
}
- for i := 0; i < len(variants); i++ {
+ for i := range variants {
vin := dyn.V([]dyn.Value{
variants[i],
})
- for j := 0; j < len(variants); j++ {
+ for j := range variants {
vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
return variants[j], nil
})

View File

@@ -141,7 +141,7 @@ func TestMultipleCommandsRunInParrallel(t *testing.T) {
const count = 5
var wg sync.WaitGroup
- for i := 0; i < count; i++ {
+ for i := range count {
wg.Add(1)
cmd, err := executor.StartCommand(context.Background(), fmt.Sprintf("echo 'Hello %d'", i))
go func(cmd Command, i int) {

View File

@@ -107,7 +107,7 @@ func TestFsOpenDir(t *testing.T) {
de.Close()
- for i := 0; i < 3; i++ {
+ for range 3 {
tmp, err = de.ReadDir(1)
require.NoError(t, err)
entries = append(entries, tmp...)

View File

@@ -195,7 +195,7 @@ func (w *WorkspaceFilesClient) Write(ctx context.Context, name string, reader io
// This API returns 400 if the file already exists, when the object type is notebook
regex := regexp.MustCompile(`Path \((.*)\) already exists.`)
- if aerr.StatusCode == http.StatusBadRequest && regex.Match([]byte(aerr.Message)) {
+ if aerr.StatusCode == http.StatusBadRequest && regex.MatchString(aerr.Message) {
// Parse file path from regex capture group
matches := regex.FindStringSubmatch(aerr.Message)
if len(matches) == 2 {
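
regexp.Regexp exposes byte and string variants of most methods, and this code already calls FindStringSubmatch on the same message, so MatchString keeps the whole block on the string-native side. A standalone sketch of the match-then-capture pattern used here (illustration only; the message literal is made up):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`Path \((.*)\) already exists.`)
	msg := "Path (/Workspace/foo) already exists."

	// MatchString works on the string directly, no []byte conversion.
	if re.MatchString(msg) {
		// FindStringSubmatch returns the full match plus one entry
		// per capture group, so a single group gives length 2.
		if m := re.FindStringSubmatch(msg); len(m) == 2 {
			fmt.Println("path:", m[1]) // /Workspace/foo
		}
	}
}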

View File

@@ -211,7 +211,7 @@ func getStructFields(typ reflect.Type) []reflect.StructField {
fields := []reflect.StructField{}
bfsQueue := list.New()
- for i := 0; i < typ.NumField(); i++ {
+ for i := range typ.NumField() {
bfsQueue.PushBack(typ.Field(i))
}
for bfsQueue.Len() > 0 {
@@ -233,7 +233,7 @@ func getStructFields(typ reflect.Type) []reflect.StructField {
fieldType = fieldType.Elem()
}
- for i := 0; i < fieldType.NumField(); i++ {
+ for i := range fieldType.NumField() {
bfsQueue.PushBack(fieldType.Field(i))
}
}