mirror of https://github.com/databricks/cli.git
Enable intrange linter and apply autofix (#2069)
New construct in Go 1.22+ for integer iteration: https://github.com/ckaznocha/intrange?tab=readme-ov-file#intrange
This commit is contained in:
parent
60782b57bd
commit
39d1e8093f
|
@ -12,6 +12,7 @@ linters:
|
|||
- gofumpt
|
||||
- goimports
|
||||
- testifylint
|
||||
- intrange
|
||||
linters-settings:
|
||||
govet:
|
||||
enable-all: true
|
||||
|
|
|
@ -382,7 +382,7 @@ func TestAllResourcesMocked(t *testing.T) {
|
|||
b := mockBundle(config.Development)
|
||||
resources := reflect.ValueOf(b.Config.Resources)
|
||||
|
||||
for i := 0; i < resources.NumField(); i++ {
|
||||
for i := range resources.NumField() {
|
||||
field := resources.Field(i)
|
||||
if field.Kind() == reflect.Map {
|
||||
assert.True(
|
||||
|
@ -411,7 +411,7 @@ func TestAllNonUcResourcesAreRenamed(t *testing.T) {
|
|||
require.NoError(t, diags.Error())
|
||||
|
||||
resources := reflect.ValueOf(b.Config.Resources)
|
||||
for i := 0; i < resources.NumField(); i++ {
|
||||
for i := range resources.NumField() {
|
||||
field := resources.Field(i)
|
||||
|
||||
if field.Kind() == reflect.Map {
|
||||
|
|
|
@ -33,7 +33,7 @@ func TestCustomMarshallerIsImplemented(t *testing.T) {
|
|||
r := Resources{}
|
||||
rt := reflect.TypeOf(r)
|
||||
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
for i := range rt.NumField() {
|
||||
field := rt.Field(i)
|
||||
|
||||
// Fields in Resources are expected be of the form map[string]*resourceStruct
|
||||
|
@ -75,7 +75,7 @@ func TestResourcesAllResourcesCompleteness(t *testing.T) {
|
|||
types = append(types, group.Description.PluralName)
|
||||
}
|
||||
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
for i := range rt.NumField() {
|
||||
field := rt.Field(i)
|
||||
jsonTag := field.Tag.Get("json")
|
||||
|
||||
|
@ -92,7 +92,7 @@ func TestSupportedResources(t *testing.T) {
|
|||
actual := SupportedResources()
|
||||
|
||||
typ := reflect.TypeOf(Resources{})
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
for i := range typ.NumField() {
|
||||
field := typ.Field(i)
|
||||
jsonTags := strings.Split(field.Tag.Get("json"), ",")
|
||||
pluralName := jsonTags[0]
|
||||
|
|
|
@ -13,7 +13,7 @@ func TestLookup_Coverage(t *testing.T) {
|
|||
val := reflect.ValueOf(lookup)
|
||||
typ := val.Type()
|
||||
|
||||
for i := 0; i < val.NumField(); i++ {
|
||||
for i := range val.NumField() {
|
||||
field := val.Field(i)
|
||||
if field.Kind() != reflect.String {
|
||||
t.Fatalf("Field %s is not a string", typ.Field(i).Name)
|
||||
|
|
|
@ -1261,7 +1261,7 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
|
|||
|
||||
func AssertFullResourceCoverage(t *testing.T, config *config.Root) {
|
||||
resources := reflect.ValueOf(config.Resources)
|
||||
for i := 0; i < resources.NumField(); i++ {
|
||||
for i := range resources.NumField() {
|
||||
field := resources.Field(i)
|
||||
if field.Kind() == reflect.Map {
|
||||
assert.True(
|
||||
|
|
|
@ -71,7 +71,7 @@ func TestStatePushLargeState(t *testing.T) {
|
|||
b := statePushTestBundle(t)
|
||||
|
||||
largeState := map[string]any{}
|
||||
for i := 0; i < 1000000; i++ {
|
||||
for i := range 1000000 {
|
||||
largeState[fmt.Sprintf("field_%d", i)] = i
|
||||
}
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ func (p *openapiParser) findRef(typ reflect.Type) (jsonschema.Schema, bool) {
|
|||
|
||||
// Check for embedded Databricks Go SDK types.
|
||||
if typ.Kind() == reflect.Struct {
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
for i := range typ.NumField() {
|
||||
if !typ.Field(i).Anonymous {
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ import (
|
|||
|
||||
func filterEventsByUpdateId(events []pipelines.PipelineEvent, updateId string) []pipelines.PipelineEvent {
|
||||
result := []pipelines.PipelineEvent{}
|
||||
for i := 0; i < len(events); i++ {
|
||||
for i := range events {
|
||||
if events[i].Origin.UpdateId == updateId {
|
||||
result = append(result, events[i])
|
||||
}
|
||||
|
@ -32,7 +32,7 @@ func (r *pipelineRunner) logEvent(ctx context.Context, event pipelines.PipelineE
|
|||
}
|
||||
if event.Error != nil && len(event.Error.Exceptions) > 0 {
|
||||
logString += "trace for most recent exception: \n"
|
||||
for i := 0; i < len(event.Error.Exceptions); i++ {
|
||||
for i := range len(event.Error.Exceptions) {
|
||||
logString += fmt.Sprintf("%s\n", event.Error.Exceptions[i].Message)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -60,13 +60,13 @@ func TestLock(t *testing.T) {
|
|||
|
||||
lockerErrs := make([]error, numConcurrentLocks)
|
||||
lockers := make([]*lockpkg.Locker, numConcurrentLocks)
|
||||
for i := 0; i < numConcurrentLocks; i++ {
|
||||
for i := range numConcurrentLocks {
|
||||
lockers[i], err = lockpkg.CreateLocker("humpty.dumpty@databricks.com", remoteProjectRoot, wsc)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < numConcurrentLocks; i++ {
|
||||
for i := range numConcurrentLocks {
|
||||
wg.Add(1)
|
||||
currentIndex := i
|
||||
go func() {
|
||||
|
@ -80,7 +80,7 @@ func TestLock(t *testing.T) {
|
|||
countActive := 0
|
||||
indexOfActiveLocker := 0
|
||||
indexOfAnInactiveLocker := -1
|
||||
for i := 0; i < numConcurrentLocks; i++ {
|
||||
for i := range numConcurrentLocks {
|
||||
if lockers[i].Active {
|
||||
countActive += 1
|
||||
assert.NoError(t, lockerErrs[i])
|
||||
|
@ -102,7 +102,7 @@ func TestLock(t *testing.T) {
|
|||
assert.True(t, remoteLocker.AcquisitionTime.Equal(lockers[indexOfActiveLocker].State.AcquisitionTime), "remote locker acquisition time does not match active locker")
|
||||
|
||||
// test all other locks (inactive ones) do not match the remote lock and Unlock fails
|
||||
for i := 0; i < numConcurrentLocks; i++ {
|
||||
for i := range numConcurrentLocks {
|
||||
if i == indexOfActiveLocker {
|
||||
continue
|
||||
}
|
||||
|
@ -112,7 +112,7 @@ func TestLock(t *testing.T) {
|
|||
}
|
||||
|
||||
// test inactive locks fail to write a file
|
||||
for i := 0; i < numConcurrentLocks; i++ {
|
||||
for i := range numConcurrentLocks {
|
||||
if i == indexOfActiveLocker {
|
||||
continue
|
||||
}
|
||||
|
@ -140,7 +140,7 @@ func TestLock(t *testing.T) {
|
|||
assert.Equal(t, "Shah Rukh", res["name"])
|
||||
|
||||
// inactive locker file reads fail
|
||||
for i := 0; i < numConcurrentLocks; i++ {
|
||||
for i := range numConcurrentLocks {
|
||||
if i == indexOfActiveLocker {
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -266,7 +266,7 @@ func prepareRepoFiles(t *testing.T) *testFiles {
|
|||
|
||||
func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask {
|
||||
tasks := make([]jobs.SubmitTask, 0)
|
||||
for i := 0; i < len(versions); i++ {
|
||||
for i := range versions {
|
||||
task := jobs.SubmitTask{
|
||||
TaskKey: fmt.Sprintf("notebook_%s", strings.ReplaceAll(versions[i], ".", "_")),
|
||||
NotebookTask: &jobs.NotebookTask{
|
||||
|
@ -287,7 +287,7 @@ func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId st
|
|||
|
||||
func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask {
|
||||
tasks := make([]jobs.SubmitTask, 0)
|
||||
for i := 0; i < len(versions); i++ {
|
||||
for i := range versions {
|
||||
task := jobs.SubmitTask{
|
||||
TaskKey: fmt.Sprintf("spark_%s", strings.ReplaceAll(versions[i], ".", "_")),
|
||||
SparkPythonTask: &jobs.SparkPythonTask{
|
||||
|
@ -308,7 +308,7 @@ func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId
|
|||
|
||||
func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId string) []jobs.SubmitTask {
|
||||
tasks := make([]jobs.SubmitTask, 0)
|
||||
for i := 0; i < len(versions); i++ {
|
||||
for i := range versions {
|
||||
task := jobs.SubmitTask{
|
||||
TaskKey: fmt.Sprintf("whl_%s", strings.ReplaceAll(versions[i], ".", "_")),
|
||||
PythonWheelTask: &jobs.PythonWheelTask{
|
||||
|
|
|
@ -39,7 +39,7 @@ func Heredoc(tmpl string) (trimmed string) {
|
|||
break
|
||||
}
|
||||
}
|
||||
for i := 0; i < len(lines); i++ {
|
||||
for i := range lines {
|
||||
if lines[i] == "" || strings.TrimSpace(lines[i]) == "" {
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -55,7 +55,7 @@ func (d *dummyIterator) Next(ctx context.Context) (*provisioning.Workspace, erro
|
|||
func makeWorkspaces(count int) []*provisioning.Workspace {
|
||||
res := make([]*provisioning.Workspace, 0, count)
|
||||
next := []*provisioning.Workspace{&dummyWorkspace1, &dummyWorkspace2}
|
||||
for i := 0; i < count; i++ {
|
||||
for range count {
|
||||
n := next[0]
|
||||
next = append(next[1:], n)
|
||||
res = append(res, n)
|
||||
|
|
|
@ -209,7 +209,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) {
|
|||
}
|
||||
|
||||
out := make([]dyn.Value, src.Len())
|
||||
for i := 0; i < src.Len(); i++ {
|
||||
for i := range src.Len() {
|
||||
v := src.Index(i)
|
||||
refv := ref.Index(i)
|
||||
|
||||
|
|
|
@ -65,7 +65,7 @@ func buildStructInfo(typ reflect.Type) structInfo {
|
|||
}
|
||||
|
||||
nf := styp.NumField()
|
||||
for j := 0; j < nf; j++ {
|
||||
for j := range nf {
|
||||
sf := styp.Field(j)
|
||||
|
||||
// Recurse into anonymous fields.
|
||||
|
|
|
@ -185,7 +185,7 @@ func TestMappingClone(t *testing.T) {
|
|||
|
||||
func TestMappingMerge(t *testing.T) {
|
||||
var m1 dyn.Mapping
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
err := m1.Set(dyn.V(fmt.Sprintf("%d", i)), dyn.V(i))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
|
|
@ -165,7 +165,7 @@ func overrideSequence(basePath dyn.Path, left, right []dyn.Value, visitor Overri
|
|||
minLen := min(len(left), len(right))
|
||||
var values []dyn.Value
|
||||
|
||||
for i := 0; i < minLen; i++ {
|
||||
for i := range minLen {
|
||||
path := basePath.Append(dyn.Index(i))
|
||||
merged, err := override(path, left[i], right[i], visitor)
|
||||
if err != nil {
|
||||
|
|
|
@ -65,7 +65,7 @@ func (p Path) Equal(q Path) bool {
|
|||
if pl != ql {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < pl; i++ {
|
||||
for i := range pl {
|
||||
if p[i] != q[i] {
|
||||
return false
|
||||
}
|
||||
|
@ -81,7 +81,7 @@ func (p Path) HasPrefix(q Path) bool {
|
|||
if pl < ql {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < ql; i++ {
|
||||
for i := range ql {
|
||||
if p[i] != q[i] {
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -87,12 +87,12 @@ func TestMapFuncOnMapWithEmptySequence(t *testing.T) {
|
|||
dyn.V([]dyn.Value{dyn.V(42)}),
|
||||
}
|
||||
|
||||
for i := 0; i < len(variants); i++ {
|
||||
for i := range variants {
|
||||
vin := dyn.V(map[string]dyn.Value{
|
||||
"key": variants[i],
|
||||
})
|
||||
|
||||
for j := 0; j < len(variants); j++ {
|
||||
for j := range variants {
|
||||
vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("key")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
|
||||
return variants[j], nil
|
||||
})
|
||||
|
@ -153,12 +153,12 @@ func TestMapFuncOnSequenceWithEmptySequence(t *testing.T) {
|
|||
dyn.V([]dyn.Value{dyn.V(42)}),
|
||||
}
|
||||
|
||||
for i := 0; i < len(variants); i++ {
|
||||
for i := range variants {
|
||||
vin := dyn.V([]dyn.Value{
|
||||
variants[i],
|
||||
})
|
||||
|
||||
for j := 0; j < len(variants); j++ {
|
||||
for j := range variants {
|
||||
vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
|
||||
return variants[j], nil
|
||||
})
|
||||
|
|
|
@ -141,7 +141,7 @@ func TestMultipleCommandsRunInParrallel(t *testing.T) {
|
|||
const count = 5
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
for i := range count {
|
||||
wg.Add(1)
|
||||
cmd, err := executor.StartCommand(context.Background(), fmt.Sprintf("echo 'Hello %d'", i))
|
||||
go func(cmd Command, i int) {
|
||||
|
|
|
@ -107,7 +107,7 @@ func TestFsOpenDir(t *testing.T) {
|
|||
|
||||
de.Close()
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
for range 3 {
|
||||
tmp, err = de.ReadDir(1)
|
||||
require.NoError(t, err)
|
||||
entries = append(entries, tmp...)
|
||||
|
|
|
@ -211,7 +211,7 @@ func getStructFields(typ reflect.Type) []reflect.StructField {
|
|||
fields := []reflect.StructField{}
|
||||
bfsQueue := list.New()
|
||||
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
for i := range typ.NumField() {
|
||||
bfsQueue.PushBack(typ.Field(i))
|
||||
}
|
||||
for bfsQueue.Len() > 0 {
|
||||
|
@ -233,7 +233,7 @@ func getStructFields(typ reflect.Type) []reflect.StructField {
|
|||
fieldType = fieldType.Elem()
|
||||
}
|
||||
|
||||
for i := 0; i < fieldType.NumField(); i++ {
|
||||
for i := range fieldType.NumField() {
|
||||
bfsQueue.PushBack(fieldType.Field(i))
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue