diff --git a/internal/tools/sigmigrate/cmd/flags.go b/internal/tools/sigmigrate/cmd/flags.go
index 920e275e..2830ca86 100644
--- a/internal/tools/sigmigrate/cmd/flags.go
+++ b/internal/tools/sigmigrate/cmd/flags.go
@@ -61,6 +61,6 @@ func addFlags(flags *pflag.FlagSet) {
 	flags.String(
 		"object",
 		"",
-		"Scan only a specific object in format <namespace>/<resource>/<name>. Use 'clusterwide' namespace for cluster-scoped resources.",
+		"Scan only a specific object in format <namespace>/<resource>/<name>. Use 'clusterwide' namespace for cluster-scoped resources. To get the resource name, use the 'kubectl api-resources' command.",
 	)
 }
diff --git a/internal/tools/sigmigrate/sigmigrate.go b/internal/tools/sigmigrate/sigmigrate.go
index 12159261..4878abef 100644
--- a/internal/tools/sigmigrate/sigmigrate.go
+++ b/internal/tools/sigmigrate/sigmigrate.go
@@ -771,7 +771,7 @@ func loadFailedObjects() (map[string]ObjectRef, error) {
 		}
 
 		parts := strings.Split(line, "|")
-		if len(parts) != 3 {
+		if len(parts) < 3 {
 			continue
 		}
 
@@ -783,20 +783,18 @@ func loadFailedObjects() (map[string]ObjectRef, error) {
 			continue
 		}
 
-		key := fmt.Sprintf("%s|%s|%s", namespace, name, kind)
-		// For retry, we need to reconstruct GVR - use a simple approach
-		// In production, you might want to store GVR in the file
-		resource := strings.ToLower(kind)
-		if !strings.HasSuffix(resource, "s") {
-			resource += "s"
+		gvr := schema.GroupVersionResource{Resource: strings.ToLower(kind)}
+		if len(parts) >= 5 {
+			gvr.Group = strings.TrimSpace(parts[3])
+			gvr.Version = strings.TrimSpace(parts[4])
 		}
+
+		key := fmt.Sprintf("%s|%s|%s", namespace, name, kind)
 		objects[key] = ObjectRef{
 			Namespace: namespace,
 			Name:      name,
 			Kind:      kind,
-			GVR: schema.GroupVersionResource{
-				Resource: resource,
-			},
+			GVR: gvr,
 		}
 	}
 
@@ -817,7 +815,7 @@ func recordFailure(obj ObjectRef, errorMsg string) {
 		tracef("failed to append failed attempts file %s: %v", failedAttemptsFile, err)
 		return
 	}
-	_, _ = fmt.Fprintf(f, "%s|%s|%s\n", obj.Namespace, obj.Name, obj.Kind)
+	_, _ = fmt.Fprintf(f, "%s|%s|%s|%s|%s\n", obj.Namespace, obj.Name, obj.Kind, obj.GVR.Group, obj.GVR.Version)
 	_ = f.Sync()
 	_ = f.Close()
 }
diff --git a/internal/tools/sigmigrate/sigmigrate_test.go b/internal/tools/sigmigrate/sigmigrate_test.go
index 36052cab..22ef75cc 100644
--- a/internal/tools/sigmigrate/sigmigrate_test.go
+++ b/internal/tools/sigmigrate/sigmigrate_test.go
@@ -266,23 +266,90 @@ func TestLoadFailedObjects(t *testing.T) {
 	setCurrentRunState(runState)
 	defer setCurrentRunState(nil)
 
-	testData := "default|test-pod|pods\nkube-system|test-cm|configmaps\n|cluster-resource|clusterroles\n"
+	testData := "default|test-pod|pods\nkube-system|test-cm|configmaps\nclusterwide|worker|nodegroups\n|cluster-resource|clusterroles\n"
 	err := os.WriteFile(legacyRetryFile, []byte(testData), 0644)
 	require.NoError(t, err)
 
 	objects, err := loadFailedObjects()
 	require.NoError(t, err)
-	require.Len(t, objects, 2)
+	require.Len(t, objects, 3)
 
 	first := objects["default|test-pod|pods"]
 	require.Equal(t, "default", first.Namespace)
 	require.Equal(t, "test-pod", first.Name)
 	require.Equal(t, "pods", first.Kind)
+	require.Equal(t, "pods", first.GVR.Resource)
 
 	second := objects["kube-system|test-cm|configmaps"]
 	require.Equal(t, "kube-system", second.Namespace)
 	require.Equal(t, "test-cm", second.Name)
 	require.Equal(t, "configmaps", second.Kind)
+	require.Equal(t, "configmaps", second.GVR.Resource)
+
+	nodeGroup := objects["clusterwide|worker|nodegroups"]
+	require.Equal(t, "clusterwide", nodeGroup.Namespace)
+	require.Equal(t, "worker", nodeGroup.Name)
+	require.Equal(t, "nodegroups", nodeGroup.Kind)
+	require.Equal(t, "nodegroups", nodeGroup.GVR.Resource)
+}
+
+func TestLoadFailedObjects_ExtendedRetryFormatIncludesGVR(t *testing.T) {
+	tmpDir := t.TempDir()
+	legacyRetryFile := filepath.Join(tmpDir, "failed_annotations_legacy.txt")
+	setCurrentRunState(&sigMigrateRunState{LegacyFailedRetryFile: legacyRetryFile})
+	defer setCurrentRunState(nil)
+
+	// Extended format (future-proof for retry): namespace|name|kind|group|version
+	testData := "clusterwide|worker|nodegroups|deckhouse.io|v1\n"
+	err := os.WriteFile(legacyRetryFile, []byte(testData), 0644)
+	require.NoError(t, err)
+
+	objects, err := loadFailedObjects()
+	require.NoError(t, err)
+	require.Len(t, objects, 1)
+
+	obj := objects["clusterwide|worker|nodegroups"]
+	require.Equal(t, "clusterwide", obj.Namespace)
+	require.Equal(t, "worker", obj.Name)
+	require.Equal(t, "nodegroups", obj.Kind)
+	require.Equal(t, "nodegroups", obj.GVR.Resource)
+	require.Equal(t, "deckhouse.io", obj.GVR.Group)
+	require.Equal(t, "v1", obj.GVR.Version)
+}
+
+func TestLoadFailedObjects_ExtendedRetryFormat_NamespacedObjects(t *testing.T) {
+	tmpDir := t.TempDir()
+	legacyRetryFile := filepath.Join(tmpDir, "failed_annotations_legacy.txt")
+	setCurrentRunState(&sigMigrateRunState{LegacyFailedRetryFile: legacyRetryFile})
+	defer setCurrentRunState(nil)
+
+	testData := strings.Join([]string{
+		"default|web-app|deployments|apps|v1",
+		"kube-system|coredns|configmaps||v1",
+	}, "\n") + "\n"
+
+	err := os.WriteFile(legacyRetryFile, []byte(testData), 0644)
+	require.NoError(t, err)
+
+	objects, err := loadFailedObjects()
+	require.NoError(t, err)
+	require.Len(t, objects, 2)
+
+	deployment := objects["default|web-app|deployments"]
+	require.Equal(t, "default", deployment.Namespace)
+	require.Equal(t, "web-app", deployment.Name)
+	require.Equal(t, "deployments", deployment.Kind)
+	require.Equal(t, "deployments", deployment.GVR.Resource)
+	require.Equal(t, "apps", deployment.GVR.Group)
+	require.Equal(t, "v1", deployment.GVR.Version)
+
+	configMap := objects["kube-system|coredns|configmaps"]
+	require.Equal(t, "kube-system", configMap.Namespace)
+	require.Equal(t, "coredns", configMap.Name)
+	require.Equal(t, "configmaps", configMap.Kind)
+	require.Equal(t, "configmaps", configMap.GVR.Resource)
+	require.Equal(t, "", configMap.GVR.Group)
+	require.Equal(t, "v1", configMap.GVR.Version)
 }
 
 func TestRecordFailure(t *testing.T) {