diff --git a/docs/resources/settings.md b/docs/resources/settings.md
index 308e6b9..994f69a 100644
--- a/docs/resources/settings.md
+++ b/docs/resources/settings.md
@@ -39,6 +39,19 @@ resource "supabase_settings" "production" {
     mfa_phone_otp_length = 6
     sms_otp_length       = 6
   })
+
+  storage = jsonencode({
+    # fileSizeLimit is expressed in bytes (e.g., 50MB = 50 * 1024 * 1024)
+    fileSizeLimit = 52428800
+    features = {
+      imageTransformation = {
+        enabled = true
+      }
+      s3Protocol = {
+        enabled = false
+      }
+    }
+  })
 }
 ```

diff --git a/examples/resources/supabase_settings/resource.tf b/examples/resources/supabase_settings/resource.tf
index d18040e..b46620a 100644
--- a/examples/resources/supabase_settings/resource.tf
+++ b/examples/resources/supabase_settings/resource.tf
@@ -24,4 +24,17 @@ resource "supabase_settings" "production" {
     mfa_phone_otp_length = 6
     sms_otp_length       = 6
   })
+
+  storage = jsonencode({
+    # fileSizeLimit is expressed in bytes (e.g., 50MB = 50 * 1024 * 1024)
+    fileSizeLimit = 52428800
+    features = {
+      imageTransformation = {
+        enabled = true
+      }
+      s3Protocol = {
+        enabled = false
+      }
+    }
+  })
 }
diff --git a/internal/provider/settings_resource.go b/internal/provider/settings_resource.go
index e680638..50313a0 100644
--- a/internal/provider/settings_resource.go
+++ b/internal/provider/settings_resource.go
@@ -145,6 +145,9 @@ func (r *SettingsResource) Create(ctx context.Context, req resource.CreateReques
 	if !data.Auth.IsNull() {
 		resp.Diagnostics.Append(updateAuthConfig(ctx, &data, r.client)...)
 	}
+	if !data.Storage.IsNull() {
+		resp.Diagnostics.Append(updateStorageConfig(ctx, &data, r.client)...)
+	}
 	// TODO: update all settings above concurrently
 	if resp.Diagnostics.HasError() {
 		return
@@ -183,6 +186,9 @@ func (r *SettingsResource) Read(ctx context.Context, req resource.ReadRequest, r
 	if !data.Auth.IsNull() {
 		resp.Diagnostics.Append(readAuthConfig(ctx, &data, r.client)...)
 	}
+	if !data.Storage.IsNull() {
+		resp.Diagnostics.Append(readStorageConfig(ctx, &data, r.client)...)
+	}
 	// TODO: read all settings above concurrently
 	if resp.Diagnostics.HasError() {
 		return
@@ -223,6 +229,9 @@ func (r *SettingsResource) Update(ctx context.Context, req resource.UpdateReques
 	if !planData.Auth.IsNull() && !planData.Auth.Equal(stateData.Auth) {
 		resp.Diagnostics.Append(updateAuthConfig(ctx, &planData, r.client)...)
 	}
+	if !planData.Storage.IsNull() && !planData.Storage.Equal(stateData.Storage) {
+		resp.Diagnostics.Append(updateStorageConfig(ctx, &planData, r.client)...)
+	}
 	// TODO: update all settings above concurrently
 	if resp.Diagnostics.HasError() {
 		return
@@ -253,6 +262,7 @@ func (r *SettingsResource) ImportState(ctx context.Context, req resource.ImportS
 	resp.Diagnostics.Append(readNetworkConfig(ctx, &data, r.client)...)
 	resp.Diagnostics.Append(readApiConfig(ctx, &data, r.client)...)
 	resp.Diagnostics.Append(readAuthConfig(ctx, &data, r.client)...)
+	resp.Diagnostics.Append(readStorageConfig(ctx, &data, r.client)...)

 	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
 }
@@ -432,9 +442,25 @@ func pickConfig(source any, target map[string]any) {
 		tag := t.Field(i).Tag.Get("json")
 		k := strings.Split(tag, ",")[0]
 		// Check that tag is picked by target
-		if _, ok := target[k]; ok {
-			target[k] = v.Field(i).Interface()
+		targetVal, ok := target[k]
+		if !ok {
+			continue
+		}
+		sourceField := v.Field(i)
+		// Recursively merge nested structs so user values survive when API omits fields.
+		if targetMap, isMap := targetVal.(map[string]any); isMap {
+			if sourceField.Kind() == reflect.Pointer {
+				if sourceField.IsNil() {
+					continue
+				}
+				sourceField = sourceField.Elem()
+			}
+			if sourceField.Kind() == reflect.Struct {
+				pickConfig(sourceField.Interface(), targetMap)
+				continue
+			}
 		}
+		target[k] = sourceField.Interface()
 	}
 }

@@ -615,3 +641,52 @@ func updateNetworkConfig(ctx context.Context, plan *SettingsResourceModel, clien
 	}
 	return nil
 }
+
+func readStorageConfig(ctx context.Context, state *SettingsResourceModel, client *api.ClientWithResponses) diag.Diagnostics {
+	// Use ProjectRef if Id is not set (during Create), otherwise use Id (during Read/Import)
+	projectRef := state.Id.ValueString()
+	if projectRef == "" {
+		projectRef = state.ProjectRef.ValueString()
+	}
+
+	httpResp, err := client.V1GetStorageConfigWithResponse(ctx, projectRef)
+	if err != nil {
+		msg := fmt.Sprintf("Unable to read storage settings, got error: %s", err)
+		return diag.Diagnostics{diag.NewErrorDiagnostic("Client Error", msg)}
+	}
+	// Deleted project is an orphan resource, not returning error so it can be destroyed.
+	switch httpResp.StatusCode() {
+	case http.StatusNotFound, http.StatusNotAcceptable:
+		return nil
+	}
+	if httpResp.JSON200 == nil {
+		msg := fmt.Sprintf("Unable to read storage settings, got status %d: %s", httpResp.StatusCode(), httpResp.Body)
+		return diag.Diagnostics{diag.NewErrorDiagnostic("Client Error", msg)}
+	}
+
+	if state.Storage, err = parseConfig(state.Storage, *httpResp.JSON200); err != nil {
+		msg := fmt.Sprintf("Unable to read storage settings, got error: %s", err)
+		return diag.Diagnostics{diag.NewErrorDiagnostic("Client Error", msg)}
+	}
+	return nil
+}
+
+func updateStorageConfig(ctx context.Context, plan *SettingsResourceModel, client *api.ClientWithResponses) diag.Diagnostics {
+	var body api.UpdateStorageConfigBody
+	if diags := plan.Storage.Unmarshal(&body); diags.HasError() {
+		return diags
+	}
+
+	httpResp, err := client.V1UpdateStorageConfigWithResponse(ctx, plan.ProjectRef.ValueString(), body)
+	if err != nil {
+		msg := fmt.Sprintf("Unable to update storage settings, got error: %s", err)
+		return diag.Diagnostics{diag.NewErrorDiagnostic("Client Error", msg)}
+	}
+	if httpResp.StatusCode() != http.StatusOK {
+		msg := fmt.Sprintf("Unable to update storage settings, got status %d: %s", httpResp.StatusCode(), httpResp.Body)
+		return diag.Diagnostics{diag.NewErrorDiagnostic("Client Error", msg)}
+	}
+
+	// Read back the updated config to get the actual state with correct field names
+	return readStorageConfig(ctx, plan, client)
+}
diff --git a/internal/provider/settings_resource_test.go b/internal/provider/settings_resource_test.go
index 1c0c90e..8d9a074 100644
--- a/internal/provider/settings_resource_test.go
+++ b/internal/provider/settings_resource_test.go
@@ -13,6 +13,7 @@ import (
 	"strings"
 	"testing"

+	"github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
 	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
 	"github.com/hashicorp/terraform-plugin-testing/terraform"
 	"github.com/oapi-codegen/nullable"
@@ -92,6 +93,43 @@ func TestAccSettingsResource(t *testing.T) {
 			SmsOtpLength: 6,
 			SmtpAdminEmail: nullable.NewNullNullable[openapi_types.Email](),
 		})
+	gock.New("https://api.supabase.com").
+		Get("/v1/projects/mayuaycdtijbctgqbycg/config/storage").
+		Reply(http.StatusOK).
+		JSON(map[string]any{
+			"fileSizeLimit": 52428800,
+			"features": map[string]any{
+				"imageTransformation": map[string]any{"enabled": true},
+				"s3Protocol": map[string]any{"enabled": false},
+			},
+			"capabilities": map[string]any{
+				"iceberg_catalog": false,
+				"list_v2": true,
+			},
+			"external": map[string]any{
+				"upstreamTarget": "main",
+			},
+		})
+	gock.New("https://api.supabase.com").
+		Patch("/v1/projects/mayuaycdtijbctgqbycg/config/storage").
+		Reply(http.StatusOK)
+	gock.New("https://api.supabase.com").
+		Get("/v1/projects/mayuaycdtijbctgqbycg/config/storage").
+		Reply(http.StatusOK).
+		JSON(map[string]any{
+			"fileSizeLimit": 52428800,
+			"features": map[string]any{
+				"imageTransformation": map[string]any{"enabled": true},
+				"s3Protocol": map[string]any{"enabled": false},
+			},
+			"capabilities": map[string]any{
+				"iceberg_catalog": false,
+				"list_v2": true,
+			},
+			"external": map[string]any{
+				"upstreamTarget": "main",
+			},
+		})
 	// Step 2: read
 	gock.New("https://api.supabase.com").
 		Get("/v1/projects/mayuaycdtijbctgqbycg/config/database/postgres").
@@ -159,6 +197,40 @@ func TestAccSettingsResource(t *testing.T) {
 			SmsOtpLength: 6,
 			SmtpAdminEmail: nullable.NewNullNullable[openapi_types.Email](),
 		})
+	gock.New("https://api.supabase.com").
+		Get("/v1/projects/mayuaycdtijbctgqbycg/config/storage").
+		Reply(http.StatusOK).
+		JSON(map[string]any{
+			"fileSizeLimit": 52428800,
+			"features": map[string]any{
+				"imageTransformation": map[string]any{"enabled": true},
+				"s3Protocol": map[string]any{"enabled": false},
+			},
+			"capabilities": map[string]any{
+				"iceberg_catalog": false,
+				"list_v2": true,
+			},
+			"external": map[string]any{
+				"upstreamTarget": "main",
+			},
+		})
+	gock.New("https://api.supabase.com").
+		Get("/v1/projects/mayuaycdtijbctgqbycg/config/storage").
+		Reply(http.StatusOK).
+		JSON(map[string]any{
+			"fileSizeLimit": 52428800,
+			"features": map[string]any{
+				"imageTransformation": map[string]any{"enabled": true},
+				"s3Protocol": map[string]any{"enabled": false},
+			},
+			"capabilities": map[string]any{
+				"iceberg_catalog": false,
+				"list_v2": true,
+			},
+			"external": map[string]any{
+				"upstreamTarget": "main",
+			},
+		})
 	// Step 3: update
 	gock.New("https://api.supabase.com").
 		Get("/v1/projects/mayuaycdtijbctgqbycg/config/database/postgres").
@@ -251,6 +323,60 @@ func TestAccSettingsResource(t *testing.T) {
 			JwtExp: nullable.NewNullableWithValue(1800),
 			SmtpAdminEmail: nullable.NewNullNullable[openapi_types.Email](),
 		})
+	gock.New("https://api.supabase.com").
+		Get("/v1/projects/mayuaycdtijbctgqbycg/config/storage").
+		Reply(http.StatusOK).
+		JSON(map[string]any{
+			"fileSizeLimit": 52428800,
+			"features": map[string]any{
+				"imageTransformation": map[string]any{"enabled": true},
+				"s3Protocol": map[string]any{"enabled": false},
+			},
+			"capabilities": map[string]any{
+				"iceberg_catalog": false,
+				"list_v2": true,
+			},
+			"external": map[string]any{
+				"upstreamTarget": "main",
+			},
+		})
+	gock.New("https://api.supabase.com").
+		Patch("/v1/projects/mayuaycdtijbctgqbycg/config/storage").
+		Reply(http.StatusOK)
+	gock.New("https://api.supabase.com").
+		Get("/v1/projects/mayuaycdtijbctgqbycg/config/storage").
+		Reply(http.StatusOK).
+		JSON(map[string]any{
+			"fileSizeLimit": 52428800,
+			"features": map[string]any{
+				"imageTransformation": map[string]any{"enabled": true},
+				"s3Protocol": map[string]any{"enabled": false},
+			},
+			"capabilities": map[string]any{
+				"iceberg_catalog": false,
+				"list_v2": true,
+			},
+			"external": map[string]any{
+				"upstreamTarget": "main",
+			},
+		})
+	gock.New("https://api.supabase.com").
+		Get("/v1/projects/mayuaycdtijbctgqbycg/config/storage").
+		Reply(http.StatusOK).
+		JSON(map[string]any{
+			"fileSizeLimit": 52428800,
+			"features": map[string]any{
+				"imageTransformation": map[string]any{"enabled": true},
+				"s3Protocol": map[string]any{"enabled": false},
+			},
+			"capabilities": map[string]any{
+				"iceberg_catalog": false,
+				"list_v2": true,
+			},
+			"external": map[string]any{
+				"upstreamTarget": "main",
+			},
+		})
 	// Run test
 	resource.Test(t, resource.TestCase{
 		PreCheck: func() { testAccPreCheck(t) },
@@ -308,6 +434,21 @@ func TestAccSettingsResource(t *testing.T) {
 			return fmt.Errorf("expected auth.smtp_admin_email to be filtered out, got %v", auth["smtp_admin_email"])
 		}

+		storage, err := unmarshalStateAttr(state, "storage")
+		if err != nil {
+			return err
+		}
+		if storage["fileSizeLimit"] != float64(52428800) {
+			return fmt.Errorf("expected storage.fileSizeLimit to be 52428800, got %v", storage["fileSizeLimit"])
+		}
+		if features, ok := storage["features"].(map[string]any); ok {
+			if imgTransform, ok := features["imageTransformation"].(map[string]any); ok {
+				if imgTransform["enabled"] != true {
+					return fmt.Errorf("expected storage.features.imageTransformation.enabled to be true, got %v", imgTransform["enabled"])
+				}
+			}
+		}
+
 		if projectRef, ok := state.Attributes["project_ref"]; !ok || projectRef != "mayuaycdtijbctgqbycg" {
 			return fmt.Errorf("expected project_ref to be mayuaycdtijbctgqbycg, got %v", projectRef)
 		}
@@ -592,6 +733,67 @@ resource "supabase_settings" "test" {
 	})
 }

+func TestParseConfigNestedOmitempty(t *testing.T) {
+	userConfig := `{
+		"fileSizeLimit": 52428800,
+		"features": {
+			"icebergCatalog": {"enabled": true},
+			"imageTransformation": {"enabled": true},
+			"s3Protocol": {"enabled": false}
+		}
+	}`
+	apiResponse := api.StorageConfigResponse{
+		FileSizeLimit: 52428800,
+		Features: struct {
+			IcebergCatalog *struct {
+				Enabled bool `json:"enabled"`
+			} `json:"icebergCatalog,omitempty"`
+			ImageTransformation struct {
+				Enabled bool `json:"enabled"`
+			} `json:"imageTransformation"`
+			S3Protocol struct {
+				Enabled bool `json:"enabled"`
+			} `json:"s3Protocol"`
+		}{
+			IcebergCatalog: nil,
+			ImageTransformation: struct {
+				Enabled bool `json:"enabled"`
+			}{Enabled: true},
+			S3Protocol: struct {
+				Enabled bool `json:"enabled"`
+			}{Enabled: false},
+		},
+	}
+
+	field := jsontypes.NewNormalizedValue(userConfig)
+	result, err := parseConfig(field, apiResponse)
+	if err != nil {
+		t.Fatalf("parseConfig failed: %v", err)
+	}
+
+	var resultMap map[string]any
+	if err := json.Unmarshal([]byte(result.ValueString()), &resultMap); err != nil {
+		t.Fatalf("failed to unmarshal result: %v", err)
+	}
+
+	features, ok := resultMap["features"].(map[string]any)
+	if !ok {
+		t.Fatalf("expected features in result, got %v", resultMap)
+	}
+
+	iceberg, exists := features["icebergCatalog"]
+	if !exists {
+		t.Fatalf("expected icebergCatalog to be preserved, got %v", features)
+	}
+	icebergMap, ok := iceberg.(map[string]any)
+	if !ok {
+		t.Fatalf("expected icebergCatalog to be a map, got %T", iceberg)
+	}
+	if icebergMap["enabled"] != true {
+		t.Errorf("expected icebergCatalog.enabled to be true, got %v", icebergMap["enabled"])
+	}
+}
+
 const testAccSettingsResourceConfig = `
 resource "supabase_settings" "production" {
   project_ref = "mayuaycdtijbctgqbycg"
@@ -615,9 +817,17 @@ resource "supabase_settings" "production" {
     jwt_exp = 1800
   })

-  # storage = jsonencode({
-  #   file_size_limit = "50MB"
-  # })
+  storage = jsonencode({
+    fileSizeLimit = 52428800
+    features = {
+      imageTransformation = {
+        enabled = true
+      }
+      s3Protocol = {
+        enabled = false
+      }
+    }
+  })

   # pooler = jsonencode({
   #   default_pool_size = 15
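
Note on the pickConfig change in this diff: the recursion is what lets a nested block the API omits (for example `icebergCatalog` behind a nil pointer marked `omitempty`) keep the value the user wrote in Terraform instead of being dropped. The standalone sketch below reproduces that merge behavior; the types `storageConfig`, `features`, and `toggle` are simplified stand-ins for illustration only, not the provider's generated `api.StorageConfigResponse`.

```go
// Minimal, self-contained sketch of the recursive merge used by pickConfig above.
// The types here are illustrative stand-ins, not the generated API structs.
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

// pickConfig copies JSON-tagged fields from source into target, recursing into
// nested structs; keys whose source is a nil pointer (omitted by the API) are
// skipped, so the user's existing value in target is preserved.
func pickConfig(source any, target map[string]any) {
	v := reflect.ValueOf(source)
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		k := strings.Split(t.Field(i).Tag.Get("json"), ",")[0]
		targetVal, ok := target[k]
		if !ok {
			continue
		}
		sourceField := v.Field(i)
		if targetMap, isMap := targetVal.(map[string]any); isMap {
			if sourceField.Kind() == reflect.Pointer {
				if sourceField.IsNil() {
					continue // API omitted this block; keep the user's value
				}
				sourceField = sourceField.Elem()
			}
			if sourceField.Kind() == reflect.Struct {
				pickConfig(sourceField.Interface(), targetMap)
				continue
			}
		}
		target[k] = sourceField.Interface()
	}
}

type toggle struct {
	Enabled bool `json:"enabled"`
}

type features struct {
	IcebergCatalog      *toggle `json:"icebergCatalog,omitempty"`
	ImageTransformation toggle  `json:"imageTransformation"`
}

type storageConfig struct {
	FileSizeLimit int64    `json:"fileSizeLimit"`
	Features      features `json:"features"`
}

func main() {
	// What the user declared in Terraform (icebergCatalog included).
	userState := map[string]any{
		"fileSizeLimit": 1,
		"features": map[string]any{
			"icebergCatalog":      map[string]any{"enabled": true},
			"imageTransformation": map[string]any{"enabled": false},
		},
	}
	// What the API returned: icebergCatalog omitted, the other fields updated.
	apiResp := storageConfig{
		FileSizeLimit: 52428800,
		Features:      features{ImageTransformation: toggle{Enabled: true}},
	}

	pickConfig(apiResp, userState)

	out, _ := json.MarshalIndent(userState, "", "  ")
	fmt.Println(string(out)) // icebergCatalog survives; the rest follows the API
}
```

Running the sketch prints the user's `icebergCatalog` value untouched while `fileSizeLimit` and `imageTransformation` reflect the API response, which is the same outcome TestParseConfigNestedOmitempty asserts against the real parseConfig.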