diff --git a/apiserver/controllers/controllers.go b/apiserver/controllers/controllers.go index 8ff3bf06c..95e96a935 100644 --- a/apiserver/controllers/controllers.go +++ b/apiserver/controllers/controllers.go @@ -208,13 +208,6 @@ func (a *APIController) WebhookHandler(w http.ResponseWriter, r *http.Request) { func (a *APIController) EventsHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - if !auth.IsAdmin(ctx) { - w.WriteHeader(http.StatusForbidden) - if _, err := w.Write([]byte("events are available to admin users")); err != nil { - slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response") - } - return - } conn, err := a.upgrader.Upgrade(w, r, nil) if err != nil { @@ -245,13 +238,6 @@ func (a *APIController) EventsHandler(w http.ResponseWriter, r *http.Request) { func (a *APIController) WSHandler(writer http.ResponseWriter, req *http.Request) { ctx := req.Context() - if !auth.IsAdmin(ctx) { - writer.WriteHeader(http.StatusForbidden) - if _, err := writer.Write([]byte("you need admin level access to view logs")); err != nil { - slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response") - } - return - } if a.hub == nil { handleError(ctx, writer, gErrors.NewBadRequestError("log streamer is disabled")) @@ -542,3 +528,127 @@ func (a *APIController) ForceToolsSyncHandler(w http.ResponseWriter, r *http.Req slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response") } } + +// swagger:route GET /auth/oidc/status oidc OIDCStatus +// +// Returns the OIDC configuration status (enabled/disabled). +// This endpoint is public and does not require authentication. +// +// Responses: +// 200: OIDCStatusResponse +func (a *APIController) OIDCStatusHandler(w http.ResponseWriter, r *http.Request) { + response := struct { + Enabled bool `json:"enabled"` + }{ + Enabled: a.auth.IsOIDCEnabled(), + } + + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(response); err != nil { + slog.With(slog.Any("error", err)).ErrorContext(r.Context(), "failed to encode OIDC status response") + } +} + +// swagger:route GET /auth/oidc/login oidc OIDCLogin +// +// Initiates OIDC login flow by redirecting to the identity provider. +// +// Responses: +// 302: description:Redirect to OIDC provider +// 400: APIErrorResponse +// 501: APIErrorResponse +func (a *APIController) OIDCLoginHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + if !a.auth.IsOIDCEnabled() { + handleError(ctx, w, gErrors.NewBadRequestError("OIDC authentication is not enabled")) + return + } + + authURL, _, err := a.auth.GetOIDCAuthURL() + if err != nil { + handleError(ctx, w, err) + return + } + + http.Redirect(w, r, authURL, http.StatusFound) +} + +// swagger:route GET /auth/oidc/callback oidc OIDCCallback +// +// Handles the OIDC callback from the identity provider. 
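+// On success it issues a JWT for the authenticated user, sets the garm_token
+// and garm_user cookies, and redirects the browser to the web UI at /ui/.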
+// +// Responses: +// 200: JWTResponse +// 400: APIErrorResponse +// 401: APIErrorResponse +func (a *APIController) OIDCCallbackHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + if !a.auth.IsOIDCEnabled() { + handleError(ctx, w, gErrors.NewBadRequestError("OIDC authentication is not enabled")) + return + } + + // Check for error from OIDC provider first (before checking for code/state) + // When the IdP returns an error (e.g., user not assigned), it won't include a code + if errParam := r.URL.Query().Get("error"); errParam != "" { + errDesc := r.URL.Query().Get("error_description") + slog.With(slog.String("error", errParam), slog.String("description", errDesc)).Error("OIDC provider returned error") + handleError(ctx, w, gErrors.NewBadRequestError("OIDC provider error: %s - %s", errParam, errDesc)) + return + } + + code := r.URL.Query().Get("code") + state := r.URL.Query().Get("state") + + if code == "" || state == "" { + handleError(ctx, w, gErrors.NewBadRequestError("missing code or state parameter")) + return + } + + ctx, err := a.auth.HandleOIDCCallback(ctx, code, state) + if err != nil { + handleError(ctx, w, err) + return + } + + tokenString, err := a.auth.GetJWTToken(ctx) + if err != nil { + handleError(ctx, w, err) + return + } + + // Get user info from context for the cookie + userName := auth.Username(ctx) + if userName == "" { + userName = auth.UserID(ctx) + } + + // Set cookies for the webapp + // Token cookie - NOT HttpOnly because the webapp JavaScript needs to read it + // to set it in the API client for authenticated requests + http.SetCookie(w, &http.Cookie{ + Name: "garm_token", + Value: tokenString, + Path: "/", + HttpOnly: false, + Secure: r.TLS != nil || r.Header.Get("X-Forwarded-Proto") == "https", + SameSite: http.SameSiteLaxMode, + MaxAge: 86400 * 7, // 7 days + }) + + // User cookie - accessible to JavaScript for display purposes + http.SetCookie(w, &http.Cookie{ + Name: "garm_user", + Value: userName, + Path: "/", + HttpOnly: false, + Secure: r.TLS != nil || r.Header.Get("X-Forwarded-Proto") == "https", + SameSite: http.SameSiteLaxMode, + MaxAge: 86400 * 7, // 7 days + }) + + // Redirect to the webapp + http.Redirect(w, r, "/ui/", http.StatusFound) +} diff --git a/apiserver/controllers/users.go b/apiserver/controllers/users.go new file mode 100644 index 000000000..51b44183f --- /dev/null +++ b/apiserver/controllers/users.go @@ -0,0 +1,45 @@ +// Copyright 2022 Cloudbase Solutions SRL +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package controllers + +import ( + "encoding/json" + "log/slog" + "net/http" +) + +// swagger:route GET /users users ListUsers +// +// List all users. 
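+// Returns every user currently registered in GARM, including accounts that
+// were provisioned just-in-time via OIDC.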
+// +// Responses: +// 200: Users +// default: APIErrorResponse +func (a *APIController) ListUsersHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + users, err := a.r.ListUsers(ctx) + if err != nil { + slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing users") + handleError(ctx, w, err) + return + } + + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(users); err != nil { + slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response") + } +} + diff --git a/apiserver/routers/routers.go b/apiserver/routers/routers.go index f92ff7a40..cdd7e112b 100644 --- a/apiserver/routers/routers.go +++ b/apiserver/routers/routers.go @@ -203,6 +203,15 @@ func NewAPIRouter(han *controllers.APIController, authMiddleware, initMiddleware authRouter.Handle("/{login:login\\/?}", http.HandlerFunc(han.LoginHandler)).Methods("POST", "OPTIONS") authRouter.Use(initMiddleware.Middleware) + // OIDC authentication routes (no auth middleware - these initiate/complete auth) + oidcRouter := apiSubRouter.PathPrefix("/auth/oidc").Subrouter() + oidcRouter.Handle("/status/", http.HandlerFunc(han.OIDCStatusHandler)).Methods("GET", "OPTIONS") + oidcRouter.Handle("/status", http.HandlerFunc(han.OIDCStatusHandler)).Methods("GET", "OPTIONS") + oidcRouter.Handle("/login/", http.HandlerFunc(han.OIDCLoginHandler)).Methods("GET", "OPTIONS") + oidcRouter.Handle("/login", http.HandlerFunc(han.OIDCLoginHandler)).Methods("GET", "OPTIONS") + oidcRouter.Handle("/callback/", http.HandlerFunc(han.OIDCCallbackHandler)).Methods("GET", "OPTIONS") + oidcRouter.Handle("/callback", http.HandlerFunc(han.OIDCCallbackHandler)).Methods("GET", "OPTIONS") + ////////////////////////// // Controller endpoints // ////////////////////////// @@ -242,6 +251,13 @@ func NewAPIRouter(han *controllers.APIController, authMiddleware, initMiddleware apiRouter.Handle("/metrics-token/", http.HandlerFunc(han.MetricsTokenHandler)).Methods("GET", "OPTIONS") apiRouter.Handle("/metrics-token", http.HandlerFunc(han.MetricsTokenHandler)).Methods("GET", "OPTIONS") + /////////// + // Users // + /////////// + // List users + apiRouter.Handle("/users/", http.HandlerFunc(han.ListUsersHandler)).Methods("GET", "OPTIONS") + apiRouter.Handle("/users", http.HandlerFunc(han.ListUsersHandler)).Methods("GET", "OPTIONS") + ///////////// // Objects // ///////////// diff --git a/auth/admin_required.go b/auth/admin_required.go index b3ca36244..cdacb038d 100644 --- a/auth/admin_required.go +++ b/auth/admin_required.go @@ -17,6 +17,13 @@ import "net/http" func AdminRequiredMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Allow read-only methods for all authenticated users + // Only require admin for mutating operations + if r.Method == http.MethodGet || r.Method == http.MethodOptions || r.Method == http.MethodHead { + next.ServeHTTP(w, r) + return + } + ctx := r.Context() if !IsAdmin(ctx) { http.Error(w, "Unauthorized", http.StatusUnauthorized) diff --git a/auth/auth.go b/auth/auth.go index c5fa1ebdc..986cdfb65 100644 --- a/auth/auth.go +++ b/auth/auth.go @@ -16,13 +16,20 @@ package auth import ( "context" + "crypto/rand" + "encoding/base64" "errors" "fmt" + "log/slog" + "strings" + "sync" "time" + "github.com/coreos/go-oidc/v3/oidc" jwt "github.com/golang-jwt/jwt/v5" "github.com/nbutton23/zxcvbn-go" "golang.org/x/crypto/bcrypt" + "golang.org/x/oauth2" runnerErrors "github.com/cloudbase/garm-provider-common/errors" 
"github.com/cloudbase/garm-provider-common/util" @@ -33,14 +40,28 @@ import ( func NewAuthenticator(cfg config.JWTAuth, store common.Store) *Authenticator { return &Authenticator{ - cfg: cfg, - store: store, + cfg: cfg, + store: store, + oidcStates: make(map[string]oidcStateEntry), } } type Authenticator struct { store common.Store cfg config.JWTAuth + + // OIDC fields + oidcCfg config.OIDC + oidcProvider *oidc.Provider + oidcVerifier *oidc.IDTokenVerifier + oidcOAuth2 oauth2.Config + oidcStateMu sync.RWMutex + oidcStates map[string]oidcStateEntry +} + +type oidcStateEntry struct { + createdAt time.Time + nonce string } func (a *Authenticator) IsInitialized() bool { @@ -66,6 +87,7 @@ func (a *Authenticator) GetJWTToken(ctx context.Context) (string, error) { }, UserID: UserID(ctx), TokenID: tokenID, + Username: Username(ctx), IsAdmin: IsAdmin(ctx), FullName: FullName(ctx), Generation: generation, @@ -187,3 +209,267 @@ func (a *Authenticator) AuthenticateUser(ctx context.Context, info params.Passwo return PopulateContext(ctx, user, nil), nil } + +// InitOIDC initializes OIDC authentication +func (a *Authenticator) InitOIDC(ctx context.Context, cfg config.OIDC) error { + if !cfg.Enable { + return nil + } + + provider, err := oidc.NewProvider(ctx, cfg.IssuerURL) + if err != nil { + return fmt.Errorf("failed to create OIDC provider: %w", err) + } + + a.oidcCfg = cfg + a.oidcProvider = provider + a.oidcVerifier = provider.Verifier(&oidc.Config{ClientID: cfg.ClientID}) + a.oidcOAuth2 = oauth2.Config{ + ClientID: cfg.ClientID, + ClientSecret: cfg.ClientSecret, + RedirectURL: cfg.RedirectURL, + Endpoint: provider.Endpoint(), + Scopes: cfg.GetScopes(), + } + + return nil +} + +// IsOIDCEnabled returns whether OIDC is enabled +func (a *Authenticator) IsOIDCEnabled() bool { + return a.oidcCfg.Enable && a.oidcProvider != nil +} + +// GetOIDCAuthURL returns the OIDC authorization URL +func (a *Authenticator) GetOIDCAuthURL() (string, string, error) { + if !a.IsOIDCEnabled() { + return "", "", runnerErrors.NewBadRequestError("OIDC authentication is not enabled") + } + + state, err := a.generateOIDCState() + if err != nil { + return "", "", err + } + + nonce, err := a.generateOIDCNonce() + if err != nil { + return "", "", err + } + + // Store state with expiration + a.oidcStateMu.Lock() + a.oidcStates[state] = oidcStateEntry{ + createdAt: time.Now(), + nonce: nonce, + } + a.oidcStateMu.Unlock() + + // Clean up old states + go a.cleanupOIDCStates() + + url := a.oidcOAuth2.AuthCodeURL(state, oidc.Nonce(nonce)) + return url, state, nil +} + +// generateOIDCState creates a cryptographically secure random state +func (a *Authenticator) generateOIDCState() (string, error) { + b := make([]byte, 32) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("failed to generate random state: %w", err) + } + return base64.URLEncoding.EncodeToString(b), nil +} + +// generateOIDCNonce creates a cryptographically secure random nonce +func (a *Authenticator) generateOIDCNonce() (string, error) { + b := make([]byte, 32) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("failed to generate random nonce: %w", err) + } + return base64.URLEncoding.EncodeToString(b), nil +} + +// cleanupOIDCStates removes expired states (older than 10 minutes) +func (a *Authenticator) cleanupOIDCStates() { + a.oidcStateMu.Lock() + defer a.oidcStateMu.Unlock() + + cutoff := time.Now().Add(-10 * time.Minute) + for state, entry := range a.oidcStates { + if entry.createdAt.Before(cutoff) { + delete(a.oidcStates, state) + } 
+ } +} + +// validateOIDCState checks if the state is valid and returns the nonce +func (a *Authenticator) validateOIDCState(state string) (string, error) { + a.oidcStateMu.Lock() + defer a.oidcStateMu.Unlock() + + entry, ok := a.oidcStates[state] + if !ok { + return "", runnerErrors.NewBadRequestError("invalid state") + } + + // Check if state is expired (10 minutes) + if time.Since(entry.createdAt) > 10*time.Minute { + delete(a.oidcStates, state) + return "", runnerErrors.NewBadRequestError("state expired") + } + + // Delete state after use (one-time use) + delete(a.oidcStates, state) + return entry.nonce, nil +} + +// OIDCClaims represents the claims from an OIDC ID token +type OIDCClaims struct { + Email string `json:"email"` + EmailVerified bool `json:"email_verified"` + Name string `json:"name"` + Subject string `json:"sub"` +} + +// HandleOIDCCallback processes the OIDC callback and returns an authenticated context +func (a *Authenticator) HandleOIDCCallback(ctx context.Context, code, state string) (context.Context, error) { + if !a.IsOIDCEnabled() { + return ctx, runnerErrors.NewBadRequestError("OIDC authentication is not enabled") + } + + // Validate state and get nonce + nonce, err := a.validateOIDCState(state) + if err != nil { + return ctx, err + } + + // Exchange code for token + oauth2Token, err := a.oidcOAuth2.Exchange(ctx, code) + if err != nil { + slog.With(slog.Any("error", err)).Error("failed to exchange code for token") + return ctx, runnerErrors.NewBadRequestError("failed to exchange code for token") + } + + // Extract ID token + rawIDToken, ok := oauth2Token.Extra("id_token").(string) + if !ok { + return ctx, runnerErrors.NewBadRequestError("no id_token in token response") + } + + // Verify ID token + idToken, err := a.oidcVerifier.Verify(ctx, rawIDToken) + if err != nil { + slog.With(slog.Any("error", err)).Error("failed to verify ID token") + return ctx, runnerErrors.NewBadRequestError("failed to verify ID token") + } + + // Verify nonce + if idToken.Nonce != nonce { + return ctx, runnerErrors.NewBadRequestError("nonce mismatch") + } + + // Extract claims + var claims OIDCClaims + if err := idToken.Claims(&claims); err != nil { + slog.With(slog.Any("error", err)).Error("failed to extract claims") + return ctx, runnerErrors.NewBadRequestError("failed to extract claims") + } + + // Validate email + if claims.Email == "" { + return ctx, runnerErrors.NewBadRequestError("email claim is required") + } + + // Check allowed domains + if len(a.oidcCfg.AllowedDomains) > 0 { + emailDomain := extractEmailDomain(claims.Email) + allowed := false + for _, domain := range a.oidcCfg.AllowedDomains { + if strings.EqualFold(emailDomain, domain) { + allowed = true + break + } + } + if !allowed { + slog.With(slog.String("email", claims.Email)).Warn("email domain not allowed") + return ctx, runnerErrors.ErrUnauthorized + } + } + + // Try to find existing user + user, err := a.store.GetUser(ctx, claims.Email) + if err != nil { + if !errors.Is(err, runnerErrors.ErrNotFound) { + return ctx, fmt.Errorf("failed to get user: %w", err) + } + + // User not found - check if JIT creation is enabled + if !a.oidcCfg.JITUserCreation { + slog.With(slog.String("email", claims.Email)).Warn("user not found and JIT creation disabled") + return ctx, runnerErrors.ErrUnauthorized + } + + // Create user JIT + user, err = a.createOIDCUser(ctx, claims) + if err != nil { + return ctx, fmt.Errorf("failed to create JIT user: %w", err) + } + slog.With(slog.String("email", claims.Email)).Info("created JIT user via 
OIDC") + } + + // Check if user is enabled + if !user.Enabled { + return ctx, runnerErrors.ErrUnauthorized + } + + return PopulateContext(ctx, user, nil), nil +} + +// createOIDCUser creates a new user from OIDC claims +func (a *Authenticator) createOIDCUser(ctx context.Context, claims OIDCClaims) (params.User, error) { + // Generate username from email (before @) + username := strings.Split(claims.Email, "@")[0] + // Sanitize username - only alphanumeric + username = sanitizeOIDCUsername(username) + if len(username) > 64 { + username = username[:64] + } + + // Use name from claims or fallback to username + fullName := claims.Name + if fullName == "" { + fullName = username + } + + newUser := params.NewUserParams{ + Email: claims.Email, + Username: username, + FullName: fullName, + Password: "", // SSO users don't have passwords + IsAdmin: a.oidcCfg.DefaultUserAdmin, + Enabled: true, + IsSSOUser: true, + } + + return a.store.CreateUser(ctx, newUser) +} + +// extractEmailDomain extracts the domain from an email address +func extractEmailDomain(email string) string { + parts := strings.Split(email, "@") + if len(parts) != 2 { + return "" + } + return parts[1] +} + +// sanitizeOIDCUsername removes non-alphanumeric characters from username +func sanitizeOIDCUsername(s string) string { + var result strings.Builder + for _, r := range s { + if util.IsAlphanumeric(string(r)) { + result.WriteRune(r) + } + } + return result.String() +} diff --git a/auth/context.go b/auth/context.go index d983c62fb..87fbd361a 100644 --- a/auth/context.go +++ b/auth/context.go @@ -27,6 +27,7 @@ type contextFlags string const ( isAdminKey contextFlags = "is_admin" fullNameKey contextFlags = "full_name" + usernameKey contextFlags = "username" readMetricsKey contextFlags = "read_metrics" // UserIDFlag is the User ID flag we set in the context UserIDFlag contextFlags = "user_id" @@ -218,6 +219,7 @@ func PopulateContext(ctx context.Context, user params.User, authExpires *time.Ti ctx = SetAdmin(ctx, user.IsAdmin) ctx = SetIsEnabled(ctx, user.Enabled) ctx = SetFullName(ctx, user.FullName) + ctx = SetUsername(ctx, user.Username) ctx = SetExpires(ctx, authExpires) ctx = SetPasswordGeneration(ctx, user.Generation) return ctx @@ -264,6 +266,20 @@ func FullName(ctx context.Context) string { return name.(string) } +// SetUsername sets the username in the context +func SetUsername(ctx context.Context, username string) context.Context { + return context.WithValue(ctx, usernameKey, username) +} + +// Username returns the username from context +func Username(ctx context.Context) string { + username := ctx.Value(usernameKey) + if username == nil { + return "" + } + return username.(string) +} + // SetIsEnabled sets a flag indicating if account is enabled func SetIsEnabled(ctx context.Context, enabled bool) context.Context { return context.WithValue(ctx, isEnabledFlag, enabled) diff --git a/auth/jwt.go b/auth/jwt.go index 6468b6d46..2626bcc5d 100644 --- a/auth/jwt.go +++ b/auth/jwt.go @@ -35,6 +35,7 @@ import ( type JWTClaims struct { UserID string `json:"user"` TokenID string `json:"token_id"` + Username string `json:"username,omitempty"` FullName string `json:"full_name"` IsAdmin bool `json:"is_admin"` ReadMetrics bool `json:"read_metrics"` diff --git a/auth/oidc.go b/auth/oidc.go new file mode 100644 index 000000000..01ed3666e --- /dev/null +++ b/auth/oidc.go @@ -0,0 +1,159 @@ +// Copyright 2022 Cloudbase Solutions SRL +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in 
compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package auth + +import ( + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "sync" + "time" + + "github.com/coreos/go-oidc/v3/oidc" + "golang.org/x/oauth2" + + runnerErrors "github.com/cloudbase/garm-provider-common/errors" + "github.com/cloudbase/garm/config" + "github.com/cloudbase/garm/database/common" +) + +// OIDCAuthenticator handles OIDC authentication +type OIDCAuthenticator struct { + cfg config.OIDC + store common.Store + provider *oidc.Provider + verifier *oidc.IDTokenVerifier + oauth2 oauth2.Config + + // State management for OIDC flow + stateMu sync.RWMutex + states map[string]stateEntry +} + +type stateEntry struct { + createdAt time.Time + nonce string +} + +// NewOIDCAuthenticator creates a new OIDC authenticator +func NewOIDCAuthenticator(ctx context.Context, cfg config.OIDC, store common.Store) (*OIDCAuthenticator, error) { + if !cfg.Enable { + return nil, nil + } + + provider, err := oidc.NewProvider(ctx, cfg.IssuerURL) + if err != nil { + return nil, fmt.Errorf("failed to create OIDC provider: %w", err) + } + + oauth2Config := oauth2.Config{ + ClientID: cfg.ClientID, + ClientSecret: cfg.ClientSecret, + RedirectURL: cfg.RedirectURL, + Endpoint: provider.Endpoint(), + Scopes: cfg.GetScopes(), + } + + verifier := provider.Verifier(&oidc.Config{ClientID: cfg.ClientID}) + + return &OIDCAuthenticator{ + cfg: cfg, + store: store, + provider: provider, + verifier: verifier, + oauth2: oauth2Config, + states: make(map[string]stateEntry), + }, nil +} + +// generateState creates a cryptographically secure random state +func (o *OIDCAuthenticator) generateState() (string, error) { + b := make([]byte, 32) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("failed to generate random state: %w", err) + } + return base64.URLEncoding.EncodeToString(b), nil +} + +// generateNonce creates a cryptographically secure random nonce +func (o *OIDCAuthenticator) generateNonce() (string, error) { + b := make([]byte, 32) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("failed to generate random nonce: %w", err) + } + return base64.URLEncoding.EncodeToString(b), nil +} + +// GetAuthURL returns the OIDC authorization URL +func (o *OIDCAuthenticator) GetAuthURL() (string, string, error) { + state, err := o.generateState() + if err != nil { + return "", "", err + } + + nonce, err := o.generateNonce() + if err != nil { + return "", "", err + } + + // Store state with expiration + o.stateMu.Lock() + o.states[state] = stateEntry{ + createdAt: time.Now(), + nonce: nonce, + } + o.stateMu.Unlock() + + // Clean up old states + go o.cleanupStates() + + url := o.oauth2.AuthCodeURL(state, oidc.Nonce(nonce)) + return url, state, nil +} + +// cleanupStates removes expired states (older than 10 minutes) +func (o *OIDCAuthenticator) cleanupStates() { + o.stateMu.Lock() + defer o.stateMu.Unlock() + + cutoff := time.Now().Add(-10 * time.Minute) + for state, entry := range o.states { + if entry.createdAt.Before(cutoff) { + delete(o.states, state) + } + } +} + +// ValidateState checks if the state is valid and returns the nonce 
+func (o *OIDCAuthenticator) ValidateState(state string) (string, error) { + o.stateMu.Lock() + defer o.stateMu.Unlock() + + entry, ok := o.states[state] + if !ok { + return "", runnerErrors.NewBadRequestError("invalid state") + } + + // Check if state is expired (10 minutes) + if time.Since(entry.createdAt) > 10*time.Minute { + delete(o.states, state) + return "", runnerErrors.NewBadRequestError("state expired") + } + + // Delete state after use (one-time use) + delete(o.states, state) + return entry.nonce, nil +} diff --git a/auth/oidc_test.go b/auth/oidc_test.go new file mode 100644 index 000000000..a935be035 --- /dev/null +++ b/auth/oidc_test.go @@ -0,0 +1,367 @@ +// Copyright 2022 Cloudbase Solutions SRL +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package auth + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cloudbase/garm/config" +) + +func TestAuthenticator_IsOIDCEnabled(t *testing.T) { + tests := []struct { + name string + setup func() *Authenticator + expected bool + }{ + { + name: "OIDC not initialized", + setup: func() *Authenticator { + return NewAuthenticator(config.JWTAuth{}, nil) + }, + expected: false, + }, + { + name: "OIDC disabled in config", + setup: func() *Authenticator { + auth := NewAuthenticator(config.JWTAuth{}, nil) + auth.oidcCfg = config.OIDC{Enable: false} + return auth + }, + expected: false, + }, + { + name: "OIDC enabled but provider nil", + setup: func() *Authenticator { + auth := NewAuthenticator(config.JWTAuth{}, nil) + auth.oidcCfg = config.OIDC{Enable: true} + // provider is nil + return auth + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + auth := tt.setup() + result := auth.IsOIDCEnabled() + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestExtractEmailDomain(t *testing.T) { + tests := []struct { + name string + email string + expected string + }{ + { + name: "valid email", + email: "user@example.com", + expected: "example.com", + }, + { + name: "subdomain email", + email: "user@mail.example.com", + expected: "mail.example.com", + }, + { + name: "no @ symbol", + email: "invalid-email", + expected: "", + }, + { + name: "empty string", + email: "", + expected: "", + }, + { + name: "multiple @ symbols", + email: "user@domain@example.com", + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := extractEmailDomain(tt.email) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestSanitizeOIDCUsername(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "alphanumeric only", + input: "testuser123", + expected: "testuser123", + }, + { + name: "with dots", + input: "test.user", + expected: "testuser", + }, + { + name: "with special chars", + input: "test-user_name+extra", + expected: "testusernameextra", + }, + { + name: "with spaces", + input: "test user", + expected: "testuser", + }, + { + name: 
"empty string", + input: "", + expected: "", + }, + { + name: "only special chars", + input: ".-_+@", + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := sanitizeOIDCUsername(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestAuthenticator_GenerateOIDCState(t *testing.T) { + auth := NewAuthenticator(config.JWTAuth{}, nil) + + state1, err := auth.generateOIDCState() + require.NoError(t, err) + assert.NotEmpty(t, state1) + + state2, err := auth.generateOIDCState() + require.NoError(t, err) + assert.NotEmpty(t, state2) + + // States should be different + assert.NotEqual(t, state1, state2) +} + +func TestAuthenticator_GenerateOIDCNonce(t *testing.T) { + auth := NewAuthenticator(config.JWTAuth{}, nil) + + nonce1, err := auth.generateOIDCNonce() + require.NoError(t, err) + assert.NotEmpty(t, nonce1) + + nonce2, err := auth.generateOIDCNonce() + require.NoError(t, err) + assert.NotEmpty(t, nonce2) + + // Nonces should be different + assert.NotEqual(t, nonce1, nonce2) +} + +func TestAuthenticator_ValidateOIDCState(t *testing.T) { + auth := NewAuthenticator(config.JWTAuth{}, nil) + + // Add a valid state manually + testState := "test-state-123" + testNonce := "test-nonce-456" + auth.oidcStates[testState] = oidcStateEntry{ + createdAt: time.Now(), + nonce: testNonce, + } + + nonce, err := auth.validateOIDCState(testState) + require.NoError(t, err) + assert.Equal(t, testNonce, nonce) + + // State should be consumed (one-time use) + _, err = auth.validateOIDCState(testState) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid state") +} + +func TestAuthenticator_ValidateOIDCState_Invalid(t *testing.T) { + auth := NewAuthenticator(config.JWTAuth{}, nil) + + // Test invalid state + _, err := auth.validateOIDCState("invalid-state") + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid state") +} + +func TestAuthenticator_ValidateOIDCState_Expired(t *testing.T) { + auth := NewAuthenticator(config.JWTAuth{}, nil) + + // Add an expired state manually + expiredState := "expired-state-123" + auth.oidcStates[expiredState] = oidcStateEntry{ + createdAt: time.Now().Add(-15 * time.Minute), // 15 minutes ago (expired) + nonce: "test-nonce", + } + + _, err := auth.validateOIDCState(expiredState) + assert.Error(t, err) + assert.Contains(t, err.Error(), "state expired") +} + +func TestAuthenticator_CleanupOIDCStates(t *testing.T) { + auth := NewAuthenticator(config.JWTAuth{}, nil) + + // Add some states + auth.oidcStates["fresh-state"] = oidcStateEntry{ + createdAt: time.Now(), + nonce: "nonce1", + } + auth.oidcStates["old-state"] = oidcStateEntry{ + createdAt: time.Now().Add(-15 * time.Minute), + nonce: "nonce2", + } + + assert.Len(t, auth.oidcStates, 2) + + auth.cleanupOIDCStates() + + // Only fresh state should remain + assert.Len(t, auth.oidcStates, 1) + _, exists := auth.oidcStates["fresh-state"] + assert.True(t, exists) + _, exists = auth.oidcStates["old-state"] + assert.False(t, exists) +} + +func TestOIDCConfigValidation(t *testing.T) { + tests := []struct { + name string + cfg config.OIDC + expectError bool + errContains string + }{ + { + name: "disabled - no validation", + cfg: config.OIDC{ + Enable: false, + }, + expectError: false, + }, + { + name: "enabled - missing issuer_url", + cfg: config.OIDC{ + Enable: true, + ClientID: "client-id", + ClientSecret: "client-secret", + RedirectURL: "https://example.com/callback", + }, + expectError: true, + errContains: "issuer_url", + }, + { + name: "enabled - 
missing client_id", + cfg: config.OIDC{ + Enable: true, + IssuerURL: "https://issuer.example.com", + ClientSecret: "client-secret", + RedirectURL: "https://example.com/callback", + }, + expectError: true, + errContains: "client_id", + }, + { + name: "enabled - missing client_secret", + cfg: config.OIDC{ + Enable: true, + IssuerURL: "https://issuer.example.com", + ClientID: "client-id", + RedirectURL: "https://example.com/callback", + }, + expectError: true, + errContains: "client_secret", + }, + { + name: "enabled - missing redirect_url", + cfg: config.OIDC{ + Enable: true, + IssuerURL: "https://issuer.example.com", + ClientID: "client-id", + ClientSecret: "client-secret", + }, + expectError: true, + errContains: "redirect_url", + }, + { + name: "enabled - all required fields", + cfg: config.OIDC{ + Enable: true, + IssuerURL: "https://issuer.example.com", + ClientID: "client-id", + ClientSecret: "client-secret", + RedirectURL: "https://example.com/callback", + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.cfg.Validate() + if tt.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errContains) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestOIDCGetScopes(t *testing.T) { + tests := []struct { + name string + cfg config.OIDC + expected []string + }{ + { + name: "default scopes when empty", + cfg: config.OIDC{}, + expected: []string{"openid", "email", "profile"}, + }, + { + name: "custom scopes", + cfg: config.OIDC{ + Scopes: []string{"openid", "email"}, + }, + expected: []string{"openid", "email"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.cfg.GetScopes() + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/cmd/garm/main.go b/cmd/garm/main.go index 81131b7e2..f6146a34e 100644 --- a/cmd/garm/main.go +++ b/cmd/garm/main.go @@ -287,6 +287,15 @@ func main() { } authenticator := auth.NewAuthenticator(cfg.JWTAuth, db) + + // Initialize OIDC if enabled + if cfg.OIDC.Enable { + if err := authenticator.InitOIDC(ctx, cfg.OIDC); err != nil { + log.Fatalf("failed to initialize OIDC: %+v", err) + } + log.Printf("OIDC authentication enabled with issuer: %s", cfg.OIDC.IssuerURL) + } + controller, err := controllers.NewAPIController(runner, authenticator, hub, agentHub, cfg.APIServer) if err != nil { log.Fatalf("failed to create controller: %+v", err) diff --git a/config/config.go b/config/config.go index 4b104b99f..cd1e42217 100644 --- a/config/config.go +++ b/config/config.go @@ -100,6 +100,7 @@ type Config struct { Github []Github `toml:"github,omitempty"` JWTAuth JWTAuth `toml:"jwt_auth" json:"jwt-auth"` Logging Logging `toml:"logging" json:"logging"` + OIDC OIDC `toml:"oidc" json:"oidc"` } // Validate validates the config @@ -129,6 +130,10 @@ func (c *Config) Validate() error { return fmt.Errorf("error validating logging config: %w", err) } + if err := c.OIDC.Validate(); err != nil { + return fmt.Errorf("error validating oidc config: %w", err) + } + providerNames := map[string]int{} for _, provider := range c.Providers { @@ -812,3 +817,56 @@ func (j *JWTAuth) Validate() error { } return nil } + +// OIDC holds settings for OpenID Connect authentication +type OIDC struct { + // Enable enables OIDC authentication + Enable bool `toml:"enable" json:"enable"` + // IssuerURL is the OIDC provider's issuer URL (e.g., https://accounts.google.com) + IssuerURL string `toml:"issuer_url" json:"issuer-url"` + // ClientID is the OAuth2 client ID + 
ClientID string `toml:"client_id" json:"client-id"` + // ClientSecret is the OAuth2 client secret + ClientSecret string `toml:"client_secret" json:"client-secret"` + // RedirectURL is the callback URL for OIDC (e.g., https://garm.example.com/api/v1/auth/oidc/callback) + RedirectURL string `toml:"redirect_url" json:"redirect-url"` + // Scopes are the OAuth2 scopes to request (defaults to openid, email, profile) + Scopes []string `toml:"scopes" json:"scopes"` + // AllowedDomains restricts login to users with email addresses from these domains + // If empty, all authenticated users are allowed + AllowedDomains []string `toml:"allowed_domains" json:"allowed-domains"` + // JITUserCreation enables Just-In-Time user creation on first OIDC login + JITUserCreation bool `toml:"jit_user_creation" json:"jit-user-creation"` + // DefaultUserAdmin sets whether JIT-created users should be admins + DefaultUserAdmin bool `toml:"default_user_admin" json:"default-user-admin"` +} + +// Validate validates the OIDC config +func (o *OIDC) Validate() error { + if !o.Enable { + return nil + } + + if o.IssuerURL == "" { + return fmt.Errorf("oidc issuer_url is required when OIDC is enabled") + } + if o.ClientID == "" { + return fmt.Errorf("oidc client_id is required when OIDC is enabled") + } + if o.ClientSecret == "" { + return fmt.Errorf("oidc client_secret is required when OIDC is enabled") + } + if o.RedirectURL == "" { + return fmt.Errorf("oidc redirect_url is required when OIDC is enabled") + } + + return nil +} + +// GetScopes returns the OAuth2 scopes, with defaults if not specified +func (o *OIDC) GetScopes() []string { + if len(o.Scopes) == 0 { + return []string{"openid", "email", "profile"} + } + return o.Scopes +} diff --git a/database/common/store.go b/database/common/store.go index f5bf73c21..60e696125 100644 --- a/database/common/store.go +++ b/database/common/store.go @@ -85,6 +85,7 @@ type UserStore interface { GetUser(ctx context.Context, user string) (params.User, error) GetUserByID(ctx context.Context, userID string) (params.User, error) GetAdminUser(ctx context.Context) (params.User, error) + ListUsers(ctx context.Context) ([]params.User, error) CreateUser(ctx context.Context, user params.NewUserParams) (params.User, error) UpdateUser(ctx context.Context, user string, param params.UpdateUserParams) (params.User, error) diff --git a/database/sql/users.go b/database/sql/users.go index ca78c5e8b..8856954f4 100644 --- a/database/sql/users.go +++ b/database/sql/users.go @@ -57,12 +57,16 @@ func (s *sqlDatabase) getUserByID(tx *gorm.DB, userID string) (User, error) { } func (s *sqlDatabase) CreateUser(_ context.Context, user params.NewUserParams) (params.User, error) { - if user.Username == "" || user.Email == "" || user.Password == "" { - return params.User{}, runnerErrors.NewBadRequestError("missing username, password or email") + if user.Username == "" || user.Email == "" { + return params.User{}, runnerErrors.NewBadRequestError("missing username or email") + } + // SSO users don't have passwords, but regular users must have one + if !user.IsSSOUser && user.Password == "" { + return params.User{}, runnerErrors.NewBadRequestError("missing password for non-SSO user") } newUser := User{ Username: user.Username, - Password: user.Password, + Password: user.Password, // Empty for SSO users FullName: user.FullName, Enabled: user.Enabled, Email: user.Email, @@ -76,10 +80,6 @@ func (s *sqlDatabase) CreateUser(_ context.Context, user params.NewUserParams) ( return runnerErrors.NewConflictError("email already 
exists") } - if s.hasAdmin(tx) && user.IsAdmin { - return runnerErrors.NewBadRequestError("admin user already exists") - } - q := tx.Save(&newUser) if q.Error != nil { return fmt.Errorf("error creating user: %w", q.Error) @@ -163,3 +163,17 @@ func (s *sqlDatabase) GetAdminUser(_ context.Context) (params.User, error) { } return s.sqlToParamsUser(user), nil } + +func (s *sqlDatabase) ListUsers(_ context.Context) ([]params.User, error) { + var users []User + q := s.conn.Model(&User{}).Find(&users) + if q.Error != nil { + return nil, fmt.Errorf("error fetching users: %w", q.Error) + } + + ret := make([]params.User, len(users)) + for idx, user := range users { + ret[idx] = s.sqlToParamsUser(user) + } + return ret, nil +} diff --git a/doc/oidc_authentication.md b/doc/oidc_authentication.md new file mode 100644 index 000000000..1ba93c8b7 --- /dev/null +++ b/doc/oidc_authentication.md @@ -0,0 +1,154 @@ +# OIDC Authentication + +GARM supports OpenID Connect (OIDC) authentication, allowing users to authenticate using external identity providers such as Google, Okta, Azure AD, Keycloak, and other OIDC-compliant providers. + +## Configuration + +To enable OIDC authentication, add the `[oidc]` section to your GARM configuration file: + +```toml +[oidc] +# Enable OIDC authentication +enable = true + +# The OIDC provider's issuer URL +# Examples: +# - Google: https://accounts.google.com +# - Okta: https://your-domain.okta.com +# - Azure AD: https://login.microsoftonline.com/{tenant}/v2.0 +# - Keycloak: https://your-keycloak-server/realms/{realm} +issuer_url = "https://accounts.google.com" + +# OAuth2 client ID from your identity provider +client_id = "your-client-id" + +# OAuth2 client secret from your identity provider +client_secret = "your-client-secret" + +# The callback URL where the identity provider will redirect after authentication +# This must match the redirect URI configured in your identity provider +redirect_url = "https://your-garm-server/api/v1/auth/oidc/callback" + +# OAuth2 scopes to request (optional) +# Defaults to ["openid", "email", "profile"] if not specified +scopes = ["openid", "email", "profile"] + +# Restrict login to users with email addresses from specific domains (optional) +# If empty, all authenticated users are allowed +allowed_domains = ["example.com", "yourcompany.com"] + +# Enable Just-In-Time (JIT) user creation on first OIDC login (optional) +# If true, new users will be automatically created when they first authenticate via OIDC +# If false, users must be pre-created in GARM before they can log in +jit_user_creation = true + +# Set whether JIT-created users should be admins (optional) +# Only applies when jit_user_creation is true +default_user_admin = false +``` + +## API Endpoints + +OIDC authentication adds the following API endpoints: + +### Login Endpoint + +``` +GET /api/v1/auth/oidc/login +``` + +Initiates the OIDC login flow by redirecting the user to the identity provider's authorization endpoint. + +**Response:** HTTP 302 redirect to the identity provider + +### Callback Endpoint + +``` +GET /api/v1/auth/oidc/callback +``` + +Handles the callback from the identity provider after successful authentication. + +**Query Parameters:** +- `code` - Authorization code from the identity provider +- `state` - State parameter for CSRF protection + +**Success Response:** +```json +{ + "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." 
+} +``` + +**Error Responses:** +- `400 Bad Request` - Missing parameters, invalid state, or token exchange failure +- `401 Unauthorized` - User not allowed (domain restriction or user disabled) + +## How It Works + +1. **User initiates login**: The user navigates to `/api/v1/auth/oidc/login` +2. **Redirect to IdP**: GARM redirects the user to the identity provider with a state parameter +3. **User authenticates**: The user authenticates with their identity provider +4. **Callback**: The IdP redirects back to `/api/v1/auth/oidc/callback` with an authorization code +5. **Token exchange**: GARM exchanges the code for tokens with the IdP +6. **User lookup/creation**: GARM looks up the user by email, or creates one if JIT is enabled +7. **JWT issued**: GARM issues a JWT token for subsequent API requests + +## Setting Up Identity Providers + +### Google + +1. Go to the [Google Cloud Console](https://console.cloud.google.com/) +2. Create or select a project +3. Navigate to "APIs & Services" > "Credentials" +4. Create an OAuth 2.0 Client ID +5. Add your callback URL: `https://your-garm-server/api/v1/auth/oidc/callback` +6. Copy the Client ID and Client Secret to your GARM config + +### Okta + +1. Log in to your Okta Admin Console +2. Navigate to "Applications" > "Create App Integration" +3. Select "OIDC - OpenID Connect" and "Web Application" +4. Add your callback URL +5. Copy the Client ID and Client Secret + +### Azure AD + +1. Go to the [Azure Portal](https://portal.azure.com/) +2. Navigate to "Azure Active Directory" > "App registrations" +3. Create a new registration +4. Add a redirect URI for "Web" platform +5. Create a client secret under "Certificates & secrets" + +### Keycloak + +1. Log in to your Keycloak Admin Console +2. Select or create a realm +3. Navigate to "Clients" and create a new client +4. Set the Root URL and Valid Redirect URIs +5. Copy the Client ID and Client Secret from the "Credentials" tab + +## Security Considerations + +- **HTTPS Required**: Always use HTTPS for the redirect URL in production +- **Client Secret**: Keep the client secret secure and never expose it +- **Domain Restrictions**: Use `allowed_domains` to restrict access to specific email domains +- **JIT User Creation**: Consider disabling JIT creation (`jit_user_creation = false`) for tighter access control +- **State Validation**: GARM validates the state parameter to prevent CSRF attacks +- **Token Expiration**: OIDC state tokens expire after 10 minutes + +## Troubleshooting + +### "OIDC authentication is not enabled" +Ensure `enable = true` in the `[oidc]` section and restart GARM. + +### "failed to create OIDC provider" +Check that the `issuer_url` is correct and accessible from the GARM server. + +### "email domain not allowed" +The user's email domain is not in the `allowed_domains` list. + +### "user not found and JIT creation disabled" +Enable `jit_user_creation = true` or pre-create the user in GARM. 
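+
+## Example: Checking OIDC Status from a Client
+
+The snippet below is a minimal sketch of how a client could discover whether
+OIDC is enabled before starting the browser-based login flow. It only uses the
+public status endpoint described above; `https://garm.example.com` is a
+placeholder for your GARM server URL.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	base := "https://garm.example.com" // placeholder GARM URL
+
+	resp, err := http.Get(base + "/api/v1/auth/oidc/status")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var status struct {
+		Enabled bool `json:"enabled"`
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
+		panic(err)
+	}
+
+	if !status.Enabled {
+		fmt.Println("OIDC is not enabled on this GARM server")
+		return
+	}
+
+	// The login endpoint answers with a 302 redirect to the identity provider,
+	// so it is meant to be opened in a browser rather than followed from code.
+	fmt.Println("OIDC is enabled; open this URL in a browser to log in:")
+	fmt.Println(base + "/api/v1/auth/oidc/login")
+}
+```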
+ diff --git a/go.mod b/go.mod index b77d92070..2c7a82d69 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( github.com/BurntSushi/toml v1.6.0 github.com/bradleyfalzon/ghinstallation/v2 v2.17.0 github.com/cloudbase/garm-provider-common v0.1.8-0.20251001105909-bbcacae60e7c + github.com/coreos/go-oidc/v3 v3.17.0 github.com/felixge/httpsnoop v1.0.4 github.com/gdamore/tcell/v2 v2.13.8 github.com/go-openapi/errors v0.22.6 @@ -51,6 +52,7 @@ require ( github.com/clipperhouse/uax29/v2 v2.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/gdamore/encoding v1.0.1 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.24.2 // indirect diff --git a/go.sum b/go.sum index 635e44fcf..153f4d591 100644 --- a/go.sum +++ b/go.sum @@ -23,6 +23,8 @@ github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= github.com/cloudbase/garm-provider-common v0.1.8-0.20251001105909-bbcacae60e7c h1:IaIJoyugbSAYRHkiVJaBpibFftsQAi/mle7k11Ze94g= github.com/cloudbase/garm-provider-common v0.1.8-0.20251001105909-bbcacae60e7c/go.mod h1:2O51WbcfqRx5fDHyyJgIFq7KdTZZnefsM+aoOchyleU= +github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= +github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -35,6 +37,8 @@ github.com/gdamore/encoding v1.0.1 h1:YzKZckdBL6jVt2Gc+5p82qhrGiqMdG/eNs6Wy0u3Uh github.com/gdamore/encoding v1.0.1/go.mod h1:0Z0cMFinngz9kS1QfMjCP8TY7em3bZYeeklsSDPivEo= github.com/gdamore/tcell/v2 v2.13.8 h1:Mys/Kl5wfC/GcC5Cx4C2BIQH9dbnhnkPgS9/wF3RlfU= github.com/gdamore/tcell/v2 v2.13.8/go.mod h1:+Wfe208WDdB7INEtCsNrAN6O2m+wsTPk1RAovjaILlo= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/params/requests.go b/params/requests.go index ad6391472..63fe68851 100644 --- a/params/requests.go +++ b/params/requests.go @@ -156,6 +156,9 @@ type NewUserParams struct { Password string `json:"password,omitempty"` IsAdmin bool `json:"-"` Enabled bool `json:"-"` + // IsSSOUser indicates this user authenticates via SSO (OIDC/SAML) + // and does not have a local password + IsSSOUser bool `json:"-"` } // swagger:model UpdatePoolParams diff --git a/runner/enterprises.go b/runner/enterprises.go index ee54aed40..3b02c0e64 100644 --- a/runner/enterprises.go +++ b/runner/enterprises.go @@ -86,10 +86,6 @@ func (r *Runner) CreateEnterprise(ctx context.Context, param params.CreateEnterp } func (r *Runner) ListEnterprises(ctx context.Context, filter params.EnterpriseFilter) ([]params.Enterprise, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - enterprises, err := r.store.ListEnterprises(ctx, filter) if err != nil { return nil, 
fmt.Errorf("error listing enterprises: %w", err) @@ -112,10 +108,6 @@ func (r *Runner) ListEnterprises(ctx context.Context, filter params.EnterpriseFi } func (r *Runner) GetEnterpriseByID(ctx context.Context, enterpriseID string) (params.Enterprise, error) { - if !auth.IsAdmin(ctx) { - return params.Enterprise{}, runnerErrors.ErrUnauthorized - } - enterprise, err := r.store.GetEnterpriseByID(ctx, enterpriseID) if err != nil { return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err) @@ -239,9 +231,6 @@ func (r *Runner) CreateEnterprisePool(ctx context.Context, enterpriseID string, } func (r *Runner) GetEnterprisePoolByID(ctx context.Context, enterpriseID, poolID string) (params.Pool, error) { - if !auth.IsAdmin(ctx) { - return params.Pool{}, runnerErrors.ErrUnauthorized - } entity := params.ForgeEntity{ ID: enterpriseID, EntityType: params.ForgeEntityTypeEnterprise, @@ -285,10 +274,6 @@ func (r *Runner) DeleteEnterprisePool(ctx context.Context, enterpriseID, poolID } func (r *Runner) ListEnterprisePools(ctx context.Context, enterpriseID string) ([]params.Pool, error) { - if !auth.IsAdmin(ctx) { - return []params.Pool{}, runnerErrors.ErrUnauthorized - } - entity := params.ForgeEntity{ ID: enterpriseID, EntityType: params.ForgeEntityTypeEnterprise, @@ -343,9 +328,6 @@ func (r *Runner) UpdateEnterprisePool(ctx context.Context, enterpriseID, poolID } func (r *Runner) ListEnterpriseInstances(ctx context.Context, enterpriseID string) ([]params.Instance, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } entity := params.ForgeEntity{ ID: enterpriseID, EntityType: params.ForgeEntityTypeEnterprise, diff --git a/runner/gitea_credentials.go b/runner/gitea_credentials.go index d66212f93..075b4ef0e 100644 --- a/runner/gitea_credentials.go +++ b/runner/gitea_credentials.go @@ -24,10 +24,6 @@ import ( ) func (r *Runner) ListGiteaCredentials(ctx context.Context) ([]params.ForgeCredentials, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - // Get the credentials from the store. The cache is always updated after the database successfully // commits the transaction that created/updated the credentials. 
// If we create a set of credentials then immediately after we call ListGiteaCredentials, @@ -57,10 +53,6 @@ func (r *Runner) CreateGiteaCredentials(ctx context.Context, param params.Create } func (r *Runner) GetGiteaCredentials(ctx context.Context, id uint) (params.ForgeCredentials, error) { - if !auth.IsAdmin(ctx) { - return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized - } - creds, err := r.store.GetGiteaCredentials(ctx, id, true) if err != nil { return params.ForgeCredentials{}, fmt.Errorf("error failed to get gitea credentials: %w", err) diff --git a/runner/gitea_endpoints.go b/runner/gitea_endpoints.go index 4a7e32d92..4179ff6f2 100644 --- a/runner/gitea_endpoints.go +++ b/runner/gitea_endpoints.go @@ -41,9 +41,6 @@ func (r *Runner) CreateGiteaEndpoint(ctx context.Context, param params.CreateGit } func (r *Runner) GetGiteaEndpoint(ctx context.Context, name string) (params.ForgeEndpoint, error) { - if !auth.IsAdmin(ctx) { - return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized - } endpoint, err := r.store.GetGiteaEndpoint(ctx, name) if err != nil { return params.ForgeEndpoint{}, fmt.Errorf("failed to get gitea endpoint: %w", err) @@ -82,10 +79,6 @@ func (r *Runner) UpdateGiteaEndpoint(ctx context.Context, name string, param par } func (r *Runner) ListGiteaEndpoints(ctx context.Context) ([]params.ForgeEndpoint, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - endpoints, err := r.store.ListGiteaEndpoints(ctx) if err != nil { return nil, fmt.Errorf("failed to list gitea endpoints: %w", err) diff --git a/runner/github_credentials.go b/runner/github_credentials.go index 5e1291fff..3f3afe520 100644 --- a/runner/github_credentials.go +++ b/runner/github_credentials.go @@ -25,10 +25,6 @@ import ( ) func (r *Runner) ListCredentials(ctx context.Context) ([]params.ForgeCredentials, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - // Get the credentials from the store. The cache is always updated after the database successfully // commits the transaction that created/updated the credentials. 
// If we create a set of credentials then immediately after we call ListCredentials, @@ -68,10 +64,6 @@ func (r *Runner) CreateGithubCredentials(ctx context.Context, param params.Creat } func (r *Runner) GetGithubCredentials(ctx context.Context, id uint) (params.ForgeCredentials, error) { - if !auth.IsAdmin(ctx) { - return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized - } - creds, err := r.store.GetGithubCredentials(ctx, id, true) if err != nil { return params.ForgeCredentials{}, fmt.Errorf("failed to get github credentials: %w", err) diff --git a/runner/github_endpoints.go b/runner/github_endpoints.go index 29965081e..ea34d1d80 100644 --- a/runner/github_endpoints.go +++ b/runner/github_endpoints.go @@ -41,9 +41,6 @@ func (r *Runner) CreateGithubEndpoint(ctx context.Context, param params.CreateGi } func (r *Runner) GetGithubEndpoint(ctx context.Context, name string) (params.ForgeEndpoint, error) { - if !auth.IsAdmin(ctx) { - return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized - } endpoint, err := r.store.GetGithubEndpoint(ctx, name) if err != nil { return params.ForgeEndpoint{}, fmt.Errorf("failed to get github endpoint: %w", err) @@ -82,10 +79,6 @@ func (r *Runner) UpdateGithubEndpoint(ctx context.Context, name string, param pa } func (r *Runner) ListGithubEndpoints(ctx context.Context) ([]params.ForgeEndpoint, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - endpoints, err := r.store.ListGithubEndpoints(ctx) if err != nil { return nil, fmt.Errorf("failed to list github endpoints: %w", err) diff --git a/runner/object_store.go b/runner/object_store.go index 421e0253b..da0347a2b 100644 --- a/runner/object_store.go +++ b/runner/object_store.go @@ -44,10 +44,6 @@ func (r *Runner) CreateFileObject(ctx context.Context, param params.CreateFileOb } func (r *Runner) GetFileObject(ctx context.Context, objID uint) (params.FileObject, error) { - if !auth.IsAdmin(ctx) { - return params.FileObject{}, runnerErrors.ErrUnauthorized - } - fileObj, err := r.store.GetFileObject(ctx, objID) if err != nil { return params.FileObject{}, fmt.Errorf("failed to get file object: %w", err) @@ -83,9 +79,6 @@ func (r *Runner) DeleteFileObjectsByTags(ctx context.Context, tags []string) (in } func (r *Runner) ListFileObjects(ctx context.Context, page, pageSize uint64, tags []string) (params.FileObjectPaginatedResponse, error) { - if !auth.IsAdmin(ctx) { - return params.FileObjectPaginatedResponse{}, runnerErrors.ErrUnauthorized - } var resp params.FileObjectPaginatedResponse var err error if len(tags) == 0 { @@ -126,10 +119,6 @@ func (r *Runner) UpdateFileObject(ctx context.Context, objID uint, param params. 
} func (r *Runner) GetFileObjectReader(ctx context.Context, objID uint) (io.ReadCloser, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - readCloser, err := r.store.OpenFileObjectContent(ctx, objID) if err != nil { return nil, fmt.Errorf("failed to open file object: %w", err) diff --git a/runner/organizations.go b/runner/organizations.go index f68401e69..7011f1843 100644 --- a/runner/organizations.go +++ b/runner/organizations.go @@ -95,10 +95,6 @@ func (r *Runner) CreateOrganization(ctx context.Context, param params.CreateOrgP } func (r *Runner) ListOrganizations(ctx context.Context, filter params.OrganizationFilter) ([]params.Organization, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - orgs, err := r.store.ListOrganizations(ctx, filter) if err != nil { return nil, fmt.Errorf("error listing organizations: %w", err) @@ -122,10 +118,6 @@ func (r *Runner) ListOrganizations(ctx context.Context, filter params.Organizati } func (r *Runner) GetOrganizationByID(ctx context.Context, orgID string) (params.Organization, error) { - if !auth.IsAdmin(ctx) { - return params.Organization{}, runnerErrors.ErrUnauthorized - } - org, err := r.store.GetOrganizationByID(ctx, orgID) if err != nil { return params.Organization{}, fmt.Errorf("error fetching organization: %w", err) @@ -264,10 +256,6 @@ func (r *Runner) CreateOrgPool(ctx context.Context, orgID string, param params.C } func (r *Runner) GetOrgPoolByID(ctx context.Context, orgID, poolID string) (params.Pool, error) { - if !auth.IsAdmin(ctx) { - return params.Pool{}, runnerErrors.ErrUnauthorized - } - entity := params.ForgeEntity{ ID: orgID, EntityType: params.ForgeEntityTypeOrganization, @@ -316,9 +304,6 @@ func (r *Runner) DeleteOrgPool(ctx context.Context, orgID, poolID string) error } func (r *Runner) ListOrgPools(ctx context.Context, orgID string) ([]params.Pool, error) { - if !auth.IsAdmin(ctx) { - return []params.Pool{}, runnerErrors.ErrUnauthorized - } entity := params.ForgeEntity{ ID: orgID, EntityType: params.ForgeEntityTypeOrganization, @@ -374,10 +359,6 @@ func (r *Runner) UpdateOrgPool(ctx context.Context, orgID, poolID string, param } func (r *Runner) ListOrgInstances(ctx context.Context, orgID string) ([]params.Instance, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - entity := params.ForgeEntity{ ID: orgID, EntityType: params.ForgeEntityTypeOrganization, @@ -450,10 +431,6 @@ func (r *Runner) UninstallOrgWebhook(ctx context.Context, orgID string) error { } func (r *Runner) GetOrgWebhookInfo(ctx context.Context, orgID string) (params.HookInfo, error) { - if !auth.IsAdmin(ctx) { - return params.HookInfo{}, runnerErrors.ErrUnauthorized - } - org, err := r.store.GetOrganizationByID(ctx, orgID) if err != nil { return params.HookInfo{}, fmt.Errorf("error fetching org: %w", err) diff --git a/runner/pools.go b/runner/pools.go index ffd3b9c80..ed56b8db2 100644 --- a/runner/pools.go +++ b/runner/pools.go @@ -25,10 +25,6 @@ import ( ) func (r *Runner) ListAllPools(ctx context.Context) ([]params.Pool, error) { - if !auth.IsAdmin(ctx) { - return []params.Pool{}, runnerErrors.ErrUnauthorized - } - pools, err := r.store.ListAllPools(ctx) if err != nil { return nil, fmt.Errorf("error fetching pools: %w", err) @@ -37,10 +33,6 @@ func (r *Runner) ListAllPools(ctx context.Context) ([]params.Pool, error) { } func (r *Runner) GetPoolByID(ctx context.Context, poolID string) (params.Pool, error) { - if !auth.IsAdmin(ctx) { - return params.Pool{}, 
runnerErrors.ErrUnauthorized - } - pool, err := r.store.GetPoolByID(ctx, poolID) if err != nil { return params.Pool{}, fmt.Errorf("error fetching pool: %w", err) @@ -112,10 +104,6 @@ func (r *Runner) UpdatePoolByID(ctx context.Context, poolID string, param params } func (r *Runner) ListAllJobs(ctx context.Context) ([]params.Job, error) { - if !auth.IsAdmin(ctx) { - return []params.Job{}, runnerErrors.ErrUnauthorized - } - jobs, err := r.store.ListAllJobs(ctx) if err != nil { return nil, fmt.Errorf("error fetching jobs: %w", err) diff --git a/runner/repositories.go b/runner/repositories.go index 9a39f5233..8cabefd17 100644 --- a/runner/repositories.go +++ b/runner/repositories.go @@ -94,10 +94,6 @@ func (r *Runner) CreateRepository(ctx context.Context, param params.CreateRepoPa } func (r *Runner) ListRepositories(ctx context.Context, filter params.RepositoryFilter) ([]params.Repository, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - repos, err := r.store.ListRepositories(ctx, filter) if err != nil { return nil, fmt.Errorf("error listing repositories: %w", err) @@ -120,10 +116,6 @@ func (r *Runner) ListRepositories(ctx context.Context, filter params.RepositoryF } func (r *Runner) GetRepositoryByID(ctx context.Context, repoID string) (params.Repository, error) { - if !auth.IsAdmin(ctx) { - return params.Repository{}, runnerErrors.ErrUnauthorized - } - repo, err := r.store.GetRepositoryByID(ctx, repoID) if err != nil { return params.Repository{}, fmt.Errorf("error fetching repository: %w", err) @@ -302,10 +294,6 @@ func (r *Runner) CreateRepoPool(ctx context.Context, repoID string, param params } func (r *Runner) GetRepoPoolByID(ctx context.Context, repoID, poolID string) (params.Pool, error) { - if !auth.IsAdmin(ctx) { - return params.Pool{}, runnerErrors.ErrUnauthorized - } - entity := params.ForgeEntity{ ID: repoID, EntityType: params.ForgeEntityTypeRepository, @@ -350,9 +338,6 @@ func (r *Runner) DeleteRepoPool(ctx context.Context, repoID, poolID string) erro } func (r *Runner) ListRepoPools(ctx context.Context, repoID string) ([]params.Pool, error) { - if !auth.IsAdmin(ctx) { - return []params.Pool{}, runnerErrors.ErrUnauthorized - } entity := params.ForgeEntity{ ID: repoID, EntityType: params.ForgeEntityTypeRepository, @@ -365,10 +350,6 @@ func (r *Runner) ListRepoPools(ctx context.Context, repoID string) ([]params.Poo } func (r *Runner) ListPoolInstances(ctx context.Context, poolID string, outdatedOnly bool) ([]params.Instance, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - instances, err := r.store.ListPoolInstances(ctx, poolID, outdatedOnly) if err != nil { return []params.Instance{}, fmt.Errorf("error fetching instances: %w", err) @@ -424,9 +405,6 @@ func (r *Runner) UpdateRepoPool(ctx context.Context, repoID, poolID string, para } func (r *Runner) ListRepoInstances(ctx context.Context, repoID string) ([]params.Instance, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } entity := params.ForgeEntity{ ID: repoID, EntityType: params.ForgeEntityTypeRepository, @@ -498,10 +476,6 @@ func (r *Runner) UninstallRepoWebhook(ctx context.Context, repoID string) error } func (r *Runner) GetRepoWebhookInfo(ctx context.Context, repoID string) (params.HookInfo, error) { - if !auth.IsAdmin(ctx) { - return params.HookInfo{}, runnerErrors.ErrUnauthorized - } - repo, err := r.store.GetRepositoryByID(ctx, repoID) if err != nil { return params.HookInfo{}, fmt.Errorf("error fetching repo: %w", err) 
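The hunks above, and the runner.go, scalesets.go and templates.go hunks that follow, strip the per-method `auth.IsAdmin(ctx)` guards out of the runner layer. A natural place for that check to live instead is a single piece of HTTP middleware in front of the admin routes. The sketch below only illustrates that pattern; it is not the wiring this change actually performs, and the `AdminRequired` name and the `github.com/cloudbase/garm/auth` import path are assumptions.

```go
// Hypothetical sketch: enforce admin access once, at the router, rather than
// repeating the check inside every Runner method.
package routers

import (
	"net/http"

	"github.com/cloudbase/garm/auth" // assumed import path for the auth package
)

// AdminRequired rejects non-admin requests before they reach the runner layer.
func AdminRequired(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !auth.IsAdmin(r.Context()) {
			http.Error(w, "admin level access required", http.StatusForbidden)
			return
		}
		next.ServeHTTP(w, r)
	})
}
```

With something like this mounted on the admin subrouter, the runner methods can assume an already-authorized context, which is presumably what the deletions above rely on.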
diff --git a/runner/runner.go b/runner/runner.go index 73be2459f..91f83dcc6 100644 --- a/runner/runner.go +++ b/runner/runner.go @@ -293,9 +293,6 @@ func (r *Runner) ForceToolsSync(ctx context.Context) (params.ControllerInfo, err // GetControllerInfo returns the controller id and the hostname. // This data might be used in metrics and logging. func (r *Runner) GetControllerInfo(ctx context.Context) (params.ControllerInfo, error) { - if !auth.IsAdmin(ctx) { - return params.ControllerInfo{}, runnerErrors.ErrUnauthorized - } // It is unlikely that fetching the hostname will encounter an error on a standard // linux (or Windows) system, but if os.Hostname() can fail, we need to at least retry // a few times before giving up. @@ -336,9 +333,6 @@ func (r *Runner) GetControllerInfo(ctx context.Context) (params.ControllerInfo, } func (r *Runner) ListProviders(ctx context.Context) ([]params.Provider, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } ret := []params.Provider{} for _, val := range r.providers { @@ -753,10 +747,6 @@ func (r *Runner) appendTagsToCreatePoolParams(param params.CreatePoolParams) (pa } func (r *Runner) GetInstance(ctx context.Context, instanceName string) (params.Instance, error) { - if !auth.IsAdmin(ctx) { - return params.Instance{}, runnerErrors.ErrUnauthorized - } - instance, err := r.store.GetInstance(ctx, instanceName) if err != nil { return params.Instance{}, fmt.Errorf("error fetching instance: %w", err) @@ -765,10 +755,6 @@ func (r *Runner) GetInstance(ctx context.Context, instanceName string) (params.I } func (r *Runner) ListAllInstances(ctx context.Context) ([]params.Instance, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - instances, err := r.store.ListAllInstances(ctx) if err != nil { return nil, fmt.Errorf("error fetching instances: %w", err) @@ -991,3 +977,11 @@ func (r *Runner) getGHCliFromInstance(ctx context.Context, instance params.Insta } return ghCli, scaleSetCli, nil } + +func (r *Runner) ListUsers(ctx context.Context) ([]params.User, error) { + users, err := r.store.ListUsers(ctx) + if err != nil { + return nil, fmt.Errorf("error fetching users: %w", err) + } + return users, nil +} diff --git a/runner/scalesets.go b/runner/scalesets.go index 20d2fa3d6..392080aa8 100644 --- a/runner/scalesets.go +++ b/runner/scalesets.go @@ -29,10 +29,6 @@ import ( ) func (r *Runner) ListAllScaleSets(ctx context.Context) ([]params.ScaleSet, error) { - if !auth.IsAdmin(ctx) { - return []params.ScaleSet{}, runnerErrors.ErrUnauthorized - } - scalesets, err := r.store.ListAllScaleSets(ctx) if err != nil { return nil, fmt.Errorf("error fetching pools: %w", err) @@ -41,10 +37,6 @@ func (r *Runner) ListAllScaleSets(ctx context.Context) ([]params.ScaleSet, error } func (r *Runner) GetScaleSetByID(ctx context.Context, scaleSet uint) (params.ScaleSet, error) { - if !auth.IsAdmin(ctx) { - return params.ScaleSet{}, runnerErrors.ErrUnauthorized - } - set, err := r.store.GetScaleSetByID(ctx, scaleSet) if err != nil { return params.ScaleSet{}, fmt.Errorf("error fetching scale set: %w", err) @@ -289,10 +281,6 @@ func (r *Runner) CreateEntityScaleSet(ctx context.Context, entityType params.For } func (r *Runner) ListScaleSetInstances(ctx context.Context, scalesetID uint, outdatedOnly bool) ([]params.Instance, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - instances, err := r.store.ListScaleSetInstances(ctx, scalesetID, outdatedOnly) if err != nil { return []params.Instance{}, 
fmt.Errorf("error fetching instances: %w", err) @@ -301,9 +289,6 @@ func (r *Runner) ListScaleSetInstances(ctx context.Context, scalesetID uint, out } func (r *Runner) ListEntityScaleSets(ctx context.Context, entityType params.ForgeEntityType, entityID string) ([]params.ScaleSet, error) { - if !auth.IsAdmin(ctx) { - return []params.ScaleSet{}, runnerErrors.ErrUnauthorized - } entity := params.ForgeEntity{ ID: entityID, EntityType: entityType, diff --git a/runner/templates.go b/runner/templates.go index 9d2906fe7..b060db4f4 100644 --- a/runner/templates.go +++ b/runner/templates.go @@ -42,9 +42,6 @@ func (r *Runner) CreateTemplate(ctx context.Context, param params.CreateTemplate } func (r *Runner) GetTemplate(ctx context.Context, id uint) (params.Template, error) { - if !auth.IsAdmin(ctx) { - return params.Template{}, runnerErrors.ErrUnauthorized - } template, err := r.store.GetTemplate(ctx, id) if err != nil { return params.Template{}, fmt.Errorf("failed to get template: %w", err) @@ -53,9 +50,6 @@ func (r *Runner) GetTemplate(ctx context.Context, id uint) (params.Template, err } func (r *Runner) GetTemplateByName(ctx context.Context, templateName string) (params.Template, error) { - if !auth.IsAdmin(ctx) { - return params.Template{}, runnerErrors.ErrUnauthorized - } template, err := r.store.GetTemplateByName(ctx, templateName) if err != nil { return params.Template{}, fmt.Errorf("failed to get template: %w", err) @@ -152,10 +146,6 @@ func (r *Runner) RestoreTemplate(ctx context.Context, param params.RestoreTempla } func (r *Runner) ListTemplates(ctx context.Context, osType *commonParams.OSType, forgeType *params.EndpointType, partialName *string) ([]params.Template, error) { - if !auth.IsAdmin(ctx) { - return nil, runnerErrors.ErrUnauthorized - } - templates, err := r.store.ListTemplates(ctx, osType, forgeType, partialName) if err != nil { return nil, fmt.Errorf("failed to list templates: %w", err) diff --git a/vendor/github.com/coreos/go-oidc/v3/LICENSE b/vendor/github.com/coreos/go-oidc/v3/LICENSE new file mode 100644 index 000000000..e06d20818 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/coreos/go-oidc/v3/NOTICE b/vendor/github.com/coreos/go-oidc/v3/NOTICE new file mode 100644 index 000000000..b39ddfa5c --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
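The files below vendor github.com/coreos/go-oidc/v3, the library used for OpenID Connect discovery and ID token verification. As a quick orientation before the vendored sources, this is the shape of the login side of the flow the package documents: discover the provider, build an oauth2.Config from its endpoints, and send the user to the authorization URL. The issuer, client credentials and redirect URL below are placeholders, not values taken from this change.

```go
// Minimal sketch of the login side of an OIDC flow with go-oidc, using
// placeholder configuration values.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/go-oidc/v3/oidc"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	// Discover endpoints and signing keys from the issuer's metadata document.
	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
	if err != nil {
		log.Fatalf("discovery failed: %v", err)
	}

	conf := oauth2.Config{
		ClientID:     "example-client-id",
		ClientSecret: "example-client-secret",
		RedirectURL:  "https://garm.example.com/auth/callback", // placeholder
		Endpoint:     provider.Endpoint(),
		Scopes:       []string{oidc.ScopeOpenID, "profile", "email"},
	}

	// The caller would redirect the browser to this URL; the state value must
	// be random and checked again on the callback.
	fmt.Println(conf.AuthCodeURL("random-state-value"))
}
```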
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go new file mode 100644 index 000000000..f42d37d48 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go @@ -0,0 +1,32 @@ +package oidc + +import jose "github.com/go-jose/go-jose/v4" + +// JOSE asymmetric signing algorithm values as defined by RFC 7518 +// +// see: https://tools.ietf.org/html/rfc7518#section-3.1 +const ( + RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256 + RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384 + RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512 + ES256 = "ES256" // ECDSA using P-256 and SHA-256 + ES384 = "ES384" // ECDSA using P-384 and SHA-384 + ES512 = "ES512" // ECDSA using P-521 and SHA-512 + PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256 + PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384 + PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512 + EdDSA = "EdDSA" // Ed25519 using SHA-512 +) + +var allAlgs = []jose.SignatureAlgorithm{ + jose.RS256, + jose.RS384, + jose.RS512, + jose.ES256, + jose.ES384, + jose.ES512, + jose.PS256, + jose.PS384, + jose.PS512, + jose.EdDSA, +} diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go new file mode 100644 index 000000000..c5e4d787c --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go @@ -0,0 +1,263 @@ +package oidc + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "errors" + "fmt" + "io" + "net/http" + "sync" + + jose "github.com/go-jose/go-jose/v4" +) + +// StaticKeySet is a verifier that validates JWT against a static set of public keys. +type StaticKeySet struct { + // PublicKeys used to verify the JWT. Supported types are *rsa.PublicKey and + // *ecdsa.PublicKey. + PublicKeys []crypto.PublicKey +} + +// VerifySignature compares the signature against a static set of public keys. +func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) { + // Algorithms are already checked by Verifier, so this parse method accepts + // any algorithm. + jws, err := jose.ParseSigned(jwt, allAlgs) + if err != nil { + return nil, fmt.Errorf("parsing jwt: %v", err) + } + for _, pub := range s.PublicKeys { + switch pub.(type) { + case *rsa.PublicKey: + case *ecdsa.PublicKey: + case ed25519.PublicKey: + default: + return nil, fmt.Errorf("invalid public key type provided: %T", pub) + } + payload, err := jws.Verify(pub) + if err != nil { + continue + } + return payload, nil + } + return nil, fmt.Errorf("no public keys able to verify jwt") +} + +// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP +// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically +// used by NewProvider using the URLs returned by OpenID Connect discovery, but is +// exposed for providers that don't support discovery or to prevent round trips to the +// discovery URL. +// +// The returned KeySet is a long lived verifier that caches keys based on any +// keys change. Reuse a common remote key set instead of creating new ones as needed. +func NewRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet { + return newRemoteKeySet(ctx, jwksURL) +} + +func newRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet { + return &RemoteKeySet{ + jwksURL: jwksURL, + // For historical reasons, this package uses contexts for configuration, not just + // cancellation. In hindsight, this was a bad idea. 
+ // + // Attemps to reason about how cancels should work with background requests have + // largely lead to confusion. Use the context here as a config bag-of-values and + // ignore the cancel function. + ctx: context.WithoutCancel(ctx), + } +} + +// RemoteKeySet is a KeySet implementation that validates JSON web tokens against +// a jwks_uri endpoint. +type RemoteKeySet struct { + jwksURL string + + // Used for configuration. Cancelation is ignored. + ctx context.Context + + // guard all other fields + mu sync.RWMutex + + // inflight suppresses parallel execution of updateKeys and allows + // multiple goroutines to wait for its result. + inflight *inflight + + // A set of cached keys. + cachedKeys []jose.JSONWebKey +} + +// inflight is used to wait on some in-flight request from multiple goroutines. +type inflight struct { + doneCh chan struct{} + + keys []jose.JSONWebKey + err error +} + +func newInflight() *inflight { + return &inflight{doneCh: make(chan struct{})} +} + +// wait returns a channel that multiple goroutines can receive on. Once it returns +// a value, the inflight request is done and result() can be inspected. +func (i *inflight) wait() <-chan struct{} { + return i.doneCh +} + +// done can only be called by a single goroutine. It records the result of the +// inflight request and signals other goroutines that the result is safe to +// inspect. +func (i *inflight) done(keys []jose.JSONWebKey, err error) { + i.keys = keys + i.err = err + close(i.doneCh) +} + +// result cannot be called until the wait() channel has returned a value. +func (i *inflight) result() ([]jose.JSONWebKey, error) { + return i.keys, i.err +} + +// paresdJWTKey is a context key that allows common setups to avoid parsing the +// JWT twice. It holds a *jose.JSONWebSignature value. +var parsedJWTKey contextKey + +// VerifySignature validates a payload against a signature from the jwks_uri. +// +// Users MUST NOT call this method directly and should use an IDTokenVerifier +// instead. This method skips critical validations such as 'alg' values and is +// only exported to implement the KeySet interface. +func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) { + jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature) + if !ok { + // The algorithm values are already enforced by the Validator, which also sets + // the context value above to pre-parsed signature. + // + // Practically, this codepath isn't called in normal use of this package, but + // if it is, the algorithms have already been checked. + var err error + jws, err = jose.ParseSigned(jwt, allAlgs) + if err != nil { + return nil, fmt.Errorf("oidc: malformed jwt: %v", err) + } + } + return r.verify(ctx, jws) +} + +func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) { + // We don't support JWTs signed with multiple signatures. + keyID := "" + for _, sig := range jws.Signatures { + keyID = sig.Header.KeyID + break + } + + keys := r.keysFromCache() + for _, key := range keys { + if keyID == "" || key.KeyID == keyID { + if payload, err := jws.Verify(&key); err == nil { + return payload, nil + } + } + } + + // If the kid doesn't match, check for new keys from the remote. This is the + // strategy recommended by the spec. 
+ // + // https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys + keys, err := r.keysFromRemote(ctx) + if err != nil { + return nil, fmt.Errorf("fetching keys %w", err) + } + + for _, key := range keys { + if keyID == "" || key.KeyID == keyID { + if payload, err := jws.Verify(&key); err == nil { + return payload, nil + } + } + } + return nil, errors.New("failed to verify id token signature") +} + +func (r *RemoteKeySet) keysFromCache() (keys []jose.JSONWebKey) { + r.mu.RLock() + defer r.mu.RUnlock() + return r.cachedKeys +} + +// keysFromRemote syncs the key set from the remote set, records the values in the +// cache, and returns the key set. +func (r *RemoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) { + // Need to lock to inspect the inflight request field. + r.mu.Lock() + // If there's not a current inflight request, create one. + if r.inflight == nil { + r.inflight = newInflight() + + // This goroutine has exclusive ownership over the current inflight + // request. It releases the resource by nil'ing the inflight field + // once the goroutine is done. + go func() { + // Sync keys and finish inflight when that's done. + keys, err := r.updateKeys() + + r.inflight.done(keys, err) + + // Lock to update the keys and indicate that there is no longer an + // inflight request. + r.mu.Lock() + defer r.mu.Unlock() + + if err == nil { + r.cachedKeys = keys + } + + // Free inflight so a different request can run. + r.inflight = nil + }() + } + inflight := r.inflight + r.mu.Unlock() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-inflight.wait(): + return inflight.result() + } +} + +func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) { + req, err := http.NewRequest("GET", r.jwksURL, nil) + if err != nil { + return nil, fmt.Errorf("oidc: can't create request: %v", err) + } + + resp, err := doRequest(r.ctx, req) + if err != nil { + return nil, fmt.Errorf("oidc: get keys failed %w", err) + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body) + } + + var keySet jose.JSONWebKeySet + err = unmarshalResp(resp, body, &keySet) + if err != nil { + return nil, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body) + } + return keySet.Keys, nil +} diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go new file mode 100644 index 000000000..2659518cc --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go @@ -0,0 +1,584 @@ +// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package. +package oidc + +import ( + "context" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "io" + "mime" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/oauth2" +) + +const ( + // ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests. + ScopeOpenID = "openid" + + // ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting + // OAuth2 refresh tokens. + // + // Support for this scope differs between OpenID Connect providers. For instance + // Google rejects it, favoring appending "access_type=offline" as part of the + // authorization request instead. 
+ // + // See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess + ScopeOfflineAccess = "offline_access" +) + +var ( + errNoAtHash = errors.New("id token did not have an access token hash") + errInvalidAtHash = errors.New("access token hash does not match value in ID token") +) + +type contextKey int + +var issuerURLKey contextKey + +// ClientContext returns a new Context that carries the provided HTTP client. +// +// This method sets the same context key used by the golang.org/x/oauth2 package, +// so the returned context works for that package too. +// +// myClient := &http.Client{} +// ctx := oidc.ClientContext(parentContext, myClient) +// +// // This will use the custom client +// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com") +func ClientContext(ctx context.Context, client *http.Client) context.Context { + return context.WithValue(ctx, oauth2.HTTPClient, client) +} + +func getClient(ctx context.Context) *http.Client { + if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok { + return c + } + return nil +} + +// InsecureIssuerURLContext allows discovery to work when the issuer_url reported +// by upstream is mismatched with the discovery URL. This is meant for integration +// with off-spec providers such as Azure. +// +// discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0" +// issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0" +// +// ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL) +// +// // Provider will be discovered with the discoveryBaseURL, but use issuerURL +// // for future issuer validation. +// provider, err := oidc.NewProvider(ctx, discoveryBaseURL) +// +// This is insecure because validating the correct issuer is critical for multi-tenant +// providers. Any overrides here MUST be carefully reviewed. +func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Context { + return context.WithValue(ctx, issuerURLKey, issuerURL) +} + +func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { + client := http.DefaultClient + if c := getClient(ctx); c != nil { + client = c + } + return client.Do(req.WithContext(ctx)) +} + +// Provider represents an OpenID Connect server's configuration. +type Provider struct { + issuer string + authURL string + tokenURL string + deviceAuthURL string + userInfoURL string + jwksURL string + algorithms []string + + // Raw claims returned by the server. + rawClaims []byte + + // Guards all of the following fields. + mu sync.Mutex + // HTTP client specified from the initial NewProvider request. This is used + // when creating the common key set. + client *http.Client + // A key set that uses context.Background() and is shared between all code paths + // that don't have a convinent way of supplying a unique context. 
+ commonRemoteKeySet KeySet +} + +func (p *Provider) remoteKeySet() KeySet { + p.mu.Lock() + defer p.mu.Unlock() + if p.commonRemoteKeySet == nil { + ctx := context.Background() + if p.client != nil { + ctx = ClientContext(ctx, p.client) + } + p.commonRemoteKeySet = NewRemoteKeySet(ctx, p.jwksURL) + } + return p.commonRemoteKeySet +} + +type providerJSON struct { + Issuer string `json:"issuer"` + AuthURL string `json:"authorization_endpoint"` + TokenURL string `json:"token_endpoint"` + DeviceAuthURL string `json:"device_authorization_endpoint"` + JWKSURL string `json:"jwks_uri"` + UserInfoURL string `json:"userinfo_endpoint"` + Algorithms []string `json:"id_token_signing_alg_values_supported"` +} + +// supportedAlgorithms is a list of algorithms explicitly supported by this +// package. If a provider supports other algorithms, such as HS256 or none, +// those values won't be passed to the IDTokenVerifier. +var supportedAlgorithms = map[string]bool{ + RS256: true, + RS384: true, + RS512: true, + ES256: true, + ES384: true, + ES512: true, + PS256: true, + PS384: true, + PS512: true, + EdDSA: true, +} + +// ProviderConfig allows direct creation of a [Provider] from metadata +// configuration. This is intended for interop with providers that don't support +// discovery, or host the JSON discovery document at an off-spec path. +// +// The ProviderConfig struct specifies JSON struct tags to support document +// parsing. +// +// // Directly fetch the metadata document. +// resp, err := http.Get("https://login.example.com/custom-metadata-path") +// if err != nil { +// // ... +// } +// defer resp.Body.Close() +// +// // Parse config from JSON metadata. +// config := &oidc.ProviderConfig{} +// if err := json.NewDecoder(resp.Body).Decode(config); err != nil { +// // ... +// } +// p := config.NewProvider(context.Background()) +// +// For providers that implement discovery, use [NewProvider] instead. +// +// See: https://openid.net/specs/openid-connect-discovery-1_0.html +type ProviderConfig struct { + // IssuerURL is the identity of the provider, and the string it uses to sign + // ID tokens with. For example "https://accounts.google.com". This value MUST + // match ID tokens exactly. + IssuerURL string `json:"issuer"` + // AuthURL is the endpoint used by the provider to support the OAuth 2.0 + // authorization endpoint. + AuthURL string `json:"authorization_endpoint"` + // TokenURL is the endpoint used by the provider to support the OAuth 2.0 + // token endpoint. + TokenURL string `json:"token_endpoint"` + // DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0 + // device authorization endpoint. + DeviceAuthURL string `json:"device_authorization_endpoint"` + // UserInfoURL is the endpoint used by the provider to support the OpenID + // Connect UserInfo flow. + // + // https://openid.net/specs/openid-connect-core-1_0.html#UserInfo + UserInfoURL string `json:"userinfo_endpoint"` + // JWKSURL is the endpoint used by the provider to advertise public keys to + // verify issued ID tokens. This endpoint is polled as new keys are made + // available. + JWKSURL string `json:"jwks_uri"` + + // Algorithms, if provided, indicate a list of JWT algorithms allowed to sign + // ID tokens. If not provided, this defaults to the algorithms advertised by + // the JWK endpoint, then the set of algorithms supported by this package. 
+ Algorithms []string `json:"id_token_signing_alg_values_supported"` +} + +// NewProvider initializes a provider from a set of endpoints, rather than +// through discovery. +// +// The provided context is only used for [http.Client] configuration through +// [ClientContext], not cancelation. +func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider { + return &Provider{ + issuer: p.IssuerURL, + authURL: p.AuthURL, + tokenURL: p.TokenURL, + deviceAuthURL: p.DeviceAuthURL, + userInfoURL: p.UserInfoURL, + jwksURL: p.JWKSURL, + algorithms: p.Algorithms, + client: getClient(ctx), + } +} + +// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider. +// The issuer is the URL identifier for the service. For example: "https://accounts.google.com" +// or "https://login.salesforce.com". +// +// OpenID Connect providers that don't implement discovery or host the discovery +// document at a non-spec complaint path (such as requiring a URL parameter), +// should use [ProviderConfig] instead. +// +// See: https://openid.net/specs/openid-connect-discovery-1_0.html +func NewProvider(ctx context.Context, issuer string) (*Provider, error) { + wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration" + req, err := http.NewRequest("GET", wellKnown, nil) + if err != nil { + return nil, err + } + resp, err := doRequest(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s: %s", resp.Status, body) + } + + var p providerJSON + err = unmarshalResp(resp, body, &p) + if err != nil { + return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err) + } + + issuerURL, skipIssuerValidation := ctx.Value(issuerURLKey).(string) + if !skipIssuerValidation { + issuerURL = issuer + } + if p.Issuer != issuerURL && !skipIssuerValidation { + return nil, fmt.Errorf("oidc: issuer URL provided to client (%q) did not match the issuer URL returned by provider (%q)", issuer, p.Issuer) + } + var algs []string + for _, a := range p.Algorithms { + if supportedAlgorithms[a] { + algs = append(algs, a) + } + } + return &Provider{ + issuer: issuerURL, + authURL: p.AuthURL, + tokenURL: p.TokenURL, + deviceAuthURL: p.DeviceAuthURL, + userInfoURL: p.UserInfoURL, + jwksURL: p.JWKSURL, + algorithms: algs, + rawClaims: body, + client: getClient(ctx), + }, nil +} + +// Claims unmarshals raw fields returned by the server during discovery. +// +// var claims struct { +// ScopesSupported []string `json:"scopes_supported"` +// ClaimsSupported []string `json:"claims_supported"` +// } +// +// if err := provider.Claims(&claims); err != nil { +// // handle unmarshaling error +// } +// +// For a list of fields defined by the OpenID Connect spec see: +// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata +func (p *Provider) Claims(v interface{}) error { + if p.rawClaims == nil { + return errors.New("oidc: claims not set") + } + return json.Unmarshal(p.rawClaims, v) +} + +// Endpoint returns the OAuth2 auth and token endpoints for the given provider. +func (p *Provider) Endpoint() oauth2.Endpoint { + return oauth2.Endpoint{AuthURL: p.authURL, DeviceAuthURL: p.deviceAuthURL, TokenURL: p.tokenURL} +} + +// UserInfoEndpoint returns the OpenID Connect userinfo endpoint for the given +// provider. 
+func (p *Provider) UserInfoEndpoint() string { + return p.userInfoURL +} + +// UserInfo represents the OpenID Connect userinfo claims. +type UserInfo struct { + Subject string `json:"sub"` + Profile string `json:"profile"` + Email string `json:"email"` + EmailVerified bool `json:"email_verified"` + + claims []byte +} + +type userInfoRaw struct { + Subject string `json:"sub"` + Profile string `json:"profile"` + Email string `json:"email"` + // Handle providers that return email_verified as a string + // https://forums.aws.amazon.com/thread.jspa?messageID=949441 and + // https://discuss.elastic.co/t/openid-error-after-authenticating-against-aws-cognito/206018/11 + EmailVerified stringAsBool `json:"email_verified"` +} + +// Claims unmarshals the raw JSON object claims into the provided object. +func (u *UserInfo) Claims(v interface{}) error { + if u.claims == nil { + return errors.New("oidc: claims not set") + } + return json.Unmarshal(u.claims, v) +} + +// UserInfo uses the token source to query the provider's user info endpoint. +func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) { + if p.userInfoURL == "" { + return nil, errors.New("oidc: user info endpoint is not supported by this provider") + } + + req, err := http.NewRequest("GET", p.userInfoURL, nil) + if err != nil { + return nil, fmt.Errorf("oidc: create GET request: %v", err) + } + + token, err := tokenSource.Token() + if err != nil { + return nil, fmt.Errorf("oidc: get access token: %v", err) + } + token.SetAuthHeader(req) + + resp, err := doRequest(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s: %s", resp.Status, body) + } + + ct := resp.Header.Get("Content-Type") + mediaType, _, parseErr := mime.ParseMediaType(ct) + if parseErr == nil && mediaType == "application/jwt" { + payload, err := p.remoteKeySet().VerifySignature(ctx, string(body)) + if err != nil { + return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err) + } + body = payload + } + + var userInfo userInfoRaw + if err := json.Unmarshal(body, &userInfo); err != nil { + return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err) + } + return &UserInfo{ + Subject: userInfo.Subject, + Profile: userInfo.Profile, + Email: userInfo.Email, + EmailVerified: bool(userInfo.EmailVerified), + claims: body, + }, nil +} + +// IDToken is an OpenID Connect extension that provides a predictable representation +// of an authorization event. +// +// The ID Token only holds fields OpenID Connect requires. To access additional +// claims returned by the server, use the Claims method. +type IDToken struct { + // The URL of the server which issued this token. OpenID Connect + // requires this value always be identical to the URL used for + // initial discovery. + // + // Note: Because of a known issue with Google Accounts' implementation + // this value may differ when using Google. + // + // See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo + Issuer string + + // The client ID, or set of client IDs, that this token is issued for. For + // common uses, this is the client that initialized the auth flow. + // + // This package ensures the audience contains an expected value. + Audience []string + + // A unique string which identifies the end user. + Subject string + + // Expiry of the token. 
Ths package will not process tokens that have + // expired unless that validation is explicitly turned off. + Expiry time.Time + // When the token was issued by the provider. + IssuedAt time.Time + + // Initial nonce provided during the authentication redirect. + // + // This package does NOT provided verification on the value of this field + // and it's the user's responsibility to ensure it contains a valid value. + Nonce string + + // at_hash claim, if set in the ID token. Callers can verify an access token + // that corresponds to the ID token using the VerifyAccessToken method. + AccessTokenHash string + + // signature algorithm used for ID token, needed to compute a verification hash of an + // access token + sigAlgorithm string + + // Raw payload of the id_token. + claims []byte + + // Map of distributed claim names to claim sources + distributedClaims map[string]claimSource +} + +// Claims unmarshals the raw JSON payload of the ID Token into a provided struct. +// +// idToken, err := idTokenVerifier.Verify(rawIDToken) +// if err != nil { +// // handle error +// } +// var claims struct { +// Email string `json:"email"` +// EmailVerified bool `json:"email_verified"` +// } +// if err := idToken.Claims(&claims); err != nil { +// // handle error +// } +func (i *IDToken) Claims(v interface{}) error { + if i.claims == nil { + return errors.New("oidc: claims not set") + } + return json.Unmarshal(i.claims, v) +} + +// VerifyAccessToken verifies that the hash of the access token that corresponds to the iD token +// matches the hash in the id token. It returns an error if the hashes don't match. +// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token +// before calling this method. See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken +func (i *IDToken) VerifyAccessToken(accessToken string) error { + if i.AccessTokenHash == "" { + return errNoAtHash + } + var h hash.Hash + switch i.sigAlgorithm { + case RS256, ES256, PS256: + h = sha256.New() + case RS384, ES384, PS384: + h = sha512.New384() + case RS512, ES512, PS512, EdDSA: + h = sha512.New() + default: + return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm) + } + h.Write([]byte(accessToken)) // hash documents that Write will never return an error + sum := h.Sum(nil)[:h.Size()/2] + actual := base64.RawURLEncoding.EncodeToString(sum) + if actual != i.AccessTokenHash { + return errInvalidAtHash + } + return nil +} + +type idToken struct { + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience audience `json:"aud"` + Expiry jsonTime `json:"exp"` + IssuedAt jsonTime `json:"iat"` + NotBefore *jsonTime `json:"nbf"` + Nonce string `json:"nonce"` + AtHash string `json:"at_hash"` + ClaimNames map[string]string `json:"_claim_names"` + ClaimSources map[string]claimSource `json:"_claim_sources"` +} + +type claimSource struct { + Endpoint string `json:"endpoint"` + AccessToken string `json:"access_token"` +} + +type stringAsBool bool + +func (sb *stringAsBool) UnmarshalJSON(b []byte) error { + switch string(b) { + case "true", `"true"`: + *sb = true + case "false", `"false"`: + *sb = false + default: + return errors.New("invalid value for boolean") + } + return nil +} + +type audience []string + +func (a *audience) UnmarshalJSON(b []byte) error { + var s string + if json.Unmarshal(b, &s) == nil { + *a = audience{s} + return nil + } + var auds []string + if err := json.Unmarshal(b, &auds); err != nil { + return err + } + *a = auds + return nil +} 
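The `stringAsBool` and `audience` helpers just above exist because providers are inconsistent about claim encodings: `aud` may be a bare string or an array, and some providers return `email_verified` as the string "true". The standalone snippet below re-declares minimal equivalents of those unexported types purely to show the behavior; it is not code from this change.

```go
// Standalone sketch of the lenient claim decoding implemented by the
// unexported audience and stringAsBool types above.
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type audience []string

func (a *audience) UnmarshalJSON(b []byte) error {
	var s string
	if json.Unmarshal(b, &s) == nil {
		*a = audience{s}
		return nil
	}
	var many []string
	if err := json.Unmarshal(b, &many); err != nil {
		return err
	}
	*a = many
	return nil
}

type stringAsBool bool

func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
	switch string(b) {
	case "true", `"true"`:
		*sb = true
	case "false", `"false"`:
		*sb = false
	default:
		return errors.New("invalid value for boolean")
	}
	return nil
}

func main() {
	var claims struct {
		Aud           audience     `json:"aud"`
		EmailVerified stringAsBool `json:"email_verified"`
	}
	// Both payloads decode to the same normalized form.
	for _, payload := range []string{
		`{"aud": "client-a", "email_verified": "true"}`,
		`{"aud": ["client-a", "client-b"], "email_verified": true}`,
	} {
		if err := json.Unmarshal([]byte(payload), &claims); err != nil {
			panic(err)
		}
		fmt.Println(claims.Aud, bool(claims.EmailVerified))
	}
}
```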
+ +type jsonTime time.Time + +func (j *jsonTime) UnmarshalJSON(b []byte) error { + var n json.Number + if err := json.Unmarshal(b, &n); err != nil { + return err + } + var unix int64 + + if t, err := n.Int64(); err == nil { + unix = t + } else { + f, err := n.Float64() + if err != nil { + return err + } + unix = int64(f) + } + *j = jsonTime(time.Unix(unix, 0)) + return nil +} + +func unmarshalResp(r *http.Response, body []byte, v interface{}) error { + err := json.Unmarshal(body, &v) + if err == nil { + return nil + } + ct := r.Header.Get("Content-Type") + mediaType, _, parseErr := mime.ParseMediaType(ct) + if parseErr == nil && mediaType == "application/json" { + return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err) + } + return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err) +} diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go new file mode 100644 index 000000000..a8bf107d4 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go @@ -0,0 +1,338 @@ +package oidc + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + jose "github.com/go-jose/go-jose/v4" + "golang.org/x/oauth2" +) + +const ( + issuerGoogleAccounts = "https://accounts.google.com" + issuerGoogleAccountsNoScheme = "accounts.google.com" +) + +// TokenExpiredError indicates that Verify failed because the token was expired. This +// error does NOT indicate that the token is not also invalid for other reasons. Other +// checks might have failed if the expiration check had not failed. +type TokenExpiredError struct { + // Expiry is the time when the token expired. + Expiry time.Time +} + +func (e *TokenExpiredError) Error() string { + return fmt.Sprintf("oidc: token is expired (Token Expiry: %v)", e.Expiry) +} + +// KeySet is a set of publc JSON Web Keys that can be used to validate the signature +// of JSON web tokens. This is expected to be backed by a remote key set through +// provider metadata discovery or an in-memory set of keys delivered out-of-band. +type KeySet interface { + // VerifySignature parses the JSON web token, verifies the signature, and returns + // the raw payload. Header and claim fields are validated by other parts of the + // package. For example, the KeySet does not need to check values such as signature + // algorithm, issuer, and audience since the IDTokenVerifier validates these values + // independently. + // + // If VerifySignature makes HTTP requests to verify the token, it's expected to + // use any HTTP client associated with the context through ClientContext. + VerifySignature(ctx context.Context, jwt string) (payload []byte, err error) +} + +// IDTokenVerifier provides verification for ID Tokens. +type IDTokenVerifier struct { + keySet KeySet + config *Config + issuer string +} + +// NewVerifier returns a verifier manually constructed from a key set and issuer URL. +// +// It's easier to use provider discovery to construct an IDTokenVerifier than creating +// one directly. This method is intended to be used with provider that don't support +// metadata discovery, or avoiding round trips when the key set URL is already known. 
+// +// This constructor can be used to create a verifier directly using the issuer URL and +// JSON Web Key Set URL without using discovery: +// +// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs") +// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config) +// +// Or a static key set (e.g. for testing): +// +// keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}} +// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config) +func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier { + return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL} +} + +// Config is the configuration for an IDTokenVerifier. +type Config struct { + // Expected audience of the token. For a majority of the cases this is expected to be + // the ID of the client that initialized the login flow. It may occasionally differ if + // the provider supports the authorizing party (azp) claim. + // + // If not provided, users must explicitly set SkipClientIDCheck. + ClientID string + // If specified, only this set of algorithms may be used to sign the JWT. + // + // If the IDTokenVerifier is created from a provider with (*Provider).Verifier, this + // defaults to the set of algorithms the provider supports. Otherwise this values + // defaults to RS256. + SupportedSigningAlgs []string + + // If true, no ClientID check performed. Must be true if ClientID field is empty. + SkipClientIDCheck bool + // If true, token expiry is not checked. + SkipExpiryCheck bool + + // SkipIssuerCheck is intended for specialized cases where the the caller wishes to + // defer issuer validation. When enabled, callers MUST independently verify the Token's + // Issuer is a known good value. + // + // Mismatched issuers often indicate client mis-configuration. If mismatches are + // unexpected, evaluate if the provided issuer URL is incorrect instead of enabling + // this option. + SkipIssuerCheck bool + + // Time function to check Token expiry. Defaults to time.Now + Now func() time.Time + + // InsecureSkipSignatureCheck causes this package to skip JWT signature validation. + // It's intended for special cases where providers (such as Azure), use the "none" + // algorithm. + // + // This option can only be enabled safely when the ID Token is received directly + // from the provider after the token exchange. + // + // This option MUST NOT be used when receiving an ID Token from sources other + // than the token endpoint. + InsecureSkipSignatureCheck bool +} + +// VerifierContext returns an IDTokenVerifier that uses the provider's key set to +// verify JWTs. As opposed to Verifier, the context is used to configure requests +// to the upstream JWKs endpoint. The provided context's cancellation is ignored. +func (p *Provider) VerifierContext(ctx context.Context, config *Config) *IDTokenVerifier { + return p.newVerifier(NewRemoteKeySet(ctx, p.jwksURL), config) +} + +// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs. +// +// The returned verifier uses a background context for all requests to the upstream +// JWKs endpoint. To control that context, use VerifierContext instead. 
+func (p *Provider) Verifier(config *Config) *IDTokenVerifier { + return p.newVerifier(p.remoteKeySet(), config) +} + +func (p *Provider) newVerifier(keySet KeySet, config *Config) *IDTokenVerifier { + if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 { + // Make a copy so we don't modify the config values. + cp := &Config{} + *cp = *config + cp.SupportedSigningAlgs = p.algorithms + config = cp + } + return NewVerifier(p.issuer, keySet, config) +} + +func contains(sli []string, ele string) bool { + for _, s := range sli { + if s == ele { + return true + } + } + return false +} + +// Returns the Claims from the distributed JWT token +func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) { + req, err := http.NewRequest("GET", src.Endpoint, nil) + if err != nil { + return nil, fmt.Errorf("malformed request: %v", err) + } + if src.AccessToken != "" { + req.Header.Set("Authorization", "Bearer "+src.AccessToken) + } + + resp, err := doRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err) + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode) + } + + token, err := verifier.Verify(ctx, string(body)) + if err != nil { + return nil, fmt.Errorf("malformed response body: %v", err) + } + + return token.claims, nil +} + +// Verify parses a raw ID Token, verifies it's been signed by the provider, performs +// any additional checks depending on the Config, and returns the payload. +// +// Verify does NOT do nonce validation, which is the callers responsibility. +// +// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation +// +// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code")) +// if err != nil { +// // handle error +// } +// +// // Extract the ID Token from oauth2 token. +// rawIDToken, ok := oauth2Token.Extra("id_token").(string) +// if !ok { +// // handle error +// } +// +// token, err := verifier.Verify(ctx, rawIDToken) +func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) { + var supportedSigAlgs []jose.SignatureAlgorithm + for _, alg := range v.config.SupportedSigningAlgs { + supportedSigAlgs = append(supportedSigAlgs, jose.SignatureAlgorithm(alg)) + } + if len(supportedSigAlgs) == 0 { + // If no algorithms were specified by both the config and discovery, default + // to the one mandatory algorithm "RS256". + supportedSigAlgs = []jose.SignatureAlgorithm{jose.RS256} + } + if v.config.InsecureSkipSignatureCheck { + // "none" is a required value to even parse a JWT with the "none" algorithm + // using go-jose. + supportedSigAlgs = append(supportedSigAlgs, "none") + } + + // Parse and verify the signature first. This at least forces the user to have + // a valid, signed ID token before we do any other processing. + jws, err := jose.ParseSigned(rawIDToken, supportedSigAlgs) + if err != nil { + return nil, fmt.Errorf("oidc: malformed jwt: %v", err) + } + switch len(jws.Signatures) { + case 0: + return nil, fmt.Errorf("oidc: id token not signed") + case 1: + default: + return nil, fmt.Errorf("oidc: multiple signatures on id token not supported") + } + sig := jws.Signatures[0] + + var payload []byte + if v.config.InsecureSkipSignatureCheck { + // Yolo mode. 
+ payload = jws.UnsafePayloadWithoutVerification() + } else { + // The JWT is attached here for the happy path to avoid the verifier from + // having to parse the JWT twice. + ctx = context.WithValue(ctx, parsedJWTKey, jws) + payload, err = v.keySet.VerifySignature(ctx, rawIDToken) + if err != nil { + return nil, fmt.Errorf("failed to verify signature: %v", err) + } + } + var token idToken + if err := json.Unmarshal(payload, &token); err != nil { + return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err) + } + + distributedClaims := make(map[string]claimSource) + + //step through the token to map claim names to claim sources" + for cn, src := range token.ClaimNames { + if src == "" { + return nil, fmt.Errorf("oidc: failed to obtain source from claim name") + } + s, ok := token.ClaimSources[src] + if !ok { + return nil, fmt.Errorf("oidc: source does not exist") + } + distributedClaims[cn] = s + } + + t := &IDToken{ + Issuer: token.Issuer, + Subject: token.Subject, + Audience: []string(token.Audience), + Expiry: time.Time(token.Expiry), + IssuedAt: time.Time(token.IssuedAt), + Nonce: token.Nonce, + AccessTokenHash: token.AtHash, + claims: payload, + distributedClaims: distributedClaims, + sigAlgorithm: sig.Header.Algorithm, + } + + // Check issuer. + if !v.config.SkipIssuerCheck && t.Issuer != v.issuer { + // Google sometimes returns "accounts.google.com" as the issuer claim instead of + // the required "https://accounts.google.com". Detect this case and allow it only + // for Google. + // + // We will not add hooks to let other providers go off spec like this. + if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) { + return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer) + } + } + + // If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty. + // + // This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party). + if !v.config.SkipClientIDCheck { + if v.config.ClientID != "" { + if !contains(t.Audience, v.config.ClientID) { + return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience) + } + } else { + return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set") + } + } + + // If a SkipExpiryCheck is false, make sure token is not expired. + if !v.config.SkipExpiryCheck { + now := time.Now + if v.config.Now != nil { + now = v.config.Now + } + nowTime := now() + + if t.Expiry.Before(nowTime) { + return nil, &TokenExpiredError{Expiry: t.Expiry} + } + + // If nbf claim is provided in token, ensure that it is indeed in the past. + if token.NotBefore != nil { + nbfTime := time.Time(*token.NotBefore) + // Set to 5 minutes since this is what other OpenID Connect providers do to deal with clock skew. + // https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/blob/6.12.2/src/Microsoft.IdentityModel.Tokens/TokenValidationParameters.cs#L149-L153 + leeway := 5 * time.Minute + + if nowTime.Add(leeway).Before(nbfTime) { + return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime) + } + } + } + + return t, nil +} + +// Nonce returns an auth code option which requires the ID Token created by the +// OpenID Connect provider to contain the specified nonce. 
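Verify deliberately leaves nonce validation to the caller, so the Nonce option below is usually paired with a manual comparison against the IDToken.Nonce claim after verification. A sketch of that round trip, assuming the go-oidc v3 and golang.org/x/oauth2 packages; the state and nonce values are placeholders that should be generated randomly per request:

	package example

	import (
		"context"
		"errors"

		"github.com/coreos/go-oidc/v3/oidc"
		"golang.org/x/oauth2"
	)

	// AuthCodeURLWithNonce attaches a per-request nonce to the authorization URL.
	func AuthCodeURLWithNonce(cfg *oauth2.Config, state, nonce string) string {
		return cfg.AuthCodeURL(state, oidc.Nonce(nonce))
	}

	// VerifyWithNonce verifies the raw ID token and then checks the nonce claim,
	// which Verify itself does not do.
	func VerifyWithNonce(ctx context.Context, verifier *oidc.IDTokenVerifier, rawIDToken, wantNonce string) (*oidc.IDToken, error) {
		idToken, err := verifier.Verify(ctx, rawIDToken)
		if err != nil {
			return nil, err
		}
		if idToken.Nonce != wantNonce {
			return nil, errors.New("nonce in ID token does not match the expected value")
		}
		return idToken, nil
	}
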
+func Nonce(nonce string) oauth2.AuthCodeOption { + return oauth2.SetAuthURLParam("nonce", nonce) +} diff --git a/vendor/github.com/go-jose/go-jose/v4/.gitignore b/vendor/github.com/go-jose/go-jose/v4/.gitignore new file mode 100644 index 000000000..eb29ebaef --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/.gitignore @@ -0,0 +1,2 @@ +jose-util/jose-util +jose-util.t.err \ No newline at end of file diff --git a/vendor/github.com/go-jose/go-jose/v4/.golangci.yml b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml new file mode 100644 index 000000000..2a577a8f9 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml @@ -0,0 +1,53 @@ +# https://github.com/golangci/golangci-lint + +run: + skip-files: + - doc_test.go + modules-download-mode: readonly + +linters: + enable-all: true + disable: + - gochecknoglobals + - goconst + - lll + - maligned + - nakedret + - scopelint + - unparam + - funlen # added in 1.18 (requires go-jose changes before it can be enabled) + +linters-settings: + gocyclo: + min-complexity: 35 + +issues: + exclude-rules: + - text: "don't use ALL_CAPS in Go names" + linters: + - golint + - text: "hardcoded credentials" + linters: + - gosec + - text: "weak cryptographic primitive" + linters: + - gosec + - path: json/ + linters: + - dupl + - errcheck + - gocritic + - gocyclo + - golint + - govet + - ineffassign + - staticcheck + - structcheck + - stylecheck + - unused + - path: _test\.go + linters: + - scopelint + - path: jwk.go + linters: + - gocyclo diff --git a/vendor/github.com/go-jose/go-jose/v4/.travis.yml b/vendor/github.com/go-jose/go-jose/v4/.travis.yml new file mode 100644 index 000000000..48de631b0 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/.travis.yml @@ -0,0 +1,33 @@ +language: go + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: + - "1.13.x" + - "1.14.x" + - tip + +before_script: + - export PATH=$HOME/.local/bin:$PATH + +before_install: + - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0 + - pip install cram --user + +script: + - go test -v -covermode=count -coverprofile=profile.cov . + - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner + - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher + - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt + - go test -v ./json # no coverage for forked encoding/json package + - golangci-lint run + - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util + - cd .. + +after_success: + - gocovmerge *.cov */*.cov > merged.coverprofile + - goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md new file mode 100644 index 000000000..4b4805add --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md @@ -0,0 +1,9 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. 
diff --git a/vendor/github.com/go-jose/go-jose/v4/LICENSE b/vendor/github.com/go-jose/go-jose/v4/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md new file mode 100644 index 000000000..55c550917 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/README.md @@ -0,0 +1,108 @@ +# Go JOSE + +[](https://pkg.go.dev/github.com/go-jose/go-jose/v4) +[](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt) +[](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. This includes support for JSON Web Encryption, +JSON Web Signature, and JSON Web Token standards. + +## Overview + +The implementation follows the +[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. +Tables of supported algorithms are shown below. The library supports both +the compact and JWS/JWE JSON Serialization formats, and has optional support for +multiple recipients. It also comes with a small command-line utility +([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util)) +for dealing with JOSE messages in a shell. + +**Note**: We use a forked version of the `encoding/json` package from the Go +standard library which uses case-sensitive matching for member names (instead +of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). +This is to avoid differences in interpretation of messages between go-jose and +libraries in other languages. + +### Versions + +The forthcoming Version 5 will be released with several breaking API changes, +and will require Golang's `encoding/json/v2`, which is currently requires +Go 1.25 built with GOEXPERIMENT=jsonv2. + +Version 4 is the current stable version: + + import "github.com/go-jose/go-jose/v4" + +It supports at least the current and previous Golang release. Currently it +requires Golang 1.24. + +Version 3 is only receiving critical security updates. Migration to Version 4 is recommended. + +Versions 1 and 2 are obsolete, but can be found in the old repository, [square/go-jose](https://github.com/square/go-jose). + +### Supported algorithms + +See below for a table of supported algorithms. Algorithm identifiers match +the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518) +standard where possible. The Godoc reference has a list of constants. + +| Key encryption | Algorithm identifier(s) | +|:-----------------------|:-----------------------------------------------| +| RSA-PKCS#1v1.5 | RSA1_5 | +| RSA-OAEP | RSA-OAEP, RSA-OAEP-256 | +| AES key wrap | A128KW, A192KW, A256KW | +| AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW | +| ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW | +| ECDH-ES (direct) | ECDH-ES1 | +| Direct encryption | dir1 | + +1. 
Not supported in multi-recipient mode + +| Signing / MAC | Algorithm identifier(s) | +|:------------------|:------------------------| +| RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 | +| RSASSA-PSS | PS256, PS384, PS512 | +| HMAC | HS256, HS384, HS512 | +| ECDSA | ES256, ES384, ES512 | +| Ed25519 | EdDSA2 | + +2. Only available in version 2 of the package + +| Content encryption | Algorithm identifier(s) | +|:-------------------|:--------------------------------------------| +| AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 | +| AES-GCM | A128GCM, A192GCM, A256GCM | + +| Compression | Algorithm identifiers(s) | +|:-------------------|--------------------------| +| DEFLATE (RFC 1951) | DEF | + +### Supported key types + +See below for a table of supported key types. These are understood by the +library, and can be passed to corresponding functions such as `NewEncrypter` or +`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which +allows attaching a key id. + +| Algorithm(s) | Corresponding types | +|:------------------|--------------------------------------------------------------------------------------------------------------------------------------| +| RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey) | +| ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey) | +| EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey) | +| AES, HMAC | []byte | + +1. Only available in version 2 or later of the package + +## Examples + +[](https://pkg.go.dev/github.com/go-jose/go-jose/v4) +[](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt) + +Examples can be found in the Godoc +reference for this package. The +[`jose-util`](https://github.com/go-jose/go-jose/tree/main/jose-util) +subdirectory also contains a small command-line utility which might be useful +as an example as well. diff --git a/vendor/github.com/go-jose/go-jose/v4/SECURITY.md b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md new file mode 100644 index 000000000..2f18a75a8 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy +This document explains how to contact the Let's Encrypt security team to report security vulnerabilities. + +## Supported Versions +| Version | Supported | +| ------- | ----------| +| >= v3 | ✓ | +| v2 | ✗ | +| v1 | ✗ | + +## Reporting a vulnerability + +Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email. diff --git a/vendor/github.com/go-jose/go-jose/v4/asymmetric.go b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go new file mode 100644 index 000000000..f8d5774ef --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go @@ -0,0 +1,595 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto" + "crypto/aes" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/big" + + josecipher "github.com/go-jose/go-jose/v4/cipher" + "github.com/go-jose/go-jose/v4/json" +) + +// A generic RSA-based encrypter/verifier +type rsaEncrypterVerifier struct { + publicKey *rsa.PublicKey +} + +// A generic RSA-based decrypter/signer +type rsaDecrypterSigner struct { + privateKey *rsa.PrivateKey +} + +// A generic EC-based encrypter/verifier +type ecEncrypterVerifier struct { + publicKey *ecdsa.PublicKey +} + +type edEncrypterVerifier struct { + publicKey ed25519.PublicKey +} + +// A key generator for ECDH-ES +type ecKeyGenerator struct { + size int + algID string + publicKey *ecdsa.PublicKey +} + +// A generic EC-based decrypter/signer +type ecDecrypterSigner struct { + privateKey *ecdsa.PrivateKey +} + +type edDecrypterSigner struct { + privateKey ed25519.PrivateKey +} + +// newRSARecipient creates recipientKeyInfo based on the given key. +func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &rsaEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newRSASigner creates a recipientSigInfo based on the given key. +func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case RS256, RS384, RS512, PS256, PS384, PS512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &rsaDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { + if sigAlg != EdDSA { + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &edDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// newECDHRecipient creates recipientKeyInfo based on the given key. 
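These recipient and signer constructors are internal; callers reach them through the package's public NewSigner and NewEncrypter entry points. A signing round-trip sketch with an RSA key, assuming the v4 import path (key generation is inlined here only for brevity):

	package example

	import (
		"crypto/rand"
		"crypto/rsa"

		"github.com/go-jose/go-jose/v4"
	)

	// SignAndVerify signs a payload with RS256 and verifies the compact serialization.
	func SignAndVerify(payload []byte) ([]byte, error) {
		key, err := rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			return nil, err
		}

		signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: key}, nil)
		if err != nil {
			return nil, err
		}

		jws, err := signer.Sign(payload)
		if err != nil {
			return nil, err
		}

		compact, err := jws.CompactSerialize()
		if err != nil {
			return nil, err
		}

		// v4 requires callers to state which signature algorithms they accept.
		parsed, err := jose.ParseSigned(compact, []jose.SignatureAlgorithm{jose.RS256})
		if err != nil {
			return nil, err
		}
		return parsed.Verify(&key.PublicKey)
	}
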
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &ecEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newECDSASigner creates a recipientSigInfo based on the given key. +func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case ES256, ES384, ES512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &ecDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// Encrypt the given payload and update the object. +func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + encryptedKey, err := ctx.encrypt(cek, alg) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: encryptedKey, + header: &rawHeader{}, + }, nil +} + +// Encrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). +func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { + switch alg { + case RSA1_5: + return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek) + case RSA_OAEP: + return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{}) + case RSA_OAEP_256: + return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Decrypt the given payload and return the content encryption key. +func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) +} + +// Decrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). +func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { + // Note: The random reader on decrypt operations is only used for blinding, + // so stubbing is meanlingless (hence the direct use of rand.Reader). + switch alg { + case RSA1_5: + defer func() { + // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload + // because of an index out of bounds error, which we want to ignore. + // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() + // only exists for preventing crashes with unpatched versions. + // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k + // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 + _ = recover() + }() + + // Perform some input validation. 
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 + if keyBytes != len(jek) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. + return nil, ErrCryptoFailure + } + + cek, _, err := generator.genKey() + if err != nil { + return nil, ErrCryptoFailure + } + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberately ignoring errors here. + _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the + // random parameter is legacy and ignored, and it can be nil. + // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 + out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
+ return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + b, err := json.Marshal(&JSONWebKey{ + Key: &priv.PublicKey, + }) + if err != nil { + return nil, nil, err + } + + headers := rawHeader{ + headerEPK: makeRawMessage(b), + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. +func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + epk, err := headers.getEPK() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + if epk == nil { + return nil, errors.New("go-jose/go-jose: missing epk header") + } + + publicKey, ok := epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + + if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return nil, errors.New("go-jose/go-jose: invalid public key in epk header") + } + + apuData, err := headers.getAPU() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apu header") + } + apvData, err := headers.getAPV() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apv header") + } + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) + } + + var keySize int + + algorithm := headers.getAlgorithm() + switch algorithm { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(string(algorithm), keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + if alg != EdDSA { + return Signature{}, ErrUnsupportedAlgorithm + } + + sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: sig, + protected: &rawHeader{}, + }, nil +} + +func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + if alg != EdDSA { + return ErrUnsupportedAlgorithm + } + ok := ed25519.Verify(ctx.publicKey, payload, signature) + if !ok { + return errors.New("go-jose/go-jose: ed25519 signature failed to verify") + } + return nil +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
+ + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + if len(signature) != 2*keySize { + return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("go-jose/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go new file mode 100644 index 000000000..af029cec0 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. +func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. +func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. 
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] + copy(ciphertext, plaintext) + ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) + + cbc.CryptBlocks(ciphertext, ciphertext) + authtag := ctx.computeAuthTag(data, nonce, ciphertext) + + ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) + copy(out, ciphertext) + copy(out[len(ciphertext):], authtag) + + return ret +} + +// Open decrypts and authenticates the ciphertext. +func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < ctx.authtagBytes { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") + } + + offset := len(ciphertext) - ctx.authtagBytes + expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) + match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) + if match != 1 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") + } + + cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) + + // Make copy of ciphertext buffer, don't want to modify in place + buffer := append([]byte{}, ciphertext[:offset]...) + + if len(buffer)%ctx.blockCipher.BlockSize() > 0 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") + } + + cbc.CryptBlocks(buffer, buffer) + + // Remove padding + plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) + if err != nil { + return nil, err + } + + ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) + copy(out, plaintext) + + return ret, nil +} + +// Compute an authentication tag +func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { + buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) + n := 0 + n += copy(buffer, aad) + n += copy(buffer[n:], nonce) + n += copy(buffer[n:], ciphertext) + binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) + + // According to documentation, Write() on hash.Hash never fails. + hmac := hmac.New(ctx.hash, ctx.integrityKey) + _, _ = hmac.Write(buffer) + + return hmac.Sum(nil)[:ctx.authtagBytes] +} + +// resize ensures that the given slice has a capacity of at least n bytes. +// If the capacity of the slice is less than n, a new slice is allocated +// and the existing data will be copied. 
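The cbcAEAD type above satisfies the standard cipher.AEAD interface, so NewCBCHMAC can be exercised directly; a round-trip sketch with a 32-byte key laid out as in A128CBC-HS256 (a 16-byte HMAC-SHA256 integrity key followed by a 16-byte AES-128 key):

	package example

	import (
		"crypto/aes"
		"crypto/rand"

		josecipher "github.com/go-jose/go-jose/v4/cipher"
	)

	// RoundTripCBCHMAC seals a plaintext with additional authenticated data and opens it again.
	func RoundTripCBCHMAC(plaintext, aad []byte) ([]byte, error) {
		// First half of the key authenticates, second half encrypts.
		key := make([]byte, 32)
		if _, err := rand.Read(key); err != nil {
			return nil, err
		}

		aead, err := josecipher.NewCBCHMAC(key, aes.NewCipher)
		if err != nil {
			return nil, err
		}

		// CBC mode uses a 16-byte nonce (the IV).
		nonce := make([]byte, aead.NonceSize())
		if _, err := rand.Read(nonce); err != nil {
			return nil, err
		}

		sealed := aead.Seal(nil, nonce, plaintext, aad)
		return aead.Open(nil, nonce, sealed, aad)
	}
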
+func resize(in []byte, n uint64) (head, tail []byte) { + if uint64(cap(in)) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go new file mode 100644 index 000000000..f62c3bdba --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. +func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go new file mode 100644 index 000000000..093c64674 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go @@ -0,0 +1,86 @@ +/*- + * Copyright 2014 Square Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "encoding/binary" +) + +// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. +// It is an error to call this function with a private/public key that are not on the same +// curve. Callers must ensure that the keys are valid before calling this function. Output +// size may be at most 1<<16 bytes (64 KiB). +func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { + if size > 1<<16 { + panic("ECDH-ES output size too large, must be less than or equal to 1<<16") + } + + // algId, partyUInfo, partyVInfo inputs must be prefixed with the length + algID := lengthPrefixed([]byte(alg)) + ptyUInfo := lengthPrefixed(apuData) + ptyVInfo := lengthPrefixed(apvData) + + // suppPubInfo is the encoded length of the output size in bits + supPubInfo := make([]byte, 4) + binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) + + if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { + panic("public key not on same curve as private key") + } + + z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) + zBytes := z.Bytes() + + // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from + // the returned byte array. This can lead to a problem where zBytes will be + // shorter than expected which breaks the key derivation. Therefore we must pad + // to the full length of the expected coordinate here before calling the KDF. + octSize := dSize(priv.Curve) + if len(zBytes) != octSize { + zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...) + } + + reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) + key := make([]byte, size) + + // Read on the KDF will never fail + _, _ = reader.Read(key) + + return key +} + +// dSize returns the size in octets for a coordinate on a elliptic curve. +func dSize(curve elliptic.Curve) int { + order := curve.Params().P + bitLen := order.BitLen() + size := bitLen / 8 + if bitLen%8 != 0 { + size++ + } + return size +} + +func lengthPrefixed(data []byte) []byte { + out := make([]byte, len(data)+4) + binary.BigEndian.PutUint32(out, uint32(len(data))) + copy(out[4:], data) + return out +} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go new file mode 100644 index 000000000..b9effbca8 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go @@ -0,0 +1,109 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" +) + +var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} + +// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. +func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := len(cek) / 8 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], cek[i*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer, defaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[8:], r[t%n]) + + block.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(r[t%n], buffer[8:]) + } + + out := make([]byte, (n+1)*8) + copy(out, buffer[:8]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. +func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("go-jose/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go new file mode 100644 index 000000000..31290fc87 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go @@ -0,0 +1,595 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + + "github.com/go-jose/go-jose/v4/json" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. 
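A round-trip sketch of this interface using RSA-OAEP-256 key encryption and A128GCM content encryption, assuming the v4 import path; key generation and error handling are condensed for illustration:

	package example

	import (
		"crypto/rand"
		"crypto/rsa"

		"github.com/go-jose/go-jose/v4"
	)

	// EncryptDecrypt encrypts a plaintext to an RSA public key and decrypts it again.
	func EncryptDecrypt(plaintext []byte) ([]byte, error) {
		key, err := rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			return nil, err
		}

		enc, err := jose.NewEncrypter(
			jose.A128GCM,
			jose.Recipient{Algorithm: jose.RSA_OAEP_256, Key: &key.PublicKey},
			nil,
		)
		if err != nil {
			return nil, err
		}

		obj, err := enc.Encrypt(plaintext)
		if err != nil {
			return nil, err
		}

		// Decrypting the in-memory object; a serialized JWE would first be parsed,
		// which in v4 also requires listing the accepted algorithms.
		return obj.Decrypt(key)
	}
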
+type Encrypter interface { + Encrypt(plaintext []byte) (*JSONWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) + Options() EncrypterOptions +} + +// A generic content cipher +type contentCipher interface { + keySize() int + encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) + decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) +} + +// A key generator (for generating/getting a CEK) +type keyGenerator interface { + keySize() int + genKey() ([]byte, rawHeader, error) +} + +// A generic key encrypter +type keyEncrypter interface { + encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key +} + +// A generic key decrypter +type keyDecrypter interface { + decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key +} + +// A generic encrypter based on the given key encrypter and content cipher. +type genericEncrypter struct { + contentAlg ContentEncryption + compressionAlg CompressionAlgorithm + cipher contentCipher + recipients []recipientKeyInfo + keyGenerator keyGenerator + extraHeaders map[HeaderKey]interface{} +} + +type recipientKeyInfo struct { + keyID string + keyAlg KeyAlgorithm + keyEncrypter keyEncrypter +} + +// EncrypterOptions represents options that can be set on new encrypters. +type EncrypterOptions struct { + Compression CompressionAlgorithm + + // Optional map of name/value pairs to be inserted into the protected + // header of a JWS object. Some specifications which make use of + // JWS require additional values here. + // + // Values will be serialized by [json.Marshal] and must be valid inputs to + // that function. + // + // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal + ExtraHeaders map[HeaderKey]interface{} +} + +// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it +// if necessary, and returns the updated EncrypterOptions. +// +// The v parameter will be serialized by [json.Marshal] and must be a valid +// input to that function. +// +// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal +func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { + if eo.ExtraHeaders == nil { + eo.ExtraHeaders = map[HeaderKey]interface{}{} + } + eo.ExtraHeaders[k] = v + return eo +} + +// WithContentType adds a content type ("cty") header and returns the updated +// EncrypterOptions. +func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderContentType, contentType) +} + +// WithType adds a type ("typ") header and returns the updated EncrypterOptions. +func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderType, typ) +} + +// Recipient represents an algorithm/key to encrypt messages to. +// +// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used +// on the password-based encryption algorithms PBES2-HS256+A128KW, +// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe +// default of 100000 will be used for the count and a 128-bit random salt will +// be generated. +type Recipient struct { + Algorithm KeyAlgorithm + // Key must have one of these types: + // - ed25519.PublicKey + // - *ecdsa.PublicKey + // - *rsa.PublicKey + // - *JSONWebKey + // - JSONWebKey + // - []byte (a symmetric key) + // - Any type that satisfies the OpaqueKeyEncrypter interface + // + // The type of Key must match the value of Algorithm. 
+ Key interface{} + KeyID string + PBES2Count int + PBES2Salt []byte +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := rcpt.Key.(type) { + case JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case *JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case OpaqueKeyEncrypter: + keyID, rawKey = encryptionKey.KeyID(), encryptionKey + default: + rawKey = encryptionKey + } + + switch rcpt.Algorithm { + case DIRECT: + // Direct encryption mode must be treated differently + keyBytes, ok := rawKey.([]byte) + if !ok { + return nil, ErrUnsupportedKeyType + } + if encrypter.cipher.keySize() != len(keyBytes) { + return nil, ErrInvalidKeySize + } + encrypter.keyGenerator = staticKeyGenerator{ + key: keyBytes, + } + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + keyDSA, ok := rawKey.(*ecdsa.PublicKey) + if !ok { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: keyDSA, + } + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.addRecipient(rcpt) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + if len(rcpts) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + for _, recipient := range rcpts { + err := encrypter.addRecipient(recipient) + if err != nil { + return nil, err + } + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { + var recipientInfo recipientKeyInfo + + switch recipient.Algorithm { + case DIRECT, ECDH_ES: + return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) + } + + recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) + if recipient.KeyID != "" { + recipientInfo.keyID = recipient.KeyID + } + + switch 
recipient.Algorithm { + case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: + if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { + sr.p2c = recipient.PBES2Count + sr.p2s = recipient.PBES2Salt + } + } + + if err == nil { + ctx.recipients = append(ctx.recipients, recipientInfo) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case string: + return newSymmetricRecipient(alg, []byte(encryptionKey)) + case JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + case *JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + case OpaqueKeyEncrypter: + return newOpaqueKeyEncrypter(alg, encryptionKey) + } + return recipientKeyInfo{}, ErrUnsupportedKeyType +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case string: + return &symmetricKeyCipher{ + key: []byte(decryptionKey), + }, nil + case JSONWebKey: + return newDecrypter(decryptionKey.Key) + case *JSONWebKey: + return newDecrypter(decryptionKey.Key) + case OpaqueKeyDecrypter: + return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil + default: + return nil, ErrUnsupportedKeyType + } +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{} + err := obj.protected.set(headerEncryption, ctx.contentAlg) + if err != nil { + return nil, err + } + + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + err = recipient.header.set(headerAlgorithm, info.keyAlg) + if err != nil { + return nil, err + } + + if info.keyID != "" { + err = recipient.header.set(headerKeyID, info.keyID) + if err != nil { + return nil, err + } + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. 
+ obj.protected.merge(obj.recipients[0].header) + obj.recipients[0].header = nil + } + + if ctx.compressionAlg != NONE { + plaintext, err = compress(ctx.compressionAlg, plaintext) + if err != nil { + return nil, err + } + + err = obj.protected.set(headerCompression, ctx.compressionAlg) + if err != nil { + return nil, err + } + } + + for k, v := range ctx.extraHeaders { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + (*obj.protected)[k] = makeRawMessage(b) + } + + authData := obj.computeAuthData() + parts, err := ctx.cipher.encrypt(cek, authData, plaintext) + if err != nil { + return nil, err + } + + obj.iv = parts.iv + obj.ciphertext = parts.ciphertext + obj.tag = parts.tag + + return obj, nil +} + +func (ctx *genericEncrypter) Options() EncrypterOptions { + return EncrypterOptions{ + Compression: ctx.compressionAlg, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Decrypt and validate the object and return the plaintext. This +// function does not support multi-recipient. If you desire multi-recipient +// decryption use DecryptMulti instead. +// +// The decryptionKey argument must contain a private or symmetric key +// and must have one of these types: +// - *ecdsa.PrivateKey +// - *rsa.PrivateKey +// - *JSONWebKey +// - JSONWebKey +// - *JSONWebKeySet +// - JSONWebKeySet +// - []byte (a symmetric key) +// - string (a symmetric key) +// - Any type that satisfies the OpaqueKeyDecrypter interface. +// +// Note that ed25519 is only available for signatures, not encryption, so is +// not an option here. +// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >10x the size of the compressed data, whichever is larger. +func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { + headers := obj.mergedHeaders(nil) + + if len(obj.recipients) > 1 { + return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") + } + + err := headers.checkNoCritical() + if err != nil { + return nil, err + } + + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return nil, err + } + decrypter, err := newDecrypter(key) + if err != nil { + return nil, err + } + + cipher := getContentCipher(headers.getEncryption()) + if cipher == nil { + return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + var plaintext []byte + recipient := obj.recipients[0] + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + } + + if plaintext == nil { + return nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } + } + + return plaintext, nil +} + +// DecryptMulti decrypts and validates the object and returns the plaintexts, +// with support for multiple recipients. 
It returns the index of the recipient +// for which the decryption was successful, the merged headers for that recipient, +// and the plaintext. +// +// The decryptionKey argument must have one of the types allowed for the +// decryptionKey argument of Decrypt(). +// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >3x the size of the compressed data, whichever is larger. +func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { + globalHeaders := obj.mergedHeaders(nil) + + err := globalHeaders.checkNoCritical() + if err != nil { + return -1, Header{}, nil, err + } + + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return -1, Header{}, nil, err + } + decrypter, err := newDecrypter(key) + if err != nil { + return -1, Header{}, nil, err + } + + encryption := globalHeaders.getEncryption() + cipher := getContentCipher(encryption) + if cipher == nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + index := -1 + var plaintext []byte + var headers rawHeader + + for i, recipient := range obj.recipients { + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + if err == nil { + index = i + headers = recipientHeaders + break + } + } + } + + if plaintext == nil { + return -1, Header{}, nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } + } + + sanitized, err := headers.sanitized() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) + } + + return index, sanitized, plaintext, err +} diff --git a/vendor/github.com/go-jose/go-jose/v4/doc.go b/vendor/github.com/go-jose/go-jose/v4/doc.go new file mode 100644 index 000000000..0ad40ca08 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/doc.go @@ -0,0 +1,25 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. It implements encryption and signing based on +the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web +Token support available in a sub-package. 
The library supports both the compact +and JWS/JWE JSON Serialization formats, and has optional support for multiple +recipients. +*/ +package jose diff --git a/vendor/github.com/go-jose/go-jose/v4/encoding.go b/vendor/github.com/go-jose/go-jose/v4/encoding.go new file mode 100644 index 000000000..4f6e0d4a5 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/encoding.go @@ -0,0 +1,228 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "math/big" + "strings" + "unicode" + + "github.com/go-jose/go-jose/v4/json" +) + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := json.Marshal(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/go-jose/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. + // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + buf := strings.Builder{} + buf.Grow(len(data)) + for _, r := range data { + if !unicode.IsSpace(r) { + buf.WriteRune(r) + } + } + return buf.String() +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// deflate compresses the input. +func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// inflate decompresses the input. +// +// Errors if the decompressed data would be >250kB or >10x the size of the +// compressed data, whichever is larger. 
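+//
+// Concretely (an illustrative note, not upstream text): the cap computed below
+// is max(250_000, 10*int64(len(input))), so e.g. a 100 kB compressed input may
+// inflate to at most 1 MB before an error is returned.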
+func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + maxCompressedSize := max(250_000, 10*int64(len(input))) + + limit := maxCompressedSize + 1 + n, err := io.CopyN(output, reader, limit) + if err != nil && err != io.EOF { + return nil, err + } + if n == limit { + return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize) + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. +type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return json.Marshal(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := json.Unmarshal(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64.RawURLEncoding.DecodeString(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64.RawURLEncoding.EncodeToString(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. + if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} + +func base64EncodeLen(sl []byte) int { + return base64.RawURLEncoding.EncodedLen(len(sl)) +} + +func base64JoinWithDots(inputs ...[]byte) string { + if len(inputs) == 0 { + return "" + } + + // Count of dots. + totalCount := len(inputs) - 1 + + for _, input := range inputs { + totalCount += base64EncodeLen(input) + } + + out := make([]byte, totalCount) + startEncode := 0 + for i, input := range inputs { + base64.RawURLEncoding.Encode(out[startEncode:], input) + + if i == len(inputs)-1 { + continue + } + + startEncode += base64EncodeLen(input) + out[startEncode] = '.' + startEncode++ + } + + return string(out) +} diff --git a/vendor/github.com/go-jose/go-jose/v4/json/LICENSE b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-jose/go-jose/v4/json/README.md b/vendor/github.com/go-jose/go-jose/v4/json/README.md new file mode 100644 index 000000000..86de5e558 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. + +The following changes were made: + +* Object deserialization uses case-sensitive member name matching instead of + [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). + This is to avoid differences in the interpretation of JOSE messages between + go-jose and libraries written in other languages. +* When deserializing a JSON object, we check for duplicate keys and reject the + input whenever we detect a duplicate. Rather than trying to work with malformed + data, we prefer to reject it right away. diff --git a/vendor/github.com/go-jose/go-jose/v4/json/decode.go b/vendor/github.com/go-jose/go-jose/v4/json/decode.go new file mode 100644 index 000000000..50634dd84 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/json/decode.go @@ -0,0 +1,1216 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. 
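+//
+// A minimal illustration (an assumed example, not part of the original text):
+//
+//	var u struct {
+//		Name string `json:"name"`
+//	}
+//	err := Unmarshal([]byte(`{"name":"jose"}`), &u) // u.Name == "jose", err == nil
+//
+// Unlike encoding/json, this fork also rejects objects that contain duplicate
+// keys instead of silently keeping one of the values.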
+// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a string-keyed map, Unmarshal first +// establishes a map to use, If the map is nil, Unmarshal allocates a new map. +// Otherwise Unmarshal reuses the existing map, keeping existing entries. +// Unmarshal then stores key-value pairs from the JSON object into the map. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// “not present,” unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) 
+type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +type NumberUnmarshalType int + +const ( + // unmarshal a JSON number into an interface{} as a float64 + UnmarshalFloat NumberUnmarshalType = iota + // unmarshal a JSON number into an interface{} as a `json.Number` + UnmarshalJSONNumber + // unmarshal a JSON number into an interface{} as a int64 + // if value is an integer otherwise float64 + UnmarshalIntOrFloat +) + +// decodeState represents the state while decoding a JSON value. 
+type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + numberType NumberUnmarshalType +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. 
+func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. 
+ op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. 
+ if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64, int64 or a Number +// depending on d.numberDecodeType. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + switch d.numberType { + + case UnmarshalJSONNumber: + return Number(s), nil + case UnmarshalIntOrFloat: + v, err := strconv.ParseInt(s, 10, 64) + if err == nil { + return v, nil + } + + // tries to parse integer number in scientific notation + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + + // if it has no decimal value use int64 + if fi, fd := math.Modf(f); fd == 0.0 { + return int64(fi), nil + } + return f, nil + default: + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil + } + +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. 
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. 
+ d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/go-jose/go-jose/v4/json/encode.go b/vendor/github.com/go-jose/go-jose/v4/json/encode.go new file mode 100644 index 000000000..98de68ce1 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. 
The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. 
+// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML - - - - - - - - - - + + + + + + + + + +
+  {user.username}
+  {user.email}
+  {#if user.full_name}
+    {user.full_name}
+  {/if}