diff --git a/.gitignore b/.gitignore index 3a3c055..83c960e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,5 @@ .env -.kilocode +.env.sentry-build-plugin + +.pytest_cache +.ruff_cache diff --git a/Dockerfile b/Dockerfile index 22cebed..0083032 100644 --- a/Dockerfile +++ b/Dockerfile @@ -50,6 +50,8 @@ ENV STATIC_DIR=/app/static ENV REF_CONFIGURATION=/app/.ref ENV FRONTEND_HOST=http://0.0.0.0:8000 +RUN groupadd --system app && useradd --system --gid app app + WORKDIR /app # Copy the installed packages from the build stage @@ -57,5 +59,9 @@ COPY --from=backend --chown=app:app /app /app COPY /backend /app COPY --from=frontend --chown=app:app /frontend/dist /app/static +RUN chown -R app:app /app + +USER app + # Run the REF CLI tool by default ENTRYPOINT ["fastapi", "run", "--workers", "4", "/app/src/ref_backend/main.py"] diff --git a/backend/src/ref_backend/api/routes/diagnostics.py b/backend/src/ref_backend/api/routes/diagnostics.py index 913f1be..5580098 100644 --- a/backend/src/ref_backend/api/routes/diagnostics.py +++ b/backend/src/ref_backend/api/routes/diagnostics.py @@ -1,7 +1,7 @@ from typing import Literal from fastapi import APIRouter, HTTPException, Query, Request -from sqlalchemy import Integer, func, text +from sqlalchemy import Integer, func from sqlalchemy.orm import selectinload from starlette.responses import StreamingResponse @@ -177,23 +177,18 @@ async def facets(app_context: AppContextDep) -> MetricValueFacetSummary: # Get unique values for each CV dimension column from both scalar and series values dimension_summary = {} - # Get dimensions from scalar values + # Get dimensions from scalar and series values using ORM queries + # to avoid raw SQL interpolation for dimension_name in models.ScalarMetricValue._cv_dimensions: - scalar_query = text(f""" - SELECT DISTINCT {dimension_name} - FROM {models.ScalarMetricValue.__tablename__} - WHERE {dimension_name} IS NOT NULL - """) # noqa: S608 - scalar_result = 
app_context.session.execute(scalar_query).fetchall() + if not hasattr(models.ScalarMetricValue, dimension_name): + continue + + scalar_col = getattr(models.ScalarMetricValue, dimension_name) + scalar_result = app_context.session.query(scalar_col).filter(scalar_col.isnot(None)).distinct().all() scalar_values = {row[0] for row in scalar_result} - # Get dimensions from series values - series_query = text(f""" - SELECT DISTINCT {dimension_name} - FROM {models.SeriesMetricValue.__tablename__} - WHERE {dimension_name} IS NOT NULL - """) # noqa: S608 - series_result = app_context.session.execute(series_query).fetchall() + series_col = getattr(models.SeriesMetricValue, dimension_name) + series_result = app_context.session.query(series_col).filter(series_col.isnot(None)).distinct().all() series_values = {row[0] for row in series_result} # Combine and sort unique values diff --git a/backend/src/ref_backend/builder.py b/backend/src/ref_backend/builder.py index 53675d5..2aa4fa5 100644 --- a/backend/src/ref_backend/builder.py +++ b/backend/src/ref_backend/builder.py @@ -132,8 +132,8 @@ def build_app(settings: Settings, ref_config: Config, database: Database) -> Fas CORSMiddleware, allow_origins=settings.all_cors_origins, allow_origin_regex=settings.BACKEND_CORS_ORIGIN_REGEX, - allow_credentials=True, - allow_methods=["*"], + allow_credentials=False, + allow_methods=["GET"], allow_headers=["*"], ) diff --git a/backend/tests/test_api/test_cors.py b/backend/tests/test_api/test_cors.py new file mode 100644 index 0000000..fe3bb75 --- /dev/null +++ b/backend/tests/test_api/test_cors.py @@ -0,0 +1,79 @@ +"""Tests for CORS middleware configuration. 
+ +Verifies that the read-only API restricts CORS appropriately: +- Only GET method is allowed +- Credentials are not permitted +""" + +from fastapi.testclient import TestClient + + +def test_cors_allows_get_requests(client: TestClient, settings) -> None: + """Test that CORS preflight allows GET requests.""" + r = client.options( + f"{settings.API_V1_STR}/diagnostics/", + headers={ + "Origin": "http://localhost:5173", + "Access-Control-Request-Method": "GET", + }, + ) + assert r.status_code == 200 + assert "GET" in r.headers.get("access-control-allow-methods", "") + + +def test_cors_rejects_post_preflight(client: TestClient, settings) -> None: + """Test that CORS preflight rejects POST requests. + + The API is read-only so only GET should be permitted. + """ + r = client.options( + f"{settings.API_V1_STR}/diagnostics/", + headers={ + "Origin": "http://localhost:5173", + "Access-Control-Request-Method": "POST", + }, + ) + # CORS middleware returns 400 for disallowed methods + assert r.status_code == 400 + + +def test_cors_rejects_delete_preflight(client: TestClient, settings) -> None: + """Test that CORS preflight rejects DELETE requests.""" + r = client.options( + f"{settings.API_V1_STR}/diagnostics/", + headers={ + "Origin": "http://localhost:5173", + "Access-Control-Request-Method": "DELETE", + }, + ) + assert r.status_code == 400 + + +def test_cors_rejects_put_preflight(client: TestClient, settings) -> None: + """Test that CORS preflight rejects PUT requests.""" + r = client.options( + f"{settings.API_V1_STR}/diagnostics/", + headers={ + "Origin": "http://localhost:5173", + "Access-Control-Request-Method": "PUT", + }, + ) + assert r.status_code == 400 + + +def test_cors_does_not_allow_credentials(client: TestClient, settings) -> None: + """Test that CORS does not set allow-credentials header. + + A read-only public API should not accept credentials. 
+ """ + r = client.options( + f"{settings.API_V1_STR}/diagnostics/", + headers={ + "Origin": "http://localhost:5173", + "Access-Control-Request-Method": "GET", + }, + ) + assert r.status_code == 200 + # access-control-allow-credentials should be absent or "false" + allow_creds = r.headers.get("access-control-allow-credentials", "false") + assert allow_creds.lower() != "true" diff --git a/backend/tests/test_api/test_routes/test_diagnostics.py b/backend/tests/test_api/test_routes/test_diagnostics.py index 2cb60bf..330c438 100644 --- a/backend/tests/test_api/test_routes/test_diagnostics.py +++ b/backend/tests/test_api/test_routes/test_diagnostics.py @@ -574,6 +574,94 @@ def test_diagnostic_values_pagination_with_filter(client: TestClient, settings): assert item["dimensions"].get(facet["key"]) == filter_value +def test_diagnostics_facets_dimensions_are_sorted_and_non_null(client: TestClient, settings) -> None: + """Test that facets dimensions contain only sorted, non-null values. + + Validates the ORM-based facets query correctly filters NULLs and sorts results. 
+ """ + r = client.get(f"{settings.API_V1_STR}/diagnostics/facets") + assert r.status_code == 200 + data = r.json() + + for dimension_name, values in data["dimensions"].items(): + assert isinstance(values, list), f"Dimension '{dimension_name}' should be a list" + assert None not in values, f"Dimension '{dimension_name}' should not contain null values" + assert values == sorted(values), f"Dimension '{dimension_name}' should be sorted" + assert len(values) == len(set(values)), f"Dimension '{dimension_name}' should have unique values" + + +def test_diagnostics_facets_count_is_non_negative(client: TestClient, settings) -> None: + """Test that facets count reflects total metric values.""" + r = client.get(f"{settings.API_V1_STR}/diagnostics/facets") + assert r.status_code == 200 + data = r.json() + assert data["count"] >= 0 + + +def test_diagnostic_executions_ignores_truly_unknown_query_params(client: TestClient, settings) -> None: + """Test that query params not matching any model attribute are silently ignored. + + Only params matching a real CMIP6Dataset attribute should affect filtering. + Completely nonexistent param names must not cause errors. + """ + diagnostic = get_diagnostic(client, settings) + provider_slug = diagnostic["provider"]["slug"] + diagnostic_slug = diagnostic["slug"] + + r = client.get( + f"{settings.API_V1_STR}/diagnostics/{provider_slug}/{diagnostic_slug}/executions" + "?nonexistent_column=test&fake_param=evil&zzz_not_real=bad" + ) + assert r.status_code == 200 + + # Truly unknown params should be ignored, yielding the same results as unfiltered + r_unfiltered = client.get( + f"{settings.API_V1_STR}/diagnostics/{provider_slug}/{diagnostic_slug}/executions" + ) + assert r_unfiltered.status_code == 200 + assert r.json()["count"] == r_unfiltered.json()["count"] + + +def test_diagnostic_executions_dunder_attrs_do_not_crash(client: TestClient, settings) -> None: + """Test that dunder attribute names in query params don't cause 500 errors. 
+ + NOTE: hasattr(CMIP6Dataset, '__tablename__') is True because it's a + SQLAlchemy class attribute. This means dunder names pass the hasattr check + and may produce unexpected filter behavior (returning 0 results). This test + verifies the server doesn't crash -- the empty result is a known limitation + of the unfiltered hasattr approach (tracked as MEDIUM priority). + """ + diagnostic = get_diagnostic(client, settings) + provider_slug = diagnostic["provider"]["slug"] + diagnostic_slug = diagnostic["slug"] + + r = client.get( + f"{settings.API_V1_STR}/diagnostics/{provider_slug}/{diagnostic_slug}/executions?__tablename__=evil" + ) + # Must not crash with a 500 + assert r.status_code == 200 + + +def test_diagnostic_values_ignores_unknown_filter_params(client: TestClient, settings) -> None: + """Test that unknown filter params on the values endpoint are safely ignored.""" + diagnostic = get_diagnostic_with_scalar_values(client, settings) + provider_slug = diagnostic["provider"]["slug"] + diagnostic_slug = diagnostic["slug"] + + r = client.get( + f"{settings.API_V1_STR}/diagnostics/{provider_slug}/{diagnostic_slug}/values" + "?value_type=scalar&detect_outliers=off&nonexistent_col=evil&zzz_fake=bad" + ) + assert r.status_code == 200 + + r_clean = client.get( + f"{settings.API_V1_STR}/diagnostics/{provider_slug}/{diagnostic_slug}/values" + "?value_type=scalar&detect_outliers=off" + ) + assert r_clean.status_code == 200 + assert r.json()["total_count"] == r_clean.json()["total_count"] + + def test_diagnostics_list_returns_data(client: TestClient, settings) -> None: """Test that diagnostics list endpoint returns data.""" r = client.get(f"{settings.API_V1_STR}/diagnostics/") diff --git a/changelog/28.fix.md b/changelog/28.fix.md new file mode 100644 index 0000000..affdd02 --- /dev/null +++ b/changelog/28.fix.md @@ -0,0 +1 @@ +Fixed high-priority security vulnerabilities: replaced raw SQL interpolation in diagnostics facets endpoint with safe ORM queries, disabled PII 
collection in Sentry, and restricted CORS to GET-only methods for the read-only API. diff --git a/docs/explorer-scientific-content-framework.md b/docs/explorer-scientific-content-framework.md deleted file mode 100644 index e5ed870..0000000 --- a/docs/explorer-scientific-content-framework.md +++ /dev/null @@ -1,803 +0,0 @@ -# Climate REF Explorer - Scientific Content Framework - -**Purpose:** This document provides a comprehensive framework for creating, reviewing, and maintaining scientific content in the Climate REF Explorer interface. It identifies content gaps, establishes standards, and provides examples for climate science experts to follow. - -**Last Updated:** 2025-10-01 -**Status:** Draft - Requires expert review and validation - ---- - -## Table of Contents - -1. [Content Gap Analysis](#content-gap-analysis) -2. [Content Framework Standards](#content-framework-standards) -3. [Content Templates](#content-templates) -4. [Example Content for Placeholder Sections](#example-content-for-placeholder-sections) -5. [Implementation Guidelines](#implementation-guidelines) -6. [Quality Assurance Checklist](#quality-assurance-checklist) - ---- - -## 1. 
Content Gap Analysis - -### 1.1 Sections Requiring Complete Content - -The following explorer sections have been identified as placeholders requiring comprehensive scientific content: - -#### **Ocean State** (Sea Theme) -- **Status:** Placeholder - Requires scientific context -- **Metrics:** AMOC Strength, Sea Surface Salinity, Sea Surface Temperature -- **Priority:** HIGH -- **Rationale:** Ocean state metrics are fundamental to understanding climate system heat storage and circulation patterns - -#### **Cryosphere** (Sea Theme) -- **Status:** Placeholder - Requires scientific context -- **Metrics:** Sea Ice Area -- **Priority:** HIGH -- **Rationale:** Critical indicator of climate change with significant feedback mechanisms - -#### **Cloud & Radiation** (Atmosphere Theme) -- **Status:** Placeholder - Minimal content -- **Metrics:** Cloud Radiative Effects -- **Priority:** HIGH -- **Rationale:** Clouds represent one of the largest uncertainties in climate models - -#### **El Niño-Southern Oscillation** (Earth System Theme) -- **Status:** Placeholder - Requires expanded content -- **Metrics:** ENSO Basic Climatology, ENSO Teleconnections -- **Priority:** MEDIUM -- **Rationale:** Key driver of interannual climate variability - -#### **Terrestrial Carbon Cycle** (Land Theme) -- **Status:** Placeholder - Requires scientific context -- **Metrics:** Gross Primary Production, Net Biome Production, Soil Carbon -- **Priority:** MEDIUM -- **Rationale:** Critical for understanding carbon-climate feedbacks - -#### **Warming Levels** (Impact & Adaptation Theme) -- **Status:** Placeholder - Requires complete content -- **Metrics:** Global Mean Temperature Change at Warming Levels -- **Priority:** MEDIUM -- **Rationale:** Important for policy-relevant climate information - -### 1.2 Sections with Adequate Content - -These sections have sufficient scientific context but could benefit from enhancement: - -- **Climate Sensitivity** (Earth System) - Has basic descriptions, could add 
interpretation guidance -- **Modes of Variability** (Atmosphere) - Good metric-level descriptions with references -- **Land Surface & Hydrology** (Land) - Has structure, needs scientific context - ---- - -## 2. Content Framework Standards - -### 2.1 Content Hierarchy - -Scientific content in the Explorer follows a three-tier hierarchy: - -``` -Theme Level (e.g., "Atmosphere") - ├── Section Level (e.g., "Cloud & Radiation") - │ ├── Section description (brief scientific context) - │ └── Metrics - │ ├── Metric 1 (e.g., "Cloud Radiative Effects") - │ │ ├── Metric title - │ │ ├── Metric description (detailed scientific explanation) - │ │ └── Units and interpretation guidance - │ └── Metric 2... -``` - -### 2.2 Section-Level Content Requirements - -Each section should include: - -1. **Title** (5-10 words) - - Clear, descriptive, uses standard terminology - -2. **Description** (1-2 sentences, ~20-40 words) - - Brief overview of the scientific domain - - Explains why these metrics matter for climate science - - Written for researchers who may not be domain experts - -**Example:** -```typescript -{ - title: "Ocean State", - description: "Key indicators of ocean health, circulation, and heat content.", -} -``` - -### 2.3 Metric-Level Content Requirements - -Each metric should include: - -1. **Title** (2-5 words) - - Standard acronym or accepted terminology - - Example: "AMOC Strength", "ECS", "NAM RMSE" - -2. **Description** (2-4 sentences, ~40-100 words) [OPTIONAL but RECOMMENDED] - - Physical meaning of the metric - - Why it matters for climate science - - How to interpret the values (what's "good" or "expected") - - Any important caveats or limitations - -3. **Units** (when applicable) - - Standard SI or domain-specific units - - Example: "Sv" (Sverdrup), "K" (Kelvin), "psu" (practical salinity unit) - -4. 
**References** (when available) - - DOI links to key papers - - Use format: `https://doi.org/xxx` - -**Example:** -```typescript -{ - type: "box-whisker-chart", - provider: "pmp", - diagnostic: "extratropical-modes-of-variability-nam", - title: "NAM RMSE", - description: "Northern Annular Mode (NAM) individual-model pattern RMSE. Lower values indicate better representation of the spatial pattern of variability. See https://doi.org/10.1007/s00382-018-4355-4", - metricUnits: "hPa", -} -``` - -### 2.4 Writing Style Guidelines - -**Audience:** Climate researchers who may not be experts in the specific sub-domain - -**Tone:** -- Scientific but accessible -- Avoid jargon where possible -- Define acronyms on first use within each section -- Use active voice when possible - -**Length:** -- Section descriptions: 20-40 words -- Metric descriptions: 40-100 words -- Be concise but complete - -**Technical Level:** -- Assume graduate-level climate science knowledge -- Explain domain-specific terminology -- Include physical interpretation, not just mathematical definitions - ---- - -## 3. Content Templates - -### 3.1 Section Description Template - -``` -[Domain/Process] that [primary function/role]. [Why it matters for climate]. -``` - -**Examples:** - -✅ Good: "The exchange of carbon between the land surface and the atmosphere, regulating atmospheric CO₂ concentrations and climate feedbacks." - -❌ Too vague: "Important land processes." - -❌ Too technical: "The terrestrial carbon cycle encompasses autotrophic and heterotrophic respiration, primary productivity, and soil organic matter decomposition." - -### 3.2 Metric Description Template - -#### For Bias/Error Metrics: - -``` -[Full name of metric] ([acronym]). [Physical quantity being measured]. -[How to interpret: e.g., "Lower values indicate better agreement with observations"]. -[Optional: Important considerations or limitations]. -[Reference if applicable]. 
-``` - -**Example:** -"Atlantic Meridional Overturning Circulation (AMOC) strength bias relative to RAPID observations. The AMOC is a critical component of ocean heat transport, carrying warm surface waters northward and cold deep waters southward in the Atlantic. Positive bias indicates overestimation of circulation strength. Models typically show 5-20 Sv spread, with observations around 17 Sv at 26°N." - -#### For Direct Physical Quantities: - -``` -[Physical quantity] [in region/context]. [Why this matters]. -[Typical range or expected behavior]. -[Optional: Known model biases or uncertainties]. -``` - -**Example:** -"Sea surface temperature (SST) over the global ocean. SST is a key climate variable that influences atmospheric circulation, precipitation patterns, and ocean-atmosphere heat exchange. Observational estimates show global mean SST of approximately 18-19°C, with models generally capturing the spatial pattern but showing regional biases of 1-3 K." - -#### For Climate Sensitivity Metrics: - -``` -[Full name] ([acronym]). [Definition in physical terms]. -[Typical range from IPCC or literature]. -[Interpretation guidance]. -[Reference to key assessment]. -``` - -**Example:** -"Equilibrium Climate Sensitivity (ECS). The equilibrium global mean surface temperature increase following a doubling of atmospheric CO₂ concentration. IPCC AR6 assesses ECS to be very likely in the range 2.0-5.0 K, with a best estimate of 3.0 K. Higher ECS values indicate stronger warming response to greenhouse gas forcing." 
- -### 3.3 Units Reference Table - -| Domain | Common Units | Symbol | Notes | -|--------|-------------|---------|-------| -| Temperature | Kelvin | K | Always use K, not °C, for model comparisons | -| Ocean Transport | Sverdrup | Sv | 1 Sv = 10⁶ m³/s | -| Salinity | Practical Salinity Unit | psu | Dimensionless, sometimes shown as g/kg | -| Carbon Flux | Petagrams per year | PgC/yr | 1 Pg = 10¹⁵ g | -| Carbon Density | Kilograms per square meter | kgC/m² | For carbon stocks | -| Sea Ice | Square kilometers | km² | For area; sometimes 10⁶ km² | -| Precipitation | Millimeters per day | mm/day | Or kg/m²/s in models | -| Radiation | Watts per square meter | W/m² | For fluxes | -| Pressure | Hectopascal | hPa | For atmospheric pressure | - ---- - -## 4. Example Content for Placeholder Sections - -The following sections provide scientifically accurate example content for the identified placeholder sections. **These examples require review and validation by domain experts before implementation.** - -### 4.1 Ocean State (Sea Theme) - -**Section-Level Content:** - -```typescript -{ - title: "Ocean State", - description: "Key indicators of ocean circulation, heat content, and salinity that regulate climate on multiple timescales and transport heat globally.", - placeholder: false, // Remove after content review -} -``` - -**Metric 1: AMOC Strength** - -```typescript -{ - type: "box-whisker-chart", - provider: "ilamb", - diagnostic: "amoc-rapid", - title: "AMOC Strength", - description: "Atlantic Meridional Overturning Circulation (AMOC) strength at 26°N. The AMOC transports warm surface waters northward and cold deep waters southward, playing a critical role in regional climate, particularly in the North Atlantic and Europe. The metric shown is the bias relative to RAPID array observations (2004-present), which measure approximately 17 Sv. Models typically range from 10-25 Sv, with biases indicating potential issues in representing deep water formation or vertical mixing. 
Weakening AMOC is projected under climate change with implications for regional temperature and precipitation patterns.", - metricUnits: "Sv", - groupingConfig: { - groupBy: "statistic", - hue: "statistic", - }, - otherFilters: { - region: "None", - metric: "Bias", - statistic: "Period Mean", - }, -} -``` - -**Metric 2: Sea Surface Salinity** - -```typescript -{ - type: "box-whisker-chart", - provider: "ilamb", - diagnostic: "so-woa2023-surface", - title: "Sea Surface Salinity", - description: "Sea surface salinity (SSS) compared to World Ocean Atlas 2023 observations. SSS patterns reflect the balance of evaporation, precipitation, river runoff, and ocean circulation. Typical global mean values are 34-35 psu, with subtropical gyres showing higher salinity (>36 psu) due to excess evaporation, and high-latitude regions showing lower values (<33 psu) due to precipitation and freshwater input. Model biases in SSS can indicate problems with freshwater forcing, ocean mixing, or the hydrological cycle representation. Regional SSS patterns are important for ocean stratification and deep water formation.", - metricUnits: "psu", - groupingConfig: { - groupBy: "statistic", - hue: "statistic", - }, - otherFilters: { - region: "None", - metric: "Bias", - statistic: "Period Mean", - }, -} -``` - -**Metric 3: Sea Surface Temperature** - -```typescript -{ - type: "box-whisker-chart", - provider: "ilamb", - diagnostic: "thetao-woa2023-surface", - title: "Sea Surface Temperature", - description: "Sea surface temperature (SST) bias relative to World Ocean Atlas 2023 observations. SST is a fundamental climate variable influencing air-sea heat exchange, atmospheric circulation patterns, and marine ecosystems. Global mean SST is approximately 18-19°C, with regional variations from -2°C in polar regions to >30°C in the warm pool regions. Common model biases include cold bias in eastern boundary upwelling systems and warm bias in the Southern Ocean. 
SST biases can affect atmospheric model performance through air-sea coupling and have implications for regional climate, ENSO simulation, and tropical cyclone activity.", - metricUnits: "K", - groupingConfig: { - groupBy: "statistic", - hue: "statistic", - }, - otherFilters: { - region: "None", - metric: "Bias", - statistic: "Period Mean", - }, -} -``` - -**Expert Review Needed:** -- Validate AMOC typical values and observation period -- Verify SSS unit conventions (psu vs g/kg) -- Confirm typical SST ranges and common bias patterns -- Add references to key papers on ocean state metrics - -### 4.2 Cryosphere (Sea Theme) - -**Section-Level Content:** - -```typescript -{ - title: "Cryosphere", - description: "Sea ice extent and volume metrics that reflect polar amplification of climate change and drive important climate feedbacks through surface albedo changes.", - placeholder: false, // Remove after content review -} -``` - -**Metric: Sea Ice Area** - -```typescript -{ - type: "box-whisker-chart", - provider: "esmvaltool", - diagnostic: "sea-ice-area-basic", - title: "Sea Ice Area", - description: "Sea ice area (SIA) is the total extent of ocean covered by sea ice, typically reported separately for Arctic and Antarctic regions and by season. Observed Arctic SIA has declined dramatically (approximately 13% per decade in September minimum since 1979), while Antarctic trends are more complex with regional variations. Models show substantial spread in simulating both mean state and trends, with many models underestimating observed Arctic decline. Sea ice area is crucial because it affects surface albedo (ice-albedo feedback), ocean-atmosphere heat exchange, and polar amplification. 
Metrics may include mean state bias, seasonal cycle amplitude, and trend comparisons against satellite observations.", - metricUnits: "10^6 km²", - groupingConfig: { - groupBy: "metric", - hue: "metric", - }, -} -``` - -**Expert Review Needed:** -- Verify Arctic decline rate (13% per decade) -- Confirm Antarctic sea ice behavior and trends -- Validate typical model performance characteristics -- Add references to recent assessments (IPCC AR6, etc.) -- Consider adding volume vs area distinction if relevant - -### 4.3 Cloud & Radiation (Atmosphere Theme) - -**Section-Level Content:** - -```typescript -{ - title: "Cloud & Radiation", - description: "Cloud properties and radiative effects that represent one of the largest uncertainties in climate model projections due to complex microphysical and dynamical processes.", - placeholder: false, // Remove after content review -} -``` - -**Metric: Cloud Radiative Effects** - -```typescript -{ - type: "box-whisker-chart", - provider: "esmvaltool", - diagnostic: "cloud-radiative-effects", - title: "Cloud Radiative Effects", - description: "Cloud radiative effect (CRE), also called cloud radiative forcing, quantifies the impact of clouds on Earth's radiation budget. It is calculated as the difference between all-sky and clear-sky net radiative fluxes at the top of atmosphere. Globally, the net CRE is approximately -20 W/m², indicating a cooling effect. This net value results from opposing shortwave (SW) cooling (approximately -50 W/m² due to reflected sunlight) and longwave (LW) warming (approximately +30 W/m² due to greenhouse effect). Regional CRE patterns depend on cloud types, optical depth, and altitude. 
Model biases in CRE often indicate problems with cloud amount, vertical distribution, or optical properties, with important implications for climate sensitivity and regional climate projections.", - metricUnits: "W/m²", - otherFilters: { statistic: "bias" }, - groupingConfig: { - groupBy: "metric", - hue: "metric", - }, -} -``` - -**Expert Review Needed:** -- Verify global mean CRE values (~-20 W/m² net) -- Confirm SW and LW components -- Validate relationship to climate sensitivity -- Add references to key cloud feedback studies -- Consider mentioning specific cloud regimes if relevant to diagnostic - -### 4.4 El Niño-Southern Oscillation (Earth System Theme) - -**Enhanced Section-Level Content:** - -```typescript -{ - title: "El Niño-Southern Oscillation", - description: "Characteristics of ENSO, the dominant mode of interannual climate variability, which influences global weather patterns, precipitation, and temperature extremes.", - placeholder: false, // Remove after content review -} -``` - -**Metric 1: ENSO Basic Climatology** - -```typescript -{ - type: "box-whisker-chart", - provider: "esmvaltool", - diagnostic: "enso-basic-climatology", - title: "ENSO Basic Climatology", - description: "Basic climatological metrics of ENSO variability in the tropical Pacific, including the amplitude, spatial pattern, and temporal characteristics of sea surface temperature variability. Key metrics include the standard deviation of SST anomalies in the Niño 3.4 region (5°N-5°S, 170°W-120°W), which typically ranges from 0.8-1.2 K in observations. Models often struggle with ENSO amplitude, period, and asymmetry between El Niño and La Niña events. 
Accurate ENSO representation is crucial as it affects global teleconnections and seasonal predictability.", - groupingConfig: { - groupBy: "metric", - hue: "metric", - }, - otherFilters: { region: "global" }, -} -``` - -**Metric 2: ENSO Teleconnections** - -```typescript -{ - type: "box-whisker-chart", - provider: "pmp", - diagnostic: "enso_tel", - title: "ENSO Teleconnections", - description: "ENSO teleconnections represent the far-reaching impacts of tropical Pacific SST variability on global climate patterns through atmospheric circulation changes. These include effects on North American winter climate, Indian monsoon, Australian rainfall, and Atlantic hurricane activity. Metrics assess how well models capture the spatial patterns and strength of these remote influences, typically evaluated through correlation or regression patterns. Proper representation of teleconnections is essential for seasonal prediction and understanding regional climate variability and change.", - groupingConfig: { - groupBy: "metric", - hue: "metric", - }, -} -``` - -**Expert Review Needed:** -- Verify Niño 3.4 region definition and typical variability -- Validate key teleconnection patterns mentioned -- Add specific references to ENSO evaluation studies -- Consider mentioning diversity of ENSO events if relevant - -### 4.5 Terrestrial Carbon Cycle (Land Theme) - -**Enhanced Section-Level Content:** - -```typescript -{ - title: "Terrestrial Carbon Cycle", - description: "The exchange of carbon between land ecosystems and the atmosphere, including photosynthesis, respiration, and soil carbon storage, which critically influence atmospheric CO₂ concentrations and climate feedbacks.", - placeholder: false, // Remove after content review -} -``` - -**Metric 1: Gross Primary Production** - -```typescript -{ - type: "box-whisker-chart", - provider: "ilamb", - diagnostic: "gpp-fluxnet2015", - title: "Gross Primary Production", - description: "Gross Primary Production (GPP) is the total amount 
of carbon fixed by photosynthesis in terrestrial ecosystems. Global GPP is approximately 120 PgC/yr based on FLUXNET tower observations and satellite-derived estimates, with largest values in tropical forests (>2500 gC/m²/yr) and lowest in deserts and polar regions. Model evaluation focuses on spatial patterns, seasonal cycles, and interannual variability. GPP biases often indicate problems with vegetation distribution, leaf area index, or climate forcing. Accurate GPP simulation is fundamental for carbon cycle projections and carbon-concentration and carbon-climate feedback quantification.", - metricUnits: "gC/m²/yr", - otherFilters: { region: "global" }, - groupingConfig: { - groupBy: "metric", - hue: "metric", - }, -} -``` - -**Metric 2: Net Biome Production** - -```typescript -{ - type: "box-whisker-chart", - provider: "ilamb", - diagnostic: "nbp-hoffman", - title: "Net Biome Production", - description: "Net Biome Production (NBP) represents the net carbon exchange between land and atmosphere, accounting for photosynthesis, respiration, disturbances (fire, harvest), and other losses. Observational estimates suggest NBP varies from a small source to a moderate sink (approximately 0-3 PgC/yr land sink) with large interannual variability driven by climate variations and disturbances. NBP is critical for understanding the land carbon sink that currently removes about 30% of anthropogenic CO₂ emissions. Model spread in NBP indicates uncertainties in disturbance processes, climate sensitivity of ecosystems, and carbon-climate feedbacks.", - metricUnits: "PgC/yr", - clipMax: 2000, - groupingConfig: { - groupBy: "metric", - hue: "metric", - }, -} -``` - -**Metric 3: Soil Carbon** - -```typescript -{ - type: "box-whisker-chart", - provider: "ilamb", - diagnostic: "csoil-hwsd2", - title: "Soil Carbon", - description: "Soil organic carbon (SOC) stocks represent the largest terrestrial carbon reservoir, with global estimates around 1500-2400 PgC in the top 1-2 meters. 
SOC distribution varies with climate, vegetation, and soil properties, with highest densities in peatlands, permafrost regions, and tropical soils. Model evaluation against databases like HWSD2 (Harmonized World Soil Database) assesses spatial patterns and total stocks. Uncertainty in SOC representation affects projections of carbon-climate feedbacks, particularly regarding permafrost thaw and enhanced decomposition under warming. SOC response to climate change remains a major uncertainty in Earth system models.", - metricUnits: "kgC/m²", - groupingConfig: { - groupBy: "metric", - hue: "metric", - }, -} -``` - -**Expert Review Needed:** -- Verify global GPP estimate (~120 PgC/yr) -- Confirm NBP range and typical sink strength -- Validate soil carbon stocks (1500-2400 PgC) -- Add references to key carbon cycle papers and IPCC assessments -- Consider mentioning specific databases used (FLUXNET, HWSD2) - -### 4.6 Warming Levels (Impact & Adaptation Theme) - -**Enhanced Section-Level Content:** - -```typescript -{ - title: "Warming Levels", - description: "Climate conditions at specific global warming levels relative to pre-industrial, directly relevant to Paris Agreement targets and impact assessments across sectors.", - placeholder: false, // Remove after content review -} -``` - -**Metric: Global Mean Temperature Change at Warming Levels** - -```typescript -{ - type: "box-whisker-chart", - provider: "esmvaltool", - diagnostic: "climate-at-global-warming-level", - title: "Global Mean Temperature Change", - description: "Global mean surface temperature (GMST) change relative to 1850-1900 baseline at specific warming levels (e.g., 1.5°C, 2°C, 3°C, 4°C). These warming levels correspond to Paris Agreement targets and are used to assess climate impacts and adaptation needs. Analysis shows when different models reach each warming level and the pattern of regional climate changes at that level. Current observations show approximately 1.1°C warming as of 2011-2020. 
The spread across models at a given warming level reflects uncertainties in regional climate responses even when global temperature is constrained. This framework enables policy-relevant comparisons of climate impacts across scenarios and models.", - metricUnits: "K", - placeholder: true, // Keep until diagnostic is fully implemented -} -``` - -**Expert Review Needed:** -- Verify current observed warming level (~1.1°C for 2011-2020) -- Confirm typical warming levels evaluated (1.5, 2, 3, 4°C) -- Add references to IPCC Special Report on 1.5°C -- Validate Paris Agreement target details - ---- - -## 5. Implementation Guidelines - -### 5.1 Where Content Lives - -Content is implemented in TypeScript files within the frontend: - -**Location:** `frontend/src/components/explorer/theme/` - -**Files:** -- `earthSystem.tsx` - Earth System theme -- `atmosphere.tsx` - Atmosphere theme -- `sea.tsx` - Sea (Ocean) theme -- `land.tsx` - Land theme -- `impactAndAdaptation.tsx` - Impact & Adaptation theme - -### 5.2 Implementation Process - -1. **Review Example Content** - - Have domain experts review the example content in Section 4 - - Verify scientific accuracy - - Check for missing important caveats - - Validate typical values and ranges - -2. **Update TypeScript Files** - - Add `description` fields to metrics following the templates - - Update section descriptions as needed - - Remove `placeholder: true` flags after review - - Ensure units are correct - -3. **Test in Browser** - - View changes in the Explorer interface - - Verify descriptions display correctly - - Check that technical level is appropriate - - Ensure descriptions fit in UI without excessive scrolling - -4. 
**Iterate Based on User Feedback** - - Collect feedback from researchers using the tool - - Refine descriptions for clarity - - Add references as needed - -### 5.3 Code Example - -```typescript -{ - title: "Your Section Title", - description: "Brief scientific context following template.", - placeholder: false, // Only remove after expert review - content: [ - { - type: "box-whisker-chart", - provider: "diagnostic-provider", - diagnostic: "diagnostic-slug", - title: "Metric Name", - description: "Detailed metric description following guidelines. Include physical meaning, typical values, interpretation guidance, and caveats. Add references if available.", - metricUnits: "units", - groupingConfig: { - groupBy: "facet", - hue: "facet", - }, - otherFilters: { /* ... */ }, - }, - ], -} -``` - -### 5.4 Version Control - -- Create a feature branch for content updates: `feat/explorer-content-{theme}` -- Commit changes with clear messages: `feat: Add scientific context to Ocean State metrics` -- Request review from domain experts before merging -- Document major content changes in CHANGELOG or commit messages - ---- - -## 6. 
Quality Assurance Checklist - -Before finalizing content, verify: - -### Scientific Accuracy -- [ ] Physical explanations are correct -- [ ] Typical values/ranges are accurate -- [ ] Units are correct and clearly stated -- [ ] Important caveats are mentioned -- [ ] References are valid and accessible - -### Clarity -- [ ] Content is accessible to non-expert climate scientists -- [ ] Jargon is defined or avoided -- [ ] Acronyms are spelled out on first use -- [ ] Sentences are concise and clear - -### Completeness -- [ ] All placeholder sections have content -- [ ] Section descriptions provide context -- [ ] Metric descriptions explain "why it matters" -- [ ] Interpretation guidance is provided (what values mean) - -### Consistency -- [ ] Writing style matches guidelines -- [ ] Length guidelines are followed (20-40 words for sections, 40-100 for metrics) -- [ ] Terminology is consistent across themes -- [ ] Format matches templates - -### Technical -- [ ] Code compiles without errors -- [ ] Content displays correctly in UI -- [ ] Links/references work -- [ ] Placeholder flags removed after review - -### Review -- [ ] Content reviewed by domain expert -- [ ] Feedback incorporated -- [ ] Changes documented -- [ ] Approved for implementation - ---- - -## 7. References and Resources - -### Key Climate Science References - -1. **IPCC AR6 Working Group I Report** (2021) - - Chapter 3: Human Influence on Climate - - Chapter 7: Earth's Energy Budget - - Chapter 9: Ocean, Cryosphere and Sea Level Change - - https://www.ipcc.ch/report/ar6/wg1/ - -2. **IPCC Special Report on 1.5°C** (2018) - - https://www.ipcc.ch/sr15/ - -3. **CMIP6 Model Evaluation Studies** - - Eyring et al. 
(2021): Overview of CMIP6 results - - https://doi.org/10.5194/gmd-14-5087-2021 - -### Observational Datasets Referenced - -- **World Ocean Atlas 2023:** Ocean temperature and salinity climatology -- **RAPID Array:** AMOC observations at 26°N -- **FLUXNET2015:** Terrestrial flux measurements -- **HWSD2:** Harmonized World Soil Database - -### Model Evaluation Frameworks - -- **ILAMB:** International Land Model Benchmarking -- **ESMValTool:** Earth System Model Evaluation Tool -- **PMP:** PCMDI Metrics Package - ---- - -## 8. Maintenance and Updates - -### Regular Review Cycle - -- **Quarterly:** Review new scientific literature for updated typical values -- **Annually:** Major content review with domain experts -- **As Needed:** Updates when new observations or assessments become available (e.g., new IPCC reports) - -### Tracking Changes - -Maintain a change log for significant content updates: - -``` -## Content Change Log - -### 2025-10-01 - Initial Framework -- Created comprehensive content framework -- Added example content for Ocean State, Cryosphere, Cloud & Radiation -- Established templates and guidelines - -### [Future Date] - Expert Review -- Domain expert review of Ocean State content -- Validated AMOC typical values -- Added references to key papers -``` - -### Contact for Questions - -For questions about scientific content: -- **Ocean/Sea content:** [Ocean science expert contact] -- **Atmosphere content:** [Atmosphere science expert contact] -- **Land content:** [Land science expert contact] -- **General framework:** [Project lead contact] - ---- - -## Appendix A: Common Metrics and Their Meanings - -### Climate Sensitivity Metrics - -| Metric | Full Name | Definition | Typical Range | -|--------|-----------|------------|---------------| -| ECS | Equilibrium Climate Sensitivity | Temperature increase at 2×CO₂ equilibrium | 2.0-5.0 K (very likely; likely 2.5-4.0 K per IPCC AR6) | -| TCR | Transient Climate Response | Temperature increase at 2×CO₂ (1% yr⁻¹ increase) | 1.4-2.2 K (likely) | 
-| TCRE | Transient Climate Response to Emissions | Temperature change per cumulative CO₂ emissions | 1.0-2.3 K/EgC | -| ZEC | Zero Emission Commitment | Temperature change after emissions cease | -0.3 to 0.3 K | - -### Ocean Metrics - -| Metric | Full Name | Units | Typical Values | -|--------|-----------|-------|----------------| -| AMOC | Atlantic Meridional Overturning Circulation | Sv | 10-25 Sv (models), ~17 Sv (obs at 26°N) | -| SST | Sea Surface Temperature | K or °C | 18-19°C (global mean) | -| SSS | Sea Surface Salinity | psu | 34-35 psu (global mean) | - -### Carbon Cycle Metrics - -| Metric | Full Name | Units | Typical Values | -|--------|-----------|-------|----------------| -| GPP | Gross Primary Production | PgC/yr or gC/m²/yr | ~120 PgC/yr (global) | -| NBP | Net Biome Production | PgC/yr | 0-3 PgC/yr (land sink) | -| SOC | Soil Organic Carbon | kgC/m² or PgC | 1500-2400 PgC (global) | - -### Radiation Metrics - -| Metric | Full Name | Units | Typical Values | -|--------|-----------|-------|----------------| -| CRE | Cloud Radiative Effect | W/m² | -20 W/m² (net global) | -| SW CRE | Shortwave Cloud Radiative Effect | W/m² | -50 W/m² (cooling) | -| LW CRE | Longwave Cloud Radiative Effect | W/m² | +30 W/m² (warming) | - ---- - -## Appendix B: Glossary of Terms - -**Bias:** The systematic difference between model output and observations. Positive bias means model values are higher than observed. - -**CMIP6:** Coupled Model Intercomparison Project Phase 6, the latest coordinated climate model comparison effort. - -**Climatology:** Long-term average of climate variables, typically 30 years. - -**Feedback:** A process that amplifies (positive feedback) or dampens (negative feedback) the climate system's response to forcing. - -**RMSE:** Root Mean Square Error, a measure of the difference between model and observations accounting for both bias and pattern errors. 
- -**Teleconnection:** A statistical correlation between climate variables at distant locations, often indicating a physical connection through atmospheric or oceanic circulation. - -**Temporal Resolution:** The time interval between data points (e.g., daily, monthly, annual). - -**Spatial Resolution:** The grid size or detail level of spatial data (e.g., 1° × 1° grid). - ---- - -**End of Framework Document** - -**Next Steps:** -1. Distribute to domain experts for review of example content (Section 4) -2. Iterate on scientific accuracy and clarity -3. Implement reviewed content in codebase -4. Test with research users -5. Establish regular update schedule diff --git a/docs/scientific-content-strategy.md b/docs/scientific-content-strategy.md deleted file mode 100644 index eee181e..0000000 --- a/docs/scientific-content-strategy.md +++ /dev/null @@ -1,1432 +0,0 @@ -# Climate REF Application: Scientific Content Strategy - - ---- - -## 1. Executive Summary - -### Current State -The Climate REF application has established a strong foundation for scientific content through the Explorer interface, with comprehensive documentation for thematic metrics across five major themes (Atmosphere, Sea, Land, Earth System, and Impact & Adaptation). However, significant opportunities exist to extend this scientific context throughout the application. - -### Strategic Vision -Transform the Climate REF application from a data presentation tool into a scientifically-rich educational platform that: -- Provides context at every decision point -- Guides users toward scientifically meaningful comparisons -- Links to authoritative references and external resources -- Maintains scientific accuracy through expert review - -### Key Priorities -1. **Diagnostics Discovery** (HIGH) - Help users understand what each diagnostic measures and why it matters -2. **Figure Interpretation** (HIGH) - Provide context for understanding diagnostic outputs -3. 
**Dataset Context** (MEDIUM) - Explain dataset types, sources, and appropriate uses -4. **Help & Documentation** (MEDIUM) - Comprehensive user guides and scientific glossary - ---- - -## 2. Work Completed: Explorer Framework - -### 2.1 Achievements - -The Explorer interface work has established: - -1. **Comprehensive Content Framework** - - 5 thematic areas fully documented - - ~20+ placeholder sections identified and populated with example content - - Scientific content templates for consistency - - Quality assurance checklists - -2. **Implementation Patterns** - - TypeScript-based content management in theme files - - Structured approach to metric descriptions - - Integration with existing data models - - Responsive UI considerations - -3. **Scientific Standards** - - Writing style guidelines for accessibility - - Unit conventions and typical value ranges - - Reference citation patterns - - Expert review workflow - -### 2.2 Key Deliverables - -- **Framework Document**: [`explorer-scientific-content-framework.md`](./explorer-scientific-content-framework.md) -- **Implementation**: Theme files in `frontend/src/components/explorer/theme/` - -### 2.3 Lessons Learned - -**What Worked Well:** -- Template-based approach ensured consistency -- Example content provided concrete guidance for experts -- Separation of framework from implementation details -- Clear prioritization of content areas - -**Challenges:** -- Balancing technical accuracy with accessibility -- Determining appropriate level of detail -- Managing content length within UI constraints -- Coordinating expert reviews across domains - ---- - -## 3. 
Additional Areas Requiring Scientific Context - -### 3.1 Diagnostics Index Page - -**Current State:** -Location: `frontend/src/routes/_app/diagnostics.index.tsx` - -The diagnostics listing shows cards with basic metadata (provider, name, execution counts) but lacks: -- Purpose and scientific rationale for each diagnostic -- Guidance on when to use specific diagnostics -- Relationships between diagnostics -- Expected outputs and interpretation guidance - -**Priority:** **HIGH** - -**Rationale:** This is the primary entry point for users exploring available diagnostics. Without context, users cannot make informed choices about which analyses to review. - -**Proposed Enhancements:** - -1. **Diagnostic Card Content** - - One-sentence "why this matters" statement - - Expected output types (scalars, timeseries, maps) - - Typical use cases - - Related diagnostics - -2. **Thematic Organization** - - Group diagnostics by scientific domain - - Add domain-level introductions - - Visual indicators for diagnostic types - -3. 
**Search and Discovery** - - Semantic search by scientific concept - - Filter by climate theme or variable - - Sort by relevance to research question - -**Example Enhancement:** -```typescript -{ - diagnostic: "global-mean-timeseries", - scientificContext: { - purpose: "Evaluates model representation of long-term climate trends", - useCase: "Essential for assessing climate sensitivity and transient response", - outputs: ["timeseries", "statistics"], - relatedDiagnostics: ["climate-sensitivity", "trend-analysis"] - } -} -``` - -### 3.2 Individual Diagnostic Detail Pages - -**Current State:** -Location: `frontend/src/routes/_app/diagnostics.$providerSlug.$diagnosticSlug/` - -Diagnostic pages show execution groups and results but lack: -- Scientific interpretation of the diagnostic methodology -- Guidance on interpreting results -- Context for typical model performance -- Links to relevant literature - -**Priority:** **HIGH** - -**Rationale:** Users viewing diagnostic results need to understand what they're looking at and how to interpret patterns in the data. - -**Proposed Enhancements:** - -1. **Diagnostic Overview Section** - - Full description of what the diagnostic measures - - Methodology explanation (2-4 paragraphs) - - Key references - - Known limitations and caveats - -2. **Result Interpretation Guide** - - What "good" performance looks like - - Common model biases for this diagnostic - - Interpretation of different metrics/statistics - - Regional vs global considerations - -3. 
**AFT Integration Enhancement** - - Currently links to AFT diagnostic metadata - - Expand to show full AFT description - - Link to IPCC chapters and relevant sections - - Connection to CMIP evaluation priorities - -**Data Model Enhancement Needed:** -```python -class DiagnosticDetail(DiagnosticSummary): - """Enhanced diagnostic with full scientific context""" - methodology: str # Detailed methodology explanation - interpretation_guide: str # How to interpret results - typical_performance: str # What to expect from models - key_references: list[Reference] - limitations: str | None - related_ipcc_chapters: list[str] | None -``` - -### 3.3 Figure Galleries and Visualizations - -**Current State:** -Location: `frontend/src/routes/_app/diagnostics.$providerSlug.$diagnosticSlug/figures.tsx` - -Figure galleries display diagnostic outputs without: -- Captions explaining what's shown -- Interpretation guidance for patterns -- Context for color scales and units -- Links to underlying data - -**Priority:** **HIGH** - -**Rationale:** Figures are the primary way users evaluate model performance. Without context, visual patterns may be misinterpreted. - -**Proposed Enhancements:** - -1. **Figure Metadata Enhancement** - - Standardized caption format - - Explanation of visualization type - - Guidance on interpretation (e.g., "Red colors indicate warm bias") - - Note on statistical significance - -2. **Interactive Elements** - - Hover tooltips with contextual information - - Click to expand with detailed interpretation - - Compare with reference data explanation - - Link to related figures - -3. 
**Gallery Organization** - - Group by figure type (maps, timeseries, statistics) - - Provide section introductions - - Highlight key findings - - Progressive disclosure of detail - -**Technical Implementation:** -```typescript -interface FigureContext { - caption: string; // What is shown - interpretation: string; // What it means - colorScaleGuide?: string; // How to read colors - statisticalNotes?: string; // Significance, uncertainty - relatedFigures?: string[]; // Links to related visualizations -} -``` - -### 3.4 Dataset Detail Pages - -**Current State:** -Location: `frontend/src/routes/_app/datasets.$slug.tsx` - -Dataset pages show metadata and execution lists but lack: -- Scientific context for dataset types -- Explanation of CMIP6 metadata (experiment_id, variant_label, etc.) -- Guidance on appropriate use of different datasets -- Quality flags or known issues - -**Priority:** **MEDIUM** - -**Rationale:** Users need to understand dataset provenance and appropriate usage, especially for different source types (models vs observations). - -**Proposed Enhancements:** - -1. **Dataset Type Context** - - Explanation of CMIP6 vs obs4MIPs vs other types - - Description of what experiment_id means - - Explanation of ensemble members (variant_label) - - Temporal and spatial coverage context - -2. **Quality Indicators** - - Known issues or caveats - - Completeness metrics - - Comparison to other versions - - Update frequency and status - -3. **Usage Guidance** - - Appropriate uses for this dataset type - - Common applications in literature - - Suggested diagnostics for evaluation - - Related datasets for comparison - -**Example Content Structure:** -```markdown -### CMIP6 Model Output: ACCESS-CM2 (historical, r1i1p1f1) - -**Experiment:** Historical simulation (1850-2014) forced with observed GHG concentrations, -aerosols, and land use changes. - -**Ensemble Member:** First realization (r1) with initialization 1 (i1), physics variant 1 (p1), -and forcing variant 1 (f1). 
This represents one possible climate state from this model -configuration. - -**Typical Use:** Evaluating model performance against observations during the historical -period. Compare with obs4MIPs datasets for validation. - -**Known Issues:** [Link to model documentation or known issues] -``` - -### 3.5 General Help and Documentation - -**Current State:** -Minimal in-app help beyond basic navigation - -**Priority:** **MEDIUM** - -**Rationale:** Users need comprehensive documentation to use the tool effectively and understand scientific concepts. - -**Proposed Sections:** - -1. **Getting Started Guide** - - Overview of Climate REF capabilities - - Common workflows - - How to find relevant diagnostics - - Interpreting results - -2. **Scientific Glossary** - - Climate science terminology - - Model evaluation concepts - - Statistical terms - - CMIP6/AFT terminology - -3. **How-To Guides** - - "How to evaluate a specific model" - - "How to compare models for a given variable" - - "How to download and use results" - - "How to cite Climate REF" - -4. **Reference Material** - - Links to IPCC reports - - CMIP6 documentation - - Provider tool documentation - - Key papers on model evaluation - -**Implementation Location:** -- Create new `/docs` or `/help` route -- Add help icons throughout interface -- Contextual tooltips on complex terms -- Searchable knowledge base - ---- - -## 4. Content Strategy Recommendations - -### 4.1 Engaging Domain Experts - -**Identification:** -- IPCC author lists (WG1 authors by chapter) -- CMIP Model Intercomparison Project participants -- Diagnostic tool developers (ILAMB, ESMValTool, PMP teams) -- University climate science programs -- National laboratory climate researchers - -**Engagement Approach:** - -1. 
**Tiered Engagement Model** - - **Tier 1 - Content Review** (2-4 hours per domain) - - Review example content for accuracy - - Suggest typical values and ranges - - Provide key references - - - **Tier 2 - Content Creation** (8-16 hours per domain) - - Write detailed diagnostic descriptions - - Develop interpretation guides - - Create educational content - - - **Tier 3 - Advisory Board** (Quarterly meetings) - - Strategic guidance on content priorities - - Review major content additions - - Maintain scientific standards - -2. **Recognition and Incentives** - - Co-authorship on Climate REF papers - - Acknowledgment in documentation - - DOI for contributed content sections - - Integration with academic profiles (ORCID) - - Support for conference presentations - -3. **Expert Recruitment Strategy** - - Target diagnostic tool developers first (already engaged) - - Leverage existing CMIP community connections - - Present at AGU, EGU, and other conferences - - Workshop on model evaluation best practices - -### 4.2 Prioritization Framework - -**Priority Matrix:** - -| Area | Scientific Impact | User Frequency | Complexity | Priority | -|------|------------------|----------------|------------|----------| -| Diagnostics Index | High | Very High | Low | **HIGH** | -| Diagnostic Details | High | High | Medium | **HIGH** | -| Figure Interpretation | High | High | Medium | **HIGH** | -| Dataset Context | Medium | Medium | Low | **MEDIUM** | -| Help Documentation | Medium | Medium | Medium | **MEDIUM** | -| Explorer Enhancement | High | Medium | Low | **COMPLETE** | - -**Sequencing Rationale:** -1. Start with high-traffic, high-impact areas -2. Build on Explorer framework patterns -3. Leverage existing AFT diagnostic metadata -4. 
Coordinate with provider tool documentation - -**Phase 1** (Months 1-3): Diagnostics discovery and figure interpretation -**Phase 2** (Months 4-6): Dataset context and diagnostic details -**Phase 3** (Months 7-9): Help documentation and advanced features - -### 4.3 Quality Assurance Processes - -**Content Review Workflow:** - -1. **Initial Draft** - - Use templates from Explorer framework - - Research literature for typical values - - Document sources and uncertainties - -2. **Expert Review** - - Domain expert review (scientific accuracy) - - Climate scientist review (accessibility) - - User testing (comprehension) - -3. **Implementation Review** - - Technical review (integration) - - UI/UX review (presentation) - - Accessibility review (WCAG compliance) - -4. **Approval** - - Project lead sign-off - - Community feedback period - - Publication to production - -**Quality Checklist:** -- [ ] Scientific accuracy verified by domain expert -- [ ] Appropriate level of detail for target audience -- [ ] References provided for key claims -- [ ] Units clearly stated and correct -- [ ] Consistent with existing content -- [ ] Accessible to non-expert climate scientists -- [ ] Tested with representative users - -### 4.4 Maintenance and Update Procedures - -**Regular Review Cycle:** - -1. **Quarterly Updates** - - Review new scientific literature - - Update typical value ranges - - Add newly published references - - Address user feedback - -2. **Annual Major Review** - - Full content audit - - Expert re-review of critical sections - - Update for new IPCC assessments - - Align with CMIP7 developments - -3. 
**Triggered Updates** - - New IPCC reports - - Major scientific findings - - User-reported errors - - Provider tool updates - -**Version Control:** -- Use Git for content tracking -- Tag major content versions -- Maintain change log -- Document review history - -**Content Ownership:** -- Assign domain leads for each scientific area -- Rotate reviewers for fresh perspectives -- Build community of contributors -- Establish editorial guidelines - -### 4.5 User Testing and Feedback - -**Testing Methodology:** - -1. **Usability Testing** - - Task-based testing (e.g., "find appropriate diagnostic for ocean heat content") - - Think-aloud protocols - - Eye-tracking for figure interpretation - - Time-to-task completion - -2. **Comprehension Testing** - - Quiz on content interpretation - - Open-ended interpretation questions - - Comparison with expert interpretations - - Misconception identification - -3. **User Surveys** - - Satisfaction with scientific content - - Usefulness ratings by section - - Suggestions for improvement - - Missing content identification - -**Feedback Collection:** - -1. **In-App Feedback** - - "Was this helpful?" buttons - - Report error/suggestion links - - Context-specific feedback forms - - User annotation capability - -2. **Community Feedback** - - GitHub issues for content - - Discussion forum - - User workshops - - Conference booth feedback - -3. **Analytics** - - Content engagement metrics - - Search query analysis - - Help documentation usage - - User pathway analysis - ---- - -## 5. Technical Recommendations - -### 5.1 Data Model Enhancements - -**Priority Enhancements:** - -1. **Rich Diagnostic Metadata** -```python -class DiagnosticDetail(BaseModel): - """Enhanced diagnostic model with full scientific context""" - # Existing fields... 
- id: int - provider: ProviderSummary - slug: str - name: str - description: str - - # New scientific content fields - methodology: str | None - """Detailed explanation of diagnostic methodology""" - - interpretation_guide: str | None - """Guidance on interpreting results""" - - typical_performance_range: str | None - """Expected range of values from models""" - - key_references: list[Reference] | None - """Primary literature references""" - - limitations: str | None - """Known limitations and caveats""" - - use_cases: list[str] | None - """Common applications and use cases""" - - related_diagnostics: list[str] | None - """Related diagnostic slugs""" - - ipcc_relevance: IPCCRelevance | None - """Connection to IPCC assessment chapters""" -``` - -2. **Figure Context Model** -```python -class FigureMetadata(BaseModel): - """Scientific context for diagnostic figures""" - execution_output_id: int - caption: str - """What is shown in the figure""" - - interpretation: str - """Scientific interpretation guidance""" - - color_scale_guide: str | None - """How to interpret color scales""" - - statistical_notes: str | None - """Notes on significance, uncertainty""" - - related_figures: list[int] | None - """Links to related visualizations""" -``` - -3. **Dataset Context Model** -```python -class DatasetContext(BaseModel): - """Scientific context for datasets""" - dataset_type_description: str - """Explanation of dataset type (CMIP6, obs4MIPs, etc.)""" - - appropriate_uses: str - """Guidance on when to use this dataset""" - - quality_flags: list[QualityFlag] | None - """Known issues or quality indicators""" - - version_notes: str | None - """Notes on dataset version and updates""" -``` - -**Implementation Strategy:** -- Store in database with migrations -- Cache frequently accessed content -- Support i18n for multi-language content -- Version content separately from code - -### 5.2 Content Management System - -**Requirements:** - -1. 
**Capabilities Needed** - - In-database content storage - - Version control and history - - Multi-author editing workflow - - Preview before publish - - Content templates - - Search and filtering - - Export/import functionality - -2. **Architecture Options** - - **Option A: Database-Based CMS** (RECOMMENDED) - - Store content in PostgreSQL tables - - API endpoints for CRUD operations - - Admin interface for content editing - - Version history in database - - **Option B: File-Based CMS** - - Markdown files in repository - - Git for version control - - Build-time content compilation - - Requires deployment for updates - - **Option C: Headless CMS** - - External service (Contentful, Strapi) - - API integration - - Advanced editing features - - Additional infrastructure cost - -3. **Recommended Approach** - - Start with **Option A** (database-based): - - Aligns with existing architecture - - Low infrastructure overhead - - Fast content updates - - Full version control - - Can migrate to Option C if needed - -**Implementation Plan:** -```sql --- Content tables -CREATE TABLE scientific_content ( - id SERIAL PRIMARY KEY, - content_type VARCHAR(50), -- 'diagnostic', 'figure', 'dataset' - entity_id INTEGER, -- Foreign key to entity - field_name VARCHAR(50), -- 'methodology', 'interpretation', etc. - content TEXT, - author_id INTEGER, - reviewed_by INTEGER, - approved_at TIMESTAMP, - version INTEGER, - created_at TIMESTAMP, - updated_at TIMESTAMP -); - -CREATE TABLE content_versions ( - id SERIAL PRIMARY KEY, - scientific_content_id INTEGER, - content TEXT, - author_id INTEGER, - change_note TEXT, - created_at TIMESTAMP -); -``` - -### 5.3 Integration with External Resources - -**Priority Integrations:** - -1. **IPCC Reports** - - Link diagnostic to specific AR6 chapters - - Deep links to relevant sections - - Extract key findings programmatically - - Track when new assessments released - -2. 
**Scientific Papers** - - DOI resolution and metadata - - Abstract display in tooltips - - Citation formatting - - Track citations of Climate REF - -3. **Climate Data Glossaries** - - CF Standard Names - - CMIP6 Controlled Vocabularies - - IPCC AR6 Glossary - - Build unified glossary with links - -4. **Provider Tool Documentation** - - Link to ILAMB documentation - - Link to ESMValTool recipe documentation - - Link to PMP documentation - - Embed relevant sections - -**Technical Implementation:** - -```python -class ExternalResource(BaseModel): - """Link to external scientific resource""" - resource_type: Literal["ipcc", "paper", "glossary", "documentation"] - url: HttpUrl - title: str - description: str | None - metadata: dict | None # e.g., DOI, ISBN, chapter number -``` - -### 5.4 Search and Discovery Improvements - -**Semantic Search Enhancement:** - -1. **Full-Text Search** - - PostgreSQL full-text search - - Index all scientific content - - Rank by relevance - - Highlight matching terms - -2. **Faceted Search** - - Filter by climate theme - - Filter by diagnostic type - - Filter by provider - - Filter by output type - -3. **Contextual Suggestions** - - "Related diagnostics" based on content similarity - - "Users who viewed X also viewed Y" - - "Suggested next steps" based on current view - - Topic-based navigation - -4. **Natural Language Queries** - - "Show me diagnostics for ocean heat content" - - "Compare model performance for precipitation" - - Translate to structured queries - - Learn from user interactions - -**Implementation:** -```python -# Add to backend/src/ref_backend/api/routes/search.py -@router.get("/search") -def semantic_search( - query: str, - filters: SearchFilters | None = None, - limit: int = 20 -) -> SearchResults: - """ - Perform semantic search across diagnostics, datasets, and content. 
- """ - # Use PostgreSQL full-text search - # Return ranked results with highlighted snippets - # Include related resources -``` - -### 5.5 Analytics and Instrumentation - -**Tracking Requirements:** - -1. **Content Engagement** - - Time spent on scientific content - - Scroll depth on description pages - - Click-through on references - - Expansion of "read more" sections - -2. **User Pathways** - - Common navigation patterns - - Entry and exit pages - - Search → result → action flows - - Diagnostic discovery patterns - -3. **Content Effectiveness** - - Help documentation usage - - Feedback ratings by content section - - Error reports by page - - Search queries leading to "no results" - -**Privacy-Preserving Analytics:** -- Aggregate statistics only -- No personal data collection -- Optional opt-in for detailed feedback -- Compliance with institutional policies - ---- - -## 6. Resource Requirements - -### 6.1 Effort Estimates - -**By Content Area:** - -| Area | Content Creation | Expert Review | Implementation | Testing | Total | -|------|-----------------|---------------|----------------|---------|-------| -| Diagnostics Index | 40h | 16h | 40h | 16h | 112h | -| Diagnostic Details | 80h | 32h | 60h | 24h | 196h | -| Figure Interpretation | 60h | 24h | 80h | 24h | 188h | -| Dataset Context | 40h | 16h | 40h | 16h | 112h | -| Help & Documentation | 80h | 24h | 40h | 16h | 160h | -| CMS Infrastructure | - | - | 120h | 40h | 160h | -| **TOTAL** | **300h** | **112h** | **380h** | **136h** | **928h** | - -**Personnel Time (FTE):** -- **0.5 FTE** - 6 months: Content creation and coordination -- **0.25 FTE** - 6 months: Expert reviews (distributed across experts) -- **0.75 FTE** - 6 months: Development and implementation -- **0.25 FTE** - 6 months: Testing and refinement - -### 6.2 Types of Expertise Needed - -**By Scientific Domain:** - -1. 
**Atmospheric Science** (40 hours expert review) - - Cloud physics specialist - - Atmospheric dynamics expert - - Radiation transfer specialist - -2. **Ocean Science** (40 hours expert review) - - Physical oceanographer - - Ocean circulation specialist - - Sea ice specialist - -3. **Land Science** (40 hours expert review) - - Carbon cycle expert - - Hydrologist - - Vegetation modeler - -4. **Earth System** (40 hours expert review) - - Climate sensitivity expert - - ENSO specialist - - Climate feedback specialist - -5. **Impact & Adaptation** (32 hours expert review) - - Climate impacts researcher - - Regional climate specialist - -**Technical Expertise:** - -1. **Climate Model Evaluation** (80 hours) - - CMIP experience - - Diagnostic tool development - - Statistical methods - -2. **Science Communication** (60 hours) - - Technical writing for scientists - - Educational content development - - Accessibility specialist - -3. **Software Development** (380 hours) - - Full-stack web development - - Database design - - API development - - Frontend UI/UX - -### 6.3 Timeline Suggestions - -**Phase 1: Foundation** (Months 1-3) -- Set up content management infrastructure -- Recruit domain experts -- Develop detailed specifications -- Create content templates -- **Deliverable:** CMS operational, expert team assembled - -**Phase 2: Core Content** (Months 4-6) -- Diagnostics index scientific context -- Begin diagnostic detail pages -- Figure interpretation framework -- **Deliverable:** 50% of diagnostics have full scientific context - -**Phase 3: Comprehensive Coverage** (Months 7-9) -- Complete diagnostic details -- Dataset context pages -- Help documentation -- **Deliverable:** 90% of content complete - -**Phase 4: Refinement** (Months 10-12) -- User testing and feedback -- Content refinement -- Additional examples and case studies -- Community review -- **Deliverable:** Production-ready scientific content - -### 6.4 Potential Collaborations - -**Research Institutions:** -- 
**NCAR** (National Center for Atmospheric Research) -- **PCMDI** (Program for Climate Model Diagnosis and Intercomparison) -- **Met Office Hadley Centre** -- **NOAA/GFDL** (Geophysical Fluid Dynamics Laboratory) -- **Max Planck Institute for Meteorology** - -**International Programs:** -- **CMIP** (Coupled Model Intercomparison Project) -- **WCRP** (World Climate Research Programme) -- **IPCC** (Intergovernmental Panel on Climate Change) -- **ESGF** (Earth System Grid Federation) - -**Diagnostic Tool Teams:** -- **ILAMB** (International Land Model Benchmarking) -- **ESMValTool** (Earth System Model Evaluation Tool) -- **PMP** (PCMDI Metrics Package) - -**Funding Opportunities:** -- **DOE** - Scientific Discovery through Advanced Computing (SciDAC) -- **NSF** - Cyberinfrastructure for Sustained Scientific Innovation (CSSI) -- **NASA** - Earth Science Technology Office (ESTO) -- **NOAA** - Climate Program Office (CPO) - ---- - -## 7. Success Metrics - -### 7.1 Quantitative Metrics - -**Content Coverage:** -- [ ] 100% of diagnostics have basic description -- [ ] 90% of diagnostics have full scientific context -- [ ] 80% of figures have interpretation guidance -- [ ] 75% of datasets have usage context -- [ ] Comprehensive help documentation (20+ pages) - -**User Engagement:** -- **Target:** 60% of users view scientific content on visited pages -- **Target:** Average 2+ minutes on content-rich pages -- **Target:** 30% expansion of "read more" sections -- **Target:** 20% click-through on references - -**Content Quality:** -- **Target:** 4.0+ average rating (5-point scale) -- **Target:** <5% error reports per content section -- **Target:** 80% positive feedback on comprehension tests -- **Target:** Expert approval for 100% of content - -**Search and Discovery:** -- **Target:** 70% search success rate -- **Target:** <3 clicks to find relevant diagnostic -- **Target:** 40% use of related diagnostics links -- **Target:** 25% use of help documentation - -### 7.2 Qualitative 
Metrics - -**User Feedback:** -- Collect testimonials from researchers -- Gather feedback on content usefulness -- Track feature requests for additional content -- Monitor community discussion topics - -**Expert Validation:** -- Periodic expert review scores -- Published peer-reviewed paper on Climate REF -- Recognition from climate modeling community -- Adoption by educational institutions - -**Impact Indicators:** -- Citations in scientific papers -- Use in graduate courses -- Presentations at conferences -- Requests for similar tools in other domains - -### 7.3 User Feedback Mechanisms - -**In-App Feedback:** - -1. **Contextual Feedback Buttons** - ```typescript - // Example: inline "Was this helpful?" buttons rendered alongside content - <FeedbackButtons contentId={content.id}> -   <FeedbackButton value="helpful" label="Helpful" /> -   <FeedbackButton value="confusing" label="Confusing" /> -   <FeedbackButton value="report-error" label="Report an error" /> - </FeedbackButtons> - ``` - -2. **Detailed Feedback Form** - - Rating (1-5 stars) - - What was most useful? - - What was confusing? - - What's missing? - - Open text field - -3. **Anonymous Analytics** - - Time on page - - Scroll depth - - Click patterns - - Search queries - -**External Feedback:** - -1. **User Surveys** (Quarterly) - - Overall satisfaction - - Content quality assessment - - Feature priorities - - Comparison with alternatives - -2. **User Interviews** (Monthly) - - In-depth feedback sessions - - Workflow observation - - Pain point identification - - Feature exploration - -3. **Community Engagement** - - GitHub discussions - - Email feedback - - Conference conversations - - Workshop feedback - -### 7.4 Analytics to Track - -**Content Analytics:** -```sql --- Example analytics queries -SELECT - content_type, - AVG(time_on_page) as avg_time, - AVG(scroll_depth) as avg_scroll, - COUNT(*) as views, - SUM(CASE WHEN clicked_reference THEN 1 ELSE 0 END) as reference_clicks -FROM content_analytics -WHERE created_at > NOW() - INTERVAL '30 days' -GROUP BY content_type; -``` - -**Dashboard Metrics:** -- Content engagement by section -- Most viewed scientific content -- Most helpful content (by rating) -- Content with highest error reports -- Search query trends -- User pathway analysis - ---- - -## 8. 
Implementation Roadmap - -### 8.1 Phase 1: Foundation (Months 1-3) - -**Goals:** -- Establish infrastructure -- Assemble expert team -- Create detailed specifications - -**Tasks:** - -**Month 1:** -- [ ] Design content management system architecture -- [ ] Create database schema for scientific content -- [ ] Develop API endpoints for content CRUD -- [ ] Build basic admin interface -- [ ] Identify and recruit domain experts - -**Month 2:** -- [ ] Implement content versioning -- [ ] Create content templates based on Explorer framework -- [ ] Set up expert review workflow -- [ ] Develop diagnostic detail page specifications -- [ ] Begin expert workshops - -**Month 3:** -- [ ] Complete CMS infrastructure -- [ ] Finalize content templates -- [ ] Create style guide for scientific content -- [ ] Establish QA processes -- [ ] Begin content creation for priority diagnostics - -**Deliverables:** -- Operational CMS -- Expert team assembled and trained -- Complete specifications for all content areas -- 10 diagnostics with full scientific context (pilot) - -### 8.2 Phase 2: Core Content (Months 4-6) - -**Goals:** -- Add scientific context to high-priority areas -- Build momentum with expert contributors -- Validate approach with users - -**Tasks:** - -**Month 4:** -- [ ] Add context to top 30 diagnostics (by usage) -- [ ] Implement diagnostic index enhancements -- [ ] Create figure interpretation framework -- [ ] Begin help documentation structure - -**Month 5:** -- [ ] Complete 50% of diagnostic details -- [ ] Add interpretation guidance to figure galleries -- [ ] Implement search improvements -- [ ] Create scientific glossary (first draft) - -**Month 6:** -- [ ] Add dataset context for major dataset types -- [ ] Complete 70% of diagnostic details -- [ ] User testing of content (first round) -- [ ] Iterate based on feedback - -**Deliverables:** -- 50+ diagnostics with full context -- Figure interpretation for top providers -- Dataset type explanations -- Initial help 
documentation -- User testing report - -### 8.3 Phase 3: Comprehensive Coverage (Months 7-9) - -**Goals:** -- Complete remaining content areas -- Ensure consistency and quality -- Expand documentation - -**Tasks:** - -**Month 7:** -- [ ] Complete all diagnostic detail pages -- [ ] Add figure context for all visualizations -- [ ] Complete dataset context pages -- [ ] Expand help documentation - -**Month 8:** -- [ ] Integration with external resources (IPCC, papers) -- [ ] Advanced search features -- [ ] Content cross-linking -- [ ] Second round of user testing - -**Month 9:** -- [ ] Address feedback from testing -- [ ] Expert review of all content -- [ ] Polish and refinement -- [ ] Prepare for launch - -**Deliverables:** -- 90%+ content coverage -- Complete help documentation -- External resource integration -- Quality-assured content - -### 8.4 Phase 4: Launch and Iteration (Months 10-12) - -**Goals:** -- Production release -- Community engagement -- Continuous improvement - -**Tasks:** - -**Month 10:** -- [ ] Soft launch with beta users -- [ ] Monitor analytics and feedback -- [ ] Address critical issues -- [ ] Prepare announcement materials - -**Month 11:** -- [ ] Public launch -- [ ] Conference presentations -- [ ] Community outreach -- [ ] Ongoing content updates - -**Month 12:** -- [ ] Post-launch refinements -- [ ] Establish maintenance procedures -- [ ] Plan for ongoing expert engagement -- [ ] Document lessons learned - -**Deliverables:** -- Production-ready application -- Comprehensive scientific content -- User community engaged -- Maintenance plan established - -### 8.5 Ongoing Maintenance (Year 2+) - -**Regular Activities:** - -**Quarterly:** -- Content review and updates -- New literature review -- User feedback analysis -- Expert consultation - -**Annually:** -- Major content audit -- Expert re-review -- Update for new IPCC reports -- Strategic planning - -**Continuous:** -- Monitor user feedback -- Address error reports -- Add new diagnostics -- 
Update references - ---- - -## 9. Risks and Mitigation - -### 9.1 Identified Risks - -**Risk 1: Expert Availability** -- **Mitigation:** Multiple experts per domain, staggered engagement, clear time commitments - -**Risk 2: Content Maintenance Burden** -- **Mitigation:** CMS automation, community contributions, regular review cycles - -**Risk 3: Technical Implementation Delays** -- **Mitigation:** Phased approach, MVP focus, external contractor if needed - -**Risk 4: Content Consistency** -- **Mitigation:** Templates, style guide, editorial review, automated checks - -**Risk 5: User Adoption** -- **Mitigation:** User testing, iterative feedback, community engagement - -### 9.2 Contingency Plans - -**If expert engagement is low:** -- Leverage diagnostic tool developers first -- Graduate student contributors -- Community sourcing with expert review - -**If development resources are constrained:** -- Start with simpler file-based content -- Focus on highest priority areas only -- Extend timeline with reduced scope - -**If user feedback is negative:** -- Rapid iteration cycles -- A/B testing of different approaches -- Direct user interviews -- Simplify content if too technical - ---- - -## 10. Conclusion - -### 10.1 Strategic Value - -Comprehensive scientific content throughout the Climate REF application will: - -1. **Enhance Research Quality** - - Help researchers use diagnostics appropriately - - Reduce misinterpretation of results - - Connect evaluations to broader scientific context - -2. **Accelerate Science** - - Reduce time to understand diagnostic outputs - - Enable more researchers to perform model evaluation - - Facilitate knowledge transfer - -3. **Build Community** - - Establish Climate REF as educational resource - - Create platform for expert knowledge sharing - - Support next generation of climate scientists - -4. 
**Increase Impact** - - Citations in scientific literature - - Use in graduate education - - Influence on CMIP evaluation priorities - -### 10.2 Call to Action - -**Immediate Next Steps:** - -1. **Secure Resources** (Week 1-2) - - Approve budget and timeline - - Allocate development resources - - Identify funding opportunities - -2. **Recruit Expert Team** (Week 3-6) - - Contact key potential contributors - - Establish advisory board - - Schedule kickoff workshops - -3. **Begin Technical Work** (Week 7-12) - - Implement CMS infrastructure - - Create pilot content for 10 diagnostics - - Set up expert review process - -4. **Launch Phase 1** (Month 3) - - Pilot content live - - Gather initial feedback - - Iterate and improve - -### 10.3 Long-Term Vision - -The scientific content strategy positions Climate REF as: - -- **The** authoritative platform for climate model evaluation -- A comprehensive educational resource for climate science -- A model for scientific data platforms in other domains -- A sustainable, community-driven knowledge base - -By systematically adding scientific context throughout the application, Climate REF will transform from a useful tool into an indispensable resource for the climate modeling community. 
- ---- - -## Appendix A: Content Templates - -### A.1 Diagnostic Description Template - -```markdown -# {Diagnostic Name} - -## Overview -{1-2 sentence summary of what this diagnostic measures} - -## Scientific Context -{2-4 paragraphs explaining: -- Physical process or phenomenon measured -- Why this is important for climate science -- How it relates to broader climate questions} - -## Methodology -{2-3 paragraphs explaining: -- How the diagnostic is calculated -- Reference datasets used -- Statistical methods applied} - -## Interpretation Guide -{2-3 paragraphs on: -- What "good" performance looks like -- Common model biases -- How to interpret different metrics} - -## Typical Model Performance -{1-2 paragraphs with: -- Expected range of values -- Inter-model spread -- Performance relative to observations} - -## Limitations and Caveats -{1-2 paragraphs noting: -- Known limitations of diagnostic -- Observational uncertainties -- Interpretation caveats} - -## Key References -- Reference 1 (DOI) -- Reference 2 (DOI) -- IPCC chapters - -## Related Diagnostics -- Related diagnostic 1 -- Related diagnostic 2 -``` - -### A.2 Figure Caption Template - -```markdown -**{Figure Title}** - -{1-2 sentences describing what is shown} - -**Interpretation:** {1-2 sentences on how to interpret patterns, colors, etc.} - -**Statistical Notes:** {Optional: significance, uncertainty, sample size} - -**Related Figures:** [Links to related visualizations] -``` - -### A.3 Dataset Context Template - -```markdown -# {Dataset Name/Slug} - -## Dataset Type -{CMIP6, obs4MIPs, etc. 
with explanation} - -## Scientific Context -{What this dataset represents, observation/model details} - -## Metadata -{Explanation of key metadata fields: -- experiment_id -- variant_label -- etc.} - -## Appropriate Uses -{When to use this dataset, typical applications} - -## Quality Information -{Known issues, completeness, version notes} - -## Related Datasets -{Similar or complementary datasets} -``` - ---- - -## Appendix B: Expert Recruitment Template - -### B.1 Email Template for Expert Engagement - -``` -Subject: Climate REF: Request for Scientific Content Review - -Dear Dr. {Name}, - -I'm writing to invite you to contribute to the Climate Rapid Evaluation Framework -(Climate REF), a community platform for systematic climate model evaluation. - -We are developing comprehensive scientific content to help researchers interpret -model evaluation diagnostics, and your expertise in {domain} would be invaluable. - -**What we're asking:** -- Review example content for scientific accuracy (2-4 hours) -- Suggest typical value ranges and key references -- Optionally: develop detailed content for specific diagnostics - -**What you gain:** -- Acknowledgment in documentation -- Co-authorship on Climate REF papers -- Platform for your expertise to reach broad audience -- Support for conference presentations - -**Timeline:** {dates} - -Would you be interested in discussing this further? I'd be happy to schedule -a call to explain the project in more detail. 
- -Best regards, -{Your Name} -{Title} -{Contact Information} - -Climate REF: {URL} -``` - ---- - -## Appendix C: Glossary of Terms - -**AFT:** CMIP Assessment Fast Track - Priority diagnostics for CMIP7 evaluation - -**CMS:** Content Management System - Infrastructure for creating and managing scientific content - -**CMIP:** Coupled Model Intercomparison Project - International framework for comparing climate models - -**DOI:** Digital Object Identifier - Persistent identifier for scientific publications - -**ESGF:** Earth System Grid Federation - Infrastructure for climate data distribution - -**ESMValTool:** Earth System Model Evaluation Tool - Diagnostic package - -**ILAMB:** International Land Model Benchmarking - Evaluation framework - -**IPCC:** Intergovernmental Panel on Climate Change - -**PMP:** PCMDI Metrics Package - Diagnostic tools - -**obs4MIPs:** Observations for Model Intercomparisons - Standardized observation datasets - ---- - -**Document End** - -**Next Actions:** -1. Review and approve this strategic document -2. Present to stakeholders for feedback -3. Begin Phase 1 implementation planning -4. Start expert recruitment process - -**Last Updated:** 2025-10-01 -**Document Owner:** {TBD} -**Review Cycle:** Quarterly diff --git a/frontend/src/instrument.ts b/frontend/src/instrument.ts index f636047..16b8f9b 100644 --- a/frontend/src/instrument.ts +++ b/frontend/src/instrument.ts @@ -5,6 +5,6 @@ Sentry.init({ dsn: import.meta.env.PROD ? "https://4d5d10b87919476ae6b23d5a613c413a@o4510086266486785.ingest.de.sentry.io/4510086733496401" : "", - sendDefaultPii: true, + sendDefaultPii: false, integrations: [], });