diff --git a/.github/workflows/python-engine.yml b/.github/workflows/python-engine.yml
index e9fd5ac..dd7c49c 100644
--- a/.github/workflows/python-engine.yml
+++ b/.github/workflows/python-engine.yml
@@ -31,7 +31,7 @@ jobs:
           dep = check_dependencies()
           assert dep['ok'], f'Missing deps: {dep}'
-          img = np.random.randint(0, 256, (800, 800, 3), dtype=np.uint8)
+          img = np.random.default_rng(7).integers(0, 256, (800, 800, 3), dtype=np.uint8)
           embedded = embed_watermark(
               img,
@@ -49,7 +49,26 @@ jobs:
               quality='balanced',
           )
           assert extracted['ok'], f'Extract failed: {extracted}'
-          assert extracted['wm'] == 'hello world', f'Roundtrip failed: {extracted}'
+          assert extracted['wm'].startswith('fp:'), f'Fingerprint roundtrip failed: {extracted}'
+          assert extracted.get('payloadMode') == 'fingerprint64', extracted
+
+          text16_embedded = embed_watermark(
+              img,
+              'hello world',
+              password=42,
+              quality='balanced',
+              payload_mode='text16',
+          )
+          assert text16_embedded['ok'], f'Text16 embed failed: {text16_embedded}'
+
+          text16_extracted = extract_watermark(
+              text16_embedded['image'],
+              password=42,
+              quality='balanced',
+              payload_mode='text16',
+          )
+          assert text16_extracted['ok'], f'Text16 extract failed: {text16_extracted}'
+          assert text16_extracted['wm'] == 'hello world', f'Text16 roundtrip failed: {text16_extracted}'
           print('All tests passed.')
diff --git a/blind_watermark/README.md b/blind_watermark/README.md
index dad841f..4cfd82e 100644
--- a/blind_watermark/README.md
+++ b/blind_watermark/README.md
@@ -5,7 +5,7 @@ It is a heavily modified version of the original [blind_watermark](https://githu
 
 ## Modifications in LuminCrypt
 
-- **Adaptive Quality Profiles**: Dynamically adjusts Reed-Solomon error correction (`rs_nsym`) and block redundancy based on selected quality (invisible, balanced, robust).
+- **Adaptive Quality Profiles**: Dynamically adjusts Reed-Solomon error correction (`rs_nsym`) and block redundancy for the legacy engine, plus six neural visibility/robustness profiles (trace, faint, light, balanced, strong, robust). Low-visibility neural profiles use keyed Y-channel DCT spread-spectrum embedding to avoid visible grid or line patterns.
 - **Multi-scale Embedding & Extraction**: Embeds the watermark at multiple resolutions (e.g., 1.0x, 0.75x, 0.5x) to resist severe downscaling and platform compression.
 - **Enhanced Synchronization**: Uses a dual-ring, multi-peak template for better resistance against rotation, cropping, and aspect ratio changes.
 - **Edge Exclusion**: Avoids embedding watermarks in the outermost edges of the image where cropping is most likely to occur.
@@ -14,4 +14,4 @@ It is a heavily modified version of the original [blind_watermark](https://githu
 
 ## License
 
-The original code is licensed under the MIT License. See the `NOTICE` file in the root directory for full copyright and license details.
\ No newline at end of file
+The original code is licensed under the MIT License. See the `NOTICE` file in the root directory for full copyright and license details.
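The CI change above is the public contract in miniature: the default `fingerprint64` mode round-trips a keyed 64-bit fingerprint of the text, while `text16` round-trips the literal string. A minimal sketch, assuming `rwm_engine` is importable (as in the workflow) and that embed results expose the watermarked array under `'image'`:

```python
# Sketch of the two payload modes exercised by the CI job above.
import numpy as np
from rwm_engine import embed_watermark, extract_watermark

img = np.random.default_rng(7).integers(0, 256, (800, 800, 3), dtype=np.uint8)

# Default mode: a keyed 64-bit fingerprint of the text is embedded, so
# extraction returns 'fp:<16 hex chars>' rather than the original string.
emb = embed_watermark(img, 'hello world', password=42, quality='balanced')
out = extract_watermark(emb['image'], password=42, quality='balanced')
assert out['wm'].startswith('fp:')

# text16 mode: the literal text (up to 16 UTF-8 bytes) survives the roundtrip.
emb = embed_watermark(img, 'hello world', password=42, quality='balanced',
                      payload_mode='text16')
out = extract_watermark(emb['image'], password=42, quality='balanced',
                        payload_mode='text16')
assert out['wm'] == 'hello world'
```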
diff --git a/blind_watermark/bwm_helper.py b/blind_watermark/bwm_helper.py
index 775f777..3703c6d 100644
--- a/blind_watermark/bwm_helper.py
+++ b/blind_watermark/bwm_helper.py
@@ -44,6 +44,56 @@ def imwrite_unicode(path, img, params=None):
     return ok
 
 
+def emit(payload):
+    print(json.dumps(payload, ensure_ascii=False), flush=True)
+
+
+def failure(code, stage, message, *, detail=None, **extra):
+    payload = {
+        'ok': False,
+        'error': detail or message,
+        'failureCode': code,
+        'failureStage': stage,
+        'userMessage': message,
+        **extra,
+    }
+    emit(payload)
+
+
+def public_embed_payload(result, output, quality):
+    diagnostics = result.get('diagnostics', {})
+    payload = {
+        'ok': True,
+        'output': output,
+        'quality': result.get('quality_used', quality),
+        'engineUsed': result.get('engine_used'),
+        'fallbackUsed': result.get('fallback_used', False),
+        'confidence': result.get('confidence'),
+        'diagnostics': diagnostics,
+    }
+    for key in ('payloadMode', 'fingerprint', 'codec', 'berEstimate', 'spreadConfidence'):
+        if diagnostics.get(key) is not None:
+            payload[key] = diagnostics.get(key)
+    if result.get('warningCode'):
+        payload['warningCode'] = result.get('warningCode')
+    if result.get('warnings'):
+        payload['warnings'] = result.get('warnings')
+    return payload
+
+
+def public_extract_payload(result):
+    payload = dict(result)
+    if 'engine_used' in payload:
+        payload['engineUsed'] = payload.pop('engine_used')
+    if 'fallback_used' in payload:
+        payload['fallbackUsed'] = payload.pop('fallback_used')
+    diagnostics = payload.get('diagnostics', {}) or {}
+    for key in ('payloadMode', 'fingerprint', 'codec', 'berEstimate', 'spreadConfidence'):
+        if diagnostics.get(key) is not None:
+            payload[key] = diagnostics.get(key)
+    return payload
+
+
 def _apply_models_dir(args) -> None:
     models_dir = getattr(args, 'models_dir', '')
     if models_dir:
@@ -61,36 +111,38 @@ def cmd_check(args):
                 f"Missing dependencies: {', '.join(result.get('missing', []))}. "
                 f"Run: pip install {' '.join(result.get('missing', []))}"
             )
-        print(json.dumps(result), flush=True)
+        emit(result)
     except ImportError as exc:
-        print(json.dumps({'ok': False, 'error': str(exc)}), flush=True)
+        emit({'ok': False, 'error': str(exc), 'failureCode': 'model_unavailable', 'failureStage': 'check'})
 
 
 def cmd_embed(args):
     if not args.output:
-        print(json.dumps({'ok': False, 'error': '--output is required for embed mode'}), flush=True)
+        failure('invalid_request', 'embed', 'Output path is required.', detail='--output is required for embed mode')
         return
     if not args.wm:
-        print(json.dumps({'ok': False, 'error': '--wm is required for embed mode'}), flush=True)
+        failure('invalid_request', 'embed', 'Watermark text is required.', detail='--wm is required for embed mode')
        return
     if not os.path.isfile(args.input):
-        print(json.dumps({'ok': False, 'error': f'Input file not found: {args.input}'}), flush=True)
+        failure('input_unreadable', 'embed', 'The input image was not found.', detail=f'Input file not found: {args.input}')
         return
 
     try:
         _apply_models_dir(args)
         from rwm_engine import embed_watermark
     except ImportError as exc:
-        print(json.dumps({
+        emit({
             'ok': False,
             'error': f'Cannot import dependencies: {exc}. \nRun: pip install -r blind_watermark/requirements.txt',
-        }), flush=True)
+            'failureCode': 'model_unavailable',
+            'failureStage': 'embed',
+        })
         return
 
     try:
         image = imread_unicode(args.input)
         if image is None:
-            print(json.dumps({'ok': False, 'error': f'Cannot read image: {args.input}'}), flush=True)
+            failure('input_unreadable', 'embed', 'The image could not be read.', detail=f'Cannot read image: {args.input}')
             return
 
         result = embed_watermark(
@@ -101,49 +153,47 @@
             engine=args.engine,
             models_dir=args.models_dir or None,
             self_check=getattr(args, 'self_check', True),
+            payload_mode=getattr(args, 'payload_mode', 'fingerprint64'),
         )
         if not result.get('ok'):
-            print(json.dumps(result), flush=True)
+            emit(public_extract_payload(result))
             return
 
-        imwrite_unicode(args.output, result['image'])
-        payload = {
-            'ok': True,
-            'output': args.output,
-            'quality': result.get('quality_used', args.quality),
-            'engineUsed': result.get('engine_used'),
-            'fallbackUsed': result.get('fallback_used', False),
-            'confidence': result.get('confidence'),
-            'diagnostics': result.get('diagnostics', {}),
-        }
-        print(json.dumps(payload), flush=True)
+        if not imwrite_unicode(args.output, result['image']):
+            failure('input_unreadable', 'embed', 'The output image could not be written.', detail=f'Cannot write image: {args.output}')
+            return
+        emit(public_embed_payload(result, args.output, args.quality))
     except Exception as exc:
-        print(json.dumps({
+        emit({
             'ok': False,
             'error': str(exc),
+            'failureCode': 'engine_mismatch',
+            'failureStage': 'embed',
             'detail': traceback.format_exc(),
-        }), flush=True)
+        })
 
 
 def cmd_extract(args):
     if not os.path.isfile(args.input):
-        print(json.dumps({'ok': False, 'error': f'Input file not found: {args.input}'}), flush=True)
+        failure('input_unreadable', 'extract', 'The input image was not found.', detail=f'Input file not found: {args.input}')
         return
 
     try:
         _apply_models_dir(args)
         from rwm_engine import extract_watermark
     except ImportError as exc:
-        print(json.dumps({
+        emit({
            'ok': False,
            'error': f'Cannot import dependencies: {exc}. \nRun: pip install -r blind_watermark/requirements.txt',
-        }), flush=True)
+            'failureCode': 'model_unavailable',
+            'failureStage': 'extract',
+        })
        return
 
     try:
         image = imread_unicode(args.input)
         if image is None:
-            print(json.dumps({'ok': False, 'error': f'Cannot read image: {args.input}'}), flush=True)
+            failure('input_unreadable', 'extract', 'The image could not be read.', detail=f'Cannot read image: {args.input}')
             return
 
         result = extract_watermark(
@@ -152,30 +202,155 @@
             quality=args.quality,
             engine=args.engine,
             models_dir=args.models_dir or None,
+            payload_mode=getattr(args, 'payload_mode', None),
         )
         if result.get('ok'):
+            diagnostics = result.get('diagnostics', {})
             payload = {
                 'ok': True,
                 'wm': result.get('wm'),
+                'fingerprint': result.get('fingerprint') or diagnostics.get('fingerprint'),
+                'payloadMode': result.get('payloadMode') or diagnostics.get('payloadMode'),
+                'codec': diagnostics.get('codec'),
+                'berEstimate': diagnostics.get('berEstimate'),
+                'spreadConfidence': diagnostics.get('spreadConfidence'),
                 'engineUsed': result.get('engine_used'),
                 'fallbackUsed': result.get('fallback_used', False),
                 'confidence': result.get('confidence'),
-                'diagnostics': result.get('diagnostics', {}),
+                'diagnostics': diagnostics,
             }
-            print(json.dumps(payload), flush=True)
+            emit(payload)
         else:
-            payload = dict(result)
-            if 'engine_used' in payload:
-                payload['engineUsed'] = payload.pop('engine_used')
-            if 'fallback_used' in payload:
-                payload['fallbackUsed'] = payload.pop('fallback_used')
-            print(json.dumps(payload), flush=True)
+            emit(public_extract_payload(result))
     except Exception as exc:
-        print(json.dumps({
+        emit({
             'ok': False,
             'error': str(exc),
+            'failureCode': 'engine_mismatch',
+            'failureStage': 'extract',
             'detail': traceback.format_exc(),
-        }), flush=True)
+        })
+
+
+def cmd_warmup(args):
+    try:
+        _apply_models_dir(args)
+        from rwm_engine import check_dependencies
+        result = check_dependencies()
+        if result.get('neuralReady'):
+            try:
+                from mlwm.infer import probe_runtime, _load_encoder_session, _load_decoder_session
+                status = probe_runtime(args.models_dir or None)
+                _load_encoder_session(str(status.encoder_path), False)
+                _load_decoder_session(str(status.decoder_path), False)
+                result['warmupReady'] = True
+            except Exception as exc:
+                result['warmupReady'] = False
+                result['warmupError'] = str(exc)
+        emit(result)
+    except Exception as exc:
+        emit({'ok': False, 'error': str(exc), 'failureCode': 'model_unavailable', 'failureStage': 'warmup'})
+
+
+def _safe_replace_write(path, image):
+    root, ext = os.path.splitext(path)
+    tmp_path = f'{root}.tmp{ext or ".png"}'
+    if not imwrite_unicode(tmp_path, image):
+        return False
+    os.replace(tmp_path, path)
+    return True
+
+
+def cmd_embed_batch(args):
+    items = getattr(args, 'items', []) or []
+    total = len(items)
+    results = []
+    batch_id = getattr(args, 'batch_id', '') or 'batch'
+    if total == 0:
+        failure('invalid_request', 'embed_batch', 'No images were selected for batch watermarking.')
+        return
+    try:
+        _apply_models_dir(args)
+        from rwm_engine import embed_watermark
+    except ImportError as exc:
+        failure('model_unavailable', 'embed_batch', 'The watermark backend is not available.', detail=str(exc))
+        return
+
+    for index, item in enumerate(items):
+        input_path = item.get('input') or ''
+        output_path = item.get('output') or ''
+        self_check = bool(item.get('self_check', False))
+        progress_base = {
+            'event': 'progress',
+            'batchId': batch_id,
+            'index': index,
+            'total': total,
+            'input': input_path,
+            'output': output_path,
+        }
+        emit({**progress_base, 'status': 'running', 'progress': index / total})
+        try:
+            image = imread_unicode(input_path)
+            if image is None:
+                raise ValueError('Cannot read image')
+            result = embed_watermark(
+                img=image,
+                text=args.wm,
+                password=args.password,
+                quality=args.quality,
+                engine=args.engine,
+                models_dir=args.models_dir or None,
+                self_check=self_check,
+                payload_mode=getattr(args, 'payload_mode', 'fingerprint64'),
+            )
+            if not result.get('ok'):
+                item_result = public_extract_payload(result)
+                item_result.update({'input': input_path, 'output': output_path, 'index': index, 'status': 'failed'})
+            elif not _safe_replace_write(output_path, result['image']):
+                item_result = {
+                    'ok': False,
+                    'input': input_path,
+                    'output': output_path,
+                    'index': index,
+                    'status': 'failed',
+                    'failureCode': 'input_unreadable',
+                    'failureStage': 'embed_batch',
+                    'error': f'Cannot write image: {output_path}',
+                }
+            else:
+                item_result = public_embed_payload(result, output_path, args.quality)
+                item_result.update({'input': input_path, 'index': index, 'status': 'done'})
+        except Exception as exc:
+            item_result = {
+                'ok': False,
+                'input': input_path,
+                'output': output_path,
+                'index': index,
+                'status': 'failed',
+                'failureCode': 'engine_mismatch',
+                'failureStage': 'embed_batch',
+                'error': str(exc),
+            }
+        results.append(item_result)
+        emit({
+            **progress_base,
+            'status': 'done' if item_result.get('ok') else 'failed',
+            'progress': (index + 1) / total,
+            'failureCode': item_result.get('failureCode'),
+            'error': item_result.get('error'),
+        })
+
+    success_count = sum(1 for item in results if item.get('ok'))
+    emit({
+        'event': 'complete',
+        'ok': success_count == total,
+        'batchId': batch_id,
+        'total': total,
+        'successCount': success_count,
+        'failureCount': total - success_count,
+        'failureCode': None if success_count == total else 'batch_partial_failure',
+        'results': results,
+    })
 
 
 def parse_args():
@@ -196,16 +371,20 @@ class _Args:
         args.self_check = opts.get('self_check', True)
         args.engine = opts.get('engine', 'auto')
         args.models_dir = opts.get('models_dir', '')
+        args.payload_mode = opts.get('payload_mode', opts.get('payloadMode', 'fingerprint64'))
+        args.items = opts.get('items', [])
+        args.batch_id = opts.get('batch_id', '')
         return args
 
     parser = argparse.ArgumentParser(description='Robust Watermark Engine bridge')
-    parser.add_argument('--mode', choices=['check', 'embed', 'extract'], required=True)
+    parser.add_argument('--mode', choices=['check', 'warmup', 'embed', 'extract', 'embed_batch'], required=True)
     parser.add_argument('--input', default='')
     parser.add_argument('--output', default='')
     parser.add_argument('--wm', default='')
     parser.add_argument('--password', type=int, default=1)
-    parser.add_argument('--quality', choices=['invisible', 'balanced', 'robust'], default='balanced')
+    parser.add_argument('--quality', choices=['trace', 'faint', 'light', 'invisible', 'balanced', 'strong', 'robust'], default='balanced')
     parser.add_argument('--engine', choices=['auto', 'legacy', 'neural'], default='auto')
+    parser.add_argument('--payload-mode', choices=['fingerprint64', 'text16'], default='fingerprint64')
    parser.add_argument('--models-dir', default='')
     return parser.parse_args()
 
@@ -214,10 +393,14 @@ def main() -> None:
     args = parse_args()
     if args.mode == 'check':
         cmd_check(args)
+    elif args.mode == 'warmup':
+        cmd_warmup(args)
     elif args.mode == 'embed':
         cmd_embed(args)
     elif args.mode == 'extract':
         cmd_extract(args)
+    elif args.mode == 'embed_batch':
+        cmd_embed_batch(args)
 
 
 if __name__ == '__main__':
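The bridge speaks line-delimited JSON: every `emit()` call prints one complete JSON object per line, and `embed_batch` interleaves `progress` events with a final `complete` event. A sketch of a host-side consumer; the flag spellings follow `parse_args()` above, but the subprocess invocation itself is hypothetical (the real host, and how batch `items` are passed via the `_Args` opts path, are outside this diff):

```python
# Minimal consumer for the bridge's line-delimited JSON output.
import json
import subprocess

proc = subprocess.Popen(
    ['python', 'blind_watermark/bwm_helper.py', '--mode', 'embed',
     '--input', 'in.png', '--output', 'out.png', '--wm', 'hello world',
     '--password', '42', '--quality', 'balanced',
     '--payload-mode', 'fingerprint64'],
    stdout=subprocess.PIPE, text=True,
)
for line in proc.stdout:
    msg = json.loads(line)
    if msg.get('event') == 'progress':      # emitted by embed_batch only
        print(msg['index'], msg['status'], msg.get('progress'))
    elif not msg.get('ok'):
        print('failed:', msg.get('failureCode'), msg.get('userMessage'))
    else:
        print('done:', msg.get('output') or msg.get('wm'))
```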
diff --git a/blind_watermark/mlwm/bench_spread_v2.py b/blind_watermark/mlwm/bench_spread_v2.py
new file mode 100644
index 0000000..8ebb865
--- /dev/null
+++ b/blind_watermark/mlwm/bench_spread_v2.py
@@ -0,0 +1,176 @@
+from __future__ import annotations
+
+import argparse
+import csv
+import glob
+import json
+import os
+import time
+from pathlib import Path
+from typing import Any
+
+import cv2
+
+from .metrics import psnr, ssim
+from .spread_v2 import decode_bgr, embed_bgr, fingerprint_hex
+
+
+def _jpeg_roundtrip(image, quality: int):
+    ok, buf = cv2.imencode('.jpg', image, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
+    if not ok:
+        raise ValueError('JPEG encode failed')
+    return cv2.imdecode(buf, cv2.IMREAD_COLOR)
+
+
+def _attacks(image) -> dict[str, Any]:
+    h, w = image.shape[:2]
+    attacks = {
+        'clean': image,
+        'png_roundtrip': cv2.imdecode(cv2.imencode('.png', image)[1], cv2.IMREAD_COLOR),
+    }
+    for quality in [95, 90, 85, 80, 75]:
+        attacks[f'jpeg{quality}'] = _jpeg_roundtrip(image, quality)
+    for scale in [0.75, 0.5]:
+        small = cv2.resize(image, (max(1, int(w * scale)), max(1, int(h * scale))), interpolation=cv2.INTER_AREA)
+        attacks[f'resize{scale:g}'] = cv2.resize(small, (w, h), interpolation=cv2.INTER_CUBIC)
+    attacks['mild_blur'] = cv2.GaussianBlur(image, (3, 3), 0.55)
+    crop = image[int(h * 0.05):int(h * 0.95), int(w * 0.05):int(w * 0.95)]
+    attacks['crop90'] = cv2.resize(crop, (w, h), interpolation=cv2.INTER_CUBIC)
+    return attacks
+
+
+def _image_paths(inputs: list[str]) -> list[str]:
+    paths: list[str] = []
+    for item in inputs:
+        p = Path(item)
+        if p.is_dir():
+            for ext in ('*.png', '*.jpg', '*.jpeg', '*.bmp', '*.tif', '*.tiff'):
+                paths.extend(glob.glob(str(p / ext)))
+        else:
+            paths.extend(glob.glob(item))
+    return sorted(set(paths))
+
+
+def run(args: argparse.Namespace) -> dict[str, Any]:
+    out_dir = Path(args.out_dir)
+    out_dir.mkdir(parents=True, exist_ok=True)
+    rows = []
+    started = time.time()
+    for path in _image_paths(args.inputs):
+        image = cv2.imread(path, cv2.IMREAD_COLOR)
+        if image is None:
+            continue
+        for profile in args.profiles:
+            expected_fp = fingerprint_hex(args.text, args.password)
+            try:
+                watermarked, diagnostics = embed_bgr(
+                    image,
+                    args.text,
+                    args.password,
+                    profile,
+                    payload_mode=args.payload_mode,
+                )
+                quality_psnr = psnr(image, watermarked)
+                quality_ssim = ssim(image, watermarked)
+            except Exception as exc:
+                rows.append({
+                    'image': path,
+                    'profile': profile,
+                    'attack': 'embed',
+                    'ok': False,
+                    'error': str(exc),
+                })
+                continue
+
+            for attack_name, attacked in _attacks(watermarked).items():
+                row = {
+                    'image': path,
+                    'profile': profile,
+                    'payloadMode': args.payload_mode,
+                    'fingerprint': diagnostics.get('fingerprint'),
+                    'attack': attack_name,
+                    'psnr': quality_psnr,
+                    'ssim': quality_ssim,
+                    'ok': False,
+                    'confidence': None,
+                    'berEstimate': None,
+                    'decoded': '',
+                    'error': '',
+                }
+                try:
+                    decoded = decode_bgr(attacked, args.password, profile, payload_mode=args.payload_mode)
+                    row['decoded'] = decoded.get('text', '')
+                    row['confidence'] = decoded.get('confidence')
+                    row['berEstimate'] = decoded.get('berEstimate')
+                    row['ok'] = (
+                        row['decoded'] == f'fp:{expected_fp}'
+                        if args.payload_mode == 'fingerprint64'
+                        else row['decoded'] == args.text
+                    )
+                except Exception as exc:
+                    row['error'] = str(exc)
+                rows.append(row)
+
+            fpr_row = {
+                'image': path,
+                'profile': profile,
+                'payloadMode': args.payload_mode,
+                'fingerprint': diagnostics.get('fingerprint'),
+                'attack': 'original_fpr',
+                'psnr': quality_psnr,
+                'ssim': quality_ssim,
+                'ok': False,
+                'confidence': None,
+                'berEstimate': None,
+                'decoded': '',
+                'error': '',
+            }
+            try:
+                decoded = decode_bgr(image, args.password, profile, payload_mode=args.payload_mode)
+                fpr_row['decoded'] = decoded.get('text', '')
+                fpr_row['confidence'] = decoded.get('confidence')
+                fpr_row['ok'] = True
+            except Exception as exc:
+                fpr_row['error'] = str(exc)
+            rows.append(fpr_row)
+
+    csv_path = out_dir / 'spread_v2_bench.csv'
+    json_path = out_dir / 'spread_v2_bench.json'
+    fields = sorted({key for row in rows for key in row.keys()})
+    with csv_path.open('w', newline='', encoding='utf-8') as fh:
+        writer = csv.DictWriter(fh, fieldnames=fields)
+        writer.writeheader()
+        writer.writerows(rows)
+    summary = {
+        'createdAt': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+        'elapsedSec': round(time.time() - started, 3),
+        'rows': len(rows),
+        'csv': str(csv_path),
+        'profiles': args.profiles,
+        'payloadMode': args.payload_mode,
+        'successByAttack': {},
+    }
+    for attack in sorted({row.get('attack') for row in rows}):
+        subset = [row for row in rows if row.get('attack') == attack]
+        summary['successByAttack'][attack] = {
+            'ok': sum(1 for row in subset if row.get('ok') is True),
+            'total': len(subset),
+        }
+    json_path.write_text(json.dumps({'summary': summary, 'rows': rows}, ensure_ascii=False, indent=2), encoding='utf-8')
+    summary['json'] = str(json_path)
+    return summary
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser(description='Benchmark frequency-spread-v2 image watermarking.')
+    parser.add_argument('inputs', nargs='*', default=['data/train_images', 'data/val_images'])
+    parser.add_argument('--out-dir', default='artifacts/spread_v2_eval')
+    parser.add_argument('--profiles', nargs='+', default=['trace', 'faint', 'light', 'balanced'])
+    parser.add_argument('--payload-mode', choices=['fingerprint64', 'text16'], default='fingerprint64')
+    parser.add_argument('--text', default='LuminCrypt spread v2 benchmark payload')
+    parser.add_argument('--password', type=int, default=123)
+    print(json.dumps(run(parser.parse_args()), ensure_ascii=False, indent=2))
+
+
+if __name__ == '__main__':
+    main()
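Note the `original_fpr` rows: a successful decode on the unwatermarked original is recorded there, i.e. it measures the false-positive rate rather than robustness. Since the harness is argparse-driven, a programmatic run is just a `Namespace` with the same fields; a sketch with illustrative paths, assuming the package imports as `blind_watermark.mlwm`:

```python
# Programmatic benchmark run; fields mirror the parser in main().
import argparse
from blind_watermark.mlwm.bench_spread_v2 import run

summary = run(argparse.Namespace(
    inputs=['data/val_images'],
    out_dir='artifacts/spread_v2_eval',
    profiles=['trace', 'balanced'],
    payload_mode='fingerprint64',
    text='LuminCrypt spread v2 benchmark payload',
    password=123,
))
# Per-attack decode counts, e.g. {'jpeg85': {'ok': 17, 'total': 20}, ...}
print(summary['successByAttack'])
```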
diff --git a/blind_watermark/mlwm/infer.py b/blind_watermark/mlwm/infer.py
index 38c983a..d902040 100644
--- a/blind_watermark/mlwm/infer.py
+++ b/blind_watermark/mlwm/infer.py
@@ -21,6 +21,21 @@ class NeuralRuntimeUnavailable(RuntimeError):
     pass
 
 
+class NeuralDecodeError(ValueError):
+    def __init__(
+        self,
+        failure_code: str,
+        message: str,
+        *,
+        confidence: float = 0.0,
+        attempts: list[dict[str, Any]] | None = None,
+    ):
+        super().__init__(message)
+        self.failure_code = failure_code
+        self.confidence = confidence
+        self.attempts = attempts or []
+
+
 @dataclass
 class NeuralModelStatus:
     models_dir: Path
@@ -100,6 +115,43 @@ def _resize_residual(residual: np.ndarray, shape: tuple[int, int]) -> np.ndarray:
     return cv2.resize(residual, (w, h), interpolation=cv2.INTER_CUBIC)
 
 
+def _texture_mask(image_rgb: np.ndarray, *, floor: float = 1.0, power: float = 1.0) -> np.ndarray:
+    if floor >= 0.999:
+        return np.ones(image_rgb.shape[:2], dtype=np.float32)
+    gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY).astype(np.float32) / 255.0
+    gx = cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=3)
+    gy = cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=3)
+    mag = cv2.GaussianBlur(np.sqrt(gx * gx + gy * gy), (0, 0), 1.2)
+    lo, hi = float(np.percentile(mag, 35)), float(np.percentile(mag, 96))
+    norm = np.clip((mag - lo) / max(hi - lo, 1e-6), 0.0, 1.0)
+    if power != 1.0:
+        norm = np.power(norm, power)
+    return (floor + (1.0 - floor) * norm).astype(np.float32)
+
+
+def _shape_residual(image_rgb: np.ndarray, residual: np.ndarray, profile: dict[str, Any] | None) -> np.ndarray:
+    if not profile:
+        return residual.astype(np.float32)
+    shaped = residual.astype(np.float32)
+    chroma_scale = float(profile.get('chroma_scale', 1.0))
+    if chroma_scale < 0.999:
+        luminance_like = np.mean(shaped, axis=2, keepdims=True)
+        shaped = luminance_like + (shaped - luminance_like) * max(0.0, chroma_scale)
+    mask_floor = float(profile.get('texture_floor', 1.0))
+    if mask_floor < 0.999:
+        mask = _texture_mask(
+            image_rgb,
+            floor=max(0.0, min(1.0, mask_floor)),
+            power=float(profile.get('texture_power', 1.0)),
+        )
+        shaped = shaped * mask[..., None]
+    max_abs = profile.get('max_abs_residual')
+    if max_abs is not None:
+        limit = max(float(max_abs), 1e-6)
+        shaped = np.tanh(shaped / limit) * limit
+    return shaped
+
+
 def _bits_array(payload_bits: np.ndarray) -> np.ndarray:
     bits = np.asarray(payload_bits, dtype=np.float32).reshape(1, PAYLOAD_BITS)
     if bits.shape[1] != PAYLOAD_BITS:
@@ -135,9 +187,11 @@ def apply_neural_residual(
     residual: np.ndarray,
     *,
     strength: float = 1.0,
+    profile: dict[str, Any] | None = None,
 ) -> np.ndarray:
     out = image_rgb.astype(np.float32) / 255.0
-    out = np.clip(out + residual.astype(np.float32) * strength, 0.0, 1.0)
+    shaped = _shape_residual(image_rgb, residual, profile)
+    out = np.clip(out + shaped * strength, 0.0, 1.0)
     return np.clip(np.round(out * 255.0), 0, 255).astype(np.uint8)
 
 
@@ -153,6 +207,7 @@ def neural_decode_views(
         raise NeuralRuntimeUnavailable('neural models are not ready')
     session = _load_decoder_session(str(status.decoder_path), use_cuda)
     attempts: list[dict[str, Any]] = []
+    decode_errors: list[str] = []
     weighted_logits = np.zeros(PAYLOAD_BITS, dtype=np.float32)
     total_weight = 0.0
     for index, view in enumerate(views_rgb):
@@ -174,16 +229,27 @@ def neural_decode_views(
                 decoded['strategy'] = 'single-view'
                 decoded['manifest'] = status.manifest
                 return decoded
-        except Exception:
+        except Exception as exc:
+            decode_errors.append(str(exc))
             continue
     if total_weight <= 1e-8:
         raise NeuralRuntimeUnavailable('decoder produced no usable confidence scores')
     aggregated_logits = weighted_logits / total_weight
-    decoded = decode_payload_logits(aggregated_logits, password=password)
-    decoded['confidence'] = float(total_weight / max(len(views_rgb), 1))
-    decoded['attempts'] = [{'index': a['index'], 'confidence': a['confidence']} for a in attempts]
-    decoded['strategy'] = 'weighted-aggregate'
-    decoded['manifest'] = status.manifest
-    return decoded
+    avg_confidence = float(total_weight / max(len(views_rgb), 1))
+    try:
+        decoded = decode_payload_logits(aggregated_logits, password=password)
+        decoded['confidence'] = avg_confidence
+        decoded['attempts'] = [{'index': a['index'], 'confidence': a['confidence']} for a in attempts]
+        decoded['strategy'] = 'weighted-aggregate'
+        decoded['manifest'] = status.manifest
+        return decoded
+    except Exception as exc:
+        failure_code = 'wrong_password_or_corrupted_payload' if avg_confidence >= 0.45 else 'no_signal'
+        raise NeuralDecodeError(
+            failure_code,
+            'payload checksum failed' if failure_code != 'no_signal' else 'no reliable neural watermark signal',
+            confidence=avg_confidence,
+            attempts=[{'index': a['index'], 'confidence': a['confidence']} for a in attempts],
+        ) from exc
diff --git a/blind_watermark/mlwm/spread_v2.py b/blind_watermark/mlwm/spread_v2.py
new file mode 100644
index 0000000..5ad0f0a
--- /dev/null
+++ b/blind_watermark/mlwm/spread_v2.py
@@ -0,0 +1,496 @@
+from __future__ import annotations
+
+import hashlib
+import math
+import struct
+import zlib
+from dataclasses import dataclass
+from typing import Any
+
+import cv2
+import numpy as np
+import reedsolo
+
+BLOCK = 8
+MODEL_VERSION = 'frequency-spread-v2'
+PROTOCOL_FINGERPRINT64 = 'spread-v2-fingerprint64'
+PROTOCOL_TEXT16 = 'spread-v2-text16'
+PAYLOAD_MODE_FINGERPRINT64 = 'fingerprint64'
+PAYLOAD_MODE_TEXT16 = 'text16'
+
+FINGERPRINT_FRAME_BYTES = 16
+FINGERPRINT_RS_NSYM = 8
+FINGERPRINT_ENCODED_BYTES = FINGERPRINT_FRAME_BYTES + FINGERPRINT_RS_NSYM
+FINGERPRINT_BITS = FINGERPRINT_ENCODED_BYTES * 8
+
+DCT_PAIRS = [
+    ((2, 3), (3, 2)),
+    ((2, 4), (4, 2)),
+    ((3, 3), (2, 5)),
+    ((4, 3), (3, 4)),
+]
+
+PROFILES: dict[str, dict[str, float | int]] = {
+    'trace': {
+        'delta': 10.0,
+        'reps': 5,
+        'mask_floor': 0.55,
+        'mask_gain': 1.20,
+        'max_y_delta': 40.0,
+    },
+    'faint': {
+        'delta': 11.0,
+        'reps': 5,
+        'mask_floor': 0.55,
+        'mask_gain': 1.22,
+        'max_y_delta': 40.0,
+    },
+    'light': {
+        'delta': 12.0,
+        'reps': 5,
+        'mask_floor': 0.55,
+        'mask_gain': 1.25,
+        'max_y_delta': 40.0,
+    },
+    'balanced': {
+        'delta': 14.0,
+        'reps': 5,
+        'mask_floor': 0.55,
+        'mask_gain': 1.25,
+        'max_y_delta': 45.0,
+    },
+}
+
+
+@dataclass
+class SpreadPayload:
+    mode: str
+    protocol: str
+    bits: np.ndarray
+    text: str
+    fingerprint: str | None = None
+    payload_bytes: int = 0
+
+
+class SpreadV2DecodeError(Exception):
+    def __init__(self, failure_code: str, message: str, *, confidence: float, attempts: list[dict[str, Any]] | None = None):
+        super().__init__(message)
+        self.failure_code = failure_code
+        self.confidence = confidence
+        self.attempts = attempts or []
+
+
+def fingerprint_hex(text: str, password: int | None) -> str:
+    normalized = int(password or 0) & 0xffffffff
+    material = b'lc-fp-v1' + struct.pack('>I', normalized) + text.encode('utf-8')
+    return hashlib.sha256(material).digest()[:8].hex()
+
+
+def _seed(password: int | None, label: str) -> int:
+    normalized = int(password or 0) & 0xffffffff
+    digest = hashlib.sha256(label.encode('utf-8') + struct.pack('>I', normalized)).digest()
+    return int.from_bytes(digest[:8], 'little')
+
+
+def _bytes_to_bits(data: bytes) -> np.ndarray:
+    out = np.zeros(len(data) * 8, dtype=np.float32)
+    for i, b in enumerate(data):
+        for j in range(8):
+            out[i * 8 + j] = float((b >> (7 - j)) & 1)
+    return out
+
+
+def _bits_to_bytes(bits: np.ndarray | list[float] | list[int]) -> bytes:
+    flat = np.asarray(bits, dtype=np.float32).reshape(-1)
+    if flat.size % 8 != 0:
+        raise ValueError('bit array length must be divisible by 8')
+    out = bytearray(flat.size // 8)
+    for i in range(len(out)):
+        value = 0
+        for j in range(8):
+            value = (value << 1) | int(flat[i * 8 + j] >= 0.5)
+        out[i] = value
+    return bytes(out)
+
+
+def _mask_bits(length: int, label: str, password: int | None = None) -> np.ndarray:
+    seed = label.encode('utf-8')
+    if password is not None:
+        seed += struct.pack('>I', int(password) & 0xffffffff)
+    stream = bytearray()
+    counter = 0
+    byte_len = math.ceil(length / 8)
+    while len(stream) < byte_len:
+        stream.extend(hashlib.sha256(seed + struct.pack('>I', counter)).digest())
+        counter += 1
+    return _bytes_to_bits(bytes(stream[:byte_len]))[:length]
+
+
+def _xor_bits(bits: np.ndarray, mask: np.ndarray) -> np.ndarray:
+    hard = (np.asarray(bits, dtype=np.float32).reshape(-1) >= 0.5).astype(np.float32)
+    return np.abs(hard - mask.astype(np.float32)).astype(np.float32)
+
+
+def _fingerprint_rs() -> reedsolo.RSCodec:
+    return reedsolo.RSCodec(FINGERPRINT_RS_NSYM)
+
+
+def _build_fingerprint_frame(fp_hex: str) -> bytes:
+    fp = bytes.fromhex(fp_hex)
+    if len(fp) != 8:
+        raise ValueError('fingerprint must be 8 bytes')
+    head = bytes([2, 1, 0, len(fp)])
+    crc = zlib.crc32(head + fp) & 0xffffffff
+    frame = head + fp + crc.to_bytes(4, 'big')
+    if len(frame) != FINGERPRINT_FRAME_BYTES:
+        raise AssertionError('unexpected fingerprint frame length')
+    return frame
+
+
+def encode_fingerprint_payload(text: str, password: int | None) -> SpreadPayload:
+    fp = fingerprint_hex(text, password)
+    frame = _build_fingerprint_frame(fp)
+    encoded = bytes(_fingerprint_rs().encode(frame))
+    bits = _bytes_to_bits(encoded)
+    bits = _xor_bits(bits, _mask_bits(FINGERPRINT_BITS, 'spread-v2-whiten-fp'))
+    bits = _xor_bits(bits, _mask_bits(FINGERPRINT_BITS, 'spread-v2-key-fp', password))
+    return SpreadPayload(
+        mode=PAYLOAD_MODE_FINGERPRINT64,
+        protocol=PROTOCOL_FINGERPRINT64,
+        bits=bits,
+        text=f'fp:{fp}',
+        fingerprint=fp,
+        payload_bytes=8,
+    )
+
+
+def decode_fingerprint_logits(logits: np.ndarray, password: int | None) -> dict[str, Any]:
+    logits = np.asarray(logits, dtype=np.float32).reshape(-1)
+    if logits.size != FINGERPRINT_BITS:
+        raise ValueError(f'expected {FINGERPRINT_BITS} fingerprint bits, got {logits.size}')
+    key_mask = _mask_bits(FINGERPRINT_BITS, 'spread-v2-key-fp', password)
+    logits = logits * np.where(key_mask >= 0.5, -1.0, 1.0).astype(np.float32)
+    probs = 1.0 / (1.0 + np.exp(-logits))
+    keyed_bits = (probs >= 0.5).astype(np.float32)
+    raw_bits = _xor_bits(keyed_bits, _mask_bits(FINGERPRINT_BITS, 'spread-v2-whiten-fp'))
+    encoded = _bits_to_bytes(raw_bits)
+    decoded = _fingerprint_rs().decode(encoded)
+    frame = bytes(decoded[0] if isinstance(decoded, tuple) else decoded)
+    if len(frame) != FINGERPRINT_FRAME_BYTES:
+        raise ValueError('fingerprint frame length mismatch')
+    version, mode_id, _flags, fp_len = frame[0], frame[1], frame[2], frame[3]
+    if version != 2 or mode_id != 1 or fp_len != 8:
+        raise ValueError('unsupported fingerprint frame')
+    crc_expected = int.from_bytes(frame[12:16], 'big')
+    crc_actual = zlib.crc32(frame[:12]) & 0xffffffff
+    if crc_actual != crc_expected:
+        raise ValueError('fingerprint CRC mismatch')
+    fp = frame[4:12].hex()
+    confidence = float(np.mean(np.maximum(probs, 1.0 - probs)))
+    return {
+        'text': f'fp:{fp}',
+        'fingerprint': fp,
+        'payloadMode': PAYLOAD_MODE_FINGERPRINT64,
+        'protocol': PROTOCOL_FINGERPRINT64,
+        'payloadBytes': 8,
+        'probabilities': probs,
+        'bitConfidence': confidence,
+        'berEstimate': float(np.mean(np.minimum(probs, 1.0 - probs))),
+    }
+
+
+def encode_text16_payload(text: str, password: int | None) -> SpreadPayload:
+    try:
+        from blind_watermark.mlwm.codec import encode_text_payload
+    except ImportError:
+        from mlwm.codec import encode_text_payload
+
+    envelope = encode_text_payload(text, password=password)
+    return SpreadPayload(
+        mode=PAYLOAD_MODE_TEXT16,
+        protocol=PROTOCOL_TEXT16,
+        bits=envelope.bits,
+        text=text,
+        payload_bytes=len(envelope.text_bytes),
+    )
+
+
+def decode_text16_logits(logits: np.ndarray, password: int | None) -> dict[str, Any]:
+    try:
+        from blind_watermark.mlwm.codec import decode_payload_logits
+    except ImportError:
+        from mlwm.codec import decode_payload_logits
+
+    decoded = decode_payload_logits(logits, password=password)
+    decoded['payloadMode'] = PAYLOAD_MODE_TEXT16
+    decoded['protocol'] = PROTOCOL_TEXT16
+    decoded['berEstimate'] = float(
+        np.mean(np.minimum(decoded['probabilities'], 1.0 - decoded['probabilities']))
+    )
+    return decoded
+
+
+def build_payload(text: str, password: int | None, payload_mode: str = PAYLOAD_MODE_FINGERPRINT64) -> SpreadPayload:
+    if payload_mode == PAYLOAD_MODE_TEXT16:
+        return encode_text16_payload(text, password)
+    return encode_fingerprint_payload(text, password)
+
+
+def _haar_dwt_once(arr: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+    even_h = arr.shape[0] - (arr.shape[0] % 2)
+    even_w = arr.shape[1] - (arr.shape[1] % 2)
+    work = arr[:even_h, :even_w].astype(np.float32)
+    a = work[0::2, 0::2]
+    b = work[0::2, 1::2]
+    c = work[1::2, 0::2]
+    d = work[1::2, 1::2]
+    ll = (a + b + c + d) * 0.5
+    hl = (a - b + c - d) * 0.5
+    lh = (a + b - c - d) * 0.5
+    hh = (a - b - c + d) * 0.5
+    return ll, lh, hl, hh
+
+
+def _haar_idwt_once(ll: np.ndarray, lh: np.ndarray, hl: np.ndarray, hh: np.ndarray) -> np.ndarray:
+    out = np.zeros((ll.shape[0] * 2, ll.shape[1] * 2), dtype=np.float32)
+    out[0::2, 0::2] = (ll + hl + lh + hh) * 0.5
+    out[0::2, 1::2] = (ll - hl + lh - hh) * 0.5
+    out[1::2, 0::2] = (ll + hl - lh - hh) * 0.5
+    out[1::2, 1::2] = (ll - hl - lh + hh) * 0.5
+    return out
+
+
+def dwt2_level2(arr: np.ndarray) -> dict[str, np.ndarray]:
+    ll1, lh1, hl1, hh1 = _haar_dwt_once(arr)
+    ll2, lh2, hl2, hh2 = _haar_dwt_once(ll1)
+    return {
+        'll2': ll2,
+        'lh2': lh2,
+        'hl2': hl2,
+        'hh2': hh2,
+        'lh1': lh1,
+        'hl1': hl1,
+        'hh1': hh1,
+    }
+
+
+def idwt2_level2(bands: dict[str, np.ndarray]) -> np.ndarray:
+    ll1 = _haar_idwt_once(bands['ll2'], bands['lh2'], bands['hl2'], bands['hh2'])
+    return _haar_idwt_once(ll1, bands['lh1'], bands['hl1'], bands['hh1'])
+
+
+def _trim_for_level2(y: np.ndarray) -> tuple[np.ndarray, int, int]:
+    h = (y.shape[0] // 32) * 32
+    w = (y.shape[1] // 32) * 32
+    if h < 256 or w < 256:
+        raise ValueError('image is too small for frequency-spread-v2 watermark')
+    return y[:h, :w].astype(np.float32), h, w
+
+
+def _subband_mask(y: np.ndarray, sub_shape: tuple[int, int]) -> np.ndarray:
+    blur = cv2.GaussianBlur(y, (0, 0), 1.2)
+    mean = cv2.boxFilter(blur, cv2.CV_32F, (15, 15), normalize=True)
+    mean_sq = cv2.boxFilter(blur * blur, cv2.CV_32F, (15, 15), normalize=True)
+    variance = np.maximum(mean_sq - mean * mean, 0.0)
+    gx = cv2.Sobel(blur, cv2.CV_32F, 1, 0, ksize=3)
+    gy = cv2.Sobel(blur, cv2.CV_32F, 0, 1, ksize=3)
+    edge = np.sqrt(gx * gx + gy * gy)
+    texture = np.log1p(variance) * 0.62 + np.log1p(edge) * 0.38
+    brightness = np.clip(1.0 - np.abs(blur - 128.0) / 128.0, 0.0, 1.0)
+    texture_small = cv2.resize(texture, (sub_shape[1], sub_shape[0]), interpolation=cv2.INTER_AREA)
+    bright_small = cv2.resize(brightness, (sub_shape[1], sub_shape[0]), interpolation=cv2.INTER_AREA)
+    hb, wb = sub_shape[0] // BLOCK, sub_shape[1] // BLOCK
+    values: list[float] = []
+    for by in range(hb):
+        for bx in range(wb):
+            y0, x0 = by * BLOCK, bx * BLOCK
+            values.append(
+                float(texture_small[y0:y0 + BLOCK, x0:x0 + BLOCK].mean())
+                * (0.35 + 0.65 * float(bright_small[y0:y0 + BLOCK, x0:x0 + BLOCK].mean()))
+            )
+    mask = np.asarray(values, dtype=np.float32)
+    lo, hi = float(np.percentile(mask, 20)), float(np.percentile(mask, 97))
+    return np.clip((mask - lo) / max(hi - lo, 1e-6), 0.0, 1.0)
+
+
+def _mapping(
+    n_bits: int,
+    total_positions: int,
+    requested_reps: int,
+    password: int | None,
+) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int]:
+    # Do not clamp upward here: forcing reps to 2 when the image cannot hold
+    # n_bits * reps carriers would silently truncate the carrier list below.
+    reps = min(int(requested_reps), total_positions // n_bits)
+    if reps < 2:
+        raise ValueError('image is too small for frequency-spread-v2 payload')
+    rng = np.random.default_rng(_seed(password, 'spread-v2-map'))
+    carriers = rng.permutation(total_positions)[:n_bits * reps]
+    bit_order = rng.permutation(n_bits)
+    pair_indices = rng.integers(0, len(DCT_PAIRS), size=n_bits * reps)
+    signs = rng.choice(np.asarray([-1.0, 1.0], dtype=np.float32), size=n_bits * reps)
+    return carriers, bit_order, pair_indices, signs, reps
+
+
+def _embed_band(
+    bands: dict[str, np.ndarray],
+    band_names: tuple[str, str],
+    block_mask: np.ndarray,
+    bits: np.ndarray,
+    password: int | None,
+    profile: dict[str, float | int],
+) -> int:
+    band_h, band_w = bands[band_names[0]].shape
+    hb, wb = band_h // BLOCK, band_w // BLOCK
+    positions_per_band = hb * wb
+    total_positions = positions_per_band * len(band_names)
+    carriers, bit_order, pair_indices, signs, reps = _mapping(
+        len(bits), total_positions, int(profile['reps']), password
+    )
+    delta = float(profile['delta'])
+    mask_floor = float(profile['mask_floor'])
+    mask_gain = float(profile['mask_gain'])
+    hard_bits = (np.asarray(bits, dtype=np.float32).reshape(-1) >= 0.5).astype(np.float32)
+
+    for i, carrier in enumerate(carriers):
+        bit_index = int(bit_order[i // reps])
+        symbol = 1.0 if hard_bits[bit_index] >= 0.5 else -1.0
+        signed_symbol = symbol * float(signs[i])
+        band_index, block_index = divmod(int(carrier), positions_per_band)
+        by, bx = divmod(block_index, wb)
+        y0, x0 = by * BLOCK, bx * BLOCK
+        band = bands[band_names[band_index]]
+        dct_block = cv2.dct(band[y0:y0 + BLOCK, x0:x0 + BLOCK].astype(np.float32))
+        coeff_a, coeff_b = DCT_PAIRS[int(pair_indices[i])]
+        diff = float(dct_block[coeff_a] - dct_block[coeff_b])
+        local_delta = delta * (mask_floor + mask_gain * float(block_mask[block_index]))
+        if signed_symbol * diff < local_delta:
+            change = (signed_symbol * local_delta - diff) * 0.5
+            dct_block[coeff_a] += change
+            dct_block[coeff_b] -= change
+            band[y0:y0 + BLOCK, x0:x0 + BLOCK] = cv2.idct(dct_block)
+    return reps
+
+
+def _decode_band_logits(
+    bands: dict[str, np.ndarray],
+    band_names: tuple[str, str],
+    n_bits: int,
+    password: int | None,
+    profile: dict[str, float | int],
+) -> tuple[np.ndarray, float, int]:
+    band_h, band_w = bands[band_names[0]].shape
+    hb, wb = band_h // BLOCK, band_w // BLOCK
+    positions_per_band = hb * wb
+    total_positions = positions_per_band * len(band_names)
+    carriers, bit_order, pair_indices, signs, reps = _mapping(
+        n_bits, total_positions, int(profile['reps']), password
+    )
+    delta = float(profile['delta'])
+    scores: list[list[float]] = [[] for _ in range(n_bits)]
+    for i, carrier in enumerate(carriers):
+        bit_index = int(bit_order[i // reps])
+        band_index, block_index = divmod(int(carrier), positions_per_band)
+        by, bx = divmod(block_index, wb)
+        y0, x0 = by * BLOCK, bx * BLOCK
+        band = bands[band_names[band_index]]
+        dct_block = cv2.dct(band[y0:y0 + BLOCK, x0:x0 + BLOCK].astype(np.float32))
+        coeff_a, coeff_b = DCT_PAIRS[int(pair_indices[i])]
+        diff = float(dct_block[coeff_a] - dct_block[coeff_b])
+        scores[bit_index].append(float(signs[i]) * math.tanh(diff / max(delta * 0.9, 1e-6)))
+    logits = np.zeros(n_bits, dtype=np.float32)
+    confidence_samples = np.zeros(n_bits, dtype=np.float32)
+    for bit_index, bit_scores in enumerate(scores):
+        score = float(np.mean(bit_scores)) if bit_scores else 0.0
+        logits[bit_index] = score * 5.0
+        confidence_samples[bit_index] = abs(score)
+    return logits, float(np.mean(confidence_samples)), reps
+
+
+def embed_bgr(
+    base_bgr: np.ndarray,
+    text: str,
+    password: int | None,
+    quality: str,
+    payload_mode: str = PAYLOAD_MODE_FINGERPRINT64,
+) -> tuple[np.ndarray, dict[str, Any]]:
+    profile_name = quality if quality in PROFILES else 'light'
+    profile = PROFILES[profile_name]
+    payload = build_payload(text, password, payload_mode)
+    ycc = cv2.cvtColor(base_bgr[:, :, :3], cv2.COLOR_BGR2YCrCb)
+    y = ycc[:, :, 0].astype(np.float32)
+    work, hh, ww = _trim_for_level2(y)
+    bands = dwt2_level2(work)
+    band_names = ('lh2', 'hl2')
+    block_mask = _subband_mask(work, bands[band_names[0]].shape)
+    reps = _embed_band(bands, band_names, block_mask, payload.bits, password, profile)
+    reconstructed = idwt2_level2(bands)
+    diff = np.clip(
+        reconstructed - work,
+        -float(profile['max_y_delta']),
+        float(profile['max_y_delta']),
+    )
+    out = ycc.copy()
+    out_y = y.copy()
+    out_y[:hh, :ww] = np.clip(np.round(work + diff), 0, 255)
+    out[:, :, 0] = out_y.astype(np.uint8)
+    out_bgr = cv2.cvtColor(out, cv2.COLOR_YCrCb2BGR)
+    return out_bgr, {
+        'codec': MODEL_VERSION,
+        'protocol': payload.protocol,
+        'payloadMode': payload.mode,
+        'fingerprint': payload.fingerprint,
+        'payloadBytes': payload.payload_bytes,
+        'spreadDelta': float(profile['delta']),
+        'spreadReps': int(reps),
+        'spreadMaskFloor': float(profile['mask_floor']),
+        'spreadMaskGain': float(profile['mask_gain']),
+        'spreadBlocks': int((bands[band_names[0]].shape[0] // BLOCK) * (bands[band_names[0]].shape[1] // BLOCK) * 2),
+        'modelVersion': MODEL_VERSION,
+    }
+
+
+def decode_bgr(
+    base_bgr: np.ndarray,
+    password: int | None,
+    quality: str,
+    payload_mode: str = PAYLOAD_MODE_FINGERPRINT64,
+) -> dict[str, Any]:
+    profile_name = quality if quality in PROFILES else 'light'
+    profile = PROFILES[profile_name]
+    ycc = cv2.cvtColor(base_bgr[:, :, :3], cv2.COLOR_BGR2YCrCb)
+    work, _hh, _ww = _trim_for_level2(ycc[:, :, 0].astype(np.float32))
+    bands = dwt2_level2(work)
+    if payload_mode == PAYLOAD_MODE_TEXT16:
+        try:
+            from blind_watermark.mlwm.codec import PAYLOAD_BITS
+        except ImportError:
+            from mlwm.codec import PAYLOAD_BITS
+
+        logits, confidence, reps = _decode_band_logits(bands, ('lh2', 'hl2'), PAYLOAD_BITS, password, profile)
+        try:
+            decoded = decode_text16_logits(logits, password)
+        except Exception as exc:
+            code = 'wrong_password_or_corrupted_payload' if confidence >= 0.48 else 'no_signal'
+            raise SpreadV2DecodeError(
+                code,
+                'frequency-spread-v2 text16 payload checksum failed' if code != 'no_signal' else 'no reliable frequency-spread-v2 signal',
+                confidence=confidence,
+                attempts=[{'payloadMode': payload_mode, 'profile': profile_name, 'confidence': confidence}],
+            ) from exc
+    else:
+        logits, confidence, reps = _decode_band_logits(bands, ('lh2', 'hl2'), FINGERPRINT_BITS, password, profile)
+        try:
+            decoded = decode_fingerprint_logits(logits, password)
+        except Exception as exc:
+            code = 'wrong_password_or_corrupted_payload' if confidence >= 0.48 else 'no_signal'
+            raise SpreadV2DecodeError(
+                code,
+                'frequency-spread-v2 fingerprint payload checksum failed' if code != 'no_signal' else 'no reliable frequency-spread-v2 signal',
+                confidence=confidence,
+                attempts=[{'payloadMode': payload_mode, 'profile': profile_name, 'confidence': confidence}],
+            ) from exc
+    decoded['confidence'] = confidence
+    decoded['spreadReps'] = reps
+    decoded['spreadDelta'] = float(profile['delta'])
+    decoded['strategy'] = 'dwt2-dct-spread-spectrum'
+    decoded['modelVersion'] = MODEL_VERSION
+    return decoded
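For reference, the fingerprint64 payload above works out to exactly 192 carrier bits. A standalone re-derivation, mirroring `fingerprint_hex`, `_build_fingerprint_frame`, and the RS parameters in `spread_v2.py` (the text and password are illustrative):

```python
# Worked example of the 192-bit fingerprint frame built by spread_v2.
import hashlib
import struct
import zlib

import reedsolo

text, password = 'hello world', 42
fp = hashlib.sha256(b'lc-fp-v1' + struct.pack('>I', password & 0xffffffff)
                    + text.encode('utf-8')).digest()[:8]
head = bytes([2, 1, 0, len(fp)])                    # version, mode, flags, length
frame = head + fp + (zlib.crc32(head + fp) & 0xffffffff).to_bytes(4, 'big')
assert len(frame) == 16                             # FINGERPRINT_FRAME_BYTES
encoded = bytes(reedsolo.RSCodec(8).encode(frame))  # +8 parity bytes -> 24
assert len(encoded) * 8 == 192                      # FINGERPRINT_BITS
# spread_v2 then XORs these bits with a fixed whitening mask and a
# password-keyed mask before mapping them onto DCT coefficient pairs.
```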
diff --git a/blind_watermark/rwm_engine.py b/blind_watermark/rwm_engine.py
index 55c5bdc..ac5d5f4 100644
--- a/blind_watermark/rwm_engine.py
+++ b/blind_watermark/rwm_engine.py
@@ -82,28 +82,119 @@ # v2 compat constants
 _V2_RS_NSYM = 20
 _V2_REDUNDANCY = 3
 
-RWM_VERSION = '3.2.0'
+RWM_VERSION = '3.5.0'
 NEURAL_MAX_TEXT_BYTES = 16
+DEFAULT_PAYLOAD_MODE = 'fingerprint64'
+PAYLOAD_MODES = {'fingerprint64', 'text16'}
 
 NEURAL_PROFILES = {
-    'invisible': {
-        'residual_strength': 0.35,
+    'trace': {
+        'residual_strength': 0.12,
+        'codec': 'frequency_spread_v2',
+        'spread_delta': 3.0,
+        'spread_reps': 12,
+        'spread_mask_floor': 0.32,
+        'spread_mask_gain': 0.82,
+        'chroma_scale': 0.20,
+        'texture_floor': 0.20,
+        'texture_power': 1.45,
+        'max_abs_residual': 0.006,
+        'allow_self_check_failure': False,
+        'template_strength': 0.0,
+        'template_peaks': 0,
+        'sync_enabled': False,
+    },
+    'faint': {
+        'residual_strength': 0.24,
+        'codec': 'frequency_spread_v2',
+        'spread_delta': 3.4,
+        'spread_reps': 12,
+        'spread_mask_floor': 0.35,
+        'spread_mask_gain': 0.85,
+        'chroma_scale': 0.45,
+        'texture_floor': 0.45,
+        'texture_power': 1.20,
+        'max_abs_residual': 0.014,
+        'allow_self_check_failure': False,
+        'template_strength': 0.0,
+        'template_peaks': 0,
+        'sync_enabled': False,
+    },
+    'light': {
+        'residual_strength': 0.45,
+        'codec': 'frequency_spread_v2',
+        'spread_delta': 4.0,
+        'spread_reps': 12,
+        'spread_mask_floor': 0.38,
+        'spread_mask_gain': 0.88,
+        'chroma_scale': 0.80,
+        'texture_floor': 0.80,
+        'texture_power': 1.0,
+        'max_abs_residual': None,
+        'allow_self_check_failure': False,
         'template_strength': 0.0,
         'template_peaks': 0,
         'sync_enabled': False,
     },
     'balanced': {
-        'residual_strength': 0.55,
+        'residual_strength': 0.70,
+        'codec': 'frequency_spread_v2',
+        'spread_delta': 4.8,
+        'spread_reps': 14,
+        'spread_mask_floor': 0.40,
+        'spread_mask_gain': 0.90,
+        'chroma_scale': 0.80,
+        'texture_floor': 0.80,
+        'texture_power': 1.0,
+        'max_abs_residual': None,
+        'allow_self_check_failure': False,
+        'template_strength': 0.0,
+        'template_peaks': 0,
+        'sync_enabled': False,
+    },
+    'strong': {
+        'residual_strength': 0.80,
+        'chroma_scale': 0.90,
+        'texture_floor': 0.90,
+        'texture_power': 1.0,
+        'max_abs_residual': None,
+        'allow_self_check_failure': False,
         'template_strength': 0.0,
         'template_peaks': 0,
         'sync_enabled': False,
     },
     'robust': {
         'residual_strength': 1.0,
+        'chroma_scale': 1.0,
+        'texture_floor': 1.0,
+        'texture_power': 1.0,
+        'max_abs_residual': None,
+        'allow_self_check_failure': False,
         'template_strength': 0.0,
         'template_peaks': 0,
         'sync_enabled': False,
     },
 }
 
+SPREAD_DCT_PAIRS = [
+    ((2, 3), (3, 2)),
+    ((2, 4), (4, 2)),
+    ((3, 3), (2, 5)),
+    ((4, 3), (3, 4)),
+    ((1, 4), (4, 1)),
+    ((2, 2), (1, 5)),
+]
+SPREAD_PROFILE_ORDER = ['balanced', 'light', 'faint', 'trace']
+NEURAL_PROFILE_ALIASES = {
+    'invisible': 'light',
+}
+QUALITY_ALIASES = {
+    'trace': 'invisible',
+    'faint': 'invisible',
+    'light': 'invisible',
+    'invisible': 'invisible',
+    'balanced': 'balanced',
+    'strong': 'robust',
+    'robust': 'robust',
+}
+
 
 # ─── Helpers ──────────────────────────────────────────────────────────────────
@@ -363,7 +454,12 @@ def _count_available_blocks(h, w, margin_ratio):
 
 
 # ─── Public API ───────────────────────────────────────────────────────────────
+def _resolve_legacy_quality(quality):
+    return QUALITY_ALIASES.get(quality, 'balanced')
+
+
 def embed_watermark_legacy(img, text, password=1, quality='balanced', self_check=True):
+    quality = _resolve_legacy_quality(quality)
     if quality not in QUALITY:
         raise ValueError(f'quality must be one of {list(QUALITY.keys())}')
     cfg = QUALITY[quality]
@@ -441,6 +537,7 @@ def embed_watermark_legacy(img, text, password=1, quality='balanced', self_check
 
 
 def extract_watermark_legacy(img, password=1, quality='balanced'):
+    quality = _resolve_legacy_quality(quality)
     if quality not in QUALITY:
         quality = 'balanced'
@@ -523,11 +620,82 @@ def _try_all_presets(gray_ch, preset_order=None):
 
 
 def _resolve_neural_profile(quality):
+    quality = NEURAL_PROFILE_ALIASES.get(quality, quality)
     if quality in NEURAL_PROFILES:
         return quality
     return 'balanced'
 
 
+FAILURE_MESSAGES = {
+    'invalid_request': (
+        'The watermark request is incomplete or invalid.',
+        ['Check the selected image, password, engine, and output path.'],
+    ),
+    'input_unreadable': (
+        'The image could not be read.',
+        ['Confirm the file still exists and is a supported image format.'],
+    ),
+    'model_unavailable': (
+        'The neural watermark model is not available.',
+        ['Check whether the bundled model files and ONNX runtime are installed.'],
+    ),
+    'payload_too_long': (
+        f'Neural watermark text must be {NEURAL_MAX_TEXT_BYTES} UTF-8 bytes or shorter.',
+        ['Use Auto or Legacy for longer text, or shorten the neural payload.'],
+    ),
+    'no_signal': (
+        'No reliable watermark signal was found.',
+        ['Check whether this is the watermarked image.', 'Try Auto mode or Legacy compatibility mode.'],
+    ),
+    'wrong_password_or_corrupted_payload': (
+        'A watermark-like signal was found, but the payload did not pass validation.',
+        ['Confirm the password first.', 'If the password is correct, the image may have been compressed, cropped, or overlaid too heavily.'],
+    ),
+    'engine_mismatch': (
+        'The selected engine could not decode this image.',
+        ['Try Auto mode, or switch between Neural and Legacy.'],
+    ),
+    'unsupported_protocol': (
+        'The watermark protocol is not supported by this version.',
+        ['Update the app and helper, then try again.'],
+    ),
+}
+
+
+def _failure_payload(code, stage, *, error=None, engine='auto', fallback=False, diagnostics=None, confidence=None):
+    message, hints = FAILURE_MESSAGES.get(code, FAILURE_MESSAGES['invalid_request'])
+    payload = {
+        'ok': False,
+        'error': error or message,
+        'failureCode': code,
+        'failureStage': stage,
+        'userMessage': message,
+        'recoveryHints': hints,
+        'engine_used': engine,
+        'fallback_used': fallback,
+        'diagnostics': diagnostics or {},
+    }
+    if confidence is not None:
+        payload['confidence'] = confidence
+    return payload
+
+
+def _classify_extract_error(exc):
+    code = getattr(exc, 'failure_code', None)
+    if code:
+        return code
+    lower = str(exc).lower()
+    if 'onnxruntime' in lower or 'neural models are not ready' in lower or 'not installed' in lower:
+        return 'model_unavailable'
+    if 'protocol' in lower:
+        return 'unsupported_protocol'
+    if 'chien search' in lower or 'crc' in lower or 'checksum' in lower or 'reedsolo' in lower:
+        return 'wrong_password_or_corrupted_payload'
+    if 'no valid watermark' in lower or 'no reliable neural watermark signal' in lower:
+        return 'no_signal'
+    return 'engine_mismatch'
+
+
 def _center_square(image):
     h, w = image.shape[:2]
     edge = min(h, w)
@@ -536,6 +704,136 @@ def _center_square(image):
     return image[y0:y0 + edge, x0:x0 + edge]
 
 
+def _spread_seed(password, label='spread-v1'):
+    seed_material = label.encode('utf-8') + struct.pack('>I', int(password) & 0xffffffff)
+    digest = hashlib.sha256(seed_material).digest()
+    return int.from_bytes(digest[:8], 'little')
+
+
+def _spread_block_mask(y_channel, hb, wb):
+    gx = cv2.Sobel(y_channel, cv2.CV_32F, 1, 0, ksize=3)
+    gy = cv2.Sobel(y_channel, cv2.CV_32F, 0, 1, ksize=3)
+    edge = np.sqrt(gx * gx + gy * gy)
+    mask = np.zeros(hb * wb, dtype=np.float32)
+    k = 0
+    for by in range(hb):
+        for bx in range(wb):
+            y0, x0 = by * BLOCK, bx * BLOCK
+            block = y_channel[y0:y0 + BLOCK, x0:x0 + BLOCK]
+            edge_block = edge[y0:y0 + BLOCK, x0:x0 + BLOCK]
+            mask[k] = np.log1p(float(block.var())) * 0.65 + np.log1p(float(edge_block.mean())) * 0.35
+            k += 1
+    lo, hi = float(np.percentile(mask, 25)), float(np.percentile(mask, 95))
+    return np.clip((mask - lo) / max(hi - lo, 1e-6), 0.0, 1.0)
+
+
+def _spread_mapping(hb, wb, reps, password):
+    try:
+        from blind_watermark.mlwm.codec import PAYLOAD_BITS
+    except ImportError:
+        from mlwm.codec import PAYLOAD_BITS
+
+    total_blocks = hb * wb
+    reps = max(1, min(int(reps), total_blocks // PAYLOAD_BITS))
+    if reps < 3:
+        raise ValueError('image is too small for frequency-spread watermark')
+    rng = np.random.default_rng(_spread_seed(password))
+    block_indices = rng.permutation(total_blocks)[:PAYLOAD_BITS * reps].reshape(PAYLOAD_BITS, reps)
+    pair_indices = rng.integers(0, len(SPREAD_DCT_PAIRS), PAYLOAD_BITS * reps).reshape(PAYLOAD_BITS, reps)
+    return block_indices, pair_indices, reps
+
+
+def _dct_spread_embed_bgr(base_bgr, payload_bits, password, profile):
+    ycc = cv2.cvtColor(base_bgr[:, :, :3], cv2.COLOR_BGR2YCrCb)
+    y = ycc[:, :, 0].astype(np.float32)
+    h, w = y.shape
+    hb, wb = h // BLOCK, w // BLOCK
+    hh, ww = hb * BLOCK, wb * BLOCK
+    work = y[:hh, :ww].copy()
+    delta = float(profile.get('spread_delta', 3.0))
+    mask_floor = float(profile.get('spread_mask_floor', 0.35))
+    mask_gain = float(profile.get('spread_mask_gain', 0.85))
+    block_mask = _spread_block_mask(work, hb, wb)
+    block_indices, pair_indices, reps = _spread_mapping(hb, wb, profile.get('spread_reps', 12), password)
+
+    for bit_index, bit in enumerate(np.asarray(payload_bits, dtype=np.float32).reshape(-1)):
+        symbol = 1.0 if bit >= 0.5 else -1.0
+        for sample_index in range(reps):
+            block_index = int(block_indices[bit_index, sample_index])
+            by, bx = divmod(block_index, wb)
+            y0, x0 = by * BLOCK, bx * BLOCK
+            coeff_a, coeff_b = SPREAD_DCT_PAIRS[int(pair_indices[bit_index, sample_index])]
+            dct_block = cv2.dct(work[y0:y0 + BLOCK, x0:x0 + BLOCK] - 128.0)
+            diff = float(dct_block[coeff_a] - dct_block[coeff_b])
+            local_delta = delta * (mask_floor + mask_gain * float(block_mask[block_index]))
+            if symbol * diff < local_delta:
+                change = (symbol * local_delta - diff) / 2.0
+                dct_block[coeff_a] += change
+                dct_block[coeff_b] -= change
+                work[y0:y0 + BLOCK, x0:x0 + BLOCK] = cv2.idct(dct_block) + 128.0
+
+    out = ycc.copy()
+    out[:hh, :ww, 0] = np.clip(np.round(work), 0, 255).astype(np.uint8)
+    return cv2.cvtColor(out, cv2.COLOR_YCrCb2BGR), {
+        'spreadDelta': delta,
+        'spreadReps': reps,
+        'spreadMaskFloor': mask_floor,
+        'spreadMaskGain': mask_gain,
+        'spreadBlocks': int(hb * wb),
+    }
+
+
+def _dct_spread_decode_bgr(base_bgr, password, profile):
+    try:
+        from blind_watermark.mlwm.codec import PAYLOAD_BITS, decode_payload_logits
+        from blind_watermark.mlwm.infer import NeuralDecodeError
+    except ImportError:
+        from mlwm.codec import PAYLOAD_BITS, decode_payload_logits
+        from mlwm.infer import NeuralDecodeError
+
+    ycc = cv2.cvtColor(base_bgr[:, :, :3], cv2.COLOR_BGR2YCrCb)
+    y = ycc[:, :, 0].astype(np.float32)
+    h, w = y.shape
+    hb, wb = h // BLOCK, w // BLOCK
+    hh, ww = hb * BLOCK, wb * BLOCK
+    work = y[:hh, :ww]
+    delta = float(profile.get('spread_delta', 3.0))
+    block_indices, pair_indices, reps = _spread_mapping(hb, wb, profile.get('spread_reps', 12), password)
+    logits = np.zeros(PAYLOAD_BITS, dtype=np.float32)
+    confidence_samples = []
+
+    for bit_index in range(PAYLOAD_BITS):
+        scores = []
+        for sample_index in range(reps):
+            block_index = int(block_indices[bit_index, sample_index])
+            by, bx = divmod(block_index, wb)
+            y0, x0 = by * BLOCK, bx * BLOCK
+            coeff_a, coeff_b = SPREAD_DCT_PAIRS[int(pair_indices[bit_index, sample_index])]
+            dct_block = cv2.dct(work[y0:y0 + BLOCK, x0:x0 + BLOCK] - 128.0)
+            diff = float(dct_block[coeff_a] - dct_block[coeff_b])
+            scores.append(np.tanh(diff / max(delta * 1.1, 1e-6)))
+        score = float(np.mean(scores))
+        logits[bit_index] = score * 5.0
+        confidence_samples.append(abs(score))
+
+    avg_confidence = float(np.mean(confidence_samples))
+    try:
+        decoded = decode_payload_logits(logits, password=password)
+    except Exception as exc:
+        failure_code = 'wrong_password_or_corrupted_payload' if avg_confidence >= 0.18 else 'no_signal'
+        raise NeuralDecodeError(
+            failure_code,
+            'frequency-spread payload checksum failed' if failure_code != 'no_signal' else 'no reliable frequency-spread watermark signal',
+            confidence=avg_confidence,
+            attempts=[{'profile': profile.get('name'), 'confidence': avg_confidence, 'reps': reps}],
+        ) from exc
+    decoded['confidence'] = avg_confidence
+    decoded['strategy'] = 'dct-spread-spectrum'
+    decoded['spreadReps'] = reps
+    decoded['spreadDelta'] = delta
+    return decoded
+
+
 def _rectify_neural_image(img, password):
     gray_orig = cv2.cvtColor(img[:, :, :3], cv2.COLOR_BGR2GRAY).astype(np.float64)
     h, w = gray_orig.shape
@@ -592,28 +890,81 @@ def _legacy_extract_response(text, quality, requested_engine, fallback_reason=No
     }
 
 
-def _neural_embed_impl(img, text, password=1, quality='balanced', models_dir=None, self_check=True):
-    if len(text.encode('utf-8')) > NEURAL_MAX_TEXT_BYTES:
-        raise ValueError(f'Neural watermark supports up to {NEURAL_MAX_TEXT_BYTES} UTF-8 bytes')
-    try:
-        from blind_watermark.mlwm.codec import encode_text_payload
-        from blind_watermark.mlwm.infer import NeuralRuntimeUnavailable, neural_encode_residual, apply_neural_residual
-    except ImportError:
-        from mlwm.codec import encode_text_payload
-        from mlwm.infer import NeuralRuntimeUnavailable, neural_encode_residual, apply_neural_residual
+def _resolve_payload_mode(payload_mode):
+    return payload_mode if payload_mode in PAYLOAD_MODES else DEFAULT_PAYLOAD_MODE
+
+
+def _neural_embed_impl(
+    img,
+    text,
+    password=1,
+    quality='balanced',
+    models_dir=None,
+    self_check=True,
+    payload_mode=DEFAULT_PAYLOAD_MODE,
+):
+    payload_mode = _resolve_payload_mode(payload_mode)
+    if payload_mode == 'text16' and len(text.encode('utf-8')) > NEURAL_MAX_TEXT_BYTES:
+        raise ValueError(f'Neural text16 watermark supports up to {NEURAL_MAX_TEXT_BYTES} UTF-8 bytes')
     profile_name = _resolve_neural_profile(quality)
-    profile = NEURAL_PROFILES[profile_name]
+    profile = dict(NEURAL_PROFILES[profile_name], name=profile_name)
     alpha = img[:, :, 3].copy() if img.ndim == 3 and img.shape[2] == 4 else None
     base = img[:, :, :3] if alpha is not None else img.copy()
-    payload = encode_text_payload(text, password=password)
-    rgb = cv2.cvtColor(base, cv2.COLOR_BGR2RGB)
-    try:
-        encoded = neural_encode_residual(rgb, payload.bits, models_dir=models_dir, use_cuda=False)
-    except NeuralRuntimeUnavailable:
-        raise
-    rgb_watermarked = apply_neural_residual(rgb, encoded['residual'], strength=profile['residual_strength'])
-    out = cv2.cvtColor(rgb_watermarked, cv2.COLOR_RGB2BGR).astype(np.float64)
+    payload = None
+    spread_diagnostics = {}
+    if profile.get('codec') == 'frequency_spread_v2':
+        try:
+            from blind_watermark.mlwm import spread_v2
+        except ImportError:
+            from mlwm import spread_v2
+
+        out_bgr, spread_diagnostics = spread_v2.embed_bgr(
+            base,
+            text,
+            password,
+            profile_name,
+            payload_mode=payload_mode,
+        )
+        encoded = {'modelVersion': spread_diagnostics.get('modelVersion', 'frequency-spread-v2')}
+        out = out_bgr.astype(np.float64)
+        payload = type('PayloadInfo', (), {
+            'protocol': spread_diagnostics.get('protocol'),
+            'password_protected': True,
+            'text_bytes': text.encode('utf-8') if payload_mode == 'text16' else bytes.fromhex(spread_diagnostics.get('fingerprint') or ''),
+        })()
+    elif profile.get('codec') == 'frequency_spread':
+        try:
+            from blind_watermark.mlwm.codec import encode_text_payload
+        except ImportError:
+            from mlwm.codec import encode_text_payload
+        payload = encode_text_payload(text, password=password)
+        out_bgr, spread_diagnostics = _dct_spread_embed_bgr(base, payload.bits, password, profile)
+        encoded = {'modelVersion': 'frequency-spread-v1'}
+        out = out_bgr.astype(np.float64)
+    else:
+        try:
+            from blind_watermark.mlwm.codec import encode_text_payload
+        except ImportError:
+            from mlwm.codec import encode_text_payload
+        payload = encode_text_payload(text, password=password)
+        try:
+            from blind_watermark.mlwm.infer import NeuralRuntimeUnavailable, neural_encode_residual, apply_neural_residual
+        except ImportError:
+            from mlwm.infer import NeuralRuntimeUnavailable, neural_encode_residual, apply_neural_residual
+
+        rgb = cv2.cvtColor(base, cv2.COLOR_BGR2RGB)
+        try:
+            encoded = neural_encode_residual(rgb, payload.bits, models_dir=models_dir, use_cuda=False)
+        except NeuralRuntimeUnavailable:
+            raise
+        rgb_watermarked = apply_neural_residual(
+            rgb,
+            encoded['residual'],
+            strength=profile['residual_strength'],
+            profile=profile,
+        )
+        out = cv2.cvtColor(rgb_watermarked, cv2.COLOR_RGB2BGR).astype(np.float64)
 
     if profile.get('sync_enabled', False) and profile.get('template_strength', 0.0) > 0.0:
         gray = cv2.cvtColor(out.astype(np.uint8), cv2.COLOR_BGR2GRAY).astype(np.float64)
@@ -628,20 +979,43 @@ def _neural_embed_impl(img, text, password=1, quality='balanced', models_dir=Non
 
     diagnostics = {
         'profile': profile_name,
+        'codec': profile.get('codec', 'neural_residual'),
         'protocol': payload.protocol,
         'passwordProtected': payload.password_protected,
-        'visualStrength': profile['residual_strength'],
+        'visualStrength': profile.get('spread_delta', profile['residual_strength']),
+        'chromaScale': profile.get('chroma_scale'),
+        'textureFloor': profile.get('texture_floor'),
+        'selfCheckRequired': not bool(profile.get('allow_self_check_failure', False)),
+        'selfCheckPassed': None,
         'payloadBytes': len(payload.text_bytes),
         'modelVersion': encoded.get('modelVersion'),
         'modelsDir': models_dir,
+        'warnings': [],
     }
+    diagnostics.update(spread_diagnostics)
 
     if self_check:
-        extracted = _neural_extract_impl(out, password=password, quality=quality, models_dir=models_dir)
-        if not extracted.get('ok') or extracted.get('wm') != text:
-            raise ValueError('Neural self-check mismatch')
-
-    return {
+        try:
+            extracted = _neural_extract_impl(
+                out,
+                password=password,
+                quality=quality,
+                models_dir=models_dir,
+                payload_mode=payload_mode,
+            )
+            expected_text = f"fp:{spread_diagnostics.get('fingerprint')}" if payload_mode == 'fingerprint64' else text
+            if not extracted.get('ok') or extracted.get('wm') != expected_text:
+                raise ValueError('Neural self-check mismatch')
+            diagnostics['selfCheckPassed'] = True
+        except Exception as exc:
+            diagnostics['selfCheckPassed'] = False
+            diagnostics['selfCheckError'] = str(exc)
+            if profile.get('allow_self_check_failure', False):
+                diagnostics['warnings'].append('self-check-failed-risk-accepted')
+            else:
+                raise
+
+    response = {
         'ok': True,
         'image': out,
         'engine_used': 'neural',
@@ -650,15 +1024,45 @@ def _neural_embed_impl(img, text, password=1, quality='balanced', models_dir=Non
         'confidence': 1.0,
         'diagnostics': diagnostics,
     }
+    if diagnostics['warnings']:
+        response['warningCode'] = diagnostics['warnings'][0]
+        response['warnings'] = diagnostics['warnings']
+    return response
+
+
+def _neural_extract_impl(
+    img,
+    password=1,
+    quality='balanced',
+    models_dir=None,
+    payload_mode=None,
+):
+    profile_name = _resolve_neural_profile(quality)
+    spread_order = [profile_name] if profile_name in SPREAD_PROFILE_ORDER else []
+    spread_order.extend([name for name in SPREAD_PROFILE_ORDER if name not in spread_order])
+    spread_errors = []
+    requested_payload_modes = []
+    if payload_mode in PAYLOAD_MODES:
+        requested_payload_modes.append(payload_mode)
+    requested_payload_modes.extend([mode for mode in ['fingerprint64', 'text16'] if mode not in requested_payload_modes])
+    for spread_name in spread_order:
+        for mode in requested_payload_modes:
+            try:
+                return _frequency_spread_v2_extract_response(img, password, spread_name, mode, models_dir)
+            except Exception as exc:
+                spread_errors.append(exc)
+
+    for spread_name in spread_order:
+        try:
+            return _frequency_spread_extract_response(img, password, spread_name, models_dir)
+        except Exception as exc:
+            spread_errors.append(exc)
 
-
-def _neural_extract_impl(img, password=1, quality='balanced', models_dir=None):
     try:
         from blind_watermark.mlwm.infer import NeuralRuntimeUnavailable, neural_decode_views
     except ImportError:
         from mlwm.infer import NeuralRuntimeUnavailable, neural_decode_views
 
-    profile_name = _resolve_neural_profile(quality)
     profile = NEURAL_PROFILES[profile_name]
     if profile.get('sync_enabled', False):
         corrected, geo = _rectify_neural_image(img[:, :, :3] if img.ndim == 3 else img, password)
@@ -669,12 +1073,28 @@ def _neural_extract_impl(img, password=1, quality='balanced', models_dir=None):
     try:
         decoded = neural_decode_views(views, models_dir=models_dir, use_cuda=False, password=password)
     except NeuralRuntimeUnavailable:
+        if spread_errors:
+            raise spread_errors[0]
+        raise
+    except Exception:
+        if spread_errors:
+            no_signal_errors = [
+                err for err in spread_errors
+                if getattr(err, 'failure_code', None) == 'no_signal'
+            ]
+            raise (no_signal_errors[0] if no_signal_errors else spread_errors[0])
         raise
 
     confidence = float(decoded.get('confidence', decoded.get('bitConfidence', 0.0)))
+    text = decoded.get('text', '')
+    if not text:
+        raise ValueError('decoded empty payload')
+    payload_bytes = decoded.get('payloadBytes')
+    if isinstance(payload_bytes, (bytes, bytearray)):
+        payload_bytes = len(payload_bytes)
     return {
         'ok': True,
-        'wm': decoded['text'],
+        'wm': text,
         'engine_used': 'neural',
         'fallback_used': False,
         'confidence': confidence,
@@ -694,28 +1114,134 @@ def _neural_extract_impl(img, password=1, quality='balanced', models_dir=None):
     }
 
 
-def embed_watermark(img, text, password=1, quality='balanced', engine='auto', models_dir=None, self_check=True):
+def _frequency_spread_v2_extract_response(img, password, profile_name, payload_mode, models_dir=None):
+    try:
+        from blind_watermark.mlwm import spread_v2
+        from blind_watermark.mlwm.infer import NeuralDecodeError
+    except ImportError:
+        from mlwm import spread_v2
+        from mlwm.infer import NeuralDecodeError
+
+    try:
+        decoded = spread_v2.decode_bgr(
+            img[:, :, :3] if img.ndim == 3 else img,
+            password,
+            profile_name,
payload_mode=payload_mode, + ) + except Exception as exc: + failure_code = getattr(exc, 'failure_code', 'wrong_password_or_corrupted_payload') + raise NeuralDecodeError( + failure_code, + str(exc), + confidence=getattr(exc, 'confidence', None), + attempts=getattr(exc, 'attempts', None) or [{'profile': profile_name, 'payloadMode': payload_mode, 'error': str(exc)}], + ) from exc + confidence = float(decoded.get('confidence', decoded.get('bitConfidence', 0.0))) + text = decoded.get('text', '') + if not text: + raise ValueError('decoded empty payload') + payload_bytes = decoded.get('payloadBytes') + if isinstance(payload_bytes, (bytes, bytearray)): + payload_bytes = len(payload_bytes) + return { + 'ok': True, + 'wm': text, + 'fingerprint': decoded.get('fingerprint'), + 'payloadMode': decoded.get('payloadMode', payload_mode), + 'engine_used': 'neural', + 'fallback_used': False, + 'confidence': confidence, + 'diagnostics': { + 'profile': profile_name, + 'codec': 'frequency_spread_v2', + 'protocol': decoded.get('protocol', 'spread-v2'), + 'payloadMode': decoded.get('payloadMode', payload_mode), + 'fingerprint': decoded.get('fingerprint'), + 'passwordProtected': True, + 'payloadBytes': payload_bytes, + 'bitConfidence': float(decoded.get('bitConfidence', 0.0)), + 'berEstimate': decoded.get('berEstimate'), + 'spreadConfidence': confidence, + 'decodeStrategy': decoded.get('strategy'), + 'spreadReps': decoded.get('spreadReps'), + 'spreadDelta': decoded.get('spreadDelta'), + 'geometricCorrection': {'angle': 0.0, 'scale': 1.0, 'confidence': 0.0, 'peaks': 0, 'syncEnabled': False}, + 'modelVersion': decoded.get('modelVersion', 'frequency-spread-v2'), + 'modelsDir': models_dir, + }, + } + + +def _frequency_spread_extract_response(img, password, profile_name, models_dir=None): + profile = dict(NEURAL_PROFILES[profile_name], name=profile_name) + decoded = _dct_spread_decode_bgr(img[:, :, :3] if img.ndim == 3 else img, password, profile) + confidence = float(decoded.get('confidence', decoded.get('bitConfidence', 0.0))) + text = decoded.get('text', '') + if not text: + raise ValueError('decoded empty payload') + return { + 'ok': True, + 'wm': text, + 'engine_used': 'neural', + 'fallback_used': False, + 'confidence': confidence, + 'diagnostics': { + 'profile': profile_name, + 'codec': 'frequency_spread', + 'protocol': decoded.get('protocol', 'keyed-v2'), + 'passwordProtected': bool(decoded.get('passwordProtected', True)), + 'visualStrength': profile.get('spread_delta', profile['residual_strength']), + 'bitConfidence': float(decoded.get('bitConfidence', 0.0)), + 'spreadConfidence': confidence, + 'decodeStrategy': decoded.get('strategy'), + 'spreadReps': decoded.get('spreadReps'), + 'spreadDelta': decoded.get('spreadDelta'), + 'geometricCorrection': {'angle': 0.0, 'scale': 1.0, 'confidence': 0.0, 'peaks': 0, 'syncEnabled': False}, + 'modelVersion': 'frequency-spread-v1', + 'modelsDir': models_dir, + }, + } + + +def embed_watermark( + img, + text, + password=1, + quality='balanced', + engine='auto', + models_dir=None, + self_check=True, + payload_mode=DEFAULT_PAYLOAD_MODE, +): requested_engine = engine if engine in ('auto', 'legacy', 'neural') else 'auto' + payload_mode = _resolve_payload_mode(payload_mode) text_bytes = text.encode('utf-8') if requested_engine == 'legacy': - legacy = embed_watermark_legacy(img, text, password=password, quality=quality, self_check=self_check) - return _legacy_embed_response(legacy, quality, requested_engine) + legacy_quality = _resolve_legacy_quality(quality) + legacy = 
embed_watermark_legacy(img, text, password=password, quality=legacy_quality, self_check=self_check) + return _legacy_embed_response(legacy, legacy_quality, requested_engine) - neural_allowed = len(text_bytes) <= NEURAL_MAX_TEXT_BYTES + profile_name = _resolve_neural_profile(quality) + profile_codec = NEURAL_PROFILES[profile_name].get('codec') + neural_allowed = ( + profile_codec == 'frequency_spread_v2' and payload_mode == 'fingerprint64' + ) or len(text_bytes) <= NEURAL_MAX_TEXT_BYTES large_enough = min(img.shape[:2]) >= 512 if hasattr(img, 'shape') else False if requested_engine == 'neural' and not neural_allowed: - return { - 'ok': False, - 'error': f'Neural watermark supports up to {NEURAL_MAX_TEXT_BYTES} UTF-8 bytes', - 'engine_used': 'neural', - 'fallback_used': False, - } + return _failure_payload( + 'payload_too_long', + 'embed', + error=f'Neural text16 watermark supports up to {NEURAL_MAX_TEXT_BYTES} UTF-8 bytes', + engine='neural', + ) if requested_engine == 'auto' and (not neural_allowed or not large_enough): reason = 'payload-too-long' if not neural_allowed else 'image-too-small-for-neural-auto' - legacy = embed_watermark_legacy(img, text, password=password, quality=quality, self_check=self_check) - return _legacy_embed_response(legacy, quality, requested_engine, reason) + legacy_quality = _resolve_legacy_quality(quality) + legacy = embed_watermark_legacy(img, text, password=password, quality=legacy_quality, self_check=self_check) + return _legacy_embed_response(legacy, legacy_quality, requested_engine, reason) try: return _neural_embed_impl( @@ -725,49 +1251,73 @@ def embed_watermark(img, text, password=1, quality='balanced', engine='auto', mo quality=quality, models_dir=models_dir, self_check=self_check, + payload_mode=payload_mode, ) except Exception as exc: if requested_engine == 'neural': - return { - 'ok': False, - 'error': str(exc), - 'engine_used': 'neural', - 'fallback_used': False, - } - legacy = embed_watermark_legacy(img, text, password=password, quality=quality, self_check=self_check) - return _legacy_embed_response(legacy, quality, requested_engine, f'neural-failed:{exc}') - - -def extract_watermark(img, password=1, quality='balanced', engine='auto', models_dir=None): + code = 'model_unavailable' if 'neural models are not ready' in str(exc).lower() else 'engine_mismatch' + return _failure_payload(code, 'embed', error=str(exc), engine='neural') + legacy_quality = _resolve_legacy_quality(quality) + legacy = embed_watermark_legacy(img, text, password=password, quality=legacy_quality, self_check=self_check) + return _legacy_embed_response(legacy, legacy_quality, requested_engine, f'neural-failed:{exc}') + + +def extract_watermark( + img, + password=1, + quality='balanced', + engine='auto', + models_dir=None, + payload_mode=None, +): requested_engine = engine if engine in ('auto', 'legacy', 'neural') else 'auto' if requested_engine == 'legacy': - text = extract_watermark_legacy(img, password=password, quality=quality) - return _legacy_extract_response(text, quality, requested_engine) + legacy_quality = _resolve_legacy_quality(quality) + text = extract_watermark_legacy(img, password=password, quality=legacy_quality) + return _legacy_extract_response(text, legacy_quality, requested_engine) try: - return _neural_extract_impl(img, password=password, quality=quality, models_dir=models_dir) + return _neural_extract_impl( + img, + password=password, + quality=quality, + models_dir=models_dir, + payload_mode=payload_mode, + ) except Exception as exc: if requested_engine == 
'neural': - return { - 'ok': False, - 'error': str(exc), - 'engine_used': 'neural', - 'fallback_used': False, - } + code = _classify_extract_error(exc) + return _failure_payload( + code, + 'extract', + error=str(exc), + engine='neural', + confidence=getattr(exc, 'confidence', None), + diagnostics={ + 'neuralError': str(exc), + 'attempts': getattr(exc, 'attempts', None), + }, + ) try: - text = extract_watermark_legacy(img, password=password, quality=quality) - return _legacy_extract_response(text, quality, requested_engine, f'neural-failed:{exc}') + legacy_quality = _resolve_legacy_quality(quality) + text = extract_watermark_legacy(img, password=password, quality=legacy_quality) + return _legacy_extract_response(text, legacy_quality, requested_engine, f'neural-failed:{exc}') except Exception as legacy_exc: - return { - 'ok': False, - 'error': str(legacy_exc), - 'engine_used': 'legacy', - 'fallback_used': True, - 'diagnostics': { + code = _classify_extract_error(exc) + diagnostics = { 'fallbackReason': f'neural-failed:{exc}', + 'neuralError': str(exc), 'legacyError': str(legacy_exc), - }, } + return _failure_payload( + code, + 'extract', + error=str(exc), + engine=requested_engine, + fallback=True, + confidence=getattr(exc, 'confidence', None), + diagnostics=diagnostics, + ) def _try_extract_single_with_seed(gray, sd, quality_name, rs_nsym, redundancy, diff --git a/blind_watermark/tests/test_mlwm_runtime.py b/blind_watermark/tests/test_mlwm_runtime.py index 21a6b53..f2a18ab 100644 --- a/blind_watermark/tests/test_mlwm_runtime.py +++ b/blind_watermark/tests/test_mlwm_runtime.py @@ -9,11 +9,26 @@ def test_alpha1_neural_profiles_do_not_inject_untrained_sync_template(self): self.assertFalse(profile.get('sync_enabled', False)) self.assertEqual(profile.get('template_strength'), 0.0) - def test_neural_profiles_expose_three_visual_strengths(self): - self.assertEqual(set(rwm_engine.NEURAL_PROFILES.keys()), {'invisible', 'balanced', 'robust'}) - self.assertLess(rwm_engine.NEURAL_PROFILES['invisible']['residual_strength'], rwm_engine.NEURAL_PROFILES['balanced']['residual_strength']) + def test_neural_profiles_expose_visual_strength_ladder(self): + self.assertEqual(set(rwm_engine.NEURAL_PROFILES.keys()), {'trace', 'faint', 'light', 'balanced', 'strong', 'robust'}) + self.assertEqual(rwm_engine._resolve_neural_profile('invisible'), 'light') + self.assertLess(rwm_engine.NEURAL_PROFILES['trace']['residual_strength'], rwm_engine.NEURAL_PROFILES['faint']['residual_strength']) + self.assertLess(rwm_engine.NEURAL_PROFILES['faint']['residual_strength'], rwm_engine.NEURAL_PROFILES['light']['residual_strength']) + self.assertLess(rwm_engine.NEURAL_PROFILES['light']['residual_strength'], rwm_engine.NEURAL_PROFILES['balanced']['residual_strength']) + self.assertLess(rwm_engine.NEURAL_PROFILES['balanced']['residual_strength'], rwm_engine.NEURAL_PROFILES['strong']['residual_strength']) + self.assertLess(rwm_engine.NEURAL_PROFILES['strong']['residual_strength'], rwm_engine.NEURAL_PROFILES['robust']['residual_strength']) self.assertLess(rwm_engine.NEURAL_PROFILES['balanced']['residual_strength'], rwm_engine.NEURAL_PROFILES['robust']['residual_strength']) + def test_only_risk_profiles_allow_self_check_failure(self): + for name in ['trace', 'faint', 'light', 'balanced', 'strong', 'robust']: + self.assertFalse(rwm_engine.NEURAL_PROFILES[name]['allow_self_check_failure']) + + def test_low_visibility_profiles_use_frequency_spread_codec(self): + for name in ['trace', 'faint', 'light', 'balanced']: + 
self.assertEqual(rwm_engine.NEURAL_PROFILES[name].get('codec'), 'frequency_spread_v2') + for name in ['strong', 'robust']: + self.assertNotEqual(rwm_engine.NEURAL_PROFILES[name].get('codec'), 'frequency_spread_v2') + if __name__ == '__main__': unittest.main() diff --git a/blind_watermark/tests/test_spread_v2.py b/blind_watermark/tests/test_spread_v2.py new file mode 100644 index 0000000..c84f870 --- /dev/null +++ b/blind_watermark/tests/test_spread_v2.py @@ -0,0 +1,47 @@ +import unittest + +import cv2 +import numpy as np + +from blind_watermark.mlwm import spread_v2 + + +class SpreadV2Tests(unittest.TestCase): + def test_fingerprint_payload_roundtrips_and_rejects_wrong_password(self): + payload = spread_v2.encode_fingerprint_payload('hello world', 123) + logits = np.where(payload.bits >= 0.5, 8.0, -8.0).astype(np.float32) + + decoded = spread_v2.decode_fingerprint_logits(logits, 123) + + self.assertEqual(decoded['text'], payload.text) + self.assertEqual(decoded['fingerprint'], payload.fingerprint) + with self.assertRaises(Exception): + spread_v2.decode_fingerprint_logits(logits, 124) + + def test_haar_level2_roundtrip(self): + rng = np.random.default_rng(7) + arr = rng.normal(128.0, 30.0, size=(256, 320)).astype(np.float32) + + bands = spread_v2.dwt2_level2(arr) + restored = spread_v2.idwt2_level2(bands) + + self.assertTrue(np.allclose(arr, restored, atol=1e-5)) + + def test_embed_extract_clean_fingerprint(self): + yy, xx = np.mgrid[0:640, 0:768].astype(np.float32) + y = 128 + 45 * np.sin(xx / 19.0) + 35 * np.cos(yy / 23.0) + img = cv2.merge([ + np.clip(y + 12 * np.sin(yy / 7.0), 0, 255).astype(np.uint8), + np.clip(y + 18 * np.cos(xx / 11.0), 0, 255).astype(np.uint8), + np.clip(y, 0, 255).astype(np.uint8), + ]) + + watermarked, diagnostics = spread_v2.embed_bgr(img, 'sample text', 123, 'light', 'fingerprint64') + decoded = spread_v2.decode_bgr(watermarked, 123, 'light', 'fingerprint64') + + self.assertEqual(decoded['text'], f"fp:{diagnostics['fingerprint']}") + self.assertGreater(decoded['confidence'], 0.45) + + +if __name__ == '__main__': + unittest.main() diff --git a/eslint.config.mjs b/eslint.config.mjs index aff5d3f..b33b9a6 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -6,7 +6,21 @@ import eslintPluginReactHooks from 'eslint-plugin-react-hooks' import eslintPluginReactRefresh from 'eslint-plugin-react-refresh' export default defineConfig( - { ignores: ['**/node_modules', '**/dist', '**/out'] }, + { + ignores: [ + '**/node_modules/**', + '**/dist/**', + '**/out/**', + '**/build/**', + '**/.venv*/**', + '**/artifacts/**', + '**/data/**', + '**/tmp/**', + '**/monitor_samples/**', + '**/blind_watermark/**', + '**/resources/bin/**' + ] + }, tseslint.configs.recommended, eslintPluginReact.configs.flat.recommended, eslintPluginReact.configs.flat['jsx-runtime'], diff --git a/src/main/index.ts b/src/main/index.ts index e2dd381..cfa20a0 100644 --- a/src/main/index.ts +++ b/src/main/index.ts @@ -1,5 +1,5 @@ import { app, shell, BrowserWindow, ipcMain, dialog, clipboard } from 'electron' -import { join, extname, normalize } from 'path' +import { join, extname, normalize, parse } from 'path' import { writeFile, readFile } from 'fs/promises' import { existsSync } from 'fs' import { spawn } from 'child_process' @@ -8,8 +8,17 @@ import icon from '../../resources/icon.png?asset' let storeCache: Record = {} const ALLOWED_EXTERNAL_PROTOCOLS = new Set(['https:', 'http:']) -const ALLOWED_QUALITY = new Set(['invisible', 'balanced', 'robust']) +const ALLOWED_QUALITY = new Set([ + 'trace', + 
'faint',
+  'light',
+  'invisible',
+  'balanced',
+  'strong',
+  'robust'
+])
 const ALLOWED_ENGINE = new Set(['auto', 'legacy', 'neural'])
+const ALLOWED_PAYLOAD_MODE = new Set(['fingerprint64', 'text16'])
 const ALLOWED_IMG_EXT = new Set(['.png', '.jpg', '.jpeg', '.bmp', '.tiff', '.tif'])
 const MAX_TEXT_PAYLOAD = 2_000_000
 const MAX_PDF_HTML_B64 = 10_000_000
@@ -27,8 +36,13 @@ function isSafeExternalUrl(raw: string): boolean {
 function isSafeAppNavigation(raw: string): boolean {
   try {
     const url = new URL(raw)
-    if (url.protocol === 'file:' || url.protocol === 'devtools:' || url.protocol === 'app:') return true
-    if (is.dev && (url.protocol === 'http:' || url.protocol === 'https:') && (url.hostname === 'localhost' || url.hostname === '127.0.0.1')) {
+    if (url.protocol === 'file:' || url.protocol === 'devtools:' || url.protocol === 'app:')
+      return true
+    if (
+      is.dev &&
+      (url.protocol === 'http:' || url.protocol === 'https:') &&
+      (url.hostname === 'localhost' || url.hostname === '127.0.0.1')
+    ) {
       return true
     }
     return false
@@ -38,7 +52,12 @@ function isSafeAppNavigation(raw: string): boolean {
 }
 
 function isValidPathInput(value: unknown): value is string {
-  return typeof value === 'string' && value.trim().length > 0 && value.length <= 4096 && !value.includes('\0')
+  return (
+    typeof value === 'string' &&
+    value.trim().length > 0 &&
+    value.length <= 4096 &&
+    !value.includes('\0')
+  )
 }
 
 function isAllowedImagePath(pathLike: string): boolean {
@@ -90,8 +109,8 @@ function createWindow(): void {
       preload: join(__dirname, '../preload/index.js'),
       sandbox: false,
       contextIsolation: true,
-      nodeIntegration: false,
-    },
+      nodeIntegration: false
+    }
   })
 
   mainWindow.on('ready-to-show', () => {
@@ -148,6 +167,20 @@ function findBundledModelsDir(): string {
 }
 
 async function findPythonExe(): Promise<string | null> {
+  const localCandidates =
+    process.platform === 'win32'
+      ? [
+          join(app.getAppPath(), '.venv-ml', 'Scripts', 'python.exe'),
+          join(app.getAppPath(), '.venv', 'Scripts', 'python.exe')
+        ]
+      : [
+          join(app.getAppPath(), '.venv-ml', 'bin', 'python'),
+          join(app.getAppPath(), '.venv', 'bin', 'python')
+        ]
+  for (const candidate of localCandidates) {
+    if (existsSync(candidate)) return candidate
+  }
+
   if (process.platform === 'win32') {
     for (const cmd of ['python', 'python3', 'py']) {
       const resolved = await new Promise((resolve) => {
@@ -180,8 +213,17 @@ async function findPythonExe(): Promise<string | null> {
 }
 
 type BwmRunner = { mode: 'exe'; exePath: string } | { mode: 'python'; python: string }
+type BackendStatus = Record<string, unknown> & { mode?: 'exe' | 'python'; python: string | null }
+
+let backendStatusCache: BackendStatus | null = null
+let backendWarmupPromise: Promise<BackendStatus> | null = null
+const batchProcesses = new Map<string, { cancel: () => boolean; cancelled: boolean }>()
 
 async function getRunner(): Promise<BwmRunner | null> {
+  if (is.dev) {
+    const python = await findPythonExe()
+    if (python) return { mode: 'python', python }
+  }
   const exePath = findBundledExe()
   if (exePath) return { mode: 'exe', exePath }
   const python = await findPythonExe()
@@ -189,20 +231,29 @@ async function getRunner(): Promise<BwmRunner | null> {
 }
 
 function bwmScriptPath(): string {
-  return is.dev ? join(app.getAppPath(), 'blind_watermark', 'bwm_helper.py') : join(process.resourcesPath, 'bwm_helper.py')
+  return is.dev
+    ? 
join(app.getAppPath(), 'blind_watermark', 'bwm_helper.py') + : join(process.resourcesPath, 'bwm_helper.py') } -function runBwm(runner: BwmRunner, opts: Record): Promise> { +function runBwm( + runner: BwmRunner, + opts: Record +): Promise> { return new Promise((resolve) => { const spawnEnv = { ...process.env, PYTHONIOENCODING: 'utf-8', PYTHONUTF8: '1' } const child = runner.mode === 'exe' - ? spawn(runner.exePath, ['--json-stdin'], { windowsHide: true, timeout: 180_000, env: spawnEnv }) + ? spawn(runner.exePath, ['--json-stdin'], { + windowsHide: true, + timeout: 180_000, + env: spawnEnv + }) : spawn(runner.python, [bwmScriptPath(), '--json-stdin'], { windowsHide: true, timeout: 180_000, env: spawnEnv, - cwd: is.dev ? join(app.getAppPath(), 'blind_watermark') : process.resourcesPath, + cwd: is.dev ? join(app.getAppPath(), 'blind_watermark') : process.resourcesPath }) let stdout = '' @@ -221,7 +272,7 @@ function runBwm(runner: BwmRunner, opts: Record): Promise): Promise { + const modelsDir = findBundledModelsDir() + const runner = await getRunner() + if (!runner) return { ok: false, mode: 'python', python: null, error: 'no-runner' } + const result = await runBwm(runner, { mode, models_dir: modelsDir }) + const normalized = { + ...result, + mode: runner.mode, + python: runner.mode === 'python' ? runner.python : null + } + backendStatusCache = normalized + return normalized +} + +function warmupBwmBackend(force = false): Promise { + if (!force && backendStatusCache) return Promise.resolve(backendStatusCache) + if (!force && backendWarmupPromise) return backendWarmupPromise + backendWarmupPromise = probeBwmBackend('warmup').finally(() => { + backendWarmupPromise = null + }) + return backendWarmupPromise +} + +function uniqueBatchOutputPath( + inputPath: string, + outputDir: string, + usedNames: Set +): string { + const parsed = parse(inputPath) + const safeStem = + parsed.name + .split('') + .map((char) => { + const code = char.charCodeAt(0) + return code <= 31 || '<>:"/\\|?*'.includes(char) ? '_' : char + }) + .join('') + .slice(0, 180) || 'image' + let candidate = `${safeStem}_wm.png` + let suffix = 2 + while (usedNames.has(candidate.toLowerCase()) || existsSync(join(outputDir, candidate))) { + candidate = `${safeStem}_wm_${suffix}.png` + suffix += 1 + } + usedNames.add(candidate.toLowerCase()) + return join(outputDir, candidate) +} + +function runBwmBatch( + runner: BwmRunner, + opts: Record & { batch_id: string }, + sender: Electron.WebContents +): Promise> { + return new Promise((resolve) => { + const spawnEnv = { ...process.env, PYTHONIOENCODING: 'utf-8', PYTHONUTF8: '1' } + const child = + runner.mode === 'exe' + ? spawn(runner.exePath, ['--json-stdin'], { + windowsHide: true, + timeout: 30 * 60_000, + env: spawnEnv + }) + : spawn(runner.python, [bwmScriptPath(), '--json-stdin'], { + windowsHide: true, + timeout: 30 * 60_000, + env: spawnEnv, + cwd: is.dev ? 
join(app.getAppPath(), 'blind_watermark') : process.resourcesPath + }) + const batchState = { + cancelled: false, + cancel: (): boolean => { + batchState.cancelled = true + return child.kill() + } + } + batchProcesses.set(opts.batch_id, batchState) + + let stdoutBuffer = '' + let stderr = '' + let finalPayload: Record | null = null + const consumeLine = (line: string): void => { + if (!line.trim()) return + try { + const payload = JSON.parse(line) as Record + if (payload.event === 'progress') { + sender.send('image-wm:batch-progress', payload) + } else if (payload.event === 'complete') { + finalPayload = payload + sender.send('image-wm:batch-progress', payload) + } else { + finalPayload = payload + } + } catch { + stderr += line + '\n' + } + } + + child.stdout?.on('data', (d: Buffer) => { + stdoutBuffer += d.toString('utf8') + let newline = stdoutBuffer.indexOf('\n') + while (newline >= 0) { + const line = stdoutBuffer.slice(0, newline) + stdoutBuffer = stdoutBuffer.slice(newline + 1) + consumeLine(line) + newline = stdoutBuffer.indexOf('\n') + } + }) + child.stderr?.on('data', (d: Buffer) => { + stderr += d.toString('utf8') + }) + child.on('close', (code) => { + batchProcesses.delete(opts.batch_id) + consumeLine(stdoutBuffer) + if (finalPayload) return resolve(finalPayload) + resolve({ + ok: false, + batchId: opts.batch_id, + failureCode: batchState.cancelled ? 'batch_cancelled' : 'batch_partial_failure', + error: stderr.trim() || `Process exited with code ${code}` + }) + }) + child.on('error', (err) => { + batchProcesses.delete(opts.batch_id) + resolve({ + ok: false, + batchId: opts.batch_id, + failureCode: 'model_unavailable', + error: err.message + }) + }) + + child.stdin?.write(Buffer.from(JSON.stringify(opts), 'utf-8')) + child.stdin?.end() + }) +} + app.whenReady().then(async () => { await loadStore() @@ -269,35 +456,39 @@ app.whenReady().then(async () => { ipcMain.handle('clipboard:read', () => clipboard.readText()) - ipcMain.handle('dialog:saveFile', async (_event, content: string, ext: 'json' | 'pdf', defaultName: string) => { - if (typeof content !== 'string' || content.length > MAX_TEXT_PAYLOAD) { - return { success: false, error: 'Invalid content payload' } - } - if (typeof defaultName !== 'string' || defaultName.length === 0 || defaultName.length > 255) { - return { success: false, error: 'Invalid file name' } - } - const win = BrowserWindow.getFocusedWindow() - if (!win) return { success: false, error: 'No window' } - - const filters = ext === 'json' - ? [{ name: 'JSON Report', extensions: ['json'] }] - : [{ name: 'PDF Report', extensions: ['pdf'] }] - - const { canceled, filePath } = await dialog.showSaveDialog(win, { - title: 'Save Report', - defaultPath: defaultName, - filters, - }) - if (canceled || !filePath) return { success: false, error: 'Canceled' } + ipcMain.handle( + 'dialog:saveFile', + async (_event, content: string, ext: 'json' | 'pdf', defaultName: string) => { + if (typeof content !== 'string' || content.length > MAX_TEXT_PAYLOAD) { + return { success: false, error: 'Invalid content payload' } + } + if (typeof defaultName !== 'string' || defaultName.length === 0 || defaultName.length > 255) { + return { success: false, error: 'Invalid file name' } + } + const win = BrowserWindow.getFocusedWindow() + if (!win) return { success: false, error: 'No window' } + + const filters = + ext === 'json' + ? 
[{ name: 'JSON Report', extensions: ['json'] }] + : [{ name: 'PDF Report', extensions: ['pdf'] }] + + const { canceled, filePath } = await dialog.showSaveDialog(win, { + title: 'Save Report', + defaultPath: defaultName, + filters + }) + if (canceled || !filePath) return { success: false, error: 'Canceled' } - try { - if (ext === 'json') await writeFile(filePath, content, 'utf-8') - else await writeFile(filePath, Buffer.from(content, 'base64')) - return { success: true, filePath } - } catch (err) { - return { success: false, error: String(err) } + try { + if (ext === 'json') await writeFile(filePath, content, 'utf-8') + else await writeFile(filePath, Buffer.from(content, 'base64')) + return { success: true, filePath } + } catch (err) { + return { success: false, error: String(err) } + } } - }) + ) ipcMain.handle('dialog:saveCSV', async (_event, content: string, defaultName: string) => { if (typeof content !== 'string' || content.length > MAX_TEXT_PAYLOAD) { @@ -312,7 +503,7 @@ app.whenReady().then(async () => { const { canceled, filePath } = await dialog.showSaveDialog(win, { title: 'Save CSV', defaultPath: defaultName, - filters: [{ name: 'CSV File', extensions: ['csv'] }], + filters: [{ name: 'CSV File', extensions: ['csv'] }] }) if (canceled || !filePath) return { success: false, error: 'Canceled' } @@ -337,7 +528,7 @@ app.whenReady().then(async () => { const { canceled, filePath } = await dialog.showSaveDialog(win, { title: 'Save PDF', defaultPath: defaultName, - filters: [{ name: 'PDF Document', extensions: ['pdf'] }], + filters: [{ name: 'PDF Document', extensions: ['pdf'] }] }) if (canceled || !filePath) return { success: false, error: 'Canceled' } @@ -345,13 +536,13 @@ app.whenReady().then(async () => { show: false, width: 900, height: 1200, - webPreferences: { sandbox: true }, + webPreferences: { sandbox: true } }) try { await pdfWin.loadURL(`data:text/html;base64,${htmlB64}`) const pdfData = await pdfWin.webContents.printToPDF({ printBackground: true, - pageSize: 'A4', + pageSize: 'A4' }) await writeFile(filePath, pdfData) return { success: true, filePath } @@ -363,16 +554,15 @@ app.whenReady().then(async () => { }) ipcMain.handle('image-wm:checkPython', async () => { - const modelsDir = findBundledModelsDir() - const exePath = findBundledExe() - if (exePath) { - const result = await runBwm({ mode: 'exe', exePath }, { mode: 'check', models_dir: modelsDir }) - return { ...result, mode: 'exe', python: null } - } - const python = await findPythonExe() - if (!python) return { ok: false, mode: 'python', python: null, error: 'no-runner' } - const result = await runBwm({ mode: 'python', python }, { mode: 'check', models_dir: modelsDir }) - return { ...result, mode: 'python', python } + return warmupBwmBackend(false) + }) + + ipcMain.handle('image-wm:backendStatus', async () => { + return backendStatusCache ?? warmupBwmBackend(false) + }) + + ipcMain.handle('image-wm:warmup', async () => { + return warmupBwmBackend(true) }) ipcMain.handle('image-wm:openImage', async () => { @@ -383,26 +573,62 @@ app.whenReady().then(async () => { properties: ['openFile'], filters: [ { name: 'Images', extensions: ['png', 'jpg', 'jpeg', 'bmp', 'tiff', 'tif'] }, - { name: 'All files', extensions: ['*'] }, - ], + { name: 'All files', extensions: ['*'] } + ] }) return canceled || filePaths.length === 0 ? 
null : filePaths[0] }) + ipcMain.handle('image-wm:openImages', async () => { + const win = BrowserWindow.getFocusedWindow() + if (!win) return [] + const { canceled, filePaths } = await dialog.showOpenDialog(win, { + title: 'Open images', + properties: ['openFile', 'multiSelections'], + filters: [ + { name: 'Images', extensions: ['png', 'jpg', 'jpeg', 'bmp', 'tiff', 'tif'] }, + { name: 'All files', extensions: ['*'] } + ] + }) + return canceled ? [] : filePaths.filter(isAllowedImagePath) + }) + ipcMain.handle('image-wm:saveImage', async () => { const win = BrowserWindow.getFocusedWindow() if (!win) return null const { canceled, filePath } = await dialog.showSaveDialog(win, { title: 'Save watermarked image', defaultPath: 'watermarked.png', - filters: [{ name: 'PNG Image', extensions: ['png'] }], + filters: [{ name: 'PNG Image', extensions: ['png'] }] }) return canceled || !filePath ? null : filePath }) + ipcMain.handle('image-wm:chooseOutputDir', async () => { + const win = BrowserWindow.getFocusedWindow() + if (!win) return null + const { canceled, filePaths } = await dialog.showOpenDialog(win, { + title: 'Choose output folder', + properties: ['openDirectory', 'createDirectory'] + }) + return canceled || filePaths.length === 0 ? null : filePaths[0] + }) + ipcMain.handle( 'image-wm:embed', - async (_e, opts: { inputPath: string; outputPath: string; wmText: string; password: number; quality: string; engine: string }) => { + async ( + _e, + opts: { + inputPath: string + outputPath: string + wmText: string + password: number + quality: string + engine: string + payloadMode?: string + } + ) => { + const payloadMode = opts?.payloadMode ?? 'fingerprint64' if ( !opts || !isValidPathInput(opts.inputPath) || @@ -415,6 +641,7 @@ app.whenReady().then(async () => { opts.password > 2_147_483_647 || !ALLOWED_QUALITY.has(opts.quality) || !ALLOWED_ENGINE.has(opts.engine) || + !ALLOWED_PAYLOAD_MODE.has(payloadMode) || !isAllowedImagePath(opts.inputPath) || !isAllowedImagePath(opts.outputPath) ) { @@ -430,14 +657,25 @@ app.whenReady().then(async () => { password: opts.password, quality: opts.quality, engine: opts.engine, - models_dir: findBundledModelsDir(), + payload_mode: payloadMode, + models_dir: findBundledModelsDir() }) - }, + } ) ipcMain.handle( 'image-wm:extract', - async (_e, opts: { inputPath: string; password: number; quality: string; engine: string }) => { + async ( + _e, + opts: { + inputPath: string + password: number + quality: string + engine: string + payloadMode?: string + } + ) => { + const payloadMode = opts?.payloadMode if ( !opts || !isValidPathInput(opts.inputPath) || @@ -446,6 +684,7 @@ app.whenReady().then(async () => { opts.password > 2_147_483_647 || !ALLOWED_QUALITY.has(opts.quality) || !ALLOWED_ENGINE.has(opts.engine) || + (payloadMode != null && !ALLOWED_PAYLOAD_MODE.has(payloadMode)) || !isAllowedImagePath(opts.inputPath) ) { return { ok: false, error: 'Invalid image extract parameters' } @@ -456,14 +695,114 @@ app.whenReady().then(async () => { mode: 'extract', input: opts.inputPath, password: opts.password, - quality: opts.quality, + quality: opts.quality || 'balanced', engine: opts.engine, - models_dir: findBundledModelsDir(), + payload_mode: payloadMode, + models_dir: findBundledModelsDir() }) - }, + } ) + ipcMain.handle( + 'image-wm:embedBatch', + async ( + e, + opts: { + inputPaths: string[] + outputDir: string + wmText: string + password: number + quality: string + engine: string + payloadMode?: string + selfCheckMode?: 'sampled' | 'all' | 'off' + } + ) => { + const 
payloadMode = opts?.payloadMode ?? 'fingerprint64'
+      if (
+        !opts ||
+        !Array.isArray(opts.inputPaths) ||
+        opts.inputPaths.length === 0 ||
+        opts.inputPaths.length > 500 ||
+        !isValidPathInput(opts.outputDir) ||
+        typeof opts.wmText !== 'string' ||
+        opts.wmText.trim().length === 0 ||
+        opts.wmText.length > 4096 ||
+        !Number.isInteger(opts.password) ||
+        opts.password < 0 ||
+        opts.password > 2_147_483_647 ||
+        !ALLOWED_QUALITY.has(opts.quality) ||
+        !ALLOWED_ENGINE.has(opts.engine) ||
+        !ALLOWED_PAYLOAD_MODE.has(payloadMode)
+      ) {
+        return {
+          ok: false,
+          error: 'Invalid image batch embed parameters',
+          failureCode: 'invalid_request'
+        }
+      }
+      const runner = await getRunner()
+      if (!runner)
+        return {
+          ok: false,
+          error: 'No runnable image watermark backend available',
+          failureCode: 'model_unavailable'
+        }
+      const batchId = `batch-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`
+      const usedNames = new Set<string>()
+      const lastIndex = opts.inputPaths.length - 1
+      const mode = opts.selfCheckMode ?? 'sampled'
+      const items = opts.inputPaths.map((inputPath, index) => {
+        const selfCheck =
+          mode === 'all' ||
+          (mode === 'sampled' && (index === 0 || index === lastIndex || index % 10 === 0))
+        return {
+          input: inputPath,
+          output: uniqueBatchOutputPath(inputPath, opts.outputDir, usedNames),
+          self_check: selfCheck
+        }
+      })
+      if (
+        !items.every(
+          (item) =>
+            isValidPathInput(item.input) &&
+            isAllowedImagePath(item.input) &&
+            isAllowedImagePath(item.output)
+        )
+      ) {
+        return { ok: false, error: 'Invalid image path in batch', failureCode: 'invalid_request' }
+      }
+      return runBwmBatch(
+        runner,
+        {
+          mode: 'embed_batch',
+          batch_id: batchId,
+          items,
+          wm: opts.wmText,
+          password: opts.password,
+          quality: opts.quality,
+          engine: opts.engine,
+          payload_mode: payloadMode,
+          models_dir: findBundledModelsDir()
+        },
+        e.sender
+      )
+    }
+  )
+
+  ipcMain.handle('image-wm:cancelBatch', async (_e, batchId: string) => {
+    if (typeof batchId !== 'string') return false
+    const batch = batchProcesses.get(batchId)
+    if (!batch) return false
+    const killed = batch.cancel()
+    batchProcesses.delete(batchId)
+    return killed
+  })
+
   createWindow()
+  setTimeout(() => {
+    void warmupBwmBackend(true)
+  }, 800)
 
   app.on('activate', () => {
     if (BrowserWindow.getAllWindows().length === 0) createWindow()
diff --git a/src/preload/index.d.ts b/src/preload/index.d.ts
index 627668a..82724ec 100644
--- a/src/preload/index.d.ts
+++ b/src/preload/index.d.ts
@@ -3,7 +3,8 @@ import { ElectronAPI } from '@electron-toolkit/preload'
 type SaveResult = { success: boolean; filePath?: string; error?: string }
 type RunnerMode = 'exe' | 'python'
 type WatermarkEngine = 'auto' | 'legacy' | 'neural'
-type WatermarkQuality = 'invisible' | 'balanced' | 'robust'
+type WatermarkQuality = 'trace' | 'faint' | 'light' | 'invisible' | 'balanced' | 'strong' | 'robust'
+type ImagePayloadMode = 'fingerprint64' | 'text16'
 
 type ImageWmCheckResult = {
   ok: boolean
@@ -23,20 +24,62 @@ type ImageWmEmbedResult = {
   output?: string
   quality?: string
   error?: string
+  failureCode?: string
+  userMessage?: string
+  recoveryHints?: string[]
+  warningCode?: string
+  warnings?: string[]
   engineUsed?: WatermarkEngine | 'legacy'
   fallbackUsed?: boolean
   confidence?: number
   diagnostics?: Record<string, unknown>
+  payloadMode?: ImagePayloadMode
+  fingerprint?: string
+  codec?: string
+  berEstimate?: number
+  spreadConfidence?: number
 }
 
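These embed-result fields mirror the dict built on the Python side (failure keys presumably come from the engine's _failure_payload helper). A hedged sketch of how a Python caller might branch on the same shape; the file path and watermark text are illustrative:

import cv2
from rwm_engine import embed_watermark

img = cv2.imread('input.png')  # assumed to exist
result = embed_watermark(img, 'team-7 asset', password=42,
                         quality='light', payload_mode='fingerprint64')
if result['ok']:
    diag = result['diagnostics']
    print(diag.get('codec'), diag.get('fingerprint'), diag.get('selfCheckPassed'))
    for warning in result.get('warnings', []):
        print('warning:', warning)
else:
    print(result.get('failureCode'), result.get('error'))
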
 type ImageWmExtractResult = {
   ok: boolean
   wm?: string
   error?: string
+  failureCode?: string
+  userMessage?: string
+  recoveryHints?: string[]
   engineUsed?: WatermarkEngine | 'legacy'
   fallbackUsed?: boolean
   confidence?: number
   diagnostics?: Record<string, unknown>
+  payloadMode?: ImagePayloadMode
+  fingerprint?: string
+  codec?: string
+  berEstimate?: number
+  spreadConfidence?: number
+}
+
+type ImageWmBatchResult = {
+  ok: boolean
+  batchId?: string
+  total?: number
+  successCount?: number
+  failureCount?: number
+  failureCode?: string | null
+  error?: string
+  results?: ImageWmEmbedResult[]
+}
+
+type ImageWmBatchProgress = {
+  event?: 'progress' | 'complete'
+  batchId: string
+  index?: number
+  total?: number
+  input?: string
+  output?: string
+  status?: 'running' | 'done' | 'failed'
+  progress?: number
+  failureCode?: string
+  error?: string
 }
 
 interface AppAPI {
@@ -51,8 +94,12 @@ interface AppAPI {
   storeSet: (key: string, value: unknown) => Promise<void>
   storeGetAll: () => Promise<Record<string, unknown>>
   imageWmCheckPython: () => Promise<ImageWmCheckResult>
+  imageWmBackendStatus: () => Promise<ImageWmCheckResult>
+  imageWmWarmup: () => Promise<ImageWmCheckResult>
   imageWmOpenImage: () => Promise<string | null>
+  imageWmOpenImages: () => Promise<string[]>
   imageWmSaveImage: () => Promise<string | null>
+  imageWmChooseOutputDir: () => Promise<string | null>
   imageWmEmbed: (opts: {
     inputPath: string
     outputPath: string
     wmText: string
     password: number
     quality: WatermarkQuality
     engine: WatermarkEngine
+    payloadMode?: ImagePayloadMode
   }) => Promise<ImageWmEmbedResult>
   imageWmExtract: (opts: {
     inputPath: string
     password: number
     quality: WatermarkQuality
     engine: WatermarkEngine
+    payloadMode?: ImagePayloadMode
   }) => Promise<ImageWmExtractResult>
+  imageWmEmbedBatch: (opts: {
+    inputPaths: string[]
+    outputDir: string
+    wmText: string
+    password: number
+    quality: WatermarkQuality
+    engine: WatermarkEngine
+    payloadMode?: ImagePayloadMode
+    selfCheckMode?: 'sampled' | 'all' | 'off'
+  }) => Promise<ImageWmBatchResult>
+  imageWmCancelBatch: (batchId: string) => Promise<boolean>
+  onImageWmBatchProgress: (callback: (payload: ImageWmBatchProgress) => void) => () => void
 }
 
 declare global {
diff --git a/src/preload/index.ts b/src/preload/index.ts
index 41fa633..631ba46 100644
--- a/src/preload/index.ts
+++ b/src/preload/index.ts
@@ -4,7 +4,8 @@ import { electronAPI } from '@electron-toolkit/preload'
 type SaveResult = { success: boolean; filePath?: string; error?: string }
 type RunnerMode = 'exe' | 'python'
 type WatermarkEngine = 'auto' | 'legacy' | 'neural'
-type WatermarkQuality = 'invisible' | 'balanced' | 'robust'
+type WatermarkQuality = 'trace' | 'faint' | 'light' | 'invisible' | 'balanced' | 'strong' | 'robust'
+type ImagePayloadMode = 'fingerprint64' | 'text16'
 
 type ImageWmCheckResult = {
   ok: boolean
@@ -24,20 +25,62 @@ type ImageWmEmbedResult = {
   output?: string
   quality?: string
   error?: string
+  failureCode?: string
+  userMessage?: string
+  recoveryHints?: string[]
+  warningCode?: string
+  warnings?: string[]
   engineUsed?: WatermarkEngine | 'legacy'
   fallbackUsed?: boolean
   confidence?: number
   diagnostics?: Record<string, unknown>
+  payloadMode?: ImagePayloadMode
+  fingerprint?: string
+  codec?: string
+  berEstimate?: number
+  spreadConfidence?: number
 }
 
 type ImageWmExtractResult = {
   ok: boolean
   wm?: string
   error?: string
+  failureCode?: string
+  userMessage?: string
+  recoveryHints?: string[]
   engineUsed?: WatermarkEngine | 'legacy'
   fallbackUsed?: boolean
   confidence?: number
   diagnostics?: Record<string, unknown>
+  payloadMode?: ImagePayloadMode
+  fingerprint?: string
+  codec?: string
+  berEstimate?: number
+  spreadConfidence?: number
+}
+
+type ImageWmBatchResult = {
+  ok: boolean
+  batchId?: string
+  total?: number
+  successCount?: number
+  failureCount?: number
+  failureCode?: string | null
+  error?: string
+  results?: ImageWmEmbedResult[]
+}
+
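The progress type that follows describes rows that arrive as newline-delimited JSON from the helper process; runBwmBatch in the main process splits stdout on newlines and forwards 'progress'/'complete' events to the renderer. A sketch of the emitting side (item list and batch id are assumed values):

import json
import sys

def emit(payload: dict) -> None:
    # one JSON object per line; the main process splits on '\n'
    sys.stdout.write(json.dumps(payload) + '\n')
    sys.stdout.flush()

items = [{'input': 'a.png'}, {'input': 'b.png'}]  # illustrative
batch_id = 'batch-demo'
for index, item in enumerate(items):
    emit({'event': 'progress', 'batchId': batch_id, 'index': index,
          'total': len(items), 'input': item['input'],
          'status': 'running', 'progress': index / len(items)})
emit({'event': 'complete', 'batchId': batch_id, 'ok': True,
      'total': len(items), 'successCount': len(items), 'failureCount': 0})
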
+type ImageWmBatchProgress = {
+  event?: 'progress' | 'complete'
+  batchId: string
+  index?: number
+  total?: number
+  input?: string
+  output?: string
+  status?: 'running' | 'done' | 'failed'
+  progress?: number
+  failureCode?: string
+  error?: string
 }
 
 const api = {
@@ -52,12 +95,19 @@ const api = {
     ipcRenderer.invoke('dialog:exportPDF', htmlB64, defaultName),
   readClipboard: (): Promise<string> => ipcRenderer.invoke('clipboard:read'),
   storeGet: (key: string): Promise<unknown> => ipcRenderer.invoke('store:get', key),
-  storeSet: (key: string, value: unknown): Promise<void> => ipcRenderer.invoke('store:set', key, value),
+  storeSet: (key: string, value: unknown): Promise<void> =>
+    ipcRenderer.invoke('store:set', key, value),
   storeGetAll: (): Promise<Record<string, unknown>> => ipcRenderer.invoke('store:getAll'),
   imageWmCheckPython: (): Promise<ImageWmCheckResult> => ipcRenderer.invoke('image-wm:checkPython'),
+  imageWmBackendStatus: (): Promise<ImageWmCheckResult> =>
+    ipcRenderer.invoke('image-wm:backendStatus'),
+  imageWmWarmup: (): Promise<ImageWmCheckResult> => ipcRenderer.invoke('image-wm:warmup'),
   imageWmOpenImage: (): Promise<string | null> => ipcRenderer.invoke('image-wm:openImage'),
+  imageWmOpenImages: (): Promise<string[]> => ipcRenderer.invoke('image-wm:openImages'),
   imageWmSaveImage: (): Promise<string | null> => ipcRenderer.invoke('image-wm:saveImage'),
+  imageWmChooseOutputDir: (): Promise<string | null> =>
+    ipcRenderer.invoke('image-wm:chooseOutputDir'),
   imageWmEmbed: (opts: {
     inputPath: string
     outputPath: string
     wmText: string
     password: number
     quality: WatermarkQuality
     engine: WatermarkEngine
+    payloadMode?: ImagePayloadMode
   }): Promise<ImageWmEmbedResult> => ipcRenderer.invoke('image-wm:embed', opts),
   imageWmExtract: (opts: {
     inputPath: string
     password: number
     quality: WatermarkQuality
     engine: WatermarkEngine
+    payloadMode?: ImagePayloadMode
   }): Promise<ImageWmExtractResult> => ipcRenderer.invoke('image-wm:extract', opts),
+  imageWmEmbedBatch: (opts: {
+    inputPaths: string[]
+    outputDir: string
+    wmText: string
+    password: number
+    quality: WatermarkQuality
+    engine: WatermarkEngine
+    payloadMode?: ImagePayloadMode
+    selfCheckMode?: 'sampled' | 'all' | 'off'
+  }): Promise<ImageWmBatchResult> => ipcRenderer.invoke('image-wm:embedBatch', opts),
+  imageWmCancelBatch: (batchId: string): Promise<boolean> =>
+    ipcRenderer.invoke('image-wm:cancelBatch', batchId),
+  onImageWmBatchProgress: (callback: (payload: ImageWmBatchProgress) => void): (() => void) => {
+    const listener = (_event: Electron.IpcRendererEvent, payload: ImageWmBatchProgress): void =>
+      callback(payload)
+    ipcRenderer.on('image-wm:batch-progress', listener)
+    return (): void => {
+      ipcRenderer.removeListener('image-wm:batch-progress', listener)
+    }
+  }
 }
 
 if (process.contextIsolated) {
diff --git a/src/renderer/src/components/ImageWatermarkPanel.tsx b/src/renderer/src/components/ImageWatermarkPanel.tsx
index 7a14ea3..362a317 100644
--- a/src/renderer/src/components/ImageWatermarkPanel.tsx
+++ b/src/renderer/src/components/ImageWatermarkPanel.tsx
@@ -1,4 +1,4 @@
-import { useState, useEffect, useCallback } from 'react'
+import { useState, useEffect, useCallback, type ReactElement, type ReactNode } from 'react'
 import { motion, AnimatePresence } from 'framer-motion'
 import {
   FolderOpen,
@@ -12,23 +12,61 @@ import {
   CircleNotch,
   WarningCircle,
   CheckCircle,
+  ImagesSquare,
+  X,
+  Question
 } from '@phosphor-icons/react'
 import { I18nKey, useI18n } from '../i18n'
 
 type PyStatus = 'idle' | 'checking' | 'ok' | 'no-python' | 'no-lib'
-type WatermarkQuality = 'invisible' | 'balanced' | 'robust'
+type WatermarkQuality = 'trace' | 'faint' | 'light' | 'balanced' | 'strong' | 'robust'
 type ImageEngine = 'auto' | 'legacy' | 'neural'
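The payload-mode type that follows changes what extraction returns: fingerprint64 yields an `fp:`-prefixed hex digest (with the raw digest also in `fingerprint`), while text16 returns the original text. A Python-side sketch; the input path is illustrative:

import cv2
from rwm_engine import extract_watermark

result = extract_watermark(cv2.imread('watermarked.png'), password=42, quality='light')
if result['ok']:
    if result.get('payloadMode') == 'fingerprint64':
        print(result['wm'])               # 'fp:' + hex digest of the original text
        print(result.get('fingerprint'))  # raw digest, no prefix
    else:
        print(result['wm'])               # recovered text16 payload
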
+type ImagePayloadMode = 'fingerprint64' | 'text16'
+type SelfCheckMode = 'sampled' | 'all' | 'off'
 
 interface PanelStatus {
   kind: 'ok' | 'error' | 'warn'
   message: string
+  code?: string
+  hints?: string[]
 }
 
 interface DiagnosticsRecord {
   [key: string]: unknown
 }
 
-function CopyButton({ text, labelKey = 'common.copy' }: { text: string; labelKey?: I18nKey }) {
+interface BatchProgress {
+  event?: 'progress' | 'complete'
+  batchId: string
+  index?: number
+  total?: number
+  input?: string
+  output?: string
+  status?: 'running' | 'done' | 'failed'
+  progress?: number
+  failureCode?: string
+  error?: string
+}
+
+interface BatchSummary {
+  ok: boolean
+  batchId?: string
+  total?: number
+  successCount?: number
+  failureCount?: number
+  failureCode?: string | null
+  error?: string
+}
+
+type Translate = (key: I18nKey, params?: Record<string, string | number>) => string
+
+function CopyButton({
+  text,
+  labelKey = 'common.copy'
+}: {
+  text: string
+  labelKey?: I18nKey
+}): ReactElement {
   const { t } = useI18n()
   const [copied, setCopied] = useState(false)
   const handle = useCallback(async () => {
@@ -48,48 +86,81 @@ function CopyButton({ text, labelKey = 'common.copy' }: { text: string; labelKey
   )
 }
 
-function Label({ children }: { children: React.ReactNode }) {
+function Label({ children }: { children: ReactNode }): ReactElement {
   return <span className="…">{children}</span>
 }
 
-function HelpText({ children }: { children: React.ReactNode }) {
+function HelpText({ children }: { children: ReactNode }): ReactElement {
   return <p className="…">{children}</p>
 }
 
-function StatusBadge({ kind, message }: PanelStatus) {
+function StatusBadge({ kind, message }: PanelStatus): ReactElement {
   const styles = {
     ok: 'bg-emerald-500/10 border-emerald-500/25 text-emerald-200',
     error: 'bg-red-500/10 border-red-500/25 text-red-200',
-    warn: 'bg-amber-500/10 border-amber-500/25 text-amber-200',
+    warn: 'bg-amber-500/10 border-amber-500/25 text-amber-200'
   }
   const Icon = kind === 'ok' ? CheckCircle : kind === 'warn' ? Warning : WarningCircle
   return (
-    <div className={`… ${styles[kind]}`}>
+    <div className={`… ${styles[kind]}`}>
       <Icon … />
       <span>{message}</span>
     </div>
   )
 }
 
+function FailureNotice({
+  code,
+  message,
+  hints
+}: {
+  code?: string
+  message?: string
+  hints?: string[]
+}): ReactElement {
+  const { t } = useI18n()
+  const fallback = failureText(code, message, t)
+  const recoveryHints = hints && hints.length > 0 ? hints : failureHints(code, t)
+  return (
+    <div className="…">
+      <WarningCircle … />
+      <div>
+        <div className="…">{fallback}</div>
+        {recoveryHints.length > 0 && (
+          <ul className="…">
+            {recoveryHints.map((hint) => (
+              <li key={hint}>• {hint}</li>
+            ))}
+          </ul>
+        )}
+      </div>
+    </div>
+  )
+}
+
 function ActionButton({
   onClick,
   disabled,
   loading,
   icon,
   label,
-  color = 'blue',
+  color = 'blue'
 }: {
   onClick: () => void
   disabled?: boolean
   loading?: boolean
-  icon: React.ReactNode
+  icon: ReactNode
   label: string
   color?: 'blue' | 'violet' | 'emerald'
-}) {
+}): ReactElement {
   const colors = {
     blue: 'bg-[#3b7cd4] hover:bg-[#4a8ae0]',
     violet: 'bg-violet-600 hover:bg-violet-500',
-    emerald: 'bg-emerald-700 hover:bg-emerald-600',
+    emerald: 'bg-emerald-700 hover:bg-emerald-600'
   }
   return (
     …
   )
 }
 
-function PwdInput({ value, onChange }: { value: number; onChange: (v: number) => void }) {
+function PwdInput({
+  value,
+  onChange
+}: {
+  value: number
+  onChange: (v: number) => void
+}): ReactElement {
   const { t } = useI18n()
   return (
     …
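PwdInput takes a numeric key because the IPC layer validates an integer in [0, 2_147_483_647]. Nothing in the app derives it from a passphrase, but a caller scripting the Python engine could, for example:

import hashlib

def passphrase_to_password(passphrase: str) -> int:
    # fold a passphrase into the non-negative 31-bit range the validators accept
    digest = hashlib.sha256(passphrase.encode('utf-8')).digest()
    return int.from_bytes(digest[:4], 'big') & 0x7FFFFFFF

print(passphrase_to_password('correct horse battery staple'))
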
@@ -172,12 +251,18 @@ function PwdInput({ value, onChange }: { value: number; onChange: (v: number) => ) } -function EngineSelector({ value, onChange }: { value: ImageEngine; onChange: (engine: ImageEngine) => void }) { +function EngineSelector({ + value, + onChange +}: { + value: ImageEngine + onChange: (engine: ImageEngine) => void +}): ReactElement { const { t } = useI18n() const options: Array<{ id: ImageEngine; label: string; desc: string }> = [ { id: 'auto', label: t('img.engine.auto'), desc: t('img.engine.autoDesc') }, { id: 'legacy', label: t('img.engine.legacy'), desc: t('img.engine.legacyDesc') }, - { id: 'neural', label: t('img.engine.neural'), desc: t('img.engine.neuralDesc') }, + { id: 'neural', label: t('img.engine.neural'), desc: t('img.engine.neuralDesc') } ] return ( @@ -204,19 +289,107 @@ function EngineSelector({ value, onChange }: { value: ImageEngine; onChange: (en ) } -function QualitySelector({ value, onChange }: { value: WatermarkQuality; onChange: (v: WatermarkQuality) => void }) { +function PayloadModeSelector({ + value, + onChange +}: { + value: ImagePayloadMode + onChange: (mode: ImagePayloadMode) => void +}): ReactElement { + const { t } = useI18n() + const options: Array<{ id: ImagePayloadMode; label: string; desc: string }> = [ + { id: 'fingerprint64', label: t('img.payload.fingerprint'), desc: t('img.payload.fingerprintDesc') }, + { id: 'text16', label: t('img.payload.text16'), desc: t('img.payload.text16Desc') } + ] + + return ( +
    <div className="…">
+      {options.map((option) => (
+        <button key={option.id} className="…" onClick={() => onChange(option.id)}>
+          {option.label}
+        </button>
+      ))}
+    </div>
+  )
+}
+
+function QualitySelector({
+  value,
+  onChange
+}: {
+  value: WatermarkQuality
+  onChange: (v: WatermarkQuality) => void
+}): ReactElement {
   const { t } = useI18n()
-  const presets: Array<{ id: WatermarkQuality; label: string; desc: string; hint: string; color: string }> = [
-    { id: 'invisible', label: t('img.quality.invisible'), desc: t('img.quality.invisibleDesc'), hint: t('img.quality.invisibleHint'), color: 'text-sky-200' },
-    { id: 'balanced', label: t('img.quality.balanced'), desc: t('img.quality.balancedDesc'), hint: t('img.quality.balancedHint'), color: 'text-emerald-300' },
-    { id: 'robust', label: t('img.quality.robust'), desc: t('img.quality.robustDesc'), hint: t('img.quality.robustHint'), color: 'text-amber-300' },
+  const presets: Array<{
+    id: WatermarkQuality
+    label: string
+    desc: string
+    hint: string
+    color: string
+  }> = [
+    {
+      id: 'trace',
+      label: t('img.quality.trace'),
+      desc: t('img.quality.traceDesc'),
+      hint: t('img.quality.traceHint'),
+      color: 'text-zinc-200'
+    },
+    {
+      id: 'faint',
+      label: t('img.quality.faint'),
+      desc: t('img.quality.faintDesc'),
+      hint: t('img.quality.faintHint'),
+      color: 'text-sky-200'
+    },
+    {
+      id: 'light',
+      label: t('img.quality.light'),
+      desc: t('img.quality.lightDesc'),
+      hint: t('img.quality.lightHint'),
+      color: 'text-cyan-200'
+    },
+    {
+      id: 'balanced',
+      label: t('img.quality.balanced'),
+      desc: t('img.quality.balancedDesc'),
+      hint: t('img.quality.balancedHint'),
+      color: 'text-emerald-300'
+    },
+    {
+      id: 'strong',
+      label: t('img.quality.strong'),
+      desc: t('img.quality.strongDesc'),
+      hint: t('img.quality.strongHint'),
+      color: 'text-orange-300'
+    },
+    {
+      id: 'robust',
+      label: t('img.quality.robust'),
+      desc: t('img.quality.robustDesc'),
+      hint: t('img.quality.robustHint'),
+      color: 'text-amber-300'
+    }
   ]
 
   const current = presets.find((preset) => preset.id === value)!
 
   return (
-    <div className="…">
-      …
+    <div className="…">
+      …
       {presets.map((preset) => (
         <button key={preset.id} className="…" onClick={() => onChange(preset.id)}>
           <span className={preset.color}>{preset.label}</span>
         </button>
       ))}
       …
 
@@ -245,7 +422,7 @@ function PythonBanner({
   error,
   runnerMode,
   neuralReady,
-  neuralModelVersion,
+  neuralModelVersion
 }: {
   status: PyStatus
   version?: string
   python?: string | null
   error?: string
   runnerMode?: 'exe' | 'python' | null
   neuralReady?: boolean
   neuralModelVersion?: string | null
-}) {
+}): ReactElement {
   const { t } = useI18n()
   if (status === 'idle' || status === 'checking') {
     return (
@@ -266,15 +443,21 @@ function PythonBanner({
   }
 
   if (status === 'ok') {
-    const runnerText = runnerMode === 'exe' ? t('img.backend.exeReady') : t('img.backend.pythonReady', { python: python ?? 'unknown' })
+    const runnerText =
+      runnerMode === 'exe'
+        ? t('img.backend.exeReady')
+        : t('img.backend.pythonReady', { python: python ?? 'unknown' })
     const neuralText = neuralReady
-      ? t('img.backend.neuralReady', { version: neuralModelVersion ? ` · ${neuralModelVersion}` : '' })
+      ? t('img.backend.neuralReady', {
+          version: neuralModelVersion ? ` · ${neuralModelVersion}` : ''
+        })
       : t('img.backend.neuralMissing')
     return (
-        <span>{runnerText}{version ? ` · engine v${version}` : ''}</span>
+        <span>
+          {runnerText}
+          {version ? ` · engine v${version}` : ''}
+        </span>
 
@@ -311,7 +494,11 @@ function PythonBanner({
   )
 }
 
-function DiagnosticsList({ diagnostics }: { diagnostics?: DiagnosticsRecord }) {
+function DiagnosticsList({
+  diagnostics
+}: {
+  diagnostics?: DiagnosticsRecord
+}): ReactElement | null {
   const { t } = useI18n()
   if (!diagnostics || Object.keys(diagnostics).length === 0) return null
   return (
@@ -320,8 +507,13 @@ function DiagnosticsList({ diagnostics }: { diagnostics?: DiagnosticsRecord }) {
       {Object.entries(diagnostics).map(([key, value]) => (
         <div key={key} className="…">
-          <div className="…">{key}</div>
-          <div className="…">{typeof value === 'string' ? value : JSON.stringify(value)}</div>
+          <div className="…">
+            {diagnosticLabel(key, t)}
+            <InfoTip text={diagnosticHelp(key, t)} />
+          </div>
+          <div className="…">
+            {typeof value === 'string' ? value : JSON.stringify(value)}
+          </div>
         </div>
       ))}
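The no_signal vs wrong_password_or_corrupted_payload split that these diagnostics feed comes from the decoder's mean |tanh| score over the repetition samples (see _dct_spread_decode_bgr above, threshold ~0.18). A toy illustration with assumed per-bit coefficient differences:

import numpy as np

delta = 3.0
diffs = np.array([2.8, -3.1, 3.0, 2.6])   # mean pair differences for four payload bits
scores = np.tanh(diffs / (delta * 1.1))
avg_confidence = float(np.mean(np.abs(scores)))
print(round(avg_confidence, 2))           # ~0.7: a decodable, high-confidence signal
# An unwatermarked image yields near-zero diffs, |tanh| ~ 0, and is reported as no_signal.
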
@@ -329,23 +521,128 @@ function DiagnosticsList({ diagnostics }: { diagnostics?: DiagnosticsRecord }) {
   )
 }
 
-function normalizeExtractError(message: string, fallback: string) {
-  const lower = message.toLowerCase()
-  if (lower.includes('no valid watermark found') || lower.includes('chien search') || lower.includes('crc')) {
-    return fallback
+function InfoTip({ text }: { text: string }): ReactElement {
+  return (
+    <span className="…">
+      <Question … />
+      <span className="…">
+        {text}
+      </span>
+    </span>
+  )
+}
+
+function diagnosticLabel(key: string, t: Translate): string {
+  const labels: Record<string, I18nKey> = {
+    profile: 'diag.profile',
+    codec: 'diag.codec',
+    protocol: 'diag.protocol',
+    passwordProtected: 'diag.passwordProtected',
+    visualStrength: 'diag.visualStrength',
+    chromaScale: 'diag.chromaScale',
+    textureFloor: 'diag.textureFloor',
+    selfCheckRequired: 'diag.selfCheckRequired',
+    selfCheckPassed: 'diag.selfCheckPassed',
+    payloadMode: 'diag.payloadMode',
+    fingerprint: 'diag.fingerprint',
+    payloadBytes: 'diag.payloadBytes',
+    modelVersion: 'diag.modelVersion',
+    modelsDir: 'diag.modelsDir',
+    fallbackReason: 'diag.fallbackReason',
+    bitConfidence: 'diag.bitConfidence',
+    decodeStrategy: 'diag.decodeStrategy',
+    geometricCorrection: 'diag.geometricCorrection',
+    warnings: 'diag.warnings',
+    spreadDelta: 'diag.spreadDelta',
+    spreadReps: 'diag.spreadReps',
+    spreadMaskFloor: 'diag.spreadMaskFloor',
+    spreadMaskGain: 'diag.spreadMaskGain',
+    spreadBlocks: 'diag.spreadBlocks',
+    berEstimate: 'diag.berEstimate',
+    spreadConfidence: 'diag.spreadConfidence'
+  }
+  const labelKey = labels[key]
+  return labelKey ? t(labelKey) : key
+}
+
+function diagnosticHelp(key: string, t: Translate): string {
+  const helps: Record<string, I18nKey> = {
+    profile: 'diagHelp.profile',
+    codec: 'diagHelp.codec',
+    protocol: 'diagHelp.protocol',
+    passwordProtected: 'diagHelp.passwordProtected',
+    visualStrength: 'diagHelp.visualStrength',
+    chromaScale: 'diagHelp.chromaScale',
+    textureFloor: 'diagHelp.textureFloor',
+    selfCheckRequired: 'diagHelp.selfCheckRequired',
+    selfCheckPassed: 'diagHelp.selfCheckPassed',
+    payloadMode: 'diagHelp.payloadMode',
+    fingerprint: 'diagHelp.fingerprint',
+    payloadBytes: 'diagHelp.payloadBytes',
+    modelVersion: 'diagHelp.modelVersion',
+    modelsDir: 'diagHelp.modelsDir',
+    fallbackReason: 'diagHelp.fallbackReason',
+    bitConfidence: 'diagHelp.bitConfidence',
+    decodeStrategy: 'diagHelp.decodeStrategy',
+    geometricCorrection: 'diagHelp.geometricCorrection',
+    warnings: 'diagHelp.warnings',
+    spreadDelta: 'diagHelp.spreadDelta',
+    spreadReps: 'diagHelp.spreadReps',
+    spreadMaskFloor: 'diagHelp.spreadMaskFloor',
+    spreadMaskGain: 'diagHelp.spreadMaskGain',
+    spreadBlocks: 'diagHelp.spreadBlocks',
+    berEstimate: 'diagHelp.berEstimate',
+    spreadConfidence: 'diagHelp.spreadConfidence'
+  }
+  const helpKey = helps[key]
+  return helpKey ? t(helpKey) : t('diagHelp.default')
+}
+
+function failureText(code: string | undefined, message: string | undefined, t: Translate): string {
+  const keys: Record<string, I18nKey> = {
+    invalid_request: 'failure.invalidRequest',
+    input_unreadable: 'failure.inputUnreadable',
+    model_unavailable: 'failure.modelUnavailable',
+    payload_too_long: 'failure.payloadTooLong',
+    no_signal: 'failure.noSignal',
+    wrong_password_or_corrupted_payload: 'failure.wrongPasswordOrCorrupted',
+    engine_mismatch: 'failure.engineMismatch',
+    unsupported_protocol: 'failure.unsupportedProtocol',
+    batch_partial_failure: 'failure.batchPartial',
+    batch_cancelled: 'failure.batchCancelled'
+  }
-  return message
+  const key = code ? keys[code] : undefined
+  return key ? 
t(key) : message || t('failure.default') +} + +function failureHints(code: string | undefined, t: Translate): string[] { + const keys: Record = { + invalid_request: 'failureHints.invalidRequest', + input_unreadable: 'failureHints.inputUnreadable', + model_unavailable: 'failureHints.modelUnavailable', + payload_too_long: 'failureHints.payloadTooLong', + no_signal: 'failureHints.noSignal', + wrong_password_or_corrupted_payload: 'failureHints.wrongPasswordOrCorrupted', + engine_mismatch: 'failureHints.engineMismatch', + unsupported_protocol: 'failureHints.unsupportedProtocol', + batch_partial_failure: 'failureHints.batchPartial', + batch_cancelled: 'failureHints.batchCancelled' } - return message + const key = code ? keys[code] : undefined + return (key ? t(key) : t('failureHints.default')).split('|').filter(Boolean) } function ResultMeta({ engine, fallback, - confidence, + confidence }: { engine: string fallback?: boolean confidence?: number -}) { +}): ReactElement { const { t } = useI18n() return (
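With normalizeExtractError removed, error copy now resolves from the structured failureCode carried on backend responses. A hedged sketch of how the two mappers defined above behave, checked against keys this diff adds to the en dictionary further down:

// Illustrative only; t comes from useI18n() inside the components.
declare const t: Translate

failureText('no_signal', undefined, t)
// -> t('failure.noSignal'): 'No reliable watermark signal was found.'

failureText(undefined, 'raw backend error', t)
// -> 'raw backend error' (unknown or absent code: the backend message wins over failure.default)

failureHints('no_signal', t)
// -> t('failureHints.noSignal').split('|').filter(Boolean):
//    ['Confirm this is the watermarked image.', 'Try Auto mode, or switch Neural / Legacy.']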
@@ -353,7 +650,8 @@ function ResultMeta({ {' · '} {fallback ? t('img.fallbackUsed') : t('img.directSuccess')} - {typeof confidence === 'number' && ` · ${t('img.confidence', { value: Math.round(confidence * 100) })}`} + {typeof confidence === 'number' && + ` · ${t('img.confidence', { value: Math.round(confidence * 100) })}`}
) @@ -366,7 +664,7 @@ export function ImageEmbedPanel({ pyError, runnerMode, neuralReady, - neuralModelVersion, + neuralModelVersion }: { pyStatus: PyStatus python?: string | null @@ -375,14 +673,21 @@ export function ImageEmbedPanel({ runnerMode?: 'exe' | 'python' | null neuralReady?: boolean neuralModelVersion?: string | null -}) { +}): ReactElement { const { t } = useI18n() const [inputPath, setInputPath] = useState('') const [outputPath, setOutputPath] = useState('') + const [batchInputPaths, setBatchInputPaths] = useState([]) + const [batchOutputDir, setBatchOutputDir] = useState('') + const [batchProgress, setBatchProgress] = useState(null) + const [batchResult, setBatchResult] = useState(null) + const [batchRunning, setBatchRunning] = useState(false) + const [selfCheckMode, setSelfCheckMode] = useState('sampled') const [wmText, setWmText] = useState('') const [pwd, setPwd] = useState(1) - const [quality, setQuality] = useState('balanced') + const [quality, setQuality] = useState('light') const [engine, setEngine] = useState('auto') + const [payloadMode, setPayloadMode] = useState('fingerprint64') const [loading, setLoading] = useState(false) const [result, setResult] = useState<{ output: string @@ -391,11 +696,25 @@ export function ImageEmbedPanel({ fallbackUsed?: boolean confidence?: number diagnostics?: DiagnosticsRecord + warnings?: string[] } | null>(null) const [status, setStatus] = useState(null) const bytes = new TextEncoder().encode(wmText).length const shortPayloadEligible = bytes <= 16 + const batchPercent = Math.round((batchProgress?.progress ?? 0) * 100) + + useEffect(() => { + return window.api.onImageWmBatchProgress((payload) => { + if (payload.event === 'complete') { + setBatchProgress({ ...payload, progress: 1 }) + setBatchRunning(false) + setBatchResult(payload as BatchSummary) + } else { + setBatchProgress(payload) + } + }) + }, []) const handleOpenSrc = useCallback(async () => { const path = await window.api.imageWmOpenImage() @@ -411,6 +730,64 @@ export function ImageEmbedPanel({ if (path) setOutputPath(path) }, []) + const handleOpenBatch = useCallback(async () => { + const paths = await window.api.imageWmOpenImages() + if (paths.length) { + setBatchInputPaths(paths) + setBatchProgress(null) + setBatchResult(null) + setStatus(null) + } + }, []) + + const handleOutputDir = useCallback(async () => { + const dir = await window.api.imageWmChooseOutputDir() + if (dir) setBatchOutputDir(dir) + }, []) + + const handleBatchEmbed = useCallback(async () => { + if (!batchInputPaths.length || !batchOutputDir || !wmText.trim()) return + setBatchRunning(true) + setBatchProgress(null) + setBatchResult(null) + setStatus(null) + try { + const res = await window.api.imageWmEmbedBatch({ + inputPaths: batchInputPaths, + outputDir: batchOutputDir, + wmText: wmText.trim(), + password: pwd, + quality, + engine, + payloadMode, + selfCheckMode + }) + setBatchRunning(false) + setBatchResult(res) + if (res.ok) { + setStatus({ + kind: 'ok', + message: t('img.batchOk', { count: res.successCount ?? batchInputPaths.length }) + }) + } else { + setStatus({ + kind: 'error', + code: res.failureCode ?? undefined, + message: failureText(res.failureCode ?? undefined, res.error ?? 
t('img.batchFail'), t) + }) + } + } catch (e) { + setBatchRunning(false) + setStatus({ kind: 'error', message: String(e) }) + } + }, [batchInputPaths, batchOutputDir, wmText, pwd, quality, engine, payloadMode, selfCheckMode, t]) + + const handleCancelBatch = useCallback(async () => { + const batchId = batchProgress?.batchId ?? batchResult?.batchId + if (batchId) await window.api.imageWmCancelBatch(batchId) + setBatchRunning(false) + }, [batchProgress?.batchId, batchResult?.batchId]) + const handleEmbed = useCallback(async () => { if (!inputPath || !outputPath || !wmText.trim()) return setLoading(true) @@ -424,6 +801,7 @@ export function ImageEmbedPanel({ password: pwd, quality, engine, + payloadMode }) if (res.ok) { const finalOutput = res.output ?? outputPath @@ -435,20 +813,44 @@ export function ImageEmbedPanel({ fallbackUsed: res.fallbackUsed, confidence: res.confidence, diagnostics: res.diagnostics, + warnings: res.warnings + }) + const engineInfo = res.fallbackUsed + ? `${res.engineUsed ?? 'legacy'} · ${t('img.fallbackUsed')}` + : `${res.engineUsed ?? engine} · ${t('img.directSuccess')}` + const hasWarning = Boolean(res.warningCode || (res.warnings && res.warnings.length > 0)) + setStatus({ + kind: hasWarning ? 'warn' : 'ok', + message: hasWarning + ? t('img.embedOkWithRisk', { engineInfo }) + : t('img.embedOk', { engineInfo }) }) - const engineInfo = res.fallbackUsed ? `${res.engineUsed ?? 'legacy'} · ${t('img.fallbackUsed')}` : `${res.engineUsed ?? engine} · ${t('img.directSuccess')}` - setStatus({ kind: 'ok', message: t('img.embedOk', { engineInfo }) }) } else { - setStatus({ kind: 'error', message: res.error ?? t('img.embedFail') }) + setStatus({ + kind: 'error', + code: res.failureCode, + message: failureText( + res.failureCode, + res.userMessage ?? res.error ?? t('img.embedFail'), + t + ), + hints: res.recoveryHints + }) } } catch (e) { setStatus({ kind: 'error', message: String(e) }) } finally { setLoading(false) } - }, [inputPath, outputPath, wmText, pwd, quality, engine, t]) + }, [inputPath, outputPath, wmText, pwd, quality, engine, payloadMode, t]) const canRun = pyStatus === 'ok' && !!inputPath && !!outputPath && !!wmText.trim() && !loading + const canBatch = + pyStatus === 'ok' && + batchInputPaths.length > 0 && + !!batchOutputDir && + !!wmText.trim() && + !batchRunning return (
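The batch handlers above call several window.api endpoints that are declared outside this diff. A sketch of the assumed preload surface, with names taken from the call sites and signatures guessed from usage:

// Assumption: these interfaces approximate the preload typings, which this diff does not touch.
interface BatchProgressPayload {
  event?: 'progress' | 'complete'
  batchId?: string
  progress?: number // 0..1, rendered as batchPercent above
  input?: string // current file, shown via img.batchCurrent
  status?: string // 'failed' drives the inline failureText row
  failureCode?: string
  error?: string
}

interface BatchSummarySketch {
  ok: boolean
  batchId?: string
  total?: number
  successCount?: number
  failureCount?: number
  failureCode?: string | null
  error?: string | null
}

interface ImageWmBatchApiSketch {
  imageWmOpenImages(): Promise<string[]>
  imageWmChooseOutputDir(): Promise<string | null>
  imageWmEmbedBatch(req: {
    inputPaths: string[]
    outputDir: string
    wmText: string
    password: number
    quality: string
    engine: string
    payloadMode: string
    selfCheckMode: string
  }): Promise<BatchSummarySketch>
  imageWmCancelBatch(batchId: string): Promise<void>
  // Returns an unsubscribe function, which the useEffect above uses as its cleanup.
  onImageWmBatchProgress(cb: (payload: BatchProgressPayload) => void): () => void
}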
@@ -488,12 +890,15 @@ export function ImageEmbedPanel({ {wmText && ( {bytes} bytes - {!shortPayloadEligible && · {t('img.neuralLimitInline')}} + {!shortPayloadEligible && ( + · {t('img.neuralLimitInline')} + )} )}
+ @@ -505,7 +910,7 @@ export function ImageEmbedPanel({ icon={} /> - {engine === 'neural' && !shortPayloadEligible && ( + {engine === 'neural' && payloadMode === 'text16' && !shortPayloadEligible && ( )} @@ -520,10 +925,142 @@ export function ImageEmbedPanel({
+
+
+
+

{t('img.batchTitle')}

+

{t('img.batchSubtitle')}

+
+ {batchInputPaths.length > 0 && ( + + {t('img.batchSelected', { count: batchInputPaths.length })} + + )} +
+ +
+ 0 + ? t('img.batchSelected', { count: batchInputPaths.length }) + : '' + } + placeholder={t('img.batchNoImages')} + onBrowse={handleOpenBatch} + icon={} + /> + } + /> +
+ + {batchInputPaths.length > 0 && ( +
+ {batchInputPaths.slice(0, 20).map((path) => ( + + {path} + + ))} + {batchInputPaths.length > 20 && ( + + {t('img.batchMore', { count: batchInputPaths.length - 20 })} + + )} +
+ )} + +
+
+ {(['sampled', 'all', 'off'] as SelfCheckMode[]).map((mode) => ( + + ))} +
+ } + label={batchRunning ? t('img.batchRunning') : t('img.batchStart')} + color="emerald" + /> + {batchRunning && ( + + )} +
+ + {(batchRunning || batchProgress || batchResult) && ( +
+
+ + {batchProgress?.input + ? t('img.batchCurrent', { name: batchProgress.input }) + : t('img.batchProgress')} + + {batchPercent}% +
+
+
+
+ {batchResult && ( +
+ {t('img.batchSummary', { + success: batchResult.successCount ?? 0, + failed: batchResult.failureCount ?? 0, + total: batchResult.total ?? batchInputPaths.length + })} +
+ )} + {batchProgress?.status === 'failed' && ( +

+ {failureText(batchProgress.failureCode, batchProgress.error, t)} +

+ )} +
+ )} +
+ {status && ( - - + + {status.kind === 'error' ? ( + + ) : ( + + )} )} {result && ( @@ -571,7 +1108,7 @@ export function ImageExtractPanel({ pyError, runnerMode, neuralReady, - neuralModelVersion, + neuralModelVersion }: { pyStatus: PyStatus python?: string | null @@ -580,12 +1117,13 @@ export function ImageExtractPanel({ runnerMode?: 'exe' | 'python' | null neuralReady?: boolean neuralModelVersion?: string | null -}) { +}): ReactElement { const { t } = useI18n() const [inputPath, setInputPath] = useState('') const [pwd, setPwd] = useState(1) const [quality, setQuality] = useState('balanced') const [engine, setEngine] = useState('auto') + const [showAdvanced, setShowAdvanced] = useState(false) const [loading, setLoading] = useState(false) const [extracted, setExtracted] = useState(null) const [diagnostics, setDiagnostics] = useState() @@ -595,9 +1133,12 @@ export function ImageExtractPanel({ const [status, setStatus] = useState(null) useEffect(() => { - window.api.storeGet('imageWm:lastOutputPath').then((value) => { - if (typeof value === 'string' && value) setInputPath(value) - }).catch(() => undefined) + window.api + .storeGet('imageWm:lastOutputPath') + .then((value) => { + if (typeof value === 'string' && value) setInputPath(value) + }) + .catch(() => undefined) }, []) const handleOpenImage = useCallback(async () => { @@ -621,9 +1162,9 @@ export function ImageExtractPanel({ inputPath, password: pwd, quality, - engine, + engine }) - if (res.ok && res.wm != null) { + if (res.ok && res.wm != null && res.wm !== '') { setExtracted(res.wm) setDiagnostics(res.diagnostics) setEngineUsed(res.engineUsed) @@ -631,10 +1172,19 @@ export function ImageExtractPanel({ setConfidence(res.confidence) setStatus({ kind: 'ok', message: t('img.extractOk') }) } else { - setStatus({ kind: 'error', message: normalizeExtractError(res.error ?? t('img.extractFail'), t('img.noWatermark')) }) + setStatus({ + kind: 'error', + code: res.failureCode ?? (res.ok ? 'wrong_password_or_corrupted_payload' : undefined), + message: failureText( + res.failureCode, + res.userMessage ?? res.error ?? t('img.extractFail'), + t + ), + hints: res.recoveryHints + }) } } catch (e) { - setStatus({ kind: 'error', message: normalizeExtractError(String(e), t('img.noWatermark')) }) + setStatus({ kind: 'error', message: String(e) }) } finally { setLoading(false) } @@ -670,7 +1220,19 @@ export function ImageExtractPanel({ - + + {showAdvanced && ( +
+ + {t('img.extractAdvancedHelp')} +
+ )} {status && ( - - + + {status.kind === 'error' ? ( + + ) : ( + + )} )} {extracted != null && ( @@ -718,8 +1289,8 @@ export function ImageExtractPanel({ export type ImageWmSubTab = 'img-embed' | 'img-extract' -export function ImageWatermarkPanel({ activeTab }: { activeTab: ImageWmSubTab }) { - const [pyStatus, setPyStatus] = useState('idle') +export function ImageWatermarkPanel({ activeTab }: { activeTab: ImageWmSubTab }): ReactElement { + const [pyStatus, setPyStatus] = useState('checking') const [python, setPython] = useState(null) const [pyVersion, setPyVersion] = useState() const [pyError, setPyError] = useState() @@ -728,40 +1299,62 @@ export function ImageWatermarkPanel({ activeTab }: { activeTab: ImageWmSubTab }) const [neuralModelVersion, setNeuralModelVersion] = useState(null) useEffect(() => { - setPyStatus('checking') - window.api.imageWmCheckPython().then((res) => { - setPython(res.python ?? null) - setRunnerMode((res.mode as 'exe' | 'python') ?? null) - setNeuralReady(Boolean(res.neuralReady)) - setNeuralModelVersion(res.neuralModelVersion ?? null) - if (!res.python && res.mode !== 'exe') { + window.api + .imageWmCheckPython() + .then((res) => { + setPython(res.python ?? null) + setRunnerMode((res.mode as 'exe' | 'python') ?? null) + setNeuralReady(Boolean(res.neuralReady)) + setNeuralModelVersion(res.neuralModelVersion ?? null) + if (!res.python && res.mode !== 'exe') { + setPyStatus('no-python') + setPyError(res.error) + } else if (!res.ok) { + setPyStatus('no-lib') + setPyError(res.error) + } else { + setPyStatus('ok') + setPyVersion(res.version) + } + }) + .catch((e) => { setPyStatus('no-python') - setPyError(res.error) - } else if (!res.ok) { - setPyStatus('no-lib') - setPyError(res.error) - } else { - setPyStatus('ok') - setPyVersion(res.version) - } - }).catch((e) => { - setPyStatus('no-python') - setPyError(String(e)) - }) + setPyError(String(e)) + }) }, []) - const sharedProps = { pyStatus, python, pyVersion, pyError, runnerMode, neuralReady, neuralModelVersion } + const sharedProps = { + pyStatus, + python, + pyVersion, + pyError, + runnerMode, + neuralReady, + neuralModelVersion + } return (
{activeTab === 'img-embed' && ( - + )} {activeTab === 'img-extract' && ( - + )} diff --git a/src/renderer/src/i18n.ts b/src/renderer/src/i18n.ts index a3b578c..d2aadb6 100644 --- a/src/renderer/src/i18n.ts +++ b/src/renderer/src/i18n.ts @@ -5,6 +5,17 @@ export type UiLanguage = 'zh-CN' | 'en' export type LanguageSetting = 'system' | UiLanguage const zh = { + 'img.payloadMode': 'Payload mode', + 'img.payload.fingerprint': 'Short fingerprint', + 'img.payload.fingerprintDesc': 'Default invisible mode; extracts fp:16hex.', + 'img.payload.text16': 'Text16 compat', + 'img.payload.text16Desc': 'Legacy short text recovery, 16 UTF-8 bytes max.', + 'diag.payloadMode': 'Payload mode', + 'diag.fingerprint': 'Fingerprint', + 'diag.berEstimate': 'BER estimate', + 'diagHelp.payloadMode': 'Whether the image stores a short fingerprint or a recoverable 16-byte text payload.', + 'diagHelp.fingerprint': 'The recovered or embedded 64-bit keyed fingerprint.', + 'diagHelp.berEstimate': 'Soft estimate of bit error likelihood from decoder probabilities.', 'nav.detect': '检测', 'nav.batch': '批量', 'nav.watermark': '水印', @@ -125,7 +136,8 @@ const zh = { 'settings.threshold.midHigh': '中 / 高 分界', 'settings.threshold.low': '低风险', 'settings.threshold.mid': '中风险', - 'settings.threshold.summary': '当前阈值:0-{low} 低风险 · {lowPlus}- {mid} 中等风险 · {midPlus}-100 高风险', + 'settings.threshold.summary': + '当前阈值:0-{low} 低风险 · {lowPlus}- {mid} 中等风险 · {midPlus}-100 高风险', 'settings.clipboard.title': '剪贴板监听', 'settings.clipboard.subtitle': '自动检测剪贴板变化并提示扫描', 'settings.clipboard.enable': '启用监听', @@ -159,14 +171,27 @@ const zh = { 'img.engine.legacyDesc': 'DCT + RS 旧引擎,适合长文本和历史兼容。', 'img.engine.neuralDesc': '短载荷神经水印,抗社交平台降质更强。', 'img.quality': '画质 / 强度', + 'img.strength': '嵌入强度', + 'img.quality.trace': '极轻', + 'img.quality.faint': '微痕', + 'img.quality.light': '轻痕', 'img.quality.invisible': '轻痕', 'img.quality.balanced': '均衡', + 'img.quality.strong': '增强', 'img.quality.robust': '强鲁棒', + 'img.quality.traceDesc': '风险自担', + 'img.quality.faintDesc': '近乎不可见', + 'img.quality.lightDesc': '画质优先', 'img.quality.invisibleDesc': '画质优先', 'img.quality.balancedDesc': '推荐', + 'img.quality.strongDesc': '平台优先', 'img.quality.robustDesc': '传输优先', - 'img.quality.invisibleHint': '最弱 neural 残差,适合轻度压缩或干净保存。', - 'img.quality.balancedHint': '默认档:比 alpha1 中档更轻,同时通过本地 JPEG50/resize50 校准。', + 'img.quality.traceHint': '最低强度频域扩频档,优先不可见;嵌入后必须通过本地自检。', + 'img.quality.faintHint': '非常低强度频域扩频档,适合对观感极敏感的图片。', + 'img.quality.lightHint': '低可见推荐档,使用 Y 通道 DCT 扩频,避免规则方块和粉红交叉印记。', + 'img.quality.invisibleHint': '低可见推荐档,优先减弱粉红交叉印记。', + 'img.quality.balancedHint': '默认档:频域扩频更强,仍优先隐藏图案;平台转发请用增强或强鲁棒。', + 'img.quality.strongHint': '面向 QQ、微信等社交平台,增强 JPEG 转发后的可提取性。', 'img.quality.robustHint': '社交平台传输优先,观感会更明显。', 'img.backend.checking': '正在检查水印后端...', 'img.backend.preparing': '准备检查后端...', @@ -182,13 +207,18 @@ const zh = { 'img.embed': '嵌入水印', 'img.embedding': '嵌入中...', 'img.embedOk': '图片水印嵌入成功。{engineInfo}', + 'img.embedOkWithRisk': '图片水印已输出,但当前强度存在提取风险。{engineInfo}', 'img.embedFail': '嵌入失败', 'img.embedComplete': '嵌入完成', 'img.extract': '提取水印', 'img.extracting': '提取中...', 'img.extractOk': '图片水印提取成功。', 'img.extractFail': '提取失败', - 'img.extractWarn': 'Auto 会先尝试 neural,再回退 legacy;比较模型迭代时建议明确选择引擎。', + 'img.extractWarn': '提取通常只需要图片、密码和引擎;强度只在 Legacy 兼容场景中作为高级参数。', + 'img.showAdvanced': '高级兼容参数', + 'img.hideAdvanced': '收起高级参数', + 'img.extractAdvancedHelp': + '普通 neural 提取不需要选择嵌入强度;这里主要用于旧版 Legacy 参数兼容。', 'img.noWatermark': '未找到可提取水印。请确认选择了含水印图片,并且密码和引擎匹配。', 'img.extracted': '提取出的水印', 'img.engineResult': 
'引擎:{engine}', @@ -197,6 +227,97 @@ const zh = { 'img.confidence': '置信度 {value}%', 'img.qualityResult': '质量档:{quality}', 'img.diagnostics': '诊断信息', + 'img.batchTitle': '批量嵌入', + 'img.batchSubtitle': '多张图片使用同一水印、密码、引擎和嵌入强度。', + 'img.batchImages': '批量图片', + 'img.batchNoImages': '未选择图片', + 'img.batchSelected': '已选择 {count} 张', + 'img.batchMore': '还有 {count} 张', + 'img.batchOutputDir': '输出文件夹', + 'img.batchNoOutputDir': '未选择输出文件夹', + 'img.selfCheck.sampled': '抽样校验', + 'img.selfCheck.all': '全部校验', + 'img.selfCheck.off': '不校验', + 'img.batchStart': '开始批量嵌入', + 'img.batchRunning': '批量处理中...', + 'img.batchCancel': '取消批量', + 'img.batchProgress': '批量进度', + 'img.batchCurrent': '正在处理:{name}', + 'img.batchSummary': '完成:成功 {success},失败 {failed},共 {total}', + 'img.batchOk': '批量嵌入完成,成功 {count} 张。', + 'img.batchFail': '批量嵌入失败', + 'failure.invalidRequest': '请求参数不完整或不合法。', + 'failure.inputUnreadable': '图片无法读取或写入。', + 'failure.modelUnavailable': '水印后端或神经网络模型不可用。', + 'failure.payloadTooLong': 'Neural 水印文本超过 16 UTF-8 bytes。', + 'failure.noSignal': '没有检测到可确认的水印信号。', + 'failure.wrongPasswordOrCorrupted': '检测到疑似水印信号,但校验没有通过。', + 'failure.engineMismatch': '当前引擎无法解码这张图片。', + 'failure.unsupportedProtocol': '当前版本不支持这张图里的水印协议。', + 'failure.batchPartial': '批量任务部分图片失败。', + 'failure.batchCancelled': '批量任务已取消。', + 'failure.default': '操作失败,请检查图片、密码和引擎。', + 'failureHints.invalidRequest': + '确认图片、输出路径、水印文本和密码都已填写。|如果是批量任务,确认所有路径仍然存在。', + 'failureHints.inputUnreadable': '确认文件没有被移动或占用。|尝试换成 PNG 或 JPG 图片。', + 'failureHints.modelUnavailable': '确认 helper 和 ONNX 模型已打包。|点击重试或重新启动应用。', + 'failureHints.payloadTooLong': '缩短文本到 16 bytes 内。|长文本请使用 Auto 或 Legacy。', + 'failureHints.noSignal': '确认这确实是含水印图片。|尝试 Auto 模式,或切换 Neural / Legacy。', + 'failureHints.wrongPasswordOrCorrupted': + '优先确认密码是否正确。|如果密码正确,图片可能被过度压缩、裁剪或遮挡。', + 'failureHints.engineMismatch': '尝试 Auto 模式。|在 Neural 和 Legacy 之间切换重试。', + 'failureHints.unsupportedProtocol': '更新应用和 helper 后重试。', + 'failureHints.batchPartial': '查看失败项,确认图片格式、路径和输出权限。', + 'failureHints.batchCancelled': '任务已停止,已完成的图片会保留在输出文件夹。', + 'failureHints.default': + '确认图片、密码和引擎是否匹配。|如果图片经过平台转发,尝试更强的嵌入档位。', + 'diag.profile': '强度档位', + 'diag.codec': '编码器', + 'diag.protocol': '协议', + 'diag.passwordProtected': '密码保护', + 'diag.visualStrength': '视觉强度', + 'diag.chromaScale': '色彩残差', + 'diag.textureFloor': '纹理遮罩', + 'diag.selfCheckRequired': '要求自检', + 'diag.selfCheckPassed': '自检通过', + 'diag.payloadBytes': '载荷字节', + 'diag.modelVersion': '模型版本', + 'diag.modelsDir': '模型目录', + 'diag.fallbackReason': '回退原因', + 'diag.bitConfidence': 'bit 置信度', + 'diag.decodeStrategy': '解码策略', + 'diag.geometricCorrection': '几何校正', + 'diag.warnings': '风险提示', + 'diag.spreadDelta': '扩频强度', + 'diag.spreadReps': '扩频重复', + 'diag.spreadMaskFloor': '遮罩下限', + 'diag.spreadMaskGain': '遮罩增益', + 'diag.spreadBlocks': '可用块数', + 'diag.spreadConfidence': '扩频置信度', + 'diagHelp.default': '用于排查问题的技术信息。', + 'diagHelp.profile': '嵌入时选择的强度档位,越强越抗压缩但越可能可见。', + 'diagHelp.codec': '实际使用的嵌入或解码方案。', + 'diagHelp.protocol': '水印载荷的内部格式版本。', + 'diagHelp.passwordProtected': '是否使用密码扰码保护载荷。', + 'diagHelp.visualStrength': '神经残差叠加强度,数值越高水印越强。', + 'diagHelp.chromaScale': '色彩通道扰动比例,越低越能减少粉红/紫色印记。', + 'diagHelp.textureFloor': '平坦区域最低嵌入比例,越低越偏向把水印藏到纹理区域。', + 'diagHelp.selfCheckRequired': '嵌入后是否必须立刻自提取成功才算完成。', + 'diagHelp.selfCheckPassed': '嵌入后是否通过了本机自提取校验。', + 'diagHelp.payloadBytes': '水印文本编码后的字节数。', + 'diagHelp.modelVersion': '当前使用的 neural ONNX 模型版本。', + 'diagHelp.modelsDir': '模型文件所在目录。', + 'diagHelp.fallbackReason': 'Auto 模式从一个引擎切到另一个引擎的原因。', + 'diagHelp.bitConfidence': '神经网络读取每个 bit 
时的平均把握程度。', + 'diagHelp.decodeStrategy': '单视图或多视图聚合的解码方式。', + 'diagHelp.geometricCorrection': '旋转、缩放、裁剪等几何恢复信息。', + 'diagHelp.warnings': '低强度或自检失败等需要注意的风险。', + 'diagHelp.spreadDelta': 'DCT 系数对的最小差值目标,越高越稳但改动更大。', + 'diagHelp.spreadReps': '每个 payload bit 分散到多少个随机 DCT 块。', + 'diagHelp.spreadMaskFloor': '平坦区域保留的最低嵌入比例。', + 'diagHelp.spreadMaskGain': '纹理和边缘区域额外提高嵌入强度的比例。', + 'diagHelp.spreadBlocks': '当前图像可用于 8x8 DCT 扩频的块数量。', + 'diagHelp.spreadConfidence': '扩频相关性投票的平均置信度。', 'text.host': '宿主文本', 'text.hostPlaceholder': '粘贴要嵌入水印的原文...', 'text.message': '水印消息(明文)', @@ -220,7 +341,8 @@ const zh = { 'text.sourcePoisonPlaceholder': '粘贴要投毒的文本...', 'text.watermarked': '含水印文本', 'text.watermarkedResult': '含水印文本(可直接复制使用)', - 'text.watermarkedHint': '共 {bits} bits -> {chars} 个不可见字符,已随机散布于宿主文本 {hostChars} 个字符中', + 'text.watermarkedHint': + '共 {bits} bits -> {chars} 个不可见字符,已随机散布于宿主文本 {hostChars} 个字符中', 'text.messageStats': '{chars} 字符 = {bytes} 字节', 'text.messageLong': '消息较长,请确保宿主文本足够长', 'text.profile.balanced': '鲁棒-均衡', @@ -259,10 +381,21 @@ const zh = { 'text.poison.tagsBlock': 'Tags Block', 'text.poison.tagsBlockDesc': 'U+E0041-E005A,AI 水印常用范围', 'text.poison.variationSelectors': '变体选择器', - 'text.poison.variationSelectorsDesc': 'U+FE00-FE07,附着于普通字符', + 'text.poison.variationSelectorsDesc': 'U+FE00-FE07,附着于普通字符' } as const const en: Record = { + 'img.payloadMode': 'Payload mode', + 'img.payload.fingerprint': 'Short fingerprint', + 'img.payload.fingerprintDesc': 'Default invisible mode; extracts fp:16hex.', + 'img.payload.text16': 'Text16 compat', + 'img.payload.text16Desc': 'Recoverable short text, 16 UTF-8 bytes max.', + 'diag.payloadMode': 'Payload mode', + 'diag.fingerprint': 'Fingerprint', + 'diag.berEstimate': 'BER estimate', + 'diagHelp.payloadMode': 'Whether the image stores a short fingerprint or a recoverable 16-byte text payload.', + 'diagHelp.fingerprint': 'The recovered or embedded 64-bit keyed fingerprint.', + 'diagHelp.berEstimate': 'Soft estimate of bit error likelihood from decoder probabilities.', 'nav.detect': 'Scan', 'nav.batch': 'Batch', 'nav.watermark': 'Watermark', @@ -279,7 +412,8 @@ const en: Record = { 'common.remove': 'Remove', 'common.scanning': 'Scanning...', 'detect.empty': 'Paste text and press {shortcut} to scan', - 'detect.emptyHint': 'Detect zero-width characters, homoglyphs, BiDi controls, Tags blocks, and more', + 'detect.emptyHint': + 'Detect zero-width characters, homoglyphs, BiDi controls, Tags blocks, and more', 'detect.analyzing': 'Analyzing characters', 'detect.error': 'Scan failed', 'detect.unknownError': 'Unknown error, please try again', @@ -366,7 +500,8 @@ const en: Record = { 'category.tagsBlockDesc': 'Unicode Tags block, often used for AI text watermarking', 'category.homoglyphDesc': 'Characters from other scripts that resemble Latin letters', 'category.variationDesc': 'Invisible glyph modifiers that can encode hidden data', - 'category.typoPunctDesc': 'Vertical quotes or unusual punctuation in Chinese context, possible AI-generation trace', + 'category.typoPunctDesc': + 'Vertical quotes or unusual punctuation in Chinese context, possible AI-generation trace', 'settings.title': 'Settings', 'settings.subtitle': 'Detection rules and app behavior', 'settings.reset': 'Reset defaults', @@ -383,7 +518,8 @@ const en: Record = { 'settings.threshold.midHigh': 'Medium / High boundary', 'settings.threshold.low': 'Low risk', 'settings.threshold.mid': 'Medium risk', - 'settings.threshold.summary': 'Current thresholds: 0-{low} low · {lowPlus}-{mid} medium · {midPlus}-100 
high', + 'settings.threshold.summary': + 'Current thresholds: 0-{low} low · {lowPlus}-{mid} medium · {midPlus}-100 high', 'settings.clipboard.title': 'Clipboard Monitor', 'settings.clipboard.subtitle': 'Detect clipboard changes and prompt scans', 'settings.clipboard.enable': 'Enable monitoring', @@ -408,7 +544,8 @@ const en: Record = { 'img.text': 'Watermark text', 'img.textPlaceholder': 'Enter a short text or ID to hide in the image...', 'img.password': 'Password', - 'img.passwordHelp': 'Embed and extract must use the same integer password. A wrong password cannot decode neural watermarks.', + 'img.passwordHelp': + 'Embed and extract must use the same integer password. A wrong password cannot decode neural watermarks.', 'img.engine': 'Engine', 'img.engine.auto': 'Auto', 'img.engine.legacy': 'Legacy', @@ -417,14 +554,31 @@ const en: Record = { 'img.engine.legacyDesc': 'DCT + RS engine for long text and old images.', 'img.engine.neuralDesc': 'Short neural payload with stronger transport robustness.', 'img.quality': 'Quality / strength', + 'img.strength': 'Embedding strength', + 'img.quality.trace': 'Trace', + 'img.quality.faint': 'Faint', + 'img.quality.light': 'Light', 'img.quality.invisible': 'Light', 'img.quality.balanced': 'Balanced', + 'img.quality.strong': 'Strong', 'img.quality.robust': 'Robust', + 'img.quality.traceDesc': 'Risk accepted', + 'img.quality.faintDesc': 'Nearly invisible', + 'img.quality.lightDesc': 'Quality first', 'img.quality.invisibleDesc': 'Quality first', 'img.quality.balancedDesc': 'Recommended', + 'img.quality.strongDesc': 'Platform first', 'img.quality.robustDesc': 'Transport first', - 'img.quality.invisibleHint': 'Weakest neural residual. Best for clean saves or mild compression.', - 'img.quality.balancedHint': 'Default: lighter than alpha1 medium, calibrated against local JPEG50/resize50.', + 'img.quality.traceHint': + 'Lowest frequency-spread profile. Prioritizes invisibility and must pass local self-check.', + 'img.quality.faintHint': 'Very low frequency-spread profile for visually sensitive images.', + 'img.quality.lightHint': + 'Recommended low-visibility profile. Uses Y-channel DCT spreading to avoid grid and pink cross marks.', + 'img.quality.invisibleHint': + 'Low-visibility recommended profile, tuned to reduce pink cross marks.', + 'img.quality.balancedHint': + 'Default: stronger frequency spreading while hiding visible patterns. Use Strong or Robust for platform resharing.', + 'img.quality.strongHint': 'Improves extraction after QQ/WeChat-style JPEG resharing.', 'img.quality.robustHint': 'Prioritizes social-platform transport. The mark can be more visible.', 'img.backend.checking': 'Checking watermark backend...', 'img.backend.preparing': 'Preparing backend check...', @@ -435,19 +589,29 @@ const en: Record = { 'img.backend.missingLib': 'Python was found, but runtime packages are missing.\n{error}', 'img.backend.noRunner': 'No runnable image watermark backend was found.', 'img.backend.devFallback': 'Developer fallback: install Python runtime dependencies.', - 'img.neuralLimitInline': 'neural payload limit is 16 UTF-8 bytes, so Auto will fall back to legacy.', - 'img.neuralLimitWarn': 'Neural mode only supports payloads up to 16 UTF-8 bytes. Use Auto or Legacy for longer text.', + 'img.neuralLimitInline': + 'neural payload limit is 16 UTF-8 bytes, so Auto will fall back to legacy.', + 'img.neuralLimitWarn': + 'Neural mode only supports payloads up to 16 UTF-8 bytes. 
Use Auto or Legacy for longer text.', 'img.embed': 'Embed watermark', 'img.embedding': 'Embedding...', 'img.embedOk': 'Watermark embed succeeded. {engineInfo}', + 'img.embedOkWithRisk': + 'Watermark was written, but this strength has extraction risk. {engineInfo}', 'img.embedFail': 'Embed failed', 'img.embedComplete': 'Embed complete', 'img.extract': 'Extract watermark', 'img.extracting': 'Extracting...', 'img.extractOk': 'Watermark extract succeeded.', 'img.extractFail': 'Extract failed', - 'img.extractWarn': 'Auto tries neural first, then falls back to legacy. Choose an explicit engine when comparing model iterations.', - 'img.noWatermark': 'No extractable watermark was found. Make sure the image, password, and engine match.', + 'img.extractWarn': + 'Extraction usually only needs image, password, and engine. Strength is an advanced Legacy compatibility setting.', + 'img.showAdvanced': 'Advanced compatibility', + 'img.hideAdvanced': 'Hide advanced', + 'img.extractAdvancedHelp': + 'Neural extraction does not normally need the embedding strength. This is mainly for old Legacy compatibility.', + 'img.noWatermark': + 'No extractable watermark was found. Make sure the image, password, and engine match.', 'img.extracted': 'Extracted watermark', 'img.engineResult': 'Engine: {engine}', 'img.fallbackUsed': 'fallback used', @@ -455,6 +619,108 @@ const en: Record = { 'img.confidence': 'confidence {value}%', 'img.qualityResult': 'Quality: {quality}', 'img.diagnostics': 'Diagnostics', + 'img.batchTitle': 'Batch Embed', + 'img.batchSubtitle': + 'Apply the same watermark, password, engine, and strength to multiple images.', + 'img.batchImages': 'Batch images', + 'img.batchNoImages': 'No images selected', + 'img.batchSelected': '{count} selected', + 'img.batchMore': '{count} more', + 'img.batchOutputDir': 'Output folder', + 'img.batchNoOutputDir': 'No output folder selected', + 'img.selfCheck.sampled': 'Sample check', + 'img.selfCheck.all': 'Check all', + 'img.selfCheck.off': 'No check', + 'img.batchStart': 'Start batch embed', + 'img.batchRunning': 'Batch running...', + 'img.batchCancel': 'Cancel batch', + 'img.batchProgress': 'Batch progress', + 'img.batchCurrent': 'Processing: {name}', + 'img.batchSummary': 'Done: {success} succeeded, {failed} failed, {total} total', + 'img.batchOk': 'Batch embed complete: {count} images succeeded.', + 'img.batchFail': 'Batch embed failed', + 'failure.invalidRequest': 'The request is incomplete or invalid.', + 'failure.inputUnreadable': 'The image could not be read or written.', + 'failure.modelUnavailable': 'The watermark backend or neural model is unavailable.', + 'failure.payloadTooLong': 'Neural watermark text is longer than 16 UTF-8 bytes.', + 'failure.noSignal': 'No reliable watermark signal was found.', + 'failure.wrongPasswordOrCorrupted': 'A watermark-like signal was found, but validation failed.', + 'failure.engineMismatch': 'The selected engine could not decode this image.', + 'failure.unsupportedProtocol': + 'This app version does not support the watermark protocol in this image.', + 'failure.batchPartial': 'Some images failed in the batch.', + 'failure.batchCancelled': 'The batch was cancelled.', + 'failure.default': 'The operation failed. 
Check the image, password, and engine.', + 'failureHints.invalidRequest': + 'Confirm image, output path, watermark text, and password are set.|For batch work, confirm all paths still exist.', + 'failureHints.inputUnreadable': + 'Confirm the file was not moved or locked by another app.|Try PNG or JPG.', + 'failureHints.modelUnavailable': + 'Confirm the helper and ONNX model are bundled.|Retry or restart the app.', + 'failureHints.payloadTooLong': + 'Shorten the text to 16 bytes or less.|Use Auto or Legacy for longer text.', + 'failureHints.noSignal': + 'Confirm this is the watermarked image.|Try Auto mode, or switch Neural / Legacy.', + 'failureHints.wrongPasswordOrCorrupted': + 'Check the password first.|If the password is correct, the image may be over-compressed, cropped, or covered.', + 'failureHints.engineMismatch': 'Try Auto mode.|Switch between Neural and Legacy.', + 'failureHints.unsupportedProtocol': 'Update the app and helper, then try again.', + 'failureHints.batchPartial': + 'Review failed items and check image format, paths, and output permissions.', + 'failureHints.batchCancelled': 'The job stopped. Finished images remain in the output folder.', + 'failureHints.default': + 'Confirm image, password, and engine match.|If the image was re-shared by a platform, try a stronger embedding profile.', + 'diag.profile': 'Strength profile', + 'diag.codec': 'Codec', + 'diag.protocol': 'Protocol', + 'diag.passwordProtected': 'Password protected', + 'diag.visualStrength': 'Visual strength', + 'diag.chromaScale': 'Color residual', + 'diag.textureFloor': 'Texture mask', + 'diag.selfCheckRequired': 'Self-check required', + 'diag.selfCheckPassed': 'Self-check passed', + 'diag.payloadBytes': 'Payload bytes', + 'diag.modelVersion': 'Model version', + 'diag.modelsDir': 'Model folder', + 'diag.fallbackReason': 'Fallback reason', + 'diag.bitConfidence': 'Bit confidence', + 'diag.decodeStrategy': 'Decode strategy', + 'diag.geometricCorrection': 'Geometry correction', + 'diag.warnings': 'Warnings', + 'diag.spreadDelta': 'Spread strength', + 'diag.spreadReps': 'Spread repeats', + 'diag.spreadMaskFloor': 'Mask floor', + 'diag.spreadMaskGain': 'Mask gain', + 'diag.spreadBlocks': 'Usable blocks', + 'diag.spreadConfidence': 'Spread confidence', + 'diagHelp.default': 'Technical detail used for troubleshooting.', + 'diagHelp.profile': + 'The embedding strength profile. Stronger means more robust but more visible.', + 'diagHelp.codec': 'Actual embedding or decoding method used.', + 'diagHelp.protocol': 'Internal payload format version.', + 'diagHelp.passwordProtected': 'Whether the payload bits were protected by the password mask.', + 'diagHelp.visualStrength': 'Neural residual strength. Higher values write a stronger watermark.', + 'diagHelp.chromaScale': + 'Color-channel perturbation ratio. Lower values reduce pink or purple marks.', + 'diagHelp.textureFloor': + 'Minimum embedding ratio in flat areas. 
Lower values hide more in textured regions.', + 'diagHelp.selfCheckRequired': 'Whether embedding must immediately pass local extraction.', + 'diagHelp.selfCheckPassed': 'Whether local extraction passed after embedding.', + 'diagHelp.payloadBytes': 'Byte length of the encoded watermark text.', + 'diagHelp.modelVersion': 'Version of the neural ONNX model in use.', + 'diagHelp.modelsDir': 'Folder that contains model files.', + 'diagHelp.fallbackReason': 'Why Auto mode switched from one engine to another.', + 'diagHelp.bitConfidence': 'Average confidence for recovered payload bits.', + 'diagHelp.decodeStrategy': 'Single-view or multi-view aggregate decoding path.', + 'diagHelp.geometricCorrection': 'Rotation, scaling, crop, or alignment recovery information.', + 'diagHelp.warnings': 'Risks such as low strength or self-check failure.', + 'diagHelp.spreadDelta': + 'Minimum target difference for DCT coefficient pairs. Higher is stronger but changes more pixels.', + 'diagHelp.spreadReps': 'How many random DCT blocks carry each payload bit.', + 'diagHelp.spreadMaskFloor': 'Minimum embedding ratio kept in flat image areas.', + 'diagHelp.spreadMaskGain': 'Extra strength applied in textured and edge-rich areas.', + 'diagHelp.spreadBlocks': 'Number of 8x8 DCT blocks available in this image.', + 'diagHelp.spreadConfidence': 'Average correlation-vote confidence for spread-spectrum decoding.', 'text.host': 'Host text', 'text.hostPlaceholder': 'Paste original text that will carry the watermark...', 'text.message': 'Watermark message', @@ -478,7 +744,8 @@ const en: Record = { 'text.sourcePoisonPlaceholder': 'Paste text to poison...', 'text.watermarked': 'Watermarked text', 'text.watermarkedResult': 'Watermarked text (ready to copy)', - 'text.watermarkedHint': '{bits} bits -> {chars} invisible characters, scattered across {hostChars} host characters', + 'text.watermarkedHint': + '{bits} bits -> {chars} invisible characters, scattered across {hostChars} host characters', 'text.messageStats': '{chars} characters = {bytes} bytes', 'text.messageLong': 'Long message; make sure the host text is long enough', 'text.profile.balanced': 'Robust balanced', @@ -517,36 +784,52 @@ const en: Record = { 'text.poison.tagsBlock': 'Tags Block', 'text.poison.tagsBlockDesc': 'U+E0041-E005A, common AI watermark range', 'text.poison.variationSelectors': 'Variation selectors', - 'text.poison.variationSelectorsDesc': 'U+FE00-FE07, attached to normal characters', + 'text.poison.variationSelectorsDesc': 'U+FE00-FE07, attached to normal characters' } export type I18nKey = keyof typeof zh const dictionaries = { 'zh-CN': zh, en } +type I18nContextValue = { + t: (key: I18nKey, params?: Record) => string + language: UiLanguage + languageSetting: LanguageSetting + setLanguage: (value: LanguageSetting) => void +} + export function resolveUiLanguage(setting: LanguageSetting): UiLanguage { if (setting === 'zh-CN' || setting === 'en') return setting return navigator.language.toLowerCase().startsWith('zh') ? 'zh-CN' : 'en' } -export function formatMessage(template: string, params?: Record) { +export function formatMessage(template: string, params?: Record): string { if (!params) return template return template.replace(/\{(\w+)\}/g, (_, key: string) => String(params[key] ?? 
'')) } -export function useI18n() { +export function useI18n(): I18nContextValue { const { settings, updateSettings } = useSettings() const language = resolveUiLanguage(settings.language) - const t = useCallback((key: I18nKey, params?: Record) => { - return formatMessage(dictionaries[language][key] ?? dictionaries.en[key] ?? key, params) - }, [language]) - const setLanguage = useCallback((value: LanguageSetting) => { - updateSettings('language', value) - }, [updateSettings]) - return useMemo(() => ({ - t, - language, - languageSetting: settings.language, - setLanguage, - }), [language, settings.language, setLanguage, t]) + const t = useCallback( + (key: I18nKey, params?: Record) => { + return formatMessage(dictionaries[language][key] ?? dictionaries.en[key] ?? key, params) + }, + [language] + ) + const setLanguage = useCallback( + (value: LanguageSetting) => { + updateSettings('language', value) + }, + [updateSettings] + ) + return useMemo( + () => ({ + t, + language, + languageSetting: settings.language, + setLanguage + }), + [language, settings.language, setLanguage, t] + ) }
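formatMessage is the single interpolation layer for every string above, so its edge cases are worth pinning down. Worked examples against the implementation as written:

formatMessage('confidence {value}%', { value: 87 })
// -> 'confidence 87%'

formatMessage('Done: {success} succeeded, {failed} failed, {total} total', {
  success: 9,
  failed: 1,
  total: 10
})
// -> 'Done: 9 succeeded, 1 failed, 10 total'

formatMessage('confidence {value}%', {})
// -> 'confidence %' (a missing param collapses to '' via params[key] ?? '')

formatMessage('confidence {value}%')
// -> 'confidence {value}%' (no params object: the template is returned untouched)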