diff --git a/ExtractionTool/Features/MainFeature.cs b/ExtractionTool/Features/MainFeature.cs
index dc05ac5e1..b5f8c19bf 100644
--- a/ExtractionTool/Features/MainFeature.cs
+++ b/ExtractionTool/Features/MainFeature.cs
@@ -118,8 +118,8 @@ private void ExtractFile(string file)
Console.WriteLine($"Attempting to extract all files from {file}");
using Stream stream = File.Open(file, FileMode.Open, FileAccess.Read, FileShare.ReadWrite);
- // Read the first 16 bytes
- byte[] magic = stream.PeekBytes(16);
+ // Read the first 32 bytes — needed to detect NintendoDisc magic at 0x18/0x1C
+ byte[] magic = stream.PeekBytes(32);
// Get the file type
string extension = Path.GetExtension(file).TrimStart('.');
diff --git a/InfoPrint/Features/MainFeature.cs b/InfoPrint/Features/MainFeature.cs
index 4e2260bbe..b86058f03 100644
--- a/InfoPrint/Features/MainFeature.cs
+++ b/InfoPrint/Features/MainFeature.cs
@@ -158,8 +158,8 @@ private void PrintFileInfo(string file)
{
using Stream stream = File.Open(file, FileMode.Open, FileAccess.Read, FileShare.ReadWrite);
- // Read the first 16 bytes
- byte[] magic = stream.PeekBytes(16);
+ // Read the first 32 bytes — needed to detect NintendoDisc magic at 0x18/0x1C
+ byte[] magic = stream.PeekBytes(32);
// Get the file type
string extension = Path.GetExtension(file).TrimStart('.');
diff --git a/SabreTools.Data.Models/GCZ/Archive.cs b/SabreTools.Data.Models/GCZ/Archive.cs
new file mode 100644
index 000000000..63931bee8
--- /dev/null
+++ b/SabreTools.Data.Models/GCZ/Archive.cs
@@ -0,0 +1,40 @@
+namespace SabreTools.Data.Models.GCZ
+{
+ ///
+ /// Represents a parsed GCZ (GameCube Zip) compressed disc image.
+ /// Contains header metadata and block lookup tables.
+ /// Actual compressed block data is accessed via the source stream.
+ ///
+ public class DiscImage
+ {
+ ///
+ /// GCZ file header
+ ///
+ public GczHeader Header { get; set; } = new();
+
+ ///
+ /// Block pointer table (one entry per block).
+ /// Each value encodes both the offset of the block within the compressed data section
+ /// and a compression flag in the top bit:
+ ///
+ /// - Top bit CLEAR → block is zlib/deflate-compressed at that offset.
+ /// - Top bit SET → block is stored uncompressed at that offset.
+ ///
+ /// Offset is value & ~UncompressedFlag.
+ ///
+ public ulong[] BlockPointers { get; set; } = [];
+
+ ///
+ /// Adler-32 hashes of each block's stored data (compressed or raw, exactly as
+ /// written), one per block. Used for integrity verification.
+ ///
+ public uint[] BlockHashes { get; set; } = [];
+
+ ///
+ /// Byte offset within the GCZ file where the compressed block data begins.
+ /// Computed as: HeaderSize + (NumBlocks * 8) + (NumBlocks * 4).
+ ///
+ /// Not parsed from stream; computed during deserialization.
+ public long DataOffset { get; set; }
+ }
+}
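
Usage note: decoding one BlockPointers entry combines the flag bit, the relative offset, and DataOffset. A minimal sketch against the model above (the helper name is illustrative, not part of this PR):

    using SabreTools.Data.Models.GCZ;

    static (long fileOffset, bool compressed) LocateBlock(DiscImage image, int index)
    {
        ulong raw = image.BlockPointers[index];

        // Top bit set means the block was stored without compression
        bool compressed = (raw & Constants.UncompressedFlag) == 0;

        // The remaining bits are the offset relative to the data section
        long relative = (long)(raw & ~Constants.UncompressedFlag);
        return (image.DataOffset + relative, compressed);
    }

Per the GCZ format, a block's stored length is implicit: it runs to the next block's (masked) offset, or to Header.CompressedDataSize for the final block.
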
diff --git a/SabreTools.Data.Models/GCZ/Constants.cs b/SabreTools.Data.Models/GCZ/Constants.cs
new file mode 100644
index 000000000..3b5388a59
--- /dev/null
+++ b/SabreTools.Data.Models/GCZ/Constants.cs
@@ -0,0 +1,23 @@
+namespace SabreTools.Data.Models.GCZ
+{
+ public static class Constants
+ {
+ /// GCZ magic cookie (little-endian u32 at offset 0)
+ public const uint MagicCookie = 0xB10BC001;
+
+ /// Size of the GCZ file header in bytes
+ public const int HeaderSize = 32;
+
+ // Valid GCZ block sizes (Dolphin-compatible)
+ public const uint BlockSize32K = 0x8000;
+ public const uint BlockSize64K = 0x10000;
+ public const uint BlockSize128K = 0x20000;
+ public const uint DefaultBlockSize = BlockSize32K;
+
+ ///
+ /// Top bit of a block-pointer value: when CLEAR the block is zlib/deflate compressed;
+ /// when SET the block is stored uncompressed.
+ ///
+ public const ulong UncompressedFlag = 0x8000000000000000;
+ }
+}
diff --git a/SabreTools.Data.Models/GCZ/GczHeader.cs b/SabreTools.Data.Models/GCZ/GczHeader.cs
new file mode 100644
index 000000000..e78b0d5f7
--- /dev/null
+++ b/SabreTools.Data.Models/GCZ/GczHeader.cs
@@ -0,0 +1,39 @@
+namespace SabreTools.Data.Models.GCZ
+{
+ ///
+ /// GCZ (GameCube Zip) file header — 32 bytes at the start of the file
+ ///
+ ///
+ public sealed class GczHeader
+ {
+ ///
+ /// Magic cookie identifying a GCZ file (0xB10BC001)
+ ///
+ public uint MagicCookie { get; set; }
+
+ ///
+ /// Sub-type; always 0 for GameCube / Wii disc images
+ ///
+ public uint SubType { get; set; }
+
+ ///
+ /// Total size of the compressed block data section in bytes
+ ///
+ public ulong CompressedDataSize { get; set; }
+
+ ///
+ /// Total decompressed (ISO) size in bytes
+ ///
+ public ulong DataSize { get; set; }
+
+ ///
+ /// Size of each uncompressed block in bytes (must be 32 KiB, 64 KiB, or 128 KiB)
+ ///
+ public uint BlockSize { get; set; }
+
+ ///
+ /// Number of blocks in the image
+ ///
+ public uint NumBlocks { get; set; }
+ }
+}
diff --git a/SabreTools.Data.Models/NintendoDisc/Constants.cs b/SabreTools.Data.Models/NintendoDisc/Constants.cs
new file mode 100644
index 000000000..0550c9024
--- /dev/null
+++ b/SabreTools.Data.Models/NintendoDisc/Constants.cs
@@ -0,0 +1,120 @@
+namespace SabreTools.Data.Models.NintendoDisc
+{
+ public static class Constants
+ {
+ #region Disc identification magic values
+
+ /// Magic word present at offset 0x01C on GameCube discs
+ public const uint GCMagicWord = 0xC2339F3D;
+
+ /// Magic word present at offset 0x018 on Wii discs
+ public const uint WiiMagicWord = 0x5D1C9EA3;
+
+ #endregion
+
+ #region Disc header layout
+
+ // Offsets within the 0x440-byte boot block.
+ // Layout confirmed against Dolphin source (VolumeDisc.cpp / DiscUtils.h):
+ // 0x000–0x003 Title code (4 chars, e.g. "GAFE")
+ // 0x004–0x005 Maker code (2 chars, e.g. "01") — Dolphin Read(0x4, 2)
+ // 0x006 Disc number — Dolphin GetDiscNumber() Read(6)
+ // 0x007 Revision — Dolphin GetRevision() Read(7)
+ // 0x008 Audio streaming
+ // 0x009 Streaming buffer size
+ // 0x00A–0x017 Unused (14 bytes)
+ // 0x018 Wii magic (0x5D1C9EA3)
+ // 0x01C GC magic (0xC2339F3D)
+ // 0x020–0x07F Game title (0x60 bytes)
+ // 0x080 Disable hash verification
+ // 0x081 Disable disc encryption
+ public const int TitleCodeOffset = 0x000;
+ public const int TitleCodeLength = 4;
+ public const int MakerCodeOffset = 0x004;
+ public const int MakerCodeLength = 2;
+ /// Full 6-char game ID = TitleCode[4] + MakerCode[2]
+ public const int GameIdOffset = 0x000;
+ public const int GameIdLength = 6;
+ public const int DiscNumberOffset = 0x006;
+ public const int DiscVersionOffset = 0x007;
+ public const int AudioStreamingOffset = 0x008;
+ public const int StreamingBufferSizeOffset = 0x009;
+ public const int WiiMagicOffset = 0x018;
+ public const int GCMagicOffset = 0x01C;
+ public const int GameTitleOffset = 0x020;
+ public const int GameTitleLength = 0x060;
+ public const int DisableHashVerificationOffset = 0x080;
+ public const int DisableDiscEncryptionOffset = 0x081;
+ public const int DolOffsetField = 0x420;
+ public const int FstOffsetField = 0x424;
+ public const int FstSizeField = 0x428;
+ public const int DiscHeaderSize = 0x440;
+
+ #endregion
+
+ #region BI2 data
+
+ public const int Bi2Address = 0x000440;
+ public const int Bi2Size = 0x2000;
+
+ #endregion
+
+ #region Apploader
+
+ public const int ApploaderAddress = 0x002440;
+ public const int ApploaderCodeSizeOffset = 0x14;
+ public const int ApploaderTrailerSizeOffset = 0x18;
+ public const int ApploaderHeaderSize = 0x20;
+
+ #endregion
+
+ #region Wii-specific disc layout
+
+ public const int WiiPartitionTableAddress = 0x40000;
+ public const int WiiPartitionGroupCount = 4;
+ public const int WiiRegionDataAddress = 0x04E000;
+ public const int WiiRegionDataSize = 0x20;
+
+ #endregion
+
+ #region Wii partition header fields
+
+ // Offsets relative to partition start
+ public const int WiiTicketSize = 0x2A4;
+ public const int WiiTmdSizeAddress = 0x2A4;
+ public const int WiiTmdOffsetAddress = 0x2A8;
+ public const int WiiCertSizeAddress = 0x2AC;
+ public const int WiiCertOffsetAddress = 0x2B0;
+ public const int WiiH3OffsetAddress = 0x2B4;
+ public const int WiiH3Size = 0x18000;
+ public const int WiiDataOffsetAddress = 0x2B8;
+
+ #endregion
+
+ #region Wii block / group structure
+
+ public const int WiiBlockSize = 0x8000;
+ public const int WiiBlockHeaderSize = 0x0400;
+ public const int WiiBlockDataSize = 0x7C00;
+ public const int WiiBlocksPerGroup = 64;
+ public const int WiiGroupSize = WiiBlocksPerGroup * WiiBlockSize;
+ public const int WiiGroupDataSize = WiiBlocksPerGroup * WiiBlockDataSize;
+
+ #endregion
+
+ #region DVD sector size
+
+ public const int DvdSectorSize = 0x800;
+
+ #endregion
+
+ #region Wii ticket fields
+
+ // Offsets relative to ticket start
+ public const int TicketEncryptedTitleKeyOffset = 0x1BF;
+ public const int TicketTitleIdOffset = 0x1DC;
+ public const int TicketCommonKeyIndexOffset = 0x1F1;
+
+ #endregion
+ }
+}
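
For reference, these constants explain the 16-to-32-byte change in the detection hunks at the top of this diff: both magic words sit within the first 0x20 bytes. A sketch of the check, assuming a target where System.Buffers.Binary is available (the helper name is illustrative):

    using System.Buffers.Binary;
    using SabreTools.Data.Models.NintendoDisc;

    static Platform DetectPlatform(byte[] magic)
    {
        if (magic.Length < Constants.GCMagicOffset + 4)
            return Platform.Unknown;

        // Wii magic at 0x18, GameCube magic at 0x1C, both stored big-endian
        uint wii = BinaryPrimitives.ReadUInt32BigEndian(magic.AsSpan(Constants.WiiMagicOffset, 4));
        uint gc = BinaryPrimitives.ReadUInt32BigEndian(magic.AsSpan(Constants.GCMagicOffset, 4));

        if (wii == Constants.WiiMagicWord) return Platform.Wii;
        if (gc == Constants.GCMagicWord) return Platform.GameCube;
        return Platform.Unknown;
    }
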
diff --git a/SabreTools.Data.Models/NintendoDisc/Disc.cs b/SabreTools.Data.Models/NintendoDisc/Disc.cs
new file mode 100644
index 000000000..ac4afc4bb
--- /dev/null
+++ b/SabreTools.Data.Models/NintendoDisc/Disc.cs
@@ -0,0 +1,28 @@
+namespace SabreTools.Data.Models.NintendoDisc
+{
+ ///
+ /// Represents a parsed GameCube or Wii disc image
+ ///
+ public class Disc
+ {
+ ///
+ /// Disc boot block header (first 0x440 bytes)
+ ///
+ public DiscHeader Header { get; set; } = new();
+
+ ///
+ /// Detected platform (GameCube or Wii)
+ ///
+ public Platform Platform { get; set; }
+
+ ///
+ /// Wii partition table entries (Wii discs only)
+ ///
+ public WiiPartitionTableEntry[]? PartitionTableEntries { get; set; }
+
+ ///
+ /// Wii region data at disc offset 0x4E000 (Wii discs only)
+ ///
+ public WiiRegionData? RegionData { get; set; }
+ }
+}
diff --git a/SabreTools.Data.Models/NintendoDisc/DiscHeader.cs b/SabreTools.Data.Models/NintendoDisc/DiscHeader.cs
new file mode 100644
index 000000000..c1dc58e87
--- /dev/null
+++ b/SabreTools.Data.Models/NintendoDisc/DiscHeader.cs
@@ -0,0 +1,81 @@
+namespace SabreTools.Data.Models.NintendoDisc
+{
+ ///
+ /// GameCube / Wii disc boot block header (first 0x440 bytes of the disc)
+ ///
+ ///
+ public sealed class DiscHeader
+ {
+ ///
+ /// 6-character ASCII game ID (e.g. "GALE01")
+ ///
+ /// 6 bytes at offset 0x000
+ public string GameId { get; set; } = string.Empty;
+
+ ///
+ /// 2-character ASCII maker / publisher code (e.g. "01")
+ ///
+ /// Derived from GameId bytes at offset 0x004–0x005; not a separate on-disc field
+ public string MakerCode { get; set; } = string.Empty;
+
+ ///
+ /// Zero-based disc number for multi-disc games
+ ///
+ public byte DiscNumber { get; set; }
+
+ ///
+ /// Disc version
+ ///
+ public byte DiscVersion { get; set; }
+
+ ///
+ /// Non-zero if audio streaming is enabled
+ ///
+ public byte AudioStreaming { get; set; }
+
+ ///
+ /// Audio streaming buffer size (in 16 KiB units)
+ ///
+ public byte StreamingBufferSize { get; set; }
+
+ ///
+ /// Wii magic word at offset 0x018 (0x5D1C9EA3 for Wii discs, 0 for GameCube)
+ ///
+ public uint WiiMagic { get; set; }
+
+ ///
+ /// GameCube magic word at offset 0x01C (0xC2339F3D for GameCube discs)
+ ///
+ public uint GCMagic { get; set; }
+
+ ///
+ /// Null-terminated ASCII game title (up to 0x60 bytes at offset 0x020)
+ ///
+ public string GameTitle { get; set; } = string.Empty;
+
+ ///
+ /// Non-zero to disable hash verification (GameCube only)
+ ///
+ public byte DisableHashVerification { get; set; }
+
+ ///
+ /// Non-zero to disable disc encryption (GameCube only)
+ ///
+ public byte DisableDiscEncryption { get; set; }
+
+ ///
+ /// Offset of the main DOL executable (no shift for GameCube; <<2 for Wii)
+ ///
+ public uint DolOffset { get; set; }
+
+ ///
+ /// Offset of the File System Table (no shift for GameCube; <<2 for Wii)
+ ///
+ public uint FstOffset { get; set; }
+
+ ///
+ /// Maximum size of the File System Table in bytes
+ ///
+ public uint FstSize { get; set; }
+ }
+}
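
The "<<2 for Wii" remark on DolOffset/FstOffset is worth making concrete. A sketch of resolving the stored values to byte offsets (the helper is illustrative):

    using SabreTools.Data.Models.NintendoDisc;

    static (long dol, long fst) ResolveOffsets(DiscHeader header, Platform platform)
    {
        // Wii stores these fields shifted right by 2; GameCube stores them raw
        int shift = platform == Platform.Wii ? 2 : 0;
        return ((long)header.DolOffset << shift, (long)header.FstOffset << shift);
    }
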
diff --git a/SabreTools.Data.Models/NintendoDisc/Enums.cs b/SabreTools.Data.Models/NintendoDisc/Enums.cs
new file mode 100644
index 000000000..6fcc5640e
--- /dev/null
+++ b/SabreTools.Data.Models/NintendoDisc/Enums.cs
@@ -0,0 +1,32 @@
+namespace SabreTools.Data.Models.NintendoDisc
+{
+ ///
+ /// Platform / console type for a Nintendo disc image
+ ///
+ public enum Platform
+ {
+ /// Platform could not be determined
+ Unknown = 0,
+
+ /// Nintendo GameCube
+ GameCube = 1,
+
+ /// Nintendo Wii
+ Wii = 2,
+ }
+
+ ///
+ /// Wii partition type
+ ///
+ public enum WiiPartitionType : uint
+ {
+ /// Game data partition (DATA)
+ Data = 0,
+
+ /// System update partition (UPDATE)
+ Update = 1,
+
+ /// Channel installer partition (CHANNEL)
+ Channel = 2,
+ }
+}
diff --git a/SabreTools.Data.Models/NintendoDisc/WiiPartitionTableEntry.cs b/SabreTools.Data.Models/NintendoDisc/WiiPartitionTableEntry.cs
new file mode 100644
index 000000000..c463b110b
--- /dev/null
+++ b/SabreTools.Data.Models/NintendoDisc/WiiPartitionTableEntry.cs
@@ -0,0 +1,21 @@
+namespace SabreTools.Data.Models.NintendoDisc
+{
+ ///
+ /// A single entry in the Wii disc partition table.
+ /// The table lives at 0x40000–0x4FFFF on the disc.
+ ///
+ ///
+ public sealed class WiiPartitionTableEntry
+ {
+ ///
+ /// Absolute byte offset of the partition on the disc.
+ /// Stored on-disc as offset >> 2 (big-endian u32).
+ ///
+ public long Offset { get; set; }
+
+ ///
+ /// Partition type: 0 = DATA, 1 = UPDATE, 2 = CHANNEL, or an ASCII title ID.
+ ///
+ public uint Type { get; set; }
+ }
+}
diff --git a/SabreTools.Data.Models/NintendoDisc/WiiRegionData.cs b/SabreTools.Data.Models/NintendoDisc/WiiRegionData.cs
new file mode 100644
index 000000000..a71adfb6f
--- /dev/null
+++ b/SabreTools.Data.Models/NintendoDisc/WiiRegionData.cs
@@ -0,0 +1,22 @@
+namespace SabreTools.Data.Models.NintendoDisc
+{
+ ///
+ /// Wii disc region data block (0x20 bytes at disc offset 0x4E000)
+ ///
+ ///
+ public sealed class WiiRegionData
+ {
+ ///
+ /// Region setting byte:
+ /// 0 = Japan, 1 = USA, 2 = Europe, 3 = Korea,
+ /// 4 = China, 5 = Taiwan, 6 = Germany, 7 = France
+ ///
+ public uint RegionSetting { get; set; }
+
+ ///
+ /// Age ratings for various regions (0x10 bytes)
+ ///
+ /// 16 bytes
+ public byte[] AgeRatings { get; set; } = new byte[16];
+ }
+}
diff --git a/SabreTools.Data.Models/WIA/Archive.cs b/SabreTools.Data.Models/WIA/Archive.cs
new file mode 100644
index 000000000..4bab45c84
--- /dev/null
+++ b/SabreTools.Data.Models/WIA/Archive.cs
@@ -0,0 +1,45 @@
+namespace SabreTools.Data.Models.WIA
+{
+ ///
+ /// Represents a parsed WIA or RVZ compressed disc image.
+ /// Contains the two headers and all lookup tables.
+ /// Actual group (compressed block) data is accessed via the source stream.
+ ///
+ public class DiscImage
+ {
+ ///
+ /// WIA / RVZ primary header (0x48 bytes)
+ ///
+ public WiaHeader1 Header1 { get; set; } = new();
+
+ ///
+ /// WIA / RVZ secondary header (0xDC bytes)
+ ///
+ public WiaHeader2 Header2 { get; set; } = new();
+
+ ///
+ /// Wii partition entries. Null or empty for GameCube discs.
+ ///
+ public PartitionEntry[]? PartitionEntries { get; set; }
+
+ ///
+ /// Raw (non-partition) data region entries
+ ///
+ public RawDataEntry[] RawDataEntries { get; set; } = [];
+
+ ///
+ /// WIA group entries (populated when IsRvz is false)
+ ///
+ public WiaGroupEntry[]? GroupEntries { get; set; }
+
+ ///
+ /// RVZ group entries (populated when is true)
+ ///
+ public RvzGroupEntry[]? RvzGroupEntries { get; set; }
+
+ ///
+ /// True if this is an RVZ file; false if this is a WIA file
+ ///
+ public bool IsRvz { get; set; }
+ }
+}
diff --git a/SabreTools.Data.Models/WIA/Constants.cs b/SabreTools.Data.Models/WIA/Constants.cs
new file mode 100644
index 000000000..105c94ecc
--- /dev/null
+++ b/SabreTools.Data.Models/WIA/Constants.cs
@@ -0,0 +1,47 @@
+namespace SabreTools.Data.Models.WIA
+{
+ public static class Constants
+ {
+ /// WIA magic (little-endian u32): "WIA\x01"
+ public const uint WiaMagic = 0x01414957;
+
+ /// RVZ magic (little-endian u32): "RVZ\x01"
+ public const uint RvzMagic = 0x015A5652;
+
+ /// Size of WiaHeader1 in bytes
+ public const int Header1Size = 0x48;
+
+ /// Size of WiaHeader2 in bytes
+ public const int Header2Size = 0xDC;
+
+ /// Size of a PartitionEntry in bytes
+ public const int PartitionEntrySize = 0x30;
+
+ /// Size of a PartitionDataEntry in bytes
+ public const int PartitionDataEntrySize = 0x10;
+
+ /// Size of a RawDataEntry in bytes
+ public const int RawDataEntrySize = 0x18;
+
+ /// Size of a WiaGroupEntry in bytes
+ public const int WiaGroupEntrySize = 0x08;
+
+ /// Size of an RvzGroupEntry in bytes
+ public const int RvzGroupEntrySize = 0x0C;
+
+ /// Size of a HashExceptionEntry in bytes (2-byte offset + 20-byte SHA-1)
+ public const int HashExceptionEntrySize = 0x16;
+
+ /// Number of bytes of disc header stored in WiaHeader2.DiscHeader
+ public const int DiscHeaderStoredSize = 0x80;
+
+ // WIA version numbers
+ public const uint WiaVersion = 0x01000000;
+ public const uint WiaVersionWriteCompatible = 0x01000000;
+ public const uint RvzVersion = 0x01000000;
+ public const uint RvzVersionWriteCompatible = 0x00030000;
+
+ // Default chunk size (2 MiB = one Wii group)
+ public const uint DefaultChunkSize = 2 * 1024 * 1024;
+ }
+}
diff --git a/SabreTools.Data.Models/WIA/Enums.cs b/SabreTools.Data.Models/WIA/Enums.cs
new file mode 100644
index 000000000..c68ed2fa7
--- /dev/null
+++ b/SabreTools.Data.Models/WIA/Enums.cs
@@ -0,0 +1,40 @@
+namespace SabreTools.Data.Models.WIA
+{
+ ///
+ /// WIA / RVZ disc type
+ ///
+ public enum WiaDiscType : uint
+ {
+ /// Nintendo GameCube disc
+ GameCube = 1,
+
+ /// Nintendo Wii disc
+ Wii = 2,
+ }
+
+ ///
+ /// Compression algorithm used inside a WIA or RVZ file.
+ /// WIA supports None / Purge / Bzip2 / LZMA / LZMA2.
+ /// RVZ additionally supports Zstd; Purge is not used in RVZ.
+ ///
+ public enum WiaRvzCompressionType : uint
+ {
+ /// No compression — data stored verbatim
+ None = 0,
+
+ /// Purge — strips known-zero regions (hash blocks, padding). WIA only.
+ Purge = 1,
+
+ /// bzip2 block compression
+ Bzip2 = 2,
+
+ /// LZMA compression
+ LZMA = 3,
+
+ /// LZMA2 compression
+ LZMA2 = 4,
+
+ /// Zstandard compression. RVZ only.
+ Zstd = 5,
+ }
+}
diff --git a/SabreTools.Data.Models/WIA/GroupEntries.cs b/SabreTools.Data.Models/WIA/GroupEntries.cs
new file mode 100644
index 000000000..cf0e79f0c
--- /dev/null
+++ b/SabreTools.Data.Models/WIA/GroupEntries.cs
@@ -0,0 +1,44 @@
+namespace SabreTools.Data.Models.WIA
+{
+ ///
+ /// WIA group entry pointing to compressed data for one group. Size: 0x08 bytes.
+ /// DataOffset is stored on-disk as the actual byte offset shifted right by 2 (i.e. >>2).
+ ///
+ public sealed class WiaGroupEntry
+ {
+ ///
+ /// Actual byte offset of this group's data within the WIA file.
+ /// (On disk this value is stored as offset >> 2.)
+ ///
+ public ulong DataOffset { get; set; }
+
+ ///
+ /// Compressed size of this group's data in bytes (0 means group contains only zeroes)
+ ///
+ public uint DataSize { get; set; }
+ }
+
+ ///
+ /// RVZ group entry — extends WiaGroupEntry with a packed-data size field.
+ /// Size: 0x0C bytes.
+ ///
+ public sealed class RvzGroupEntry
+ {
+ ///
+ /// Actual byte offset of this group's data within the RVZ file.
+ /// (On disk this value is stored as offset >> 2.)
+ ///
+ public ulong DataOffset { get; set; }
+
+ ///
+ /// Total size of this group's data (compressed + any RVZ-pack section) in bytes
+ ///
+ public uint DataSize { get; set; }
+
+ ///
+ /// Size of the RVZ-packed (junk-stripped) portion within this group's data.
+ /// 0 means no RVZ packing was applied.
+ ///
+ public uint RvzPackedSize { get; set; }
+ }
+}
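
A note on the offset encoding shared by both entry types: because the on-disk value is the byte offset shifted right by 2, group data must be 4-byte aligned in the file. The round-trip is, as a sketch:

    static ulong DecodeGroupOffset(uint stored) => (ulong)stored << 2;

    // Assumes the byte offset is 4-byte aligned, or the low bits are silently lost
    static uint EncodeGroupOffset(ulong byteOffset) => (uint)(byteOffset >> 2);
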
diff --git a/SabreTools.Data.Models/WIA/HashExceptionEntry.cs b/SabreTools.Data.Models/WIA/HashExceptionEntry.cs
new file mode 100644
index 000000000..66003964f
--- /dev/null
+++ b/SabreTools.Data.Models/WIA/HashExceptionEntry.cs
@@ -0,0 +1,23 @@
+namespace SabreTools.Data.Models.WIA
+{
+ ///
+ /// A single hash exception entry within a WIA / RVZ Wii-partition group.
+ /// Used to restore the correct SHA-1 hash values that were stripped when
+ /// the Wii block hash data was removed during compression.
+ /// Size: 0x16 bytes (2-byte offset + 20-byte SHA-1).
+ ///
+ public sealed class HashExceptionEntry
+ {
+ ///
+ /// Byte offset within the reconstructed 0x400-byte hash block where
+ /// this SHA-1 value must be written
+ ///
+ public ushort Offset { get; set; }
+
+ ///
+ /// SHA-1 hash value (20 bytes)
+ ///
+ /// 20 bytes
+ public byte[] Hash { get; set; } = new byte[20];
+ }
+}
diff --git a/SabreTools.Data.Models/WIA/PartitionEntry.cs b/SabreTools.Data.Models/WIA/PartitionEntry.cs
new file mode 100644
index 000000000..13fdb514d
--- /dev/null
+++ b/SabreTools.Data.Models/WIA/PartitionEntry.cs
@@ -0,0 +1,40 @@
+namespace SabreTools.Data.Models.WIA
+{
+ ///
+ /// Describes a contiguous range of sectors within a Wii partition.
+ /// Part of a PartitionEntry. Size: 0x10 bytes.
+ ///
+ public sealed class PartitionDataEntry
+ {
+ /// Zero-based index of the first sector covered by this range
+ public uint FirstSector { get; set; }
+
+ /// Number of sectors covered by this range
+ public uint NumberOfSectors { get; set; }
+
+ /// Index into the group-entry array of the first group for this range
+ public uint GroupIndex { get; set; }
+
+ /// Number of groups covering this range
+ public uint NumberOfGroups { get; set; }
+ }
+
+ ///
+ /// Describes a single Wii partition: its AES title key and two sector ranges.
+ /// Size: 0x30 bytes.
+ ///
+ public sealed class PartitionEntry
+ {
+ ///
+ /// Decrypted AES-128 partition title key
+ ///
+ /// 16 bytes
+ public byte[] PartitionKey { get; set; } = new byte[16];
+
+ /// First sector range for this partition (typically encrypted data)
+ public PartitionDataEntry DataEntry0 { get; set; } = new();
+
+ /// Second sector range for this partition (typically decrypted/raw data)
+ public PartitionDataEntry DataEntry1 { get; set; } = new();
+ }
+}
diff --git a/SabreTools.Data.Models/WIA/RawDataEntry.cs b/SabreTools.Data.Models/WIA/RawDataEntry.cs
new file mode 100644
index 000000000..8d6c845c8
--- /dev/null
+++ b/SabreTools.Data.Models/WIA/RawDataEntry.cs
@@ -0,0 +1,21 @@
+namespace SabreTools.Data.Models.WIA
+{
+ ///
+ /// Describes a region of non-partition data (e.g. disc header, partition table).
+ /// Size: 0x18 bytes.
+ ///
+ public sealed class RawDataEntry
+ {
+ /// Byte offset of this region within the equivalent ISO image
+ public ulong DataOffset { get; set; }
+
+ /// Size of this region in bytes
+ public ulong DataSize { get; set; }
+
+ /// Index into the group-entry array of the first group for this region
+ public uint GroupIndex { get; set; }
+
+ /// Number of groups covering this region
+ public uint NumberOfGroups { get; set; }
+ }
+}
diff --git a/SabreTools.Data.Models/WIA/WiaHeader1.cs b/SabreTools.Data.Models/WIA/WiaHeader1.cs
new file mode 100644
index 000000000..9e9703d22
--- /dev/null
+++ b/SabreTools.Data.Models/WIA/WiaHeader1.cs
@@ -0,0 +1,52 @@
+namespace SabreTools.Data.Models.WIA
+{
+ ///
+ /// WIA / RVZ first header (0x48 bytes at the start of the file).
+ /// All multi-byte fields are big-endian on disk; the reader converts to host order.
+ ///
+ ///
+ public sealed class WiaHeader1
+ {
+ ///
+ /// Format magic: 0x01414957 ("WIA\x01") or 0x015A5652 ("RVZ\x01")
+ ///
+ public uint Magic { get; set; }
+
+ ///
+ /// Format version (e.g. 0x01000000)
+ ///
+ public uint Version { get; set; }
+
+ ///
+ /// Minimum version required to read this file
+ ///
+ public uint VersionCompatible { get; set; }
+
+ ///
+ /// Size of WiaHeader2 in bytes
+ ///
+ public uint Header2Size { get; set; }
+
+ ///
+ /// SHA-1 hash of WiaHeader2
+ ///
+ /// 20 bytes
+ public byte[] Header2Hash { get; set; } = new byte[20];
+
+ ///
+ /// Total size of the equivalent uncompressed ISO image in bytes
+ ///
+ public ulong IsoFileSize { get; set; }
+
+ ///
+ /// Total size of this WIA / RVZ file in bytes
+ ///
+ public ulong WiaFileSize { get; set; }
+
+ ///
+ /// SHA-1 hash of this header, excluding this field itself
+ ///
+ /// 20 bytes
+ public byte[] Header1Hash { get; set; } = new byte[20];
+ }
+}
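
Since both hashes here are plain SHA-1 over raw header bytes, integrity can be checked right after reading. A sketch, assuming the caller kept the raw 0xDC bytes of Header2 around (names are illustrative):

    using System.Linq;
    using System.Security.Cryptography;
    using SabreTools.Data.Models.WIA;

    static bool VerifyHeader2(byte[] rawHeader2Bytes, WiaHeader1 h1)
    {
        using var sha1 = SHA1.Create();
        byte[] actual = sha1.ComputeHash(rawHeader2Bytes);
        return actual.SequenceEqual(h1.Header2Hash);
    }
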
diff --git a/SabreTools.Data.Models/WIA/WiaHeader2.cs b/SabreTools.Data.Models/WIA/WiaHeader2.cs
new file mode 100644
index 000000000..f97dbac39
--- /dev/null
+++ b/SabreTools.Data.Models/WIA/WiaHeader2.cs
@@ -0,0 +1,100 @@
+namespace SabreTools.Data.Models.WIA
+{
+ ///
+ /// WIA / RVZ second header (0xDC bytes).
+ /// Immediately follows WiaHeader1 in the file.
+ /// All multi-byte fields are big-endian on disk; the reader converts to host order.
+ ///
+ public sealed class WiaHeader2
+ {
+ ///
+ /// Disc type: 1 = GameCube, 2 = Wii
+ ///
+ public WiaDiscType DiscType { get; set; }
+
+ ///
+ /// Compression algorithm applied to group data
+ ///
+ public WiaRvzCompressionType CompressionType { get; set; }
+
+ ///
+ /// Informational compression level used when writing (1–9)
+ ///
+ public int CompressionLevel { get; set; }
+
+ ///
+ /// Group / chunk size in bytes.
+ /// WIA requires exactly 2 MiB; RVZ accepts powers of 2 between 32 KiB and 2 MiB.
+ ///
+ public uint ChunkSize { get; set; }
+
+ ///
+ /// First 0x80 bytes of the disc image (unencrypted disc header)
+ ///
+ /// 0x80 bytes
+ public byte[] DiscHeader { get; set; } = new byte[0x80];
+
+ ///
+ /// Number of PartitionEntry structures that follow the raw-data entries
+ ///
+ public uint NumberOfPartitionEntries { get; set; }
+
+ ///
+ /// Size of each PartitionEntry in bytes
+ ///
+ public uint PartitionEntrySize { get; set; }
+
+ ///
+ /// File offset of the PartitionEntry array
+ ///
+ public ulong PartitionEntriesOffset { get; set; }
+
+ ///
+ /// SHA-1 hash of all PartitionEntry data
+ ///
+ /// 20 bytes
+ public byte[] PartitionEntriesHash { get; set; } = new byte[20];
+
+ ///
+ /// Number of RawDataEntry structures
+ ///
+ public uint NumberOfRawDataEntries { get; set; }
+
+ ///
+ /// File offset of the RawDataEntry array
+ ///
+ public ulong RawDataEntriesOffset { get; set; }
+
+ ///
+ /// Total size in bytes of all RawDataEntry structures
+ ///
+ public uint RawDataEntriesSize { get; set; }
+
+ ///
+ /// Number of group entries (WiaGroupEntry or RvzGroupEntry)
+ ///
+ public uint NumberOfGroupEntries { get; set; }
+
+ ///
+ /// File offset of the group-entry array
+ ///
+ public ulong GroupEntriesOffset { get; set; }
+
+ ///
+ /// Total size in bytes of all group entries
+ ///
+ public uint GroupEntriesSize { get; set; }
+
+ ///
+ /// Number of valid bytes in CompressorData
+ ///
+ public byte CompressorDataSize { get; set; }
+
+ ///
+ /// Algorithm-specific compressor parameters (up to 7 bytes).
+ /// LZMA: 5-byte prop block. LZMA2: 1-byte dict-size code. Others: unused.
+ ///
+ /// 7 bytes
+ public byte[] CompressorData { get; set; } = new byte[7];
+ }
+}
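
The ChunkSize constraint above can be validated cheaply. A sketch mirroring the rule as stated in the doc comment (WIA: exactly 2 MiB; RVZ: power of two between 32 KiB and 2 MiB):

    static bool IsValidChunkSize(uint chunkSize, bool isRvz)
    {
        if (!isRvz)
            return chunkSize == 0x200000; // WIA: 2 MiB only

        // RVZ: power of two in [32 KiB, 2 MiB]
        bool powerOfTwo = chunkSize != 0 && (chunkSize & (chunkSize - 1)) == 0;
        return powerOfTwo && chunkSize >= 0x8000 && chunkSize <= 0x200000;
    }
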
diff --git a/SabreTools.Serialization.Readers/GCZ.cs b/SabreTools.Serialization.Readers/GCZ.cs
new file mode 100644
index 000000000..122562c5e
--- /dev/null
+++ b/SabreTools.Serialization.Readers/GCZ.cs
@@ -0,0 +1,78 @@
+using System.IO;
+using SabreTools.Data.Models.GCZ;
+using SabreTools.IO.Extensions;
+using SabreTools.Numerics.Extensions;
+
+#pragma warning disable IDE0017 // Simplify object initialization
+namespace SabreTools.Serialization.Readers
+{
+ public class GCZ : BaseBinaryReader
+ {
+ ///
+ public override DiscImage? Deserialize(Stream? data)
+ {
+ // If the data is invalid
+ if (data is null || !data.CanRead)
+ return null;
+
+ // Need at least the header
+ if (data.Length - data.Position < Constants.HeaderSize)
+ return null;
+
+ try
+ {
+ long initialOffset = data.Position;
+
+ var archive = new DiscImage();
+
+ // Parse the header
+ archive.Header = ParseGczHeader(data);
+
+ // Validate magic
+ if (archive.Header.MagicCookie != Constants.MagicCookie)
+ return null;
+
+ // Validate block count — guard against absurdly large tables
+ if (archive.Header.NumBlocks == 0 || archive.Header.NumBlocks > 0x100000)
+ return null;
+
+ int numBlocks = (int)archive.Header.NumBlocks;
+
+ // Read block pointer table (8 bytes per block).
+ // BitConverter assumes a little-endian host, matching the GCZ on-disk byte order.
+ archive.BlockPointers = new ulong[numBlocks];
+ byte[] ptrBuf = data.ReadBytes(numBlocks * 8);
+ for (int i = 0; i < numBlocks; i++)
+ archive.BlockPointers[i] = System.BitConverter.ToUInt64(ptrBuf, i * 8);
+
+ // Read block hash table (4 bytes per block, Adler-32)
+ archive.BlockHashes = new uint[numBlocks];
+ byte[] hashBuf = data.ReadBytes(numBlocks * 4);
+ for (int i = 0; i < numBlocks; i++)
+ archive.BlockHashes[i] = System.BitConverter.ToUInt32(hashBuf, i * 4);
+
+ // Compressed data begins immediately after the tables
+ archive.DataOffset = initialOffset + Constants.HeaderSize
+ + ((long)numBlocks * 8)
+ + ((long)numBlocks * 4);
+
+ return archive;
+ }
+ catch
+ {
+ return null;
+ }
+ }
+
+ private static GczHeader ParseGczHeader(Stream data)
+ {
+ var header = new GczHeader();
+ header.MagicCookie = data.ReadUInt32LittleEndian();
+ header.SubType = data.ReadUInt32LittleEndian();
+ header.CompressedDataSize = data.ReadUInt64LittleEndian();
+ header.DataSize = data.ReadUInt64LittleEndian();
+ header.BlockSize = data.ReadUInt32LittleEndian();
+ header.NumBlocks = data.ReadUInt32LittleEndian();
+ return header;
+ }
+ }
+}
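
Usage sketch for the reader (the file path is illustrative):

    using System.IO;

    using Stream gcz = File.OpenRead("game.gcz");
    var image = new SabreTools.Serialization.Readers.GCZ().Deserialize(gcz);
    if (image is not null)
    {
        System.Console.WriteLine($"Blocks: {image.Header.NumBlocks}, " +
            $"block size: 0x{image.Header.BlockSize:X}, " +
            $"ISO size: {image.Header.DataSize} bytes");
    }
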
diff --git a/SabreTools.Serialization.Readers/NintendoDisc.cs b/SabreTools.Serialization.Readers/NintendoDisc.cs
new file mode 100644
index 000000000..83dce4a9d
--- /dev/null
+++ b/SabreTools.Serialization.Readers/NintendoDisc.cs
@@ -0,0 +1,205 @@
+using System.IO;
+using System.Text;
+using SabreTools.Data.Models.NintendoDisc;
+using SabreTools.IO.Extensions;
+using SabreTools.Numerics.Extensions;
+
+#pragma warning disable IDE0017 // Simplify object initialization
+namespace SabreTools.Serialization.Readers
+{
+ public class NintendoDisc : BaseBinaryReader
+ {
+ ///
+ public override Disc? Deserialize(Stream? data)
+ {
+ // If the data is invalid
+ if (data is null || !data.CanRead)
+ return null;
+
+ // Need at least the disc header
+ if (data.Length - data.Position < Constants.DiscHeaderSize)
+ return null;
+
+ try
+ {
+ long initialOffset = data.Position;
+
+ var disc = new Disc();
+
+ // Parse the disc header
+ disc.Header = ParseDiscHeader(data);
+
+ // Determine platform from magic words; fall back to GameId prefix for
+ // GC discs that omit the magic word (e.g. some redump/scene ISOs)
+ if (disc.Header.WiiMagic == Constants.WiiMagicWord)
+ disc.Platform = Platform.Wii;
+ else if (disc.Header.GCMagic == Constants.GCMagicWord)
+ disc.Platform = Platform.GameCube;
+ else if (disc.Header.GameId != null && disc.Header.GameId.Length >= 1
+ && IsGameCubeTitleType(disc.Header.GameId[0]))
+ disc.Platform = Platform.GameCube;
+ else
+ disc.Platform = Platform.Unknown;
+
+ // Parse Wii-specific structures
+ if (disc.Platform == Platform.Wii)
+ {
+ // Partition table starts at 0x40000
+ long partTableEnd = initialOffset + Constants.WiiPartitionTableAddress
+ + (Constants.WiiPartitionGroupCount * 8);
+ if (data.Length >= partTableEnd)
+ disc.PartitionTableEntries = ParsePartitionTable(data, initialOffset);
+
+ // Region data at 0x4E000
+ long regionEnd = initialOffset + Constants.WiiRegionDataAddress + Constants.WiiRegionDataSize;
+ if (data.Length >= regionEnd)
+ {
+ data.Seek(initialOffset + Constants.WiiRegionDataAddress, SeekOrigin.Begin);
+ disc.RegionData = ParseRegionData(data);
+ }
+ }
+
+ return disc;
+ }
+ catch
+ {
+ return null;
+ }
+ }
+
+ #region Header parsing
+
+ ///
+ /// Parses just the disc header fields from the given stream without requiring
+ /// the full 0x440-byte boot block. Requires at least 0x82 bytes (enough to
+ /// reach AudioStreaming and StreamingBufferSize) to be useful; the DOL/FST
+ /// fields will be zero when the stream is shorter than 0x42B bytes.
+ ///
+ public static DiscHeader? ParseDiscHeaderOnly(Stream? data)
+ {
+ if (data is null || !data.CanRead || data.Length - data.Position < 6)
+ return null;
+ try { return ParseDiscHeader(data); }
+ catch { return null; }
+ }
+
+ private static DiscHeader ParseDiscHeader(Stream data)
+ {
+ var header = new DiscHeader();
+
+ // 0x000: 4-char title code + 2-char maker code stored as one 6-byte GameId field
+ byte[] gameIdBytes = data.ReadBytes(Constants.GameIdLength);
+ header.GameId = Encoding.ASCII.GetString(gameIdBytes).TrimEnd('\0');
+
+ // Maker code is the last 2 chars of the GameId (offsets 0x004–0x005).
+ // Dolphin reads it with Read(0x4, 2) — there is no separate field at 0x006.
+ header.MakerCode = header.GameId != null && header.GameId.Length >= 6
+ ? header.GameId.Substring(4, 2)
+ : string.Empty;
+
+ // 0x006: disc number, 0x007: revision (Dolphin GetDiscNumber/GetRevision)
+ header.DiscNumber = data.ReadByteValue();
+ header.DiscVersion = data.ReadByteValue();
+ // 0x008: audio streaming, 0x009: streaming buffer size
+ header.AudioStreaming = data.ReadByteValue();
+ header.StreamingBufferSize = data.ReadByteValue();
+
+ // Skip unused 0x0E bytes (offsets 0x00A–0x017)
+ data.ReadBytes(0x0E);
+
+ header.WiiMagic = data.ReadUInt32BigEndian();
+ header.GCMagic = data.ReadUInt32BigEndian();
+
+ byte[] titleBytes = data.ReadBytes(Constants.GameTitleLength);
+ header.GameTitle = Encoding.ASCII.GetString(titleBytes).TrimEnd('\0');
+
+ header.DisableHashVerification = data.Position < data.Length ? data.ReadByteValue() : (byte)0;
+ header.DisableDiscEncryption = data.Position < data.Length ? data.ReadByteValue() : (byte)0;
+
+ // Skip to DOL/FST offset fields at 0x420.
+ // Position so far: 6+1+1+1+1+14+4+4+96+1+1 = 130 = 0x82
+ int skipToBootBlock = Constants.DolOffsetField - 0x82;
+ if (data.Length - data.Position < skipToBootBlock + 12)
+ return header;
+
+ data.ReadBytes(skipToBootBlock);
+
+ header.DolOffset = data.ReadUInt32BigEndian();
+ header.FstOffset = data.ReadUInt32BigEndian();
+ header.FstSize = data.ReadUInt32BigEndian();
+
+ // Skip the remaining bytes to complete the 0x440 header
+ // We are at 0x420 + 12 = 0x42C; need to reach 0x440
+ data.ReadBytes(Constants.DiscHeaderSize - (Constants.DolOffsetField + 12));
+
+ return header;
+ }
+
+ #endregion
+
+ #region Wii partition table parsing
+
+ private static WiiPartitionTableEntry[]? ParsePartitionTable(Stream data, long baseOffset)
+ {
+ data.Seek(baseOffset + Constants.WiiPartitionTableAddress, SeekOrigin.Begin);
+
+ // Read 4 partition groups; each group has a count and a shifted offset
+ var allEntries = new System.Collections.Generic.List<WiiPartitionTableEntry>();
+
+ for (int g = 0; g < Constants.WiiPartitionGroupCount; g++)
+ {
+ uint count = data.ReadUInt32BigEndian();
+ uint shiftedOffset = data.ReadUInt32BigEndian();
+
+ if (count == 0)
+ continue;
+
+ long tableOffset = baseOffset + ((long)shiftedOffset << 2);
+ long savedPosition = data.Position;
+
+ if (tableOffset + ((long)count * 8) > data.Length)
+ {
+ data.Seek(savedPosition, SeekOrigin.Begin);
+ continue;
+ }
+
+ data.Seek(tableOffset, SeekOrigin.Begin);
+ for (uint i = 0; i < count; i++)
+ {
+ var entry = new WiiPartitionTableEntry();
+ uint rawOffset = data.ReadUInt32BigEndian();
+ entry.Offset = (long)rawOffset << 2;
+ entry.Type = data.ReadUInt32BigEndian();
+ allEntries.Add(entry);
+ }
+
+ data.Seek(savedPosition, SeekOrigin.Begin);
+ }
+
+ return allEntries.Count > 0 ? allEntries.ToArray() : null;
+ }
+
+ #endregion
+
+ #region Wii region data parsing
+
+ private static WiiRegionData ParseRegionData(Stream data)
+ {
+ var region = new WiiRegionData();
+ region.RegionSetting = data.ReadUInt32BigEndian();
+ region.AgeRatings = data.ReadBytes(16);
+ return region;
+ }
+
+ #endregion
+
+ ///
+ /// Returns true if the GameId first character is a known GameCube title type prefix.
+ /// Used as a fallback when the GC magic word is absent from the disc image.
+ ///
+ private static bool IsGameCubeTitleType(char c)
+ {
+ return c == 'G' || c == 'D' || c == 'R';
+ }
+ }
+}
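
Usage sketch (the path is illustrative):

    using System;
    using System.IO;

    using Stream iso = File.OpenRead("game.iso");
    var disc = new SabreTools.Serialization.Readers.NintendoDisc().Deserialize(iso);
    if (disc is not null)
    {
        Console.WriteLine($"{disc.Header.GameId} [{disc.Platform}] {disc.Header.GameTitle}");
        if (disc.PartitionTableEntries is not null)
            foreach (var part in disc.PartitionTableEntries)
                Console.WriteLine($"  partition type {part.Type} at 0x{part.Offset:X}");
    }
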
diff --git a/SabreTools.Serialization.Readers/WIA.cs b/SabreTools.Serialization.Readers/WIA.cs
new file mode 100644
index 000000000..311b3d8e0
--- /dev/null
+++ b/SabreTools.Serialization.Readers/WIA.cs
@@ -0,0 +1,196 @@
+using System.IO;
+using SabreTools.Data.Models.WIA;
+using SabreTools.IO.Extensions;
+using SabreTools.Numerics.Extensions;
+
+#pragma warning disable IDE0017 // Simplify object initialization
+namespace SabreTools.Serialization.Readers
+{
+ public class WIA : BaseBinaryReader
+ {
+ ///
+ public override DiscImage? Deserialize(Stream? data)
+ {
+ // If the data is invalid
+ if (data is null || !data.CanRead)
+ return null;
+
+ // Need at least Header1
+ if (data.Length - data.Position < Constants.Header1Size)
+ return null;
+
+ try
+ {
+ long initialOffset = data.Position;
+
+ var archive = new DiscImage();
+
+ // Parse Header1
+ archive.Header1 = ParseHeader1(data);
+
+ // Validate magic
+ if (archive.Header1.Magic != Constants.WiaMagic && archive.Header1.Magic != Constants.RvzMagic)
+ return null;
+
+ archive.IsRvz = archive.Header1.Magic == Constants.RvzMagic;
+
+ // Parse Header2
+ archive.Header2 = ParseHeader2(data);
+
+ // Parse partition entries (Wii discs only)
+ if (archive.Header2.NumberOfPartitionEntries > 0
+ && archive.Header2.PartitionEntriesOffset > 0)
+ {
+ data.Seek(initialOffset + (long)archive.Header2.PartitionEntriesOffset, SeekOrigin.Begin);
+ archive.PartitionEntries = ParsePartitionEntries(
+ data, (int)archive.Header2.NumberOfPartitionEntries);
+ }
+
+ // Parse raw data entries
+ if (archive.Header2.NumberOfRawDataEntries > 0
+ && archive.Header2.RawDataEntriesOffset > 0)
+ {
+ data.Seek(initialOffset + (long)archive.Header2.RawDataEntriesOffset, SeekOrigin.Begin);
+ archive.RawDataEntries = ParseRawDataEntries(
+ data, (int)archive.Header2.NumberOfRawDataEntries);
+ }
+
+ // Parse group entries
+ if (archive.Header2.NumberOfGroupEntries > 0
+ && archive.Header2.GroupEntriesOffset > 0)
+ {
+ data.Seek(initialOffset + (long)archive.Header2.GroupEntriesOffset, SeekOrigin.Begin);
+ if (archive.IsRvz)
+ archive.RvzGroupEntries = ParseRvzGroupEntries(
+ data, (int)archive.Header2.NumberOfGroupEntries);
+ else
+ archive.GroupEntries = ParseWiaGroupEntries(
+ data, (int)archive.Header2.NumberOfGroupEntries);
+ }
+
+ return archive;
+ }
+ catch
+ {
+ return null;
+ }
+ }
+
+ #region Header parsing
+
+ private static WiaHeader1 ParseHeader1(Stream data)
+ {
+ var h = new WiaHeader1();
+ h.Magic = data.ReadUInt32LittleEndian();
+ h.Version = data.ReadUInt32BigEndian();
+ h.VersionCompatible = data.ReadUInt32BigEndian();
+ h.Header2Size = data.ReadUInt32BigEndian();
+ h.Header2Hash = data.ReadBytes(20);
+ h.IsoFileSize = data.ReadUInt64BigEndian();
+ h.WiaFileSize = data.ReadUInt64BigEndian();
+ h.Header1Hash = data.ReadBytes(20);
+ return h;
+ }
+
+ private static WiaHeader2 ParseHeader2(Stream data)
+ {
+ var h = new WiaHeader2();
+ h.DiscType = (WiaDiscType)data.ReadUInt32BigEndian();
+ h.CompressionType = (WiaRvzCompressionType)data.ReadUInt32BigEndian();
+ h.CompressionLevel = data.ReadInt32BigEndian();
+ h.ChunkSize = data.ReadUInt32BigEndian();
+ h.DiscHeader = data.ReadBytes(0x80);
+ h.NumberOfPartitionEntries = data.ReadUInt32BigEndian();
+ h.PartitionEntrySize = data.ReadUInt32BigEndian();
+ h.PartitionEntriesOffset = data.ReadUInt64BigEndian();
+ h.PartitionEntriesHash = data.ReadBytes(20);
+ h.NumberOfRawDataEntries = data.ReadUInt32BigEndian();
+ h.RawDataEntriesOffset = data.ReadUInt64BigEndian();
+ h.RawDataEntriesSize = data.ReadUInt32BigEndian();
+ h.NumberOfGroupEntries = data.ReadUInt32BigEndian();
+ h.GroupEntriesOffset = data.ReadUInt64BigEndian();
+ h.GroupEntriesSize = data.ReadUInt32BigEndian();
+ h.CompressorDataSize = data.ReadByteValue();
+ h.CompressorData = data.ReadBytes(7);
+ return h;
+ }
+
+ #endregion
+
+ #region Table parsing
+
+ private static PartitionEntry[] ParsePartitionEntries(Stream data, int count)
+ {
+ var entries = new PartitionEntry[count];
+ for (int i = 0; i < count; i++)
+ {
+ var e = new PartitionEntry();
+ e.PartitionKey = data.ReadBytes(16);
+ e.DataEntry0 = ParsePartitionDataEntry(data);
+ e.DataEntry1 = ParsePartitionDataEntry(data);
+ entries[i] = e;
+ }
+
+ return entries;
+ }
+
+ private static PartitionDataEntry ParsePartitionDataEntry(Stream data)
+ {
+ var e = new PartitionDataEntry();
+ e.FirstSector = data.ReadUInt32BigEndian();
+ e.NumberOfSectors = data.ReadUInt32BigEndian();
+ e.GroupIndex = data.ReadUInt32BigEndian();
+ e.NumberOfGroups = data.ReadUInt32BigEndian();
+ return e;
+ }
+
+ private static RawDataEntry[] ParseRawDataEntries(Stream data, int count)
+ {
+ var entries = new RawDataEntry[count];
+ for (int i = 0; i < count; i++)
+ {
+ var e = new RawDataEntry();
+ e.DataOffset = data.ReadUInt64BigEndian();
+ e.DataSize = data.ReadUInt64BigEndian();
+ e.GroupIndex = data.ReadUInt32BigEndian();
+ e.NumberOfGroups = data.ReadUInt32BigEndian();
+ entries[i] = e;
+ }
+
+ return entries;
+ }
+
+ private static WiaGroupEntry[] ParseWiaGroupEntries(Stream data, int count)
+ {
+ var entries = new WiaGroupEntry[count];
+ for (int i = 0; i < count; i++)
+ {
+ var e = new WiaGroupEntry();
+ // DataOffset stored as actual_offset >> 2
+ e.DataOffset = (ulong)data.ReadUInt32BigEndian() << 2;
+ e.DataSize = data.ReadUInt32BigEndian();
+ entries[i] = e;
+ }
+
+ return entries;
+ }
+
+ private static RvzGroupEntry[] ParseRvzGroupEntries(Stream data, int count)
+ {
+ var entries = new RvzGroupEntry[count];
+ for (int i = 0; i < count; i++)
+ {
+ var e = new RvzGroupEntry();
+ // DataOffset stored as actual_offset >> 2
+ e.DataOffset = (ulong)data.ReadUInt32BigEndian() << 2;
+ e.DataSize = data.ReadUInt32BigEndian();
+ e.RvzPackedSize = data.ReadUInt32BigEndian();
+ entries[i] = e;
+ }
+
+ return entries;
+ }
+
+ #endregion
+ }
+}
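
Because WIA and RVZ share everything except the group-entry shape, consumers typically branch once on IsRvz. A sketch, assuming an already-opened stream:

    var image = new SabreTools.Serialization.Readers.WIA().Deserialize(stream);
    if (image is not null)
    {
        int groups = image.IsRvz
            ? image.RvzGroupEntries?.Length ?? 0
            : image.GroupEntries?.Length ?? 0;
        System.Console.WriteLine($"{(image.IsRvz ? "RVZ" : "WIA")}: {groups} groups, " +
            $"{image.Header2.CompressionType} compression");
    }
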
diff --git a/SabreTools.Serialization.Writers/GCZ.cs b/SabreTools.Serialization.Writers/GCZ.cs
new file mode 100644
index 000000000..7e57922f2
--- /dev/null
+++ b/SabreTools.Serialization.Writers/GCZ.cs
@@ -0,0 +1,93 @@
+using System.IO;
+using SabreTools.Data.Models.GCZ;
+
+namespace SabreTools.Serialization.Writers
+{
+ // TODO: Full round-trip write (including compressed block data) requires a source
+ // IBlobReader. This implementation serializes only the structural metadata
+ // (header + block pointer table + block hash table) to a stream or file.
+ public class GCZ : IFileWriter
+ {
+ ///
+ public bool Debug { get; set; } = false;
+
+ ///
+ public bool SerializeFile(DiscImage? obj, string? path)
+ {
+ if (string.IsNullOrEmpty(path))
+ return false;
+
+ if (obj is null || !ValidateArchive(obj))
+ return false;
+
+ using var fs = File.Open(path, FileMode.Create, FileAccess.Write, FileShare.None);
+ return SerializeStream(obj, fs);
+ }
+
+ ///
+ /// Serialize the GCZ structural metadata (header + tables) to a stream.
+ /// Writes: 32-byte header, block pointer table, block hash table.
+ /// The caller is responsible for writing compressed block data afterward.
+ ///
+ public bool SerializeStream(DiscImage? obj, Stream? stream)
+ {
+ if (stream is null || !stream.CanWrite)
+ return false;
+
+ if (obj is null || !ValidateArchive(obj))
+ return false;
+
+ // Header (32 bytes, little-endian)
+ WriteUInt32LE(stream, obj.Header.MagicCookie);
+ WriteUInt32LE(stream, obj.Header.SubType);
+ WriteUInt64LE(stream, obj.Header.CompressedDataSize);
+ WriteUInt64LE(stream, obj.Header.DataSize);
+ WriteUInt32LE(stream, obj.Header.BlockSize);
+ WriteUInt32LE(stream, obj.Header.NumBlocks);
+
+ // Block pointer table (8 bytes per block, little-endian)
+ foreach (ulong ptr in obj.BlockPointers)
+ WriteUInt64LE(stream, ptr);
+
+ // Block hash table (4 bytes per block, little-endian)
+ foreach (uint hash in obj.BlockHashes)
+ WriteUInt32LE(stream, hash);
+
+ stream.Flush();
+ return true;
+ }
+
+ private static bool ValidateArchive(DiscImage obj)
+ {
+ if (obj.Header is null)
+ return false;
+ if (obj.Header.MagicCookie != Constants.MagicCookie)
+ return false;
+ if (obj.Header.NumBlocks == 0)
+ return false;
+ if (obj.BlockPointers is null || obj.BlockPointers.Length != (int)obj.Header.NumBlocks)
+ return false;
+ if (obj.BlockHashes is null || obj.BlockHashes.Length != (int)obj.Header.NumBlocks)
+ return false;
+ return true;
+ }
+
+ #region Little-endian write helpers
+
+ private static void WriteUInt32LE(Stream s, uint value)
+ {
+ s.WriteByte((byte)value);
+ s.WriteByte((byte)(value >> 8));
+ s.WriteByte((byte)(value >> 16));
+ s.WriteByte((byte)(value >> 24));
+ }
+
+ private static void WriteUInt64LE(Stream s, ulong value)
+ {
+ WriteUInt32LE(s, (uint)value);
+ WriteUInt32LE(s, (uint)(value >> 32));
+ }
+
+ #endregion
+ }
+}
diff --git a/SabreTools.Serialization.Writers/WIA.cs b/SabreTools.Serialization.Writers/WIA.cs
new file mode 100644
index 000000000..ac6ea3b01
--- /dev/null
+++ b/SabreTools.Serialization.Writers/WIA.cs
@@ -0,0 +1,178 @@
+using System.IO;
+using SabreTools.Data.Models.WIA;
+
+namespace SabreTools.Serialization.Writers
+{
+ // TODO: Full round-trip write (including compressed group data) requires a source
+ // IBlobReader and compression pipeline. This implementation serializes only
+ // the structural metadata (Header1, Header2, and all lookup tables).
+ public class WIA : IFileWriter
+ {
+ ///
+ public bool Debug { get; set; } = false;
+
+ ///
+ public bool SerializeFile(DiscImage? obj, string? path)
+ {
+ if (string.IsNullOrEmpty(path))
+ return false;
+
+ if (obj is null || !ValidateArchive(obj))
+ return false;
+
+ using var fs = File.Open(path, FileMode.Create, FileAccess.Write, FileShare.None);
+ return SerializeStream(obj, fs);
+ }
+
+ ///
+ /// Serialize the WIA / RVZ structural metadata to a stream.
+ /// Writes Header1, Header2, partition entries, raw data entries, and group entries.
+ /// The caller is responsible for writing group (compressed block) data.
+ ///
+ public bool SerializeStream(DiscImage? obj, Stream? stream)
+ {
+ if (stream is null || !stream.CanWrite)
+ return false;
+
+ if (obj is null || !ValidateArchive(obj))
+ return false;
+
+ WriteHeader1(stream, obj.Header1);
+ WriteHeader2(stream, obj.Header2);
+
+ // Partition entries
+ if (obj.PartitionEntries != null)
+ {
+ foreach (var pe in obj.PartitionEntries)
+ WritePartitionEntry(stream, pe);
+ }
+
+ // Raw data entries
+ foreach (var re in obj.RawDataEntries)
+ WriteRawDataEntry(stream, re);
+
+ // Group entries
+ if (obj.IsRvz && obj.RvzGroupEntries != null)
+ {
+ foreach (var ge in obj.RvzGroupEntries)
+ WriteRvzGroupEntry(stream, ge);
+ }
+ else if (!obj.IsRvz && obj.GroupEntries != null)
+ {
+ foreach (var ge in obj.GroupEntries)
+ WriteWiaGroupEntry(stream, ge);
+ }
+
+ stream.Flush();
+ return true;
+ }
+
+ private static bool ValidateArchive(DiscImage obj)
+ {
+ if (obj.Header1 is null || obj.Header2 is null)
+ return false;
+ if (obj.Header1.Magic != Constants.WiaMagic && obj.Header1.Magic != Constants.RvzMagic)
+ return false;
+ return true;
+ }
+
+ #region Write helpers
+
+ private static void WriteHeader1(Stream s, WiaHeader1 h)
+ {
+ WriteUInt32LE(s, h.Magic);
+ WriteUInt32BE(s, h.Version);
+ WriteUInt32BE(s, h.VersionCompatible);
+ WriteUInt32BE(s, h.Header2Size);
+ s.Write(h.Header2Hash, 0, 20);
+ WriteUInt64BE(s, h.IsoFileSize);
+ WriteUInt64BE(s, h.WiaFileSize);
+ s.Write(h.Header1Hash, 0, 20);
+ }
+
+ private static void WriteHeader2(Stream s, WiaHeader2 h)
+ {
+ WriteUInt32BE(s, (uint)h.DiscType);
+ WriteUInt32BE(s, (uint)h.CompressionType);
+ WriteInt32BE(s, h.CompressionLevel);
+ WriteUInt32BE(s, h.ChunkSize);
+ s.Write(h.DiscHeader, 0, 0x80);
+ WriteUInt32BE(s, h.NumberOfPartitionEntries);
+ WriteUInt32BE(s, h.PartitionEntrySize);
+ WriteUInt64BE(s, h.PartitionEntriesOffset);
+ s.Write(h.PartitionEntriesHash, 0, 20);
+ WriteUInt32BE(s, h.NumberOfRawDataEntries);
+ WriteUInt64BE(s, h.RawDataEntriesOffset);
+ WriteUInt32BE(s, h.RawDataEntriesSize);
+ WriteUInt32BE(s, h.NumberOfGroupEntries);
+ WriteUInt64BE(s, h.GroupEntriesOffset);
+ WriteUInt32BE(s, h.GroupEntriesSize);
+ s.WriteByte(h.CompressorDataSize);
+ s.Write(h.CompressorData, 0, 7);
+ }
+
+ private static void WritePartitionDataEntry(Stream s, PartitionDataEntry e)
+ {
+ WriteUInt32BE(s, e.FirstSector);
+ WriteUInt32BE(s, e.NumberOfSectors);
+ WriteUInt32BE(s, e.GroupIndex);
+ WriteUInt32BE(s, e.NumberOfGroups);
+ }
+
+ private static void WritePartitionEntry(Stream s, PartitionEntry e)
+ {
+ s.Write(e.PartitionKey, 0, 16);
+ WritePartitionDataEntry(s, e.DataEntry0);
+ WritePartitionDataEntry(s, e.DataEntry1);
+ }
+
+ private static void WriteRawDataEntry(Stream s, RawDataEntry e)
+ {
+ WriteUInt64BE(s, e.DataOffset);
+ WriteUInt64BE(s, e.DataSize);
+ WriteUInt32BE(s, e.GroupIndex);
+ WriteUInt32BE(s, e.NumberOfGroups);
+ }
+
+ private static void WriteWiaGroupEntry(Stream s, WiaGroupEntry e)
+ {
+ // DataOffset stored as actual_offset >> 2
+ WriteUInt32BE(s, (uint)(e.DataOffset >> 2));
+ WriteUInt32BE(s, e.DataSize);
+ }
+
+ private static void WriteRvzGroupEntry(Stream s, RvzGroupEntry e)
+ {
+ // DataOffset stored as actual_offset >> 2
+ WriteUInt32BE(s, (uint)(e.DataOffset >> 2));
+ WriteUInt32BE(s, e.DataSize);
+ WriteUInt32BE(s, e.RvzPackedSize);
+ }
+
+ private static void WriteUInt32LE(Stream s, uint v)
+ {
+ s.WriteByte((byte)v);
+ s.WriteByte((byte)(v >> 8));
+ s.WriteByte((byte)(v >> 16));
+ s.WriteByte((byte)(v >> 24));
+ }
+
+ private static void WriteUInt32BE(Stream s, uint v)
+ {
+ s.WriteByte((byte)(v >> 24));
+ s.WriteByte((byte)(v >> 16));
+ s.WriteByte((byte)(v >> 8));
+ s.WriteByte((byte)v);
+ }
+
+ private static void WriteInt32BE(Stream s, int v) => WriteUInt32BE(s, (uint)v);
+
+ private static void WriteUInt64BE(Stream s, ulong v)
+ {
+ WriteUInt32BE(s, (uint)(v >> 32));
+ WriteUInt32BE(s, (uint)v);
+ }
+
+ #endregion
+ }
+}
diff --git a/SabreTools.Wrappers/GCZ.Extraction.cs b/SabreTools.Wrappers/GCZ.Extraction.cs
new file mode 100644
index 000000000..bafc28333
--- /dev/null
+++ b/SabreTools.Wrappers/GCZ.Extraction.cs
@@ -0,0 +1,13 @@
+namespace SabreTools.Wrappers
+{
+ public partial class GCZ : IExtractable
+ {
+ ///
+ public bool Extract(string outputDirectory, bool includeDebug)
+ {
+ // Decompress GCZ to obtain the inner disc image, then delegate extraction.
+ var inner = GetInnerWrapper();
+ return inner?.Extract(outputDirectory, includeDebug) ?? false;
+ }
+ }
+}
diff --git a/SabreTools.Wrappers/GCZ.Printing.cs b/SabreTools.Wrappers/GCZ.Printing.cs
new file mode 100644
index 000000000..ed3a8e656
--- /dev/null
+++ b/SabreTools.Wrappers/GCZ.Printing.cs
@@ -0,0 +1,39 @@
+using System.Text;
+using SabreTools.Text.Extensions;
+
+namespace SabreTools.Wrappers
+{
+ public partial class GCZ : IPrintable
+ {
+#if NETCOREAPP
+ ///
+ public string ExportJSON() => System.Text.Json.JsonSerializer.Serialize(Model, _jsonSerializerOptions);
+#endif
+
+ ///
+ public void PrintInformation(StringBuilder builder)
+ {
+ builder.AppendLine("GCZ Information:");
+ builder.AppendLine("-------------------------");
+ builder.AppendLine(Header.MagicCookie, "Magic Cookie");
+ builder.AppendLine(Header.SubType, "Sub-Type");
+ builder.AppendLine(Header.CompressedDataSize, "Compressed Data Size");
+ builder.AppendLine(Header.DataSize, "Uncompressed Data Size");
+ builder.AppendLine(Header.BlockSize, "Block Size");
+ builder.AppendLine(Header.NumBlocks, "Block Count");
+ builder.AppendLine();
+
+ var discHeader = DiscHeader;
+ if (discHeader is not null)
+ {
+ builder.AppendLine("Embedded Disc Header:");
+ builder.AppendLine(discHeader.GameId, " Game ID");
+ builder.AppendLine(discHeader.MakerCode, " Maker Code");
+ builder.AppendLine(discHeader.DiscNumber, " Disc Number");
+ builder.AppendLine(discHeader.DiscVersion, " Disc Version");
+ builder.AppendLine(discHeader.GameTitle, " Game Title");
+ builder.AppendLine();
+ }
+ }
+ }
+}
diff --git a/SabreTools.Wrappers/GCZ.Writing.cs b/SabreTools.Wrappers/GCZ.Writing.cs
new file mode 100644
index 000000000..aa564da22
--- /dev/null
+++ b/SabreTools.Wrappers/GCZ.Writing.cs
@@ -0,0 +1,252 @@
+using System;
+using System.IO;
+using SabreTools.Data.Models.GCZ;
+
+namespace SabreTools.Wrappers
+{
+ public partial class GCZ : IWritable
+ {
+ ///
+ /// Compress a NintendoDisc wrapper to a GCZ file at the given path.
+ ///
+ /// Decompressed disc image to compress.
+ /// Destination file path.
+ ///
+ /// GCZ block size: 32 KiB, 64 KiB, or 128 KiB.
+ /// Defaults to Constants.DefaultBlockSize (32 KiB).
+ ///
+ /// True on success, false on failure.
+ public static bool ConvertFromDisc(NintendoDisc source, string outputPath,
+ uint blockSize = Constants.DefaultBlockSize)
+ {
+ if (source is null)
+ return false;
+ if (string.IsNullOrEmpty(outputPath))
+ return false;
+ if (blockSize != Constants.BlockSize32K &&
+ blockSize != Constants.BlockSize64K &&
+ blockSize != Constants.BlockSize128K)
+ return false;
+
+ try
+ {
+ using var fs = File.Open(outputPath, FileMode.Create, FileAccess.ReadWrite, FileShare.None);
+ return WriteGcz(source, fs, blockSize);
+ }
+ catch
+ {
+ return false;
+ }
+ }
+
+ ///
+ public bool Write(string outputPath, bool includeDebug)
+ {
+ // Re-serialize the structural metadata (header + tables) only.
+ // Full round-trip compression from an already-GCZ source requires ConvertFromDisc.
+ if (string.IsNullOrEmpty(outputPath))
+ {
+ string outputFilename = Filename is null
+ ? (Guid.NewGuid().ToString() + ".gcz")
+ : (Filename + ".new");
+ outputPath = Path.GetFullPath(outputFilename);
+ }
+
+ if (Model?.Header is null)
+ {
+ if (includeDebug) Console.WriteLine("Model was invalid, cannot write!");
+ return false;
+ }
+
+ var writer = new Serialization.Writers.GCZ { Debug = includeDebug };
+ return writer.SerializeFile(Model, outputPath);
+ }
+
+ // -----------------------------------------------------------------------
+ // Core GCZ compression pipeline (ISO → GCZ)
+ // -----------------------------------------------------------------------
+
+ ///
+ /// Write a GCZ image to the destination stream from a decompressed disc source.
+ /// Matches Dolphin's CompressFileToBlob() in CompressedBlob.cpp.
+ ///
+ private static bool WriteGcz(NintendoDisc source, Stream destination, uint blockSize)
+ {
+ long sourceSize = source.DataLength;
+ if (sourceSize <= 0)
+ return false;
+
+ uint numBlocks = (uint)((sourceSize + blockSize - 1) / blockSize);
+
+ // ---- Step 1: Write placeholder header (will be patched at end) ----
+ long headerPos = destination.Position;
+ var header = new GczHeader
+ {
+ MagicCookie = Constants.MagicCookie,
+ SubType = 0,
+ CompressedDataSize = 0,
+ DataSize = (ulong)sourceSize,
+ BlockSize = blockSize,
+ NumBlocks = numBlocks,
+ };
+ WriteHeader(destination, header);
+
+ // ---- Step 2: Reserve block-pointer table (8 bytes each) ----
+ long blockTablePos = destination.Position;
+ var blockPointers = new ulong[numBlocks];
+ destination.Position += (long)numBlocks * 8;
+
+ // ---- Step 3: Reserve block-hash table (4 bytes each) ----
+ var blockHashes = new uint[numBlocks];
+ destination.Position += (long)numBlocks * 4;
+
+ // ---- Step 4: Data section starts here ----
+ long dataStartPos = destination.Position;
+ var readBuf = new byte[blockSize];
+ var compressBuf = new byte[(int)blockSize * 2];
+
+ for (uint bi = 0; bi < numBlocks; bi++)
+ {
+ long blockOffset = (long)bi * blockSize;
+ int blockDataSize = (int)Math.Min(blockSize, sourceSize - blockOffset);
+
+ byte[]? raw = source.ReadData(blockOffset, blockDataSize);
+ if (raw is null || raw.Length != blockDataSize)
+ return false;
+
+ if (blockDataSize < readBuf.Length)
+ Array.Copy(raw, readBuf, blockDataSize);
+ else
+ readBuf = raw;
+
+ // Record pointer as offset relative to data section start
+ ulong blockPointer = (ulong)(destination.Position - dataStartPos);
+
+ int compressedSize;
+ bool useCompression = TryCompressBlock(readBuf, blockDataSize, compressBuf, out compressedSize);
+
+ if (useCompression)
+ {
+ blockPointers[bi] = blockPointer;
+ destination.Write(compressBuf, 0, compressedSize);
+ blockHashes[bi] = Adler32(compressBuf, compressedSize);
+ }
+ else
+ {
+ blockPointers[bi] = blockPointer | Constants.UncompressedFlag;
+ destination.Write(readBuf, 0, blockDataSize);
+ blockHashes[bi] = Adler32(readBuf, blockDataSize);
+ }
+ }
+
+ // ---- Step 5: Patch header with final compressed-data size ----
+ long finalEnd = destination.Position;
+ header.CompressedDataSize = (ulong)(finalEnd - dataStartPos);
+
+ // ---- Step 6: Write block-pointer table ----
+ destination.Position = blockTablePos;
+ foreach (ulong ptr in blockPointers)
+ WriteUInt64LE(destination, ptr);
+
+ // ---- Step 7: Write block-hash table ----
+ foreach (uint h in blockHashes)
+ WriteUInt32LE(destination, h);
+
+ // ---- Step 8: Patch header ----
+ destination.Position = headerPos;
+ WriteHeader(destination, header);
+
+ destination.Position = finalEnd;
+ destination.Flush();
+ return true;
+ }
+
+ // -----------------------------------------------------------------------
+ // Compression helpers
+ // -----------------------------------------------------------------------
+
+ ///
+ /// Attempts to zlib-compress inputSize bytes of input
+ /// into output. Returns true and sets compressedSize
+ /// when the result is smaller than 97% of the original (Dolphin's threshold).
+ /// GCZ uses the zlib framing: 2-byte header (0x78 0x9C) + deflate stream + 4-byte Adler-32 tail.
+ ///
+#if NET20 || NET35 || NET40
+ private static bool TryCompressBlock(byte[] input, int inputSize, byte[] output, out int compressedSize)
+ {
+ // DeflateStream leaveOpen overload and CompressionLevel are not available on net20/net35/net40.
+ // Fall back to storing all blocks uncompressed on those targets.
+ compressedSize = 0;
+ return false;
+ }
+#else
+    private static bool TryCompressBlock(byte[] input, int inputSize, byte[] output, out int compressedSize)
+    {
+        try
+        {
+            using (var ms = new MemoryStream(output))
+            {
+                // zlib header: CMF/FLG pair for deflate with default settings
+                ms.WriteByte(0x78);
+                ms.WriteByte(0x9C);
+
+                using (var ds = new System.IO.Compression.DeflateStream(
+                    ms, System.IO.Compression.CompressionLevel.Optimal, leaveOpen: true))
+                {
+                    ds.Write(input, 0, inputSize);
+                }
+
+                // zlib trailer: Adler-32 of the uncompressed input, big-endian
+                uint adler = Adler32(input, inputSize);
+                ms.WriteByte((byte)(adler >> 24));
+                ms.WriteByte((byte)(adler >> 16));
+                ms.WriteByte((byte)(adler >> 8));
+                ms.WriteByte((byte)adler);
+
+                compressedSize = (int)ms.Position;
+            }
+        }
+        catch (NotSupportedException)
+        {
+            // The fixed-size output buffer overflowed (incompressible data); store the block raw.
+            compressedSize = 0;
+            return false;
+        }
+
+        int threshold = inputSize * 97 / 100;
+        return compressedSize < threshold;
+    }
+#endif
+
+ /// Adler-32 checksum (zlib/deflate standard).
+ private static uint Adler32(byte[] data, int length)
+ {
+ const uint MOD = 65521;
+ uint a = 1, b = 0;
+ for (int i = 0; i < length; i++)
+ {
+ a = (a + data[i]) % MOD;
+ b = (b + a) % MOD;
+ }
+
+ return (b << 16) | a;
+ }
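+
+    // Known test vector (standard Adler-32 example): the ASCII bytes of "Wikipedia"
+    // hash to 0x11E60398, handy as a quick sanity check:
+    //   Adler32(System.Text.Encoding.ASCII.GetBytes("Wikipedia"), 9) == 0x11E60398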
+
+ // -----------------------------------------------------------------------
+ // Little-endian binary write helpers
+ // -----------------------------------------------------------------------
+
+ private static void WriteHeader(Stream s, GczHeader h)
+ {
+ WriteUInt32LE(s, h.MagicCookie);
+ WriteUInt32LE(s, h.SubType);
+ WriteUInt64LE(s, h.CompressedDataSize);
+ WriteUInt64LE(s, h.DataSize);
+ WriteUInt32LE(s, h.BlockSize);
+ WriteUInt32LE(s, h.NumBlocks);
+ }
+
+ private static void WriteUInt32LE(Stream s, uint v)
+ {
+ s.WriteByte((byte)v);
+ s.WriteByte((byte)(v >> 8));
+ s.WriteByte((byte)(v >> 16));
+ s.WriteByte((byte)(v >> 24));
+ }
+
+ private static void WriteUInt64LE(Stream s, ulong v)
+ {
+ WriteUInt32LE(s, (uint)v);
+ WriteUInt32LE(s, (uint)(v >> 32));
+ }
+ }
+}
diff --git a/SabreTools.Wrappers/GCZ.cs b/SabreTools.Wrappers/GCZ.cs
new file mode 100644
index 000000000..d188c584d
--- /dev/null
+++ b/SabreTools.Wrappers/GCZ.cs
@@ -0,0 +1,271 @@
+using System.IO;
+using SabreTools.Data.Models.GCZ;
+using SabreTools.Data.Models.NintendoDisc;
+#if !NET20 && !NET35
+using System.IO.Compression;
+#endif
+
+namespace SabreTools.Wrappers
+{
+    public partial class GCZ : WrapperBase<DiscImage>
+ {
+ #region Descriptive Properties
+
+ ///
+ public override string DescriptionString => "GCZ Compressed GameCube / Wii Disc Image";
+
+ #endregion
+
+ #region Extension Properties
+
+ ///
+ public GczHeader Header => Model.Header;
+
+ ///
+ /// Total decompressed size of the disc image in bytes
+ ///
+ public ulong DataSize => Model.Header.DataSize;
+
+ ///
+ /// Number of compressed blocks in this image
+ ///
+ public uint NumBlocks => Model.Header.NumBlocks;
+
+ ///
+ /// Size of each uncompressed block in bytes
+ ///
+ public uint BlockSize => Model.Header.BlockSize;
+
+ ///
+ /// Block pointer table — top bit indicates uncompressed flag
+ ///
+ public ulong[] BlockPointers => Model.BlockPointers;
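+
+        // Example: pointer 0x8000000000001000 has the top bit set, so the block is stored
+        // uncompressed at data-section offset 0x1000; pointer 0x0000000000001000 would be
+        // a deflate-compressed block at the same offset.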
+
+ ///
+        /// Adler-32 hash of each stored block, computed over the bytes as written (compressed or raw)
+ ///
+ public uint[] BlockHashes => Model.BlockHashes;
+
+ ///
+ /// Disc header parsed by decompressing the first block of the GCZ image.
+ ///
+ public DiscHeader? DiscHeader
+ {
+ get
+ {
+ if (_discHeaderCached)
+ return _discHeader;
+ _discHeader = ReadDiscHeader();
+ _discHeaderCached = true;
+ return _discHeader;
+ }
+ }
+
+ private DiscHeader? _discHeader;
+ private bool _discHeaderCached;
+
+ #endregion
+
+ #region Constructors
+
+ ///
+ public GCZ(DiscImage model, byte[] data) : base(model, data) { }
+
+ ///
+ public GCZ(DiscImage model, byte[] data, int offset) : base(model, data, offset) { }
+
+ ///
+ public GCZ(DiscImage model, byte[] data, int offset, int length) : base(model, data, offset, length) { }
+
+ ///
+ public GCZ(DiscImage model, Stream data) : base(model, data) { }
+
+ ///
+ public GCZ(DiscImage model, Stream data, long offset) : base(model, data, offset) { }
+
+ ///
+ public GCZ(DiscImage model, Stream data, long offset, long length) : base(model, data, offset, length) { }
+
+ #endregion
+
+ #region Static Constructors
+
+ ///
+ /// Create a GCZ wrapper from a byte array and offset
+ ///
+ /// Byte array representing the GCZ image
+ /// Offset within the array to parse
+ /// A GCZ wrapper on success, null on failure
+ public static GCZ? Create(byte[]? data, int offset)
+ {
+ // If the data is invalid
+ if (data is null || data.Length == 0)
+ return null;
+
+ // If the offset is out of bounds
+ if (offset < 0 || offset >= data.Length)
+ return null;
+
+ // Create a memory stream and use that
+ var dataStream = new MemoryStream(data, offset, data.Length - offset);
+ return Create(dataStream);
+ }
+
+ ///
+ /// Create a GCZ wrapper from a Stream
+ ///
+ /// Stream representing the GCZ image
+ /// A GCZ wrapper on success, null on failure
+ public static GCZ? Create(Stream? data)
+ {
+ // If the data is invalid
+ if (data is null || !data.CanRead)
+ return null;
+
+ try
+ {
+ long currentOffset = data.Position;
+
+ var model = new Serialization.Readers.GCZ().Deserialize(data);
+ if (model is null)
+ return null;
+
+ return new GCZ(model, data, currentOffset);
+ }
+ catch
+ {
+ return null;
+ }
+ }
+
+ #endregion
+
+ #region Inner Wrapper
+
+ ///
+ /// Returns a NintendoDisc wrapper backed by a virtual stream that decompresses
+ /// GCZ blocks on demand, avoiding loading the entire ISO into memory.
+ ///
+ public NintendoDisc? GetInnerWrapper()
+ {
+ if (Model.BlockPointers is null || Model.BlockPointers.Length == 0)
+ return null;
+
+ if (Model.Header.DataSize == 0)
+ return null;
+
+ var vStream = new GczVirtualStream(this);
+ return NintendoDisc.Create(vStream);
+ }
+
+ ///
+ /// Decompresses a single GCZ block by index and returns its raw bytes.
+ /// Returns null on failure; returns a zero-filled block if the compressed size is zero.
+ ///
+ internal byte[]? DecompressBlock(int blockIndex)
+ {
+ const ulong UncompressedFlag = 0x8000000000000000UL;
+
+ if (blockIndex < 0 || blockIndex >= Model.BlockPointers.Length)
+ return null;
+
+ ulong ptr = Model.BlockPointers[blockIndex];
+ bool uncompressed = (ptr & UncompressedFlag) != 0;
+ long blockFileOffset = Model.DataOffset + (long)(ptr & ~UncompressedFlag);
+
+ ulong nextRaw = (blockIndex + 1 < Model.BlockPointers.Length)
+ ? Model.BlockPointers[blockIndex + 1] & ~UncompressedFlag
+ : Model.Header.CompressedDataSize;
+ int compSize = (int)(nextRaw - (ptr & ~UncompressedFlag));
+
+ if (compSize <= 0)
+ return new byte[Model.Header.BlockSize];
+
+ byte[] raw = ReadRangeFromSource(blockFileOffset, compSize);
+ if (raw is null || raw.Length != compSize)
+ return null;
+
+ if (uncompressed)
+ return raw;
+
+ if (raw.Length < 6)
+ return null;
+
+#if NET20 || NET35
+ return null;
+#else
+ try
+ {
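+                // Skip the 2-byte zlib header and the 4-byte Adler-32 trailer;
+                // DeflateStream expects a raw deflate stream with no zlib framing.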
+ using var cs = new MemoryStream(raw, 2, raw.Length - 6);
+ using var ds = new DeflateStream(cs, CompressionMode.Decompress);
+ using var os = new MemoryStream();
+ ds.CopyTo(os);
+ return os.ToArray();
+ }
+ catch
+ {
+ return null;
+ }
+#endif
+ }
+
+ ///
+ /// Decompresses just the first block of the GCZ image to read the disc header,
+ /// without decompressing the entire image.
+ ///
+ private DiscHeader? ReadDiscHeader()
+ {
+ const ulong UncompressedFlag = 0x8000000000000000UL;
+
+ if (Model.BlockPointers is null || Model.BlockPointers.Length == 0)
+ return null;
+
+ ulong ptr = Model.BlockPointers[0];
+ bool uncompressed = (ptr & UncompressedFlag) != 0;
+ long blockFileOffset = Model.DataOffset + (long)(ptr & ~UncompressedFlag);
+
+ ulong nextRaw = Model.BlockPointers.Length > 1
+ ? Model.BlockPointers[1] & ~UncompressedFlag
+ : Model.Header.CompressedDataSize;
+ int compSize = (int)(nextRaw - (ptr & ~UncompressedFlag));
+
+ if (compSize <= 0)
+ return null;
+
+ byte[] raw = ReadRangeFromSource(blockFileOffset, compSize);
+ if (raw is null || raw.Length != compSize)
+ return null;
+
+ byte[] block;
+ if (uncompressed)
+ {
+ block = raw;
+ }
+ else
+ {
+#if NET20 || NET35
+ return null;
+#else
+ try
+ {
+ using var cs = new MemoryStream(raw, 2, raw.Length - 6);
+ using var ds = new DeflateStream(cs, CompressionMode.Decompress);
+ using var os = new MemoryStream();
+ ds.CopyTo(os);
+ block = os.ToArray();
+ }
+ catch
+ {
+ return null;
+ }
+#endif
+ }
+
+ using var ms = new MemoryStream(block);
+ var disc = new Serialization.Readers.NintendoDisc().Deserialize(ms);
+ return disc?.Header;
+ }
+
+ #endregion
+ }
+}
diff --git a/SabreTools.Wrappers/GcFst.cs b/SabreTools.Wrappers/GcFst.cs
new file mode 100644
index 000000000..e5b36e82d
--- /dev/null
+++ b/SabreTools.Wrappers/GcFst.cs
@@ -0,0 +1,167 @@
+using System.Collections.Generic;
+
+namespace SabreTools.Wrappers
+{
+ ///
+    /// Lightweight GameCube / Wii File-System Table (FST) reader used by the RVZ
+    /// junk-detection path to distinguish real-file regions from junk padding.
+ ///
+ /// Mirrors Dolphin's FileSystemGCWii offset-to-file-info cache
+ /// (m_offset_file_info_cache).
+ ///
+ internal sealed class GcFst
+ {
+ private const int EntrySize = 12;
+
+ /// File entry with start and end byte offsets on disc.
+ internal struct FileEntry
+ {
+ public long FileStart;
+ public long FileEnd;
+ }
+
+ // Sorted ascending by FileEnd for O(log n) upper_bound queries.
+        private readonly List<FileEntry> _files;
+
+        private GcFst(List<FileEntry> files)
+ {
+ _files = files;
+ }
+
+ ///
+        /// Parses a raw FST binary blob and returns a GcFst instance,
+ /// or null if the data is too short or structurally invalid.
+ ///
+ ///
+ /// Raw FST bytes exactly as stored on disc (GameCube) or in decrypted
+ /// Wii partition data.
+ ///
+ ///
+ /// Bit-shift to convert raw file-offset fields to byte addresses.
+ /// 0 for GameCube (direct bytes); 2 for Wii (offset × 4).
+ ///
+ public static GcFst? TryParse(byte[] fstData, int offsetShift)
+ {
+ if (fstData == null || fstData.Length < EntrySize)
+ return null;
+
+ // Root entry (index 0): FILE_SIZE field = total number of FST entries.
+ uint totalEntries = ReadBEU32(fstData, 8);
+ if (totalEntries < 1 || ((long)totalEntries * EntrySize) > fstData.Length)
+ return null;
+
+            var files = new List<FileEntry>((int)(totalEntries - 1));
+
+ for (uint i = 1; i < totalEntries; i++)
+ {
+ int off = (int)(i * EntrySize);
+ uint nameOffField = ReadBEU32(fstData, off + 0);
+ uint fileOffField = ReadBEU32(fstData, off + 4);
+ uint fileSizeField = ReadBEU32(fstData, off + 8);
+
+ if ((nameOffField & 0xFF000000u) != 0) continue; // directory entry
+ if (fileSizeField == 0) continue; // empty file
+
+ long fileStart = (long)fileOffField << offsetShift;
+ long fileEnd = fileStart + fileSizeField;
+ files.Add(new FileEntry { FileStart = fileStart, FileEnd = fileEnd });
+ }
+
+ // Sort ascending by FileEnd so binary-search upper_bound works correctly.
+ files.Sort(delegate(FileEntry a, FileEntry b)
+ {
+ return a.FileEnd.CompareTo(b.FileEnd);
+ });
+
+ return new GcFst(files);
+ }
+
+ ///
+        /// Returns the file entry whose byte range contains discOffset,
+ /// or null if no file does.
+ ///
+ public FileEntry? FindFileInfo(long discOffset)
+ {
+ if (_files.Count == 0)
+ return null;
+
+ // Binary search: first index where _files[i].FileEnd > discOffset
+ int lo = 0, hi = _files.Count;
+ while (lo < hi)
+ {
+ int mid = (lo + hi) >> 1;
+ if (_files[mid].FileEnd <= discOffset)
+ lo = mid + 1;
+ else
+ hi = mid;
+ }
+
+ if (lo >= _files.Count)
+ return null;
+
+ FileEntry e = _files[lo];
+ if (e.FileStart <= discOffset)
+ return e;
+
+ return null;
+ }
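+
+        // Worked example (hypothetical entries, sorted by FileEnd):
+        //   files = [ {0x100, 0x300}, {0x400, 0x900} ]
+        //   FindFileInfo(0x250) -> {0x100, 0x300}  (inside the first file)
+        //   FindFileInfo(0x350) -> null            (in the gap between files)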
+
+ ///
+ /// Returns the smallest FileEnd value strictly greater than
+        /// discOffset, or null if there is none.
+ ///
+ public long? FindNextFileEnd(long discOffset)
+ {
+ if (_files.Count == 0)
+ return null;
+
+ int lo = 0, hi = _files.Count;
+ while (lo < hi)
+ {
+ int mid = (lo + hi) >> 1;
+ if (_files[mid].FileEnd <= discOffset)
+ lo = mid + 1;
+ else
+ hi = mid;
+ }
+
+ return lo < _files.Count ? _files[lo].FileEnd : null;
+ }
+
+ ///
+ /// Returns the smallest FileStart value strictly greater than
+        /// discOffset, or null if there is none.
+ ///
+ public long? FindNextFileStart(long discOffset)
+ {
+ if (_files.Count == 0)
+ return null;
+
+ // Sort is by FileEnd; scan all entries whose FileEnd > discOffset
+ int lo = 0, hi = _files.Count;
+ while (lo < hi)
+ {
+ int mid = (lo + hi) >> 1;
+ if (_files[mid].FileEnd <= discOffset)
+ lo = mid + 1;
+ else
+ hi = mid;
+ }
+
+ long? best = null;
+ for (int i = lo; i < _files.Count; i++)
+ {
+ long start = _files[i].FileStart;
+ if (start <= discOffset)
+ continue;
+
+ if (best == null || start < best.Value)
+ best = start;
+ }
+
+ return best;
+ }
+
+ private static uint ReadBEU32(byte[] data, int offset) => ((uint)data[offset] << 24) | ((uint)data[offset + 1] << 16) | ((uint)data[offset + 2] << 8) | data[offset + 3];
+ }
+}
diff --git a/SabreTools.Wrappers/GczVirtualStream.cs b/SabreTools.Wrappers/GczVirtualStream.cs
new file mode 100644
index 000000000..f7c11348c
--- /dev/null
+++ b/SabreTools.Wrappers/GczVirtualStream.cs
@@ -0,0 +1,116 @@
+using System;
+using System.IO;
+
+namespace SabreTools.Wrappers
+{
+ ///
+ /// A read-only seekable stream that decompresses GCZ blocks on demand.
+ /// Avoids loading the entire decompressed disc image into memory.
+ ///
+ internal sealed class GczVirtualStream : Stream
+ {
+ private readonly GCZ _gcz;
+ private long _position;
+
+ // Single-block cache to avoid re-decompressing on adjacent reads within the same block.
+ private int _cachedBlockIndex = -1;
+ private byte[]? _cachedBlock;
+
+ public GczVirtualStream(GCZ gcz)
+ {
+ _gcz = gcz ?? throw new ArgumentNullException(nameof(gcz));
+ }
+
+ public override bool CanRead => true;
+ public override bool CanSeek => true;
+ public override bool CanWrite => false;
+ public override long Length => (long)_gcz.DataSize;
+ public override long Position
+ {
+ get => _position;
+ set
+ {
+ if (value < 0)
+ throw new ArgumentOutOfRangeException(nameof(value));
+ _position = value;
+ }
+ }
+
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ if (buffer is null)
+ throw new ArgumentNullException(nameof(buffer));
+ if (offset < 0)
+ throw new ArgumentOutOfRangeException(nameof(offset));
+ if (count < 0)
+ throw new ArgumentOutOfRangeException(nameof(count));
+ if (offset + count > buffer.Length)
+ throw new ArgumentException("offset + count exceeds buffer length");
+
+ long remaining = Length - _position;
+ if (remaining <= 0 || count <= 0)
+ return 0;
+
+ count = (int)Math.Min(count, remaining);
+
+ int totalRead = 0;
+ uint blockSize = _gcz.BlockSize;
+
+ while (totalRead < count && _position < Length)
+ {
+ int blockIndex = (int)(_position / blockSize);
+ int offsetInBlock = (int)(_position % blockSize);
+
+ byte[]? block = GetBlock(blockIndex);
+ if (block is null)
+ break;
+
+ int available = block.Length - offsetInBlock;
+ int toCopy = Math.Min(count - totalRead, available);
+ if (toCopy <= 0)
+ break;
+
+ Array.Copy(block, offsetInBlock, buffer, offset + totalRead, toCopy);
+ totalRead += toCopy;
+ _position += toCopy;
+ }
+
+ return totalRead;
+ }
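+
+        // Example: with BlockSize = 0x8000, Position 0x8123 maps to blockIndex 1 and
+        // offsetInBlock 0x123, so a read there touches (or reuses from cache) block 1 only.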
+
+ private byte[]? GetBlock(int blockIndex)
+ {
+ if (_cachedBlockIndex == blockIndex)
+ return _cachedBlock;
+
+ byte[]? block = _gcz.DecompressBlock(blockIndex);
+ _cachedBlockIndex = blockIndex;
+ _cachedBlock = block;
+ return block;
+ }
+
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ long newPos;
+ switch (origin)
+ {
+ case SeekOrigin.Begin: newPos = offset; break;
+ case SeekOrigin.Current: newPos = _position + offset; break;
+ case SeekOrigin.End: newPos = Length + offset; break;
+ default: throw new ArgumentOutOfRangeException(nameof(origin));
+ }
+
+ if (newPos < 0)
+ throw new IOException("Seek position cannot be negative.");
+
+ _position = newPos;
+ return _position;
+ }
+
+ public override void Flush() { }
+
+ public override void SetLength(long value) => throw new NotSupportedException();
+
+ public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
+ }
+}
diff --git a/SabreTools.Wrappers/LaggedFibonacciGenerator.cs b/SabreTools.Wrappers/LaggedFibonacciGenerator.cs
new file mode 100644
index 000000000..2813aa085
--- /dev/null
+++ b/SabreTools.Wrappers/LaggedFibonacciGenerator.cs
@@ -0,0 +1,319 @@
+using System;
+
+namespace SabreTools.Wrappers
+{
+ ///
+ /// Lagged Fibonacci Generator matching Dolphin's LaggedFibonacciGenerator exactly.
+ /// Used to regenerate Nintendo's deterministic "junk" padding data in disc images.
+ /// RVZ format identifies junk regions and stores only a 68-byte seed (17 u32 words)
+ /// instead of the full data, enabling significant compression of padding areas.
+ ///
+ internal class LaggedFibonacciGenerator
+ {
+ private const int LFG_K = 521;
+ private const int LFG_J = 32;
+
+ /// Size of the LFG output buffer in bytes (LFG_K * 4 = 2084).
+ public const int BUFFER_BYTES = LFG_K * 4;
+
+ /// Size of the seed in 32-bit words (68 bytes total).
+ public const int SEED_SIZE = 17;
+
+ private readonly uint[] m_buffer = new uint[LFG_K];
+ private int m_position_bytes = 0;
+
+ ///
+ /// Initializes the generator from a 17-element u32 seed array.
+ /// Each seed word is treated as a raw LE u32 from the file (Dolphin: reinterpret_cast then swap32).
+ ///
+ public void SetSeed(uint[] seed)
+ {
+ if (seed == null || seed.Length < SEED_SIZE)
+ throw new ArgumentException($"Seed must contain at least {SEED_SIZE} u32 values.", nameof(seed));
+
+ m_position_bytes = 0;
+ for (int i = 0; i < SEED_SIZE; i++)
+ m_buffer[i] = SwapU32(seed[i]); // reinterpret LE bytes as BE (Dolphin swap32)
+ Initialize(false);
+ }
+
+ ///
+ /// Initializes the generator from a 68-byte seed (17 BE u32 values as in the RVZ file).
+ /// Matches Dolphin: m_buffer[i] = Common::swap32(seed + i * 4).
+ ///
+ public void SetSeed(byte[] seedBytes)
+ {
+ if (seedBytes == null || seedBytes.Length < SEED_SIZE * 4)
+ throw new ArgumentException($"Seed must be {SEED_SIZE * 4} bytes.", nameof(seedBytes));
+
+ m_position_bytes = 0;
+ for (int i = 0; i < SEED_SIZE; i++)
+ m_buffer[i] = ReadBigEndianU32(seedBytes, i * 4);
+ Initialize(false);
+ }
+
+ ///
+ /// Skips forward by bytes in the output stream.
+        /// Skips forward by count bytes in the output stream.
+ ///
+ public void Forward(int count)
+ {
+ m_position_bytes += count;
+ while (m_position_bytes >= BUFFER_BYTES)
+ {
+ ForwardStep();
+ m_position_bytes -= BUFFER_BYTES;
+ }
+ }
+
+        /// Generates count junk bytes and returns them.
+ public byte[] GetBytes(int count)
+ {
+ byte[] output = new byte[count];
+ GetBytes(count, output, 0);
+ return output;
+ }
+
+ ///
+        /// Generates count junk bytes into output starting at outputOffset.
+ /// Matches Dolphin: LaggedFibonacciGenerator::GetBytes using memcpy pattern.
+ ///
+ public void GetBytes(int count, byte[] output, int outputOffset)
+ {
+ while (count > 0)
+ {
+ int length = Math.Min(count, BUFFER_BYTES - m_position_bytes);
+ Buffer.BlockCopy(m_buffer, m_position_bytes, output, outputOffset, length);
+ m_position_bytes += length;
+ count -= length;
+ outputOffset += length;
+
+ if (m_position_bytes == BUFFER_BYTES)
+ {
+ ForwardStep();
+ m_position_bytes = 0;
+ }
+ }
+ }
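+
+        // Illustrative usage (sketch): regenerating junk bytes at disc offset `off` from a
+        // 68-byte stored seed. The junk stream restarts every 0x8000 bytes on disc, hence
+        // the modulo before positioning:
+        //
+        //   var lfg = new LaggedFibonacciGenerator();
+        //   lfg.SetSeed(seedBytes);            // 17 BE u32 words from the RVZ file
+        //   lfg.Forward((int)(off % 0x8000));  // position within the current junk cycle
+        //   byte[] junk = lfg.GetBytes(16);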
+
+ ///
+ /// Returns a single junk byte at the current position, advancing by one byte.
+ /// Matches Dolphin: LaggedFibonacciGenerator::GetByte.
+ ///
+ internal byte GetByte()
+ {
+ int wordIdx = m_position_bytes / 4;
+ int byteInWord = m_position_bytes % 4;
+ byte result = (byte)(m_buffer[wordIdx] >> (byteInWord * 8)); // LE byte order
+
+ m_position_bytes++;
+ if (m_position_bytes == BUFFER_BYTES)
+ {
+ ForwardStep();
+ m_position_bytes = 0;
+ }
+
+ return result;
+ }
+
+ // -------------------------------------------------------------------
+ // Private forward/backward state steps
+ // -------------------------------------------------------------------
+
+ ///
+ /// Full buffer state step forward — Dolphin: Forward() (no args).
+ /// for i in [0,J): buf[i] ^= buf[i + K - J] (= buf[i + 489])
+ /// for i in [J,K): buf[i] ^= buf[i - J] (= buf[i - 32])
+ ///
+ private void ForwardStep()
+ {
+ for (int i = 0; i < LFG_J; i++)
+ m_buffer[i] ^= m_buffer[i + LFG_K - LFG_J];
+ for (int i = LFG_J; i < LFG_K; i++)
+ m_buffer[i] ^= m_buffer[i - LFG_J];
+ }
+
+ ///
+ /// Partial or full buffer state step backward — undoes ForwardStep.
+ /// Dolphin: Backward(size_t start_word, size_t end_word).
+ ///
+ private void Backward(int startWord = 0, int endWord = LFG_K)
+ {
+ int loopEnd = Math.Max(LFG_J, startWord);
+
+ // Undo second loop of ForwardStep (reversed)
+ for (int i = Math.Min(endWord, LFG_K); i > loopEnd; i--)
+ m_buffer[i - 1] ^= m_buffer[i - 1 - LFG_J];
+
+ // Undo first loop of ForwardStep (reversed)
+ for (int i = Math.Min(endWord, LFG_J); i > startWord; i--)
+ m_buffer[i - 1] ^= m_buffer[i - 1 + LFG_K - LFG_J];
+ }
+
+ ///
+ /// Recovers the original 17-word seed from the current buffer state and outputs it
+ /// as LE u32 values into .
+        /// as LE u32 values into seedOut.
+ ///
+ private bool Reinitialize(uint[] seedOut)
+ {
+ for (int i = 0; i < 4; i++)
+ Backward();
+
+ // Swap all words back to big-endian representation
+ for (int i = 0; i < LFG_K; i++)
+ m_buffer[i] = SwapU32(m_buffer[i]);
+
+            // Reconstruct bits 16-23 of the first SEED_SIZE words (bits 16-17 come from the XOR of later words)
+ for (int i = 0; i < SEED_SIZE; i++)
+ {
+ m_buffer[i] = (m_buffer[i] & 0xFF00FFFF)
+ | ((m_buffer[i] << 2) & 0x00FC0000)
+ | (((m_buffer[i + 16] ^ m_buffer[i + 15]) << 9) & 0x00030000);
+ }
+
+ // Output seed as LE u32 values (swap32 converts BE→LE)
+ for (int i = 0; i < SEED_SIZE; i++)
+ seedOut[i] = SwapU32(m_buffer[i]);
+
+ return Initialize(true);
+ }
+
+ ///
+ /// Fills m_buffer[SEED_SIZE..K-1] from the first SEED_SIZE words, applies the output
+        /// transform, and runs 4× ForwardStep. When checkExisting is true,
+ /// verifies the data in m_buffer[SEED_SIZE..] matches the recurrence.
+ /// Dolphin: Initialize(bool check_existing_data).
+ ///
+ private bool Initialize(bool checkExisting)
+ {
+ for (int i = SEED_SIZE; i < LFG_K; i++)
+ {
+ uint calculated = (m_buffer[i - 17] << 23)
+ ^ (m_buffer[i - 16] >> 9)
+ ^ m_buffer[i - 1];
+
+ if (checkExisting)
+ {
+ uint actual = (m_buffer[i] & 0xFF00FFFF) | ((m_buffer[i] << 2) & 0x00FC0000);
+ if ((calculated & 0xFFFCFFFF) != actual)
+ return false;
+ }
+
+ m_buffer[i] = calculated;
+ }
+
+ // Output transform: each word → swap32((x & 0xFF00FFFF) | ((x >> 2) & 0x00FF0000))
+ for (int i = 0; i < LFG_K; i++)
+ m_buffer[i] = SwapU32((m_buffer[i] & 0xFF00FFFF) | ((m_buffer[i] >> 2) & 0x00FF0000));
+
+ for (int i = 0; i < 4; i++)
+ ForwardStep();
+
+ return true;
+ }
+
+ // -------------------------------------------------------------------
+ // Static seed-recovery API (used by RvzPackDecompressor)
+ // -------------------------------------------------------------------
+
+ ///
+ /// Attempts to recover a 17-word seed from disc data starting at
+        /// Attempts to recover a 17-word seed from disc data starting at dataStart
+        /// within data.
+        /// size is the number of bytes to match (up to the next 32 KiB boundary).
+        /// dataOffsetMod is discOffset % 0x8000, i.e. the offset within
+ /// Returns the number of bytes that were successfully reconstructed (0 = not junk data).
+ /// Matches Dolphin: LaggedFibonacciGenerator::GetSeed(u8*, size_t, size_t, u32[]).
+ ///
+ public static int GetSeed(byte[] data, int dataStart, int size, int dataOffsetMod, uint[] seedOut)
+ {
+ if (size <= 0 || dataStart < 0 || dataStart + size > data.Length)
+ return 0;
+
+ // Skip any bytes before the next u32-aligned boundary
+ int bytesToSkip = (4 - (dataOffsetMod % 4)) % 4;
+ if (bytesToSkip >= size)
+ return 0;
+
+ int u32DataStart = dataStart + bytesToSkip;
+ int u32Size = (size - bytesToSkip) / 4;
+ int u32DataOffset = (dataOffsetMod + bytesToSkip) / 4;
+
+ if (u32Size < LFG_K)
+ return 0;
+
+ // Read disc bytes as LE u32 values (Dolphin: reinterpret_cast)
+ uint[] u32Data = new uint[u32Size];
+ for (int i = 0; i < u32Size; i++)
+ u32Data[i] = ReadLittleEndianU32(data, u32DataStart + (i * 4));
+
+ LaggedFibonacciGenerator lfg = new LaggedFibonacciGenerator();
+ if (!GetSeed_u32(u32Data, u32Size, u32DataOffset, lfg, seedOut))
+ return 0;
+
+ // Set position to data_offset % BUFFER_BYTES and count matching bytes from data[dataStart]
+ lfg.m_position_bytes = dataOffsetMod % BUFFER_BYTES;
+
+ int reconstructed = 0;
+ for (int i = 0; i < size && lfg.GetByte() == data[dataStart + i]; i++)
+ reconstructed++;
+
+ return reconstructed;
+ }
+
+ ///
+ /// Inner u32-level seed recovery.
+ /// Dolphin: GetSeed(const u32* data, size_t size, size_t data_offset, LFG*, u32[]).
+ ///
+ private static bool GetSeed_u32(uint[] data, int size, int dataOffset,
+ LaggedFibonacciGenerator lfg, uint[] seedOut)
+ {
+ if (size < LFG_K)
+ return false;
+
+            // Quick sanity check: bits 22-23 of swap32(x) must equal bits 24-25
+            // (a property of the LFG output transform).
+ for (int i = 0; i < LFG_K; i++)
+ {
+ uint x = SwapU32(data[i]);
+ if ((x & 0x00C00000) != ((x >> 2) & 0x00C00000))
+ return false;
+ }
+
+ int dataOffsetModK = dataOffset % LFG_K;
+ int dataOffsetDivK = dataOffset / LFG_K;
+
+ // Rotate data into buffer so buffer[dataOffsetModK] = data[0]
+ Array.Copy(data, 0, lfg.m_buffer, dataOffsetModK, LFG_K - dataOffsetModK);
+ if (dataOffsetModK > 0)
+ Array.Copy(data, LFG_K - dataOffsetModK, lfg.m_buffer, 0, dataOffsetModK);
+
+ lfg.Backward(0, dataOffsetModK);
+
+ for (int i = 0; i < dataOffsetDivK; i++)
+ lfg.Backward();
+
+ if (!lfg.Reinitialize(seedOut))
+ return false;
+
+ for (int i = 0; i < dataOffsetDivK; i++)
+ lfg.ForwardStep();
+
+ return true;
+ }
+
+ // -------------------------------------------------------------------
+ // Endian helpers
+ // -------------------------------------------------------------------
+
+ internal static uint ReadBigEndianU32(byte[] data, int offset) =>
+ (uint)((data[offset] << 24) | (data[offset + 1] << 16) | (data[offset + 2] << 8) | data[offset + 3]);
+
+ private static uint ReadLittleEndianU32(byte[] data, int offset) =>
+ (uint)(data[offset] | (data[offset + 1] << 8) | (data[offset + 2] << 16) | (data[offset + 3] << 24));
+
+ internal static uint SwapU32(uint value) =>
+ (value << 24) | ((value << 8) & 0x00FF0000) | ((value >> 8) & 0x0000FF00) | (value >> 24);
+ }
+}
diff --git a/SabreTools.Wrappers/NintendoDisc.Encryption.cs b/SabreTools.Wrappers/NintendoDisc.Encryption.cs
new file mode 100644
index 000000000..ebbaaf26c
--- /dev/null
+++ b/SabreTools.Wrappers/NintendoDisc.Encryption.cs
@@ -0,0 +1,106 @@
+using System.Security.Cryptography;
+using SabreTools.Data.Models.NintendoDisc;
+
+namespace SabreTools.Wrappers
+{
+ public partial class NintendoDisc
+ {
+ #region Wii Encryption / Decryption
+
+ // TODO: Replace hardcoded common keys with a caller-supplied key provider and validator.
+ // The intent is for consumers to inject keys (e.g. from a key file or secure store) rather
+ // than having them embedded here, so these constants can be removed once that API exists.
+ #region Common Keys
+
+ ///
+ /// Wii retail common key (index 0).
+ /// Publicly known; used by Dolphin and other tools to decrypt title keys.
+ ///
+ private static readonly byte[] WiiCommonKeyRetail =
+ {
+ 0xEB, 0xE4, 0x2A, 0x22, 0x5E, 0x85, 0x93, 0xE4,
+ 0x48, 0xD9, 0xC5, 0x45, 0x73, 0x81, 0xAA, 0xF7,
+ };
+
+ ///
+ /// Wii Korean common key (index 1).
+ /// Used for Korean-region titles.
+ ///
+ private static readonly byte[] WiiCommonKeyKorean =
+ {
+ 0x63, 0xB8, 0x2B, 0xB4, 0xF4, 0x61, 0x4E, 0x2E,
+ 0x13, 0xF2, 0xFE, 0xFB, 0xBA, 0x4C, 0x9B, 0x7E,
+ };
+
+ #endregion
+
+ ///
+ /// Decrypt a Wii partition title key from the ticket data.
+ ///
+ /// 16-byte encrypted title key from ticket offset 0x1BF
+ /// 8-byte title ID from ticket offset 0x1DC (big-endian)
+ ///
+ /// Common key index from ticket offset 0x1F1: 0 = retail, 1 = Korean
+ ///
+ /// Decrypted 16-byte title key, or null on error
+ public static byte[]? DecryptTitleKey(byte[] encryptedTitleKey, byte[] titleId, byte commonKeyIndex)
+ {
+ if (encryptedTitleKey is null || encryptedTitleKey.Length != 16)
+ return null;
+ if (titleId is null || titleId.Length != 8)
+ return null;
+
+ byte[] commonKey = commonKeyIndex == 1 ? WiiCommonKeyKorean : WiiCommonKeyRetail;
+
+ // IV is the 8-byte title ID padded with zeros to 16 bytes
+ byte[] iv = new byte[16];
+ System.Array.Copy(titleId, 0, iv, 0, 8);
+
+ return DecryptAesCbc(encryptedTitleKey, commonKey, iv);
+ }
+
+ ///
+ /// Decrypt one Wii block of data (0x7C00 bytes) using AES-128-CBC.
+ ///
+ /// 0x7C00 bytes of encrypted block data
+ /// 16-byte partition title key
+ /// 16-byte initialization vector (last 16 bytes of the preceding hash block)
+ /// Decrypted 0x7C00-byte block data, or null on error
+ public static byte[]? DecryptBlock(byte[] encryptedData, byte[] titleKey, byte[] iv)
+ {
+ if (encryptedData is null || encryptedData.Length != Constants.WiiBlockDataSize)
+ return null;
+ if (titleKey is null || titleKey.Length != 16)
+ return null;
+ if (iv is null || iv.Length != 16)
+ return null;
+
+ return DecryptAesCbc(encryptedData, titleKey, iv);
+ }
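+
+        // Illustrative flow (sketch; variable names are placeholders): decrypting one block
+        // of partition data, where rawBlock is a full 0x8000-byte encrypted block from the
+        // ISO and encData is its 0x7C00-byte data portion:
+        //
+        //   byte[]? titleKey = DecryptTitleKey(encTitleKey, titleId, commonKeyIndex);
+        //   byte[] iv = new byte[16];
+        //   System.Array.Copy(rawBlock, 0x3D0, iv, 0, 16);  // IV lives in the hash area
+        //   byte[]? plain = DecryptBlock(encData, titleKey!, iv);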
+
+ private static byte[]? DecryptAesCbc(byte[] data, byte[] key, byte[] iv)
+ {
+#if NET20
+ return null; // AES not available on net20
+#else
+ try
+ {
+ using var aes = Aes.Create();
+ aes.Key = key;
+ aes.IV = iv;
+ aes.Mode = CipherMode.CBC;
+ aes.Padding = PaddingMode.None;
+
+ using var decryptor = aes.CreateDecryptor();
+ return decryptor.TransformFinalBlock(data, 0, data.Length);
+ }
+ catch
+ {
+ return null;
+ }
+#endif
+ }
+
+ #endregion
+ }
+}
diff --git a/SabreTools.Wrappers/NintendoDisc.Extraction.cs b/SabreTools.Wrappers/NintendoDisc.Extraction.cs
new file mode 100644
index 000000000..773e5b532
--- /dev/null
+++ b/SabreTools.Wrappers/NintendoDisc.Extraction.cs
@@ -0,0 +1,534 @@
+using System;
+using System.IO;
+using SabreTools.Data.Models.NintendoDisc;
+
+namespace SabreTools.Wrappers
+{
+ public partial class NintendoDisc : IExtractable
+ {
+ ///
+ public bool Extract(string outputDirectory, bool includeDebug)
+ {
+ if (string.IsNullOrEmpty(outputDirectory))
+ return false;
+
+ try
+ {
+ Directory.CreateDirectory(outputDirectory);
+
+ if (Model.Platform == Platform.GameCube)
+ return ExtractGameCube(outputDirectory);
+ else if (Model.Platform == Platform.Wii)
+ return ExtractWii(outputDirectory);
+
+ return false;
+ }
+ catch (Exception ex)
+ {
+ if (includeDebug)
+ Console.Error.WriteLine(ex);
+ return false;
+ }
+ }
+
+ #region GameCube extraction
+
+ private bool ExtractGameCube(string dest)
+ {
+ string sysDir = Path.Combine(dest, "sys");
+ Directory.CreateDirectory(sysDir);
+
+ // sys/boot.bin (disc header, 0x000 – 0x43F)
+ WriteRange(0, Constants.DiscHeaderSize, Path.Combine(sysDir, "boot.bin"));
+
+ // sys/bi2.bin (0x440 – 0x243F)
+ WriteRange(Constants.Bi2Address, Constants.Bi2Size, Path.Combine(sysDir, "bi2.bin"));
+
+ // sys/apploader.img
+ WriteApploader(sysDir);
+
+ // DOL offset stored without shift on GameCube
+ long dolOffset = Model.Header.DolOffset;
+ if (dolOffset > 0)
+ {
+ byte[]? dolHeader = ReadDisc(dolOffset, 0xE0);
+ if (dolHeader != null)
+ {
+ int dolSize = GetDolSize(dolHeader);
+ WriteRange(dolOffset, dolSize, Path.Combine(sysDir, "main.dol"));
+ }
+ }
+
+ // FST offset stored without shift on GameCube
+ long fstOffset = Model.Header.FstOffset;
+ long fstSize = Model.Header.FstSize;
+ if (fstOffset > 0 && fstSize > 0)
+ {
+ WriteRange(fstOffset, (int)Math.Min(fstSize, int.MaxValue),
+ Path.Combine(sysDir, "fst.bin"));
+
+ byte[]? fstData = ReadDisc(fstOffset, (int)Math.Min(fstSize, int.MaxValue));
+ if (fstData != null)
+ {
+ string filesDir = Path.Combine(dest, "files");
+ Directory.CreateDirectory(filesDir);
+ ExtractFstFiles(fstData, offsetShift: 0, filesDir, ReadDisc);
+ }
+ }
+
+ return true;
+ }
+
+ #endregion
+
+ #region Wii extraction
+
+ private bool ExtractWii(string dest)
+ {
+ // Unencrypted disc header area
+ string discDir = Path.Combine(dest, "disc");
+ Directory.CreateDirectory(discDir);
+ WriteRange(0, 0x100, Path.Combine(discDir, "header.bin"));
+ WriteRange(Constants.WiiRegionDataAddress, Constants.WiiRegionDataSize,
+ Path.Combine(discDir, "region.bin"));
+
+ if (Model.PartitionTableEntries is null)
+ return true;
+
+            var typeCounters = new System.Collections.Generic.Dictionary<uint, int>();
+
+ foreach (var pte in Model.PartitionTableEntries)
+ {
+ long partOffset = pte.Offset;
+ if (partOffset <= 0 || partOffset >= _dataSource.Length)
+ continue;
+
+ string partName = GetPartitionName(pte.Type, typeCounters);
+ string partDir = Path.Combine(dest, partName);
+ Directory.CreateDirectory(partDir);
+
+ ExtractWiiPartition(partOffset, partDir);
+ }
+
+ return true;
+ }
+
+ private void ExtractWiiPartition(long partOffset, string partDir)
+ {
+ // ticket.bin (unencrypted, 0x2A4 bytes at partition start)
+ WriteRange(partOffset, Constants.WiiTicketSize, Path.Combine(partDir, "ticket.bin"));
+
+ byte[]? ticketData = ReadDisc(partOffset, Constants.WiiTicketSize);
+ if (ticketData is null || ticketData.Length < Constants.TicketCommonKeyIndexOffset + 1)
+ return;
+
+ // Decrypt title key
+ byte[] encTitleKey = new byte[16];
+ Array.Copy(ticketData, Constants.TicketEncryptedTitleKeyOffset, encTitleKey, 0, 16);
+ byte[] titleId = new byte[8];
+ Array.Copy(ticketData, Constants.TicketTitleIdOffset, titleId, 0, 8);
+ byte commonKeyIdx = ticketData[Constants.TicketCommonKeyIndexOffset];
+
+ byte[]? titleKey = DecryptTitleKey(encTitleKey, titleId, commonKeyIdx);
+ if (titleKey is null)
+ return;
+
+ // TMD
+ byte[]? tmdSizeBytes = ReadDisc(partOffset + Constants.WiiTmdSizeAddress, 4);
+ uint tmdSize = tmdSizeBytes != null
+ ? (uint)((tmdSizeBytes[0] << 24) | (tmdSizeBytes[1] << 16) | (tmdSizeBytes[2] << 8) | tmdSizeBytes[3])
+ : 0;
+ byte[]? tmdOffBytes = ReadDisc(partOffset + Constants.WiiTmdOffsetAddress, 4);
+ uint tmdOffShifted = tmdOffBytes != null
+ ? (uint)((tmdOffBytes[0] << 24) | (tmdOffBytes[1] << 16) | (tmdOffBytes[2] << 8) | tmdOffBytes[3])
+ : 0;
+ long tmdOffset = (long)tmdOffShifted << 2;
+ if (tmdSize > 0 && tmdOffset > 0)
+ WriteRange(partOffset + tmdOffset, (int)tmdSize, Path.Combine(partDir, "tmd.bin"));
+
+ // cert.bin
+ byte[]? certSizeBytes = ReadDisc(partOffset + Constants.WiiCertSizeAddress, 4);
+ uint certSize = certSizeBytes != null
+ ? (uint)((certSizeBytes[0] << 24) | (certSizeBytes[1] << 16) | (certSizeBytes[2] << 8) | certSizeBytes[3])
+ : 0;
+ byte[]? certOffBytes = ReadDisc(partOffset + Constants.WiiCertOffsetAddress, 4);
+ uint certOffShifted = certOffBytes != null
+ ? (uint)((certOffBytes[0] << 24) | (certOffBytes[1] << 16) | (certOffBytes[2] << 8) | certOffBytes[3])
+ : 0;
+ long certOffset = (long)certOffShifted << 2;
+ if (certSize > 0 && certOffset > 0)
+ WriteRange(partOffset + certOffset, (int)certSize, Path.Combine(partDir, "cert.bin"));
+
+ // h3.bin
+ byte[]? h3OffBytes = ReadDisc(partOffset + Constants.WiiH3OffsetAddress, 4);
+ uint h3OffShifted = h3OffBytes != null
+ ? (uint)((h3OffBytes[0] << 24) | (h3OffBytes[1] << 16) | (h3OffBytes[2] << 8) | h3OffBytes[3])
+ : 0;
+ long h3Offset = (long)h3OffShifted << 2;
+ if (h3Offset > 0)
+ WriteRange(partOffset + h3Offset, Constants.WiiH3Size, Path.Combine(partDir, "h3.bin"));
+
+ // Encrypted partition data start
+ byte[]? dataOffBytes = ReadDisc(partOffset + Constants.WiiDataOffsetAddress, 4);
+ uint dataOffShifted = dataOffBytes != null
+ ? (uint)((dataOffBytes[0] << 24) | (dataOffBytes[1] << 16) | (dataOffBytes[2] << 8) | dataOffBytes[3])
+ : 0;
+ long dataOffset = (long)dataOffShifted << 2;
+ if (dataOffset <= 0)
+ return;
+
+ long absDataOffset = partOffset + dataOffset;
+
+ string sysDir = Path.Combine(partDir, "sys");
+ Directory.CreateDirectory(sysDir);
+
+ // Read boot block from decrypted partition (block 0, offset 0 within data)
+ byte[]? bootBlock = ReadDecryptedPartitionRange(absDataOffset, titleKey, 0, Constants.DiscHeaderSize);
+ if (bootBlock is null)
+ return;
+
+ File.WriteAllBytes(Path.Combine(sysDir, "boot.bin"), bootBlock);
+
+ // bi2.bin
+ byte[]? bi2 = ReadDecryptedPartitionRange(absDataOffset, titleKey,
+ Constants.Bi2Address, Constants.Bi2Size);
+ if (bi2 != null)
+ File.WriteAllBytes(Path.Combine(sysDir, "bi2.bin"), bi2);
+
+ // apploader
+ WriteWiiApploader(absDataOffset, titleKey, sysDir);
+
+ // DOL — stored offset is shifted <<2 in Wii partition
+ uint dolOffShifted = (uint)((bootBlock[0x420] << 24) | (bootBlock[0x421] << 16)
+ | (bootBlock[0x422] << 8) | bootBlock[0x423]);
+ long dolOff = (long)dolOffShifted << 2;
+ if (dolOff > 0)
+ {
+ byte[]? dolHdr = ReadDecryptedPartitionRange(absDataOffset, titleKey, dolOff, 0xE0);
+ if (dolHdr != null)
+ {
+ int dolSize = GetDolSize(dolHdr);
+ byte[]? dol = ReadDecryptedPartitionRange(absDataOffset, titleKey, dolOff, dolSize);
+ if (dol != null)
+ File.WriteAllBytes(Path.Combine(sysDir, "main.dol"), dol);
+ }
+ }
+
+ // FST — stored offset shifted <<2 in Wii partition
+ uint fstOffShifted = (uint)((bootBlock[0x424] << 24) | (bootBlock[0x425] << 16)
+ | (bootBlock[0x426] << 8) | bootBlock[0x427]);
+ uint fstSzShifted = (uint)((bootBlock[0x428] << 24) | (bootBlock[0x429] << 16)
+ | (bootBlock[0x42A] << 8) | bootBlock[0x42B]);
+ long fstOff = (long)fstOffShifted << 2;
+ long fstSize = (long)fstSzShifted << 2; // also stored >>2 on Wii
+ if (fstOff > 0 && fstSize > 0)
+ {
+ byte[]? fstData = ReadDecryptedPartitionRange(absDataOffset, titleKey,
+ fstOff, (int)Math.Min(fstSize, int.MaxValue));
+ if (fstData != null)
+ {
+ File.WriteAllBytes(Path.Combine(sysDir, "fst.bin"), fstData);
+ string filesDir = Path.Combine(partDir, "files");
+ Directory.CreateDirectory(filesDir);
+ ExtractFstFiles(fstData, offsetShift: 2, filesDir,
+ (offset, length) => ReadDecryptedPartitionRange(absDataOffset, titleKey, offset, length));
+ }
+ }
+ }
+
+ #endregion
+
+ #region FST extraction
+
+ private void ExtractFstFiles(byte[] fstData, int offsetShift, string filesDir,
+            Func<long, int, byte[]?> readFunc)
+ {
+ if (fstData is null || fstData.Length < 12)
+ return;
+
+ // Root entry is at offset 0; its fileSize field = total entry count
+ uint rootCount = (uint)((fstData[8] << 24) | (fstData[9] << 16)
+ | (fstData[10] << 8) | fstData[11]);
+ if (rootCount < 1 || rootCount > 1024 * 1024)
+ return;
+
+ // String table immediately follows all entries
+ long stringTableOffset = rootCount * 12;
+
+ ExtractFstDirectory(fstData, 1, (int)rootCount, stringTableOffset,
+ filesDir, offsetShift, readFunc);
+ }
+
+ ///
+        /// Recursively extracts FST entries [start, end) into currentDir.
+ /// Returns the index of the next entry after this directory.
+ ///
+ private int ExtractFstDirectory(byte[] fstData, int start, int end,
+ long stringTableOffset, string currentDir, int offsetShift,
+            Func<long, int, byte[]?> readFunc)
+ {
+ int i = start;
+ while (i < end)
+ {
+ int fstBase = i * 12;
+ if ((fstBase + 12) > fstData.Length)
+ break;
+
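+                // FST entry layout (12 bytes, big-endian):
+                //   byte  0     flags (bit 0 set = directory)
+                //   bytes 1-3   name offset into the string table
+                //   bytes 4-7   file offset (files) or parent index (directories)
+                //   bytes 8-11  file size (files) or first-entry-after index (directories)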
+ byte flags = fstData[fstBase];
+ bool isDir = (flags & 1) != 0;
+ uint nameOff = (uint)((fstData[fstBase + 1] << 16) | (fstData[fstBase + 2] << 8) | fstData[fstBase + 3]);
+ uint fileOffRaw = (uint)((fstData[fstBase + 4] << 24) | (fstData[fstBase + 5] << 16) | (fstData[fstBase + 6] << 8) | fstData[fstBase + 7]);
+ uint fileSize = (uint)((fstData[fstBase + 8] << 24) | (fstData[fstBase + 9] << 16) | (fstData[fstBase + 10] << 8) | fstData[fstBase + 11]);
+
+ string name = ReadFstString(fstData, stringTableOffset + nameOff);
+ if (string.IsNullOrEmpty(name))
+ {
+ i++;
+ continue;
+ }
+
+ // Sanitize name: replace path separators and reject/flatten dot-segments
+ name = name.Replace('/', '_').Replace('\\', '_');
+ if (name == "." || name == "..")
+ name = "_";
+                name = name.TrimStart('.');
+                if (name.Length == 0)
+                    name = "_";
+
+ if (isDir)
+ {
+ // fileOffRaw = parent entry index; fileSize = last entry index in this dir
+                    // fileOffRaw = parent entry index; fileSize = index of the first entry after this directory
+ string subDir = Path.Combine(currentDir, name);
+ Directory.CreateDirectory(subDir);
+ i = ExtractFstDirectory(fstData, i + 1, nextEntry, stringTableOffset,
+ subDir, offsetShift, readFunc);
+ }
+ else
+ {
+ string outPath = Path.Combine(currentDir, name);
+ string? outDir = Path.GetDirectoryName(outPath);
+ if (!string.IsNullOrEmpty(outDir))
+ Directory.CreateDirectory(outDir);
+
+ if (fileSize == 0)
+ {
+ // Zero-byte file — create empty
+ File.WriteAllBytes(outPath, new byte[0]);
+ }
+ else
+ {
+ long discOffset = (long)fileOffRaw << offsetShift;
+ byte[]? fileData = readFunc(discOffset, (int)Math.Min(fileSize, int.MaxValue));
+ if (fileData != null)
+ File.WriteAllBytes(outPath, fileData);
+ }
+
+ i++;
+ }
+ }
+
+ return i;
+ }
+
+ private static string ReadFstString(byte[] fstData, long offset)
+ {
+ if (offset < 0 || offset >= fstData.Length)
+ return string.Empty;
+
+ int start = (int)offset;
+ int end = start;
+ while (end < fstData.Length && fstData[end] != 0)
+ end++;
+
+ return System.Text.Encoding.ASCII.GetString(fstData, start, end - start);
+ }
+
+ #endregion
+
+ #region Apploader helpers
+
+ private void WriteApploader(string sysDir)
+ {
+ byte[]? hdr = ReadDisc(Constants.ApploaderAddress, Constants.ApploaderHeaderSize);
+ if (hdr is null) return;
+
+ uint codeSize = (uint)((hdr[Constants.ApploaderCodeSizeOffset] << 24)
+ | (hdr[Constants.ApploaderCodeSizeOffset + 1] << 16)
+ | (hdr[Constants.ApploaderCodeSizeOffset + 2] << 8)
+ | hdr[Constants.ApploaderCodeSizeOffset + 3]);
+ uint trailerSize = (uint)((hdr[Constants.ApploaderTrailerSizeOffset] << 24)
+ | (hdr[Constants.ApploaderTrailerSizeOffset + 1] << 16)
+ | (hdr[Constants.ApploaderTrailerSizeOffset + 2] << 8)
+ | hdr[Constants.ApploaderTrailerSizeOffset + 3]);
+
+ int totalSize = Constants.ApploaderHeaderSize + (int)codeSize + (int)trailerSize;
+ WriteRange(Constants.ApploaderAddress, totalSize, Path.Combine(sysDir, "apploader.img"));
+ }
+
+ private void WriteWiiApploader(long absDataOffset, byte[] titleKey, string sysDir)
+ {
+ byte[]? hdr = ReadDecryptedPartitionRange(absDataOffset, titleKey,
+ Constants.ApploaderAddress, Constants.ApploaderHeaderSize);
+ if (hdr is null) return;
+
+ uint codeSize = (uint)((hdr[Constants.ApploaderCodeSizeOffset] << 24)
+ | (hdr[Constants.ApploaderCodeSizeOffset + 1] << 16)
+ | (hdr[Constants.ApploaderCodeSizeOffset + 2] << 8)
+ | hdr[Constants.ApploaderCodeSizeOffset + 3]);
+ uint trailerSize = (uint)((hdr[Constants.ApploaderTrailerSizeOffset] << 24)
+ | (hdr[Constants.ApploaderTrailerSizeOffset + 1] << 16)
+ | (hdr[Constants.ApploaderTrailerSizeOffset + 2] << 8)
+ | hdr[Constants.ApploaderTrailerSizeOffset + 3]);
+
+ int totalSize = Constants.ApploaderHeaderSize + (int)codeSize + (int)trailerSize;
+ byte[]? apploader = ReadDecryptedPartitionRange(absDataOffset, titleKey,
+ Constants.ApploaderAddress, totalSize);
+ if (apploader != null)
+ File.WriteAllBytes(Path.Combine(sysDir, "apploader.img"), apploader);
+ }
+
+ #endregion
+
+ #region DOL size calculation
+
+ private static int GetDolSize(byte[] dolHeader)
+ {
+ // DOL header: 7 text section offsets (0x00), 11 data section offsets (0x1C),
+ // 7 text sizes (0x90), 11 data sizes (0xAC), BSS offset (0xD8), BSS size (0xDC),
+ // entry point (0xE0). Max (offset + size) over all sections gives the DOL size.
+ if (dolHeader is null || dolHeader.Length < 0xE0)
+ return 0;
+
+ int maxEnd = 0;
+ // Text sections (7): offset table at 0x00, size table at 0x90
+ for (int s = 0; s < 7; s++)
+ {
+ int off = (int)ReadBE32(dolHeader, s * 4);
+ int sz = (int)ReadBE32(dolHeader, 0x90 + (s * 4));
+ if (off > 0 && sz > 0) maxEnd = Math.Max(maxEnd, off + sz);
+ }
+ // Data sections (11): offset table at 0x1C, size table at 0xAC
+ for (int s = 0; s < 11; s++)
+ {
+ int off = (int)ReadBE32(dolHeader, 0x1C + (s * 4));
+ int sz = (int)ReadBE32(dolHeader, 0xAC + (s * 4));
+ if (off > 0 && sz > 0) maxEnd = Math.Max(maxEnd, off + sz);
+ }
+
+ return maxEnd;
+ }
+
+ #endregion
+
+ #region Wii partition block decryption helpers
+
+ ///
+        /// Reads length bytes at partitionDataOffset within
+        /// the decrypted partition data, decrypting 0x8000-byte blocks as needed.
+        /// absDataOffset is the absolute ISO offset where the encrypted data begins.
+ ///
+ private byte[]? ReadDecryptedPartitionRange(long absDataOffset, byte[] titleKey,
+ long partitionDataOffset, int length)
+ {
+ if (length <= 0) return null;
+
+ var result = new byte[length];
+ int produced = 0;
+
+ while (produced < length)
+ {
+ long dataOff = partitionDataOffset + produced;
+ long blockNum = dataOff / Constants.WiiBlockDataSize;
+ int offsetInBlock = (int)(dataOff % Constants.WiiBlockDataSize);
+
+ long encBlockOffset = absDataOffset + (blockNum * Constants.WiiBlockSize);
+ byte[]? encBlock = ReadDisc(encBlockOffset, Constants.WiiBlockSize);
+ if (encBlock is null || encBlock.Length < Constants.WiiBlockSize)
+ break;
+
+ // IV is at offset 0x3D0 of the raw (still-encrypted) block.
+ // Matches Dolphin / DolphinIsoLib WiiPartitionDecryptor.DecryptBlock.
+ byte[] iv = new byte[16];
+ Array.Copy(encBlock, 0x3D0, iv, 0, 16);
+
+ // Decrypt the 0x7C00 data portion (bytes 0x400–0x7FFF of the raw block)
+ byte[] encData = new byte[Constants.WiiBlockDataSize];
+ Array.Copy(encBlock, Constants.WiiBlockHeaderSize, encData, 0, Constants.WiiBlockDataSize);
+
+ byte[]? decData = DecryptBlock(encData, titleKey, iv);
+ if (decData is null)
+ break;
+
+ int canCopy = Math.Min(Constants.WiiBlockDataSize - offsetInBlock, length - produced);
+ Array.Copy(decData, offsetInBlock, result, produced, canCopy);
+ produced += canCopy;
+ }
+
+ return produced == length ? result : null;
+ }
+
+ #endregion
+
+ #region Misc helpers
+
+ private void WriteRange(long offset, int length, string filePath)
+ {
+ if (length <= 0) return;
+ byte[]? data = ReadDisc(offset, length);
+ if (data is null) return;
+ string? dir = Path.GetDirectoryName(filePath);
+ if (!string.IsNullOrEmpty(dir)) Directory.CreateDirectory(dir);
+ File.WriteAllBytes(filePath, data);
+ }
+
+ private byte[]? ReadDisc(long offset, int length)
+ {
+ if (length <= 0 || offset < 0) return null;
+ byte[] data = ReadRangeFromSource(offset, length);
+ return data.Length == length ? data : null;
+ }
+
+ /// Total byte length of the raw disc image data.
+ internal long DataLength => _dataSource.Length;
+
+ ///
+        /// Read length bytes from the disc image at offset.
+ /// Returns null if the range is out of bounds or a short read occurs.
+ ///
+ internal byte[]? ReadData(long offset, int length) => ReadDisc(offset, length);
+
+ private static string GetPartitionName(uint type,
+            System.Collections.Generic.Dictionary<uint, int> counters)
+ {
+ // Matches DolphinIsoLib WiiDiscExtractor.PartitionFolderName exactly.
+ // Known types: 0→GM+counter, 1→UP+counter, 2→CH+counter.
+ // Unknown: if all 4 bytes are printable ASCII, use the raw 4-char string (no prefix, no counter).
+ // Otherwise fall back to P{globalIndex} — we use the cumulative counter sum as the index.
+ string code;
+ switch (type)
+ {
+ case 0: code = "GM"; break;
+ case 1: code = "UP"; break;
+ case 2: code = "CH"; break;
+ default:
+ byte b0 = (byte)(type >> 24), b1 = (byte)(type >> 16),
+ b2 = (byte)(type >> 8), b3 = (byte)type;
+ if (b0 >= 0x20 && b0 <= 0x7E && b1 >= 0x20 && b1 <= 0x7E &&
+ b2 >= 0x20 && b2 <= 0x7E && b3 >= 0x20 && b3 <= 0x7E)
+ return System.Text.Encoding.ASCII.GetString(new byte[] { b0, b1, b2, b3 });
+ // Non-printable: use global partition index (sum of all counter values so far)
+ int globalIdx = 0;
+ foreach (var v in counters.Values) globalIdx += v;
+ return $"P{globalIdx}";
+ }
+
+ int idx = counters.TryGetValue(type, out int cv) ? cv : 0;
+ counters[type] = idx + 1;
+ return $"{code}{idx}";
+ }
+
+ private static uint ReadBE32(byte[] data, int offset) => (uint)((data[offset] << 24) | (data[offset + 1] << 16) | (data[offset + 2] << 8) | data[offset + 3]);
+
+ #endregion
+ }
+}
+
diff --git a/SabreTools.Wrappers/NintendoDisc.Printing.cs b/SabreTools.Wrappers/NintendoDisc.Printing.cs
new file mode 100644
index 000000000..3a0947a58
--- /dev/null
+++ b/SabreTools.Wrappers/NintendoDisc.Printing.cs
@@ -0,0 +1,59 @@
+using System.Text;
+using SabreTools.Text.Extensions;
+
+namespace SabreTools.Wrappers
+{
+ public partial class NintendoDisc : IPrintable
+ {
+#if NETCOREAPP
+ ///
+ public string ExportJSON() => System.Text.Json.JsonSerializer.Serialize(Model, _jsonSerializerOptions);
+#endif
+
+ ///
+ public void PrintInformation(StringBuilder builder)
+ {
+ builder.AppendLine($"{Platform} Disc Image Information:");
+ builder.AppendLine("-------------------------");
+
+ builder.AppendLine("Disc Header:");
+ builder.AppendLine(Header.GameId, " Game ID");
+ builder.AppendLine(Header.MakerCode, " Maker Code");
+ builder.AppendLine(Header.DiscNumber, " Disc Number");
+ builder.AppendLine(Header.DiscVersion, " Disc Version");
+ builder.AppendLine(Header.AudioStreaming, " Audio Streaming");
+ builder.AppendLine(Header.StreamingBufferSize, " Streaming Buffer Size");
+ builder.AppendLine(Header.WiiMagic, " Wii Magic");
+ builder.AppendLine(Header.GCMagic, " GC Magic");
+ builder.AppendLine(Header.GameTitle, " Game Title");
+ builder.AppendLine(Header.DisableHashVerification, " Disable Hash Verification");
+ builder.AppendLine(Header.DisableDiscEncryption, " Disable Disc Encryption");
+ builder.AppendLine(Header.DolOffset, " DOL Offset");
+ builder.AppendLine(Header.FstOffset, " FST Offset");
+ builder.AppendLine(Header.FstSize, " FST Size");
+ builder.AppendLine();
+
+ if (PartitionTableEntries is { Length: > 0 })
+ {
+ builder.AppendLine($"Partition Table ({PartitionTableEntries.Length} entries):");
+ for (int i = 0; i < PartitionTableEntries.Length; i++)
+ {
+ var pt = PartitionTableEntries[i];
+ builder.AppendLine($" Partition {i}:");
+ builder.AppendLine(pt.Offset, " Offset");
+ builder.AppendLine(pt.Type, " Type");
+ }
+
+ builder.AppendLine();
+ }
+
+ if (RegionData is not null)
+ {
+ builder.AppendLine("Region Data:");
+ builder.AppendLine(RegionData.RegionSetting, " Region Setting");
+ builder.AppendLine(RegionData.AgeRatings, " Age Ratings");
+ builder.AppendLine();
+ }
+ }
+ }
+}
diff --git a/SabreTools.Wrappers/NintendoDisc.cs b/SabreTools.Wrappers/NintendoDisc.cs
new file mode 100644
index 000000000..b879ebd6e
--- /dev/null
+++ b/SabreTools.Wrappers/NintendoDisc.cs
@@ -0,0 +1,120 @@
+using System.IO;
+using SabreTools.Data.Models.NintendoDisc;
+
+namespace SabreTools.Wrappers
+{
+    public partial class NintendoDisc : WrapperBase<Disc>
+ {
+ #region Descriptive Properties
+
+ ///
+ public override string DescriptionString => "Nintendo GameCube / Wii Disc Image";
+
+ #endregion
+
+ #region Extension Properties
+
+ ///
+ public DiscHeader Header => Model.Header;
+
+ ///
+ public Platform Platform => Model.Platform;
+
+ ///
+ public string GameId => Model.Header.GameId;
+
+ ///
+ public string MakerCode => Model.Header.MakerCode;
+
+ ///
+ public string GameTitle => Model.Header.GameTitle;
+
+ ///
+ public byte DiscNumber => Model.Header.DiscNumber;
+
+ ///
+ public byte DiscVersion => Model.Header.DiscVersion;
+
+ ///
+ public WiiPartitionTableEntry[]? PartitionTableEntries => Model.PartitionTableEntries;
+
+ ///
+ public WiiRegionData? RegionData => Model.RegionData;
+
+ #endregion
+
+ #region Constructors
+
+ ///
+ public NintendoDisc(Disc model, byte[] data) : base(model, data) { }
+
+ ///
+ public NintendoDisc(Disc model, byte[] data, int offset) : base(model, data, offset) { }
+
+ ///
+ public NintendoDisc(Disc model, byte[] data, int offset, int length) : base(model, data, offset, length) { }
+
+ ///
+ public NintendoDisc(Disc model, Stream data) : base(model, data) { }
+
+ ///
+ public NintendoDisc(Disc model, Stream data, long offset) : base(model, data, offset) { }
+
+ ///
+ public NintendoDisc(Disc model, Stream data, long offset, long length) : base(model, data, offset, length) { }
+
+ #endregion
+
+ #region Static Constructors
+
+ ///
+ /// Create a Nintendo disc image wrapper from a byte array and offset
+ ///
+ /// Byte array representing the disc image
+ /// Offset within the array to parse
+ /// A NintendoDisc wrapper on success, null on failure
+ public static NintendoDisc? Create(byte[]? data, int offset)
+ {
+ // If the data is invalid
+ if (data is null || data.Length == 0)
+ return null;
+
+ // If the offset is out of bounds
+ if (offset < 0 || offset >= data.Length)
+ return null;
+
+ // Create a memory stream and use that
+ var dataStream = new MemoryStream(data, offset, data.Length - offset);
+ return Create(dataStream);
+ }
+
+ ///
+ /// Create a Nintendo disc image wrapper from a Stream
+ ///
+ /// Stream representing the disc image
+ /// A NintendoDisc wrapper on success, null on failure
+ public static NintendoDisc? Create(Stream? data)
+ {
+ // If the data is invalid
+ if (data is null || !data.CanRead)
+ return null;
+
+ try
+ {
+ long currentOffset = data.Position;
+
+ var model = new Serialization.Readers.NintendoDisc().Deserialize(data);
+ if (model is null)
+ return null;
+
+ return new NintendoDisc(model, data, currentOffset);
+ }
+ catch
+ {
+ return null;
+ }
+ }
+
+ #endregion
+ }
+}
diff --git a/SabreTools.Wrappers/PurgeCompressor.cs b/SabreTools.Wrappers/PurgeCompressor.cs
new file mode 100644
index 000000000..bb87a2f01
--- /dev/null
+++ b/SabreTools.Wrappers/PurgeCompressor.cs
@@ -0,0 +1,140 @@
+using System;
+using System.IO;
+#if !NET20
+using System.Security.Cryptography;
+#endif
+
+namespace SabreTools.Wrappers
+{
+ ///
+ /// Compresses data using the WIA PURGE format.
+ ///
+ /// PURGE layout produced:
+ /// [ { u32 offset BE, u32 size BE, data[size] } ] ... (zero or more segments)
+ /// [ SHA-1 (20 bytes) ]
+ ///
+ /// Only non-zero byte runs are emitted as segments; consecutive non-zero regions
+ /// separated by a gap of 8 or fewer zero bytes are merged into a single segment.
+ /// The SHA-1 covers: (e.g. exception-list prefix) +
+    /// The SHA-1 covers: precedingBytes (e.g. the exception-list prefix) +
+ ///
+    /// This is the exact inverse of PurgeDecompressor.Decompress.
+ ///
+ internal static class PurgeCompressor
+ {
+ ///
+ /// Compress [ ..
+        /// Compress data[offset .. offset + count) into PURGE format.
+ /// Source buffer.
+        /// Start of data within data.
+ /// Number of bytes to compress.
+ ///
+ /// Optional bytes that precede this payload in the WIA group
+ /// (e.g. the serialised exception list). Included in the SHA-1 but not emitted.
+ /// Pass null or empty if there are none.
+ ///
+ /// PURGE-compressed byte array (segments + 20-byte SHA-1).
+ public static byte[] Compress(byte[] data, int offset, int count, byte[]? precedingBytes = null)
+ {
+ const int MaxGap = 8; // zero-byte runs of this length or fewer are bridged
+
+ var output = new MemoryStream((count / 2) + 32);
+
+ int end = offset + count;
+ int pos = offset;
+
+ while (pos < end)
+ {
+ // Skip leading zeros
+ while (pos < end && data[pos] == 0)
+ pos++;
+
+ if (pos >= end)
+ break;
+
+ // pos is now the start of a non-zero run (segment start)
+ int segStart = pos;
+ int segEnd = pos;
+
+ // Extend the segment, bridging zero-gaps of <= MaxGap bytes
+ while (segEnd < end)
+ {
+ // advance through non-zero bytes
+ while (segEnd < end && data[segEnd] != 0)
+ segEnd++;
+
+ // peek ahead: count zero bytes
+ int zeroRun = 0;
+ while (segEnd + zeroRun < end && data[segEnd + zeroRun] == 0)
+ zeroRun++;
+
+ // If the gap is small enough (and there is more non-zero data after it),
+ // bridge the gap by including it in the segment.
+ if (zeroRun > 0 && zeroRun <= MaxGap && segEnd + zeroRun < end)
+ {
+ segEnd += zeroRun; // include zeros in segment, keep scanning
+ }
+ else
+ {
+ break; // end of segment
+ }
+ }
+
+ // Trim trailing zeros from segment end
+ while (segEnd > segStart && data[segEnd - 1] == 0)
+ segEnd--;
+
+ if (segEnd <= segStart)
+ {
+ pos = segEnd + 1;
+ continue;
+ }
+
+ uint segOffset = (uint)(segStart - offset);
+ uint segSize = (uint)(segEnd - segStart);
+
+ // Write {u32 offsetBE, u32 sizeBE, data[segSize]}
+ WriteBeU32(output, segOffset);
+ WriteBeU32(output, segSize);
+ output.Write(data, segStart, (int)segSize);
+
+ pos = segEnd;
+ }
+
+ byte[] segments = output.ToArray();
+
+ // SHA-1 over: precedingBytes + segments
+ byte[] hash = ComputeSha1(precedingBytes, segments);
+
+ // Final result: segments + hash
+ byte[] result = new byte[segments.Length + 20];
+ Array.Copy(segments, 0, result, 0, segments.Length);
+ Array.Copy(hash, 0, result, segments.Length, 20);
+ return result;
+ }
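+
+        // Example: { 1, 2, 0, 0, 0, 3 } emits a single segment { offset 0, size 6 } because
+        // the three-byte zero gap is bridged (<= MaxGap); with nine zeros between the runs,
+        // two segments would be emitted and the gap bytes left as implicit zeros.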
+
+ private static byte[] ComputeSha1(byte[]? precedingBytes, byte[] segments)
+ {
+#if NET20
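+            // Note: no hashing on this target; an all-zero hash is emitted, so PURGE
+            // data written on net20 will fail SHA-1 verification when read back.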
+ return new byte[20];
+#else
+ using var sha1 = SHA1.Create();
+
+ if (precedingBytes != null && precedingBytes.Length > 0)
+ sha1.TransformBlock(precedingBytes, 0, precedingBytes.Length, null, 0);
+
+ sha1.TransformFinalBlock(segments, 0, segments.Length);
+ return sha1.Hash ?? new byte[20];
+#endif
+ }
+
+ private static void WriteBeU32(Stream s, uint value)
+ {
+ s.WriteByte((byte)(value >> 24));
+ s.WriteByte((byte)(value >> 16));
+ s.WriteByte((byte)(value >> 8));
+ s.WriteByte((byte)value);
+ }
+ }
+}
diff --git a/SabreTools.Wrappers/PurgeDecompressor.cs b/SabreTools.Wrappers/PurgeDecompressor.cs
new file mode 100644
index 000000000..a76ea50f7
--- /dev/null
+++ b/SabreTools.Wrappers/PurgeDecompressor.cs
@@ -0,0 +1,102 @@
+using System;
+using System.Security.Cryptography;
+
+namespace SabreTools.Wrappers
+{
+    /// <summary>
+    /// Decompresses WIA PURGE-compressed group data.
+    ///
+    /// PURGE layout (uncompressed exception-list prefix already stripped by caller):
+    /// [ { u32 offset BE, u32 size BE } { data[size] } ] ... (zero or more segments)
+    /// [ SHA-1 (20 bytes) ]
+    ///
+    /// The SHA-1 covers: the preceding bytes (exception-list bytes, if any) +
+    /// all segment headers and their data bytes.
+    /// Bytes in the output not covered by any segment are implicitly 0x00.
+    ///
+    /// References: Dolphin WIACompression.cpp — PurgeDecompressor / PurgeCompressor
+    /// docs/WiaAndRvz.md — wia_segment_t section
+    /// </summary>
+ internal static class PurgeDecompressor
+ {
+ private const int SHA1_SIZE = 20;
+ private const int SEGMENT_HEADER_SIZE = 8; // u32 offset + u32 size
+
+        /// <summary>
+        /// Decompresses a PURGE-compressed block.
+        /// </summary>
+        /// <param name="input">Buffer containing the compressed data.</param>
+        /// <param name="inputOffset">Byte offset within <paramref name="input"/> where compressed data starts.</param>
+        /// <param name="inputLength">Number of bytes of compressed data (segments + trailing SHA-1).</param>
+        /// <param name="decompressedSize">Expected decompressed output size in bytes.</param>
+        /// <param name="precedingBytes">
+        /// Bytes that precede the compressed data in the SHA-1 computation — the uncompressed
+        /// exception-list section for Wii partition groups. Pass null for non-Wii groups.
+        /// </param>
+        /// <returns>
+        /// The decompressed byte array, or null if the data is malformed or the
+        /// trailing SHA-1 does not match.
+        /// </returns>
+ public static byte[]? Decompress(
+ byte[] input, int inputOffset, int inputLength,
+ int decompressedSize,
+ byte[]? precedingBytes = null)
+ {
+ if (input is null) throw new ArgumentNullException(nameof(input));
+ if (inputLength < SHA1_SIZE) return null;
+
+ byte[] output = new byte[decompressedSize];
+ int pos = inputOffset;
+ int dataEnd = inputOffset + inputLength - SHA1_SIZE;
+
+            using (var sha1 = SHA1.Create())
+ {
+ if (precedingBytes != null && precedingBytes.Length > 0)
+ sha1.TransformBlock(precedingBytes, 0, precedingBytes.Length, null, 0);
+
+ while (pos < dataEnd)
+ {
+ if (pos + SEGMENT_HEADER_SIZE > dataEnd)
+ return null;
+
+ uint segOffset = ReadUInt32BE(input, pos);
+ uint segSize = ReadUInt32BE(input, pos + 4);
+
+ sha1.TransformBlock(input, pos, SEGMENT_HEADER_SIZE, null, 0);
+ pos += SEGMENT_HEADER_SIZE;
+
+ if (segSize == 0)
+ continue;
+
+ if (pos + (int)segSize > dataEnd)
+ return null;
+
+                    // Use 64-bit arithmetic so offset + size cannot wrap around
+                    if ((ulong)segOffset + segSize > (ulong)decompressedSize)
+ return null;
+
+ Array.Copy(input, pos, output, (int)segOffset, (int)segSize);
+ sha1.TransformBlock(input, pos, (int)segSize, null, 0);
+ pos += (int)segSize;
+ }
+
+ sha1.TransformFinalBlock(new byte[0], 0, 0);
+
+ byte[]? computed = sha1.Hash;
+ if (computed is null) return null;
+ for (int i = 0; i < SHA1_SIZE; i++)
+ {
+ if (computed[i] != input[dataEnd + i])
+ return null;
+ }
+ }
+
+ return output;
+ }
+
+ private static uint ReadUInt32BE(byte[] data, int offset) => (uint)((data[offset] << 24) | (data[offset + 1] << 16) | (data[offset + 2] << 8) | data[offset + 3]);
+ }
+}
diff --git a/SabreTools.Wrappers/RvzPackDecompressor.cs b/SabreTools.Wrappers/RvzPackDecompressor.cs
new file mode 100644
index 000000000..5e9e41268
--- /dev/null
+++ b/SabreTools.Wrappers/RvzPackDecompressor.cs
@@ -0,0 +1,116 @@
+using System;
+
+namespace SabreTools.Wrappers
+{
+    /// <summary>
+    /// Decompressor for RVZ packed format.
+    /// RVZ uses run-length encoding to store real data and junk data efficiently:
+    /// - Real data: size (4 bytes) + data bytes
+    /// - Junk data: size with high bit set (4 bytes) + 68-byte seed → regenerate using LFG
+    /// </summary>
+ internal class RvzPackDecompressor
+ {
+ private readonly byte[] m_packed_data;
+ private readonly uint m_rvz_packed_size;
+ private long m_data_offset;
+ private readonly LaggedFibonacciGenerator m_lfg;
+
+ private int m_in_position = 0;
+ private uint m_current_size = 0;
+ private bool m_current_is_junk = false;
+
+        /// <summary>
+        /// Creates a new RVZ pack decompressor.
+        /// </summary>
+        /// <param name="packedData">The packed RVZ data</param>
+        /// <param name="rvzPackedSize">Expected size of packed data (for validation)</param>
+        /// <param name="dataOffset">Offset in the virtual disc (for LFG alignment)</param>
+ public RvzPackDecompressor(byte[] packedData, uint rvzPackedSize, long dataOffset)
+ {
+ m_packed_data = packedData ?? throw new ArgumentNullException(nameof(packedData));
+ m_rvz_packed_size = rvzPackedSize;
+ m_data_offset = dataOffset;
+ m_lfg = new LaggedFibonacciGenerator();
+ }
+
+        /// <summary>
+        /// Decompresses the packed data into the output buffer.
+        /// </summary>
+        /// <param name="output">Destination buffer</param>
+        /// <param name="outputOffset">Offset in destination buffer</param>
+        /// <param name="count">Number of bytes to decompress</param>
+        /// <returns>Number of bytes actually decompressed</returns>
+ public int Decompress(byte[] output, int outputOffset, int count)
+ {
+ int totalWritten = 0;
+
+ while (totalWritten < count && !IsDone())
+ {
+ if (m_current_size == 0)
+ {
+ if (!ReadNextSegment())
+ break;
+ }
+
+ int bytesToWrite = Math.Min((int)m_current_size, count - totalWritten);
+
+ if (m_current_is_junk)
+ {
+ m_lfg.GetBytes(bytesToWrite, output, outputOffset + totalWritten);
+ }
+ else
+ {
+ Array.Copy(m_packed_data, m_in_position, output, outputOffset + totalWritten, bytesToWrite);
+ m_in_position += bytesToWrite;
+ }
+
+ m_current_size -= (uint)bytesToWrite;
+ totalWritten += bytesToWrite;
+ m_data_offset += bytesToWrite;
+ }
+
+ return totalWritten;
+ }
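+
+        // Packed-stream sketch (hypothetical): a 16-byte real run followed by
+        // 0x8000 bytes of LFG junk is stored as
+        //   00 00 00 10 <16 data bytes>     (real segment: plain big-endian size)
+        //   80 00 80 00 <68-byte LFG seed>  (junk segment: size with high bit set)
+        // and Decompress expands it back to 16 + 0x8000 output bytes.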
+
+        /// <summary>
+        /// Checks if decompression is complete.
+        /// </summary>
+ public bool IsDone() => m_current_size == 0 && m_in_position >= m_rvz_packed_size;
+
+ private bool ReadNextSegment()
+ {
+ if (m_in_position + 4 > m_packed_data.Length)
+ return false;
+
+ // Size field is big-endian u32; high bit signals junk data
+ uint sizeField = (uint)((m_packed_data[m_in_position] << 24) |
+ (m_packed_data[m_in_position + 1] << 16) |
+ (m_packed_data[m_in_position + 2] << 8) |
+ m_packed_data[m_in_position + 3]);
+ m_in_position += 4;
+
+ m_current_is_junk = (sizeField & 0x80000000) != 0;
+ m_current_size = sizeField & 0x7FFFFFFF;
+
+ if (m_current_is_junk)
+ {
+ if (m_in_position + (LaggedFibonacciGenerator.SEED_SIZE * 4) > m_packed_data.Length)
+ return false;
+
+ byte[] seed = new byte[LaggedFibonacciGenerator.SEED_SIZE * 4];
+ Array.Copy(m_packed_data, m_in_position, seed, 0, seed.Length);
+ m_in_position += seed.Length;
+
+ m_lfg.SetSeed(seed);
+
+ // Advance LFG to the correct position within the buffer.
+ // Dolphin: lfg.m_position_bytes = data_offset % (LFG_K * sizeof(u32))
+ int offsetInBuffer = (int)(m_data_offset % LaggedFibonacciGenerator.BUFFER_BYTES);
+ if (offsetInBuffer > 0)
+ m_lfg.Forward(offsetInBuffer);
+ }
+
+ return true;
+ }
+ }
+}
diff --git a/SabreTools.Wrappers/RvzPackEncoder.cs b/SabreTools.Wrappers/RvzPackEncoder.cs
new file mode 100644
index 000000000..cbb25db0f
--- /dev/null
+++ b/SabreTools.Wrappers/RvzPackEncoder.cs
@@ -0,0 +1,273 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+
+namespace SabreTools.Wrappers
+{
+    /// <summary>
+    /// Encodes disc data into RVZ-Pack format by replacing predictable LFG
+    /// (Lagged Fibonacci Generator) junk regions with compact seed descriptors.
+    ///
+    /// This is the exact inverse of <see cref="RvzPackDecompressor"/> and mirrors
+    /// Dolphin's RVZPack() in WIABlob.cpp.
+    ///
+    /// Two-phase algorithm:
+    ///
+    /// - Phase 1 (<see cref="ScanForJunk"/>): walk the buffer, identify LFG
+    ///   junk regions, build a map keyed by end-offset.
+    /// - Phase 2 (<see cref="EmitChunk"/>): for each chunk, use the map to
+    ///   emit alternating real-data and junk-seed segments.
+    /// </summary>
+ internal static class RvzPackEncoder
+ {
+ // 17 u32s × 4 bytes = 68 bytes — minimum size to record a seed
+ private const int SeedSizeBytes = LaggedFibonacciGenerator.SEED_SIZE * 4;
+
+ private sealed class JunkRegion
+ {
+ public long StartOffset;
+ public uint[]? Seed;
+ }
+
+        /// <summary>Result of packing a single chunk: compressed payload and its logical size.</summary>
+ internal struct ChunkResult
+ {
+            /// <summary>Packed payload, or null if the chunk contains no junk.</summary>
+ public byte[]? Packed;
+            /// <summary>Number of bytes the decompressor needs to consume from <see cref="Packed"/>.</summary>
+ public uint RvzPackedSize;
+ }
+
+ // ---------------------------------------------------------------
+ // Public API
+ // ---------------------------------------------------------------
+
+        /// <summary>
+        /// RVZ-pack a single chunk.
+        /// Returns null if the chunk contains no junk (write raw instead).
+        /// <paramref name="rvzPackedSize"/> is the number of bytes actually needed
+        /// by the decompressor (may be less than packed.Length due to alignment).
+        /// </summary>
+ public static byte[]? Pack(byte[] data, int dataOffset, int size,
+ long discDataOffset, out uint rvzPackedSize, GcFst? fst = null)
+ {
+ rvzPackedSize = 0;
+ if (size <= 0)
+ return null;
+
+ var junkInfo = ScanForJunk(data, dataOffset, size, discDataOffset, fst);
+ if (junkInfo.Count == 0)
+ return null;
+
+ ChunkResult r = EmitChunk(data, dataOffset, 0L, size, size, junkInfo);
+ rvzPackedSize = r.RvzPackedSize;
+ return r.Packed;
+ }
+
+        /// <summary>
+        /// RVZ-pack a multi-chunk buffer (e.g. a full 2 MiB Wii group).
+        /// Performs one Phase-1 scan over the entire buffer, then calls
+        /// <see cref="EmitChunk"/> per chunk.
+        /// </summary>
+        /// <param name="data">Source buffer.</param>
+        /// <param name="dataOffset">Start of data within <paramref name="data"/>.</param>
+        /// <param name="totalSize">Total number of bytes to process.</param>
+        /// <param name="bytesPerChunk">Size of each individual chunk.</param>
+        /// <param name="numChunks">Number of chunks.</param>
+        /// <param name="discDataOffset">Disc-partition byte offset of the first byte.</param>
+        /// <param name="fst">Optional FST for file-boundary optimisation.</param>
+        /// <returns>
+        /// One <see cref="ChunkResult"/> per chunk;
+        /// Packed == null means the chunk has no junk and should be written raw.
+        /// </returns>
+ public static ChunkResult[] PackGroup(
+ byte[] data, int dataOffset, int totalSize,
+ int bytesPerChunk, int numChunks,
+ long discDataOffset, GcFst? fst = null)
+ {
+ var junkInfo = ScanForJunk(data, dataOffset, totalSize, discDataOffset, fst);
+
+ var result = new ChunkResult[numChunks];
+ for (int c = 0; c < numChunks; c++)
+ {
+ long chunkStart = (long)c * bytesPerChunk;
+ long chunkEnd = Math.Min(chunkStart + bytesPerChunk, totalSize);
+ result[c] = EmitChunk(data, dataOffset, chunkStart, chunkEnd, totalSize, junkInfo);
+ }
+
+ return result;
+ }
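+
+        // Usage sketch (hypothetical names): pack one 2 MiB Wii group as 64
+        // chunks of 32 KiB each; `buffer`, `discDataOffset` and WriteChunk stand
+        // in for the caller's own data and output routine.
+        //
+        //   ChunkResult[] chunks = RvzPackEncoder.PackGroup(
+        //       buffer, 0, 0x200000, 0x8000, 64, discDataOffset);
+        //   foreach (var c in chunks)
+        //       WriteChunk(c.Packed, c.RvzPackedSize); // Packed == null => write raw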
+
+ // ---------------------------------------------------------------
+ // Phase 1 — scan buffer for junk regions
+ // ---------------------------------------------------------------
+
+        private static SortedDictionary<long, JunkRegion> ScanForJunk(
+ byte[] data, int dataOffset, int totalSize, long discDataOffset, GcFst? fst)
+ {
+            var junkInfo = new SortedDictionary<long, JunkRegion>();
+
+ long position = 0;
+ long dataOff = discDataOffset;
+
+ while (position < totalSize)
+ {
+ // Step 1: count and advance past leading zeros
+ long zeroes = 0;
+ while ((position + zeroes) < totalSize &&
+ data[dataOffset + position + zeroes] == 0)
+ zeroes++;
+
+ if (zeroes > SeedSizeBytes)
+ {
+ junkInfo[position + zeroes] = new JunkRegion
+ {
+ StartOffset = position,
+ Seed = new uint[LaggedFibonacciGenerator.SEED_SIZE]
+ };
+ }
+
+ position += zeroes;
+ dataOff += zeroes;
+
+ if (position >= totalSize)
+ break;
+
+ // Step 2: compute aligned read window (next 0x8000 boundary)
+ long nextBoundary = AlignUp(dataOff + 1, 0x8000);
+ long bytesToRead = Math.Min(nextBoundary - dataOff, totalSize - position);
+ int dataOffMod = (int)(dataOff % 0x8000);
+
+            // Step 3: always call GetSeed first; the FST check deliberately comes after (step 4)
+ var seed = new uint[LaggedFibonacciGenerator.SEED_SIZE];
+ int reconstructed = LaggedFibonacciGenerator.GetSeed(
+ data, (int)(dataOffset + position), (int)bytesToRead, dataOffMod, seed);
+
+ if (reconstructed > 0)
+ {
+ junkInfo[position + reconstructed] = new JunkRegion
+ {
+ StartOffset = position,
+ Seed = seed
+ };
+ }
+
+ // Step 4: FST skip AFTER GetSeed
+ if (fst != null)
+ {
+ long queryOff = dataOff + reconstructed;
+ GcFst.FileEntry? fileInfo = fst.FindFileInfo(queryOff);
+ if (fileInfo.HasValue)
+ {
+ long fileEnd = fileInfo.Value.FileEnd;
+ if (fileEnd < (dataOff + bytesToRead))
+ {
+ position += fileEnd - dataOff;
+ dataOff = fileEnd;
+ continue;
+ }
+ }
+ }
+
+ // Step 5: normal advance by block window
+ position += bytesToRead;
+ dataOff += bytesToRead;
+ }
+
+ return junkInfo;
+ }
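+
+        // Map shape example: a junk run detected over [0x10000, 0x18000) is stored
+        // as junkInfo[0x18000] = { StartOffset = 0x10000, Seed = ... }; keying by
+        // end-offset lets Phase 2 find the first region extending past its cursor.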
+
+ // ---------------------------------------------------------------
+ // Phase 2 — emit packed segments for a single chunk
+ // ---------------------------------------------------------------
+
+ private static ChunkResult EmitChunk(
+ byte[] data, int dataOffset,
+ long chunkStart, long chunkEnd, long totalSize,
+            SortedDictionary<long, JunkRegion> junkInfo)
+ {
+ long currentOffset = chunkStart;
+ bool firstIteration = true;
+
+ var output = new MemoryStream((int)(chunkEnd - chunkStart));
+ uint packedSize = 0;
+
+ while (currentOffset < chunkEnd)
+ {
+ long remaining = chunkEnd - currentOffset;
+ long nextJunkStart = chunkEnd;
+ long nextJunkEnd = chunkEnd;
+ uint[]? junkSeed = null;
+
+ if (remaining > SeedSizeBytes)
+ {
+ foreach (var kvp in junkInfo)
+ {
+ // Dolphin Phase-2 condition:
+ // key > currentOffset + SEED_SIZE_BYTES AND
+ // startOffset + SEED_SIZE_BYTES < chunkEnd
+ if ((kvp.Key > (currentOffset + SeedSizeBytes)) &&
+ ((kvp.Value.StartOffset + SeedSizeBytes) < chunkEnd))
+ {
+ nextJunkStart = Math.Max(currentOffset, kvp.Value.StartOffset);
+ nextJunkEnd = Math.Min(chunkEnd, kvp.Key);
+ junkSeed = kvp.Value.Seed;
+ break;
+ }
+ }
+ }
+
+ // On the first iteration, bail out if there is no junk in this chunk
+ if (firstIteration)
+ {
+ if (nextJunkStart == chunkEnd)
+ return new ChunkResult { Packed = null, RvzPackedSize = 0 };
+
+ firstIteration = false;
+ }
+
+ // Emit real-data segment before the junk region
+ long nonJunkBytes = nextJunkStart - currentOffset;
+ if (nonJunkBytes > 0)
+ {
+ WriteBeU32(output, (uint)nonJunkBytes);
+ output.Write(data, (int)(dataOffset + currentOffset), (int)nonJunkBytes);
+ packedSize += 4 + (uint)nonJunkBytes;
+ currentOffset += nonJunkBytes;
+ }
+
+ // Emit junk-seed segment
+ long junkBytes = nextJunkEnd - currentOffset;
+ if (junkBytes > 0 && junkSeed != null)
+ {
+ WriteBeU32(output, 0x80000000u | (uint)junkBytes);
+ byte[] seedBytes = new byte[SeedSizeBytes];
+ Buffer.BlockCopy(junkSeed, 0, seedBytes, 0, SeedSizeBytes);
+ output.Write(seedBytes, 0, SeedSizeBytes);
+ packedSize += 4 + (uint)SeedSizeBytes;
+ currentOffset += junkBytes;
+ }
+
+ if (junkSeed == null)
+ break;
+ }
+
+ return new ChunkResult { Packed = output.ToArray(), RvzPackedSize = packedSize };
+ }
+
+ // ---------------------------------------------------------------
+ // Helpers
+ // ---------------------------------------------------------------
+
+ private static void WriteBeU32(Stream s, uint value)
+ {
+ s.WriteByte((byte)(value >> 24));
+ s.WriteByte((byte)(value >> 16));
+ s.WriteByte((byte)(value >> 8));
+ s.WriteByte((byte)value);
+ }
+
+        /// <summary>Round value up to the next multiple of a power-of-two alignment,
+        /// e.g. AlignUp(0x8001, 0x8000) == 0x10000.</summary>
+        private static long AlignUp(long value, long alignment) => (value + alignment - 1) & ~(alignment - 1);
+ }
+}
diff --git a/SabreTools.Wrappers/WIA.Extraction.cs b/SabreTools.Wrappers/WIA.Extraction.cs
new file mode 100644
index 000000000..7908d0549
--- /dev/null
+++ b/SabreTools.Wrappers/WIA.Extraction.cs
@@ -0,0 +1,13 @@
+namespace SabreTools.Wrappers
+{
+ public partial class WIA : IExtractable
+ {
+        /// <inheritdoc/>
+ public bool Extract(string outputDirectory, bool includeDebug)
+ {
+ // Decompress WIA/RVZ to obtain the inner disc image, then delegate extraction.
+ var inner = GetInnerWrapper();
+ return inner?.Extract(outputDirectory, includeDebug) ?? false;
+ }
+ }
+}
diff --git a/SabreTools.Wrappers/WIA.Printing.cs b/SabreTools.Wrappers/WIA.Printing.cs
new file mode 100644
index 000000000..bcb90797b
--- /dev/null
+++ b/SabreTools.Wrappers/WIA.Printing.cs
@@ -0,0 +1,101 @@
+using System.Text;
+using SabreTools.Text.Extensions;
+
+namespace SabreTools.Wrappers
+{
+ public partial class WIA : IPrintable
+ {
+#if NETCOREAPP
+        /// <inheritdoc/>
+ public string ExportJSON() => System.Text.Json.JsonSerializer.Serialize(Model, _jsonSerializerOptions);
+#endif
+
+        /// <inheritdoc/>
+ public void PrintInformation(StringBuilder builder)
+ {
+ string formatName = IsRvz ? "RVZ" : "WIA";
+ builder.AppendLine($"{formatName} Information:");
+ builder.AppendLine("-------------------------");
+
+ builder.AppendLine("Header 1:");
+ builder.AppendLine(Header1.Magic, " Magic");
+ builder.AppendLine(Header1.Version, " Version");
+ builder.AppendLine(Header1.VersionCompatible, " Version Compatible");
+ builder.AppendLine(Header1.Header2Size, " Header 2 Size");
+ builder.AppendLine(Header1.Header2Hash, " Header 2 Hash");
+ builder.AppendLine(Header1.IsoFileSize, " ISO File Size");
+ builder.AppendLine(Header1.WiaFileSize, " WIA File Size");
+ builder.AppendLine(Header1.Header1Hash, " Header 1 Hash");
+ builder.AppendLine();
+
+ builder.AppendLine("Header 2:");
+ builder.AppendLine(Header2.DiscType.ToString(), " Disc Type");
+ builder.AppendLine(Header2.CompressionType.ToString(), " Compression Type");
+ builder.AppendLine(Header2.CompressionLevel, " Compression Level");
+ builder.AppendLine(Header2.ChunkSize, " Chunk Size");
+ builder.AppendLine(Header2.DiscHeader, " Disc Header");
+ builder.AppendLine(Header2.NumberOfPartitionEntries, " Partition Entry Count");
+ builder.AppendLine(Header2.PartitionEntrySize, " Partition Entry Size");
+ builder.AppendLine(Header2.PartitionEntriesOffset, " Partition Entries Offset");
+ builder.AppendLine(Header2.PartitionEntriesHash, " Partition Entries Hash");
+ builder.AppendLine(Header2.NumberOfRawDataEntries, " Raw Data Entry Count");
+ builder.AppendLine(Header2.RawDataEntriesOffset, " Raw Data Entries Offset");
+ builder.AppendLine(Header2.RawDataEntriesSize, " Raw Data Entries Size");
+ builder.AppendLine(Header2.NumberOfGroupEntries, " Group Entry Count");
+ builder.AppendLine(Header2.GroupEntriesOffset, " Group Entries Offset");
+ builder.AppendLine(Header2.GroupEntriesSize, " Group Entries Size");
+ builder.AppendLine(Header2.CompressorDataSize, " Compressor Data Size");
+ builder.AppendLine(Header2.CompressorData, " Compressor Data");
+ builder.AppendLine();
+
+ var discHeader = DiscHeader;
+ if (discHeader is not null)
+ {
+ builder.AppendLine("Embedded Disc Header:");
+ builder.AppendLine(discHeader.GameId, " Game ID");
+ builder.AppendLine(discHeader.MakerCode, " Maker Code");
+ builder.AppendLine(discHeader.DiscNumber, " Disc Number");
+ builder.AppendLine(discHeader.DiscVersion, " Disc Version");
+ builder.AppendLine(discHeader.GameTitle, " Game Title");
+ builder.AppendLine();
+ }
+
+ if (PartitionEntries is { Length: > 0 })
+ {
+ builder.AppendLine($"Partition Entries ({PartitionEntries.Length}):");
+ for (int i = 0; i < PartitionEntries.Length; i++)
+ {
+ var pe = PartitionEntries[i];
+ builder.AppendLine($" Partition {i}:");
+ builder.AppendLine(pe.PartitionKey, " Partition Key");
+ builder.AppendLine(pe.DataEntry0.FirstSector, " Data Entry 0 First Sector");
+ builder.AppendLine(pe.DataEntry0.NumberOfSectors, " Data Entry 0 Sector Count");
+ builder.AppendLine(pe.DataEntry0.GroupIndex, " Data Entry 0 Group Index");
+ builder.AppendLine(pe.DataEntry0.NumberOfGroups, " Data Entry 0 Group Count");
+ builder.AppendLine(pe.DataEntry1.FirstSector, " Data Entry 1 First Sector");
+ builder.AppendLine(pe.DataEntry1.NumberOfSectors, " Data Entry 1 Sector Count");
+ builder.AppendLine(pe.DataEntry1.GroupIndex, " Data Entry 1 Group Index");
+ builder.AppendLine(pe.DataEntry1.NumberOfGroups, " Data Entry 1 Group Count");
+ }
+
+ builder.AppendLine();
+ }
+
+ if (RawDataEntries is { Length: > 0 })
+ {
+ builder.AppendLine($"Raw Data Entries ({RawDataEntries.Length}):");
+ for (int i = 0; i < RawDataEntries.Length; i++)
+ {
+ var rde = RawDataEntries[i];
+ builder.AppendLine($" Raw Data Entry {i}:");
+ builder.AppendLine(rde.DataOffset, " Data Offset");
+ builder.AppendLine(rde.DataSize, " Data Size");
+ builder.AppendLine(rde.GroupIndex, " Group Index");
+ builder.AppendLine(rde.NumberOfGroups, " Group Count");
+ }
+
+ builder.AppendLine();
+ }
+ }
+ }
+}
diff --git a/SabreTools.Wrappers/WIA.Writing.cs b/SabreTools.Wrappers/WIA.Writing.cs
new file mode 100644
index 000000000..5def5eff7
--- /dev/null
+++ b/SabreTools.Wrappers/WIA.Writing.cs
@@ -0,0 +1,1513 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+#if !NET20
+using System.Security.Cryptography;
+#endif
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+using System.Threading.Tasks;
+#endif
+using NdConstants = SabreTools.Data.Models.NintendoDisc.Constants;
+using WiaConst = SabreTools.Data.Models.WIA.Constants;
+using SabreTools.Data.Models.NintendoDisc;
+using SabreTools.Data.Models.WIA;
+
+namespace SabreTools.Wrappers
+{
+ public partial class WIA : IWritable
+ {
+ // -----------------------------------------------------------------------
+ // Public entry points
+ // -----------------------------------------------------------------------
+
+        /// <summary>
+        /// Compress a <see cref="NintendoDisc"/> wrapper to a WIA or RVZ file.
+        /// </summary>
+ public static bool ConvertFromDisc(NintendoDisc source, string outputPath,
+ bool isRvz = false,
+ WiaRvzCompressionType compressionType = WiaRvzCompressionType.None,
+ int compressionLevel = 5,
+ uint chunkSize = WiaConst.DefaultChunkSize)
+ {
+ if (source is null)
+ return false;
+ if (string.IsNullOrEmpty(outputPath))
+ return false;
+ if (!isRvz && chunkSize != WiaConst.DefaultChunkSize)
+ return false;
+ if (isRvz && compressionType == WiaRvzCompressionType.Purge)
+ return false;
+
+ try
+ {
+ using var fs = File.Open(outputPath, FileMode.Create, FileAccess.ReadWrite, FileShare.None);
+ return WriteWiaRvz(source, fs, isRvz, compressionType,
+ Math.Max(1, Math.Min(22, compressionLevel)), chunkSize);
+ }
+ catch
+ {
+ return false;
+ }
+ }
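+
+        // Usage sketch: write an uncompressed RVZ with the default chunk size,
+        // assuming `disc` is an already-opened NintendoDisc wrapper; compression
+        // types beyond Purge are routed through WiaRvzCompressionHelper.
+        //
+        //   bool ok = WIA.ConvertFromDisc(disc, "game.rvz",
+        //       isRvz: true,
+        //       compressionType: WiaRvzCompressionType.None,
+        //       compressionLevel: 5,
+        //       chunkSize: WiaConst.DefaultChunkSize);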
+
+        /// <inheritdoc/>
+ public bool Write(string outputPath, bool includeDebug)
+ {
+ if (string.IsNullOrEmpty(outputPath))
+ {
+ string ext = (Model?.IsRvz == true) ? ".rvz" : ".wia";
+ string outputFilename = Filename is null
+ ? (Guid.NewGuid().ToString() + ext)
+ : (Filename + ".new");
+ outputPath = Path.GetFullPath(outputFilename);
+ }
+
+ if (Model?.Header1 is null || Model?.Header2 is null)
+ {
+ if (includeDebug) Console.WriteLine("Model was invalid, cannot write!");
+ return false;
+ }
+
+ var writer = new Serialization.Writers.WIA { Debug = includeDebug };
+ return writer.SerializeFile(Model, outputPath);
+ }
+
+ // -----------------------------------------------------------------------
+ // Core pipeline
+ // -----------------------------------------------------------------------
+
+ private static bool WriteWiaRvz(NintendoDisc source, Stream dest,
+ bool isRvz, WiaRvzCompressionType compressionType,
+ int compressionLevel, uint chunkSize)
+ {
+ long isoSize = source.DataLength;
+ if (isoSize <= 0)
+ return false;
+
+ byte[]? discHdr = source.ReadData(0, WiaConst.DiscHeaderStoredSize);
+ if (discHdr is null)
+ return false;
+
+ Platform platform = DetectWiaPlatform(discHdr);
+ if (platform == Platform.Unknown)
+ return false;
+
+ if (platform == Platform.Wii)
+ return WriteWii(source, dest, isRvz, compressionType, compressionLevel, chunkSize, isoSize, discHdr);
+
+ return WriteGameCube(source, dest, isRvz, compressionType, compressionLevel, chunkSize, isoSize, discHdr);
+ }
+
+ // -----------------------------------------------------------------------
+ // GameCube path
+ // -----------------------------------------------------------------------
+
+ private static bool WriteGameCube(NintendoDisc source, Stream dest,
+ bool isRvz, WiaRvzCompressionType compressionType,
+ int compressionLevel, uint chunkSize,
+ long isoSize, byte[] discHdr)
+ {
+ const long rawDataStart = WiaConst.DiscHeaderStoredSize;
+ long rawDataSize = isoSize - rawDataStart;
+ if (rawDataSize <= 0)
+ return false;
+
+ uint numGroups = (uint)((rawDataSize + chunkSize - 1) / chunkSize);
+
+ int groupEntrySize = isRvz ? WiaConst.RvzGroupEntrySize : WiaConst.WiaGroupEntrySize;
+
+ long headersBound = AlignWia(
+ WiaConst.Header1Size + WiaConst.Header2Size +
+ WiaConst.RawDataEntrySize + 0x100 +
+ (numGroups * groupEntrySize),
+ NdConstants.WiiBlockSize);
+
+ dest.Write(new byte[headersBound], 0, (int)headersBound);
+ long bytesWritten = headersBound;
+
+ var groupEntries = new WiaRvzGroupEntry[numGroups];
+            var rawDedupMap = new Dictionary<WiaDedupKey2, WiaRvzGroupEntry>();
+ GcFst? gcFst = isRvz ? BuildGcFst(source) : null;
+
+ WiaRvzCompressionHelper.GetCompressorData(compressionType, compressionLevel,
+ out byte[] propData, out byte propSize);
+
+ uint groupIdx = 0;
+ long srcOff = rawDataStart;
+ long remaining = rawDataSize;
+
+#if !NET20
+ int batchSize = Math.Max(Environment.ProcessorCount * 4, 64);
+#else
+ int batchSize = 64;
+#endif
+
+ while (remaining > 0)
+ {
+ int thisBatch = (int)Math.Min(batchSize, (remaining + chunkSize - 1) / chunkSize);
+ var work = new GcGroupWorkEntry[thisBatch];
+ int actualBatch = 0;
+
+ for (int w = 0; w < thisBatch && remaining > 0; w++)
+ {
+ int toRead = (int)Math.Min(chunkSize, remaining);
+ byte[]? raw = source.ReadData(srcOff, toRead);
+ if (raw is null) break;
+
+ var gi = work[w] = new GcGroupWorkEntry
+ {
+ BytesRead = toRead,
+ SourceOffset = srcOff,
+ };
+
+ srcOff += toRead;
+ remaining -= toRead;
+ actualBatch++;
+
+ gi.IsAllSame = IsAllSameWia(raw, toRead);
+ gi.SameByte = raw[0];
+
+ if (gi.IsAllSame)
+ {
+ var dk = new WiaDedupKey2(gi.SameByte, toRead);
+ if (rawDedupMap.TryGetValue(dk, out var cached))
+ {
+ gi.IsDedupHit = true;
+ gi.DedupEntry = cached;
+ continue;
+ }
+
+ if (gi.SameByte == 0)
+ continue;
+ }
+
+ if (isRvz)
+ {
+ byte[]? packed = RvzPackEncoder.Pack(raw, 0, toRead, srcOff - toRead,
+ out gi.RvzPackedSize, gcFst);
+ gi.MainData = packed ?? raw;
+ if (packed is null) gi.RvzPackedSize = 0;
+ }
+ else
+ {
+ gi.MainData = raw;
+ }
+ }
+
+ if (actualBatch == 0)
+ break;
+
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+ if (compressionType > WiaRvzCompressionType.Purge)
+ {
+ WiaRvzCompressionType ct = compressionType;
+ int cl = compressionLevel;
+ byte[] pd = propData;
+ byte ps = propSize;
+ Parallel.For(0, actualBatch, w =>
+ {
+ var gi = work[w];
+ if (gi.MainData != null && !gi.IsDedupHit)
+ gi.CompressedData = WiaRvzCompressionHelper.Compress(ct, gi.MainData, 0, gi.MainData.Length, cl, pd, ps);
+ });
+ }
+#endif
+
+ for (int w = 0; w < actualBatch; w++)
+ {
+ uint idx = groupIdx + (uint)w;
+ var gi = work[w];
+
+ if (gi.IsDedupHit)
+ {
+ groupEntries[idx] = gi.DedupEntry;
+ }
+ else if (gi.IsAllSame && gi.SameByte == 0)
+ {
+ var dk = new WiaDedupKey2(0, gi.BytesRead);
+ if (!rawDedupMap.TryGetValue(dk, out var ze))
+ {
+ ze = new WiaRvzGroupEntry((uint)(bytesWritten >> 2), 0, 0);
+ rawDedupMap[dk] = ze;
+ }
+
+ groupEntries[idx] = ze;
+ }
+ else if (gi.MainData != null)
+ {
+                    uint groupOff = (uint)(bytesWritten >> 2); // group offsets are stored in 4-byte units
+ uint storedSz = WriteRawGroupData(dest, ref bytesWritten, gi,
+ isRvz, compressionType, compressionLevel, propData, propSize);
+ PadTo4Wia(dest, ref bytesWritten);
+
+ var entry = new WiaRvzGroupEntry(groupOff, storedSz, gi.RvzPackedSize);
+ groupEntries[idx] = entry;
+ if (gi.IsAllSame && gi.SameByte != 0)
+ rawDedupMap[new WiaDedupKey2(gi.SameByte, gi.BytesRead)] = entry;
+ }
+ }
+
+ groupIdx += (uint)actualBatch;
+ }
+
+ // Write tables
+ dest.Seek(WiaConst.Header1Size + WiaConst.Header2Size, SeekOrigin.Begin);
+ long tablePos = WiaConst.Header1Size + WiaConst.Header2Size;
+
+ ulong rawEntriesOffset = (ulong)tablePos;
+ var rawEntry = new WiaRawDataEntry
+ {
+ DataOffset = WiaConst.DiscHeaderStoredSize,
+ DataSize = (ulong)rawDataSize,
+ GroupIndex = 0,
+ NumberOfGroups = numGroups,
+ };
+ byte[] rawEntryBytes = SerializeRawDataEntry(rawEntry);
+ byte[] rawEntryWritten = CompressTableDataWia(rawEntryBytes, compressionType, compressionLevel, propData, propSize);
+ dest.Write(rawEntryWritten, 0, rawEntryWritten.Length);
+ tablePos += rawEntryWritten.Length;
+ PadTableTo4Wia(dest, ref tablePos);
+
+ ulong groupEntriesOffset = (ulong)tablePos;
+ byte[] groupEntryBytes = SerializeGroupEntries(groupEntries, numGroups, isRvz);
+ byte[] groupEntryWritten = CompressTableDataWia(groupEntryBytes, compressionType, compressionLevel, propData, propSize);
+ dest.Write(groupEntryWritten, 0, groupEntryWritten.Length);
+ tablePos += groupEntryWritten.Length;
+
+#if !NET20
+ WriteWiaHeaders(dest, discHdr, isRvz, WiaDiscType.GameCube, compressionType, compressionLevel, chunkSize,
+ 0, (ulong)tablePos, new byte[20], // no partition entries
+ 1u, rawEntriesOffset, (uint)rawEntryWritten.Length,
+ numGroups, groupEntriesOffset, (uint)groupEntryWritten.Length,
+ propData, propSize, isoSize, bytesWritten);
+#endif
+ dest.Flush();
+ return true;
+ }
+
+ // -----------------------------------------------------------------------
+ // Wii path
+ // -----------------------------------------------------------------------
+
+ private static bool WriteWii(NintendoDisc source, Stream dest,
+ bool isRvz, WiaRvzCompressionType compressionType,
+ int compressionLevel, uint chunkSize,
+ long isoSize, byte[] discHdr)
+ {
+#if NET20
+ return false; // AES not available
+#else
+ var partitions = ReadWiiPartitions(source, isoSize);
+ if (partitions is null) return false;
+
+ var rawRegions = BuildRawRegions(source, partitions, isoSize);
+
+ WiaRvzCompressionHelper.GetCompressorData(compressionType, compressionLevel,
+ out byte[] propData, out byte propSize);
+
+ int groupEntrySize = isRvz ? WiaConst.RvzGroupEntrySize : WiaConst.WiaGroupEntrySize;
+ uint totalGroups = CalcTotalGroups(partitions, rawRegions, chunkSize);
+
+ long headersBound = AlignWia(
+ WiaConst.Header1Size + WiaConst.Header2Size +
+ (partitions.Count * WiaConst.PartitionEntrySize) +
+ (rawRegions.Count * WiaConst.RawDataEntrySize) + 0x100 +
+ (totalGroups * groupEntrySize),
+ NdConstants.WiiBlockSize);
+
+ dest.Write(new byte[headersBound], 0, (int)headersBound);
+ long bytesWritten = headersBound;
+
+            var allGroups = new List<WiaRvzGroupEntry>();
+ uint currentGrpIdx = 0;
+ uint lastValidOff = 0;
+
+            var dedupMap = new Dictionary<WiaDedupKey3, WiaDedup2>();
+            var decDedupMap = new Dictionary<WiaDedupKey3, WiaDedup2>();
+            var rawDedupMap = new Dictionary<WiaDedupKey2, WiaRvzGroupEntry>();
+            var wiaZeroDedup = new Dictionary<ulong, uint>();
+
+ var regions = BuildDiscRegions(partitions, rawRegions);
+ foreach (var region in regions)
+ {
+ if (region.IsPartition)
+ {
+ ProcessWiiPartition(source, dest, region.PartitionInfo!,
+ ref bytesWritten, allGroups, ref currentGrpIdx, ref lastValidOff,
+ dedupMap, decDedupMap, wiaZeroDedup,
+ isRvz, compressionType, compressionLevel, chunkSize, propData, propSize);
+ }
+ else
+ {
+ ProcessRawRegion(source, dest, region.RawInfo!,
+ ref bytesWritten, allGroups, ref currentGrpIdx, ref lastValidOff,
+ rawDedupMap, isRvz, compressionType, compressionLevel, chunkSize, propData, propSize);
+ }
+ }
+
+ // Write tables
+ dest.Seek(WiaConst.Header1Size + WiaConst.Header2Size, SeekOrigin.Begin);
+ long tablePos = WiaConst.Header1Size + WiaConst.Header2Size;
+
+ ulong partEntriesOffset = (ulong)tablePos;
+ byte[] partEntriesBytes = SerializePartitionEntries(dest, partitions);
+ tablePos += partEntriesBytes.Length;
+ PadTableTo4Wia(dest, ref tablePos);
+
+ ulong rawEntriesOffset = (ulong)tablePos;
+ byte[] rawEntryBytes = SerializeRawDataEntries(rawRegions);
+ byte[] rawEntryWritten = CompressTableDataWia(rawEntryBytes, compressionType, compressionLevel, propData, propSize);
+ dest.Write(rawEntryWritten, 0, rawEntryWritten.Length);
+ tablePos += rawEntryWritten.Length;
+ PadTableTo4Wia(dest, ref tablePos);
+
+ ulong groupEntriesOffset = (ulong)tablePos;
+ using (var gms = new MemoryStream())
+ {
+ foreach (var e in allGroups)
+ WriteGroupEntryWia(gms, e, isRvz);
+ byte[] gBytes = gms.ToArray();
+ byte[] gWritten = CompressTableDataWia(gBytes, compressionType, compressionLevel, propData, propSize);
+ dest.Write(gWritten, 0, gWritten.Length);
+ tablePos += gWritten.Length;
+
+ byte[] partHashData = ComputeSha1Wia(partEntriesBytes, 0, partEntriesBytes.Length);
+ WriteWiaHeaders(dest, discHdr, isRvz, WiaDiscType.Wii, compressionType, compressionLevel, chunkSize,
+ (uint)partitions.Count, partEntriesOffset, partHashData,
+ (uint)rawRegions.Count, rawEntriesOffset, (uint)rawEntryWritten.Length,
+ (uint)allGroups.Count, groupEntriesOffset, (uint)gWritten.Length,
+ propData, propSize, isoSize, bytesWritten);
+ }
+
+ dest.Flush();
+ return true;
+#endif
+ }
+
+ // -----------------------------------------------------------------------
+ // Wii partition processing
+ // -----------------------------------------------------------------------
+
+#if !NET20
+ private static void ProcessWiiPartition(NintendoDisc source, Stream dest,
+ WiiPartInfo part, ref long bytesWritten,
+            List<WiaRvzGroupEntry> groupEntries, ref uint currentGrpIdx,
+            ref uint lastValidOff,
+            Dictionary<WiaDedupKey3, WiaDedup2> dedupMap,
+            Dictionary<WiaDedupKey3, WiaDedup2> decDedupMap,
+            Dictionary<ulong, uint> wiaZeroDedup,
+ bool isRvz, WiaRvzCompressionType compressionType,
+ int compressionLevel, uint chunkSize,
+ byte[] propData, byte propSize)
+ {
+ long remaining = (long)part.DataSize;
+ long srcOff = (long)part.DataStart;
+ ulong partKeyHash = BitConverter.ToUInt64(part.TitleKey, 0)
+ ^ BitConverter.ToUInt64(part.TitleKey, 8);
+
+ part.FirstGroupIndex = currentGrpIdx;
+
+ int blocksPerChunk = (int)chunkSize / NdConstants.WiiBlockSize;
+ int chunksPerGroup = NdConstants.WiiBlocksPerGroup / blocksPerChunk;
+ int wiiGroupSize = NdConstants.WiiGroupSize;
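+            // e.g. chunkSize == 0x200000 (2 MiB): blocksPerChunk == 64 and
+            // chunksPerGroup == 1 (one chunk spans a whole hash group);
+            // chunkSize == 0x8000: blocksPerChunk == 1 and chunksPerGroup == 64.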
+
+ int outerBatch = (chunksPerGroup == 1)
+ ? Math.Max(Environment.ProcessorCount * 2, 16) : 1;
+
+ var batchItems = new WiiBatchItem[outerBatch];
+            var flatWork = new List<WiaFlatWorkItem>(outerBatch);
+ long regionDecOff = 0;
+
+ while (remaining > 0)
+ {
+ int actualBatch = 0;
+ flatWork.Clear();
+
+ for (int b = 0; b < outerBatch && remaining > 0; b++)
+ {
+ int toRead = (int)Math.Min(wiiGroupSize, remaining);
+ byte[]? encGroup = source.ReadData(srcOff, toRead);
+ if (encGroup is null) break;
+
+ var item = batchItems[b] = new WiiBatchItem
+ {
+ BytesRead = toRead,
+ SrcOffset = srcOff,
+ };
+
+ srcOff += toRead;
+ remaining -= toRead;
+ actualBatch++;
+
+ bool encAllSame = (chunksPerGroup == 1) && IsAllSameWia(encGroup, toRead);
+ item.EncAllSame = encAllSame;
+ item.DedupKey = new WiaDedupKey3(partKeyHash, encGroup[0], toRead);
+
+ if (encAllSame && dedupMap.TryGetValue(item.DedupKey, out var reused))
+ {
+ item.IsInterDedupHit = true;
+ item.DedupResult = reused;
+ regionDecOff += (long)(toRead / NdConstants.WiiBlockSize) * NdConstants.WiiBlockDataSize;
+ continue;
+ }
+
+ int numBlocks = toRead / NdConstants.WiiBlockSize;
+ item.NumChunks = (numBlocks + blocksPerChunk - 1) / blocksPerChunk;
+
+ item.DecryptedAll = DecryptWiiGroup(encGroup, toRead, part.TitleKey);
+ item.AllExceptions = GenerateHashExceptions(encGroup, toRead,
+ item.DecryptedAll, part.TitleKey, numBlocks);
+
+ item.PartWork = new WiiChunkWork[item.NumChunks];
+ for (int c = 0; c < item.NumChunks; c++)
+ {
+ int cBlockStart = c * blocksPerChunk;
+ int cBlockEnd = Math.Min(cBlockStart + blocksPerChunk, numBlocks);
+ int actualBlocks = cBlockEnd - cBlockStart;
+ int decOff = cBlockStart * NdConstants.WiiBlockDataSize;
+ int decLen = actualBlocks * NdConstants.WiiBlockDataSize;
+
+ byte[] procData = new byte[decLen];
+ if (item.DecryptedAll != null && decLen > 0)
+ Array.Copy(item.DecryptedAll, decOff, procData, 0, decLen);
+
+                        var chunkEx = new List<HashExceptionEntry>();
+ if (item.AllExceptions != null)
+ {
+ foreach (var ex in item.AllExceptions)
+ {
+ int exBlock = ex.Offset / NdConstants.WiiBlockHeaderSize;
+ if (exBlock >= cBlockStart && exBlock < cBlockEnd)
+ {
+ int localBlock = exBlock - cBlockStart;
+ ushort localOff = (ushort)((localBlock * NdConstants.WiiBlockHeaderSize)
+ + (ex.Offset % NdConstants.WiiBlockHeaderSize));
+ chunkEx.Add(new HashExceptionEntry { Offset = localOff, Hash = ex.Hash });
+ }
+ }
+ }
+
+ bool isAllZeros = !isRvz && chunksPerGroup == 1
+ && chunkEx.Count == 0 && procData.Length > 0
+ && IsAllSameWia(procData, procData.Length) && procData[0] == 0;
+
+ bool decAllSame = !isRvz && !isAllZeros && chunksPerGroup == 1
+ && chunkEx.Count == 0 && procData.Length > 0
+ && IsAllSameWia(procData, procData.Length);
+
+ var decDedupKey = new WiaDedupKey3(partKeyHash,
+ procData.Length > 0 ? procData[0] : (byte)0, procData.Length);
+
+ var pw = item.PartWork[c] = new WiiChunkWork
+ {
+ IsAllZeros = isAllZeros,
+ DecAllSame = decAllSame,
+ DecDedupKey = decDedupKey,
+ };
+
+ if (isAllZeros) continue;
+
+ if (decAllSame && decDedupMap.TryGetValue(decDedupKey, out var decReused))
+ {
+ pw.IsDecDedupHit = true;
+ pw.DecDedupOffset = decReused.Offset;
+ pw.DecDedupDataSize = decReused.DataSize;
+ continue;
+ }
+
+ byte[] exListBytes = BuildExceptionList(chunkEx);
+ int unpaddedExLen = 2 + (chunkEx.Count * 22);
+
+ byte[] mainData;
+ uint rvzPackedSize = 0;
+ if (isRvz)
+ {
+ long baseDecOff = regionDecOff + ((long)c * (blocksPerChunk * NdConstants.WiiBlockDataSize));
+ byte[]? packed = RvzPackEncoder.Pack(procData, 0, procData.Length,
+ baseDecOff, out rvzPackedSize);
+ mainData = packed ?? procData;
+ if (packed is null) rvzPackedSize = 0;
+ }
+ else
+ {
+ mainData = procData;
+ }
+
+ pw.ExceptionListBytes = exListBytes;
+ pw.UnpaddedExLen = unpaddedExLen;
+ pw.MainDataBytes = mainData;
+ pw.RvzPackedSize = rvzPackedSize;
+
+ if (compressionType != WiaRvzCompressionType.None)
+ flatWork.Add(new WiaFlatWorkItem(b, c));
+ }
+
+ regionDecOff += (long)numBlocks * NdConstants.WiiBlockDataSize;
+ }
+
+ if (actualBatch == 0) break;
+
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+ // Phase 2: compress
+ if (flatWork.Count > 0)
+ {
+ WiaRvzCompressionType ct = compressionType;
+ int cl = compressionLevel;
+ byte[] pd = propData;
+ byte ps = propSize;
+ Parallel.For(0, flatWork.Count, idx =>
+ {
+ var fw = flatWork[idx];
+ var pw = batchItems[fw.BatchIndex].PartWork![fw.ChunkIndex];
+ if (ct > WiaRvzCompressionType.Purge)
+ {
+ byte[] toCompress = ConcatBytesWia(
+ pw.ExceptionListBytes, 0, pw.UnpaddedExLen,
+ pw.MainDataBytes, 0, pw.MainDataBytes.Length);
+ pw.CompressedData = WiaRvzCompressionHelper.Compress(ct, toCompress, 0, toCompress.Length, cl, pd, ps);
+ }
+ else if (ct == WiaRvzCompressionType.Purge)
+ {
+ pw.CompressedData = PurgeCompressor.Compress(pw.MainDataBytes, 0, pw.MainDataBytes.Length, pw.ExceptionListBytes);
+ }
+ });
+ }
+#endif
+
+ // Phase 3: write
+ for (int b = 0; b < actualBatch; b++)
+ {
+ var item = batchItems[b];
+
+ if (item.IsInterDedupHit)
+ {
+ lastValidOff = item.DedupResult.Offset;
+ groupEntries.Add(new WiaRvzGroupEntry(
+ item.DedupResult.Offset,
+ item.DedupResult.DataSize,
+ 0));
+ currentGrpIdx++;
+ continue;
+ }
+
+ for (int c = 0; c < item.NumChunks; c++)
+ {
+ var pw = item.PartWork![c];
+
+ if (pw.IsAllZeros)
+ {
+ uint wouldBeOff = (uint)(bytesWritten >> 2);
+ if (!wiaZeroDedup.TryGetValue(partKeyHash, out uint firstOff))
+ {
+ firstOff = wouldBeOff;
+ wiaZeroDedup[partKeyHash] = firstOff;
+ }
+
+ groupEntries.Add(new WiaRvzGroupEntry(firstOff, 0, 0));
+ }
+ else if (pw.IsDecDedupHit)
+ {
+ groupEntries.Add(new WiaRvzGroupEntry(
+ pw.DecDedupOffset,
+ pw.DecDedupDataSize,
+ 0));
+ }
+ else
+ {
+ uint groupOff = (uint)(bytesWritten >> 2);
+ lastValidOff = groupOff;
+ uint storedSz = WriteWiiChunkData(dest, ref bytesWritten, pw, isRvz, compressionType);
+
+ groupEntries.Add(new WiaRvzGroupEntry(
+ groupOff, storedSz, pw.RvzPackedSize));
+
+ if (item.EncAllSame && c == 0)
+ dedupMap[item.DedupKey] = new WiaDedup2(groupOff, storedSz);
+ if (pw.DecAllSame && c == 0)
+ decDedupMap[pw.DecDedupKey] = new WiaDedup2(groupOff, storedSz);
+
+ PadTo4Wia(dest, ref bytesWritten);
+ }
+ }
+
+                // One group entry was emitted per chunk above
+                currentGrpIdx += (uint)item.NumChunks;
+ }
+ }
+
+ part.NumberOfGroups = currentGrpIdx - part.FirstGroupIndex;
+ }
+
+ private static uint WriteWiiChunkData(Stream dest, ref long bytesWritten,
+ WiiChunkWork pw, bool isRvz, WiaRvzCompressionType compressionType)
+ {
+ if (pw.CompressedData != null)
+ {
+ bool useC = !isRvz || pw.CompressedData.Length < pw.MainDataBytes.Length;
+ if (useC && compressionType > WiaRvzCompressionType.Purge)
+ {
+ dest.Write(pw.CompressedData, 0, pw.CompressedData.Length);
+ bytesWritten += pw.CompressedData.Length;
+ return isRvz
+ ? (uint)pw.CompressedData.Length | 0x80000000u
+ : (uint)pw.CompressedData.Length;
+ }
+
+ if (compressionType == WiaRvzCompressionType.Purge)
+ {
+ dest.Write(pw.ExceptionListBytes, 0, pw.ExceptionListBytes.Length);
+ bytesWritten += pw.ExceptionListBytes.Length;
+ dest.Write(pw.CompressedData, 0, pw.CompressedData.Length);
+ bytesWritten += pw.CompressedData.Length;
+ return (uint)(pw.ExceptionListBytes.Length + pw.CompressedData.Length);
+ }
+ }
+
+ dest.Write(pw.ExceptionListBytes, 0, pw.ExceptionListBytes.Length);
+ bytesWritten += pw.ExceptionListBytes.Length;
+ dest.Write(pw.MainDataBytes, 0, pw.MainDataBytes.Length);
+ bytesWritten += pw.MainDataBytes.Length;
+ return (uint)(pw.ExceptionListBytes.Length + pw.MainDataBytes.Length);
+ }
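+
+        // Stored-size encoding example: an RVZ chunk whose payload compressed to
+        // 0x1234 bytes is recorded as 0x80001234 (high bit = compressed); a chunk
+        // stored raw keeps its plain byte length with the high bit clear.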
+#endif
+
+ // -----------------------------------------------------------------------
+ // Raw region processing
+ // -----------------------------------------------------------------------
+
+#if !NET20
+ private static void ProcessRawRegion(NintendoDisc source, Stream dest,
+ RawRegionInfo raw, ref long bytesWritten,
+            List<WiaRvzGroupEntry> groupEntries, ref uint currentGrpIdx,
+ ref uint lastValidOff,
+            Dictionary<WiaDedupKey2, WiaRvzGroupEntry> rawDedupMap,
+ bool isRvz, WiaRvzCompressionType compressionType,
+ int compressionLevel, uint chunkSize,
+ byte[] propData, byte propSize)
+ {
+ raw.FirstGroupIndex = currentGrpIdx;
+
+ long skip = (long)raw.Offset % NdConstants.WiiBlockSize;
+ long adjOffset = (long)raw.Offset - skip;
+ long remaining = (long)raw.Size + skip;
+ long srcOff = adjOffset;
+
+ while (remaining > 0)
+ {
+ int toRead = (int)Math.Min(chunkSize, remaining);
+ byte[]? data = source.ReadData(srcOff, toRead);
+ if (data is null) break;
+
+ bool isAllSame = IsAllSameWia(data, toRead);
+ byte sameByte = data[0];
+
+ if (isAllSame)
+ {
+ var dk = new WiaDedupKey2(sameByte, toRead);
+ if (rawDedupMap.TryGetValue(dk, out var cached))
+ {
+ groupEntries.Add(cached);
+ currentGrpIdx++;
+ srcOff += toRead;
+ remaining -= toRead;
+ continue;
+ }
+
+ if (sameByte == 0)
+ {
+ var ze = new WiaRvzGroupEntry((uint)(bytesWritten >> 2), 0, 0);
+ rawDedupMap[dk] = ze;
+ groupEntries.Add(ze);
+ currentGrpIdx++;
+ srcOff += toRead;
+ remaining -= toRead;
+ continue;
+ }
+ }
+
+ byte[] mainData;
+ uint rvzPackedSize = 0;
+ if (isRvz)
+ {
+ byte[]? packed = RvzPackEncoder.Pack(data, 0, toRead, srcOff, out rvzPackedSize);
+ mainData = packed ?? data;
+ if (packed is null) rvzPackedSize = 0;
+ }
+ else
+ {
+ mainData = data;
+ }
+
+ byte[]? compressed = null;
+ if (compressionType > WiaRvzCompressionType.Purge)
+ {
+ byte[] c2 = WiaRvzCompressionHelper.Compress(compressionType, mainData, 0,
+ mainData.Length, compressionLevel, propData, propSize);
+ if (!isRvz || c2.Length < mainData.Length)
+ compressed = c2;
+ }
+ else if (compressionType == WiaRvzCompressionType.Purge)
+ {
+ compressed = PurgeCompressor.Compress(mainData, 0, mainData.Length);
+ }
+
+ uint groupOff = (uint)(bytesWritten >> 2);
+ lastValidOff = groupOff;
+ uint storedSz;
+
+ if (compressed != null)
+ {
+ bool useC = !isRvz || compressed.Length < mainData.Length;
+ if (useC)
+ {
+ dest.Write(compressed, 0, compressed.Length);
+ bytesWritten += compressed.Length;
+ storedSz = isRvz
+ ? (uint)compressed.Length | 0x80000000u
+ : (uint)compressed.Length;
+ }
+ else
+ {
+ dest.Write(mainData, 0, mainData.Length);
+ bytesWritten += mainData.Length;
+ storedSz = (uint)mainData.Length;
+ }
+ }
+ else
+ {
+ dest.Write(mainData, 0, mainData.Length);
+ bytesWritten += mainData.Length;
+ storedSz = (uint)mainData.Length;
+ }
+
+ PadTo4Wia(dest, ref bytesWritten);
+
+ var entry = new WiaRvzGroupEntry(groupOff, storedSz, rvzPackedSize);
+ groupEntries.Add(entry);
+ if (isAllSame && sameByte != 0)
+ rawDedupMap[new WiaDedupKey2(sameByte, toRead)] = entry;
+
+ currentGrpIdx++;
+ srcOff += toRead;
+ remaining -= toRead;
+ }
+
+ raw.NumberOfGroups = currentGrpIdx - raw.FirstGroupIndex;
+ }
+#endif
+
+ // -----------------------------------------------------------------------
+ // Wii crypto helpers
+ // -----------------------------------------------------------------------
+
+#if !NET20
+ private static byte[]? DecryptWiiGroup(byte[] encGroup, int bytesRead, byte[] titleKey)
+ {
+ int numBlocks = bytesRead / NdConstants.WiiBlockSize;
+ var result = new byte[numBlocks * NdConstants.WiiBlockDataSize];
+
+ for (int i = 0; i < numBlocks; i++)
+ {
+ int off = i * NdConstants.WiiBlockSize;
+ byte[] iv = new byte[16];
+ Array.Copy(encGroup, off + 0x3D0, iv, 0, 16);
+
+ byte[] encData = new byte[NdConstants.WiiBlockDataSize];
+ Array.Copy(encGroup, off + NdConstants.WiiBlockHeaderSize, encData, 0, NdConstants.WiiBlockDataSize);
+
+ byte[]? dec = NintendoDisc.DecryptBlock(encData, titleKey, iv);
+ if (dec is null) return null;
+
+ Array.Copy(dec, 0, result, i * NdConstants.WiiBlockDataSize, NdConstants.WiiBlockDataSize);
+ }
+
+ return result;
+ }
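+
+        // Block layout handled above (standard Wii layout): each 0x8000-byte block
+        // is a 0x400-byte hash header followed by 0x7C00 bytes of AES-CBC data;
+        // the data IV is read from offset 0x3D0 within the encrypted hash header.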
+
+        private static List<HashExceptionEntry> GenerateHashExceptions(
+ byte[] encGroup, int bytesRead, byte[]? decryptedData, byte[] titleKey, int numBlocks)
+ {
+            var exceptions = new List<HashExceptionEntry>();
+ if (decryptedData is null) return exceptions;
+
+ // Re-encrypt the decrypted data to obtain recomputed hashes
+ byte[] reEncGroup = EncryptWiiGroup(decryptedData, titleKey, numBlocks);
+
+ for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++)
+ {
+ int blockOff = blockIdx * NdConstants.WiiBlockSize;
+
+ byte[] encHashBlock = new byte[NdConstants.WiiBlockHeaderSize];
+ Array.Copy(encGroup, blockOff, encHashBlock, 0, NdConstants.WiiBlockHeaderSize);
+
+ using var aes = Aes.Create();
+ aes.Key = titleKey;
+ aes.IV = new byte[16];
+ aes.Mode = CipherMode.CBC;
+ aes.Padding = PaddingMode.None;
+
+ byte[] origHash;
+ using (var dec = aes.CreateDecryptor())
+ origHash = dec.TransformFinalBlock(encHashBlock, 0, NdConstants.WiiBlockHeaderSize);
+
+ byte[] reEncHashBlock = new byte[NdConstants.WiiBlockHeaderSize];
+ Array.Copy(reEncGroup, blockOff, reEncHashBlock, 0, NdConstants.WiiBlockHeaderSize);
+ byte[] recompHash;
+ using (var dec = aes.CreateDecryptor())
+ recompHash = dec.TransformFinalBlock(reEncHashBlock, 0, NdConstants.WiiBlockHeaderSize);
+
+ for (int off = 0; off < NdConstants.WiiBlockHeaderSize; off += 20)
+ {
+ bool match = true;
+ for (int j = 0; j < 20 && (off + j) < NdConstants.WiiBlockHeaderSize; j++)
+ {
+ if (origHash[off + j] != recompHash[off + j])
+ {
+ match = false;
+ break;
+ }
+ }
+
+ if (!match)
+ {
+ byte[] hash = new byte[20];
+ Array.Copy(origHash, off, hash, 0, Math.Min(20, NdConstants.WiiBlockHeaderSize - off));
+ exceptions.Add(new HashExceptionEntry
+ {
+ Offset = (ushort)((blockIdx * NdConstants.WiiBlockHeaderSize) + off),
+ Hash = hash,
+ });
+ }
+ }
+ }
+
+ return exceptions;
+ }
+
+        private static List<WiiPartInfo>? ReadWiiPartitions(NintendoDisc source, long isoSize)
+ {
+            var result = new List<WiiPartInfo>();
+
+ for (int group = 0; group < NdConstants.WiiPartitionGroupCount; group++)
+ {
+ byte[]? gEntry = source.ReadData(NdConstants.WiiPartitionTableAddress + (group * 8), 8);
+ if (gEntry is null) continue;
+
+ uint count = ReadBE32Wia(gEntry, 0);
+ uint offset = ReadBE32Wia(gEntry, 4) << 2;
+ if (count == 0 || offset == 0) continue;
+
+ for (int i = 0; i < (int)count; i++)
+ {
+ byte[]? pEntry = source.ReadData(offset + (i * 8), 8);
+ if (pEntry is null) continue;
+
+ long partOff = (long)ReadBE32Wia(pEntry, 0) << 2;
+
+ byte[]? sigType = source.ReadData(partOff, 4);
+ if (sigType is null || ReadBE32Wia(sigType, 0) != 0x10001U) continue;
+
+ byte[]? hdr = source.ReadData(partOff, 0x2C0);
+ if (hdr is null) continue;
+
+ byte[] encKey = new byte[16];
+ Array.Copy(hdr, 0x1BF, encKey, 0, 16);
+ byte[] titleId = new byte[8];
+ Array.Copy(hdr, 0x1DC, titleId, 0, 8);
+ byte ckIdx = hdr[0x1F1];
+
+ byte[]? titleKey = NintendoDisc.DecryptTitleKey(encKey, titleId, ckIdx);
+ if (titleKey is null) continue;
+
+ ulong dataOff = (ulong)ReadBE32Wia(hdr, 0x2B8) << 2;
+ ulong dataSize = (ulong)ReadBE32Wia(hdr, 0x2BC) << 2;
+
+ result.Add(new WiiPartInfo
+ {
+ PartitionOffset = (ulong)partOff,
+ TitleKey = titleKey,
+ DataOffset = dataOff,
+ DataSize = dataSize,
+ DataStart = (ulong)partOff + dataOff,
+ DataEnd = (ulong)partOff + dataOff + dataSize,
+ });
+ }
+ }
+
+ return result.Count == 0 ? null : result;
+ }
+
+        private static List<RawRegionInfo> BuildRawRegions(NintendoDisc source,
+            List<WiiPartInfo> partitions, long isoSize)
+ {
+            var regions = new List<RawRegionInfo>();
+ partitions.Sort((a, b) => a.PartitionOffset.CompareTo(b.PartitionOffset));
+
+ ulong cur = WiaConst.DiscHeaderStoredSize;
+ foreach (var p in partitions)
+ {
+ if (cur < p.PartitionOffset)
+ regions.Add(new RawRegionInfo { Offset = cur, Size = p.PartitionOffset - cur });
+ regions.Add(new RawRegionInfo { Offset = p.PartitionOffset, Size = p.DataOffset });
+ cur = p.DataEnd;
+ }
+
+ if (cur < (ulong)isoSize)
+ regions.Add(new RawRegionInfo { Offset = cur, Size = (ulong)isoSize - cur });
+
+ return regions;
+ }
+
+        private static uint CalcTotalGroups(List<WiiPartInfo> partitions,
+            List<RawRegionInfo> rawRegions, uint chunkSize)
+ {
+ uint total = 0;
+ foreach (var p in partitions)
+ total += (uint)((p.DataSize + chunkSize - 1) / chunkSize);
+ foreach (var r in rawRegions)
+ total += (uint)((r.Size + chunkSize - 1) / chunkSize);
+ return total;
+ }
+
+        private static List<DiscRegionEntry> BuildDiscRegions(List<WiiPartInfo> partitions,
+            List<RawRegionInfo> rawRegions)
+ {
+            var result = new List<DiscRegionEntry>();
+ foreach (var p in partitions)
+ result.Add(new DiscRegionEntry { IsPartition = true, Offset = (long)p.DataStart, PartitionInfo = p });
+ foreach (var r in rawRegions)
+ result.Add(new DiscRegionEntry { IsPartition = false, Offset = (long)r.Offset, RawInfo = r });
+ result.Sort((a, b) => a.Offset.CompareTo(b.Offset));
+ return result;
+ }
+#endif
+
+ // -----------------------------------------------------------------------
+ // GcFst helper
+ // -----------------------------------------------------------------------
+
+ private static GcFst? BuildGcFst(NintendoDisc source)
+ {
+ byte[]? hdr = source.ReadData(0x420, 12);
+ if (hdr is null) return null;
+
+ uint fstOff = ReadBE32Wia(hdr, 4);
+ uint fstSize = ReadBE32Wia(hdr, 8);
+ if (fstOff == 0 || fstSize == 0) return null;
+
+ byte[]? fstData = source.ReadData(fstOff, (int)fstSize);
+ if (fstData is null) return null;
+
+ return GcFst.TryParse(fstData, offsetShift: 0);
+ }
+
+ // -----------------------------------------------------------------------
+ // Serialisation
+ // -----------------------------------------------------------------------
+
+ private static byte[] SerializeRawDataEntry(WiaRawDataEntry e)
+ {
+ using var ms = new MemoryStream();
+ WriteBE64Wia(ms, e.DataOffset);
+ WriteBE64Wia(ms, e.DataSize);
+ WriteBE32Wia(ms, e.GroupIndex);
+ WriteBE32Wia(ms, e.NumberOfGroups);
+ return ms.ToArray();
+ }
+
+ private static byte[] SerializeGroupEntries(WiaRvzGroupEntry[] entries, uint count, bool isRvz)
+ {
+ using var ms = new MemoryStream();
+ for (uint i = 0; i < count && i < (uint)entries.Length; i++)
+ WriteGroupEntryWia(ms, entries[i], isRvz);
+ return ms.ToArray();
+ }
+
+ private static void WriteGroupEntryWia(Stream s, WiaRvzGroupEntry e, bool isRvz)
+ {
+ WriteBE32Wia(s, e.DataOffset);
+ WriteBE32Wia(s, e.DataSize);
+ if (isRvz) WriteBE32Wia(s, e.RvzPackedSize);
+ }
+
+#if !NET20
+        private static byte[] SerializePartitionEntries(Stream dest, List<WiiPartInfo> partitions)
+ {
+ using var ms = new MemoryStream();
+ foreach (var p in partitions)
+ {
+ // Write 16-byte key
+ ms.Write(p.TitleKey, 0, 16);
+ dest.Write(p.TitleKey, 0, 16);
+
+ // DataEntry0: all of the partition
+ WriteBE32Wia(ms, (uint)(p.DataStart / NdConstants.WiiBlockSize));
+ WriteBE32Wia(ms, (uint)(p.DataSize / NdConstants.WiiBlockSize));
+ WriteBE32Wia(ms, p.FirstGroupIndex);
+ WriteBE32Wia(ms, p.NumberOfGroups);
+ WriteBE32Wia(dest, (uint)(p.DataStart / NdConstants.WiiBlockSize));
+ WriteBE32Wia(dest, (uint)(p.DataSize / NdConstants.WiiBlockSize));
+ WriteBE32Wia(dest, p.FirstGroupIndex);
+ WriteBE32Wia(dest, p.NumberOfGroups);
+
+ // DataEntry1: zeros
+ byte[] zeroPDE = new byte[WiaConst.PartitionDataEntrySize];
+ ms.Write(zeroPDE, 0, zeroPDE.Length);
+ dest.Write(zeroPDE, 0, zeroPDE.Length);
+ }
+
+ return ms.ToArray();
+ }
+
+        private static byte[] SerializeRawDataEntries(List<RawRegionInfo> regions)
+ {
+ using var ms = new MemoryStream();
+ foreach (var r in regions)
+ {
+ var e = new WiaRawDataEntry
+ {
+ DataOffset = r.Offset,
+ DataSize = r.Size,
+ GroupIndex = r.FirstGroupIndex,
+ NumberOfGroups = r.NumberOfGroups,
+ };
+ ms.Write(SerializeRawDataEntry(e), 0, WiaConst.RawDataEntrySize);
+ }
+
+ return ms.ToArray();
+ }
+
+        /// <summary>
+        /// Serialise a WIA hash-exception list: u16 BE count, then one
+        /// { u16 BE offset, 20-byte SHA-1 } entry per exception, zero-padded
+        /// to the next 4-byte boundary.
+        /// </summary>
+        private static byte[] BuildExceptionList(List<HashExceptionEntry> exceptions)
+ {
+ using var ms = new MemoryStream();
+ long pos = 0;
+ ushort count = (ushort)exceptions.Count;
+ ms.WriteByte((byte)(count >> 8));
+ ms.WriteByte((byte)count);
+ pos += 2;
+ foreach (var ex in exceptions)
+ {
+ ms.WriteByte((byte)(ex.Offset >> 8));
+ ms.WriteByte((byte)ex.Offset);
+ ms.Write(ex.Hash, 0, 20);
+ pos += 22;
+ }
+
+ while ((pos % 4) != 0) { ms.WriteByte(0); pos++; }
+
+ return ms.ToArray();
+ }
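+
+        // Size example: two exceptions serialise to 2 + (2 * 22) = 46 payload
+        // bytes, padded with two zero bytes to the next 4-byte boundary (48).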
+#endif
+
+ private static byte[] CompressTableDataWia(byte[] data,
+ WiaRvzCompressionType ct, int cl, byte[] propData, byte propSize)
+ {
+ if (ct == WiaRvzCompressionType.Purge)
+ return PurgeCompressor.Compress(data, 0, data.Length);
+ if (ct > WiaRvzCompressionType.Purge)
+ return WiaRvzCompressionHelper.Compress(ct, data, 0, data.Length, cl, propData, propSize);
+ return data;
+ }
+
+ // -----------------------------------------------------------------------
+ // Header finalisation
+ // -----------------------------------------------------------------------
+
+#if !NET20
+ private static void WriteWiaHeaders(Stream dest, byte[] discHdr,
+ bool isRvz, WiaDiscType discType,
+ WiaRvzCompressionType compressionType, int compressionLevel, uint chunkSize,
+ uint numPartitions, ulong partEntriesOffset, byte[] partHash,
+ uint numRawData, ulong rawEntriesOffset, uint rawEntriesSize,
+ uint numGroups, ulong groupEntriesOffset, uint groupEntriesSize,
+ byte[] propData, byte propSize,
+ long isoSize, long fileSize)
+ {
+ var header2 = new WiaHeader2
+ {
+ DiscType = discType,
+ CompressionType = compressionType,
+ CompressionLevel = compressionLevel,
+ ChunkSize = chunkSize,
+ DiscHeader = discHdr,
+ NumberOfPartitionEntries = numPartitions,
+ PartitionEntrySize = WiaConst.PartitionEntrySize,
+ PartitionEntriesOffset = partEntriesOffset,
+ PartitionEntriesHash = partHash,
+ NumberOfRawDataEntries = numRawData,
+ RawDataEntriesOffset = rawEntriesOffset,
+ RawDataEntriesSize = rawEntriesSize,
+ NumberOfGroupEntries = numGroups,
+ GroupEntriesOffset = groupEntriesOffset,
+ GroupEntriesSize = groupEntriesSize,
+ CompressorDataSize = propSize,
+ CompressorData = propData,
+ };
+
+ byte[] h2Bytes = SerializeHeader2Wia(header2);
+ byte[] h2Hash = ComputeSha1Wia(h2Bytes, 0, h2Bytes.Length);
+
+ uint magic = isRvz ? WiaConst.RvzMagic : WiaConst.WiaMagic;
+ uint ver = isRvz ? WiaConst.RvzVersion : WiaConst.WiaVersion;
+ uint verC = isRvz ? WiaConst.RvzVersionWriteCompatible : WiaConst.WiaVersionWriteCompatible;
+
+ var header1 = new WiaHeader1
+ {
+ Magic = magic,
+ Version = ver,
+ VersionCompatible = verC,
+ Header2Size = WiaConst.Header2Size,
+ Header2Hash = h2Hash,
+ IsoFileSize = (ulong)isoSize,
+ WiaFileSize = (ulong)fileSize,
+ Header1Hash = new byte[20],
+ };
+
+ byte[] h1Bytes = SerializeHeader1Wia(header1);
+ byte[] h1Hashable = new byte[h1Bytes.Length - 20];
+ Array.Copy(h1Bytes, h1Hashable, h1Hashable.Length);
+ header1.Header1Hash = ComputeSha1Wia(h1Hashable, 0, h1Hashable.Length);
+
+ dest.Seek(0, SeekOrigin.Begin);
+ dest.Write(SerializeHeader1Wia(header1), 0, WiaConst.Header1Size);
+ dest.Write(h2Bytes, 0, h2Bytes.Length);
+ }
+
+ private static byte[] SerializeHeader1Wia(WiaHeader1 h)
+ {
+ using var ms = new MemoryStream();
+ WriteLE32Wia(ms, h.Magic);
+ WriteBE32Wia(ms, h.Version);
+ WriteBE32Wia(ms, h.VersionCompatible);
+ WriteBE32Wia(ms, h.Header2Size);
+ ms.Write(h.Header2Hash, 0, 20);
+ WriteBE64Wia(ms, h.IsoFileSize);
+ WriteBE64Wia(ms, h.WiaFileSize);
+ ms.Write(h.Header1Hash, 0, 20);
+ return ms.ToArray();
+ }
+
+ private static byte[] SerializeHeader2Wia(WiaHeader2 h)
+ {
+ using var ms = new MemoryStream();
+ WriteBE32Wia(ms, (uint)h.DiscType);
+ WriteBE32Wia(ms, (uint)h.CompressionType);
+ WriteBE32Wia(ms, (uint)h.CompressionLevel);
+ WriteBE32Wia(ms, h.ChunkSize);
+ byte[] dh = h.DiscHeader ?? new byte[WiaConst.DiscHeaderStoredSize];
+ ms.Write(dh, 0, Math.Min(dh.Length, WiaConst.DiscHeaderStoredSize));
+ if (dh.Length < WiaConst.DiscHeaderStoredSize)
+ ms.Write(new byte[WiaConst.DiscHeaderStoredSize - dh.Length], 0,
+ WiaConst.DiscHeaderStoredSize - dh.Length);
+ WriteBE32Wia(ms, h.NumberOfPartitionEntries);
+ WriteBE32Wia(ms, h.PartitionEntrySize);
+ WriteBE64Wia(ms, h.PartitionEntriesOffset);
+ ms.Write(h.PartitionEntriesHash ?? new byte[20], 0, 20);
+ WriteBE32Wia(ms, h.NumberOfRawDataEntries);
+ WriteBE64Wia(ms, h.RawDataEntriesOffset);
+ WriteBE32Wia(ms, h.RawDataEntriesSize);
+ WriteBE32Wia(ms, h.NumberOfGroupEntries);
+ WriteBE64Wia(ms, h.GroupEntriesOffset);
+ WriteBE32Wia(ms, h.GroupEntriesSize);
+ ms.WriteByte(h.CompressorDataSize);
+ byte[] prop = h.CompressorData ?? new byte[7];
+ ms.Write(prop, 0, Math.Min(prop.Length, 7));
+ if (prop.Length < 7)
+ ms.Write(new byte[7 - prop.Length], 0, 7 - prop.Length);
+ return ms.ToArray();
+ }
+
+ private static byte[] ComputeSha1Wia(byte[] data, int offset, int count)
+ {
+ if (count == 0) return new byte[20];
+ using var sha1 = SHA1.Create();
+ return sha1.ComputeHash(data, offset, count);
+ }
+#endif
+
+ // -----------------------------------------------------------------------
+ // Platform detection
+ // -----------------------------------------------------------------------
+
+ private static Platform DetectWiaPlatform(byte[] header)
+ {
+ if (header.Length >= 0x1C)
+ {
+ uint wiiMagic = (uint)((header[0x18] << 24) | (header[0x19] << 16) | (header[0x1A] << 8) | header[0x1B]);
+ if (wiiMagic == NdConstants.WiiMagicWord)
+ return Platform.Wii;
+ }
+
+ if (header.Length >= 4)
+ {
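+            // Heuristic: a plausible GameCube game ID starts with four uppercase
+            // alphanumeric characters (e.g. "GALE01"); junk headers fail this check.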
+ bool valid = true;
+ for (int i = 0; i < 4; i++)
+ {
+ char c = (char)header[i];
+ if (!((c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')))
+ {
+ valid = false;
+ break;
+ }
+ }
+
+ if (valid) return Platform.GameCube;
+ }
+
+ return Platform.Unknown;
+ }
+
+ // -----------------------------------------------------------------------
+ // Misc helpers
+ // -----------------------------------------------------------------------
+
+ private static uint WriteRawGroupData(Stream dest, ref long bytesWritten,
+ GcGroupWorkEntry gi, bool isRvz,
+ WiaRvzCompressionType compressionType, int compressionLevel,
+ byte[] propData, byte propSize)
+ {
+ if (gi.CompressedData != null)
+ {
+ bool useC = !isRvz || gi.CompressedData.Length < gi.MainData!.Length;
+ if (useC)
+ {
+ dest.Write(gi.CompressedData, 0, gi.CompressedData.Length);
+ bytesWritten += gi.CompressedData.Length;
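+                // RVZ marks a compressed group by setting the top bit of the stored
+                // size; readers mask it back off (see ReadGroupRaw in the WIA wrapper).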
+ return isRvz
+ ? (uint)gi.CompressedData.Length | 0x80000000u
+ : (uint)gi.CompressedData.Length;
+ }
+ }
+
+ if (compressionType == WiaRvzCompressionType.Purge && gi.MainData != null)
+ {
+ byte[] comp = PurgeCompressor.Compress(gi.MainData, 0, gi.MainData.Length);
+ dest.Write(comp, 0, comp.Length);
+ bytesWritten += comp.Length;
+ return (uint)comp.Length;
+ }
+
+ byte[] data = gi.MainData!;
+ dest.Write(data, 0, data.Length);
+ bytesWritten += data.Length;
+ return (uint)data.Length;
+ }
+
+ private static bool IsAllSameWia(byte[] data, int length)
+ {
+ if (length == 0) return true;
+ byte first = data[0];
+ for (int i = 1; i < length; i++)
+ {
+ if (data[i] != first) return false;
+ }
+
+ return true;
+ }
+
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+ private static byte[] ConcatBytesWia(byte[] a, int aOff, int aLen, byte[] b, int bOff, int bLen)
+ {
+ var r = new byte[aLen + bLen];
+ if (aLen > 0) Array.Copy(a, aOff, r, 0, aLen);
+ if (bLen > 0) Array.Copy(b, bOff, r, aLen, bLen);
+ return r;
+ }
+#endif
+
+ private static void PadTo4Wia(Stream s, ref long bytesWritten)
+ {
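+        // (-n) & 3 gives the byte count needed to reach the next 4-byte boundary,
+        // e.g. bytesWritten = 5 yields pad = 3.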
+ int pad = (int)((-bytesWritten) & 3);
+ if (pad > 0) { s.Write(new byte[pad], 0, pad); bytesWritten += pad; }
+ }
+
+ private static void PadTableTo4Wia(Stream s, ref long tablePos)
+ {
+ long pad = (-tablePos) & 3;
+ if (pad > 0) { s.Write(new byte[pad], 0, (int)pad); tablePos += pad; }
+ }
+
+ private static long AlignWia(long value, long align) => (value + align - 1) / align * align;
+
+ private static uint SwapBE(uint v) => (v << 24) | ((v << 8) & 0x00FF0000u) | ((v >> 8) & 0x0000FF00u) | (v >> 24);
+
+ private static uint ReadBE32Wia(byte[] d, int o) => (uint)((d[o] << 24) | (d[o + 1] << 16) | (d[o + 2] << 8) | d[o + 3]);
+
+ private static void WriteBE32Wia(Stream s, uint v)
+ {
+ s.WriteByte((byte)(v >> 24));
+ s.WriteByte((byte)(v >> 16));
+ s.WriteByte((byte)(v >> 8));
+ s.WriteByte((byte)v);
+ }
+
+ private static void WriteBE64Wia(Stream s, ulong v)
+ {
+ WriteBE32Wia(s, (uint)(v >> 32));
+ WriteBE32Wia(s, (uint)v);
+ }
+
+#if !NET20
+ private static void WriteLE32Wia(Stream s, uint v)
+ {
+ s.WriteByte((byte)v);
+ s.WriteByte((byte)(v >> 8));
+ s.WriteByte((byte)(v >> 16));
+ s.WriteByte((byte)(v >> 24));
+ }
+#endif
+
+ // -----------------------------------------------------------------------
+ // Inner work types — explicit structs, no ValueTuple (net20 compatibility)
+ // -----------------------------------------------------------------------
+
+ // Key: (byte sameByte, int bytesRead) — replaces ValueTuple
+    private struct WiaDedupKey2 : IEquatable<WiaDedupKey2>
+ {
+ public byte SameByte;
+ public int BytesRead;
+
+ public WiaDedupKey2(byte sameByte, int bytesRead)
+ {
+ SameByte = sameByte;
+ BytesRead = bytesRead;
+ }
+
+ public bool Equals(WiaDedupKey2 other) => SameByte == other.SameByte && BytesRead == other.BytesRead;
+ public override bool Equals(object? obj) => obj is WiaDedupKey2 k && Equals(k);
+ public override int GetHashCode() => (SameByte * 397) ^ BytesRead;
+ }
+
+ // Key: (ulong partKeyHash, byte sampleByte, int bytesRead) — replaces ValueTuple
+    private struct WiaDedupKey3 : IEquatable<WiaDedupKey3>
+ {
+ public ulong PartKeyHash;
+ public byte SampleByte;
+ public int BytesRead;
+
+ public WiaDedupKey3(ulong pkh, byte sb, int br)
+ {
+ PartKeyHash = pkh;
+ SampleByte = sb;
+ BytesRead = br;
+ }
+
+ public bool Equals(WiaDedupKey3 other) => PartKeyHash == other.PartKeyHash && SampleByte == other.SampleByte && BytesRead == other.BytesRead;
+ public override bool Equals(object? obj) => obj is WiaDedupKey3 k && Equals(k);
+ public override int GetHashCode() => (int)(PartKeyHash ^ (PartKeyHash >> 32)) ^ (SampleByte * 397) ^ BytesRead;
+ }
+
+ // Value: (uint offset, uint dataSize) — replaces ValueTuple
+ private struct WiaDedup2
+ {
+ public uint Offset;
+ public uint DataSize;
+
+ public WiaDedup2(uint offset, uint dataSize) { Offset = offset; DataSize = dataSize; }
+ }
+
+ // Group entry holding DataOffset, DataSize, RvzPackedSize
+ private struct WiaRvzGroupEntry
+ {
+ public uint DataOffset;
+ public uint DataSize;
+ public uint RvzPackedSize;
+
+ public WiaRvzGroupEntry(uint dataOffset, uint dataSize, uint rvzPackedSize)
+ {
+ DataOffset = dataOffset;
+ DataSize = dataSize;
+ RvzPackedSize = rvzPackedSize;
+ }
+ }
+
+ // Raw data entry local struct (avoids confusion with model RawDataEntry)
+ private struct WiaRawDataEntry
+ {
+ public ulong DataOffset;
+ public ulong DataSize;
+ public uint GroupIndex;
+ public uint NumberOfGroups;
+ }
+
+ // Flat work item for Parallel.For — replaces (int b, int c) ValueTuple
+ private struct WiaFlatWorkItem
+ {
+ public int BatchIndex;
+ public int ChunkIndex;
+
+ public WiaFlatWorkItem(int b, int c) { BatchIndex = b; ChunkIndex = c; }
+ }
+
+ private sealed class GcGroupWorkEntry
+ {
+ public int BytesRead;
+ public long SourceOffset;
+ public bool IsAllSame;
+ public byte SameByte;
+ public bool IsDedupHit;
+ public WiaRvzGroupEntry DedupEntry;
+ public byte[]? MainData;
+ public uint RvzPackedSize;
+ public byte[]? CompressedData = null;
+ }
+
+#if !NET20
+ private sealed class WiiChunkWork
+ {
+ public bool IsAllZeros;
+ public bool IsDecDedupHit;
+ public uint DecDedupOffset;
+ public uint DecDedupDataSize;
+ public byte[] ExceptionListBytes = new byte[0];
+ public int UnpaddedExLen;
+ public byte[] MainDataBytes = new byte[0];
+ public uint RvzPackedSize;
+ public byte[]? CompressedData = null;
+ public bool DecAllSame;
+ public WiaDedupKey3 DecDedupKey;
+ }
+
+ private sealed class WiiBatchItem
+ {
+ public int BytesRead;
+ public long SrcOffset;
+ public bool IsInterDedupHit;
+ public WiaDedup2 DedupResult;
+ public bool EncAllSame;
+ public WiaDedupKey3 DedupKey;
+ public byte[]? DecryptedAll;
+ public List? AllExceptions;
+ public int NumChunks;
+ public WiiChunkWork[]? PartWork;
+ }
+
+ private sealed class WiiPartInfo
+ {
+ public ulong PartitionOffset;
+ public byte[] TitleKey = new byte[0];
+ public ulong DataOffset;
+ public ulong DataSize;
+ public ulong DataStart;
+ public ulong DataEnd;
+ public uint FirstGroupIndex;
+ public uint NumberOfGroups;
+ }
+
+ private sealed class RawRegionInfo
+ {
+ public ulong Offset;
+ public ulong Size;
+ public uint FirstGroupIndex;
+ public uint NumberOfGroups;
+ }
+
+ private sealed class DiscRegionEntry
+ {
+ public bool IsPartition;
+ public long Offset;
+ public WiiPartInfo? PartitionInfo;
+ public RawRegionInfo? RawInfo;
+ }
+#endif
+ }
+}
diff --git a/SabreTools.Wrappers/WIA.cs b/SabreTools.Wrappers/WIA.cs
new file mode 100644
index 000000000..ae204b7d0
--- /dev/null
+++ b/SabreTools.Wrappers/WIA.cs
@@ -0,0 +1,952 @@
+using System;
+using System.IO;
+#if !NET20
+using System.Security.Cryptography;
+#endif
+using SabreTools.Data.Models.NintendoDisc;
+using SabreTools.Data.Models.WIA;
+using WiaConstants = SabreTools.Data.Models.WIA.Constants;
+using WiaReader = SabreTools.Serialization.Readers.WIA;
+
+namespace SabreTools.Wrappers
+{
+    public partial class WIA : WrapperBase<DiscImage>
+ {
+ #region Descriptive Properties
+
+ ///
+ public override string DescriptionString => "WIA / RVZ Compressed GameCube / Wii Disc Image";
+
+ #endregion
+
+ #region Extension Properties
+
+ ///
+ public WiaHeader1 Header1 => Model.Header1;
+
+ ///
+ public WiaHeader2 Header2 => Model.Header2;
+
+ ///
+ public bool IsRvz => Model.IsRvz;
+
+ ///
+ public PartitionEntry[]? PartitionEntries => Model.PartitionEntries;
+
+ ///
+ public RawDataEntry[] RawDataEntries => Model.RawDataEntries;
+
+ ///
+ /// Total uncompressed ISO size in bytes
+ ///
+ public ulong IsoFileSize => Model.Header1.IsoFileSize;
+
+ ///
+ /// Disc header parsed from the 128-byte raw disc header stored in Header2.
+ ///
+ public DiscHeader? DiscHeader
+ {
+ get
+ {
+ if (_discHeader is not null)
+ return _discHeader;
+ byte[]? raw = Header2.DiscHeader;
+ if (raw is null || raw.Length < 0x20)
+ return null;
+ using var ms = new MemoryStream(raw);
+ _discHeader = Serialization.Readers.NintendoDisc.ParseDiscHeaderOnly(ms);
+ return _discHeader;
+ }
+ }
+
+ private DiscHeader? _discHeader;
+
+ #endregion
+
+ #region Constructors
+
+ ///
+ public WIA(DiscImage model, byte[] data) : base(model, data) { }
+
+ ///
+ public WIA(DiscImage model, byte[] data, int offset) : base(model, data, offset) { }
+
+ ///
+ public WIA(DiscImage model, byte[] data, int offset, int length) : base(model, data, offset, length) { }
+
+ ///
+ public WIA(DiscImage model, Stream data) : base(model, data) { }
+
+ ///
+ public WIA(DiscImage model, Stream data, long offset) : base(model, data, offset) { }
+
+ ///
+ public WIA(DiscImage model, Stream data, long offset, long length) : base(model, data, offset, length) { }
+
+ #endregion
+
+ #region Static Constructors
+
+ ///
+ /// Create a WIA/RVZ wrapper from a byte array and offset
+ ///
+ /// Byte array representing the WIA or RVZ image
+ /// Offset within the array to parse
+ /// A WIA wrapper on success, null on failure
+ public static WIA? Create(byte[]? data, int offset)
+ {
+ // If the data is invalid
+ if (data is null || data.Length == 0)
+ return null;
+
+ // If the offset is out of bounds
+ if (offset < 0 || offset >= data.Length)
+ return null;
+
+ // Create a memory stream and use that
+ var dataStream = new MemoryStream(data, offset, data.Length - offset);
+ return Create(dataStream);
+ }
+
+ ///
+ /// Create a WIA/RVZ wrapper from a Stream
+ ///
+ /// Stream representing the WIA or RVZ image
+ /// A WIA wrapper on success, null on failure
+ public static WIA? Create(Stream? data)
+ {
+ // If the data is invalid
+ if (data is null || !data.CanRead)
+ return null;
+
+ try
+ {
+ long currentOffset = data.Position;
+
+ var model = new WiaReader().Deserialize(data);
+ if (model is null)
+ return null;
+
+ // The reader parsed the compressed table blobs as raw bytes.
+ // Re-read and decompress them here now that we have the compression parameters.
+ DecompressTables(model, data, currentOffset);
+
+ return new WIA(model, data, currentOffset);
+ }
+ catch
+ {
+ return null;
+ }
+ }
+
+ ///
+ /// Re-reads the partition entries, raw data entries, and group entries from the source
+ /// stream, decompresses them using the algorithm specified in Header2, and replaces the
+ /// (garbage) values that the reader left in the model.
+ ///
+ private static void DecompressTables(DiscImage model, Stream data, long baseOffset)
+ {
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+ var comp = model.Header2.CompressionType;
+
+ // None / Purge tables are stored as plain big-endian structs — already parsed correctly.
+ if (comp == WiaRvzCompressionType.None || comp == WiaRvzCompressionType.Purge)
+ return;
+
+ var compData = model.Header2.CompressorData ?? new byte[7];
+ byte compDataSize = model.Header2.CompressorDataSize;
+
+ // --- Raw data entries (stored compressed) ---
+ if (model.Header2.NumberOfRawDataEntries > 0 &&
+ model.Header2.RawDataEntriesOffset > 0 &&
+ model.Header2.RawDataEntriesSize > 0)
+ {
+ int count = (int)model.Header2.NumberOfRawDataEntries;
+ int compressedSize = (int)model.Header2.RawDataEntriesSize;
+ int expectedSize = count * WiaConstants.RawDataEntrySize;
+
+ data.Seek(baseOffset + (long)model.Header2.RawDataEntriesOffset, SeekOrigin.Begin);
+ byte[] buf = new byte[compressedSize];
+ int read = data.Read(buf, 0, compressedSize);
+ if (read < compressedSize)
+ return;
+
+ byte[] plain = WiaRvzCompressionHelper.Decompress(
+ comp, buf, 0, compressedSize, compData, compDataSize);
+ if (plain is null || plain.Length < expectedSize)
+ return;
+
+ model.RawDataEntries = ParseRawDataEntries(plain, count);
+ }
+
+ // --- Group entries (stored compressed) ---
+ if (model.Header2.NumberOfGroupEntries > 0 &&
+ model.Header2.GroupEntriesOffset > 0 &&
+ model.Header2.GroupEntriesSize > 0)
+ {
+ int count = (int)model.Header2.NumberOfGroupEntries;
+ int compressedSize = (int)model.Header2.GroupEntriesSize;
+ int entrySize = model.IsRvz ? WiaConstants.RvzGroupEntrySize : WiaConstants.WiaGroupEntrySize;
+ int expectedSize = count * entrySize;
+
+ data.Seek(baseOffset + (long)model.Header2.GroupEntriesOffset, SeekOrigin.Begin);
+ byte[] buf = new byte[compressedSize];
+ int read = data.Read(buf, 0, compressedSize);
+ if (read < compressedSize)
+ return;
+
+ byte[] plain = WiaRvzCompressionHelper.Decompress(
+ comp, buf, 0, compressedSize, compData, compDataSize);
+ if (plain is null || plain.Length < expectedSize)
+ return;
+
+ if (model.IsRvz)
+ model.RvzGroupEntries = ParseRvzGroupEntries(plain, count);
+ else
+ model.GroupEntries = ParseWiaGroupEntries(plain, count);
+ }
+#endif
+ }
+
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+ /// Parses raw data entries from a plain (already decompressed) byte array.
+ private static RawDataEntry[] ParseRawDataEntries(byte[] plain, int count)
+ {
+ var entries = new RawDataEntry[count];
+ for (int i = 0; i < count; i++)
+ {
+ int o = i * WiaConstants.RawDataEntrySize;
+ var e = new RawDataEntry();
+ e.DataOffset = ReadUInt64BE(plain, o);
+ e.DataSize = ReadUInt64BE(plain, o + 8);
+ e.GroupIndex = ReadUInt32BE(plain, o + 16);
+ e.NumberOfGroups = ReadUInt32BE(plain, o + 20);
+ entries[i] = e;
+ }
+
+ return entries;
+ }
+
+ /// Parses WIA group entries from a plain (already decompressed) byte array.
+ private static WiaGroupEntry[] ParseWiaGroupEntries(byte[] plain, int count)
+ {
+ var entries = new WiaGroupEntry[count];
+ for (int i = 0; i < count; i++)
+ {
+ int o = i * WiaConstants.WiaGroupEntrySize;
+ var e = new WiaGroupEntry();
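+                // Offsets in group entries are stored in 4-byte units (hence the << 2),
+                // which keeps the entry at 8 bytes while addressing offsets up to 16 GiB.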
+ e.DataOffset = (ulong)ReadUInt32BE(plain, o) << 2;
+ e.DataSize = ReadUInt32BE(plain, o + 4);
+ entries[i] = e;
+ }
+
+ return entries;
+ }
+
+ /// Parses RVZ group entries from a plain (already decompressed) byte array.
+ private static RvzGroupEntry[] ParseRvzGroupEntries(byte[] plain, int count)
+ {
+ var entries = new RvzGroupEntry[count];
+ for (int i = 0; i < count; i++)
+ {
+ int o = i * WiaConstants.RvzGroupEntrySize;
+ var e = new RvzGroupEntry();
+ e.DataOffset = (ulong)ReadUInt32BE(plain, o) << 2;
+ e.DataSize = ReadUInt32BE(plain, o + 4);
+ e.RvzPackedSize = ReadUInt32BE(plain, o + 8);
+ entries[i] = e;
+ }
+
+ return entries;
+ }
+#endif
+
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+ private static ulong ReadUInt64BE(byte[] b, int o)
+ {
+ return ((ulong)b[o] << 56) | ((ulong)b[o + 1] << 48) | ((ulong)b[o + 2] << 40) | ((ulong)b[o + 3] << 32)
+ | ((ulong)b[o + 4] << 24) | ((ulong)b[o + 5] << 16) | ((ulong)b[o + 6] << 8) | b[o + 7];
+ }
+
+ private static uint ReadUInt32BE(byte[] b, int o)
+ {
+ return ((uint)b[o] << 24) | ((uint)b[o + 1] << 16) | ((uint)b[o + 2] << 8) | b[o + 3];
+ }
+#endif
+
+ #endregion
+
+ #region Inner Wrapper
+
+ // Cache for on-demand decompression in ReadVirtual.
+ private uint _cachedRawGroupIndex = uint.MaxValue;
+ private byte[]? _cachedRawGroup;
+ private uint _cachedEncGroupIndex = uint.MaxValue;
+ private byte[]? _cachedEncGroup;
+
+ ///
+ /// Returns a NintendoDisc wrapper backed by a virtual stream that decompresses
+ /// WIA/RVZ groups on demand, avoiding loading the entire ISO into memory.
+ ///
+ public NintendoDisc? GetInnerWrapper()
+ {
+ if (Model.Header1.IsoFileSize == 0)
+ return null;
+
+ var vStream = new WiaVirtualStream(this);
+ return NintendoDisc.Create(vStream);
+ }
+
+ ///
+        /// Reads count bytes of the virtual decompressed ISO at offset
+        /// into buffer, decompressing
+ /// WIA/RVZ groups on demand. Returns the number of bytes read.
+ ///
+ internal int ReadVirtual(long offset, byte[] buffer, int bufferOffset, int count)
+ {
+ long isoSize = (long)Model.Header1.IsoFileSize;
+ if (offset >= isoSize || count <= 0)
+ return 0;
+
+ count = (int)Math.Min(count, isoSize - offset);
+ int totalRead = 0;
+
+ while (totalRead < count)
+ {
+ long pos = offset + totalRead;
+ int got = ReadVirtualChunk(pos, buffer, bufferOffset + totalRead, count - totalRead);
+ if (got <= 0)
+ {
+                    // Emit one zero byte and advance to avoid infinite loops over unmapped gaps.
+ buffer[bufferOffset + totalRead] = 0;
+ totalRead++;
+ }
+ else
+ {
+ totalRead += got;
+ }
+ }
+
+ return totalRead;
+ }
+
+ ///
+        /// Reads bytes for one contiguous segment of the virtual ISO starting at pos.
+ /// Returns 0 if the position is not covered by any known data entry (caller fills with zeros).
+ ///
+ private int ReadVirtualChunk(long pos, byte[] buffer, int bufferOffset, int count)
+ {
+ // 1. Disc header (first 0x80 bytes stored verbatim in Header2.DiscHeader)
+ if (pos < WiaConstants.DiscHeaderStoredSize && Model.Header2.DiscHeader is { Length: > 0 })
+ {
+ int available = (int)Math.Min(WiaConstants.DiscHeaderStoredSize - pos, count);
+ int srcAvail = Math.Min(available, Model.Header2.DiscHeader.Length - (int)pos);
+ if (srcAvail > 0)
+ Array.Copy(Model.Header2.DiscHeader, (int)pos, buffer, bufferOffset, srcAvail);
+ if (available > srcAvail)
+ Array.Clear(buffer, bufferOffset + srcAvail, available - srcAvail);
+ return available;
+ }
+
+ uint chunkSize = Model.Header2.ChunkSize;
+ var comp = Model.Header2.CompressionType;
+ byte[] compData = Model.Header2.CompressorData ?? new byte[7];
+ byte compDataSize = Model.Header2.CompressorDataSize;
+
+ // 2. Raw data entries (non-partition disc data)
+ if (Model.RawDataEntries is { Length: > 0 })
+ {
+ foreach (var rde in Model.RawDataEntries)
+ {
+ if (rde.DataSize == 0 || rde.NumberOfGroups == 0)
+ continue;
+
+ long rdeStart = (long)rde.DataOffset;
+ long rdeEnd = rdeStart + (long)rde.DataSize;
+ if (pos < rdeStart || pos >= rdeEnd)
+ continue;
+
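+                // Groups for a raw-data entry are addressed from the preceding 0x8000
+                // boundary, so translate pos into that aligned space before computing
+                // the group number and intra-group offset.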
+ long skippedData = rdeStart % 0x8000;
+ long adjustedBase = rdeStart - skippedData;
+ long adjustedPos = pos - adjustedBase;
+ uint g = (uint)(adjustedPos / chunkSize);
+ int offsetInGroup = (int)(adjustedPos % chunkSize);
+
+ if (g >= rde.NumberOfGroups)
+ continue;
+
+ uint groupFileIdx = rde.GroupIndex + g;
+ byte[]? groupBytes = GetCachedRawGroup(groupFileIdx, comp, compData, compDataSize, chunkSize);
+ if (groupBytes is null)
+ return 0;
+
+ int available = groupBytes.Length - offsetInGroup;
+ if (available <= 0)
+ return 0;
+
+ int remainingInEntry = (int)Math.Min(rdeEnd - pos, count);
+ // Also clamp to the end of this group
+ long groupIsoEnd = adjustedBase + ((long)(g + 1) * chunkSize);
+ int remainingInGroup = (int)Math.Min(groupIsoEnd - pos, remainingInEntry);
+ int toCopy = Math.Min(available, remainingInGroup);
+ if (toCopy <= 0)
+ return 0;
+
+ Array.Copy(groupBytes, offsetInGroup, buffer, bufferOffset, toCopy);
+ return toCopy;
+ }
+ }
+
+ // 3. Partition data entries (Wii encrypted partition data)
+ if (Model.PartitionEntries is { Length: > 0 })
+ {
+ foreach (var pe in Model.PartitionEntries)
+ {
+ int r = ReadPartitionChunk(pe.DataEntry0, pe.PartitionKey, pos,
+ buffer, bufferOffset, count, comp, compData, compDataSize, chunkSize);
+ if (r > 0) return r;
+ r = ReadPartitionChunk(pe.DataEntry1, pe.PartitionKey, pos,
+ buffer, bufferOffset, count, comp, compData, compDataSize, chunkSize);
+ if (r > 0) return r;
+ }
+ }
+
+ return 0;
+ }
+
+ ///
+        /// Reads length bytes of decrypted Wii partition data beginning at
+        /// partDataOffset, a byte offset in the 0x7C00-per-block partition-data space.
+ /// Spans across both DataEntry0 and DataEntry1 of the partition entry.
+ /// Maps directly to the decompressed WIA/RVZ group data — no re-encryption is performed.
+ ///
+ internal byte[]? ReadDecryptedPartitionBytes(PartitionEntry pe, long partDataOffset, int length)
+ {
+ if (length <= 0 || pe is null)
+ return null;
+
+ const int WiiBlockSize = 0x8000;
+ const int WiiBlockDataSize = 0x7C00;
+
+ uint chunkSize = Model.Header2.ChunkSize;
+ var comp = Model.Header2.CompressionType;
+ byte[] compData = Model.Header2.CompressorData ?? new byte[7];
+ byte compDataSize = Model.Header2.CompressorDataSize;
+ int blocksPerGroup = (int)(chunkSize / WiiBlockSize);
+
+ byte[] result = new byte[length];
+ int produced = 0;
+
+ // DataEntry0 covers [0 .. de0.NumberOfSectors * 0x7C00) in partition-data space.
+ // DataEntry1 (if present) immediately follows.
+ var de0 = pe.DataEntry0;
+ var de1 = pe.DataEntry1;
+ long de0DataSize = (long)de0.NumberOfSectors * WiiBlockDataSize;
+ long de1DataSize = de1 is not null ? (long)de1.NumberOfSectors * WiiBlockDataSize : 0;
+
+ while (produced < length)
+ {
+ long off = partDataOffset + produced;
+
+ // Determine which DataEntry covers this offset
+ PartitionDataEntry de;
+ long deRelOff; // offset within this DataEntry's decrypted data space
+ if (off < de0DataSize)
+ {
+ de = de0;
+ deRelOff = off;
+ }
+ else if (de1 is not null && de1.NumberOfGroups > 0 && off < de0DataSize + de1DataSize)
+ {
+ de = de1;
+ deRelOff = off - de0DataSize;
+ }
+ else
+ {
+ break; // beyond available data
+ }
+
+ long blockNum = deRelOff / WiiBlockDataSize;
+ int offsetInBlock = (int)(deRelOff % WiiBlockDataSize);
+ long groupRelative = blockNum / blocksPerGroup;
+ int blockInGroup = (int)(blockNum % blocksPerGroup);
+
+ if (groupRelative >= de.NumberOfGroups)
+ break;
+
+ uint groupFileIdx = de.GroupIndex + (uint)groupRelative;
+ long dataOffsetForLfg = groupRelative * blocksPerGroup * WiiBlockDataSize;
+
+ byte[]? decrypted = ReadDecryptedGroupData(groupFileIdx, comp, compData, compDataSize,
+ blocksPerGroup, WiiBlockDataSize, dataOffsetForLfg);
+ if (decrypted is null)
+ break;
+
+ int offsetInGroup = (blockInGroup * WiiBlockDataSize) + offsetInBlock;
+ int available = decrypted.Length - offsetInGroup;
+ if (available <= 0)
+ break;
+
+ int remainingInGroup = (blocksPerGroup * WiiBlockDataSize) - offsetInGroup;
+ int toCopy = Math.Min(length - produced, Math.Min(available, remainingInGroup));
+ if (toCopy <= 0)
+ break;
+
+ Array.Copy(decrypted, offsetInGroup, result, produced, toCopy);
+ produced += toCopy;
+ }
+
+ if (produced <= 0)
+ return null;
+ if (produced < length)
+ Array.Resize(ref result, produced);
+ return result;
+ }
+
+ private int ReadPartitionChunk(PartitionDataEntry de, byte[] partitionKey, long pos,
+ byte[] buffer, int bufferOffset, int count,
+ WiaRvzCompressionType comp, byte[] compData, byte compDataSize, uint chunkSize)
+ {
+ if (de.NumberOfSectors == 0 || de.NumberOfGroups == 0)
+ return 0;
+
+ const int WiiBlockSize = 0x8000;
+ if (chunkSize == 0)
+ return 0;
+
+ int blocksPerGroup = (int)(chunkSize / WiiBlockSize);
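+            // e.g. a 2 MiB chunk holds 64 encrypted 0x8000-byte Wii blocks.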
+ long isoDataStart = (long)de.FirstSector * WiiBlockSize;
+ long isoDataEnd = isoDataStart + ((long)de.NumberOfSectors * WiiBlockSize);
+
+ if (pos < isoDataStart || pos >= isoDataEnd)
+ return 0;
+
+ long offsetInPartition = pos - isoDataStart;
+ long blockNum = offsetInPartition / WiiBlockSize;
+ int offsetInBlock = (int)(offsetInPartition % WiiBlockSize);
+
+ long groupNum = blockNum / blocksPerGroup;
+ int blockInGroup = (int)(blockNum % blocksPerGroup);
+
+ if (groupNum >= de.NumberOfGroups)
+ return 0;
+
+ uint groupFileIdx = de.GroupIndex + (uint)groupNum;
+ byte[]? encryptedGroup = GetCachedEncGroup(groupFileIdx, de, partitionKey,
+ comp, compData, compDataSize, blocksPerGroup, chunkSize);
+ if (encryptedGroup is null)
+ return 0;
+
+ int offsetInEncGroup = (blockInGroup * WiiBlockSize) + offsetInBlock;
+ int available = encryptedGroup.Length - offsetInEncGroup;
+ if (available <= 0)
+ return 0;
+
+ long remainingInEntry = isoDataEnd - pos;
+ // Stay within this group
+ long groupIsoEnd = isoDataStart + ((groupNum + 1) * blocksPerGroup * WiiBlockSize);
+ long remainingInGroup = groupIsoEnd - pos;
+ int toCopy = (int)Math.Min(count, Math.Min(Math.Min(available, remainingInEntry), remainingInGroup));
+ if (toCopy <= 0)
+ return 0;
+
+ Array.Copy(encryptedGroup, offsetInEncGroup, buffer, bufferOffset, toCopy);
+ return toCopy;
+ }
+
+ private byte[]? GetCachedRawGroup(uint groupFileIdx,
+ WiaRvzCompressionType comp, byte[] compData, byte compDataSize, uint chunkSize)
+ {
+ if (_cachedRawGroupIndex == groupFileIdx)
+ return _cachedRawGroup;
+
+ byte[]? group = ReadGroupRaw(groupFileIdx, comp, compData, compDataSize, chunkSize);
+ _cachedRawGroupIndex = groupFileIdx;
+ _cachedRawGroup = group;
+ return group;
+ }
+
+ private byte[]? GetCachedEncGroup(uint groupFileIdx, PartitionDataEntry de, byte[] partitionKey,
+ WiaRvzCompressionType comp, byte[] compData, byte compDataSize, int blocksPerGroup, uint chunkSize)
+ {
+ if (_cachedEncGroupIndex == groupFileIdx)
+ return _cachedEncGroup;
+
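+            // 0x7C00 = data bytes per 0x8000 Wii block once the 0x400-byte hash block is stripped.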
+ long dataOffsetForLfg = (groupFileIdx - de.GroupIndex) * blocksPerGroup * 0x7C00;
+ byte[]? decrypted = ReadDecryptedGroupData(groupFileIdx, comp, compData, compDataSize,
+ blocksPerGroup, 0x7C00, dataOffsetForLfg);
+ if (decrypted is null)
+ return null;
+
+ byte[] encrypted = EncryptWiiGroup(decrypted, partitionKey, blocksPerGroup);
+ _cachedEncGroupIndex = groupFileIdx;
+ _cachedEncGroup = encrypted;
+ return encrypted;
+ }
+
+ ///
+ /// Reads and decompresses one raw (non-partition) group.
+ /// Returns chunkSize bytes of raw ISO data, or null on failure.
+ ///
+ private byte[]? ReadGroupRaw(uint groupIdx, WiaRvzCompressionType comp,
+ byte[] compressorData, byte compressorDataSize, uint chunkSize)
+ {
+ if (Model.IsRvz)
+ {
+ if (Model.RvzGroupEntries is null || groupIdx >= Model.RvzGroupEntries.Length)
+ return null;
+ var ge = Model.RvzGroupEntries[groupIdx];
+ bool isRvzCompressed = (ge.DataSize & 0x80000000u) != 0;
+ uint dataSize = ge.DataSize & 0x7FFFFFFFu;
+ if (dataSize == 0)
+ return new byte[chunkSize];
+ byte[] fileData = ReadRangeFromSource((long)ge.DataOffset, (int)dataSize);
+ return DecompressGroupBytes(fileData, 0, (int)dataSize, comp,
+ compressorData, compressorDataSize, (int)chunkSize, Model.IsRvz, isRvzCompressed,
+ ge.RvzPackedSize, groupIdx * chunkSize, false, chunkSize);
+ }
+ else
+ {
+ if (Model.GroupEntries is null || groupIdx >= Model.GroupEntries.Length)
+ return null;
+ var ge = Model.GroupEntries[groupIdx];
+ if (ge.DataSize == 0)
+ return new byte[chunkSize];
+ byte[] fileData = ReadRangeFromSource((long)ge.DataOffset, (int)ge.DataSize);
+ return DecompressGroupBytes(fileData, 0, (int)ge.DataSize, comp,
+ compressorData, compressorDataSize, (int)chunkSize, false, false,
+ 0, 0L, false, chunkSize);
+ }
+ }
+
+ ///
+ /// Reads and decompresses a Wii partition group, returning the hash-stripped decrypted data.
+ ///
+ private byte[]? ReadDecryptedGroupData(uint groupIdx, WiaRvzCompressionType comp,
+ byte[] compressorData, byte compressorDataSize, int blocksPerGroup, int blockDataSize,
+ long dataOffsetForLfg)
+ {
+ int decryptedGroupSize = blocksPerGroup * blockDataSize;
+
+ if (Model.IsRvz)
+ {
+ if (Model.RvzGroupEntries is null || groupIdx >= Model.RvzGroupEntries.Length)
+ return null;
+ var ge = Model.RvzGroupEntries[groupIdx];
+ bool isRvzCompressed = (ge.DataSize & 0x80000000u) != 0;
+ uint dataSize = ge.DataSize & 0x7FFFFFFFu;
+ if (dataSize == 0)
+ return new byte[decryptedGroupSize];
+ byte[] fileData = ReadRangeFromSource((long)ge.DataOffset, (int)dataSize);
+ return DecompressGroupBytes(fileData, 0, (int)dataSize, comp,
+ compressorData, compressorDataSize, decryptedGroupSize, Model.IsRvz, isRvzCompressed,
+ ge.RvzPackedSize, dataOffsetForLfg, true,
+ Model.Header2.ChunkSize);
+ }
+ else
+ {
+ if (Model.GroupEntries is null || groupIdx >= Model.GroupEntries.Length)
+ return null;
+ var ge = Model.GroupEntries[groupIdx];
+ if (ge.DataSize == 0)
+ return new byte[decryptedGroupSize];
+ byte[] fileData2 = ReadRangeFromSource((long)ge.DataOffset, (int)ge.DataSize);
+ return DecompressGroupBytes(fileData2, 0, (int)ge.DataSize, comp,
+ compressorData, compressorDataSize, decryptedGroupSize, false, false,
+ 0, 0L, true,
+ Model.Header2.ChunkSize);
+ }
+ }
+
+ ///
+ /// Decompresses raw group bytes according to the WIA compression type and strips any
+ /// exception-list header, returning the plain data payload.
+ ///
+ private static byte[]? DecompressGroupBytes(byte[] fileData, int offset, int length,
+ WiaRvzCompressionType comp, byte[] compressorData, byte compressorDataSize,
+ int expectedSize, bool isRvz, bool isRvzCompressed,
+ uint rvzPackedSize, long dataOffsetForLfg, bool isWiiPartition,
+ uint chunkSize = 2 * 1024 * 1024)
+ {
+ if (fileData is null || fileData.Length < length)
+ return null;
+
+ // Mirrors DolphinIsoLib WiaRvzReader::ReadGroupCore logic:
+ // Decompress first (Bzip2/LZMA/LZMA2/Zstd), then RVZ-unpack junk regions if present.
+ bool shouldDecompress = comp > WiaRvzCompressionType.Purge && (!isRvz || isRvzCompressed);
+
+ if (comp == WiaRvzCompressionType.None)
+ {
+ // NONE: exception lists precede data with 4-byte alignment for Wii partitions
+ int dataStart = isWiiPartition ? SkipExceptionLists(fileData, offset, length, chunkSize) : offset;
+ int mainLen = length - (dataStart - offset);
+ byte[] noneData = new byte[expectedSize];
+ Array.Copy(fileData, dataStart, noneData, 0, Math.Min(mainLen, expectedSize));
+ return noneData;
+ }
+ else if (comp == WiaRvzCompressionType.Purge)
+ {
+ // Exception list precedes the Purge payload; capture it for SHA-1, then decompress.
+ int purgeStart = isWiiPartition ? SkipExceptionLists(fileData, offset, length, chunkSize) : offset;
+ int exceptionLen = purgeStart - offset;
+ byte[]? exceptionBytes = exceptionLen > 0
+ ? new byte[exceptionLen] : null;
+ if (exceptionBytes != null)
+ Array.Copy(fileData, offset, exceptionBytes, 0, exceptionLen);
+ int purgeLen = length - exceptionLen;
+ return PurgeDecompressor.Decompress(fileData, purgeStart, purgeLen, expectedSize, exceptionBytes);
+ }
+ else
+ {
+ // Bzip2 / LZMA / LZMA2 / Zstd — delegate to WiaRvzCompressionHelper
+ byte[]? workingData;
+ if (shouldDecompress)
+ {
+ try
+ {
+ workingData = WiaRvzCompressionHelper.Decompress(
+ comp, fileData, offset, length, compressorData, compressorDataSize);
+ }
+ catch
+ {
+ return null;
+ }
+
+ if (workingData is null)
+ return null;
+ }
+ else
+ {
+ workingData = fileData;
+ }
+
+ // RVZ-pack step: junk regions are stored as LFG seeds rather than raw bytes.
+ if (isRvz && rvzPackedSize > 0)
+ {
+ // Exception lists are always present for Wii partition groups.
+ // When compressed (shouldDecompress=true), they are NOT padded to 4-byte alignment.
+ // When uncompressed (shouldDecompress=false), they ARE padded to 4-byte alignment.
+ int rvzDataStart = isWiiPartition
+ ? (shouldDecompress
+ ? SkipExceptionListsNoAlign(workingData, 0, workingData.Length, chunkSize)
+ : SkipExceptionLists(workingData, 0, workingData.Length, chunkSize))
+ : 0;
+ int rvzDataLen = workingData.Length - rvzDataStart;
+ byte[] rvzPayload = new byte[rvzDataLen];
+ Array.Copy(workingData, rvzDataStart, rvzPayload, 0, rvzDataLen);
+
+ var rvzDecomp = new RvzPackDecompressor(rvzPayload, rvzPackedSize, dataOffsetForLfg);
+ byte[] unpacked = new byte[expectedSize];
+ int bytesRead = rvzDecomp.Decompress(unpacked, 0, expectedSize);
+ if (bytesRead < expectedSize)
+ Array.Resize(ref unpacked, bytesRead);
+ return unpacked;
+ }
+
+                // Exception lists are always present for Wii partition groups; skip them here.
+ // Compressed groups: no 4-byte alignment. Uncompressed groups: 4-byte aligned.
+ int dataStart = isWiiPartition
+ ? (shouldDecompress
+ ? SkipExceptionListsNoAlign(workingData, 0, workingData.Length, chunkSize)
+ : SkipExceptionLists(workingData, 0, workingData.Length, chunkSize))
+ : 0;
+ int mainLen = workingData.Length - dataStart;
+ byte[] data = new byte[expectedSize];
+ Array.Copy(workingData, dataStart, data, 0, Math.Min(mainLen, expectedSize));
+ return data;
+ }
+ }
+
+ ///
+ /// Skips the packed exception-list header at the start of group data (NONE/Purge path).
+ /// Exception lists are 4-byte-aligned after the last list.
+ /// Returns the offset of the first data byte.
+ ///
+ private static int SkipExceptionLists(byte[] data, int offset, int length, uint chunkSize = 2 * 1024 * 1024)
+ {
+ // Number of exception lists = max(1, chunkSize / WiiGroupSize).
+ // For WIA chunkSize==2MiB this is always 1.
+ // For RVZ sub-2MiB chunks this is also 1 (chunkSize <= groupSize).
+ const uint WiiGroupSize = 2 * 1024 * 1024; // 0x200000
+ int numLists = Math.Max(1, (int)(chunkSize / WiiGroupSize));
+
+ int pos = offset;
+ for (int i = 0; i < numLists && pos + 2 <= offset + length; i++)
+ {
+ ushort count = (ushort)((data[pos] << 8) | data[pos + 1]);
+ pos += 2;
+ // Each exception entry is 2 + 20 = 22 bytes
+ pos += count * 22;
+ // 4-byte alignment after last list
+ if (i == numLists - 1)
+ pos = (pos + 3) & ~3;
+ }
+
+ return pos;
+ }
+
+ ///
+ /// Skips exception lists in compressed group data (Bzip2/LZMA/etc.) where
+ /// lists are NOT 4-byte aligned.
+ ///
+ private static int SkipExceptionListsNoAlign(byte[] data, int offset, int length, uint chunkSize = 2 * 1024 * 1024)
+ {
+ const uint WiiGroupSize = 2 * 1024 * 1024;
+ int numLists = Math.Max(1, (int)(chunkSize / WiiGroupSize));
+
+ int pos = offset;
+ for (int i = 0; i < numLists && pos + 2 <= offset + length; i++)
+ {
+ ushort count = (ushort)((data[pos] << 8) | data[pos + 1]);
+ pos += 2;
+ pos += count * 22;
+ }
+
+ return pos;
+ }
+
+ ///
+ /// Re-encrypts one decrypted hash-stripped Wii group back into standard ISO-layout
+ /// encrypted 0x8000-byte blocks. Mirrors Dolphin's VolumeWii::EncryptGroup.
+ ///
+ private static byte[] EncryptWiiGroup(byte[] decryptedData, byte[] key, int blocksPerGroup)
+ {
+#if NET20
+ // AES not available on net20; return a zero buffer so the wrapper can still be created
+ return new byte[blocksPerGroup * 0x8000];
+#else
+ const int WiiBlockSize = 0x8000;
+ const int WiiBlockDataSize = 0x7C00;
+ const int WiiBlockHashSize = 0x0400;
+ const int H0Count = 31;
+ const int H1Count = 8;
+ const int H2Count = 8;
+ const int HashLen = 20;
+
+ // --- Build H0 / H1 / H2 hash arrays ---
+ // H0[block][h0] = SHA1 of 0x400-byte chunk h0 within data block 'block'
+ byte[][][] h0 = new byte[blocksPerGroup][][];
+ for (int b = 0; b < blocksPerGroup; b++)
+ {
+ h0[b] = new byte[H0Count][];
+ int blockBase = b * WiiBlockDataSize;
+ for (int h = 0; h < H0Count; h++)
+ {
+ int src = blockBase + (h * 0x400);
+ int len = Math.Min(0x400, decryptedData.Length - src);
+ h0[b][h] = ComputeSha1(decryptedData, src < decryptedData.Length ? src : 0, Math.Max(0, len));
+ }
+ }
+
+ // H1[h1Group][slot] = SHA1 of block (h1Group*8+slot)'s 31 H0 hashes
+ byte[][][] h1 = new byte[H1Count][][];
+ for (int g = 0; g < H1Count; g++)
+ {
+ h1[g] = new byte[H1Count][];
+ for (int s = 0; s < H1Count; s++)
+ {
+ int blockIdx = (g * H1Count) + s;
+ if (blockIdx >= blocksPerGroup)
+ {
+ h1[g][s] = new byte[HashLen];
+ continue;
+ }
+
+ byte[] h0Concat = new byte[H0Count * HashLen];
+ for (int i = 0; i < H0Count; i++)
+ Array.Copy(h0[blockIdx][i], 0, h0Concat, i * HashLen, HashLen);
+ h1[g][s] = ComputeSha1(h0Concat, 0, h0Concat.Length);
+ }
+ }
+
+ // H2[h2Idx] = SHA1 of H1 group h2Idx's 8 hashes (same for every block)
+ byte[][] h2 = new byte[H2Count][];
+ for (int i = 0; i < H2Count; i++)
+ {
+ int grp = Math.Min(i, h1.Length - 1);
+ byte[] h1Concat = new byte[H1Count * HashLen];
+ for (int s = 0; s < H1Count; s++)
+ Array.Copy(h1[grp][s], 0, h1Concat, s * HashLen, HashLen);
+ h2[i] = ComputeSha1(h1Concat, 0, h1Concat.Length);
+ }
+
+ byte[] result = new byte[blocksPerGroup * WiiBlockSize];
+
+ using var aes = Aes.Create();
+ aes.Key = key;
+ aes.Mode = CipherMode.CBC;
+ aes.Padding = PaddingMode.None;
+
+ for (int b = 0; b < blocksPerGroup; b++)
+ {
+ // Serialize hash block
+ byte[] hashBlock = new byte[WiiBlockHashSize];
+ int off = 0;
+
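+                // Hash block layout (0x400 bytes total):
+                //   0x000 H0[31]   0x26C bytes
+                //   0x26C padding0 0x014 bytes
+                //   0x280 H1[8]    0x0A0 bytes
+                //   0x320 padding1 0x020 bytes
+                //   0x340 H2[8]    0x0A0 bytes
+                //   0x3E0 padding2 0x020 bytes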
+ // H0 (31 * 20 = 0x26C)
+ for (int i = 0; i < H0Count; i++) { Array.Copy(h0[b][i], 0, hashBlock, off, HashLen); off += HashLen; }
+
+ off += 0x14; // padding0
+
+ // H1 for this block's group (8 * 20 = 0xA0)
+ int h1Grp = b / H1Count;
+ if (h1Grp < h1.Length)
+ {
+ for (int i = 0; i < H1Count; i++) { Array.Copy(h1[h1Grp][i], 0, hashBlock, off, HashLen); off += HashLen; }
+ }
+ else
+ {
+ off += H1Count * HashLen;
+ }
+
+ off += 0x20; // padding1
+
+ // H2 (8 * 20 = 0xA0)
+ for (int i = 0; i < H2Count; i++) { Array.Copy(h2[i], 0, hashBlock, off, HashLen); off += HashLen; }
+                // Note: off is now 0x3E0 (start of padding2); the data-block IV is taken
+                // from offset 0x3D0 of the encrypted hash block below
+
+ // Encrypt hash block with IV = zero
+ aes.IV = new byte[16];
+ byte[] encHashBlock;
+ using (var enc = aes.CreateEncryptor())
+ encHashBlock = enc.TransformFinalBlock(hashBlock, 0, WiiBlockHashSize);
+
+ // Extract IV for data block from offset 0x3D0 of the encrypted hash block
+ byte[] iv = new byte[16];
+ Array.Copy(encHashBlock, 0x3D0, iv, 0, 16);
+
+ // Encrypt data block
+ int dataSrc = b * WiiBlockDataSize;
+ int dataLen = Math.Min(WiiBlockDataSize, decryptedData.Length - dataSrc);
+ byte[] dataBlock = new byte[WiiBlockDataSize];
+ if (dataLen > 0)
+ Array.Copy(decryptedData, dataSrc, dataBlock, 0, dataLen);
+
+ aes.IV = iv;
+ byte[] encDataBlock;
+ using (var enc = aes.CreateEncryptor())
+ encDataBlock = enc.TransformFinalBlock(dataBlock, 0, WiiBlockDataSize);
+
+ int dest = b * WiiBlockSize;
+ Array.Copy(encHashBlock, 0, result, dest, WiiBlockHashSize);
+ Array.Copy(encDataBlock, 0, result, dest + WiiBlockHashSize, WiiBlockDataSize);
+ }
+
+ return result;
+#endif
+ }
+
+#if !NET20
+ private static byte[] ComputeSha1(byte[] data, int offset, int count)
+ {
+ if (count == 0)
+ return new byte[20];
+
+ using var sha1 = SHA1.Create();
+ return sha1.ComputeHash(data, offset, count);
+ }
+#endif
+
+ #endregion
+ }
+}
diff --git a/SabreTools.Wrappers/WiaRvzCompressionHelper.cs b/SabreTools.Wrappers/WiaRvzCompressionHelper.cs
new file mode 100644
index 000000000..ecfb1f0bb
--- /dev/null
+++ b/SabreTools.Wrappers/WiaRvzCompressionHelper.cs
@@ -0,0 +1,199 @@
+using System;
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+using System.IO;
+using SabreTools.IO.Extensions;
+using SharpCompress.Compressors;
+using SharpCompress.Compressors.BZip2;
+using SharpCompress.Compressors.LZMA;
+using SharpCompress.Compressors.ZStandard;
+#endif
+using SabreTools.Data.Models.WIA;
+
+namespace SabreTools.Wrappers
+{
+ ///
+ /// Compress and decompress helpers for WIA / RVZ group and table data.
+ /// Mirrors Dolphin's WIACompression.cpp: Bzip2, LZMA (raw, no stream header), LZMA2, and Zstd.
+ ///
+ internal static class WiaRvzCompressionHelper
+ {
+ // Dictionary sizes per compression level 1–9 (index 0 unused).
+ // Mirrors Dolphin WIACompression.cpp dict_size choices.
+ private static readonly int[] DictSizes =
+ {
+ 0, // 0: unused
+ 1 << 16, // 1: 64 KiB
+ 1 << 20, // 2: 1 MiB
+ 1 << 22, // 3: 4 MiB
+ 1 << 22, // 4: 4 MiB
+ 1 << 23, // 5: 8 MiB
+ 1 << 23, // 6: 8 MiB
+ 1 << 24, // 7: 16 MiB
+ 1 << 25, // 8: 32 MiB
+ 1 << 26, // 9: 64 MiB
+ };
+
+ private static int GetDictSize(int level) =>
+ DictSizes[Math.Max(1, Math.Min(9, level))];
+
+        // Decodes a raw LZMA2 dict-size property byte into the dictionary size it represents.
+ private static uint Lzma2DictSize(byte p) => (uint)((2 | (p & 1)) << ((p / 2) + 11));
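+        // e.g. p = 24: (2 | 0) << 23 = 16 MiB; p = 23: (2 | 1) << 22 = 12 MiB.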
+
+ private static byte EncodeLzma2DictSize(uint d)
+ {
+ byte e = 0;
+ while (e < 40 && d > Lzma2DictSize(e))
+ e++;
+ return e;
+ }
+
+ ///
+ /// Fills the compressor-data bytes for WiaHeader2.CompressorData /
+ /// WiaHeader2.CompressorDataSize.
+ /// LZMA: 5 bytes. LZMA2: 1 byte. Others: 0 bytes.
+ ///
+ internal static void GetCompressorData(WiaRvzCompressionType type, int level,
+ out byte[] propData, out byte propSize)
+ {
+ propData = new byte[7];
+ int dictSize = GetDictSize(level);
+
+ switch (type)
+ {
+ case WiaRvzCompressionType.LZMA:
+ propData[0] = 0x5D; // propByte for default pb=2,lp=0,lc=3
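+                    // props byte = (pb * 5 + lp) * 9 + lc = (2 * 5 + 0) * 9 + 3 = 93 = 0x5D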
+ propData[1] = (byte)dictSize;
+ propData[2] = (byte)(dictSize >> 8);
+ propData[3] = (byte)(dictSize >> 16);
+ propData[4] = (byte)(dictSize >> 24);
+ propSize = 5;
+ break;
+
+ case WiaRvzCompressionType.LZMA2:
+ propData[0] = EncodeLzma2DictSize((uint)dictSize);
+ propSize = 1;
+ break;
+
+ default:
+ propSize = 0;
+ break;
+ }
+ }
+
+ /// Compress using the specified algorithm.
+ internal static byte[] Compress(WiaRvzCompressionType type, byte[] data, int offset,
+ int length, int level, byte[] compressorData, byte compressorDataSize)
+ {
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+ switch (type)
+ {
+ case WiaRvzCompressionType.Bzip2:
+ return CompressBzip2(data, offset, length);
+ case WiaRvzCompressionType.LZMA:
+ return CompressLzma(data, offset, length, level, isLzma2: false);
+ case WiaRvzCompressionType.LZMA2:
+ return CompressLzma(data, offset, length, level, isLzma2: true);
+ case WiaRvzCompressionType.Zstd:
+ return CompressZstd(data, offset, length, level);
+ default:
+ throw new ArgumentException($"Cannot compress type {type}", nameof(type));
+ }
+#else
+            throw new PlatformNotSupportedException("WIA/RVZ compression requires .NET Framework 4.6.2, .NET Standard 2.0, or a .NET Core target.");
+#endif
+ }
+
+ /// Decompress using the specified algorithm.
+ internal static byte[] Decompress(WiaRvzCompressionType type, byte[] data, int offset,
+ int length, byte[] compressorData, byte compressorDataSize)
+ {
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+ switch (type)
+ {
+ case WiaRvzCompressionType.Bzip2:
+ return DecompressBzip2(data, offset, length);
+ case WiaRvzCompressionType.LZMA:
+ {
+ byte[] props = new byte[compressorDataSize];
+ Array.Copy(compressorData, props, compressorDataSize);
+ return DecompressLzma(data, offset, length, props, isLzma2: false);
+ }
+ case WiaRvzCompressionType.LZMA2:
+ {
+ byte[] props = new byte[compressorDataSize];
+ Array.Copy(compressorData, props, compressorDataSize);
+ return DecompressLzma(data, offset, length, props, isLzma2: true);
+ }
+ case WiaRvzCompressionType.Zstd:
+ return DecompressZstd(data, offset, length);
+ default:
+ throw new ArgumentException($"Cannot decompress type {type}", nameof(type));
+ }
+#else
+            throw new PlatformNotSupportedException("WIA/RVZ decompression requires .NET Framework 4.6.2, .NET Standard 2.0, or a .NET Core target.");
+#endif
+ }
+
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+
+ private static byte[] CompressBzip2(byte[] data, int offset, int length)
+ {
+ using var outMs = new MemoryStream();
+ using (var bz2 = BZip2Stream.Create(outMs, CompressionMode.Compress, false, true))
+ {
+ bz2.Write(data, offset, length);
+ }
+ return outMs.ToArray();
+ }
+
+ private static byte[] DecompressBzip2(byte[] data, int offset, int length)
+ {
+ using var inMs = new MemoryStream(data, offset, length);
+ using var bz2 = BZip2Stream.Create(inMs, CompressionMode.Decompress, false, false);
+ using var outMs = new MemoryStream();
+ bz2.BlockCopy(outMs);
+ return outMs.ToArray();
+ }
+
+ private static byte[] CompressLzma(byte[] data, int offset, int length, int level, bool isLzma2)
+ {
+ int dictSize = GetDictSize(level);
+ using var outMs = new MemoryStream();
+ using (var lzma = LzmaStream.Create(new LzmaEncoderProperties(true, dictSize), isLzma2, outMs))
+ {
+ lzma.Write(data, offset, length);
+ }
+ return outMs.ToArray();
+ }
+
+ private static byte[] DecompressLzma(byte[] data, int offset, int length, byte[] props, bool isLzma2)
+ {
+ using var inMs = new MemoryStream(data, offset, length);
+ using var lzma = LzmaStream.Create(props, inMs, length, -1, null, isLzma2, false);
+ using var outMs = new MemoryStream();
+ lzma.BlockCopy(outMs);
+ return outMs.ToArray();
+ }
+
+ private static byte[] CompressZstd(byte[] data, int offset, int length, int level)
+ {
+ using var outMs = new MemoryStream();
+ using (var zstd = new ZStandardStream(outMs, CompressionMode.Compress))
+ {
+ zstd.Write(data, offset, length);
+ }
+ return outMs.ToArray();
+ }
+
+ private static byte[] DecompressZstd(byte[] data, int offset, int length)
+ {
+ using var inMs = new MemoryStream(data, offset, length);
+ using var zstd = new ZStandardStream(inMs);
+ using var outMs = new MemoryStream();
+ zstd.BlockCopy(outMs);
+ return outMs.ToArray();
+ }
+
+#endif
+ }
+}
diff --git a/SabreTools.Wrappers/WiaVirtualStream.cs b/SabreTools.Wrappers/WiaVirtualStream.cs
new file mode 100644
index 000000000..73f4dacc7
--- /dev/null
+++ b/SabreTools.Wrappers/WiaVirtualStream.cs
@@ -0,0 +1,80 @@
+using System;
+using System.IO;
+
+namespace SabreTools.Wrappers
+{
+ ///
+ /// A read-only seekable stream that decompresses WIA/RVZ groups on demand.
+ /// Avoids loading the entire decompressed disc image into memory.
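+    /// Created via WIA.GetInnerWrapper and consumed by NintendoDisc.Create.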
+ ///
+ internal sealed class WiaVirtualStream : Stream
+ {
+ private readonly WIA _wia;
+ private long _position;
+
+ public WiaVirtualStream(WIA wia)
+ {
+ _wia = wia ?? throw new ArgumentNullException(nameof(wia));
+ }
+
+ public override bool CanRead => true;
+ public override bool CanSeek => true;
+ public override bool CanWrite => false;
+ public override long Length => (long)_wia.IsoFileSize;
+ public override long Position
+ {
+ get => _position;
+ set
+ {
+ if (value < 0)
+ throw new ArgumentOutOfRangeException(nameof(value));
+ _position = value;
+ }
+ }
+
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ if (buffer is null)
+ throw new ArgumentNullException(nameof(buffer));
+ if (offset < 0)
+ throw new ArgumentOutOfRangeException(nameof(offset));
+ if (count < 0)
+ throw new ArgumentOutOfRangeException(nameof(count));
+ if (offset + count > buffer.Length)
+ throw new ArgumentException("offset + count exceeds buffer length");
+
+ long remaining = Length - _position;
+ if (remaining <= 0 || count <= 0)
+ return 0;
+
+ count = (int)Math.Min(count, remaining);
+ int read = _wia.ReadVirtual(_position, buffer, offset, count);
+ _position += read;
+ return read;
+ }
+
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ long newPos;
+ switch (origin)
+ {
+ case SeekOrigin.Begin: newPos = offset; break;
+ case SeekOrigin.Current: newPos = _position + offset; break;
+ case SeekOrigin.End: newPos = Length + offset; break;
+ default: throw new ArgumentOutOfRangeException(nameof(origin));
+ }
+
+ if (newPos < 0)
+ throw new IOException("Seek position cannot be negative.");
+
+ _position = newPos;
+ return _position;
+ }
+
+ public override void Flush() { }
+
+ public override void SetLength(long value) => throw new NotSupportedException();
+
+ public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
+ }
+}
diff --git a/SabreTools.Wrappers/WrapperFactory.cs b/SabreTools.Wrappers/WrapperFactory.cs
index 2ffa55300..920b6b626 100644
--- a/SabreTools.Wrappers/WrapperFactory.cs
+++ b/SabreTools.Wrappers/WrapperFactory.cs
@@ -29,6 +29,7 @@ public static class WrapperFactory
WrapperType.Executable => CreateExecutableWrapper(data),
WrapperType.FDS => FDS.Create(data),
WrapperType.GCF => GCF.Create(data),
+ WrapperType.GCZ => GCZ.Create(data),
WrapperType.GZip => GZip.Create(data),
WrapperType.InstallShieldArchiveV3 => InstallShieldArchiveV3.Create(data),
WrapperType.InstallShieldCAB => InstallShieldCabinet.Create(data),
@@ -44,6 +45,7 @@ public static class WrapperFactory
WrapperType.NCF => NCF.Create(data),
WrapperType.NESCart => NESCart.Create(data),
WrapperType.Nitro => Nitro.Create(data),
+ WrapperType.NintendoDisc => NintendoDisc.Create(data),
WrapperType.PAK => PAK.Create(data),
WrapperType.PFF => PFF.Create(data),
WrapperType.PIC => PIC.Create(data),
@@ -68,6 +70,8 @@ public static class WrapperFactory
WrapperType.WAD => WAD3.Create(data),
WrapperType.WiseOverlayHeader => WiseOverlayHeader.Create(data),
WrapperType.WiseScript => WiseScript.Create(data),
+ WrapperType.WIA => WIA.Create(data),
+ WrapperType.RVZ => WIA.Create(data),
WrapperType.XboxExecutable => XboxExecutable.Create(data),
WrapperType.XDVDFS => XDVDFS.Create(data),
WrapperType.XenonExecutable => XenonExecutable.Create(data),
@@ -98,7 +102,15 @@ public static class WrapperFactory
// Cache the current offset
long initialOffset = stream.Position;
- // Try to get an Xbox ISO wrapper first
+ // Try NintendoDisc (GameCube / Wii) first — detected by magic at 0x018 / 0x01C
+ var nintendoWrapper = NintendoDisc.Create(stream);
+ if (nintendoWrapper is not null)
+ return nintendoWrapper;
+
+ // Reset position in stream
+ stream.SeekIfPossible(initialOffset, SeekOrigin.Begin);
+
+ // Try to get an Xbox ISO wrapper
var xboxWrapper = XboxISO.Create(stream);
if (xboxWrapper is not null)
return xboxWrapper;
@@ -403,6 +415,17 @@ public static WrapperType GetFileType(byte[]? magic, string? extension)
#endregion
+ #region GCZ
+
+ // GCZ magic cookie (0xB10BC001 stored little-endian)
+ if (magic.StartsWith([0x01, 0xC0, 0x0B, 0xB1]))
+ return WrapperType.GCZ;
+
+ if (extension.Equals("gcz", StringComparison.OrdinalIgnoreCase))
+ return WrapperType.GCZ;
+
+ #endregion
+
#region GZip
if (magic.StartsWith(Data.Models.GZIP.Constants.SignatureBytes))
@@ -449,6 +472,36 @@ public static WrapperType GetFileType(byte[]? magic, string? extension)
#endregion
+ #region NintendoDisc
+
+ // Wii disc magic at offset 0x018 (0x5D1C9EA3 stored big-endian on disc)
+ if (magic.Length > 0x1B && magic[0x18] == 0x5D && magic[0x19] == 0x1C && magic[0x1A] == 0x9E && magic[0x1B] == 0xA3)
+ return WrapperType.NintendoDisc;
+
+ // GameCube disc magic at offset 0x01C (0xC2339F3D stored big-endian on disc)
+ if (magic.Length > 0x1F && magic[0x1C] == 0xC2 && magic[0x1D] == 0x33 && magic[0x1E] == 0x9F && magic[0x1F] == 0x3D)
+ return WrapperType.NintendoDisc;
+
+            // GameCube/Wii disc by GameId prefix: first byte is a known title type code,
+            // byte 1 is an uppercase ASCII letter, bytes 2-4 are ASCII digits or uppercase
+            // letters, and byte 5 is an ASCII digit. Covers redump ISOs that lack magic words.
+ if (magic.Length > 5
+ && IsNintendoDiscTitleType(magic[0])
+ && magic[1] >= 0x41 && magic[1] <= 0x5A // A-Z
+ && ((magic[2] >= 0x30 && magic[2] <= 0x39) || (magic[2] >= 0x41 && magic[2] <= 0x5A)) // 0-9 or A-Z
+ && ((magic[3] >= 0x30 && magic[3] <= 0x39) || (magic[3] >= 0x41 && magic[3] <= 0x5A)) // 0-9 or A-Z
+ && ((magic[4] >= 0x30 && magic[4] <= 0x39) || (magic[4] >= 0x41 && magic[4] <= 0x5A)) // 0-9 or A-Z
+ && magic[5] >= 0x30 && magic[5] <= 0x39 // 0-9
+ && (extension.Equals("iso", StringComparison.OrdinalIgnoreCase)
+ || extension.Equals("gcm", StringComparison.OrdinalIgnoreCase)))
+ return WrapperType.NintendoDisc;
+
+ // .gcm files are always GameCube disc images
+ if (extension.Equals("gcm", StringComparison.OrdinalIgnoreCase))
+ return WrapperType.NintendoDisc;
+
+ #endregion
+
#region ISO9660
if (extension.Equals("iso", StringComparison.OrdinalIgnoreCase))
@@ -965,6 +1018,24 @@ public static WrapperType GetFileType(byte[]? magic, string? extension)
#endregion
+ #region WIA
+
+ // WIA magic ("WIA\x01" stored little-endian: 0x01414957)
+ if (magic.StartsWith([0x57, 0x49, 0x41, 0x01]))
+ return WrapperType.WIA;
+
+ // RVZ magic ("RVZ\x01" stored little-endian: 0x015A5652)
+ if (magic.StartsWith([0x52, 0x56, 0x5A, 0x01]))
+ return WrapperType.RVZ;
+
+ if (extension.Equals("wia", StringComparison.OrdinalIgnoreCase))
+ return WrapperType.WIA;
+
+ if (extension.Equals("rvz", StringComparison.OrdinalIgnoreCase))
+ return WrapperType.RVZ;
+
+ #endregion
+
#region XboxExecutable
if (magic.StartsWith(Data.Models.XboxExecutable.Constants.MagicBytes))
@@ -1036,5 +1107,17 @@ public static WrapperType GetFileType(byte[]? magic, string? extension)
// We couldn't find a supported match
return WrapperType.UNKNOWN;
}
+
+ ///
+ /// Returns true if the byte is a known Nintendo disc title type code
+        /// (first byte of the 6-char GameId, e.g. 'G'=GameCube, 'D'=GameCube demo,
+        /// 'R'=Wii, 'S'=Wii, 'F'=Wii channel)
+ ///
+ private static bool IsNintendoDiscTitleType(byte b)
+ {
+ // Standard GameCube and Wii title type prefixes used by Nintendo and licensees
+ return b == (byte)'G' || b == (byte)'D' || b == (byte)'R'
+ || b == (byte)'S' || b == (byte)'F';
+ }
}
}
diff --git a/SabreTools.Wrappers/WrapperType.cs b/SabreTools.Wrappers/WrapperType.cs
index cf44f759b..db3469614 100644
--- a/SabreTools.Wrappers/WrapperType.cs
+++ b/SabreTools.Wrappers/WrapperType.cs
@@ -81,6 +81,11 @@ public enum WrapperType
///
GCF,
+ ///
+ /// GCZ compressed GameCube / Wii disc image
+ ///
+ GCZ,
+
///
/// gzip archive
///
@@ -162,6 +167,11 @@ public enum WrapperType
///
Nitro,
+ ///
+ /// Nintendo GameCube / Wii disc image
+ ///
+ NintendoDisc,
+
///
/// Half-Life Package File
///
@@ -288,6 +298,16 @@ public enum WrapperType
///
WiseScript,
+ ///
+ /// WIA compressed GameCube / Wii disc image
+ ///
+ WIA,
+
+ ///
+ /// RVZ compressed GameCube / Wii disc image
+ ///
+ RVZ,
+
///
/// XBox Executable
///