diff --git a/.gitignore b/.gitignore
index 28329487..37fba3da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -33,8 +33,8 @@ x86/
bld/
[Bb]in/
[Oo]bj/
-[Ll]og/
-[Ll]ogs/
+/[Ll]og/
+/[Ll]ogs/
# Visual Studio 2015/2017 cache/options directory
.vs/
diff --git a/src/LogExpert.Benchmarks/BufferIndexBenchmarks.cs b/src/LogExpert.Benchmarks/BufferIndexBenchmarks.cs
new file mode 100644
index 00000000..1b808d3e
--- /dev/null
+++ b/src/LogExpert.Benchmarks/BufferIndexBenchmarks.cs
@@ -0,0 +1,191 @@
+using BenchmarkDotNet.Attributes;
+
+using ColumnizerLib;
+
+using LogExpert.Benchmarks.Support;
+using LogExpert.Core.Classes.Log.Buffers;
+
+namespace LogExpert.Benchmarks;
+
+[MemoryDiagnoser]
+[RankColumn]
+public class BufferIndexBenchmarks : IDisposable
+{
+ private BufferIndex _index = null!;
+ private int _totalLines;
+
+ private bool _disposed;
+
+ [Params(100, 1_000, 10_000)]
+ public int BufferCount { get; set; }
+
+ private const int LINES_PER_BUFFER = 500;
+
+ [GlobalSetup]
+ public void Setup ()
+ {
+ _index = new BufferIndex(BufferCount, LINES_PER_BUFFER);
+ _totalLines = BufferCount * LINES_PER_BUFFER;
+
+ var fakeFileInfo = new FakeLogFileInfo();
+
+ using (var writeLock = _index.AcquireWriteLock())
+ {
+ for (int i = 0; i < BufferCount; i++)
+ {
+ var buffer = new LogBuffer(fakeFileInfo, LINES_PER_BUFFER)
+ {
+ StartLine = i * LINES_PER_BUFFER
+ };
+
+ for (int j = 0; j < LINES_PER_BUFFER; j++)
+ {
+ buffer.AddLine(new LogLine($"line {i * LINES_PER_BUFFER + j}".AsMemory(), i * LINES_PER_BUFFER + j), 0);
+ }
+
+ _index.Add(buffer);
+ }
+ }
+
+ // Validate setup
+ var snapshot = _index.CreateSnapshot();
+ if (snapshot.BufferCount != BufferCount)
+ {
+ throw new InvalidOperationException($"Setup failed: expected {BufferCount} buffers, got {snapshot.BufferCount}");
+ }
+ }
+
+ [GlobalCleanup]
+ public void Cleanup () => _index.Dispose();
+
+ ///
+ /// Simulates tail-follow: reading the last 1000 lines sequentially.
+ /// Should hit Layer 0 (thread-local cache) ~99% of the time.
+ ///
+ [Benchmark(Baseline = true)]
+ public LogBuffer? SequentialAccess ()
+ {
+ using var readlock = _index.AcquireReadLock();
+ LogBuffer? last = null;
+ var start = Math.Max(0, _totalLines - 1000);
+ for (int i = start; i < _totalLines; i++)
+ {
+ var logBufferEntry = _index.TryFindBuffer(i);
+ if (logBufferEntry.Found)
+ {
+ last = logBufferEntry.Buffer;
+ }
+ }
+
+ return last;
+ }
+
+ ///
+ /// Simulates search/goto: deterministic stride across the full file.
+ /// Co-prime stride visits buffers in non-sequential, non-repeating order.
+ /// Exercises Layers 2 and 3 heavily.
+ ///
+ [Benchmark]
+ public LogBuffer? StrideAccess ()
+ {
+ using var readLock = _index.AcquireReadLock();
+ LogBuffer? last = null;
+ var stride = _totalLines / 3 + 1;
+ var lineNum = 0;
+ for (int i = 0; i < 1000; i++)
+ {
+ var logBufferEntry = _index.TryFindBuffer(lineNum);
+ if (logBufferEntry.Found)
+ {
+ last = logBufferEntry.Buffer;
+ }
+
+ lineNum = (lineNum + stride) % _totalLines;
+ }
+
+ return last;
+ }
+
+ ///
+ /// Worst case for Layer 0: always crossing buffer boundaries.
+ /// Exercises Layer 1 (adjacent prediction).
+ ///
+ [Benchmark]
+ public LogBuffer? BoundaryAccess ()
+ {
+ using var readLock = _index.AcquireReadLock();
+ LogBuffer? last = null;
+
+ for (int i = 0; i < 1000; i++)
+ {
+ int lineNum = i * (_totalLines / 1000);
+ var logBufferEntry = _index.TryFindBuffer(lineNum);
+ if (logBufferEntry.Found)
+ {
+ last = logBufferEntry.Buffer;
+ }
+ }
+
+ return last;
+ }
+
+ ///
+ /// Simulates UI scrolling: page-sized jumps forward through the file.
+ /// 50-line pages with 3x page jumps (fast scroll drag).
+ /// Exercises Layer 0 within pages and Layers 1-2 on transitions.
+ ///
+ [Benchmark]
+ public LogBuffer? ScrollAccess ()
+ {
+ using var readLock = _index.AcquireReadLock();
+ LogBuffer? last = null;
+ const int pageSize = 50;
+ const int pageJump = pageSize * 3;
+ var pageStart = 0;
+
+ for (int page = 0; page < 20 && pageStart < _totalLines; page++)
+ {
+ var pageEnd = Math.Min(pageStart + pageSize, _totalLines);
+ for (int line = pageStart; line < pageEnd; line++)
+ {
+ var logBufferEntry = _index.TryFindBuffer(line);
+ if (logBufferEntry.Found)
+ {
+ last = logBufferEntry.Buffer;
+ }
+ }
+
+ pageStart += pageJump;
+ }
+
+ return last;
+ }
+
+ ///
+ /// Measures LRU eviction cost at current scale.
+ ///
+ [Benchmark]
+ public void EvictAndRepopulate ()
+ {
+ _index.EvictLeastRecentlyUsed();
+ }
+
+ public void Dispose ()
+ {
+ Dispose(true);
+ GC.SuppressFinalize(this);
+ }
+
+ protected virtual void Dispose (bool disposing)
+ {
+ if (!_disposed)
+ {
+ if (disposing)
+ {
+ _index?.Dispose();
+ }
+
+ _disposed = true;
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/LogExpert.Benchmarks/BufferIndexContentionBenchmarks.cs b/src/LogExpert.Benchmarks/BufferIndexContentionBenchmarks.cs
new file mode 100644
index 00000000..9885ddeb
--- /dev/null
+++ b/src/LogExpert.Benchmarks/BufferIndexContentionBenchmarks.cs
@@ -0,0 +1,170 @@
+using BenchmarkDotNet.Attributes;
+using BenchmarkDotNet.Diagnosers;
+
+using ColumnizerLib;
+
+using LogExpert.Benchmarks.Support;
+using LogExpert.Core.Classes.Log.Buffers;
+
+namespace LogExpert.Benchmarks;
+
+/// <summary>
+/// Measures ReaderWriterLockSlim contention under concurrent read load.
+/// Compares single-threaded throughput against N concurrent readers
+/// to determine if RWLS is a bottleneck worth optimizing.
+/// </summary>
+[MemoryDiagnoser]
+[ThreadingDiagnoser] // Reports lock contention + thread pool stats
+[RankColumn]
+public class BufferIndexContentionBenchmarks : IDisposable
+{
+ private BufferIndex _index = null!;
+ private int _totalLines;
+ private bool _disposed;
+
+ private const int BUFFERS = 10_000;
+ private const int LINES_PER_BUFFER = 500;
+ private const int READS_PER_TASK = 1_000;
+
+ [GlobalSetup]
+ public void Setup ()
+ {
+ _index = new BufferIndex(BUFFERS, LINES_PER_BUFFER);
+ _totalLines = BUFFERS * LINES_PER_BUFFER;
+
+ var fakeFileInfo = new FakeLogFileInfo();
+ using var writeLock = _index.AcquireWriteLock();
+ for (int i = 0; i < BUFFERS; i++)
+ {
+ var buffer = new LogBuffer(fakeFileInfo, LINES_PER_BUFFER)
+ {
+ StartLine = i * LINES_PER_BUFFER
+ };
+ for (int j = 0; j < LINES_PER_BUFFER; j++)
+ {
+ buffer.AddLine(
+ new LogLine($"line {i * LINES_PER_BUFFER + j}".AsMemory(),
+ i * LINES_PER_BUFFER + j), 0);
+ }
+ _index.Add(buffer);
+ }
+ }
+
+ ///
+ /// Single-threaded baseline: sequential reads under one read lock.
+ /// This is the ideal throughput ceiling.
+ ///
+ [Benchmark(Baseline = true)]
+ public int SingleThreadedReads ()
+ {
+ int found = 0;
+ using var readLock = _index.AcquireReadLock();
+ var start = Math.Max(0, _totalLines - READS_PER_TASK);
+ for (int i = start; i < _totalLines; i++)
+ {
+ if (_index.TryFindBuffer(i).Found)
+ {
+ found++;
+ }
+ }
+
+ return found;
+ }
+
+ ///
+ /// N concurrent readers each acquiring their own read lock.
+ /// If RWLS has no contention, throughput ≈ N × single-threaded.
+ ///
+ [Benchmark]
+ [Arguments(2)]
+ [Arguments(4)]
+ [Arguments(8)]
+ [Arguments(12)]
+ public int ConcurrentReads (int threadCount)
+ {
+ var total = 0;
+ _ = Parallel.For(0, threadCount, _ =>
+ {
+ int found = 0;
+ using var readLock = _index.AcquireReadLock();
+ var start = Math.Max(0, _totalLines - READS_PER_TASK);
+ for (int i = start; i < _totalLines; i++)
+ {
+ if (_index.TryFindBuffer(i).Found)
+ {
+ found++;
+ }
+ }
+ _ = Interlocked.Add(ref total, found);
+ });
+ return total;
+ }
+
+ ///
+ /// Simulates production: N readers + 1 writer (tail-follow append).
+ /// Writer acquires write lock briefly every ~1000 reads.
+ /// This is the realistic contention scenario.
+ ///
+ [Benchmark]
+ [Arguments(4)]
+ [Arguments(8)]
+ public int ConcurrentReadsWithWriter (int readerCount)
+ {
+ using var cts = new CancellationTokenSource();
+ var total = 0;
+
+ // Writer task: periodically takes write lock (simulates new buffer append)
+ var writerTask = Task.Run(() =>
+ {
+ while (!cts.Token.IsCancellationRequested)
+ {
+ using var writeLock = _index.AcquireWriteLock();
+ // Simulate brief write work (no actual mutation to keep state clean)
+ Thread.SpinWait(100);
+ }
+ });
+
+ // Reader tasks
+ _ = Parallel.For(0, readerCount, _ =>
+ {
+ int found = 0;
+ using var readLock = _index.AcquireReadLock();
+ var start = Math.Max(0, _totalLines - READS_PER_TASK);
+ for (int i = start; i < _totalLines; i++)
+ {
+ if (_index.TryFindBuffer(i).Found)
+ {
+ found++;
+ }
+ }
+
+ _ = Interlocked.Add(ref total, found);
+ });
+
+ cts.Cancel();
+ writerTask.Wait();
+ return total;
+ }
+
+ [GlobalCleanup]
+ public void Cleanup () => _index.Dispose();
+
+ public void Dispose ()
+ {
+ Dispose(true);
+ GC.SuppressFinalize(this);
+ }
+
+ protected virtual void Dispose (bool disposing)
+ {
+ if (!_disposed)
+ {
+ if (disposing)
+ {
+ _index?.Dispose();
+ }
+
+ _disposed = true;
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/LogExpert.Benchmarks/LogExpert.Benchmarks.csproj b/src/LogExpert.Benchmarks/LogExpert.Benchmarks.csproj
index 1240cbd4..4bfb4225 100644
--- a/src/LogExpert.Benchmarks/LogExpert.Benchmarks.csproj
+++ b/src/LogExpert.Benchmarks/LogExpert.Benchmarks.csproj
@@ -16,6 +16,7 @@
+
diff --git a/src/LogExpert.Benchmarks/Program.cs b/src/LogExpert.Benchmarks/Program.cs
new file mode 100644
index 00000000..01954f88
--- /dev/null
+++ b/src/LogExpert.Benchmarks/Program.cs
@@ -0,0 +1,54 @@
+using BenchmarkDotNet.Running;
+
+namespace LogExpert.Benchmarks;
+
+public static class Program
+{
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Globalization", "CA1303:Do not pass literals as localized parameters", Justification = "Benchmarks")]
+ public static void Main (string[] args)
+ {
+ if (args == null || args.Length == 0)
+ {
+ Console.WriteLine("No benchmarks specified. Running all benchmarks...");
+
+ // Run all benchmarks if no arguments are provided
+ _ = BenchmarkRunner.Run<StreamReaderBenchmarks>();
+ _ = BenchmarkRunner.Run<ReadThroughputBenchmarks>();
+ _ = BenchmarkRunner.Run<BufferIndexBenchmarks>();
+ _ = BenchmarkRunner.Run<BufferIndexContentionBenchmarks>();
+ }
+ else
+ {
+ // Run specific benchmarks based on command-line arguments
+ _ = BenchmarkSwitcher.FromAssembly(typeof(Program).Assembly).Run(args);
+ }
+
+ Console.WriteLine("Replace with the name of the benchmark you want to run, e.g. ");
+ Console.WriteLine("StreamReaderBenchmarks: Benchmarks for stream readers");
+ Console.WriteLine("ReadThroughputBenchmarks: Benchmarks for read throughput");
+ Console.WriteLine("BufferIndexBenchmarks: Benchmarks for buffer index");
+ Console.WriteLine("BufferIndexContentionBenchmarks: Benchmarks for buffer index contention");
+ Console.WriteLine("Dry run:");
+ Console.WriteLine("dotnet run -c Release -- --filter \"**\" --job Dry --noOverwrite");
+ Console.WriteLine("Short run:");
+ Console.WriteLine("dotnet run -c Release -- --filter \"**\" --job Short --noOverwrite");
+ Console.WriteLine("Full baseline run:");
+ Console.WriteLine("dotnet run -c Release -- --filter \"**\" --noOverwrite");
+ }
+}
+
+/*
+ * Comment / Uncommen the benchmark to run, careful some can run longer
+ * 1.) a dry run
+ * dotnet run -c Release -- --filter "StreamReaderBenchmarks" --job Dry --noOverwrite
+ * 2.) a short run
+ * dotnet run -c Release -- --filter "StreamReaderBenchmarks" --job Short --noOverwrite
+ * 3.) a full baseline run
+ * dotnet run -c Release -- --filter "StreamReaderBenchmarks" --noOverwrite
+ *
+ * The full baseline run generates a MD file
+ * BenchmarkDotNet.Artifacts/results/*-report-github.md
+ *
+ * If changes are made with the LogfileReader / BufferIndex, always do a Benchmark to
+ * verify no performance regression is introduced, especially with large files.
+ */
diff --git a/src/LogExpert.Benchmarks/ReadThroughputBenchmarks.cs b/src/LogExpert.Benchmarks/ReadThroughputBenchmarks.cs
new file mode 100644
index 00000000..f9f0cf8f
--- /dev/null
+++ b/src/LogExpert.Benchmarks/ReadThroughputBenchmarks.cs
@@ -0,0 +1,128 @@
+using System.Text;
+
+using BenchmarkDotNet.Attributes;
+
+using LogExpert.Core.Classes.Log;
+using LogExpert.Core.Entities;
+using LogExpert.Core.Enums;
+
+namespace LogExpert.Benchmarks;
+
+///
+/// Measures LogfileReader.ReadFiles() throughput with different progress reporters.
+/// Uses real temp files to include actual I/O in the measurement.
+///
+[MemoryDiagnoser]
+[RankColumn]
+public class ReadThroughputBenchmarks
+{
+ private string _tempFile = null!;
+
+ [Params(10_000, 100_000, 1_000_000)]
+ public int LineCount { get; set; }
+
+ [GlobalSetup]
+ public void Setup ()
+ {
+ _tempFile = Path.GetTempFileName();
+ GenerateLogFile(_tempFile, LineCount);
+
+ // Initialize PluginRegistry for local file system support
+ // (or use NullPluginRegistry if constructor doesn't need it)
+ _ = PluginRegistry.PluginRegistry.Create(Path.GetDirectoryName(_tempFile)!, 500);
+ }
+
+ ///
+ /// Baseline: read with NullProgressReporter (zero event overhead).
+ ///
+ [Benchmark(Baseline = true)]
+ public int ReadWithNullReporter ()
+ {
+ using var reader = new LogfileReader(
+ _tempFile,
+ new EncodingOptions { Encoding = Encoding.UTF8 },
+ multiFile: false,
+ bufferCount: 500,
+ linesPerBuffer: 500,
+ new MultiFileOptions(),
+ ReaderType.System,
+ PluginRegistry.PluginRegistry.Instance,
+ maximumLineLength: 500,
+ progressReporter: Core.Classes.Log.ProgressReporters.NullProgressReporter.Instance);
+
+ reader.ReadFiles();
+ return reader.LineCount;
+ }
+
+ ///
+ /// Production path: read with PeriodicProgressReporter (default, no subscribers).
+ ///
+ [Benchmark]
+ public int ReadWithPeriodicReporter ()
+ {
+ using var reader = new LogfileReader(
+ _tempFile,
+ new EncodingOptions { Encoding = Encoding.UTF8 },
+ multiFile: false,
+ bufferCount: 500,
+ linesPerBuffer: 500,
+ new MultiFileOptions(),
+ ReaderType.System,
+ PluginRegistry.PluginRegistry.Instance,
+ maximumLineLength: 500);
+ // No progressReporter = default PeriodicProgressReporter
+
+ reader.ReadFiles();
+ return reader.LineCount;
+ }
+
+ ///
+ /// Post-change: read with block-based allocation (System reader uses CharBlockAllocator).
+ /// Compare Gen0/Gen1/Gen2 collections vs baseline to validate allocation reduction.
+ /// This method is identical to ReadWithNullReporter — it exists solely for explicit
+ /// before/after naming in benchmark reports.
+ ///
+ [Benchmark]
+ public int ReadWithBlockAllocation ()
+ {
+ using var reader = new LogfileReader(
+ _tempFile,
+ new EncodingOptions { Encoding = Encoding.UTF8 },
+ multiFile: false,
+ bufferCount: 500,
+ linesPerBuffer: 500,
+ new MultiFileOptions(),
+ ReaderType.System,
+ PluginRegistry.PluginRegistry.Instance,
+ maximumLineLength: 500,
+ progressReporter: Core.Classes.Log.ProgressReporters.NullProgressReporter.Instance);
+
+ reader.ReadFiles();
+ return reader.LineCount;
+ }
+
+ [GlobalCleanup]
+ public void Cleanup ()
+ {
+ if (File.Exists(_tempFile))
+ {
+ File.Delete(_tempFile);
+ }
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Security", "CA5394:Do not use insecure randomness", Justification = "Benchmark data generation")]
+ private static void GenerateLogFile (string path, int lineCount)
+ {
+ var rng = new Random(42); // deterministic seed for reproducibility
+ using var writer = new StreamWriter(path, false, Encoding.UTF8, bufferSize: 65536);
+ for (int i = 0; i < lineCount; i++)
+ {
+ writer.Write("2026-04-23 12:00:00.");
+ writer.Write(i % 1000);
+ writer.Write(" [INFO] Thread-");
+ writer.Write(rng.Next(1, 32));
+ writer.Write(" SomeNamespace.SomeClass - Log message number ");
+ writer.WriteLine(i);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/LogExpert.Benchmarks/StreamReaderBenchmarks.cs b/src/LogExpert.Benchmarks/StreamReaderBenchmarks.cs
index c8a0b382..a82f0432 100644
--- a/src/LogExpert.Benchmarks/StreamReaderBenchmarks.cs
+++ b/src/LogExpert.Benchmarks/StreamReaderBenchmarks.cs
@@ -1,9 +1,8 @@
using System.Text;
using BenchmarkDotNet.Attributes;
-using BenchmarkDotNet.Running;
-using LogExpert.Core.Classes.Log;
+using LogExpert.Core.Classes.Log.Streamreaders;
using LogExpert.Core.Entities;
using LogExpert.Core.Interfaces;
@@ -150,12 +149,4 @@ private static void ReadAllLines (ILogStreamReader reader)
// Consume the line
}
}
-}
-
-public static class Program
-{
- public static void Main (string[] args)
- {
- _ = BenchmarkRunner.Run<StreamReaderBenchmarks>();
- }
-}
+}
\ No newline at end of file
diff --git a/src/LogExpert.Benchmarks/Support/FakeLogFileInfo.cs b/src/LogExpert.Benchmarks/Support/FakeLogFileInfo.cs
new file mode 100644
index 00000000..bd70a382
--- /dev/null
+++ b/src/LogExpert.Benchmarks/Support/FakeLogFileInfo.cs
@@ -0,0 +1,34 @@
+using ColumnizerLib;
+
+namespace LogExpert.Benchmarks.Support;
+
+///
+/// Minimal ILogFileInfo stub for benchmarks. No filesystem access.
+/// Wraps an in-memory byte array as the file content.
+///
+internal sealed class FakeLogFileInfo : ILogFileInfo
+{
+ private readonly byte[] _content;
+
+ public FakeLogFileInfo (string name = "fake.log", byte[]? content = null, long length = 1_000_000)
+ {
+ FullName = name;
+ _content = content ?? [];
+ Length = content?.Length ?? length;
+ OriginalLength = Length;
+ }
+
+ public string FullName { get; }
+ public string FileName => Path.GetFileName(FullName);
+ public string DirectoryName => Path.GetDirectoryName(FullName) ?? "";
+ public char DirectorySeparatorChar => Path.DirectorySeparatorChar;
+ public Uri Uri => new($"file:///{FullName}");
+ public long Length { get; set; }
+ public long OriginalLength { get; }
+ public bool FileExists => true;
+ public int PollInterval => 250;
+
+ public bool FileHasChanged () => false;
+ public Stream OpenStream () => new MemoryStream(_content, writable: false);
+ public ILogFileInfo GetRolloverInfo (string fileName) => new FakeLogFileInfo(fileName);
+}
\ No newline at end of file
diff --git a/src/LogExpert.Benchmarks/Support/NullPluginRegistry.cs b/src/LogExpert.Benchmarks/Support/NullPluginRegistry.cs
new file mode 100644
index 00000000..5c815054
--- /dev/null
+++ b/src/LogExpert.Benchmarks/Support/NullPluginRegistry.cs
@@ -0,0 +1,30 @@
+using ColumnizerLib;
+
+using LogExpert.Core.Interfaces;
+
+namespace LogExpert.Benchmarks.Support;
+
+///
+/// No-op IPluginRegistry for benchmarks. Returns empty columnizer list and
+/// a stub file system plugin that handles all URIs via local file system.
+///
+internal sealed class NullPluginRegistry : IPluginRegistry
+{
+ public static readonly NullPluginRegistry Instance = new();
+
+ public IList<ILogLineColumnizer> RegisteredColumnizers { get; } = [];
+
+ public IFileSystemPlugin FindFileSystemForUri (string fileNameOrUri) => NullFileSystemPlugin.Instance;
+
+ private sealed class NullFileSystemPlugin : IFileSystemPlugin
+ {
+ public static readonly NullFileSystemPlugin Instance = new();
+
+ public string Text => "Null";
+ public string Description => "No-op file system for benchmarks";
+ public bool CanHandleUri (string uriString) => true;
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Globalization", "CA1303:Do not pass literals as localized parameters", Justification = "For UnitTests")]
+ public ILogFileInfo GetLogfileInfo (string uriString) => throw new NotSupportedException("NullFileSystemPlugin does not support GetLogfileInfo");
+ }
+}
\ No newline at end of file
diff --git a/src/LogExpert.Core/Classes/Log/BatchedProgressReporter.cs b/src/LogExpert.Core/Classes/Log/BatchedProgressReporter.cs
deleted file mode 100644
index 03d711bb..00000000
--- a/src/LogExpert.Core/Classes/Log/BatchedProgressReporter.cs
+++ /dev/null
@@ -1,106 +0,0 @@
-using System.Collections.Concurrent;
-
-using LogExpert.Core.EventArguments;
-
-namespace LogExpert.Core.Classes.Log;
-
-///
-/// Batches progress updates to reduce UI thread marshalling overhead.
-/// Collects updates in a thread-safe queue and processes them on a timer.
-///
-//TODO Refactor
-public sealed class BatchedProgressReporter : IDisposable
-{
- private readonly ConcurrentQueue<LoadFileEventArgs> _progressQueue = new();
- private readonly Timer _timer;
- private readonly Action<LoadFileEventArgs> _progressCallback;
- private readonly int _updateIntervalMs;
- private bool _disposed;
-
- /// <summary>
- /// Creates a new batched progress reporter.
- /// </summary>
- /// <param name="progressCallback">Callback to invoke with latest progress</param>
- /// <param name="updateIntervalMs">Update interval in milliseconds (default: 100ms)</param>
- public BatchedProgressReporter (Action<LoadFileEventArgs> progressCallback, int updateIntervalMs = 100)
- {
- _progressCallback = progressCallback ?? throw new ArgumentNullException(nameof(progressCallback));
- _updateIntervalMs = updateIntervalMs;
-
- // Start timer
- _timer = new Timer(ProcessQueue, null, updateIntervalMs, updateIntervalMs);
- }
-
- ///
- /// Reports progress (thread-safe, non-blocking)
- ///
- public void ReportProgress (LoadFileEventArgs args)
- {
- if (_disposed)
- {
- return;
- }
-
- // Only keep the latest update - discard old ones
- _progressQueue.Enqueue(args);
-
- // Keep queue size bounded (max 10 items)
- while (_progressQueue.Count > 10)
- {
- _ = _progressQueue.TryDequeue(out _);
- }
- }
-
- ///
- /// Flushes any pending updates immediately
- ///
- public void Flush ()
- {
- ProcessQueue(null);
- }
-
- private void ProcessQueue (object state)
- {
- if (_disposed)
- {
- return;
- }
-
- // Get only the LATEST update (discard intermediate ones)
- LoadFileEventArgs latestUpdate = null;
- while (_progressQueue.TryDequeue(out var update))
- {
- latestUpdate = update;
- }
-
- // Invoke callback with latest update
- if (latestUpdate != null)
- {
- try
- {
- _progressCallback(latestUpdate);
- }
- catch (Exception ex)
- {
- // Log but don't crash
- System.Diagnostics.Debug.WriteLine($"Error in progress callback: {ex.Message}");
- }
- }
- }
-
- public void Dispose ()
- {
- if (_disposed)
- {
- return;
- }
-
- _disposed = true;
-
- Flush();
- _timer?.Dispose();
-
- // Clear queue
- _progressQueue.Clear();
- }
-}
\ No newline at end of file
diff --git a/src/LogExpert.Core/Classes/Log/Buffers/BufferIndex.cs b/src/LogExpert.Core/Classes/Log/Buffers/BufferIndex.cs
new file mode 100644
index 00000000..0e838d23
--- /dev/null
+++ b/src/LogExpert.Core/Classes/Log/Buffers/BufferIndex.cs
@@ -0,0 +1,621 @@
+using System.Collections.Concurrent;
+using System.Diagnostics;
+using System.Globalization;
+
+using NLog;
+
+namespace LogExpert.Core.Classes.Log.Buffers;
+
+/*
+ * !IMPORTANT
+ * Before and after changes are made run the BufferIndexBenchmarks for a baseline, so no performance regression is introduced
+ * If changes are made to this class, please also review BufferIndexSnapshot and BufferShiftTest to ensure consistency and correctness.
+ */
+
+/// <summary>
+/// Thread-safe index that maps line numbers to <see cref="LogBuffer"/> instances with LRU eviction. This is the hot
+/// path — every GetLogLine call goes through here. Has zero file-I/O dependencies. Constructable with only integers for
+/// benchmarking.
+/// </summary>
+public sealed class BufferIndex : IDisposable
+{
+ private readonly int _maxBuffers;
+ private readonly int _maxLinesPerBuffer;
+ private readonly ReaderWriterLockSlim _lock = new(LockRecursionPolicy.SupportsRecursion);
+ private readonly SortedList<int, LogBuffer> _bufferList = [];
+ private readonly ConcurrentDictionary _lruCacheDict;
+ private readonly ThreadLocal<int> _lastBufferIndex = new(() => -1);
+
+ private static readonly Logger _logger = LogManager.GetCurrentClassLogger();
+
+ private volatile bool _isLineCountDirty = true;
+ private int _cachedLineCount;
+
+ public BufferIndex (int maxBuffers, int maxLinesPerBuffer)
+ {
+ _maxBuffers = maxBuffers;
+ _maxLinesPerBuffer = maxLinesPerBuffer;
+ _lruCacheDict = new(Environment.ProcessorCount, maxBuffers + 1);
+ }
+
+ #region Hot Path Lookup
+
+ /// <summary>
+ /// 4-layer lookup. Caller must hold at least a read lock. Returns false if lineNum is out of range or the index is
+ /// empty.
+ /// </summary>
+ public LogBufferEntry TryFindBuffer (int lineNum)
+ {
+ return TryFindBufferWithIndex(lineNum);
+ }
+
+ ///
+ /// Core buffer lookup returning both buffer and index position. The caller MUST already hold a read,
+ /// upgradeable-read, or write lock.
+ ///
+ internal LogBufferEntry GetBufferForLineWithIndex (int lineNum)
+ {
+ return TryFindBufferWithIndex(lineNum);
+ }
+
+ private LogBufferEntry TryFindBufferWithIndex (int lineNum)
+ {
+#if DEBUG
+ Util.AssertTrue(_lock.IsReadLockHeld || _lock.IsUpgradeableReadLockHeld || _lock.IsWriteLockHeld, "No lock held for buffer list in TryFindBufferWithIndex");
+ long startTime = Environment.TickCount;
+#endif
+ var arr = _bufferList.Values;
+ var count = arr.Count;
+
+ if (count == 0)
+ {
+ return new LogBufferEntry(null, -1, false);
+ }
+
+ // Layer 0: Last buffer cache — O(1) for sequential access
+ var lastIdx = _lastBufferIndex.Value;
+ if (lastIdx >= 0 && lastIdx < count)
+ {
+ var buf = arr[lastIdx];
+ if ((uint)(lineNum - buf.StartLine) < (uint)buf.LineCount)
+ {
+ //dont UpdateLRUCache, the cache has not changed in layer 0
+ return new LogBufferEntry(buf, lastIdx, true);
+ }
+
+ // Layer 1: Adjacent buffer prediction — O(1) for buffer boundary crossings
+ if (lastIdx + 1 < count)
+ {
+ var next = arr[lastIdx + 1];
+ if ((uint)(lineNum - next.StartLine) < (uint)next.LineCount)
+ {
+ _lastBufferIndex.Value = lastIdx + 1;
+ UpdateLru(next);
+ return new LogBufferEntry(next, lastIdx + 1, true);
+ }
+ }
+
+ if (lastIdx - 1 >= 0)
+ {
+ var prev = arr[lastIdx - 1];
+ if ((uint)(lineNum - prev.StartLine) < (uint)prev.LineCount)
+ {
+ _lastBufferIndex.Value = lastIdx - 1;
+ UpdateLru(prev);
+ return new LogBufferEntry(prev, lastIdx - 1, true);
+ }
+ }
+ }
+
+ // Layer 2: Direct mapping guess — O(1) speculative for uniform buffers
+ var guess = lineNum / _maxLinesPerBuffer;
+ if ((uint)guess < (uint)count)
+ {
+ var buf = arr[guess];
+ if ((uint)(lineNum - buf.StartLine) < (uint)buf.LineCount)
+ {
+ _lastBufferIndex.Value = guess;
+ UpdateLru(buf);
+ return new LogBufferEntry(buf, guess, true);
+ }
+ }
+
+ // Layer 3: Branchless binary search with power-of-two strides
+ var step = HighestPowerOfTwo(count);
+ var idx = (arr[step - 1].StartLine <= lineNum) ? count - step : 0;
+
+ for (step >>= 1; step > 0; step >>= 1)
+ {
+ var probe = idx + step;
+ if (probe < count && arr[probe].StartLine <= lineNum)
+ {
+ idx = probe;
+ }
+ }
+
+ // idx is now the buffer index — verify bounds
+ if (idx < count)
+ {
+ var buf = arr[idx];
+ if ((uint)(lineNum - buf.StartLine) < (uint)buf.LineCount)
+ {
+ _lastBufferIndex.Value = idx;
+ UpdateLru(buf);
+ return new LogBufferEntry(buf, idx, true);
+ }
+ }
+#if DEBUG
+ long endTime = Environment.TickCount;
+ _logger.Debug($"TryFindBufferWithIndex({lineNum}) duration: {endTime - startTime} ms.");
+#endif
+ return new LogBufferEntry(null, -1, false);
+ }
+
+ #endregion
+
+ #region Navigation: multi-file traversal
+
+ /// <summary>
+ /// Finds the start line of the next file segment after <paramref name="lineNum"/>. Caller must hold at least a read
+ /// lock.
+ /// </summary>
+ public (bool Found, int StartLine) TryGetNextFileStartLine (int lineNum)
+ {
+ var result = -1;
+
+ var foundBufferEntry = TryFindBufferWithIndex(lineNum);
+ if (!foundBufferEntry.Found)
+ {
+ return (foundBufferEntry.Found, result);
+ }
+
+ for (var i = foundBufferEntry.Index; i < _bufferList.Values.Count; ++i)
+ {
+ if (_bufferList.Values[i].FileInfo != foundBufferEntry.Buffer.FileInfo)
+ {
+ result = _bufferList.Values[i].StartLine;
+ break;
+ }
+ }
+
+ return (result != -1, result);
+ }
+
+ /// <summary>
+ /// Finds the start line of the previous file segment before <paramref name="lineNum"/>. Caller must hold at least a
+ /// read lock.
+ /// </summary>
+ public (bool Found, int StartLine) TryGetPrevFileStartLine (int lineNum)
+ {
+ var result = -1;
+
+ var foundBufferEntry = TryFindBufferWithIndex(lineNum);
+
+ if (!foundBufferEntry.Found)
+ {
+ return (foundBufferEntry.Found, result);
+ }
+
+ if (foundBufferEntry.Buffer != null && foundBufferEntry.Index != -1)
+ {
+ for (var i = foundBufferEntry.Index; i >= 0; --i)
+ {
+ if (_bufferList.Values[i].FileInfo != foundBufferEntry.Buffer.FileInfo)
+ {
+ result = _bufferList.Values[i].StartLine + _bufferList.Values[i].LineCount;
+ break;
+ }
+ }
+ }
+
+ return (result != -1, result);
+ }
+
+ /// <summary>
+ /// Finds the first buffer belonging to the same file as <paramref name="logBuffer"/>. Caller must hold at least a
+ /// read lock.
+ /// </summary>
+ public LogBuffer? GetFirstBufferForFile (LogBuffer logBuffer, int index)
+ {
+ //maybe not necessary
+ ArgumentNullException.ThrowIfNull(logBuffer, "GetFirstBufferForFile not possible: Buffer is NULL");
+
+ if (index == -1)
+ {
+ return null;
+ }
+
+ var info = logBuffer.FileInfo;
+
+ var resultBuffer = logBuffer;
+ while (true)
+ {
+ index--;
+ if (index < 0 || _bufferList.Values[index].FileInfo != info)
+ {
+ break;
+ }
+
+ resultBuffer = _bufferList.Values[index];
+ }
+
+ return resultBuffer;
+ }
+
+ #endregion
+
+ #region Mutation — called during reads and rollover
+
+ ///
+ /// Adds a buffer to the index and updates LRU tracking. Caller must hold a write lock.
+ ///
+ public void Add (LogBuffer buffer)
+ {
+#if DEBUG
+ _logger.Debug(CultureInfo.InvariantCulture, "AddBufferToList(): {0}/{1}/{2}", buffer.StartLine, buffer.LineCount, buffer.FileInfo.FullName);
+#endif
+ _bufferList[buffer.StartLine] = buffer;
+ UpdateLru(buffer);
+ _isLineCountDirty = true;
+ }
+
+ ///
+ /// Removes a buffer by its start line key and LRU entry. Caller must hold a write lock.
+ ///
+ public bool Remove (LogBuffer buffer)
+ {
+ ArgumentNullException.ThrowIfNull(buffer, "Remove not possible: Buffer is NULL");
+
+ Debug.Assert(_lock.IsWriteLockHeld, "No writer lock for buffer list");
+ _ = _lruCacheDict.TryRemove(buffer.StartLine, out _);
+ _isLineCountDirty = true;
+ return _bufferList.Remove(buffer.StartLine);
+ }
+
+ ///
+ /// Atomically updates a buffer's start line in both the index and LRU cache. Used by ShiftBuffers during rollover.
+ /// Caller must hold a write lock.
+ ///
+ public void UpdateStartLine (LogBuffer buffer, int newStartLine)
+ {
+ var hadCache = _lruCacheDict.TryRemove(buffer.StartLine, out var cacheEntry);
+
+ _ = _bufferList.Remove(buffer.StartLine);
+ buffer.StartLine = newStartLine;
+ _bufferList[newStartLine] = buffer;
+
+ if (hadCache)
+ {
+ _ = _lruCacheDict.TryAdd(buffer.StartLine, cacheEntry);
+ }
+
+ _isLineCountDirty = true;
+ }
+
+ ///
+ /// Clears all buffers and LRU entries. Does NOT dispose buffer content. Caller must hold a write lock.
+ ///
+ public void Clear ()
+ {
+ _bufferList.Clear();
+ _lruCacheDict.Clear();
+ ResetThreadLocalCache();
+ _isLineCountDirty = true;
+ }
+
+ #endregion
+
+ #region LRU eviction
+
/// <summary>
/// Removes least-recently-used entries when the cache exceeds its maximum size. Evicts content but preserves
/// metadata so buffers remain findable for re-read. Does NOT acquire _lock — only touches _lruCacheDict
/// (a ConcurrentDictionary) and individual buffer SpinLocks.
/// </summary>
public void EvictLeastRecentlyUsed ()
{
#if DEBUG
    // TickCount64 does not wrap after ~24.9 days the way the 32-bit TickCount does.
    long startTime = Environment.TickCount64;
#endif
    _logger.Debug(CultureInfo.InvariantCulture, "Starting garbage collection");

    // Hysteresis: only start evicting once we are `threshold` entries over budget,
    // then trim all the way back down to _maxBuffers.
    const int threshold = 10;

    // Capture the decision once: the original re-evaluated this condition AFTER eviction
    // for the timing log below, by which point the count had shrunk below the threshold,
    // so the elapsed-time line almost never fired.
    var overBudget = _lruCacheDict.Count > _maxBuffers + threshold;

    if (overBudget)
    {
        var diff = _lruCacheDict.Count - _maxBuffers;
#if DEBUG
        // diff > threshold is guaranteed here, so no extra positivity check is needed.
        _logger.Info(CultureInfo.InvariantCulture, "Removing {0} entries from LRU cache", diff);
#endif
        // Snapshot values and sort by timestamp (ascending = least recently used first)
        var entries = _lruCacheDict.ToArray();
        Array.Sort(entries, static (a, b) => a.Value.LastUseTimeStamp.CompareTo(b.Value.LastUseTimeStamp));

        for (var i = 0; i < diff && i < entries.Length; ++i)
        {
            var kvp = entries[i];
            if (_lruCacheDict.TryRemove(kvp.Key, out var removed))
            {
                var lockTaken = false;
                try
                {
                    removed.LogBuffer.AcquireContentLock(ref lockTaken);
                    // Evict content but preserve metadata (LineCount, StartLine, etc.)
                    // so the buffer remains findable in _bufferList lookups.
                    // Do NOT return to pool — the buffer is still referenced by _bufferList.
                    removed.LogBuffer.EvictContent();
                }
                finally
                {
                    if (lockTaken)
                    {
                        removed.LogBuffer.ReleaseContentLock();
                    }
                }
            }
        }
    }

#if DEBUG
    if (overBudget)
    {
        long endTime = Environment.TickCount64;
        _logger.Info(CultureInfo.InvariantCulture, "Garbage collector time: {0} ms.", endTime - startTime);
    }
#endif
}
+
/// <summary>
/// Atomically clears the index and returns all LRU-tracked buffers to the pool. Clears the index FIRST under the
/// caller's write lock, THEN returns buffers to pool. This prevents a race where concurrent readers find buffers
/// that have been returned to the pool. Caller must hold a write lock.
/// </summary>
/// <param name="pool">Pool that receives the cleared buffers for later reuse.</param>
public void ClearLru (LogBufferPool pool)
{
    _logger.Info(CultureInfo.InvariantCulture, "Clearing LRU cache.");

    // 1. Collect buffer references before clearing.
    //    (Restored the generic type argument that was stripped: `new List(...)` does not compile.)
    var toReturn = new List<LogBuffer>(_lruCacheDict.Count);
    foreach (var entry in _lruCacheDict.Values)
    {
        toReturn.Add(entry.LogBuffer);
    }

    // 2. Clear index FIRST — no concurrent reader can find these after this
    _bufferList.Clear();
    _lruCacheDict.Clear();
    _isLineCountDirty = true;
    ResetThreadLocalCache();

    // 3. Now safe to return to pool. Each buffer's content SpinLock is taken so a reader
    //    that already holds a reference presumably cannot race the pool's reset — confirm
    //    against LogBufferPool.Return's locking expectations.
    foreach (var entry in toReturn)
    {
        var lockTaken = false;
        try
        {
            entry.AcquireContentLock(ref lockTaken);
            pool.Return(entry);
        }
        finally
        {
            if (lockTaken)
            {
                entry.ReleaseContentLock();
            }
        }
    }

    _logger.Info(CultureInfo.InvariantCulture, "Clearing done.");
}
+
+ #endregion
+
/// <summary>
/// Gets the number of buffers.
/// </summary>
public int BufferCount => _bufferList.Count;

/// <summary>
/// Returns the buffer at the specified positional index. Caller must hold at least a read lock.
/// </summary>
public LogBuffer GetBufferAt (int index) => _bufferList.GetValueAtIndex(index);

/// <summary>
/// Returns the last buffer in the index (highest start line). Caller must hold at least a read lock.
/// NOTE(review): passes Count - 1 == -1 when the index is empty — confirm callers check BufferCount first.
/// </summary>
public LogBuffer GetLastBuffer () => _bufferList.GetValueAtIndex(_bufferList.Count - 1);

/// <summary>
/// Returns an enumerable collection of all log buffers managed by the current instance. The copy is taken
/// eagerly, so the returned sequence reflects the state at the time of the call and is safe to enumerate
/// after the lock is released.
/// </summary>
/// <returns>
/// An <see cref="IEnumerable{T}"/> containing each <see cref="LogBuffer"/> in the collection.
/// </returns>
public IEnumerable<LogBuffer> EnumerateBuffers () { return [.. _bufferList.Values]; }
+
/// <summary>
/// Total lines across all buffers. Recalculated on demand when dirty. Caller must hold at least a read lock.
/// NOTE(review): _cachedLineCount/_isLineCountDirty are written here while only a read lock is held — two
/// concurrent readers may both recompute (benign duplicate work), but the unsynchronized writes are
/// technically a data race; confirm this is acceptable.
/// </summary>
public int TotalLineCount
{
    get
    {
        if (_isLineCountDirty)
        {
            // Recompute by summing every buffer's line count, then cache the result.
            var total = 0;
            foreach (var buffer in _bufferList.Values)
            {
                total += buffer.LineCount;
            }

            _cachedLineCount = total;
            _isLineCountDirty = false;
        }

        return _cachedLineCount;
    }
}

/// <summary>Marks the cached total stale; the next TotalLineCount read recomputes it.</summary>
public void MarkLineCountDirty () => _isLineCountDirty = true;

/// <summary>
/// Gets the number of items currently stored in the least recently used (LRU) cache.
/// </summary>
public int LruCacheCount => _lruCacheDict.Count;
+
#region Lock management — using-scoped only

/// <summary>Acquires a read lock scoped to a using statement/declaration.</summary>
public ReadLockScope AcquireReadLock () => new(_lock);

/// <summary>Acquires a write lock scoped to a using statement/declaration.</summary>
public WriteLockScope AcquireWriteLock () => new(_lock);

/// <summary>Acquires an upgradeable read lock scoped to a using statement/declaration.</summary>
public UpgradeableReadLockScope AcquireUpgradeableReadLock () => new(_lock);

#endregion
+
+ #region Diagnostics
+
/// <summary>
/// Creates an immutable point-in-time capture of the index state. Acquires its own read lock internally.
/// </summary>
public BufferIndexSnapshot CreateSnapshot ()
{
    using var _ = AcquireReadLock();

    // Restored the generic type argument that was stripped: `new List(...)` does not compile.
    var buffers = new List<BufferIndexSnapshot.BufferInfo>(_bufferList.Count);

    foreach (var b in _bufferList.Values)
    {
        buffers.Add(new BufferIndexSnapshot.BufferInfo
        (
            b.StartLine,
            b.LineCount,
            b.StartPos,
            b.Size,
            b.IsDisposed,
            b.FileInfo.FullName
        ));
    }

    return new BufferIndexSnapshot
    {
        BufferCount = _bufferList.Count,
        TotalLineCount = TotalLineCount,
        LruCacheCount = _lruCacheDict.Count,
        Buffers = buffers
    };
}
+
+ #endregion
+
#region Internal Helpers

/// <summary>
/// Resets the per-thread "last hit" buffer index (presumably a ThreadLocal — it exposes .Value and Dispose).
/// NOTE(review): only the CALLING thread's slot is reset; other threads keep their cached index — confirm
/// that is the intended invalidation semantics.
/// </summary>
public void ResetThreadLocalCache () => _lastBufferIndex.Value = -1;

/// <summary>
/// Records a use of <paramref name="logBuffer"/> in the LRU cache, creating the entry if missing.
/// </summary>
private void UpdateLru (LogBuffer logBuffer)
{
    // Static factory + explicit state avoids a closure allocation on every call.
    var cacheEntry = _lruCacheDict.GetOrAdd(
        logBuffer.StartLine,
        static (_, buf) => new LogBufferCacheEntry { LogBuffer = buf },
        logBuffer);

    cacheEntry.Touch();
}

/// <summary>
/// Returns the largest power of two less than or equal to <paramref name="n"/>.
/// </summary>
private static int HighestPowerOfTwo (int n)
{
    // Guard: for n <= 0 the shift below would be masked to 31 and silently yield int.MinValue.
    ArgumentOutOfRangeException.ThrowIfNegativeOrZero(n);
    return 1 << (31 - int.LeadingZeroCount(n));
}

public void Dispose ()
{
    _lastBufferIndex.Dispose();
    _lock.Dispose();
}

#endregion
+}
+
+#region Lock scope structs
+
/// <summary>
/// Using-scoped read-lock guard over a <see cref="ReaderWriterLockSlim"/>. Tries a 10-second timed
/// acquire first; on timeout it falls back to an unbounded blocking acquire (the warning log is disabled).
/// </summary>
public readonly ref struct ReadLockScope
{
    private readonly ReaderWriterLockSlim _lock;

    public ReadLockScope (ReaderWriterLockSlim rwLock)
    {
        _lock = rwLock;

        var acquiredInTime = _lock.TryEnterReadLock(TimeSpan.FromSeconds(10));
        if (acquiredInTime)
        {
            return;
        }

        //_logger.Warn("Reader lock wait timed out, forcing entry");
        _lock.EnterReadLock();
    }

    public void Dispose () => _lock.ExitReadLock();
}
+
/// <summary>
/// Using-scoped write-lock guard over a <see cref="ReaderWriterLockSlim"/>. Tries a 10-second timed
/// acquire first; on timeout it falls back to an unbounded blocking acquire (the warning log is disabled).
/// </summary>
public readonly ref struct WriteLockScope
{
    private readonly ReaderWriterLockSlim _lock;

    public WriteLockScope (ReaderWriterLockSlim rwLock)
    {
        _lock = rwLock;

        var acquiredInTime = _lock.TryEnterWriteLock(TimeSpan.FromSeconds(10));
        if (acquiredInTime)
        {
            return;
        }

        //_logger.Warn("Writer lock wait timed out, forcing entry");
        _lock.EnterWriteLock();
    }

    public void Dispose () => _lock.ExitWriteLock();
}
+
/// <summary>
/// Using-scoped upgradeable-read-lock guard over a <see cref="ReaderWriterLockSlim"/>. Tries a 10-second
/// timed acquire first; on timeout it falls back to an unbounded blocking acquire (warning log disabled).
/// Call <see cref="UpgradeToWrite"/> to temporarily promote to a write lock.
/// </summary>
public readonly ref struct UpgradeableReadLockScope
{
    private readonly ReaderWriterLockSlim _lock;

    public UpgradeableReadLockScope (ReaderWriterLockSlim rwLock)
    {
        _lock = rwLock;

        var acquiredInTime = _lock.TryEnterUpgradeableReadLock(TimeSpan.FromSeconds(10));
        if (acquiredInTime)
        {
            return;
        }

        //_logger.Warn("Upgradeable read lock timed out, forcing entry");
        _lock.EnterUpgradeableReadLock();
    }

    /// <summary>Promotes to a write lock; dispose the returned scope to downgrade back.</summary>
    public WriteLockUpgradeScope UpgradeToWrite () => new(_lock);

    public void Dispose () => _lock.ExitUpgradeableReadLock();
}
+
/// <summary>
/// Write-lock scope used to upgrade an already-held upgradeable read lock. Tries a 10-second timed
/// acquire first; on timeout it falls back to an unbounded blocking acquire (warning log disabled).
/// Disposing exits the write lock (the upgradeable lock stays held by its own scope).
/// </summary>
public readonly ref struct WriteLockUpgradeScope
{
    private readonly ReaderWriterLockSlim _lock;

    public WriteLockUpgradeScope (ReaderWriterLockSlim rwls)
    {
        _lock = rwls;

        var acquiredInTime = _lock.TryEnterWriteLock(TimeSpan.FromSeconds(10));
        if (acquiredInTime)
        {
            return;
        }

        //_logger.Warn("Writer lock upgrade timed out, forcing entry");
        _lock.EnterWriteLock();
    }

    public void Dispose () => _lock.ExitWriteLock();
}
+
+#endregion
+
/// <summary>
/// Result of a buffer lookup: the buffer (if any), its positional index, and whether the lookup succeeded.
/// </summary>
public readonly struct LogBufferEntry
{
    public LogBufferEntry (LogBuffer? buffer, int index, bool found)
    {
        Buffer = buffer;
        Index = index;
        Found = found;
    }

    /// <summary>The matched buffer, or null when not found.</summary>
    public LogBuffer? Buffer { get; }

    /// <summary>Positional index of the buffer in the index.</summary>
    public int Index { get; }

    /// <summary>Whether the lookup succeeded.</summary>
    public bool Found { get; }
}
\ No newline at end of file
diff --git a/src/LogExpert.Core/Classes/Log/Buffers/BufferIndexSnapshot.cs b/src/LogExpert.Core/Classes/Log/Buffers/BufferIndexSnapshot.cs
new file mode 100644
index 00000000..5b54390b
--- /dev/null
+++ b/src/LogExpert.Core/Classes/Log/Buffers/BufferIndexSnapshot.cs
@@ -0,0 +1,24 @@
+namespace LogExpert.Core.Classes.Log.Buffers;
+
+///
+/// Immutable point-in-time capture of state.
+/// Taken under a single read lock, safe to inspect afterward without locks.
+///
/// <summary>
/// Immutable point-in-time capture of <see cref="BufferIndex"/> state.
/// Taken under a single read lock, safe to inspect afterward without locks.
/// </summary>
public sealed class BufferIndexSnapshot
{
    /// <summary>Number of buffers in the index at capture time.</summary>
    public int BufferCount { get; init; }

    /// <summary>Total line count across all buffers at capture time.</summary>
    public int TotalLineCount { get; init; }

    /// <summary>Number of entries in the LRU cache at capture time.</summary>
    public int LruCacheCount { get; init; }

    /// <summary>
    /// Per-buffer metadata, in index order. Restored the generic type argument that was stripped:
    /// a bare IReadOnlyList does not compile.
    /// </summary>
    public IReadOnlyList<BufferInfo> Buffers { get; init; } = [];

    /// <summary>Metadata captured for a single buffer.</summary>
    public sealed record BufferInfo (
        int StartLine,
        int LineCount,
        long StartPos,
        long Size,
        bool IsDisposed,
        string FileName);

    public override string ToString () =>
        $"Buffers={BufferCount}, Lines={TotalLineCount}, LRU={LruCacheCount}";
}
\ No newline at end of file
diff --git a/src/LogExpert.Core/Classes/Log/Buffers/CharBlockAllocator.cs b/src/LogExpert.Core/Classes/Log/Buffers/CharBlockAllocator.cs
new file mode 100644
index 00000000..030437a8
--- /dev/null
+++ b/src/LogExpert.Core/Classes/Log/Buffers/CharBlockAllocator.cs
@@ -0,0 +1,132 @@
+namespace LogExpert.Core.Classes.Log.Buffers;
+
+///
+/// Allocates slices from large char[] blocks.
+/// Multiple lines are packed into each block to reduce per-line allocation overhead.
+///
+///
+/// Blocks are plain arrays (not pooled) because their lifetime extends beyond the allocator:
+/// the UI thread may hold slices long after the backing
+/// is evicted. Using here would
+/// cause use-after-return corruption when evicted blocks are re-rented by new reads.
+///
+/// We still get the primary GC benefit: hundreds of short-lived strings from
+/// are copied into a few large blocks, keeping
+/// the strings Gen0-eligible and reducing Gen1/Gen2 promotions.
+///
+/// This class is NOT thread-safe. Each reader/fill operation should use its own instance.
+///
/// <summary>
/// Allocates slices from large char[] blocks.
/// Multiple lines are packed into each block to reduce per-line allocation overhead.
/// </summary>
/// <remarks>
/// Blocks are plain arrays (not pooled) because their lifetime extends beyond the allocator:
/// the UI thread may hold slices long after the backing <see cref="LogBuffer"/>
/// is evicted. Using <see cref="System.Buffers.ArrayPool{T}"/> here would
/// cause use-after-return corruption when evicted blocks are re-rented by new reads.
///
/// We still get the primary GC benefit: hundreds of short-lived strings from
/// the reader are copied into a few large blocks, keeping
/// the strings Gen0-eligible and reducing Gen1/Gen2 promotions.
///
/// This class is NOT thread-safe. Each reader/fill operation should use its own instance.
/// </remarks>
public sealed class CharBlockAllocator : IDisposable
{
    private const int DEFAULT_BLOCK_SIZE = 65_536; // 128 KB in chars (64K chars × 2 bytes)

    private readonly int _blockSize;
    private List<char[]> _blocks = [];
    private readonly List<char[]> _oversizedBlocks = [];
    private char[] _currentBlock;
    private int _currentOffset;
    private bool _disposed;

    public CharBlockAllocator (int blockSize = DEFAULT_BLOCK_SIZE)
    {
        // A zero/negative block size would force every request down the oversized path.
        ArgumentOutOfRangeException.ThrowIfNegativeOrZero(blockSize);

        _blockSize = blockSize;
        _currentBlock = new char[_blockSize];
        _blocks.Add(_currentBlock);
        _currentOffset = 0;
    }

    /// <summary>
    /// Gets the number of normal (fixed-size) blocks currently allocated.
    /// (Blocks are plain arrays, not pool rentals — see the class remarks.)
    /// </summary>
    public int BlockCount => _blocks.Count;

    /// <summary>
    /// Gets the number of oversized (standalone) blocks currently allocated.
    /// Useful for diagnostics — a high count indicates pathological line lengths.
    /// </summary>
    public int OversizedBlockCount => _oversizedBlocks.Count;

    /// <summary>
    /// Allocates a region of the specified length from the current block.
    /// If the current block has insufficient space, a new block is allocated.
    /// Lines longer than the block size receive a standalone array tracked separately.
    /// Lengths &lt;= 0 return <see cref="Memory{T}.Empty"/>.
    /// </summary>
    public Memory<char> Rent (int length)
    {
        ObjectDisposedException.ThrowIf(_disposed, this);

        if (length <= 0)
        {
            return Memory<char>.Empty;
        }

        // Oversized line: give it its own array, tracked separately
        if (length > _blockSize)
        {
            var oversized = new char[length];
            _oversizedBlocks.Add(oversized);
            return oversized.AsMemory(0, length);
        }

        // Current block has space
        if (_currentOffset + length <= _currentBlock.Length)
        {
            var memory = _currentBlock.AsMemory(_currentOffset, length);
            _currentOffset += length;
            return memory;
        }

        // Need a new block
        _currentBlock = new char[_blockSize];
        _blocks.Add(_currentBlock);
        _currentOffset = length;
        return _currentBlock.AsMemory(0, length);
    }

    /// <summary>
    /// Detaches and returns the list of all blocks (normal + oversized). After this call,
    /// the allocator no longer owns those blocks — the caller (LogBuffer) holds them
    /// until GC collects them after all slices are released. The allocator is reset with
    /// a fresh current block and remains usable.
    /// </summary>
    public List<char[]> DetachBlocks ()
    {
        ObjectDisposedException.ThrowIf(_disposed, this);

        // Merge oversized blocks into the main list so the caller owns everything
        if (_oversizedBlocks.Count > 0)
        {
            _blocks.AddRange(_oversizedBlocks);
            _oversizedBlocks.Clear();
        }

        // Swap the list — O(1), no copy. Caller owns the old list.
        var blocks = _blocks;
        _currentBlock = new char[_blockSize];
        _blocks = [_currentBlock];
        _currentOffset = 0;
        return blocks;
    }

    /// <summary>
    /// Releases all block references. The actual char[] memory is collected by GC
    /// once all slices pointing into them are released.
    /// NOTE(review): this leaves _currentBlock null, so the instance is unusable afterwards;
    /// call only on the Dispose path (as the code currently does).
    /// </summary>
    public void ReturnAll ()
    {
        _blocks.Clear();
        _oversizedBlocks.Clear();
        _currentBlock = null!;
        _currentOffset = 0;
    }

    public void Dispose ()
    {
        if (_disposed)
        {
            return;
        }

        ReturnAll();
        _disposed = true;
    }
}
\ No newline at end of file
diff --git a/src/LogExpert.Core/Classes/Log/LogBuffer.cs b/src/LogExpert.Core/Classes/Log/Buffers/LogBuffer.cs
similarity index 82%
rename from src/LogExpert.Core/Classes/Log/LogBuffer.cs
rename to src/LogExpert.Core/Classes/Log/Buffers/LogBuffer.cs
index a240720b..f600b0b9 100644
--- a/src/LogExpert.Core/Classes/Log/LogBuffer.cs
+++ b/src/LogExpert.Core/Classes/Log/Buffers/LogBuffer.cs
@@ -4,7 +4,7 @@
using NLog;
-namespace LogExpert.Core.Classes.Log;
+namespace LogExpert.Core.Classes.Log.Buffers;
public class LogBuffer
{
@@ -19,6 +19,7 @@ public class LogBuffer
private LogLine[] _lineArray;
private int _lineArrayLength; // capacity of the rented array
+ private List _charBlocks;
private int MAX_LINES = 500;
@@ -113,6 +114,8 @@ public void ClearLines ()
Array.Clear(_lineArray, 0, LineCount);
}
+ ReturnCharBlocks();
+
LineCount = 0;
#if DEBUG
_filePositions.Clear();
@@ -124,6 +127,8 @@ public void ClearLines ()
///
public void Reinitialise (ILogFileInfo fileInfo, int maxLines)
{
+ ReturnCharBlocks();
+
FileInfo = fileInfo;
MAX_LINES = maxLines;
StartLine = 0;
@@ -154,8 +159,10 @@ public void EvictContent ()
_lineArray = null;
}
- // Do NOT zero LineCount — it is needed for buffer lookup in GetBufferForLineWithIndex.
- // Do NOT zero StartLine, StartPos, Size — they are needed for re-reading from disk.
+ ReturnCharBlocks();
+
+ //! Do NOT zero LineCount — it is needed for buffer lookup in GetBufferForLineWithIndex.
+ //! Do NOT zero StartLine, StartPos, Size — they are needed for re-reading from disk.
IsDisposed = true;
#if DEBUG
DisposeCount++;
@@ -176,6 +183,8 @@ public void DisposeContent ()
LineCount = 0;
}
+ ReturnCharBlocks();
+
IsDisposed = true;
#if DEBUG
DisposeCount++;
@@ -205,6 +214,16 @@ public void ReleaseContentLock ()
_contentLock.Exit(useMemoryBarrier: false);
}
+ ///
+ /// Attaches pooled char[] blocks that back the ReadOnlyMemory in this buffer's LogLine entries.
+ /// These blocks will be returned to ArrayPool when the buffer is evicted or disposed.
+ ///
+ public void AttachCharBlocks (List blocks)
+ {
+ ReturnCharBlocks(); // return any previously held blocks
+ _charBlocks = blocks;
+ }
+
#endregion
#if DEBUG
@@ -218,4 +237,16 @@ public long GetFilePosForLineOfBlock (int line)
}
#endif
+
+ #region Private Methods
+
+ private void ReturnCharBlocks ()
+ {
+ // Just drop the reference — do NOT return to ArrayPool.
+ // The UI thread may still hold ReadOnlyMemory slices into these blocks.
+ // GC will collect them once all references (LogLine, UI snapshots) are released.
+ _charBlocks = null;
+ }
+
+ #endregion
}
\ No newline at end of file
diff --git a/src/LogExpert.Core/Classes/Log/LogBufferCacheEntry.cs b/src/LogExpert.Core/Classes/Log/Buffers/LogBufferCacheEntry.cs
similarity index 91%
rename from src/LogExpert.Core/Classes/Log/LogBufferCacheEntry.cs
rename to src/LogExpert.Core/Classes/Log/Buffers/LogBufferCacheEntry.cs
index b983cfed..7ce51e47 100644
--- a/src/LogExpert.Core/Classes/Log/LogBufferCacheEntry.cs
+++ b/src/LogExpert.Core/Classes/Log/Buffers/LogBufferCacheEntry.cs
@@ -1,4 +1,4 @@
-namespace LogExpert.Core.Classes.Log;
+namespace LogExpert.Core.Classes.Log.Buffers;
public class LogBufferCacheEntry
{
diff --git a/src/LogExpert.Core/Classes/Log/LogBufferPool.cs b/src/LogExpert.Core/Classes/Log/Buffers/LogBufferPool.cs
similarity index 95%
rename from src/LogExpert.Core/Classes/Log/LogBufferPool.cs
rename to src/LogExpert.Core/Classes/Log/Buffers/LogBufferPool.cs
index b7bbed59..fb825902 100644
--- a/src/LogExpert.Core/Classes/Log/LogBufferPool.cs
+++ b/src/LogExpert.Core/Classes/Log/Buffers/LogBufferPool.cs
@@ -2,7 +2,7 @@
using ColumnizerLib;
-namespace LogExpert.Core.Classes.Log;
+namespace LogExpert.Core.Classes.Log.Buffers;
public sealed class LogBufferPool (int maxSize)
{
diff --git a/src/LogExpert.Core/Classes/Log/CastingPipelineBuilder.cs b/src/LogExpert.Core/Classes/Log/CastingPipelineBuilder.cs
new file mode 100644
index 00000000..e0a92007
--- /dev/null
+++ b/src/LogExpert.Core/Classes/Log/CastingPipelineBuilder.cs
@@ -0,0 +1,67 @@
+using System.Collections.Concurrent;
+
+using LogExpert.Core.Interfaces;
+
+namespace LogExpert.Core.Classes.Log;
+
+public class CastingPipelineBuilder : IPipeline