SemaphoreSlim Await Priority - C#

I am wondering if SemaphoreSlim has anything like a priority when awaiting it.
I have not been able to find anything, but maybe someone has done something like this before.
The idea is that, if I need to, a later await on the semaphore can be given a higher priority, and the semaphore will let that await return first.

No, there are no priorities in SemaphoreSlim, whether you're using synchronous or asynchronous locking.
There is very rarely a need for priorities with asynchronous locks. Usually these kinds of problems have more elegant solutions if you take a step back and look at the bigger picture.

Here is a class PrioritySemaphore<TPriority> that can be acquired with priority. Internally it is based on the SortedSet collection.
public class PrioritySemaphore<TPriority>
{
private readonly PriorityQueue _priorityQueue;
private readonly object _locker = new object();
private readonly int _maxCount;
private int _currentCount;
private long _indexSeed = 0;
public PrioritySemaphore(int initialCount, int maxCount,
IComparer<TPriority> comparer = null)
{
if (initialCount < 0)
throw new ArgumentOutOfRangeException(nameof(initialCount));
if (maxCount <= 0) throw new ArgumentOutOfRangeException(nameof(maxCount));
_priorityQueue = new PriorityQueue(comparer);
_currentCount = initialCount;
_maxCount = maxCount;
}
public PrioritySemaphore(int initialCount, IComparer<TPriority> comparer = null)
: this(initialCount, Int32.MaxValue, comparer) { }
public PrioritySemaphore(IComparer<TPriority> comparer = null)
: this(0, Int32.MaxValue, comparer) { }
public int CurrentCount { get { lock (_locker) return _currentCount; } }
public async Task<bool> WaitAsync(TPriority priority, int millisecondsTimeout,
CancellationToken cancellationToken = default)
{
if (millisecondsTimeout < -1)
throw new ArgumentOutOfRangeException(nameof(millisecondsTimeout));
cancellationToken.ThrowIfCancellationRequested();
lock (_locker)
{
if (_currentCount > 0)
{
_currentCount--;
return true;
}
}
if (millisecondsTimeout == 0) return false;
var tcs = new TaskCompletionSource<bool>(
TaskCreationOptions.RunContinuationsAsynchronously);
long entryIndex = -1;
bool taskCompleted = false;
Timer timer = null;
if (millisecondsTimeout > 0)
{
timer = new Timer(_ =>
{
bool doComplete;
lock (_locker)
{
doComplete = entryIndex == -1
|| _priorityQueue.Remove(priority, entryIndex);
if (doComplete) taskCompleted = true;
}
if (doComplete) tcs.TrySetResult(false);
}, null, millisecondsTimeout, Timeout.Infinite);
}
CancellationTokenRegistration registration = default;
if (cancellationToken.CanBeCanceled)
{
registration = cancellationToken.Register(() =>
{
bool doComplete;
lock (_locker)
{
doComplete = entryIndex == -1
|| _priorityQueue.Remove(priority, entryIndex);
if (doComplete) taskCompleted = true;
}
if (doComplete) tcs.TrySetCanceled(cancellationToken);
});
}
bool disposeSubscriptions = false;
lock (_locker)
{
if (!taskCompleted)
{
entryIndex = _indexSeed++;
_priorityQueue.Enqueue(priority, entryIndex, tcs, timer, registration);
}
else
{
disposeSubscriptions = true;
}
}
if (disposeSubscriptions)
{
timer?.Dispose();
registration.Dispose();
}
return await tcs.Task.ConfigureAwait(false);
}
public Task WaitAsync(TPriority priority,
CancellationToken cancellationToken = default)
{
return WaitAsync(priority, Timeout.Infinite, cancellationToken);
}
public void Release()
{
TaskCompletionSource<bool> tcs;
Timer timer;
CancellationTokenRegistration registration;
lock (_locker)
{
if (_priorityQueue.IsEmpty)
{
if (_currentCount >= _maxCount) throw new SemaphoreFullException();
_currentCount++;
return;
}
(tcs, timer, registration) = _priorityQueue.Dequeue();
}
tcs.TrySetResult(true);
timer?.Dispose();
registration.Dispose();
}
private class PriorityQueue : IComparer<(TPriority Priority, long Index,
TaskCompletionSource<bool>, Timer, CancellationTokenRegistration)>
{
private readonly SortedSet<(TPriority Priority, long Index,
TaskCompletionSource<bool> TCS, Timer Timer,
CancellationTokenRegistration Registration)> _sortedSet;
private readonly IComparer<TPriority> _priorityComparer;
private readonly Comparer<long> _indexComparer = Comparer<long>.Default;
public PriorityQueue(IComparer<TPriority> comparer)
{
_priorityComparer = comparer ?? Comparer<TPriority>.Default;
_sortedSet = new SortedSet<(TPriority Priority, long Index,
TaskCompletionSource<bool> TCS, Timer Timer,
CancellationTokenRegistration Registration)>(this);
}
public bool IsEmpty => _sortedSet.Count == 0;
public void Enqueue(TPriority priority, long index,
TaskCompletionSource<bool> tcs, Timer timer,
CancellationTokenRegistration registration)
{
_sortedSet.Add((priority, index, tcs, timer, registration));
}
public (TaskCompletionSource<bool>, Timer, CancellationTokenRegistration)
Dequeue()
{
Debug.Assert(_sortedSet.Count > 0);
var entry = _sortedSet.Min;
_sortedSet.Remove(entry);
return (entry.TCS, entry.Timer, entry.Registration);
}
public bool Remove(TPriority priority, long index)
{
return _sortedSet.Remove((priority, index, default, default, default));
}
public int Compare((TPriority Priority, long Index,
TaskCompletionSource<bool>, Timer, CancellationTokenRegistration) x,
(TPriority Priority, long Index, TaskCompletionSource<bool>, Timer,
CancellationTokenRegistration) y)
{
int result = _priorityComparer.Compare(x.Priority, y.Priority);
if (result == 0) result = _indexComparer.Compare(x.Index, y.Index);
return result;
}
}
}
Usage example:
var semaphore = new PrioritySemaphore<int>();
//...
await semaphore.WaitAsync(priority: 1);
//...
await semaphore.WaitAsync(priority: 2);
//...
semaphore.Release();
After the Release, the semaphore will be acquired by the awaiter with the highest priority. In the above example it will be the awaiter with priority 1. Smaller values denote higher priority. If there is more than one awaiter with the same highest priority, the semaphore will be acquired by the one that requested it first (FIFO order is maintained).
The class PrioritySemaphore<TPriority> has only an asynchronous API. It supports awaiting with a timeout and with a CancellationToken, but these features have not been tested extensively.
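Here is a minimal usage sketch (not part of the original answer) of the timeout/cancellation overload; the surrounding names are illustrative:
var semaphore = new PrioritySemaphore<int>(initialCount: 1);
using var cts = new CancellationTokenSource();
// Wait up to 5 seconds with priority 1. Returns false if the timeout elapses,
// and throws an OperationCanceledException if the token is canceled while waiting.
bool acquired = await semaphore.WaitAsync(priority: 1, millisecondsTimeout: 5000, cts.Token);
if (acquired)
{
    try { /* protected work */ }
    finally { semaphore.Release(); }
}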
Note: .NET 6 introduced the PriorityQueue<TElement, TPriority> class, which in theory could be used to simplify the above implementation. Unfortunately the new class does not support removing specific elements from the queue; only dequeuing is supported. Implementing the cancellation and timeout functionality of the PrioritySemaphore<TPriority> class requires removing specific elements from the queue, so the new class cannot be used in the above implementation.

How to use named mutexes and async/await in .NET Core 3.1?

I am implementing a caching layer for my ASP.NET Core 3.1 Web API.
Starting Implementation
public interface ICache
{
T Get<T>(string key);
void Set<T>(string key, T value);
}
public static class ICacheExtensions
{
public static T GetOrCreate<T>(this ICache cache, string key, Func<T> factory)
{
var value = cache.Get<T>(key);
if (EqualityComparer<T>.Default.Equals(value, default(T)))
{
value = factory();
if (!EqualityComparer<T>.Default.Equals(value, default(T)))
{
cache.Set(key, value);
}
}
return value;
}
public static async Task<T> GetOrCreateAsync<T>(this ICache cache, string key, Func<Task<T>> factory)
{
var value = cache.Get<T>(key);
if (EqualityComparer<T>.Default.Equals(value, default(T)))
{
value = await factory().ConfigureAwait(false);
if (!EqualityComparer<T>.Default.Equals(value, default(T)))
{
cache.Set(key, value);
}
}
return value;
}
}
This works fine, but one known problem I'm trying to address is that it is susceptible to cache stampedes. If my API is handling many requests that all try to access the same key using one of the GetOrCreate methods at the same time, they will each run a parallel instance of the factory function. This means redundant work and wasted resources.
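For illustration, here is a minimal sketch (not from the original question; cache stands for any ICache implementation and the key is made up) that reproduces the stampede: every concurrent caller misses the cache and runs the factory.
int factoryRuns = 0;
var tasks = Enumerable.Range(0, 20).Select(_ =>
    cache.GetOrCreateAsync("weather:today", async () =>
    {
        Interlocked.Increment(ref factoryRuns);
        await Task.Delay(500); // simulate an expensive call
        return "sunny";
    }));
await Task.WhenAll(tasks);
Console.WriteLine(factoryRuns); // prints a number close to 20 instead of 1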
What I have attempted to do is introduce mutexes to ensure that only one instance of the factory function can run per cache key.
Introduce Mutexes
public interface ICache
{
T Get<T>(string key);
void Set<T>(string key, T value);
}
public static class ICacheExtensions
{
public static T GetOrCreate<T>(this ICache cache, string key, Func<T> factory)
{
using var mutex = new Mutex(false, key);
var value = cache.Get<T>(key);
if (EqualityComparer<T>.Default.Equals(value, default(T)))
{
mutex.WaitOne();
try
{
value = cache.Get<T>(key);
if (EqualityComparer<T>.Default.Equals(value, default(T)))
{
value = factory();
if (!EqualityComparer<T>.Default.Equals(value, default(T)))
{
cache.Set(key, value);
}
}
}
finally
{
mutex.ReleaseMutex();
}
}
return value;
}
public static async Task<T> GetOrCreateAsync<T>(this ICache cache, string key, Func<Task<T>> factory)
{
using var mutex = new Mutex(false, key);
var value = cache.Get<T>(key);
if (EqualityComparer<T>.Default.Equals(value, default(T)))
{
mutex.WaitOne();
try
{
value = cache.Get<T>(key);
if (EqualityComparer<T>.Default.Equals(value, default(T)))
{
value = await factory().ConfigureAwait(false);
if (!EqualityComparer<T>.Default.Equals(value, default(T)))
{
cache.Set(key, value);
}
}
}
finally
{
mutex.ReleaseMutex();
}
}
return value;
}
}
This works great for GetOrCreate(), but GetOrCreateAsync() throws an exception. Turns out mutexes are thread-bound so if WaitOne() and ReleaseMutex() are called on different threads (as tends to happen with async/await), the mutex doesn't like that and throws an exception. I found this other SO question that describes some workarounds and decided to go with a custom task scheduler. SingleThreadedTaskScheduler schedules tasks using a thread pool containing exactly one thread. And I intend to interact with the mutex only from that thread.
SingleThreadedTaskScheduler
internal sealed class SingleThreadedTaskScheduler : TaskScheduler, IDisposable
{
private readonly Thread _thread;
private BlockingCollection<Task> _tasks;
public SingleThreadedTaskScheduler()
{
_tasks = new BlockingCollection<Task>();
_thread = new Thread(() =>
{
foreach (var t in _tasks.GetConsumingEnumerable())
{
TryExecuteTask(t);
}
});
_thread.IsBackground = true;
_thread.Start();
}
protected override IEnumerable<Task> GetScheduledTasks()
{
return _tasks.ToArray();
}
protected override void QueueTask(Task task)
{
_tasks.Add(task);
}
protected override bool TryExecuteTaskInline(Task task, bool taskWasPreviouslyQueued)
{
return false;
}
public void Dispose()
{
_tasks?.CompleteAdding();
_thread?.Join();
_tasks?.Dispose();
_tasks = null;
}
}
GetOrCreateAsync with SingleThreadedTaskScheduler
private static readonly TaskScheduler _mutexTaskScheduler = new SingleThreadedTaskScheduler();
public static async Task<T> GetOrCreateAsync<T>(this ICache cache, string key, Func<Task<T>> factory)
{
using var mutex = new Mutex(false, key);
var value = cache.Get<T>(key);
if (EqualityComparer<T>.Default.Equals(value, default(T)))
{
await Task.Factory
.StartNew(() => mutex.WaitOne(), CancellationToken.None, TaskCreationOptions.None, _mutexTaskScheduler)
.ConfigureAwait(false);
try
{
value = cache.Get<T>(key);
if (EqualityComparer<T>.Default.Equals(value, default(T)))
{
value = await factory().ConfigureAwait(false);
if (!EqualityComparer<T>.Default.Equals(value, default(T)))
{
cache.Set(key, value);
}
}
}
finally
{
await Task.Factory
.StartNew(() => mutex.ReleaseMutex(), CancellationToken.None, TaskCreationOptions.None, _mutexTaskScheduler)
.ConfigureAwait(false);
}
}
return value;
}
With this implementation, the exception is resolved, but GetOrCreateAsync still calls the factory function many times in a cache stampede scenario. Am I missing something?
I've also tried using SemaphoreSlim instead of Mutex which should play nicer with async/await. The issue here is that Linux doesn't support named semaphores so I'd have to keep all my semaphores in a Dictionary<string, SemaphoreSlim> and that would be too cumbersome to manage.
The linked solution only works when using a named mutex to synchronize asynchronous code across processes. It won't work to synchronize code within the same process. Mutexes allow recursive acquisition, so by moving all acquisitions on the same thread, it's the same as if the mutex isn't there at all.
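A minimal sketch of the reentrancy issue (illustrative only): on the owning thread a second WaitOne succeeds immediately, so marshaling every acquisition onto one dedicated thread removes the mutual exclusion you were hoping for.
using var mutex = new Mutex(false, "demo-mutex"); // the name is illustrative
mutex.WaitOne();      // acquired
mutex.WaitOne();      // same thread: acquired again immediately (recursive)
mutex.ReleaseMutex(); // must be released once per acquisition
mutex.ReleaseMutex();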
I'd have to keep all my semaphores in a Dictionary<string, SemaphoreSlim> and that would be too cumbersome to manage.
If you need a non-recursive named mutex, named Semaphores (which don't work on Linux) or managing your own dictionary is really the only way to go.
I have an AsyncCache<T> that I've been working on but isn't prod-ready yet. It tries to look like a cache of Task<T> instances but is actually a cache of TaskCompletionSource<T> instances.
Using semaphores appears to work. Credit to Stephen Cleary for confirming that this was a better route than Mutexes.
public static async Task<T> GetOrCreateAsync<T>(this ICache cache, string key, Func<Task<T>> factory)
{
var value = cache.Get<T>(key);
if (EqualityComparer<T>.Default.Equals(value, default(T)))
{
WaitOne(key);
try
{
value = cache.Get<T>(key);
if (EqualityComparer<T>.Default.Equals(value, default(T)))
{
value = await factory().ConfigureAwait(false);
if (!EqualityComparer<T>.Default.Equals(value, default(T)))
{
cache.Set(key, value);
}
}
ReleaseAll(key);
}
catch (Exception)
{
ReleaseOne(key);
throw;
}
}
return value;
}
private static readonly ConcurrentDictionary<string, SemaphoreSlim> _semaphores = new ConcurrentDictionary<string, SemaphoreSlim>();
private static void WaitOne(string key)
{
var semaphore = _semaphores.GetOrAdd(key, k => new SemaphoreSlim(1, int.MaxValue));
semaphore.Wait();
}
private static void ReleaseOne(string key)
{
var semaphore = _semaphores.GetOrAdd(key, k => new SemaphoreSlim(0, int.MaxValue));
semaphore.Release();
}
private static void ReleaseAll(string key)
{
var semaphore = default(SemaphoreSlim);
_semaphores.TryRemove(key, out semaphore);
semaphore?.Release(int.MaxValue);
semaphore?.Dispose();
}
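If blocking a thread with the synchronous Wait() is a concern, a possible variation (an untested sketch, not part of the answer above) is to await the semaphore instead:
private static async Task WaitOneAsync(string key)
{
    var semaphore = _semaphores.GetOrAdd(key, k => new SemaphoreSlim(1, int.MaxValue));
    await semaphore.WaitAsync().ConfigureAwait(false);
}
// ...and in GetOrCreateAsync call: await WaitOneAsync(key);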

TPL DataFlow Queue with Postponement

I am processing a queue concurrently using an ActionBlock.
The one catch here is that when processing an item in the queue, I may want to wait until a dependency is satisfied by the processing of another item in the queue.
I think I should be able to do this with the TPL DataFlow library with linking, postponement and release of postponement but I'm not sure what constructs to use.
In pseudocode:
public class Item
{
public string Name { get; set; }
public List<string> DependsOn = new List<string>();
}
ActionBlock<Item> block = null;
var block = new ActionBlock<Item>(o => {
if (!HasActionBlockProcessedAllDependencies(o.DependsOn))
{
// enqueue a callback when ALL dependencies have been completed
}
else
{
DoWork(o);
}
},
new ExecutionDataflowBlockOptions {
MaxDegreeOfParallelism = resourceProcessorOptions.MaximumProviderConcurrency
});
var items = new[]
{
new Item { Name = "Apple", DependsOn = { "Pear" } },
new Item { Name = "Pear" }
};
I am not sure if this will be helpful to you, but here is a custom DependencyTransformBlock class that knows about the dependencies between the items it receives, and processes each one only after its dependencies have been successfully processed. This custom block supports all the built-in functionality of a normal TransformBlock, except for the EnsureOrdered option.
The constructors of this class accept a Func<TInput, TKey> lambda for retrieving the key of each item, and a Func<TInput, IReadOnlyCollection<TKey>> lambda for retrieving its dependencies. The keys are expected to be unique. In case a duplicate key is found, the block will complete with failure.
In case of circular dependencies between items, the affected items will remain unprocessed. The property TInput[] Unprocessed allows retrieving the unprocessed items after the completion of the block. An item can also remain unprocessed in case any of its dependencies is not supplied.
public class DependencyTransformBlock<TInput, TKey, TOutput> :
ITargetBlock<TInput>, ISourceBlock<TOutput>
{
private readonly ITargetBlock<TInput> _inputBlock;
private readonly IPropagatorBlock<Item, TOutput> _transformBlock;
private readonly object _locker = new object();
private readonly Dictionary<TKey, Item> _items;
private int _pendingCount = 1;
// The initial 1 represents the completion of the _inputBlock
private class Item
{
public TKey Key;
public TInput Input;
public bool HasInput;
public bool IsCompleted;
public HashSet<Item> Dependencies;
public HashSet<Item> Dependents;
public Item(TKey key) => Key = key;
}
public DependencyTransformBlock(
Func<TInput, Task<TOutput>> transform,
Func<TInput, TKey> keySelector,
Func<TInput, IReadOnlyCollection<TKey>> dependenciesSelector,
ExecutionDataflowBlockOptions dataflowBlockOptions = null,
IEqualityComparer<TKey> keyComparer = null)
{
if (transform == null)
throw new ArgumentNullException(nameof(transform));
if (keySelector == null)
throw new ArgumentNullException(nameof(keySelector));
if (dependenciesSelector == null)
throw new ArgumentNullException(nameof(dependenciesSelector));
dataflowBlockOptions =
dataflowBlockOptions ?? new ExecutionDataflowBlockOptions();
keyComparer = keyComparer ?? EqualityComparer<TKey>.Default;
_items = new Dictionary<TKey, Item>(keyComparer);
_inputBlock = new ActionBlock<TInput>(async input =>
{
var key = keySelector(input);
var dependencyKeys = dependenciesSelector(input);
bool isReadyForProcessing = true;
Item item;
lock (_locker)
{
if (!_items.TryGetValue(key, out item))
{
item = new Item(key);
_items.Add(key, item);
}
if (item.HasInput)
throw new InvalidOperationException($"Duplicate key ({key}).");
item.Input = input;
item.HasInput = true;
if (dependencyKeys != null && dependencyKeys.Count > 0)
{
item.Dependencies = new HashSet<Item>();
foreach (var dependencyKey in dependencyKeys)
{
if (!_items.TryGetValue(dependencyKey, out var dependency))
{
dependency = new Item(dependencyKey);
_items.Add(dependencyKey, dependency);
}
if (!dependency.IsCompleted)
{
item.Dependencies.Add(dependency);
if (dependency.Dependents == null)
dependency.Dependents = new HashSet<Item>();
dependency.Dependents.Add(item);
}
}
isReadyForProcessing = item.Dependencies.Count == 0;
}
if (isReadyForProcessing) _pendingCount++;
}
if (isReadyForProcessing)
{
await _transformBlock.SendAsync(item);
}
}, new ExecutionDataflowBlockOptions()
{
CancellationToken = dataflowBlockOptions.CancellationToken,
BoundedCapacity = 1
});
var middleBuffer = new BufferBlock<Item>(new DataflowBlockOptions()
{
CancellationToken = dataflowBlockOptions.CancellationToken,
BoundedCapacity = DataflowBlockOptions.Unbounded
});
_transformBlock = new TransformBlock<Item, TOutput>(async item =>
{
try
{
TInput input;
lock (_locker)
{
Debug.Assert(item.HasInput && !item.IsCompleted);
input = item.Input;
}
var result = await transform(input).ConfigureAwait(false);
lock (_locker)
{
item.IsCompleted = true;
if (item.Dependents != null)
{
foreach (var dependent in item.Dependents)
{
Debug.Assert(dependent.Dependencies != null);
var removed = dependent.Dependencies.Remove(item);
Debug.Assert(removed);
if (dependent.HasInput
&& dependent.Dependencies.Count == 0)
{
middleBuffer.Post(dependent);
_pendingCount++;
}
}
}
item.Input = default; // Cleanup
item.Dependencies = null;
item.Dependents = null;
}
return result;
}
finally
{
lock (_locker)
{
_pendingCount--;
if (_pendingCount == 0) middleBuffer.Complete();
}
}
}, dataflowBlockOptions);
middleBuffer.LinkTo(_transformBlock);
PropagateCompletion(_inputBlock, middleBuffer,
condition: () => { lock (_locker) return --_pendingCount == 0; });
PropagateCompletion(middleBuffer, _transformBlock);
PropagateFailure(_transformBlock, middleBuffer);
PropagateFailure(_transformBlock, _inputBlock);
}
// Constructor with synchronous lambda
public DependencyTransformBlock(
Func<TInput, TOutput> transform,
Func<TInput, TKey> keySelector,
Func<TInput, IReadOnlyCollection<TKey>> dependenciesSelector,
ExecutionDataflowBlockOptions dataflowBlockOptions = null,
IEqualityComparer<TKey> keyComparer = null) : this(
input => Task.FromResult(transform(input)),
keySelector, dependenciesSelector, dataflowBlockOptions, keyComparer)
{
if (transform == null) throw new ArgumentNullException(nameof(transform));
}
public TInput[] Unprocessed
{
get
{
lock (_locker) return _items.Values
.Where(item => item.HasInput && !item.IsCompleted)
.Select(item => item.Input)
.ToArray();
}
}
public Task Completion => _transformBlock.Completion;
public void Complete() => _inputBlock.Complete();
void IDataflowBlock.Fault(Exception ex) => _inputBlock.Fault(ex);
DataflowMessageStatus ITargetBlock<TInput>.OfferMessage(
DataflowMessageHeader header, TInput value, ISourceBlock<TInput> source,
bool consumeToAccept)
{
return _inputBlock.OfferMessage(header, value, source, consumeToAccept);
}
TOutput ISourceBlock<TOutput>.ConsumeMessage(DataflowMessageHeader header,
ITargetBlock<TOutput> target, out bool messageConsumed)
{
return _transformBlock.ConsumeMessage(header, target, out messageConsumed);
}
bool ISourceBlock<TOutput>.ReserveMessage(DataflowMessageHeader header,
ITargetBlock<TOutput> target)
{
return _transformBlock.ReserveMessage(header, target);
}
void ISourceBlock<TOutput>.ReleaseReservation(DataflowMessageHeader header,
ITargetBlock<TOutput> target)
{
_transformBlock.ReleaseReservation(header, target);
}
public IDisposable LinkTo(ITargetBlock<TOutput> target,
DataflowLinkOptions linkOptions)
{
return _transformBlock.LinkTo(target, linkOptions);
}
private async void PropagateCompletion(IDataflowBlock source,
IDataflowBlock target, Func<bool> condition = null)
{
try { await source.Completion.ConfigureAwait(false); } catch { }
if (source.Completion.IsFaulted)
target.Fault(source.Completion.Exception.InnerException);
else
if (condition == null || condition()) target.Complete();
}
private async void PropagateFailure(IDataflowBlock source,
IDataflowBlock target)
{
try { await source.Completion.ConfigureAwait(false); } catch { }
if (source.Completion.IsFaulted)
target.Fault(source.Completion.Exception.InnerException);
}
}
Usage example:
var block = new DependencyTransformBlock<Item, string, Item>(item =>
{
DoWork(item);
return item;
},
keySelector: item => item.Name,
dependenciesSelector: item => item.DependsOn,
new ExecutionDataflowBlockOptions
{
MaxDegreeOfParallelism = Environment.ProcessorCount
},
keyComparer: StringComparer.OrdinalIgnoreCase);
//...
block.LinkTo(DataflowBlock.NullTarget<Item>());
In this example the block is linked to a NullTarget in order to discard its output, so that it becomes essentially an ActionBlock equivalent.
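A possible continuation (not in the original answer) that feeds the items array from the question, awaits completion, and inspects anything left unprocessed because of missing or circular dependencies:
foreach (var item in items) await block.SendAsync(item);
block.Complete();
await block.Completion;
foreach (var unprocessed in block.Unprocessed)
    Console.WriteLine($"Not processed: {unprocessed.Name}");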

Thread-safe Cached Enumerator - lock with yield

I have a custom "CachedEnumerable" class (inspired by Caching IEnumerable) that I need to make thread safe for my asp.net core web app.
Is the following implementation of the Enumerator thread safe? (All other reads/writes to IList _cache are locked appropriately) (Possibly related to Does the C# Yield free a lock?)
And more specifically, if there are 2 threads accessing the enumerator, how do I protect against one thread incrementing "index" and causing a second enumerating thread to get the wrong element from the _cache (i.e. the element at index + 1 instead of at index)? Is this race condition a real concern?
public IEnumerator<T> GetEnumerator()
{
var index = 0;
while (true)
{
T current;
lock (_enumeratorLock)
{
if (index >= _cache.Count && !MoveNext()) break;
current = _cache[index];
index++;
}
yield return current;
}
}
Full code of my version of CachedEnumerable:
public class CachedEnumerable<T> : IDisposable, IEnumerable<T>
{
IEnumerator<T> _enumerator;
private IList<T> _cache = new List<T>();
public bool CachingComplete { get; private set; } = false;
public CachedEnumerable(IEnumerable<T> enumerable)
{
switch (enumerable)
{
case CachedEnumerable<T> cachedEnumerable: //This case is actually dealt with by the extension method.
_cache = cachedEnumerable._cache;
CachingComplete = cachedEnumerable.CachingComplete;
_enumerator = cachedEnumerable.GetEnumerator();
break;
case IList<T> list:
//_cache = list; //without clone...
//Clone:
_cache = new T[list.Count];
list.CopyTo((T[]) _cache, 0);
CachingComplete = true;
break;
default:
_enumerator = enumerable.GetEnumerator();
break;
}
}
public CachedEnumerable(IEnumerator<T> enumerator)
{
_enumerator = enumerator;
}
private int CurCacheCount
{
get
{
lock (_enumeratorLock)
{
return _cache.Count;
}
}
}
public IEnumerator<T> GetEnumerator()
{
var index = 0;
while (true)
{
T current;
lock (_enumeratorLock)
{
if (index >= _cache.Count && !MoveNext()) break;
current = _cache[index];
index++;
}
yield return current;
}
}
//private readonly AsyncLock _enumeratorLock = new AsyncLock();
private readonly object _enumeratorLock = new object();
private bool MoveNext()
{
if (CachingComplete) return false;
if (_enumerator != null && _enumerator.MoveNext()) //The null check should have been unnecessary b/c of the lock...
{
_cache.Add(_enumerator.Current);
return true;
}
else
{
CachingComplete = true;
DisposeWrappedEnumerator(); //Release the enumerator, as it is no longer needed.
}
return false;
}
public T ElementAt(int index)
{
lock (_enumeratorLock)
{
if (index < _cache.Count)
{
return _cache[index];
}
}
EnumerateUntil(index);
lock (_enumeratorLock)
{
if (_cache.Count <= index) throw new ArgumentOutOfRangeException(nameof(index));
return _cache[index];
}
}
public bool TryGetElementAt(int index, out T value)
{
lock (_enumeratorLock)
{
value = default;
if (index < CurCacheCount)
{
value = _cache[index];
return true;
}
}
EnumerateUntil(index);
lock (_enumeratorLock)
{
if (_cache.Count <= index) return false;
value = _cache[index];
}
return true;
}
private void EnumerateUntil(int index)
{
while (true)
{
lock (_enumeratorLock)
{
if (_cache.Count > index || !MoveNext()) break;
}
}
}
public void Dispose()
{
DisposeWrappedEnumerator();
}
private void DisposeWrappedEnumerator()
{
if (_enumerator != null)
{
_enumerator.Dispose();
_enumerator = null;
if (_cache is List<T> list)
{
list.TrimExcess();
}
}
}
IEnumerator IEnumerable.GetEnumerator()
{
return GetEnumerator();
}
public int CachedCount
{
get
{
lock (_enumeratorLock)
{
return _cache.Count;
}
}
}
public int Count()
{
if (CachingComplete)
{
return _cache.Count;
}
EnsureCachingComplete();
return _cache.Count;
}
private void EnsureCachingComplete()
{
if (CachingComplete)
{
return;
}
//Enumerate the rest of the collection
while (!CachingComplete)
{
lock (_enumeratorLock)
{
if (!MoveNext()) break;
}
}
}
public T[] ToArray()
{
EnsureCachingComplete();
//Once Caching is complete, we don't need to lock
if (!(_cache is T[] array))
{
array = _cache.ToArray();
_cache = array;
}
return array;
}
public T this[int index] => ElementAt(index);
}
public static class EnumerableExtensions // static container class required for the extension method; the name is illustrative
{
public static CachedEnumerable<T> Cached<T>(this IEnumerable<T> source)
{
//no gain in caching a cache.
if (source is CachedEnumerable<T> cached)
{
return cached;
}
return new CachedEnumerable<T>(source);
}
}
Basic Usage: (Although not a meaningful use case)
var cached = expensiveEnumerable.Cached();
foreach (var element in cached) {
Console.WriteLine(element);
}
Update
I tested the current implementation based on Theodor's answer https://stackoverflow.com/a/58547863/5683904 and confirmed (AFAICT) that it is thread-safe when enumerated with a foreach, without creating duplicate values:
class Program
{
static async Task Main(string[] args)
{
var enumerable = Enumerable.Range(0, 1_000_000);
var cachedEnumerable = new CachedEnumerable<int>(enumerable);
var c = new ConcurrentDictionary<int, List<int>>();
var tasks = Enumerable.Range(1, 100).Select(id => Test(id, cachedEnumerable, c));
Task.WaitAll(tasks.ToArray());
foreach (var keyValuePair in c)
{
var hasDuplicates = keyValuePair.Value.Distinct().Count() != keyValuePair.Value.Count;
Console.WriteLine($"Task #{keyValuePair.Key} count: {keyValuePair.Value.Count}. Has duplicates? {hasDuplicates}");
}
}
static async Task Test(int id, IEnumerable<int> cache, ConcurrentDictionary<int, List<int>> c)
{
foreach (var i in cache)
{
//await Task.Delay(10);
c.AddOrUpdate(id, v => new List<int>() {i}, (k, v) =>
{
v.Add(i);
return v;
});
}
}
}
Your class is not thread safe, because shared state is mutated in unprotected regions inside your class. The unprotected regions are:
The constructor
The Dispose method
The shared state is:
The _enumerator private field
The _cache private field
The CachingComplete public property
Some other issues regarding your class:
Implementing IDisposable puts the responsibility of disposing your class on the caller. There is no need for IEnumerables to be disposable. On the contrary, IEnumerators are disposable, but there is language support for their automatic disposal (a feature of the foreach statement).
Your class offers extended functionality not expected from an IEnumerable (ElementAt, Count etc.). Maybe you intended to implement a CachedList instead? Without implementing the IList<T> interface, LINQ methods like Count() and ToArray() cannot take advantage of your extended functionality, and will use the slow path like they do with plain vanilla IEnumerables.
Update: I just noticed another thread-safety issue. This one is related to the public IEnumerator<T> GetEnumerator() method. The enumerator is compiler-generated, since the method is an iterator (utilizes yield return). Compiler-generated enumerators are not thread safe. Consider this code for example:
var enumerable = Enumerable.Range(0, 1_000_000);
var cachedEnumerable = new CachedEnumerable<int>(enumerable);
var enumerator = cachedEnumerable.GetEnumerator();
var tasks = Enumerable.Range(1, 4).Select(id => Task.Run(() =>
{
int count = 0;
while (enumerator.MoveNext())
{
count++;
}
Console.WriteLine($"Task #{id} count: {count}");
})).ToArray();
Task.WaitAll(tasks);
Four threads are concurrently using the same IEnumerator. The enumerable has 1,000,000 items. You may expect that each thread would enumerate ~250,000 items, but that's not what happens.
Output:
Task #1 count: 0
Task #4 count: 0
Task #3 count: 0
Task #2 count: 1000000
The MoveNext in the line while (enumerator.MoveNext()) is not your safe MoveNext. It is the compiler-generated, unsafe MoveNext. Although unsafe, it includes a mechanism (probably intended for dealing with exceptions) that temporarily marks the enumerator as finished before calling the externally provided code. So when multiple threads call MoveNext concurrently, all but the first will get a return value of false and will terminate the enumeration instantly, having completed zero loops. To solve this you would probably have to code your own IEnumerator class.
Update: Actually my last point about thread-safe enumeration is a bit unfair, because enumerating with the IEnumerator interface is an inherently unsafe operation, which is impossible to fix without the cooperation of the calling code. This is because obtaining the next element is not an atomic operation, since it involves two steps (call MoveNext() + read Current). So your thread-safety concerns are limited to the protection of the internal state of your class (fields _enumerator, _cache and CachingComplete). These are left unprotected only in the constructor and in the Dispose method, but I suppose that the normal use of your class may not follow code paths that create the race conditions that would result in internal state corruption.
Personally I would prefer to take care of these code paths too, and I wouldn't leave it to the whims of chance.
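For example, guarding the Dispose path could look something like this (a minimal sketch, reusing the existing _enumeratorLock):
public void Dispose()
{
    lock (_enumeratorLock)
    {
        DisposeWrappedEnumerator();
    }
}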
Update: I wrote a cache for IAsyncEnumerables, to demonstrate an alternative technique. The enumeration of the source IAsyncEnumerable is not driven by the callers, using locks or semaphores to obtain exclusive access, but by a separate worker-task. The first caller starts the worker-task. Each caller at first yields all items that are already cached, and then awaits more items, or a notification that there are no more items. As the notification mechanism I used a TaskCompletionSource<bool>. A lock is still used to ensure that all access to shared resources is synchronized.
public class CachedAsyncEnumerable<T> : IAsyncEnumerable<T>
{
private readonly object _locker = new object();
private IAsyncEnumerable<T> _source;
private Task _sourceEnumerationTask;
private List<T> _buffer;
private TaskCompletionSource<bool> _moveNextTCS;
private Exception _sourceEnumerationException;
private int _sourceEnumerationVersion; // Incremented on exception
public CachedAsyncEnumerable(IAsyncEnumerable<T> source)
{
_source = source ?? throw new ArgumentNullException(nameof(source));
}
public async IAsyncEnumerator<T> GetAsyncEnumerator(
CancellationToken cancellationToken = default)
{
lock (_locker)
{
if (_sourceEnumerationTask == null)
{
_buffer = new List<T>();
_moveNextTCS = new TaskCompletionSource<bool>();
_sourceEnumerationTask = Task.Run(
() => EnumerateSourceAsync(cancellationToken));
}
}
int index = 0;
int localVersion = -1;
while (true)
{
T current = default;
Task<bool> moveNextTask = null;
lock (_locker)
{
if (localVersion == -1)
{
localVersion = _sourceEnumerationVersion;
}
else if (_sourceEnumerationVersion != localVersion)
{
ExceptionDispatchInfo
.Capture(_sourceEnumerationException).Throw();
}
if (index < _buffer.Count)
{
current = _buffer[index];
index++;
}
else
{
moveNextTask = _moveNextTCS.Task;
}
}
if (moveNextTask == null)
{
yield return current;
continue;
}
var moved = await moveNextTask;
if (!moved) yield break;
lock (_locker)
{
current = _buffer[index];
index++;
}
yield return current;
}
}
private async Task EnumerateSourceAsync(CancellationToken cancellationToken)
{
TaskCompletionSource<bool> localMoveNextTCS;
try
{
await foreach (var item in _source.WithCancellation(cancellationToken))
{
lock (_locker)
{
_buffer.Add(item);
localMoveNextTCS = _moveNextTCS;
_moveNextTCS = new TaskCompletionSource<bool>();
}
localMoveNextTCS.SetResult(true);
}
lock (_locker)
{
localMoveNextTCS = _moveNextTCS;
_buffer.TrimExcess();
_source = null;
}
localMoveNextTCS.SetResult(false);
}
catch (Exception ex)
{
lock (_locker)
{
localMoveNextTCS = _moveNextTCS;
_sourceEnumerationException = ex;
_sourceEnumerationVersion++;
_sourceEnumerationTask = null;
}
localMoveNextTCS.SetException(ex);
}
}
}
This implementation follows a specific strategy for dealing with exceptions. If an exception occurs while enumerating the source IAsyncEnumerable, the exception will be propagated to all current callers, the currently used IAsyncEnumerator will be discarded, and the incomplete cached data will be discarded too. A new worker-task may start again later, when the next enumeration request is received.
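A minimal usage sketch (the SlowSourceAsync generator is illustrative): two consumers enumerate concurrently, while the source is enumerated only once by the worker-task.
async IAsyncEnumerable<int> SlowSourceAsync()
{
    for (int i = 0; i < 10; i++) { await Task.Delay(100); yield return i; }
}

var cached = new CachedAsyncEnumerable<int>(SlowSourceAsync());
var consumerA = Task.Run(async () =>
{
    await foreach (var x in cached) Console.WriteLine($"A: {x}");
});
var consumerB = Task.Run(async () =>
{
    await foreach (var x in cached) Console.WriteLine($"B: {x}");
});
await Task.WhenAll(consumerA, consumerB);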
Access to the cache is thread safe, yes: only one thread at a time can read from the _cache object.
But this way you can't ensure that all threads get elements in the same order in which they called GetEnumerator.
Check these two examples; if the behavior is what you expect, you can use the lock in that way.
Example 1:
THREAD1 Calls GetEnumerator
THREAD1 Initialize T current;
THREAD2 Calls GetEnumerator
THREAD2 Initialize T current;
THREAD2 LOCK THREAD
THREAD1 WAIT
THREAD2 read from cache safely _cache[0]
THREAD2 index++
THREAD2 UNLOCK
THREAD1 LOCK
THREAD1 read from cache safely _cache[1]
THREAD1 index++
THREAD1 UNLOCK
THREAD2 yield return current;
THREAD1 yield return current;
Example 2:
THREAD2 Initialize T current;
THREAD2 LOCK THREAD
THREAD2 read from cache safely
THREAD2 UNLOCK
THREAD1 Initialize T current;
THREAD1 LOCK THREAD
THREAD1 read from cache safely
THREAD1 UNLOCK
THREAD1 yield return current;
THREAD2 yield return current;

BroadcastBlock with guaranteed delivery in TPL Dataflow

I have a stream of data that I process in several different ways... so I would like to send a copy of each message I get to multiple targets so that these targets may execute in parallel... however, I need to set BoundedCapacity on my blocks because the data is streamed in way faster than my targets can handle them and there is a ton of data. Without BoundedCapacity I would quickly run out of memory.
However the problem is BroadcastBlock will drop messages if a target cannot handle it (due to the BoundedCapacity).
What I need is a BroadcastBlock that will not drop messages, but will essentially refuse additional input until it can deliver messages to each target and then is ready for more.
Is there something like this, or has anybody written a custom block that behaves in this manner?
It is fairly simple to build what you're asking using ActionBlock and SendAsync(), something like:
public static ITargetBlock<T> CreateGuaranteedBroadcastBlock<T>(
IEnumerable<ITargetBlock<T>> targets)
{
var targetsList = targets.ToList();
return new ActionBlock<T>(
async item =>
{
foreach (var target in targetsList)
{
await target.SendAsync(item);
}
}, new ExecutionDataflowBlockOptions { BoundedCapacity = 1 });
}
This is the most basic version, but extending it to support mutable list of targets, propagating completion or cloning function should be easy.
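For example, a possible usage sketch (Process1/Process2 and the capacities are placeholders):
var target1 = new ActionBlock<string>(msg => Process1(msg),
    new ExecutionDataflowBlockOptions { BoundedCapacity = 10 });
var target2 = new ActionBlock<string>(msg => Process2(msg),
    new ExecutionDataflowBlockOptions { BoundedCapacity = 10 });
var broadcast = CreateGuaranteedBroadcastBlock(new ITargetBlock<string>[] { target1, target2 });
await broadcast.SendAsync("message"); // waits while any target is full, instead of dropping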
Here is a polished version of svick's idea. The GuaranteedDeliveryBroadcastBlock class below is an (almost) complete substitute for the built-in BroadcastBlock. Linking and unlinking targets at any moment is supported.
public class GuaranteedDeliveryBroadcastBlock<T> :
ITargetBlock<T>, ISourceBlock<T>, IPropagatorBlock<T, T>
{
private class Subscription
{
public readonly ITargetBlock<T> Target;
public readonly bool PropagateCompletion;
public readonly CancellationTokenSource CancellationSource;
public Subscription(ITargetBlock<T> target,
bool propagateCompletion,
CancellationTokenSource cancellationSource)
{
Target = target;
PropagateCompletion = propagateCompletion;
CancellationSource = cancellationSource;
}
}
private readonly object _locker = new object();
private readonly Func<T, T> _cloningFunction;
private readonly CancellationToken _cancellationToken;
private readonly ITargetBlock<T> _actionBlock;
private readonly List<Subscription> _subscriptions = new List<Subscription>();
private readonly Task _completion;
private CancellationTokenSource _faultCTS
= new CancellationTokenSource(); // Is nullified on completion
public GuaranteedDeliveryBroadcastBlock(Func<T, T> cloningFunction,
DataflowBlockOptions dataflowBlockOptions = null)
{
_cloningFunction = cloningFunction
?? throw new ArgumentNullException(nameof(cloningFunction));
dataflowBlockOptions ??= new DataflowBlockOptions();
_cancellationToken = dataflowBlockOptions.CancellationToken;
_actionBlock = new ActionBlock<T>(async item =>
{
Task sendAsyncToAll;
lock (_locker)
{
var allSendAsyncTasks = _subscriptions
.Select(sub => sub.Target.SendAsync(
_cloningFunction(item), sub.CancellationSource.Token));
sendAsyncToAll = Task.WhenAll(allSendAsyncTasks);
}
await sendAsyncToAll;
}, new ExecutionDataflowBlockOptions()
{
CancellationToken = dataflowBlockOptions.CancellationToken,
BoundedCapacity = dataflowBlockOptions.BoundedCapacity,
MaxMessagesPerTask = dataflowBlockOptions.MaxMessagesPerTask,
TaskScheduler = dataflowBlockOptions.TaskScheduler,
});
var afterCompletion = _actionBlock.Completion.ContinueWith(t =>
{
lock (_locker)
{
// PropagateCompletion
foreach (var subscription in _subscriptions)
{
if (subscription.PropagateCompletion)
{
if (t.IsFaulted)
subscription.Target.Fault(t.Exception);
else
subscription.Target.Complete();
}
}
// Cleanup
foreach (var subscription in _subscriptions)
{
subscription.CancellationSource.Dispose();
}
_subscriptions.Clear();
_faultCTS.Dispose();
_faultCTS = null; // Prevent future subscriptions to occur
}
}, TaskScheduler.Default);
// Ensure that any exception in the continuation will be surfaced
_completion = Task.WhenAll(_actionBlock.Completion, afterCompletion);
}
public Task Completion => _completion;
public void Complete() => _actionBlock.Complete();
void IDataflowBlock.Fault(Exception ex)
{
_actionBlock.Fault(ex);
lock (_locker) _faultCTS?.Cancel();
}
public IDisposable LinkTo(ITargetBlock<T> target,
DataflowLinkOptions linkOptions)
{
if (linkOptions.MaxMessages != DataflowBlockOptions.Unbounded)
throw new NotSupportedException();
Subscription subscription;
lock (_locker)
{
if (_faultCTS == null) return new Unlinker(null); // Has completed
var cancellationSource = CancellationTokenSource
.CreateLinkedTokenSource(_cancellationToken, _faultCTS.Token);
subscription = new Subscription(target,
linkOptions.PropagateCompletion, cancellationSource);
_subscriptions.Add(subscription);
}
return new Unlinker(() =>
{
lock (_locker)
{
// The subscription may have already been removed
if (_subscriptions.Remove(subscription))
{
subscription.CancellationSource.Cancel();
subscription.CancellationSource.Dispose();
}
}
});
}
private class Unlinker : IDisposable
{
private readonly Action _action;
public Unlinker(Action disposeAction) => _action = disposeAction;
void IDisposable.Dispose() => _action?.Invoke();
}
DataflowMessageStatus ITargetBlock<T>.OfferMessage(
DataflowMessageHeader messageHeader, T messageValue,
ISourceBlock<T> source, bool consumeToAccept)
{
return _actionBlock.OfferMessage(messageHeader, messageValue, source,
consumeToAccept);
}
T ISourceBlock<T>.ConsumeMessage(DataflowMessageHeader messageHeader,
ITargetBlock<T> target, out bool messageConsumed)
=> throw new NotSupportedException();
bool ISourceBlock<T>.ReserveMessage(DataflowMessageHeader messageHeader,
ITargetBlock<T> target)
=> throw new NotSupportedException();
void ISourceBlock<T>.ReleaseReservation(DataflowMessageHeader messageHeader,
ITargetBlock<T> target)
=> throw new NotSupportedException();
}
Missing features: the IReceivableSourceBlock<T> interface is not implemented, and linking with the MaxMessages option is not supported.
This class is thread-safe.
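A minimal linking sketch (the slow target and the capacities are illustrative):
var broadcast = new GuaranteedDeliveryBroadcastBlock<string>(x => x,
    new DataflowBlockOptions { BoundedCapacity = 100 });
var slowTarget = new ActionBlock<string>(async msg => await Task.Delay(50),
    new ExecutionDataflowBlockOptions { BoundedCapacity = 1 });
using var link = broadcast.LinkTo(slowTarget,
    new DataflowLinkOptions { PropagateCompletion = true });
// Messages sent to the broadcast block are throttled by the slowest linked target,
// instead of being dropped as with the built-in BroadcastBlock.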

TPL DataFlow, link blocks with priority?

Using TPL.DataFlow blocks, is it possible to link two or more sources to a single ITargetBlock(e.g. ActionBlock) and prioritize the sources?
e.g.
BufferBlock<string> b1 = new ...
BufferBlock<string> b2 = new ...
ActionBlock<string> a = new ...
//somehow force messages in b1 to be processed before any message of b2, always
b1.LinkTo (a);
b2.LinkTo (a);
As long as there are messages in b1, I want those to be fed to "a", and once b1 is empty, b2 messages are pushed into "a".
Ideas?
There is nothing like that in TPL Dataflow itself.
The simplest way I can imagine doing this yourself would be to create a structure that encapsulates three blocks: a high priority input, a low priority input and an output. Those blocks would be simple BufferBlocks, along with a method forwarding messages from the two inputs to the output based on priority, running in the background.
The code could look like this:
public class PriorityBlock<T>
{
private readonly BufferBlock<T> highPriorityTarget;
public ITargetBlock<T> HighPriorityTarget
{
get { return highPriorityTarget; }
}
private readonly BufferBlock<T> lowPriorityTarget;
public ITargetBlock<T> LowPriorityTarget
{
get { return lowPriorityTarget; }
}
private readonly BufferBlock<T> source;
public ISourceBlock<T> Source
{
get { return source; }
}
public PriorityBlock()
{
var options = new DataflowBlockOptions { BoundedCapacity = 1 };
highPriorityTarget = new BufferBlock<T>(options);
lowPriorityTarget = new BufferBlock<T>(options);
source = new BufferBlock<T>(options);
Task.Run(() => ForwardMessages());
}
private async Task ForwardMessages()
{
while (true)
{
await Task.WhenAny(
highPriorityTarget.OutputAvailableAsync(),
lowPriorityTarget.OutputAvailableAsync());
T item;
if (highPriorityTarget.TryReceive(out item))
{
await source.SendAsync(item);
}
else if (lowPriorityTarget.TryReceive(out item))
{
await source.SendAsync(item);
}
else
{
// both input blocks must be completed
source.Complete();
return;
}
}
}
}
Usage would look like this:
b1.LinkTo(priorityBlock.HighPriorityTarget);
b2.LinkTo(priorityBlock.LowPriorityTarget);
priorityBlock.Source.LinkTo(a);
For this to work, a also has to have BoundedCapacity set to one (or at least a very low number).
The caveat with this code is that it can introduce a latency of two messages (one waiting in the output block, one waiting in SendAsync()). So, if you have a long list of low priority messages and suddenly a high priority message comes in, it will be processed only after those two low-priority messages that are already waiting.
If this is a problem for you, it can be solved. But I believe it would require more complicated code that deals with the less public parts of TPL Dataflow, like OfferMessage().
Here is an implementation of a PriorityBufferBlock<T> class, that propagates high priority items more frequently than low priority items. The constructor of this class has a priorityPrecedence parameter, that defines how many high priority items will be propagated for each low priority item. If this parameter has the value 1.0 (the smallest valid value), there is no real priority to speak of. If this parameter has the value Double.PositiveInfinity, no low priority item will ever be propagated as long as there are high priority items in the queue. If this parameter has a more normal value, like 5.0 for example, one low priority item will be propagated for every 5 high priority items.
This class maintains two queues internally, one for high and one for low priority items. The number of items stored in each queue is not taken into account, unless one of the two queues is empty, in which case all items of the other queue are freely propagated on demand. The priorityPrecedence parameter influences the behavior of the class only when both internal queues are non-empty. Otherwise, if only one queue has items, the PriorityBufferBlock<T> behaves like a normal BufferBlock<T>.
public class PriorityBufferBlock<T> : IPropagatorBlock<T, T>,
IReceivableSourceBlock<T>
{
private readonly IPropagatorBlock<T, int> _block;
private readonly Queue<T> _highQueue = new();
private readonly Queue<T> _lowQueue = new();
private readonly Predicate<T> _hasPriorityPredicate;
private readonly double _priorityPrecedence;
private double _priorityCounter = 0;
private object Locker => _highQueue;
public PriorityBufferBlock(Predicate<T> hasPriorityPredicate,
double priorityPrecedence,
DataflowBlockOptions dataflowBlockOptions = null)
{
ArgumentNullException.ThrowIfNull(hasPriorityPredicate);
if (priorityPrecedence < 1.0)
throw new ArgumentOutOfRangeException(nameof(priorityPrecedence));
_hasPriorityPredicate = hasPriorityPredicate;
_priorityPrecedence = priorityPrecedence;
dataflowBlockOptions ??= new();
_block = new TransformBlock<T, int>(item =>
{
bool hasPriority = _hasPriorityPredicate(item);
Queue<T> selectedQueue = hasPriority ? _highQueue : _lowQueue;
lock (Locker) selectedQueue.Enqueue(item);
return 0;
}, new()
{
BoundedCapacity = dataflowBlockOptions.BoundedCapacity,
CancellationToken = dataflowBlockOptions.CancellationToken,
MaxMessagesPerTask = dataflowBlockOptions.MaxMessagesPerTask
});
this.Completion = _block.Completion.ContinueWith(completion =>
{
Debug.Assert(this.Count == 0 || !completion.IsCompletedSuccessfully);
lock (Locker) { _highQueue.Clear(); _lowQueue.Clear(); }
return completion;
}, default, TaskContinuationOptions.ExecuteSynchronously |
TaskContinuationOptions.DenyChildAttach, TaskScheduler.Default).Unwrap();
}
public Task Completion { get; private init; }
public void Complete() => _block.Complete();
void IDataflowBlock.Fault(Exception exception) => _block.Fault(exception);
public int Count
{
get { lock (Locker) return _highQueue.Count + _lowQueue.Count; }
}
private Queue<T> GetSelectedQueue(bool forDequeue)
{
Debug.Assert(Monitor.IsEntered(Locker));
Queue<T> selectedQueue;
if (_highQueue.Count == 0)
selectedQueue = _lowQueue;
else if (_lowQueue.Count == 0)
selectedQueue = _highQueue;
else if (_priorityCounter + 1 > _priorityPrecedence)
selectedQueue = _lowQueue;
else
selectedQueue = _highQueue;
if (forDequeue)
{
if (_highQueue.Count == 0 || _lowQueue.Count == 0)
_priorityCounter = 0;
else if (++_priorityCounter > _priorityPrecedence)
_priorityCounter -= _priorityPrecedence + 1;
}
return selectedQueue;
}
private T Peek()
{
Debug.Assert(Monitor.IsEntered(Locker));
Debug.Assert(_highQueue.Count > 0 || _lowQueue.Count > 0);
return GetSelectedQueue(false).Peek();
}
private T Dequeue()
{
Debug.Assert(Monitor.IsEntered(Locker));
Debug.Assert(_highQueue.Count > 0 || _lowQueue.Count > 0);
return GetSelectedQueue(true).Dequeue();
}
private class TargetProxy : ITargetBlock<int>
{
private readonly PriorityBufferBlock<T> _parent;
private readonly ITargetBlock<T> _realTarget;
public TargetProxy(PriorityBufferBlock<T> parent, ITargetBlock<T> target)
{
Debug.Assert(parent is not null);
_parent = parent;
_realTarget = target ?? throw new ArgumentNullException(nameof(target));
}
public Task Completion => throw new NotSupportedException();
public void Complete() => _realTarget.Complete();
void IDataflowBlock.Fault(Exception error) => _realTarget.Fault(error);
DataflowMessageStatus ITargetBlock<int>.OfferMessage(
DataflowMessageHeader messageHeader, int messageValue,
ISourceBlock<int> source, bool consumeToAccept)
{
Debug.Assert(messageValue == 0);
if (consumeToAccept) throw new NotSupportedException();
lock (_parent.Locker)
{
T realValue = _parent.Peek();
DataflowMessageStatus response = _realTarget.OfferMessage(
messageHeader, realValue, _parent, consumeToAccept);
if (response == DataflowMessageStatus.Accepted) _parent.Dequeue();
return response;
}
}
}
public IDisposable LinkTo(ITargetBlock<T> target,
DataflowLinkOptions linkOptions)
=> _block.LinkTo(new TargetProxy(this, target), linkOptions);
DataflowMessageStatus ITargetBlock<T>.OfferMessage(
DataflowMessageHeader messageHeader, T messageValue,
ISourceBlock<T> source, bool consumeToAccept)
=> _block.OfferMessage(messageHeader,
messageValue, source, consumeToAccept);
T ISourceBlock<T>.ConsumeMessage(DataflowMessageHeader messageHeader,
ITargetBlock<T> target, out bool messageConsumed)
{
_ = _block.ConsumeMessage(messageHeader, new TargetProxy(this, target),
out messageConsumed);
if (messageConsumed) lock (Locker) return Dequeue();
return default;
}
bool ISourceBlock<T>.ReserveMessage(DataflowMessageHeader messageHeader,
ITargetBlock<T> target)
=> _block.ReserveMessage(messageHeader, new TargetProxy(this, target));
void ISourceBlock<T>.ReleaseReservation(DataflowMessageHeader messageHeader,
ITargetBlock<T> target)
=> _block.ReleaseReservation(messageHeader, new TargetProxy(this, target));
public bool TryReceive(Predicate<T> filter, out T item)
{
if (filter is not null) throw new NotSupportedException();
if (((IReceivableSourceBlock<int>)_block).TryReceive(null, out _))
{
lock (Locker) item = Dequeue(); return true;
}
item = default; return false;
}
public bool TryReceiveAll(out IList<T> items)
{
if (((IReceivableSourceBlock<int>)_block).TryReceiveAll(out IList<int> items2))
{
T[] array = new T[items2.Count];
lock (Locker)
for (int i = 0; i < array.Length; i++)
array[i] = Dequeue();
items = array; return true;
}
items = default; return false;
}
}
Usage example:
var bufferBlock = new PriorityBufferBlock<SaleOrder>(x => x.HasPriority, 2.5);
The above implementation supports all the features of the built-in BufferBlock<T>, except for TryReceive with a non-null filter. The core functionality of the block is delegated to an internal TransformBlock<T, int>, which contains a dummy zero value for every item stored in one of the queues.
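A possible continuation of the usage example (ProcessOrder is a placeholder). Keeping the consumer's BoundedCapacity small matters, because items must queue inside the PriorityBufferBlock<T> for the priority logic to have any effect:
var worker = new ActionBlock<SaleOrder>(order => ProcessOrder(order),
    new ExecutionDataflowBlockOptions { BoundedCapacity = 1 });
bufferBlock.LinkTo(worker, new DataflowLinkOptions { PropagateCompletion = true });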
