I've been having trouble running multiple tasks with heavy operations.
It seems as if the task process is killed before all the operations are complete.
The code below is an example I used to replicate the issue. If I add something like Debug.Write(), the extra wait introduced by the write fixes the issue. The issue also disappears when I test with a smaller sample size. The class in the example exists only to add complexity to the test.
The real case where I encountered the issue first is too complicated to explain for a post here.
public static class StaticRandom
{
static int seed = Environment.TickCount;
static readonly ThreadLocal<Random> random =
new ThreadLocal<Random>(() => new Random(Interlocked.Increment(ref seed)));
public static int Next()
{
return random.Value.Next();
}
public static int Next(int maxValue)
{
return random.Value.Next(maxValue);
}
public static double NextDouble()
{
return random.Value.NextDouble();
}
}
// this is the test function I run to recreate the problem:
static void tasktest()
{
var testlist = new List<ExampleClass>();
for (var index = 0; index < 10000; ++index)
{
var newClass = new ExampleClass();
newClass.Populate(Enumerable.Range(0, 1000).ToList());
testlist.Add(newClass);
}
var anotherClassList = new List<ExampleClass>();
var threadNumber = 5;
if (threadNumber > testlist.Count)
{
threadNumber = testlist.Count;
}
var taskList = new List<Task>();
var tokenSource = new CancellationTokenSource();
CancellationToken cancellationToken = tokenSource.Token;
int stuffPerThread = testlist.Count / threadNumber;
var stuffCounter = 0;
for (var count = 1; count <= threadNumber; ++count)
{
var toSkip = stuffCounter;
var threadWorkLoad = stuffPerThread;
var currentIndex = count;
// these ifs make sure all the indexes are covered
if (stuffCounter + threadWorkLoad > testlist.Count)
{
threadWorkLoad = testlist.Count - stuffCounter;
}
else if (count == threadNumber && stuffCounter + threadWorkLoad < testlist.Count)
{
threadWorkLoad = testlist.Count - stuffCounter;
}
taskList.Add(Task.Factory.StartNew(() => taskfunc(testlist, anotherClassList, toSkip, threadWorkLoad),
cancellationToken, TaskCreationOptions.None, TaskScheduler.Default));
stuffCounter += stuffPerThread;
}
Task.WaitAll(taskList.ToArray());
}
public class ExampleClass
{
public ExampleClassInner[] Inners { get; set; }
public ExampleClass()
{
Inners = new ExampleClassInner[5];
for (var index = 0; index < Inners.Length; ++index)
{
Inners[index] = new ExampleClassInner();
}
}
public void Populate(List<int> intlist) {/*adds random ints to the inner class*/}
public ExampleClass(ExampleClass copyFrom)
{
Inners = new ExampleClassInner[5];
for (var index = 0; index < Inners.Length; ++index)
{
Inners[index] = new ExampleClassInner(copyFrom.Inners[index]);
}
}
public class ExampleClassInner
{
public bool SomeBool { get; set; } = false;
public int SomeInt { get; set; } = -1;
public ExampleClassInner()
{
}
public ExampleClassInner(ExampleClassInner copyFrom)
{
SomeBool = copyFrom.SomeBool;
SomeInt = copyFrom.SomeInt;
}
}
}
static int expensivefunc(int theint)
{
/*a lot of pointless arithmetic and loops done only on primitives and with primitives,
just to increase the complexity*/
theint *= theint + 1;
var anotherlist = Enumerable.Range(0, 10000).ToList();
for (var index = 0; index < anotherlist.Count; ++index)
{
theint += index;
if (theint % 5 == 0)
{
theint *= index / 2;
}
}
var yetanotherlist = Enumerable.Range(0, 50000).ToList();
for (var index = 0; index < yetanotherlist.Count; ++index)
{
theint += index;
if (theint % 7 == 0)
{
theint -= index / 3;
}
}
while (theint > 8)
{
theint /= 2;
}
return theint;
}
// this function is intentionally creating a lot of objects, to simulate complexity
static void taskfunc(List<ExampleClass> intlist, List<ExampleClass> anotherClassList, int skip, int take)
{
if (take == 0)
{
take = intlist.Count;
}
var partial = intlist.Skip(skip).Take(take).ToList();
for (var index = 0; index < partial.Count; ++index)
{
var testint = expensivefunc(index);
var newClass = new ExampleClass(partial[index]);
newClass.Inners[StaticRandom.Next(5)].SomeInt = testint;
anotherClassList.Add(new ExampleClass(newClass));
}
}
The expected result is that anotherClassList will end up the same size as testlist, and that is what happens when the lists are smaller or the task operations are less complex. However, when I increase the volume of operations, anotherClassList has a few indexes missing and sometimes some of the entries in the list are null.
Why does this happen when I have Task.WaitAll?
Your problem is that List<T> is just not thread-safe; you can't add to a List<T> from multiple threads and expect it to play nicely.
One way is to use a lock or a thread-safe collection, but I feel this all should be refactored (my OCD is going off all over the place).
private static object _sync = new object();
...
private static void TaskFunc(List<ExampleClass> intlist, List<ExampleClass> anotherClassList, int skip, int take)
{
...
var partial = intlist.Skip(skip).Take(take).ToList();
...
// note that locking here will likely drastically decrease any performance threading gain
lock (_sync)
{
for (var index = 0; index < partial.Count; ++index)
{
// this is your problem, you are adding to a list from multiple threads
anotherClassList.Add(...);
}
}
}
In short, I think you need to think more carefully about the threading logic of your method, identify what you are trying to achieve, and work out how to make it conceptually thread safe (while keeping your performance gains).
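If you go the thread-safe collection route instead of locking, a minimal sketch could look like this (it reuses the names from the question; picking ConcurrentBag<T> is my assumption, not the only option):
using System.Collections.Concurrent;
...
// hypothetical variant of taskfunc that writes to a thread-safe collection
// instead of a shared List<ExampleClass>
static void taskfunc(List<ExampleClass> intlist, ConcurrentBag<ExampleClass> results, int skip, int take)
{
    var partial = intlist.Skip(skip).Take(take).ToList();
    for (var index = 0; index < partial.Count; ++index)
    {
        var newClass = new ExampleClass(partial[index]);
        newClass.Inners[StaticRandom.Next(5)].SomeInt = expensivefunc(index);
        results.Add(new ExampleClass(newClass)); // ConcurrentBag<T>.Add is thread-safe
    }
}
// in tasktest: var results = new ConcurrentBag<ExampleClass>();
// after Task.WaitAll(taskList.ToArray()): var anotherClassList = results.ToList();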
After TheGeneral enlightened me that lists are not thread safe, I changed the List<T> I was adding to from the tasks to an array, and this fixed my issue.
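For reference, a minimal sketch of that change, using the names from the example above (the exact partitioning is assumed): each task writes only into its own pre-sized slice of the result array, so no two threads ever touch the same index and no locking is needed.
// in tasktest: pre-size the result array instead of sharing a List<ExampleClass>
var results = new ExampleClass[testlist.Count];
...
static void taskfunc(List<ExampleClass> intlist, ExampleClass[] results, int skip, int take)
{
    for (var index = 0; index < take; ++index)
    {
        var newClass = new ExampleClass(intlist[skip + index]);
        newClass.Inners[StaticRandom.Next(5)].SomeInt = expensivefunc(index);
        results[skip + index] = new ExampleClass(newClass); // each item gets a unique slot
    }
}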
I realise this is a non-specific code question, but I suspect that the people with answers are on this forum.
I am receiving a large number of records of < 100 bytes via TCP, at a rate of 10 per millisecond.
I have to parse and process the data and that takes me 100 microseconds - so I am pretty maxed out.
Does 100 microseconds seem large?
Here is an example of the kind of processing I do with LINQ. It is really convenient - but is it inherently slow?
public void Process()
{
try
{
int ptr = PayloadOffset + 1;
var cPair = MessageData.GetString(ref ptr, 7);
var orderID = MessageData.GetString(ref ptr, 15);
if (Book.CPairs.ContainsKey(cPair))
{
var cPairGroup = Book.CPairs[cPair];
if (cPairGroup.BPrices != null)
{
cPairGroup.BPrices.ForEach(x => { x.BOrders.RemoveAll(y => y.OrderID.Equals(orderID)); });
cPairGroup.BPrices.RemoveAll(x => x.BOrders.Count == 0);
}
}
}
}
public class BOrderGroup
{
public double Amount;
public string OrderID;
}
public class BPriceGroup
{
public double BPrice;
public List<BOrderGroup> BOrders;
}
public class CPairGroup
{
public List<BPriceGroup> BPrices;
}
public static Dictionary<string, CPairGroup> CPairs;
As others have mentioned, LINQ is not inherently slow. But it can be slower than equivalent non-LINQ code (which is why the Roslyn team has an "Avoid LINQ" guide in its coding conventions).
If this is your hot path and you need every microsecond, then you should probably implement the logic in such a way:
public void Process()
{
try
{
int ptr = PayloadOffset + 1;
var cPair = MessageData.GetString(ref ptr, 7);
var orderID = MessageData.GetString(ref ptr, 15);
if (Book.CPairs.TryGetValue(cPair, out CPairGroup cPairGroup) && cPairGroup != null)
{
for (int i = cPairGroup.BPrices.Count - 1; i >= 0; i--)
{
var x = cPairGroup.BPrices[i];
for (int j = x.BOrders.Count - 1; j >= 0; j--)
{
var y = x.BOrders[j];
if (y.OrderID.Equals(orderID))
{
x.BOrders.RemoveAt(j);
}
}
if (x.BOrders.Count == 0)
{
cPairGroup.BPrices.RemoveAt(i);
}
}
}
}
}
Main points:
Avoid double dictionary lookup by using TryGetValue
Single iteration over cPairGroup.BPrices
In place modification of structures by iterating backwards
This code should not contain any additional heap allocations
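To see whether the rewrite actually buys you anything on your own data, a rough Stopwatch measurement is usually enough. The sketch below assumes hypothetical ProcessLinq()/ProcessLoop() wrappers around the two variants; note that the real Process() mutates the order book, so each iteration would need fresh data for the numbers to be meaningful.
// rough micro-benchmark sketch (no warm-up, no GC isolation; use a proper
// benchmarking tool such as BenchmarkDotNet for anything rigorous)
const int iterations = 100000;
var sw = System.Diagnostics.Stopwatch.StartNew();
for (int i = 0; i < iterations; i++)
{
    ProcessLoop(); // or ProcessLinq()
}
sw.Stop();
Console.WriteLine("avg {0:F2} us per call", sw.Elapsed.TotalMilliseconds * 1000.0 / iterations);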
I am looking to create an external application that monitors the 'FPS' of a DirectX application (like FRAPS without the recording). I have read several Microsoft articles on performance measuring tools - but I am looking to get the feedback (and experience) of the community.
My question: what is the best method for obtaining the FPS of a DirectX application?
Windows has some Event Tracing for Windows (ETW) providers related to DirectX profiling. The most interesting ones are Microsoft-Windows-D3D9 and Microsoft-Windows-DXGI, which allow tracing of the frame presentation events. The simplest way to calculate FPS is to count the number of PresentStart events within a time interval and divide that by the length of the interval.
To work with ETW in C#, install the Microsoft.Diagnostics.Tracing.TraceEvent NuGet package.
The following code sample displays FPS of running processes:
using System;
using System.Collections.Generic;
using System.Text;
using System.Diagnostics;
using System.Threading;
using Microsoft.Diagnostics.Tracing.Session;
namespace ConsoleApp1
{
//helper class to store frame timestamps
public class TimestampCollection
{
const int MAXNUM = 1000;
public string Name { get; set; }
List<long> timestamps = new List<long>(MAXNUM + 1);
object sync = new object();
//add value to the collection
public void Add(long timestamp)
{
lock (sync)
{
timestamps.Add(timestamp);
if (timestamps.Count > MAXNUM) timestamps.RemoveAt(0);
}
}
//get the number of timestamps within the interval
public int QueryCount(long from, long to)
{
int c = 0;
lock (sync)
{
foreach (var ts in timestamps)
{
if (ts >= from && ts <= to) c++;
}
}
return c;
}
}
class Program
{
//event codes (https://github.com/GameTechDev/PresentMon/blob/40ee99f437bc1061a27a2fc16a8993ee8ce4ebb5/PresentData/PresentMonTraceConsumer.cpp)
public const int EventID_D3D9PresentStart = 1;
public const int EventID_DxgiPresentStart = 42;
//ETW provider codes
public static readonly Guid DXGI_provider = Guid.Parse("{CA11C036-0102-4A2D-A6AD-F03CFED5D3C9}");
public static readonly Guid D3D9_provider = Guid.Parse("{783ACA0A-790E-4D7F-8451-AA850511C6B9}");
static TraceEventSession m_EtwSession;
static Dictionary<int, TimestampCollection> frames = new Dictionary<int, TimestampCollection>();
static Stopwatch watch = null;
static object sync = new object();
static void EtwThreadProc()
{
//start tracing
m_EtwSession.Source.Process();
}
static void OutputThreadProc()
{
//console output loop
while (true)
{
long t1, t2;
long dt = 2000;
Console.Clear();
Console.WriteLine(DateTime.Now.ToString() + "." + DateTime.Now.Millisecond.ToString());
Console.WriteLine();
lock (sync)
{
t2 = watch.ElapsedMilliseconds;
t1 = t2 - dt;
foreach (var x in frames.Values)
{
Console.Write(x.Name + ": ");
//get the number of frames
int count = x.QueryCount(t1, t2);
//calculate FPS
Console.WriteLine("{0} FPS", (double)count / dt * 1000.0);
}
}
Console.WriteLine();
Console.WriteLine("Press any key to stop tracing...");
Thread.Sleep(1000);
}
}
public static void Main(string[] argv)
{
//create ETW session and register providers
m_EtwSession = new TraceEventSession("mysess");
m_EtwSession.StopOnDispose = true;
m_EtwSession.EnableProvider("Microsoft-Windows-D3D9");
m_EtwSession.EnableProvider("Microsoft-Windows-DXGI");
//handle event
m_EtwSession.Source.AllEvents += data =>
{
//filter out frame presentation events
if (((int)data.ID == EventID_D3D9PresentStart && data.ProviderGuid == D3D9_provider) ||
((int)data.ID == EventID_DxgiPresentStart && data.ProviderGuid == DXGI_provider))
{
int pid = data.ProcessID;
long t;
lock (sync)
{
t = watch.ElapsedMilliseconds;
//if process is not yet in Dictionary, add it
if (!frames.ContainsKey(pid))
{
frames[pid] = new TimestampCollection();
string name = "";
var proc = Process.GetProcessById(pid);
if (proc != null)
{
using (proc)
{
name = proc.ProcessName;
}
}
else name = pid.ToString();
frames[pid].Name = name;
}
//store frame timestamp in collection
frames[pid].Add(t);
}
}
};
watch = new Stopwatch();
watch.Start();
Thread thETW = new Thread(EtwThreadProc);
thETW.IsBackground = true;
thETW.Start();
Thread thOutput = new Thread(OutputThreadProc);
thOutput.IsBackground = true;
thOutput.Start();
Console.ReadKey();
m_EtwSession.Dispose();
}
}
}
Based on the source code of the PresentMon project.
Fraps inserts a DLL into every running application and hooks specific DX calls to figure out the framerate and capture video; I'm pretty sure you'll have to do something similar. After a bit of poking around I found a GitHub project that does some basic DX hooking for captures and overlays, so that might be a good place to start, though I've not used it personally so I can't totally vouch for its quality.
http://spazzarama.com/2011/03/14/c-screen-capture-and-overlays-for-direct3d-9-10-and-11-using-api-hooks/
Building on https://stackoverflow.com/a/54625953/12047161:
I had more success not using the stopwatch, as the event triggers seem to be asynchronous with the actual frames. I kept getting batches of 20-50 frames all at once, making the estimated FPS fluctuate between 50% and 250% of the actual value.
Instead I used the TimeStampRelativeMSec property from the TraceEvent class:
//handle event
m_EtwSession.Source.AllEvents += data =>
{
//filter out frame presentation events
if((int) data.ID == EventID_DxgiPresentStart && data.ProviderGuid == DXGI_provider)
{
int pid = data.ProcessID;
//if process is not yet in Dictionary, add it
if (!frames.ContainsKey(pid))
{
frames[pid] = new TimestampCollection();
string name = "";
var proc = Process.GetProcessById(pid);
if (proc != null)
{
using (proc)
{
name = proc.ProcessName;
}
}
else name = pid.ToString();
frames[pid].Name = name;
}
frames[pid].Add((long)data.TimeStampRelativeMSec);
}
};
FPS is then calculated from the average time between an arbitrary number of past entries:
public double GetFrameTime(int count)
{
double returnValue = 0;
int listCount = timestamps.Count;
if(listCount > count)
{
for(int i = 1; i <= count; i++)
{
returnValue += timestamps[listCount - i] - timestamps[listCount - (i + 1)];
}
returnValue /= count;
}
return returnValue;
}
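With that helper (presumably a member of the TimestampCollection class above, since it reads the timestamps list), FPS can then be derived from the average frame time, for example:
// hypothetical usage: 'frameTimes' is the TimestampCollection for one process
double avgFrameTimeMs = frameTimes.GetFrameTime(60);           // average over the last 60 frames
double fps = avgFrameTimeMs > 0 ? 1000.0 / avgFrameTimeMs : 0; // guard against too few samples
Console.WriteLine("{0:F1} FPS", fps);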
This method gave me far more accurate results (compared to in-game counters, where available) for several different games I've tried.
I want a simple class that implements a fixed-size circular buffer. It should be efficient, easy on the eyes, generically typed.
For now it need not be MT-capable. I can always add a lock later; it won't be high-concurrency in any case.
Methods should be .Add() and, I guess, .List(), which retrieves all the entries. On second thought, I think retrieval should be done via an indexer. At any moment I will want to be able to retrieve any element in the buffer by index. But keep in mind that from one moment to the next, Element[n] may be different, as the circular buffer fills up and rolls over. This isn't a stack, it's a circular buffer.
Regarding "overflow": I would expect that internally there would be an array holding the items, and over time the head and tail of the buffer will rotate around that fixed array. But that should be invisible to the user. There should be no externally-detectable "overflow" event or behavior.
This is not a school assignment - it is most commonly going to be used for an MRU cache or a fixed-size transaction or event log.
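To illustrate what I mean by retrieval via an indexer, here is a rough sketch of the mapping I would expect internally (the field names _buffer, _head and _count are hypothetical):
// logical index 0 is the oldest element currently in the buffer; the same
// logical index can refer to a different item once the buffer rolls over
public T this[int index]
{
    get
    {
        if (index < 0 || index >= _count)
            throw new ArgumentOutOfRangeException("index");
        return _buffer[(_head + index) % _buffer.Length];
    }
}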
I would use an array of T, a head and tail pointer, and add and get methods.
Like: (Bug hunting is left to the user)
// Hijack these for simplicity
import java.nio.BufferOverflowException;
import java.nio.BufferUnderflowException;
public class CircularBuffer<T> {
private T[] buffer;
private int tail;
private int head;
@SuppressWarnings("unchecked")
public CircularBuffer(int n) {
buffer = (T[]) new Object[n];
tail = 0;
head = 0;
}
public void add(T toAdd) {
if (head != (tail - 1)) {
buffer[head++] = toAdd;
} else {
throw new BufferOverflowException();
}
head = head % buffer.length;
}
public T get() {
T t = null;
int adjTail = tail > head ? tail - buffer.length : tail;
if (adjTail < head) {
t = (T) buffer[tail++];
tail = tail % buffer.length;
} else {
throw new BufferUnderflowException();
}
return t;
}
public String toString() {
return "CircularBuffer(size=" + buffer.length + ", head=" + head + ", tail=" + tail + ")";
}
public static void main(String[] args) {
CircularBuffer<String> b = new CircularBuffer<String>(3);
for (int i = 0; i < 10; i++) {
System.out.println("Start: " + b);
b.add("One");
System.out.println("One: " + b);
b.add("Two");
System.out.println("Two: " + b);
System.out.println("Got '" + b.get() + "', now " + b);
b.add("Three");
System.out.println("Three: " + b);
// Test Overflow
// b.add("Four");
// System.out.println("Four: " + b);
System.out.println("Got '" + b.get() + "', now " + b);
System.out.println("Got '" + b.get() + "', now " + b);
// Test Underflow
// System.out.println("Got '" + b.get() + "', now " + b);
// Back to start, let's shift on one
b.add("Foo");
b.get();
}
}
}
This is how I would (or did) write an efficient circular buffer in Java. It's backed by a simple array. For my particular use case, I needed high concurrent throughput, so I used CAS for allocation of the index. I then created mechanisms for reliable copies, including a CAS copy of the entire buffer. I used this in a case which is outlined in greater detail in a short article.
import java.util.concurrent.atomic.AtomicLong;
import java.lang.reflect.Array;
/**
* A circular array buffer with a copy-and-swap cursor.
*
* <p>This class provides a list of T objects whose size is <em>unstable</em>.
* It's intended for capturing data where the frequency of sampling greatly
* outweighs the frequency of inspection (for instance, monitoring).</p>
*
* <p>This object keeps in memory a fixed size buffer which is used for
* capturing objects. It copies the objects to a snapshot array which may be
* worked with. The size of the snapshot array will vary based on the
* stability of the array during the copy operation.</p>
*
* <p>Adding to the buffer is <em>O(1)</em> and lockless. Taking a
* stable copy of the sample is <em>O(n)</em>.</p>
*/
public class ConcurrentCircularBuffer <T> {
private final AtomicLong cursor = new AtomicLong();
private final T[] buffer;
private final Class<T> type;
/**
* Create a new concurrent circular buffer.
*
* @param type The type of the array. This is captured for the same reason
* it's required by {@link java.util.List#toArray()}.
*
* @param bufferSize The size of the buffer.
*
* @throws IllegalArgumentException if the bufferSize is a non-positive
* value.
*/
public ConcurrentCircularBuffer (final Class <T> type,
final int bufferSize)
{
if (bufferSize < 1) {
throw new IllegalArgumentException(
"Buffer size must be a positive value"
);
}
this.type = type;
this.buffer = (T[]) new Object [ bufferSize ];
}
/**
* Add a new object to this buffer.
*
* <p>Add a new object to the cursor-point of the buffer.</p>
*
* @param sample The object to add.
*/
public void add (T sample) {
buffer[(int) (cursor.getAndIncrement() % buffer.length)] = sample;
}
/**
* Return a stable snapshot of the buffer.
*
* <p>Capture a stable snapshot of the buffer as an array. The snapshot
* may not be the same length as the buffer, any objects which were
* unstable during the copy will be factored out.</p>
*
* @return An array snapshot of the buffer.
*/
public T[] snapshot () {
T[] snapshots = (T[]) new Object [ buffer.length ];
/* Determine the size of the snapshot by the number of affected
* records. Trim the size of the snapshot by the number of records
* which are considered to be unstable during the copy (the amount the
* cursor may have moved while the copy took place).
*
* If the cursor eliminated the sample (if the sample size is so small
* compared to the rate of mutation that it did a full-wrap during the
* copy) then just treat the buffer as though the cursor is
* buffer.length - 1 and it was not changed during copy (this is
* unlikely, but it should typically provide fairly stable results).
*/
long before = cursor.get();
/* If the cursor hasn't yet moved, skip the copying and simply return a
* zero-length array.
*/
if (before == 0) {
return (T[]) Array.newInstance(type, 0);
}
System.arraycopy(buffer, 0, snapshots, 0, buffer.length);
long after = cursor.get();
int size = buffer.length - (int) (after - before);
long snapshotCursor = before - 1;
/* Highly unlikely, but the entire buffer was replaced while we
* waited...so just return a zero length array, since we can't get a
* stable snapshot...
*/
if (size <= 0) {
return (T[]) Array.newInstance(type, 0);
}
long start = snapshotCursor - (size - 1);
long end = snapshotCursor;
if (snapshotCursor < snapshots.length) {
size = (int) snapshotCursor + 1;
start = 0;
}
/* Copy the sample snapshot to a new array the size of our stable
* snapshot area.
*/
T[] result = (T[]) Array.newInstance(type, size);
int startOfCopy = (int) (start % snapshots.length);
int endOfCopy = (int) (end % snapshots.length);
/* If the buffer space wraps the physical end of the array, use two
* copies to construct the new array.
*/
if (startOfCopy > endOfCopy) {
System.arraycopy(snapshots, startOfCopy,
result, 0,
snapshots.length - startOfCopy);
System.arraycopy(snapshots, 0,
result, (snapshots.length - startOfCopy),
endOfCopy + 1);
}
else {
/* Otherwise it's a single continuous segment, copy the whole thing
* into the result.
*/
System.arraycopy(snapshots, startOfCopy,
result, 0, endOfCopy - startOfCopy + 1);
}
return (T[]) result;
}
/**
* Get a stable snapshot of the complete buffer.
*
* <p>This operation fetches a snapshot of the buffer using the algorithm
* defined in {@link #snapshot()}. If there was concurrent modification of
* the buffer during the copy, however, it will retry until a full stable
* snapshot of the buffer was acquired.</p>
*
* <p><em>Note, for very busy buffers on large symmetric multiprocessing
* machines and supercomputers running data processing intensive
* applications, this operation has the potential of being fairly
* expensive. In practice on commodity hardware, dual-core processors and
* non-processing intensive systems (such as web services) it very rarely
* retries.</em></p>
*
* @return A full copy of the internal buffer.
*/
public T[] completeSnapshot () {
T[] snapshot = snapshot();
/* Try again until we get a snapshot that's the same size as the
* buffer... This is very often a single iteration, but it depends on
* how busy the system is.
*/
while (snapshot.length != buffer.length) {
snapshot = snapshot();
}
return snapshot;
}
/**
* The size of this buffer.
*/
public int size () {
return buffer.length;
}
}
I would use ArrayBlockingQueue or one of the other prebuilt Queue implementations, depending on what the needs are. Very rarely is there a need to implement such a data structure yourself (unless it's a school assignment).
EDIT: Now that you have added the requirement "to retrieve any element in the buffer by index", I suppose that you need to implement your own class (unless google-collections or some other library provides one). A circular buffer is quite easy to implement, as JeeBee's example shows. You may also look at ArrayBlockingQueue's source code - its code is quite clean; just remove the locking and unneeded methods, and add methods for accessing it by index.
Use Java's ArrayDeque
Here is a ready-to-use CircularArrayList implementation for Java which I use in production code. By extending AbstractList in the Java-recommended way, it supports all the functionality you would expect from a standard List implementation in the Java Collections Framework (generic element type, subList, iteration, etc.).
The following calls complete in O(1):
add(item) - adds at end of list
remove(0) - removes from beginning of list
get(i) - retrieves random element in list
Here is an implementation I've coded for my own use, but it could be useful.
The buffer contains a fixed maximum number of items. The set is circular; old items are automatically removed. The caller can get the tail of the items by an absolute incremental index (a long), but items may have been lost between calls that are too far apart in time. This class is fully thread-safe.
using System;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
public sealed class ConcurrentCircularBuffer<T> : ICollection<T>
{
private T[] _items;
private int _index;
private bool _full;
public ConcurrentCircularBuffer(int capacity)
{
if (capacity <= 1) // need at least two items
throw new ArgumentException(null, "capacity");
Capacity = capacity;
_items = new T[capacity];
}
public int Capacity { get; private set; }
public long TotalCount { get; private set; }
public int Count
{
get
{
lock (SyncObject) // full & _index need to be in sync
{
return _full ? Capacity : _index;
}
}
}
public void AddRange(IEnumerable<T> items)
{
if (items == null)
return;
lock (SyncObject)
{
foreach (var item in items)
{
AddWithLock(item);
}
}
}
private void AddWithLock(T item)
{
_items[_index] = item;
_index++;
if (_index == Capacity)
{
_full = true;
_index = 0;
}
TotalCount++;
}
public void Add(T item)
{
lock (SyncObject)
{
AddWithLock(item);
}
}
public void Clear()
{
lock (SyncObject)
{
_items = new T[Capacity];
_index = 0;
_full = false;
TotalCount = 0;
}
}
// this gives raw access to the underlying buffer. not sure I should keep that
public T this[int index]
{
get
{
return _items[index];
}
}
public T[] GetTail(long startIndex)
{
long lostCount;
return GetTail(startIndex, out lostCount);
}
public T[] GetTail(long startIndex, out long lostCount)
{
if (startIndex < 0 || startIndex >= TotalCount)
throw new ArgumentOutOfRangeException("startIndex");
T[] array = ToArray();
lostCount = (TotalCount - Count) - startIndex;
if (lostCount >= 0)
return array;
lostCount = 0;
// this could maybe be optimized to not allocate the initial array,
// but in a multi-threading environment, I suppose this is arguable (and more difficult).
T[] chunk = new T[TotalCount - startIndex];
Array.Copy(array, array.Length - (TotalCount - startIndex), chunk, 0, chunk.Length);
return chunk;
}
public T[] ToArray()
{
lock (SyncObject)
{
T[] items = new T[_full ? Capacity : _index];
if (_full)
{
if (_index == 0)
{
Array.Copy(_items, items, Capacity);
}
else
{
Array.Copy(_items, _index, items, 0, Capacity - _index);
Array.Copy(_items, 0, items, Capacity - _index, _index);
}
}
else if (_index > 0)
{
Array.Copy(_items, items, _index);
}
return items;
}
}
public IEnumerator<T> GetEnumerator()
{
return ToArray().AsEnumerable().GetEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return GetEnumerator();
}
bool ICollection<T>.Contains(T item)
{
return _items.Contains(item);
}
void ICollection<T>.CopyTo(T[] array, int arrayIndex)
{
if (array == null)
throw new ArgumentNullException("array");
if (array.Rank != 1)
throw new ArgumentException(null, "array");
if (arrayIndex < 0)
throw new ArgumentOutOfRangeException("arrayIndex");
if ((array.Length - arrayIndex) < Count)
throw new ArgumentException(null, "array");
T[] thisArray = ToArray();
Array.Copy(thisArray, 0, array, arrayIndex, thisArray.Length);
}
bool ICollection<T>.IsReadOnly
{
get
{
return false;
}
}
bool ICollection<T>.Remove(T item)
{
return false;
}
private static object _syncObject;
private static object SyncObject
{
get
{
if (_syncObject == null)
{
object obj = new object();
Interlocked.CompareExchange(ref _syncObject, obj, null);
}
return _syncObject;
}
}
}
Just use someone else's implementation:
The Power Collections Deque<T> is implemented by a circular buffer.
The Power Collections library is patchy, but the Deque is a perfectly acceptable expanding circular buffer.
Since you indicate that you do not want expansion and instead want overwriting, you could fairly easily modify the code to overwrite: this would simply involve removing the check for the pointers being logically adjacent and just writing anyway. At the same time, the private buffer could be made readonly.
System.Collections.Generic.Queue<T> is a simple circular buffer inside (a T[] with head and tail, just like in the sample from JeeBee).
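As a minimal sketch (not from the original answer) of how to get the overwrite-on-full behaviour the question asks for out of Queue<T>, a fixed-capacity wrapper could look like this; the class name and members are assumptions:
using System;
using System.Collections.Generic;
using System.Linq;
// fixed-capacity buffer that silently overwrites the oldest item when full,
// built on Queue<T> (which already uses a circular array internally)
public class OverwritingBuffer<T>
{
    private readonly Queue<T> _queue;
    private readonly int _capacity;
    public OverwritingBuffer(int capacity)
    {
        if (capacity < 1) throw new ArgumentOutOfRangeException("capacity");
        _capacity = capacity;
        _queue = new Queue<T>(capacity);
    }
    public void Add(T item)
    {
        if (_queue.Count == _capacity)
            _queue.Dequeue();          // drop the oldest entry
        _queue.Enqueue(item);
    }
    public T this[int index]
    {
        get { return _queue.ElementAt(index); } // O(n), fine for small buffers
    }
    public T[] List()
    {
        return _queue.ToArray();       // oldest first
    }
}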
In Guava 15, we introduced EvictingQueue, which is a non-blocking, bounded queue that automatically evicts (removes) elements from the head of the queue when attempting to add elements to a full queue. This is different from conventional bounded queues, which either block or reject new elements when full.
It sounds like this should suit your needs, and has a much simpler interface than using an ArrayDeque directly (it uses one under the hood though!).
More information can be found here.
I want to answer this question from the Java perspective.
To implement a circular buffer in Java, you probably need three things: a circular buffer class, generics, and a few operations on it (to learn which operations you need and how they work internally, you might want to read the Wikipedia article on circular buffers first).
Secondly, the judgement of whether the buffer is full or empty should be treated very carefully.
Here I give two intuitive solutions for the full/empty judgement. In solution one, you need to create two integer variables storing both the current size and the maximum size of your buffer. Obviously, if the current size equals the maximum size, the buffer is full.
In the other solution, we leave the last storage slot idle (for a circular buffer of size seven, we leave the seventh slot unused). Based on this, we can determine that the buffer is full when the expression (rear + 1) % MAXSIZE == front is satisfied.
For clarification, here is one implementation in the Java language (it uses the first, count-based approach).
import java.nio.BufferOverflowException;
import java.nio.BufferUnderflowException;
public class CircularBuffer<T> {
private int front;
private int rear;
private int currentSize;
private int maxSize;
private T[] buffer;
public CircularBuffer(int n) {
buffer = (T[]) new Object[n];
front = 0;
rear = 0;
currentSize = 0;
maxSize = n;
}
public void push(T e) {
if (!isFull()) {
buffer[rear] = e;
currentSize++;
rear = (rear + 1) % maxSize;
} else throw new BufferOverflowException();
}
public T pop() {
if (!isEmpty()) {
T temp = buffer[front];
buffer[front] = null;
front = (front + 1) % maxSize;
currentSize--;
return temp;
} else throw new BufferUnderflowException();
}
public T peekFirst() {
if (!isEmpty()) {
return buffer[front];
} else return null;
}
public T peekLast() {
if (!isEmpty()) {
return buffer[(rear - 1 + maxSize) % maxSize]; // handle wrap-around when rear == 0
} else return null;
}
public int size() {
return currentSize;
}
public boolean isEmpty() {
if (currentSize == 0) {
return true;
} else return false;
}
public boolean isFull() {
if (currentSize == maxSize) {
return true;
} else return false;
}
public boolean clean() {
for (int i = 0; i < maxSize; i++) {
buffer[i] = null;
}
front = 0;
rear = 0;
currentSize = 0;
return true;
}
public static void main(String[] args) {
CircularBuffer<Integer> buff = new CircularBuffer<>(7);
buff.push(0);
buff.push(1);
buff.push(2);
System.out.println(buff.size());
System.out.println("The head element is: " + buff.pop());
System.out.println("Size should be twoo: " + buff.size());
System.out.println("The last element is one: " + buff.peekLast());
System.out.println("Size should be two: " + buff.size());
buff.clean();
System.out.println("Size should be zero: " + buff.size());
}
}
If an LRU cache would work, consider just using LinkedHashMap's access-ordered constructor and removeEldestEntry: http://java.sun.com/javase/6/docs/api/java/util/LinkedHashMap.html#LinkedHashMap(int,%20float,%20boolean), http://java.sun.com/javase/6/docs/api/java/util/LinkedHashMap.html#removeEldestEntry(java.util.Map.Entry)
Here is another implementation which uses Apache Commons Collections' BoundedFifoBuffer. Please use CircularFifoQueue if you are using the latest JAR from Apache, as the class below is deprecated:
BoundedFifoBuffer apiCallHistory = new BoundedFifoBuffer(20);
for(int i =1 ; i < 25; i++){
if(apiCallHistory.isFull()){
System.out.println("removing :: "+apiCallHistory.remove());
}
apiCallHistory.add(i);
}
// The following is in C#
using System;
public class fqueue
{
// The following code implements a circular queue of objects
//private data:
private bool empty;
private bool full;
private int begin, end;
private object[] x;
//public data:
public fqueue()
{
empty = !(full = false);
begin = end = 0xA2;
x = new object[256];
return;
}
public fqueue(int size)
{
if (1 > size) throw new Exception("fqueue: Size cannot be zero or negative");
empty = !(full = false);
begin = end = 0; // must start within bounds for arbitrary sizes (0xA2 would overflow buffers smaller than 163)
x = new object[size];
return;
}
public object write
{
set
{
if(full) throw new Exception("Write error: Queue is full");
end = empty ? end : (end + 1) % x.Length;
full = ((end + 1) % x.Length) == begin;
empty = false;
x[end] = value;
}
}
public object read
{
get
{
if(empty) throw new Exception("Read error: Queue is empty");
full = false;
object ret = x[begin];
begin = (empty=end==begin) ?
begin :
(begin + 1) % x.Length;
return ret;
}
}
public int maxSize
{
get
{
return x.Length;
}
}
public int queueSize
{
get
{
return end - begin + (empty ? 0 : 1 + ((end < begin) ? x.Length : 0));
}
}
public bool isEmpty
{
get
{
return empty;
}
}
public bool isFull
{
get
{
return full;
}
}
public int start
{
get
{
return begin;
}
}
public int finish
{
get
{
return end;
}
}
}