What I need to do is record two USB webcams at 60 fps in 1280x720 format in UWP C#.
The cameras are currently previewed in a CaptureElement.
This is how the preview starts:
public async Task StartPreviewSideAsync(DeviceInformation deviceInformation)
{
    if (deviceInformation != null)
    {
        var settings = new MediaCaptureInitializationSettings { VideoDeviceId = deviceInformation.Id };
        try
        {
            _mediaCaptureSide = new MediaCapture();
            var profiles = MediaCapture.FindAllVideoProfiles(deviceInformation.Id);
            Debug.WriteLine(MediaCapture.IsVideoProfileSupported(deviceInformation.Id) + " count: " + profiles.Count);
            var match = (from profile in profiles
                         from desc in profile.SupportedRecordMediaDescription
                         where desc.Width == 1280 && desc.Height == 720 && Math.Abs(Math.Round(desc.FrameRate) - 60) < 1
                         select new { profile, desc }).FirstOrDefault();
            if (match != null)
            {
                settings.VideoProfile = match.profile;
                settings.RecordMediaDescription = match.desc;
            }
            await _mediaCaptureSide.InitializeAsync(settings);
            SideCam.Source = _mediaCaptureSide;
            await _mediaCaptureSide.StartPreviewAsync();
            _displayRequestSide = new DisplayRequest();
            _displayRequestSide.RequestActive();
            DisplayInformation.AutoRotationPreferences = DisplayOrientations.Landscape;
            CameraManager.GetCameraManager.CurrentSideCamera = deviceInformation;
            IsPreviewingSide = true;
        }
        catch (UnauthorizedAccessException)
        {
            Debug.WriteLine("The app was denied access to the camera");
            IsPreviewingSide = false;
        }
        catch (Exception ex)
        {
            Debug.WriteLine("MediaCapture initialization failed. {0}", ex.Message);
            IsPreviewingSide = false;
        }
    }
}
And this is the method that starts the recording:
public IAsyncOperation<LowLagMediaRecording> RecBackCam(StorageFile fileBack)
{
    var mp4File = MediaEncodingProfile.CreateMp4(VideoEncodingQuality.HD720p);
    if (mp4File.Video != null)
        mp4File.Video.Bitrate = 3000000;
    return _mediaCaptureBack.PrepareLowLagRecordToStorageFileAsync(mp4File, fileBack);
}
But it does not record at 60 fps, because no matching profile is found (in the preview method).
And when I use this (in the recording method):
mp4File.Video.FrameRate.Numerator = 3600;
mp4File.Video.FrameRate.Denominator = 60;
it records 60 frames per second, but frames 1 and 2 are identical, as are frames 3 and 4, and so on. I need 60 actually distinct frames per second.
All the basics of the code come from the MSDN website:
link to code on msdn.
To set the frame rate for MediaCapture, we can use the VideoDeviceController.SetMediaStreamPropertiesAsync method, like the following:
await _mediaCapture.VideoDeviceController.SetMediaStreamPropertiesAsync(MediaStreamType.VideoRecord, encodingProperties);
For the encodingProperties, we can get it from the VideoDeviceController.GetAvailableMediaStreamProperties method, which returns a list of the supported encoding properties for the video device. If your camera supports 60 fps at 1280x720, you should be able to find the corresponding VideoEncodingProperties in this list.
For more info, please see the article Set format, resolution, and frame rate for MediaCapture and also the Camera resolution sample on GitHub.
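For example, here is a minimal sketch of that lookup (my own assumption, reusing the _mediaCaptureSide field from the question; run it after InitializeAsync):

// Sketch only: find a 1280x720@60 mode among the device's advertised modes
// and apply it to the record stream. Requires using System.Linq;
// using Windows.Media.Capture; using Windows.Media.MediaProperties;
private async Task Apply60FpsRecordPropertiesAsync()
{
    var match = _mediaCaptureSide.VideoDeviceController
        .GetAvailableMediaStreamProperties(MediaStreamType.VideoRecord)
        .OfType<VideoEncodingProperties>()
        .FirstOrDefault(p => p.Width == 1280 && p.Height == 720
            && p.FrameRate.Denominator != 0
            && Math.Abs((double)p.FrameRate.Numerator / p.FrameRate.Denominator - 60) < 1);

    if (match != null)
    {
        // Ask the driver to deliver this mode on the record stream.
        await _mediaCaptureSide.VideoDeviceController
            .SetMediaStreamPropertiesAsync(MediaStreamType.VideoRecord, match);
    }
}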
I'm creating a poker app, and I want to simulate the effect of throwing the cards. To do that I currently use await Task.Delay(x); however, this requires an async Task, and if I make the method where I call Task.Delay(x) async, I will have to change at least 5-6 more methods to async as well. I suppose that is fine for someone who understands exactly how asynchronous code works. Currently I'm getting a lot of logic errors simply because I obviously don't know how the async and await operators work. In other words, I'm a newbie. Is there any alternative to this exact line, await Task.Delay(x)? I won't use anything else connected to async, just this line.
Here's the code:
private async Task Shuffle()
{
    Bitmap refreshBackImage = new Bitmap(getBack);
    bCall.Enabled = false;
    bRaise.Enabled = false;
    bFold.Enabled = false;
    bCheck.Enabled = false;
    MaximizeBox = false;
    MinimizeBox = false;
    bool check = false;
    horizontal = tbChips.Left - _settings.Width * 2 - 15;
    vertical = pbTimer.Top - _settings.Height - (_settings.Height) / 7;
    RNGCrypto random = new RNGCrypto();
    for (i = ImgLocation.Length; i > 0; i--)
    {
        int j = random.Next(i);
        string k = ImgLocation[j];
        ImgLocation[j] = ImgLocation[i - 1];
        ImgLocation[i - 1] = k;
    }
    for (i = 0; i < 17; i++)
    {
        Deck[i] = Image.FromFile(ImgLocation[i]);
        string[] charsToRemove = { getCards, ".png", "\\" };
        foreach (string c in charsToRemove)
        {
            ImgLocation[i] = ImgLocation[i].Replace(c, string.Empty);
        }
        Reserve[i] = int.Parse(ImgLocation[i]) - 1;
        Holder[i] = new PictureBox
        {
            SizeMode = PictureBoxSizeMode.StretchImage,
            Height = _settings.Height,
            Width = _settings.Width
        };
        Controls.Add(Holder[i]);
        Holder[i].Name = "pb" + i;
        await Task.Delay(150);
        #region Throwing Cards
        SetPlayers(Player, i, ref check, 560, 470, refreshBackImage);
        SetPlayers(Bot1, i, ref check, 15, 420, refreshBackImage);
        SetPlayers(Bot2, i, ref check, 75, 65, refreshBackImage);
        SetPlayers(Bot3, i, ref check, 590, 25, refreshBackImage);
        SetPlayers(Bot4, i, ref check, 1115, 65, refreshBackImage);
        SetPlayers(Bot5, i, ref check, 1160, 420, refreshBackImage);
        if (i >= 12)
        {
            Holder[12].Tag = Reserve[12];
            if (i > 12) Holder[13].Tag = Reserve[13];
            if (i > 13) Holder[14].Tag = Reserve[14];
            if (i > 14) Holder[15].Tag = Reserve[15];
            if (i > 15)
            {
                Holder[16].Tag = Reserve[16];
            }
            if (!check)
            {
                horizontal = 410;
                vertical = 265;
            }
            check = true;
            if (Holder[i] != null)
            {
                Holder[i].Anchor = AnchorStyles.None;
                Holder[i].Image = refreshBackImage;
                //Holder[i].Image = Deck[i];
                Holder[i].Location = new Point(horizontal, vertical);
                horizontal += 110;
            }
        }
        #endregion
        Bot1 = (Bot)FoldedPlayer(Bot1);
        Bot2 = (Bot)FoldedPlayer(Bot2);
        Bot3 = (Bot)FoldedPlayer(Bot3);
        Bot4 = (Bot)FoldedPlayer(Bot4);
        Bot5 = (Bot)FoldedPlayer(Bot5);
        if (i == 16)
        {
            if (!restart)
            {
                MaximizeBox = true;
                MinimizeBox = true;
            }
            Turns();
        }
    }
    Ending();
}
For things like animations, you might want the System.Windows.Forms.Timer component, which raises a periodic event.
You can configure the interval as well as temporarily disabling it entirely.
Note that after the delay, you'll be back to the top of the handler function... it doesn't automatically keep track of where you are in your sequence the way the await keyword automatically resumes at the next line. So you'll need some counter or such to let you know what step of the animation is coming next.
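For example, a minimal sketch (the ThrowCard helper is a hypothetical stand-in for one iteration of the question's dealing loop):

// Sketch using System.Windows.Forms.Timer; ThrowCard is hypothetical.
private readonly Timer _dealTimer = new Timer { Interval = 150 };
private int _dealStep; // the handler restarts each tick, so track progress here

private void StartDealing()
{
    _dealStep = 0;
    _dealTimer.Tick += DealTimer_Tick;
    _dealTimer.Start();
}

private void DealTimer_Tick(object sender, EventArgs e)
{
    ThrowCard(_dealStep); // do one step of the animation
    _dealStep++;
    if (_dealStep >= 17) // the question's loop deals 17 cards
    {
        _dealTimer.Stop();
        _dealTimer.Tick -= DealTimer_Tick;
        Ending();
    }
}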
I'm not sure you want async here, because an awaited call won't come back until it's complete. If you need precise timing, async can have trouble providing it, because you have to wait for the response, which may be an issue, especially in a game. Watch how async functions behave in the debugger and you'll understand. An event trigger, or just a timed event, will work much better, because you control when it fires and how long you wait for it. That should get rid of your delay altogether.
In your case, a simple timed trigger event will display the throwing of the cards and, once finished, continue on with your function. That's it in simple terms; you may have to do a few things like waiting until that trigger has finished. Either way, there is a ton of information about events; have a look before you go for async. Since it's WinForms you can do it however you please; on Windows Phone and some other platforms, certain APIs are async-only.
This should help:
https://msdn.microsoft.com/en-us/library/wkzf914z(v=vs.90).aspx
I want to play a video (mostly .mov with Motion JPEG) frame by frame with a changing framerate. I have a function that gives me a frame number, and then I have to jump there. It will move mostly in one direction but can skip a few frames from time to time; the velocity is not constant either.
So I have a timer asking every 40 ms for a new frame number and setting the new position.
My first approach is with DirectShow.Net (Interop.QuartzTypeLib). I render and open the video and set it to pause so it draws the picture in the graph:
FilgraphManagerClass media = new FilgraphManagerClass();
media.RenderFile(FileName);
media.Pause();
Now I just set a new position:
media.CurrentPosition = framenumber * media.AvgTimePerFrame;
Since the video is paused, it then draws every requested new position (frame). This works perfectly, but it is really slow: the video keeps stuttering and lagging, and it's not the video source; there are enough recorded frames to play a fluent video.
With some performance tests I found out that the LAV codec is the bottleneck here. It is not referenced directly in my project; since this is a DirectShow player, the graph pulls the codec in through the codec pack installed on my PC.
Ideas:
Using the LAV codec myself, directly in C#. I searched, but everyone seems to use DirectShow, building their own filters rather than using existing ones directly in the project.
Instead of seeking or setting the time, can I get single frames just by frame number and simply draw them?
Is there a completely different way to achieve what I want to do?
Background:
This project has to be a train simulator. We recorded real videos of trains driving, filmed from inside the cockpit, and we know which frame corresponds to which position. My C# program calculates the position of the train as a function of time and acceleration, returns the appropriate frame number, and draws that frame.
Additional Information:
There is another project (not written by me) in C/C++ that uses DirectShow and avcodec-LAV directly, in a way similar to mine, and it works fine! That's why I had the idea to use a codec/filter like avcodec-LAV myself. But I can't find an interop or interface to work with from C#.
Thanks everyone for reading this and trying to help! :)
Obtaining a specific frame by seeking the filter graph (the entire pipeline) is pretty slow, since every seek operation involves the following behind the scenes: flushing everything, possibly re-creating worker threads, seeking to the first key frame/splice point/clean point/I-frame before the requested time, then decoding from that position and skipping frames until the originally requested time is reached.
Overall, the method works well when you scrub paused video or retrieve specific still frames. When you try to play this as smooth video, however, a significant part of the effort is wasted on seeking within the video stream.
Solutions here are:
re-encode video to remove or reduce temporal compression (e.g. Motion JPEG AVI/MOV/MP4 files)
whenever possible prefer to skip frames and/or re-timestamp them according to your algorithm instead of seeking
have a cache of decoded video frames and pick from there, populating it as necessary in a worker thread
The latter two are unfortunately hard to achieve without advanced filter development (where continuous decoding, uninterrupted by seek operations, is the key to decent performance). With basic DirectShow.Net you only have basic control over streaming, and hence only the first item from the list above.
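To illustrate the caching idea, here is a rough conceptual sketch of my own (not DirectShow-specific; the decodeFrame delegate is a hypothetical stand-in for whatever actually decodes frame N into a Bitmap):

// Conceptual sketch only: a bounded cache of decoded frames, filled ahead of
// the playback position by a worker thread. Requires using System;
// using System.Collections.Generic; using System.Drawing;
public class FrameCache
{
    private readonly Dictionary<int, Bitmap> cache = new Dictionary<int, Bitmap>();
    private readonly object sync = new object();
    private readonly Func<int, Bitmap> decodeFrame;
    private const int LookAhead = 25; // ~1 second at 25 fps

    public FrameCache(Func<int, Bitmap> decodeFrame)
    {
        this.decodeFrame = decodeFrame;
    }

    // Called from the 40 ms timer: return the frame if it is already decoded.
    public Bitmap TryGetFrame(int frameNumber)
    {
        lock (sync)
        {
            Bitmap bmp;
            return cache.TryGetValue(frameNumber, out bmp) ? bmp : null;
        }
    }

    // Called from a worker thread: decode forward from the current position
    // and evict frames that playback has already passed.
    public void FillAhead(int currentFrame)
    {
        for (int n = currentFrame; n < currentFrame + LookAhead; n++)
        {
            lock (sync)
            {
                if (cache.ContainsKey(n)) continue;
            }
            Bitmap bmp = decodeFrame(n); // decode outside the lock
            lock (sync)
            {
                cache[n] = bmp;
                var stale = new List<int>();
                foreach (int key in cache.Keys)
                    if (key < currentFrame) stale.Add(key);
                foreach (int key in stale)
                {
                    cache[key].Dispose();
                    cache.Remove(key);
                }
            }
        }
    }
}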
Wanted to post a comment instead of an answer, but I don't have the reputation. I think you're heading in the wrong direction with DirectShow. I've been messing with Motion JPEG for a few years now between C# and Android, and have gotten great performance with built-in .NET code (for converting a byte array to a JPEG frame) and a bit of multi-threading. I can easily achieve over 30 fps from multiple devices, with each device running in its own thread.
Below is an older version of my Motion JPEG parser from my C# app 'OmniView'. To use it, pass the network stream to Init and subscribe to the OnImageReceived event. Then you can easily save the frames to the hard drive for later use (perhaps with the filename set to the timestamp for easy lookup). For better performance, though, you will want to save all of the images to one file.
using OmniView.Framework.Helpers;
using System;
using System.IO;
using System.Text;
using System.Windows.Media.Imaging;

namespace OmniView.Framework.Devices.MJpeg
{
    public class MJpegStream : IDisposable
    {
        private const int BUFFER_SIZE = 4096;
        private const string tag_length = "Content-Length:";
        private const string stamp_format = "yyyyMMddHHmmssfff";

        public delegate void ImageReceivedEvent(BitmapImage img);
        public delegate void FrameCountEvent(long frames, long failed);
        public event ImageReceivedEvent OnImageReceived;
        public event FrameCountEvent OnFrameCount;

        private bool isHead, isSetup;
        private byte[] buffer, newline, newline_src;
        private int imgBufferStart;
        private Stream data_stream;
        private MemoryStream imgStreamA, imgStreamB;
        private int headStart, headStop;
        private long imgSize, imgSizeTgt;
        private bool useStreamB;
        public volatile bool EnableRecording, EnableSnapshot;
        public string RecordPath, SnapshotFilename;
        private string boundary_tag;
        private bool tagReadStarted;
        private bool enableBoundary;
        public volatile bool OutputFrameCount;
        private long FrameCount, FailedCount;

        public MJpegStream() {
            isSetup = false;
            imgStreamA = new MemoryStream();
            imgStreamB = new MemoryStream();
            buffer = new byte[BUFFER_SIZE];
            newline_src = new byte[] {13, 10};
        }

        public void Init(Stream stream) {
            this.data_stream = stream;
            FrameCount = FailedCount = 0;
            startHeader(0);
        }

        public void Dispose() {
            if (data_stream != null) data_stream.Dispose();
            if (imgStreamA != null) imgStreamA.Dispose();
            if (imgStreamB != null) imgStreamB.Dispose();
        }

        //=============================

        public void Process() {
            if (isHead) processHeader();
            else {
                if (enableBoundary) processImageBoundary();
                else processImage();
            }
        }

        public void Snapshot(string filename) {
            SnapshotFilename = filename;
            EnableSnapshot = true;
        }

        //-----------------------------
        // Header

        private void startHeader(int remaining_bytes) {
            isHead = true;
            headStart = 0;
            headStop = remaining_bytes;
            imgSizeTgt = 0;
            tagReadStarted = false;
        }

        private void processHeader() {
            int t = BUFFER_SIZE - headStop;
            headStop += data_stream.Read(buffer, headStop, t);
            int nl;
            //
            if (!isSetup) {
                byte[] new_newline;
                if ((nl = findNewline(headStart, headStop, out new_newline)) >= 0) {
                    string tag = Encoding.UTF8.GetString(buffer, headStart, nl - headStart);
                    if (tag.StartsWith("--")) boundary_tag = tag;
                    headStart = nl + new_newline.Length;
                    newline = new_newline;
                    isSetup = true;
                    return;
                }
            } else {
                while ((nl = findData(newline, headStart, headStop)) >= 0) {
                    string tag = Encoding.UTF8.GetString(buffer, headStart, nl - headStart);
                    if (!tagReadStarted && tag.Length > 0) tagReadStarted = true;
                    headStart = nl + newline.Length;
                    //
                    if (!processHeaderData(tag, nl)) return;
                }
            }
            //
            if (headStop >= BUFFER_SIZE) {
                string data = Encoding.UTF8.GetString(buffer, headStart, headStop - headStart);
                throw new Exception("Invalid Header!");
            }
        }

        private bool processHeaderData(string tag, int index) {
            if (tag.StartsWith(tag_length)) {
                string val = tag.Substring(tag_length.Length);
                imgSizeTgt = long.Parse(val);
            }
            //
            if (tag.Length == 0 && tagReadStarted) {
                if (imgSizeTgt > 0) {
                    finishHeader(false);
                    return false;
                }
                if (boundary_tag != null) {
                    finishHeader(true);
                    return false;
                }
            }
            //
            return true;
        }

        private void finishHeader(bool enable_boundary) {
            int s = shiftBytes(headStart, headStop);
            enableBoundary = enable_boundary;
            startImage(s);
        }

        //-----------------------------
        // Image

        private void startImage(int remaining_bytes) {
            isHead = false;
            imgBufferStart = remaining_bytes;
            Stream imgStream = getStream();
            imgStream.Seek(0, SeekOrigin.Begin);
            imgStream.SetLength(imgSizeTgt);
            imgSize = 0;
        }

        private void processImage() {
            long img_r = (imgSizeTgt - imgSize - imgBufferStart);
            int bfr_r = Math.Max(BUFFER_SIZE - imgBufferStart, 0);
            int t = (int)Math.Min(img_r, bfr_r);
            int s = data_stream.Read(buffer, imgBufferStart, t);
            int x = imgBufferStart + s;
            appendImageData(0, x);
            imgBufferStart = 0;
            //
            if (imgSize >= imgSizeTgt) processImageData(0);
        }

        private void processImageBoundary() {
            int t = Math.Max(BUFFER_SIZE - imgBufferStart, 0);
            int s = data_stream.Read(buffer, imgBufferStart, t);
            //
            int nl, start = 0;
            int end = imgBufferStart + s;
            while ((nl = findData(newline, start, end)) >= 0) {
                int tag_length = boundary_tag.Length;
                if (nl + newline.Length + tag_length > BUFFER_SIZE) {
                    appendImageData(start, nl + newline.Length - start);
                    start = nl + newline.Length;
                    continue;
                }
                //
                string v = Encoding.UTF8.GetString(buffer, nl + newline.Length, tag_length);
                if (v == boundary_tag) {
                    appendImageData(start, nl - start);
                    int xstart = nl + newline.Length + tag_length;
                    int xsize = shiftBytes(xstart, end);
                    processImageData(xsize);
                    return;
                } else {
                    appendImageData(start, nl + newline.Length - start);
                }
                start = nl + newline.Length;
            }
            //
            if (start < end) {
                int end_x = end - newline.Length;
                if (start < end_x) {
                    appendImageData(start, end_x - start);
                }
                //
                shiftBytes(end - newline.Length, end);
                imgBufferStart = newline.Length;
            }
        }

        private void processImageData(int remaining_bytes) {
            if (EnableSnapshot) {
                EnableSnapshot = false;
                saveSnapshot();
            }
            //
            try {
                BitmapImage img = createImage();
                if (EnableRecording) recordFrame();
                if (OnImageReceived != null) OnImageReceived.Invoke(img);
                FrameCount++;
            }
            catch (Exception) {
                // output frame error ?!
                FailedCount++;
            }
            //
            if (OutputFrameCount && OnFrameCount != null) OnFrameCount.Invoke(FrameCount, FailedCount);
            //
            useStreamB = !useStreamB;
            startHeader(remaining_bytes);
        }

        private void appendImageData(int index, int length) {
            Stream imgStream = getStream();
            imgStream.Write(buffer, index, length);
            imgSize += (length - index);
        }

        //-----------------------------

        private void recordFrame() {
            string stamp = DateTime.Now.ToString(stamp_format);
            string filename = RecordPath + "\\" + stamp + ".jpg";
            //
            ImageHelper.Save(getStream(), filename);
        }

        private void saveSnapshot() {
            Stream imgStream = getStream();
            //
            imgStream.Position = 0;
            Stream file = File.Open(SnapshotFilename, FileMode.Create, FileAccess.Write);
            try {imgStream.CopyTo(file);}
            finally {file.Close();}
        }

        private BitmapImage createImage() {
            Stream imgStream = getStream();
            imgStream.Position = 0;
            return ImageHelper.LoadStream(imgStream);
        }

        //-----------------------------

        private Stream getStream() {return useStreamB ? imgStreamB : imgStreamA;}

        private int findNewline(int start, int stop, out byte[] data) {
            for (int i = start; i < stop; i++) {
                if (i < stop-1 && buffer[i] == newline_src[0] && buffer[i+1] == newline_src[1]) {
                    data = newline_src;
                    return i;
                } else if (buffer[i] == newline_src[1]) {
                    data = new byte[] {newline_src[1]};
                    return i;
                }
            }
            data = null;
            return -1;
        }

        private int findData(byte[] data, int start, int stop) {
            int data_size = data.Length;
            for (int i = start; i < stop-data_size; i++) {
                if (findInnerData(data, i)) return i;
            }
            return -1;
        }

        private bool findInnerData(byte[] data, int buffer_index) {
            int count = data.Length;
            for (int i = 0; i < count; i++) {
                if (data[i] != buffer[buffer_index+i]) return false;
            }
            return true;
        }

        private int shiftBytes(int start, int end) {
            int c = end - start;
            for (int i = 0; i < c; i++) {
                buffer[i] = buffer[end-c+i];
            }
            return c;
        }
    }
}
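For reference, here is a minimal usage sketch (my own assumption of how to wire it up; the camera URL is a placeholder):

// Connect the parser to an HTTP MJPEG source and pump it on a background thread.
var request = System.Net.WebRequest.Create("http://camera.local/video.mjpg");
var response = request.GetResponse();

var parser = new MJpegStream();
parser.OnImageReceived += img =>
{
    // img is a BitmapImage decoded from one JPEG frame; marshal it to the
    // UI thread (e.g. Dispatcher.Invoke) before displaying it.
};
parser.Init(response.GetResponseStream());

var worker = new System.Threading.Thread(() =>
{
    while (true) parser.Process(); // each call parses one header or image chunk
})
{ IsBackground = true };
worker.Start();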
I am looking to create an external application that monitors the 'FPS' of a DirectX application (like FRAPS without the recording). I have read several Microsoft articles on performance measuring tools - but I am looking to get the feedback (and experience) of the community.
My question: what is the best method for obtaining the FPS of a DirectX application?
Windows has some Event Tracing for Windows (ETW) providers related to DirectX profiling. The most interesting ones are Microsoft-Windows-D3D9 and Microsoft-Windows-DXGI, which allow tracing of the frame presentation events. The simplest way to calculate FPS is to count the number of PresentStart events within a time interval and divide that by the length of the interval.
To work with ETW in C#, install the Microsoft.Diagnostics.Tracing.TraceEvent NuGet package.
The following code sample displays the FPS of running processes:
using System;
using System.Collections.Generic;
using System.Text;
using System.Diagnostics;
using System.Threading;
using Microsoft.Diagnostics.Tracing.Session;

namespace ConsoleApp1
{
    //helper class to store frame timestamps
    public class TimestampCollection
    {
        const int MAXNUM = 1000;

        public string Name { get; set; }

        List<long> timestamps = new List<long>(MAXNUM + 1);
        object sync = new object();

        //add value to the collection
        public void Add(long timestamp)
        {
            lock (sync)
            {
                timestamps.Add(timestamp);
                if (timestamps.Count > MAXNUM) timestamps.RemoveAt(0);
            }
        }

        //get the number of timestamps within interval
        public int QueryCount(long from, long to)
        {
            int c = 0;
            lock (sync)
            {
                foreach (var ts in timestamps)
                {
                    if (ts >= from && ts <= to) c++;
                }
            }
            return c;
        }
    }

    class Program
    {
        //event codes (https://github.com/GameTechDev/PresentMon/blob/40ee99f437bc1061a27a2fc16a8993ee8ce4ebb5/PresentData/PresentMonTraceConsumer.cpp)
        public const int EventID_D3D9PresentStart = 1;
        public const int EventID_DxgiPresentStart = 42;

        //ETW provider codes
        public static readonly Guid DXGI_provider = Guid.Parse("{CA11C036-0102-4A2D-A6AD-F03CFED5D3C9}");
        public static readonly Guid D3D9_provider = Guid.Parse("{783ACA0A-790E-4D7F-8451-AA850511C6B9}");

        static TraceEventSession m_EtwSession;
        static Dictionary<int, TimestampCollection> frames = new Dictionary<int, TimestampCollection>();
        static Stopwatch watch = null;
        static object sync = new object();

        static void EtwThreadProc()
        {
            //start tracing
            m_EtwSession.Source.Process();
        }

        static void OutputThreadProc()
        {
            //console output loop
            while (true)
            {
                long t1, t2;
                long dt = 2000;
                Console.Clear();
                Console.WriteLine(DateTime.Now.ToString() + "." + DateTime.Now.Millisecond.ToString());
                Console.WriteLine();
                lock (sync)
                {
                    t2 = watch.ElapsedMilliseconds;
                    t1 = t2 - dt;
                    foreach (var x in frames.Values)
                    {
                        Console.Write(x.Name + ": ");
                        //get the number of frames
                        int count = x.QueryCount(t1, t2);
                        //calculate FPS
                        Console.WriteLine("{0} FPS", (double)count / dt * 1000.0);
                    }
                }
                Console.WriteLine();
                Console.WriteLine("Press any key to stop tracing...");
                Thread.Sleep(1000);
            }
        }

        public static void Main(string[] argv)
        {
            //create ETW session and register providers
            m_EtwSession = new TraceEventSession("mysess");
            m_EtwSession.StopOnDispose = true;
            m_EtwSession.EnableProvider("Microsoft-Windows-D3D9");
            m_EtwSession.EnableProvider("Microsoft-Windows-DXGI");

            //handle event
            m_EtwSession.Source.AllEvents += data =>
            {
                //filter out frame presentation events
                if (((int)data.ID == EventID_D3D9PresentStart && data.ProviderGuid == D3D9_provider) ||
                    ((int)data.ID == EventID_DxgiPresentStart && data.ProviderGuid == DXGI_provider))
                {
                    int pid = data.ProcessID;
                    long t;
                    lock (sync)
                    {
                        t = watch.ElapsedMilliseconds;
                        //if process is not yet in Dictionary, add it
                        if (!frames.ContainsKey(pid))
                        {
                            frames[pid] = new TimestampCollection();
                            string name = "";
                            var proc = Process.GetProcessById(pid);
                            if (proc != null)
                            {
                                using (proc)
                                {
                                    name = proc.ProcessName;
                                }
                            }
                            else name = pid.ToString();
                            frames[pid].Name = name;
                        }
                        //store frame timestamp in collection
                        frames[pid].Add(t);
                    }
                }
            };

            watch = new Stopwatch();
            watch.Start();

            Thread thETW = new Thread(EtwThreadProc);
            thETW.IsBackground = true;
            thETW.Start();

            Thread thOutput = new Thread(OutputThreadProc);
            thOutput.IsBackground = true;
            thOutput.Start();

            Console.ReadKey();
            m_EtwSession.Dispose();
        }
    }
}
Based on the source code of the PresentMon project.
Fraps inserts a DLL into every running application and hooks specific DX calls to figure out the framerate and capture video; pretty sure you'll have to do something similar. After a bit of poking around I found a GitHub project that does some basic DX hooking for captures and overlays, so that might be a good spot to start. Though I've not used it personally, so I can't totally vouch for the quality.
http://spazzarama.com/2011/03/14/c-screen-capture-and-overlays-for-direct3d-9-10-and-11-using-api-hooks/
Building on https://stackoverflow.com/a/54625953/12047161:
I had more success not using the stopwatch, as the event triggers seem to be asynchronous with the actual frames. I kept getting batches of 20-50 frames all at once, making the estimated FPS fluctuate between 50% and 250% of the actual value.
Instead I used the TimeStampRelativeMSec
//handle event
m_EtwSession.Source.AllEvents += data =>
{
    //filter out frame presentation events
    if ((int)data.ID == EventID_DxgiPresentStart && data.ProviderGuid == DXGI_provider)
    {
        int pid = data.ProcessID;
        //if process is not yet in Dictionary, add it
        if (!frames.ContainsKey(pid))
        {
            frames[pid] = new TimestampCollection();
            string name = "";
            var proc = Process.GetProcessById(pid);
            if (proc != null)
            {
                using (proc)
                {
                    name = proc.ProcessName;
                }
            }
            else name = pid.ToString();
            frames[pid].Name = name;
        }
        //store the event's own timestamp instead of a stopwatch value
        frames[pid].Add((long)data.TimeStampRelativeMSec);
    }
};
property from the TraceEvent class, and calculated FPS by averaging the time between an arbitrary number of past entries:
public double GetFrameTime(int count)
{
    double returnValue = 0;
    int listCount = timestamps.Count;
    if (listCount > count)
    {
        for (int i = 1; i <= count; i++)
        {
            returnValue += timestamps[listCount - i] - timestamps[listCount - (i + 1)];
        }
        returnValue /= count;
    }
    return returnValue;
}
This method gave me far more accurate results (compared to in-game counters, where available) for several different games I've tried.
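For instance, a hypothetical usage (assuming GetFrameTime is added to the TimestampCollection class from the earlier answer):

// Average the last 60 frame intervals and convert milliseconds to FPS.
double avgFrameTimeMs = collection.GetFrameTime(60);
double fps = avgFrameTimeMs > 0 ? 1000.0 / avgFrameTimeMs : 0;
Console.WriteLine("{0:F1} FPS", fps);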
Like many people already seem to have (there are several threads on this subject here), I am looking for ways to create a video from a sequence of images.
I want to implement my functionality in C#!
Here is what I want to do:
/* Pseudo code */
void CreateVideo(List<Image> imageSequence, long durationOfEachImageMs, string outputVideoFileName, string outputFormat)
{
    // Info: imageSequence.Count will be > 30 000 images
    // Info: durationOfEachImageMs will be < 300 ms

    if (outputFormat == "mpeg")
    {
    }
    else if (outputFormat == "avi")
    {
    }
    else
    {
    }

    // Save video file to disk
}
I know there's a project called Splicer (http://splicer.codeplex.com/) but I can't find suitable documentation or clear examples that I can follow (these are the examples that I found).
The closest to what I want to do, which I found here on CodePlex, is this:
How can I create a video from a directory of images in C#?
I have also read a few threads about ffmpeg (for example this: C# and FFmpeg preferably without shell commands? and this: convert image sequence using ffmpeg), but none of them help with my problem, and I don't think the ffmpeg-command-line approach is the best solution for me (because of the amount of images).
I believe that I can use the Splicer-project in some way (?).
In my case, it is about > 30 000 images, where each image should be displayed for about 200 ms (in the video stream that I want to create).
(What is the video about? Plants growing...)
Can anyone help me complete my function?
Well, this answer comes a bit late, but since I have noticed some activity on my original question lately (and the fact that no working solution was provided), I would like to give you what finally worked for me.
I'll split my answer into three parts:
Background
Problem
Solution
Background
(this section is not important for the solution)
My original problem was that I had a lot of images (a huge amount, that is), individually stored in a database as byte arrays. I wanted to make a video sequence out of all these images.
My equipment setup was something like this general drawing:
The images depicted growing tomato plants in different states. All images were taken every minute during daytime.
/* pseudo code for taking and storing images */
while (true)
{
    if (daylight)
    {
        //get an image from the camera
        //store the image as byte array to db
    }
    //wait 1 min
}
I had a very simple db for storing images; there was only one table (the table ImageSet) in it:
Problem
I had read many articles about ffmpeg (please see my original question) but I couldn't find any on how to go from a collection of images to a video.
Solution
Finally, I got a working solution!
The main part of it comes from the open source project AForge.NET. In short, you could say that AForge.NET is a computer vision and artificial intelligence library in C#.
(If you want a copy of the framework, just grab it from http://www.aforgenet.com/)
In AForge.NET, there is this VideoFileWriter class (a class for writing video files with the help of ffmpeg). This did almost all of the work. (There is also a very good example here.)
This is the final class (reduced) which I used to fetch image data from my image database and convert it into a video:
public class MovieMaker
{
    public void Start()
    {
        var startDate = DateTime.Parse("12 Mar 2012");
        var endDate = DateTime.Parse("13 Aug 2012");
        CreateMovie(startDate, endDate);
    }

    /* THIS CODE BLOCK IS COPIED */

    public Bitmap ToBitmap(byte[] byteArrayIn)
    {
        var ms = new System.IO.MemoryStream(byteArrayIn);
        var returnImage = System.Drawing.Image.FromStream(ms);
        var bitmap = new System.Drawing.Bitmap(returnImage);
        return bitmap;
    }

    public Bitmap ReduceBitmap(Bitmap original, int reducedWidth, int reducedHeight)
    {
        var reduced = new Bitmap(reducedWidth, reducedHeight);
        using (var dc = Graphics.FromImage(reduced))
        {
            // you might want to change properties like
            dc.InterpolationMode = System.Drawing.Drawing2D.InterpolationMode.HighQualityBicubic;
            dc.DrawImage(original, new Rectangle(0, 0, reducedWidth, reducedHeight), new Rectangle(0, 0, original.Width, original.Height), GraphicsUnit.Pixel);
        }
        return reduced;
    }

    /* END OF COPIED CODE BLOCK */

    private void CreateMovie(DateTime startDate, DateTime endDate)
    {
        int width = 320;
        int height = 240;
        var frameRate = 200;

        using (var container = new ImageEntitiesContainer())
        {
            //a LINQ-query for getting the desired images
            var query = from d in container.ImageSet
                        where d.Date >= startDate && d.Date <= endDate
                        select d;

            // create instance of video writer
            using (var vFWriter = new VideoFileWriter())
            {
                // create new video file
                vFWriter.Open("nameOfMyVideoFile.avi", width, height, frameRate, VideoCodec.Raw);

                var imageEntities = query.ToList();

                //loop through all images in the collection
                foreach (var imageEntity in imageEntities)
                {
                    //what's the current image data?
                    var imageByteArray = imageEntity.Data;
                    var bmp = ToBitmap(imageByteArray);
                    var bmpReduced = ReduceBitmap(bmp, width, height);

                    vFWriter.WriteVideoFrame(bmpReduced);
                }

                vFWriter.Close();
            }
        }
    }
}
Update 2013-11-29 (how-to) (hope this is what you asked for, #Kiquenet?)
Download the AForge.NET Framework from the downloads page (download the full ZIP archive and you will find many interesting Visual Studio solutions with projects, like Video, in the AForge.NET Framework-2.2.5\Samples folder...)
Namespace: AForge.Video.FFMPEG (from the documentation)
Assembly: AForge.Video.FFMPEG (in AForge.Video.FFMPEG.dll) (from the documentation) (you can find this AForge.Video.FFMPEG.dll in the AForge.NET Framework-2.2.5\Release folder)
If you want to create your own solution, make sure you have a reference to AForge.Video.FFMPEG.dll in your project. Then it should be easy to use the VideoFileWriter class. If you follow the link to the class you will find a very good (and simple) example. In the code, they feed the VideoFileWriter with Bitmap images in a for-loop.
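For reference, a minimal sketch along the lines of that documentation example (the file name, size, and frame count are placeholders of mine):

// Draw a frame counter onto a bitmap and feed it to VideoFileWriter.
using (var writer = new VideoFileWriter())
{
    writer.Open("test.avi", 320, 240, 25, VideoCodec.MPEG4);
    using (var bmp = new Bitmap(320, 240))
    {
        for (int frame = 0; frame < 250; frame++)
        {
            using (var g = Graphics.FromImage(bmp))
            {
                g.Clear(Color.Black);
                g.DrawString(frame.ToString(), SystemFonts.DefaultFont, Brushes.White, 10, 10);
            }
            writer.WriteVideoFrame(bmp);
        }
    }
    writer.Close();
}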
I found this code in the Splicer samples; it looks pretty close to what you want:
string outputFile = "FadeBetweenImages.wmv";

using (ITimeline timeline = new DefaultTimeline())
{
    IGroup group = timeline.AddVideoGroup(32, 160, 100);
    ITrack videoTrack = group.AddTrack();

    IClip clip1 = videoTrack.AddImage("image1.jpg", 0, 2); // play first image for a little while
    IClip clip2 = videoTrack.AddImage("image2.jpg", 0, 2); // and the next
    IClip clip3 = videoTrack.AddImage("image3.jpg", 0, 2); // and the next
    IClip clip4 = videoTrack.AddImage("image4.jpg", 0, 2); // and finally the last

    double halfDuration = 0.5;

    // fade out and back in
    group.AddTransition(clip2.Offset - halfDuration, halfDuration, StandardTransitions.CreateFade(), true);
    group.AddTransition(clip2.Offset, halfDuration, StandardTransitions.CreateFade(), false);

    // again
    group.AddTransition(clip3.Offset - halfDuration, halfDuration, StandardTransitions.CreateFade(), true);
    group.AddTransition(clip3.Offset, halfDuration, StandardTransitions.CreateFade(), false);

    // and again
    group.AddTransition(clip4.Offset - halfDuration, halfDuration, StandardTransitions.CreateFade(), true);
    group.AddTransition(clip4.Offset, halfDuration, StandardTransitions.CreateFade(), false);

    // add some audio
    ITrack audioTrack = timeline.AddAudioGroup().AddTrack();
    IClip audio = audioTrack.AddAudio("testinput.wav", 0, videoTrack.Duration);

    // create an audio envelope effect, this will:
    // fade the audio from 0% to 100% in 1 second.
    // play at full volume until 1 second before the end of the track
    // fade back out to 0% volume
    audioTrack.AddEffect(0, audio.Duration,
        StandardEffects.CreateAudioEnvelope(1.0, 1.0, 1.0, audio.Duration));

    // render our slideshow out to a windows media file
    using (IRenderer renderer =
        new WindowsMediaRenderer(timeline, outputFile, WindowsMediaProfiles.HighQualityVideo))
    {
        renderer.Render();
    }
}
I could not manage to get the above example to work. However, I did find another library that works amazingly well. Try, via NuGet, "accord.extensions.imaging.io"; then I wrote the following little function:
private void makeAvi(string imageInputfolderName, string outVideoFileName, float fps = 12.0f, string imgSearchPattern = "*.png")
{
    // reads all images in folder
    VideoWriter w = new VideoWriter(outVideoFileName,
        new Accord.Extensions.Size(480, 640), fps, true);
    Accord.Extensions.Imaging.ImageDirectoryReader ir =
        new ImageDirectoryReader(imageInputfolderName, imgSearchPattern);

    while (ir.Position < ir.Length)
    {
        IImage i = ir.Read();
        w.Write(i);
    }
    w.Close();
}
It reads all images from a folder and makes a video out of them.
If you want to make it nicer you could probably read the image dimensions instead of hard-coding them, but you get the point.
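Hypothetical usage (the folder paths are placeholders):

// Turn all PNGs in a folder into a 12 fps AVI using the function above.
makeAvi(@"C:\frames", @"C:\out\video.avi");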
The FFMediaToolkit is a good solution in 2020, with .NET Core support.
https://github.com/radek-k/FFMediaToolkit
FFMediaToolkit is a cross-platform .NET Standard library for creating and reading video files. It uses the native FFmpeg libraries via the FFmpeg.Autogen bindings.
The README of the library has a nice example for the question asked.
// You can set the codec, bitrate, frame rate and many other options here.
var settings = new VideoEncoderSettings(width: 1920, height: 1080, framerate: 30, codec: VideoCodec.H264);
settings.EncoderPreset = EncoderPreset.Fast;
settings.CRF = 17;

var file = MediaBuilder.CreateContainer(@"C:\videos\example.mp4").WithVideo(settings).Create();
while (file.Video.FramesCount < 300)
{
    file.Video.AddFrame(/*Your code*/);
}
file.Dispose(); // MediaOutput ("file" variable) must be disposed when encoding is completed. You can use a using() { } block instead.
This is a solution for creating a video from an image sequence in C# using Visual Studio.
My starting point was "Hauns TM"'s answer above, but my requirements were more basic than theirs, so this solution might be more appropriate for less advanced users (like myself).
Libraries:
using System;
using System.IO;
using System.Drawing;
using Accord.Video.FFMPEG;
You can get the FFMPEG library by searching for FFMPEG in "Tools -> NuGet Package Manager -> Manage NuGet Packages for Solution..."
The variables that I passed into the function are:
outputFileName = "C://outputFolder//outputMovie.avi"
inputImageSequence =
["C://inputFolder//image_001.png",
"C://inputFolder//image_002.png",
"C://inputFolder//image_003.png",
"C://inputFolder//image_004.png"]
Function:
private void videoMaker(string outputFileName, string[] inputImageSequence)
{
    int width = 1920;
    int height = 1080;
    var frameRate = 25;

    using (var vFWriter = new VideoFileWriter())
    {
        // create new video file
        vFWriter.Open(outputFileName, width, height, frameRate, VideoCodec.Raw);

        foreach (var imageLocation in inputImageSequence)
        {
            Bitmap imageFrame = System.Drawing.Image.FromFile(imageLocation) as Bitmap;
            vFWriter.WriteVideoFrame(imageFrame);
        }
        vFWriter.Close();
    }
}
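A hypothetical call (the paths are placeholders; requires using System.IO; and using System.Linq;):

// Collect the frames sorted by name so the frame order is deterministic.
string[] frames = Directory.GetFiles(@"C:\inputFolder", "image_*.png")
                           .OrderBy(f => f)
                           .ToArray();
videoMaker(@"C:\outputFolder\outputMovie.avi", frames);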
It looks like many of these answers are a bit obsolete in 2020, so I'll add my thoughts.
I have been working on the same problem and have published the .NET Core project Time Lapse Creator on GitHub: https://github.com/pekspro/TimeLapseCreator It shows how to add extra information to each frame (a timestamp, for instance), background audio, a title screen, fading and some more. Then ffmpeg is used to do the rendering. This is done in this function:
// Render video from a list of images, add background audio and a thumbnail image.
private async Task RenderVideoAsync(int framesPerSecond, List<string> images, string ffmpgPath,
    string audioPath, string thumbnailImagePath, string outPath,
    double videoFadeInDuration = 0, double videoFadeOutDuration = 0,
    double audioFadeInDuration = 0, double audioFadeOutDuration = 0)
{
    string fileListName = Path.Combine(OutputPath, "framelist.txt");
    var fileListContent = images.Select(a => $"file '{a}'{Environment.NewLine}duration 1");

    await File.WriteAllLinesAsync(fileListName, fileListContent);

    TimeSpan vidLengthCalc = TimeSpan.FromSeconds(images.Count / ((double)framesPerSecond));

    int coverId = -1;
    int audioId = -1;
    int framesId = 0;
    int nextId = 1;

    StringBuilder inputParameters = new StringBuilder();
    StringBuilder outputParameters = new StringBuilder();

    inputParameters.Append($"-r {framesPerSecond} -f concat -safe 0 -i {fileListName} ");

    outputParameters.Append($"-map {framesId} ");

    if (videoFadeInDuration > 0 || videoFadeOutDuration > 0)
    {
        List<string> videoFilterList = new List<string>();

        if (videoFadeInDuration > 0)
        {
            //Assume we fade in from first second.
            videoFilterList.Add($"fade=in:start_time={0}s:duration={videoFadeInDuration.ToString("0", NumberFormatInfo.InvariantInfo)}s");
        }

        if (videoFadeOutDuration > 0)
        {
            //Assume we fade out to last second.
            videoFilterList.Add($"fade=out:start_time={(vidLengthCalc.TotalSeconds - videoFadeOutDuration).ToString("0.000", NumberFormatInfo.InvariantInfo)}s:duration={videoFadeOutDuration.ToString("0.000", NumberFormatInfo.InvariantInfo)}s");
        }

        string videoFilterString = string.Join(',', videoFilterList);

        outputParameters.Append($"-filter:v:{framesId} \"{videoFilterString}\" ");
    }

    if (thumbnailImagePath != null)
    {
        coverId = nextId;
        nextId++;

        inputParameters.Append($"-i {thumbnailImagePath} ");

        outputParameters.Append($"-map {coverId} ");
        outputParameters.Append($"-c:v:{coverId} copy -disposition:v:{coverId} attached_pic ");
    }

    if (audioPath != null)
    {
        audioId = nextId;
        nextId++;

        inputParameters.Append($"-i {audioPath} ");

        outputParameters.Append($"-map {audioId} ");

        if (audioFadeInDuration <= 0 && audioFadeOutDuration <= 0)
        {
            // If no audio fading, just copy as it is.
            outputParameters.Append($"-c:a copy ");
        }
        else
        {
            List<string> audioEffectList = new List<string>();

            if (audioFadeInDuration > 0)
            {
                //Assume we fade in from first second.
                audioEffectList.Add($"afade=in:start_time={0}s:duration={audioFadeInDuration.ToString("0", NumberFormatInfo.InvariantInfo)}s");
            }

            if (audioFadeOutDuration > 0)
            {
                //Assume we fade out to last second.
                audioEffectList.Add($"afade=out:start_time={(vidLengthCalc.TotalSeconds - audioFadeOutDuration).ToString("0.000", NumberFormatInfo.InvariantInfo)}s:duration={audioFadeOutDuration.ToString("0.000", NumberFormatInfo.InvariantInfo)}s");
            }

            string audioFilterString = string.Join(',', audioEffectList);

            outputParameters.Append($"-filter:a \"{audioFilterString}\" ");
        }
    }

    int milliseconds = vidLengthCalc.Milliseconds;
    int seconds = vidLengthCalc.Seconds;
    int minutes = vidLengthCalc.Minutes;
    var hours = (int)vidLengthCalc.TotalHours;

    string durationString = $"{hours:D}:{minutes:D2}:{seconds:D2}.{milliseconds:D3}";

    outputParameters.Append($"-c:v:{framesId} libx264 -pix_fmt yuv420p -to {durationString} {outPath} -y ");

    string parameters = inputParameters.ToString() + outputParameters.ToString();

    try
    {
        await Task.Factory.StartNew(() =>
        {
            var outputLog = new List<string>();

            using (var process = new Process
            {
                StartInfo =
                {
                    FileName = ffmpgPath,
                    Arguments = parameters,
                    UseShellExecute = false,
                    CreateNoWindow = true,
                    // ffmpeg sends everything to the error output; standard output is not used.
                    RedirectStandardError = true
                },
                EnableRaisingEvents = true
            })
            {
                process.ErrorDataReceived += (sender, e) =>
                {
                    if (string.IsNullOrEmpty(e.Data))
                    {
                        return;
                    }

                    outputLog.Add(e.Data.ToString());
                    Console.WriteLine(e.Data.ToString());
                };

                process.Start();
                process.BeginErrorReadLine();
                process.WaitForExit();

                if (process.ExitCode != 0)
                {
                    throw new Exception($"ffmpeg failed error exit code {process.ExitCode}. Log: {string.Join(Environment.NewLine, outputLog)}");
                }

                Console.WriteLine($"Exit code: {process.ExitCode}");
            }
        });
    }
    catch (Win32Exception)
    {
        Console.WriteLine("Oh no, failed to start ffmpeg. Have you downloaded and copied ffmpeg.exe to the output folder?");
        return;
    }

    Console.WriteLine();
    Console.WriteLine("Video was successfully created. It is available at: " + Path.GetFullPath(outPath));
}
This function is based on the Splicer.Net library. It took me ages to understand how that library works.
Make sure that your fps (frames per second) is correct. The standard, by the way, is 24 f/s.
In my case I have 15 images, and I know that I need a 7-second video, so fps = 2.
Fps may vary according to platform... or developer usage.
public bool CreateVideo(List<Bitmap> bitmaps, string outputFile, double fps)
{
    int width = 640;
    int height = 480;
    if (bitmaps == null || bitmaps.Count == 0) return false;
    try
    {
        using (ITimeline timeline = new DefaultTimeline(fps))
        {
            IGroup group = timeline.AddVideoGroup(32, width, height);
            ITrack videoTrack = group.AddTrack();
            int i = 0;
            double miniDuration = 1.0 / fps;
            foreach (var bmp in bitmaps)
            {
                IClip clip = videoTrack.AddImage(bmp, 0, i * miniDuration, (i + 1) * miniDuration);
                System.Diagnostics.Debug.WriteLine(++i);
            }
            timeline.AddAudioGroup();
            IRenderer renderer = new WindowsMediaRenderer(timeline, outputFile, WindowsMediaProfiles.HighQualityVideo);
            renderer.Render();
        }
    }
    catch
    {
        return false;
    }
    return true;
}
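A hypothetical usage (LoadMyBitmaps is a placeholder for however you load your images):

// 15 bitmaps at 2 fps gives roughly a 7.5-second clip.
List<Bitmap> frames = LoadMyBitmaps();
bool ok = CreateVideo(frames, "output.wmv", 2.0);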
Hope this helps.