I'm using a SampleGrabber to get audio data, but my BufferCB method is never executed. What am I doing wrong?
//add Sample Grabber
IBaseFilter pSampleGrabber = (IBaseFilter)Activator.CreateInstance(Type.GetTypeFromCLSID(CLSID_SampleGrabber));
hr = pGraph.AddFilter(pSampleGrabber, "SampleGrabber");
checkHR(hr, "Can't add Sample Grabber");
AMMediaType pSampleGrabber_pmt = new AMMediaType();
//pSampleGrabber_pmt.majorType = MediaType.Audio;
pSampleGrabber_pmt.subType = MediaSubType.PCM;
pSampleGrabber_pmt.formatType = FormatType.WaveEx;
pSampleGrabber_pmt.fixedSizeSamples = true;
pSampleGrabber_pmt.formatSize = 18;
pSampleGrabber_pmt.sampleSize = 2;
WaveFormatEx pSampleGrabber_Format = new WaveFormatEx();
pSampleGrabber_Format.wFormatTag = 1;
pSampleGrabber_Format.nChannels = 1;
pSampleGrabber_Format.nSamplesPerSec = 48000;
pSampleGrabber_Format.nAvgBytesPerSec = 96000;
pSampleGrabber_Format.nBlockAlign = 2;
pSampleGrabber_Format.wBitsPerSample = 16;
pSampleGrabber_pmt.formatPtr = Marshal.AllocCoTaskMem(Marshal.SizeOf(pSampleGrabber_Format));
Marshal.StructureToPtr(pSampleGrabber_Format, pSampleGrabber_pmt.formatPtr, false);
hr = ((ISampleGrabber)pSampleGrabber).SetMediaType(pSampleGrabber_pmt);
DsUtils.FreeAMMediaType(pSampleGrabber_pmt);
checkHR(hr, "Can't set media type to sample grabber");
ISampleGrabber pGrabber = new SampleGrabber() as ISampleGrabber;
pGrabber = (ISampleGrabber)pSampleGrabber;
pGrabber.SetCallback(null, 1);
My BufferCB method looks like this:
public int BufferCB(double SampleTime, IntPtr pBuffer, int BufferLen)
{
    return 0;
}
You created and configured one instance, pSampleGrabber, and then you attach your callback to another, unused, idling instance, pGrabber.
You need
pSampleGrabber as ISampleGrabber
instead of
new SampleGrabber() as ISampleGrabber
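A minimal corrected sketch, assuming the DirectShow.NET interop and a hypothetical MyGrabberCallback class implementing ISampleGrabberCB:

// Attach the callback to the SAME instance that was added to the graph
// and configured above, and pass a real callback object: SetCallback(null, ...)
// clears the callback, so BufferCB would never fire.
ISampleGrabber pGrabber = pSampleGrabber as ISampleGrabber;
hr = pGrabber.SetCallback(new MyGrabberCallback(), 1); // 1 = BufferCB, 0 = SampleCB
checkHR(hr, "Can't set sample grabber callback");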
In my Unity game, each frame is rendered into a texture and then put together into a video using FFmpeg. My question is whether I am doing this right, because avcodec_send_frame throws an exception every time.
I am pretty sure that I am doing something wrong, doing it in the wrong order, or simply missing something.
Here is the code for capturing the texture:
void Update() {
    //StartCoroutine(CaptureFrame());
    if (rt == null)
    {
        rect = new Rect(0, 0, captureWidth, captureHeight);
        rt = new RenderTexture(captureWidth, captureHeight, 24);
        frame = new Texture2D(captureWidth, captureHeight, TextureFormat.RGB24, false);
    }
    Camera camera = this.GetComponent<Camera>(); // NOTE: added because there was no reference to camera in the original script; this script must be attached to the Camera
    camera.targetTexture = rt;
    camera.Render();
    RenderTexture.active = rt;
    frame.ReadPixels(rect, 0, 0);
    frame.Apply();
    camera.targetTexture = null;
    RenderTexture.active = null;
    byte[] fileData = null;
    fileData = frame.GetRawTextureData();
    encoding(fileData, fileData.Length);
}
And here is the code for encoding and sending the byte data:
private unsafe void encoding(byte[] bytes, int size)
{
    Debug.Log("Encoding...");
    AVCodec* codec;
    codec = ffmpeg.avcodec_find_encoder(AVCodecID.AV_CODEC_ID_H264);
    int ret, got_output = 0;

    AVCodecContext* codecContext = null;
    codecContext = ffmpeg.avcodec_alloc_context3(codec);
    codecContext->bit_rate = 400000;
    codecContext->width = captureWidth;
    codecContext->height = captureHeight;
    //codecContext->time_base.den = 25;
    //codecContext->time_base.num = 1;

    AVRational timeBase = new AVRational();
    timeBase.num = 1;
    timeBase.den = 25;
    codecContext->time_base = timeBase;

    //AVStream* videoAVStream = null;
    //videoAVStream->time_base = timeBase;

    AVRational frameRate = new AVRational();
    frameRate.num = 25;
    frameRate.den = 1;
    codecContext->framerate = frameRate;

    codecContext->gop_size = 10;
    codecContext->max_b_frames = 1;
    codecContext->pix_fmt = AVPixelFormat.AV_PIX_FMT_YUV420P;

    AVFrame* inputFrame;
    inputFrame = ffmpeg.av_frame_alloc();
    inputFrame->format = (int)codecContext->pix_fmt;
    inputFrame->width = captureWidth;
    inputFrame->height = captureHeight;
    inputFrame->linesize[0] = inputFrame->width;

    AVPixelFormat dst_pix_fmt = AVPixelFormat.AV_PIX_FMT_YUV420P, src_pix_fmt = AVPixelFormat.AV_PIX_FMT_RGBA;
    int src_w = 1920, src_h = 1080, dst_w = 1920, dst_h = 1080;
    SwsContext* sws_ctx;

    GCHandle pinned = GCHandle.Alloc(bytes, GCHandleType.Pinned);
    IntPtr address = pinned.AddrOfPinnedObject();
    sbyte** inputData = (sbyte**)address;

    sws_ctx = ffmpeg.sws_getContext(src_w, src_h, src_pix_fmt,
                                    dst_w, dst_h, dst_pix_fmt,
                                    0, null, null, null);

    fixed (int* lineSize = new int[1])
    {
        lineSize[0] = 4 * captureHeight;
        // Convert RGBA to YUV420P
        ffmpeg.sws_scale(sws_ctx, inputData, lineSize, 0, codecContext->width, inputFrame->extended_data, inputFrame->linesize);
    }

    inputFrame->pts = counter++;

    if (ffmpeg.avcodec_send_frame(codecContext, inputFrame) < 0)
        throw new ApplicationException("Error sending a frame for encoding!");

    AVPacket pkt;
    pkt = new AVPacket();
    //pkt.data = inData;
    AVPacket* packet = &pkt;
    ffmpeg.av_init_packet(packet);

    Debug.Log("pkt.size " + pkt.size);

    pinned.Free();

    AVDictionary* options = null;
    ffmpeg.av_dict_set(&options, "pkt_size", "1300", 0);
    ffmpeg.av_dict_set(&options, "buffer_size", "65535", 0);

    AVIOContext* server = null;
    ffmpeg.avio_open2(&server, "udp://192.168.0.1:1111", ffmpeg.AVIO_FLAG_WRITE, null, &options);

    Debug.Log("encoded");

    ret = ffmpeg.avcodec_encode_video2(codecContext, &pkt, inputFrame, &got_output);
    ffmpeg.avio_write(server, pkt.data, pkt.size);

    ffmpeg.av_free_packet(&pkt);
    pkt.data = null;
    pkt.size = 0;
}
And every time I start the game,
if (ffmpeg.avcodec_send_frame(codecContext, inputFrame) < 0)
    throw new ApplicationException("Error sending a frame for encoding!");
throws the exception.
Any help in fixing the issue would be greatly appreciated :)
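For reference, one thing that stands out in the code above: the codec context is allocated and configured but never opened, and avcodec_send_frame reports an error ("codec not opened") on a context that has not been through avcodec_open2. A minimal sketch of the usual setup order, assuming the same FFmpeg.AutoGen-style bindings as the question:

AVCodec* codec = ffmpeg.avcodec_find_encoder(AVCodecID.AV_CODEC_ID_H264);
AVCodecContext* codecContext = ffmpeg.avcodec_alloc_context3(codec);
// ... configure bit_rate, width, height, time_base, pix_fmt as above ...

// Open the encoder BEFORE any avcodec_send_frame call
if (ffmpeg.avcodec_open2(codecContext, codec, null) < 0)
    throw new ApplicationException("Could not open codec!");

// The destination frame also needs its own buffers before sws_scale
// can write into it
AVFrame* inputFrame = ffmpeg.av_frame_alloc();
inputFrame->format = (int)codecContext->pix_fmt;
inputFrame->width = codecContext->width;
inputFrame->height = codecContext->height;
if (ffmpeg.av_frame_get_buffer(inputFrame, 0) < 0)
    throw new ApplicationException("Could not allocate frame buffers!");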
I am trying to capture a desktop screenshot using SharpDX. My application is able to capture the screenshot, but without labels in Windows Explorer.
I tried two solutions, but nothing changed, and I tried to find any relevant information in the documentation, also with no luck.
Here is my code:
public void SCR()
{
    uint numAdapter = 0; // # of graphics card adapter
    uint numOutput = 0;  // # of output device (i.e. monitor)

    // create device and factory
    var device = new SharpDX.Direct3D11.Device(SharpDX.Direct3D.DriverType.Hardware);
    var factory = new Factory1();

    // creating CPU-accessible texture resource
    var texdes = new SharpDX.Direct3D11.Texture2DDescription
    {
        CpuAccessFlags = SharpDX.Direct3D11.CpuAccessFlags.Read,
        BindFlags = SharpDX.Direct3D11.BindFlags.None,
        Format = Format.B8G8R8A8_UNorm,
        Height = factory.Adapters1[numAdapter].Outputs[numOutput].Description.DesktopBounds.Height,
        Width = factory.Adapters1[numAdapter].Outputs[numOutput].Description.DesktopBounds.Width,
        OptionFlags = SharpDX.Direct3D11.ResourceOptionFlags.None,
        MipLevels = 1,
        ArraySize = 1
    };
    texdes.SampleDescription.Count = 1;
    texdes.SampleDescription.Quality = 0;
    texdes.Usage = SharpDX.Direct3D11.ResourceUsage.Staging;
    var screenTexture = new SharpDX.Direct3D11.Texture2D(device, texdes);

    // duplicate output stuff
    var output = new Output1(factory.Adapters1[numAdapter].Outputs[numOutput].NativePointer);
    var duplicatedOutput = output.DuplicateOutput(device);
    SharpDX.DXGI.Resource screenResource = null;
    SharpDX.DataStream dataStream;
    Surface screenSurface;

    var i = 0;
    var miliseconds = 2500000;
    while (true)
    {
        i++;

        // try to get duplicated frame within given time
        try
        {
            SharpDX.DXGI.OutputDuplicateFrameInformation duplicateFrameInformation;
            duplicatedOutput.AcquireNextFrame(miliseconds, out duplicateFrameInformation, out screenResource);
        }
        catch (SharpDX.SharpDXException e)
        {
            if (e.ResultCode.Code == SharpDX.DXGI.ResultCode.WaitTimeout.Result.Code)
            {
                // this has not been a successful capture
                // thanks @Randy
                // keep retrying
                continue;
            }
            else
            {
                throw e;
            }
        }

        device.ImmediateContext.CopyResource(screenResource.QueryInterface<SharpDX.Direct3D11.Resource>(), screenTexture);
        screenSurface = screenTexture.QueryInterface<Surface>();
        // screenSurface.Map(SharpDX.DXGI.MapFlags.Read, out dataStream);

        int width = output.Description.DesktopBounds.Width;
        int height = output.Description.DesktopBounds.Height;
        var boundsRect = new System.Drawing.Rectangle(0, 0, width, height);
        var mapSource = device.ImmediateContext.MapSubresource(screenTexture, 0, MapMode.Read, SharpDX.Direct3D11.MapFlags.None);

        using (var bitmap = new System.Drawing.Bitmap(width, height, PixelFormat.Format32bppArgb))
        {
            // Copy pixels from screen capture Texture to GDI bitmap
            var bitmapData = bitmap.LockBits(boundsRect, ImageLockMode.WriteOnly, bitmap.PixelFormat);
            var sourcePtr = mapSource.DataPointer;
            var destinationPtr = bitmapData.Scan0;
            for (int y = 0; y < height; y++)
            {
                // Copy a single line
                Utilities.CopyMemory(destinationPtr, sourcePtr, width * 4);

                // Advance pointers
                sourcePtr = IntPtr.Add(sourcePtr, mapSource.RowPitch);
                destinationPtr = IntPtr.Add(destinationPtr, bitmapData.Stride);
            }

            // Release source and dest locks
            bitmap.UnlockBits(bitmapData);
            device.ImmediateContext.UnmapSubresource(screenTexture, 0);
            bitmap.Save(string.Format(@"d:\scr\{0}.png", i));
        }

        // var image = FromByte(ToByte(dataStream));
        //var image = getImageFromDXStream(1920, 1200, dataStream);
        //image.Save(string.Format(@"d:\scr\{0}.png", i));
        // dataStream.Close();
        //screenSurface.Unmap();
        screenSurface.Dispose();
        screenResource.Dispose();
        duplicatedOutput.ReleaseFrame();
    }
}
After a few hours of research and googling, I found a working solution: change
PixelFormat.Format32bppArgb
to
PixelFormat.Format32bppRgb
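Applied to the code above, only the bitmap construction changes. The explanation below is an educated guess: desktop duplication hands back B8G8R8A8 pixels whose alpha byte is often zero, so saving them as 32bpp ARGB renders those pixels fully transparent, while 32bpp RGB keeps the 4-byte layout but ignores the alpha byte:

// Format32bppRgb keeps the 4-byte-per-pixel layout but ignores alpha
using (var bitmap = new System.Drawing.Bitmap(width, height, PixelFormat.Format32bppRgb))
{
    // ... same LockBits / CopyMemory / Save code as above ...
}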
How do I save the graph's output, after processing, to an AVI file? I have managed to get pictures with the overlaid text. I know there is a SetOutputFileName() method, but how do I use it here?
private Bitmap bitmapOverlay;
private IFilterGraph2 m_FilterGraph;
void GO()
{
    SetupGraph("C:\\Export.avi");
    SetupBitmap();

    IMediaControl mediaCtrl = m_FilterGraph as IMediaControl;
    int hr = mediaCtrl.Run();
    DsError.ThrowExceptionForHR(hr);
}
private void SetupGraph(string FileName)
{
    int hr;
    IBaseFilter ibfRenderer = null;
    ISampleGrabber sampGrabber = null;
    IBaseFilter capFilter = null;
    IPin iPinInFilter = null;
    IPin iPinOutFilter = null;
    IPin iPinInDest = null;

    // Get the graphbuilder object
    m_FilterGraph = new FilterGraph() as IFilterGraph2;

    // Get the SampleGrabber interface
    sampGrabber = new SampleGrabber() as ISampleGrabber;

    // Add the video source
    hr = m_FilterGraph.AddSourceFilter(FileName, "Ds.NET FileFilter", out capFilter);
    DsError.ThrowExceptionForHR(hr);

    // Hopefully this will be the video pin
    IPin iPinOutSource = DsFindPin.ByDirection(capFilter, PinDirection.Output, 0);

    IBaseFilter baseGrabFlt = sampGrabber as IBaseFilter;
    ConfigureSampleGrabber(sampGrabber);
    iPinInFilter = DsFindPin.ByDirection(baseGrabFlt, PinDirection.Input, 0);
    iPinOutFilter = DsFindPin.ByDirection(baseGrabFlt, PinDirection.Output, 0);

    // Add the frame grabber to the graph
    hr = m_FilterGraph.AddFilter(baseGrabFlt, "Ds.NET Grabber");
    DsError.ThrowExceptionForHR(hr);

    hr = m_FilterGraph.Connect(iPinOutSource, iPinInFilter);
    DsError.ThrowExceptionForHR(hr);

    // Get the default video renderer
    ibfRenderer = (IBaseFilter)new VideoRendererDefault();

    // Add it to the graph
    hr = m_FilterGraph.AddFilter(ibfRenderer, "Ds.NET VideoRendererDefault");
    DsError.ThrowExceptionForHR(hr);
    iPinInDest = DsFindPin.ByDirection(ibfRenderer, PinDirection.Input, 0);

    // Connect the graph. Many other filters automatically get added here
    hr = m_FilterGraph.Connect(iPinOutFilter, iPinInDest);
    DsError.ThrowExceptionForHR(hr);

    SaveSizeInfo(sampGrabber);
}
I process the video by drawing text on each frame.
cc.Save("C:\\Test\\img" + m_Count + ".jpg") is how I get shots with the superimposed text.
How do I get the processed video saved to an AVI file?
int ISampleGrabberCB.BufferCB(double SampleTime, IntPtr pBuffer, int BufferLen)
{
    Graphics g;
    String s;
    float sLeft;
    float sTop;
    SizeF d;

    g = Graphics.FromImage(bitmapOverlay);
    g.Clear(System.Drawing.Color.Transparent);
    g.SmoothingMode = System.Drawing.Drawing2D.SmoothingMode.AntiAlias;

    // Prepare to put the specified string on the image
    g.DrawRectangle(System.Drawing.Pens.Blue, 0, 0, m_videoWidth - 1, m_videoHeight - 1);
    g.DrawRectangle(System.Drawing.Pens.Blue, 1, 1, m_videoWidth - 3, m_videoHeight - 3);

    d = g.MeasureString(m_String, fontOverlay);
    sLeft = (m_videoWidth - d.Width) / 2;
    sTop = (m_videoHeight - d.Height) / 2;

    g.DrawString(m_String, fontOverlay, System.Drawing.Brushes.Red,
        sLeft, sTop, System.Drawing.StringFormat.GenericTypographic);
    g.Dispose();

    Bitmap v;
    v = new Bitmap(m_videoWidth, m_videoHeight, m_stride,
        PixelFormat.Format32bppArgb, pBuffer);
    v.RotateFlip(RotateFlipType.Rotate180FlipX);

    g = Graphics.FromImage(v);
    g.SmoothingMode = System.Drawing.Drawing2D.SmoothingMode.AntiAlias;

    // draw the overlay bitmap over the video's bitmap
    g.DrawImage(bitmapOverlay, 0, 0, bitmapOverlay.Width, bitmapOverlay.Height);

    Bitmap cc = new Bitmap(v);
    cc.Save("C:\\Test\\img" + m_Count + ".jpg");

    g.Dispose();
    v.Dispose();
    m_Count++;
    return 0;
}
Typically it should look like:
[File reader] -> [AVI Demuxer] -> (video pin) -> [Video decoder] -> [Sample grabber] -> [Video encoder] -> [AVI Muxer] -> [File writer]
                       |--------> (audio pin) -------------------------------------------------------------->|
An AVI file is a media container, so you need to demultiplex it into separate streams and, at the end, multiplex the (modified) streams back into an AVI container. The video stream you get (typically) contains encoded video, so to modify it you need to decode it, and after modification encode it back to the same format. You don't need to do anything with the audio stream; just direct it from the demuxer straight to the muxer. The [File writer] filter lets you specify the output file name.
I don't know what "Ds.NET FileFilter" is or how it can demux and then decode the video, but it seems it can, since you can see your modified picture. The AVI Muxer is a standard Microsoft filter; I just don't remember its name. You also need to choose a video encoder. I'd recommend first building a simple graph in GraphEdit that doesn't modify the picture but just does read -> demux -> decode -> encode -> mux -> write, to verify you have everything you need and that it works. Then just try to play the resulting AVI file.
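Since the question mentions SetOutputFileName(): in DirectShow.NET that method lives on ICaptureGraphBuilder2, which can also wire up the mux-to-writer tail of the graph for you. A rough sketch against the question's SetupGraph variables (the output path is a placeholder, and the null compressor is an assumption; pass a real encoder filter instead of null for compressed output):

// Attach a capture graph builder to the existing filter graph
ICaptureGraphBuilder2 captureGraph = (ICaptureGraphBuilder2)new CaptureGraphBuilder2();
hr = captureGraph.SetFiltergraph(m_FilterGraph);
DsError.ThrowExceptionForHR(hr);

// Creates the AVI Mux and File Writer filters, adds them to the graph,
// and sets the output file name, all in one call
IBaseFilter aviMux;
IFileSinkFilter fileSink;
hr = captureGraph.SetOutputFileName(MediaSubType.Avi, "C:\\Export_out.avi", out aviMux, out fileSink);
DsError.ThrowExceptionForHR(hr);

// Route the grabber's output into the mux instead of the video renderer
// (null compressor = uncompressed; supply an encoder IBaseFilter to compress)
hr = captureGraph.RenderStream(null, null, baseGrabFlt, null, aviMux);
DsError.ThrowExceptionForHR(hr);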
Context
I'm trying to apply filters such as contrast, color change, and brightness to every frame of an .avi video.
The video plays just fine with DirectShow.NET and C#.
After a couple of hours of research, I found out that BufferCB was not the way to go for this job.
Apparently, EZrgb24 is a filter I can add to my graph that does exactly what I want.
However, I can't get it to work.
Added at the beginning of my class:
[DllImport("ole32.dll", EntryPoint = "CoCreateInstance", CallingConvention = CallingConvention.StdCall)]
static extern UInt32 CoCreateInstance([In, MarshalAs(UnmanagedType.LPStruct)] Guid rclsid,
IntPtr pUnkOuter, UInt32 dwClsContext, [In, MarshalAs(UnmanagedType.LPStruct)] Guid riid,
[MarshalAs(UnmanagedType.IUnknown)] out object rReturnedComObject);
Here is the relevant code that works:
int hr = 0;
IBaseFilter ibfRenderer = null;
ISampleGrabber sampGrabber = null;
IBaseFilter capFilter = null;
IPin iPinInFilter = null;
IPin iPinOutFilter = null;
IPin iPinInDest = null;
Type comType = null;
object comObj = null;

m_FilterGraph = new FilterGraph() as IFilterGraph2;
try
{
    // Get the SampleGrabber interface
    sampGrabber = new SampleGrabber() as ISampleGrabber;

    // Add the video source
    hr = m_FilterGraph.AddSourceFilter(_videoPath, "Ds.NET FileFilter", out capFilter);
    DsError.ThrowExceptionForHR(hr);

    // Hopefully this will be the video pin
    IPin iPinOutSource = DsFindPin.ByDirection(capFilter, PinDirection.Output, 0);

    IBaseFilter baseGrabFlt = sampGrabber as IBaseFilter;
    ConfigureSampleGrabber(sampGrabber);
    iPinInFilter = DsFindPin.ByDirection(baseGrabFlt, PinDirection.Input, 0);
    iPinOutFilter = DsFindPin.ByDirection(baseGrabFlt, PinDirection.Output, 0);

    // Add the frame grabber to the graph
    hr = m_FilterGraph.AddFilter(baseGrabFlt, "Ds.NET Grabber");
    DsError.ThrowExceptionForHR(hr);

    hr = m_FilterGraph.Connect(iPinOutSource, iPinInFilter);
    DsError.ThrowExceptionForHR(hr);

    // Get the default video renderer
    ibfRenderer = (IBaseFilter)new VideoRendererDefault();

    // Add it to the graph
    hr = m_FilterGraph.AddFilter(ibfRenderer, "Ds.NET VideoRendererDefault");
    DsError.ThrowExceptionForHR(hr);
    iPinInDest = DsFindPin.ByDirection(ibfRenderer, PinDirection.Input, 0);

    // Connect the graph. Many other filters automatically get added here
    hr = m_FilterGraph.Connect(iPinOutFilter, iPinInDest);
    DsError.ThrowExceptionForHR(hr);

    SaveSizeInfo(sampGrabber);
HERE WE WANT TO ADD THE EZRGB FILTER.
Code that doesn't work:
/*
// { 8B498501-1218-11cf-ADC4-00A0D100041B }
DEFINE_GUID(CLSID_EZrgb24,
0x8b498501, 0x1218, 0x11cf, 0xad, 0xc4, 0x0, 0xa0, 0xd1, 0x0, 0x4, 0x1b);
*/
unsafe
{
    Guid IUnknownGuid = new Guid("00000000-0000-0000-C000-000000000046"); // Can it be written in a prettier style?
    Guid ezrgbclsid = new Guid(0x8b498501, 0x1218, 0x11cf, 0xad, 0xc4, 0x0, 0xa0, 0xd1, 0x0, 0x4, 0x1b);
    uint hr1 = CoCreateInstance(ezrgbclsid, IntPtr.Zero, (uint)(CLSCTX.CLSCTX_INPROC_HANDLER), ezrgbclsid, out x); // CLSCTX_LOCAL_SERVER
    IIPEffect myEffect = (IIPEffect)x; // as IIPEffect
    if (hr1 != 0)
    {
        int iError = Marshal.GetLastWin32Error();
        Console.Write("CoCreateInstance Error = {0}, LastWin32Error = {1}", hr1, iError);
    }
    myEffect.put_IPEffect(1004, 0, 100); // for this filter, look at resource.h for what the int should be; in this case 1002 is the emboss effect
}
My diagnostic
I found out that the value returned in hr1 is the HRESULT for "class not registered".
Which means to me that EZrgb24 is not registered on my computer.
How I tried to solve the problem
I found and downloaded EZRGB.ax from some obscure web site and executed the commands:
cd \windows\syswow64
regsvr32 c:\ezrgb24.ax
A message box appeared saying "DllRegisterServer in c:\ezrgb24.ax succeeded."
It still doesn't work.
I am using DirectShow.NET; however, this question is also tagged directshow, as I feel the solution will work for either C# or C++.
You can use SampleCB instead of BufferCB; the former provides access to the data that is streamed further downstream, so you can modify it.
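For reference, a minimal sketch of such a SampleCB implementation, assuming the DirectShow.NET interop (the pixel-manipulation body is left as a placeholder):

int ISampleGrabberCB.SampleCB(double SampleTime, IMediaSample pSample)
{
    // Unlike BufferCB, this receives the actual sample that continues
    // downstream, so edits made to its buffer are seen by the renderer.
    IntPtr pBuffer;
    pSample.GetPointer(out pBuffer);
    int len = pSample.GetActualDataLength();

    // ... modify the pixels behind pBuffer here ...

    Marshal.ReleaseComObject(pSample);
    return 0;
}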
The typical problem with registration is building a 32-bit DLL and then trying to use it from 64-bit code. The bitnesses have to match.
You need CLSCTX_ALL or CLSCTX_INPROC_SERVER.
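A hedged sketch of the corrected creation call (note also, beyond what the answers above say, that the riid argument in the question passes the class CLSID where an interface IID such as IID_IUnknown is expected; x is assumed declared as object):

object x;
Guid IID_IUnknown = new Guid("00000000-0000-0000-C000-000000000046");
Guid ezrgbclsid = new Guid(0x8b498501, 0x1218, 0x11cf, 0xad, 0xc4, 0x0, 0xa0, 0xd1, 0x0, 0x4, 0x1b);

// CLSCTX_INPROC_SERVER = 1 loads the registered .ax into this process;
// CLSCTX_INPROC_HANDLER alone cannot activate an ordinary COM server.
const uint CLSCTX_INPROC_SERVER = 1;
uint hr1 = CoCreateInstance(ezrgbclsid, IntPtr.Zero, CLSCTX_INPROC_SERVER, IID_IUnknown, out x);
if (hr1 != 0)
    Marshal.ThrowExceptionForHR((int)hr1);

// The cast performs the QueryInterface for IIPEffect
IIPEffect myEffect = (IIPEffect)x;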
I am trying to use this code to get pictures from my cam:
IGraphBuilder _graph = null;
ISampleGrabber _grabber = null;
IBaseFilter _sourceObject = null;
IBaseFilter _grabberObject = null;
IMediaControl _control = null;
// Create the main graph
_graph = Activator.CreateInstance(Type.GetTypeFromCLSID(FilterGraph)) as IGraphBuilder;
// Create the webcam source
_sourceObject = FilterInfo.CreateFilter(_monikerString);
// Create the grabber
_grabber = Activator.CreateInstance(Type.GetTypeFromCLSID(SampleGrabber)) as ISampleGrabber;
_grabberObject = _grabber as IBaseFilter;
// Add the source and grabber to the main graph
_graph.AddFilter(_sourceObject, "source");
_graph.AddFilter(_grabberObject, "grabber");
IPin pin = _sourceObject.GetPin(PinDirection.Output, 0);
IAMStreamConfig streamConfig = pin as IAMStreamConfig;
int count = 0, size = 0;
streamConfig.GetNumberOfCapabilities(out count, out size);
int width = 0, height = 0;
AMMediaType mediaType = null;
AMMediaType mediaTypeCandidate = null;
for (int index = 0; index < count; index++) {
    VideoStreamConfigCaps scc = new VideoStreamConfigCaps();
    int test = streamConfig.GetStreamCaps(index, out mediaTypeCandidate, scc);
    if (mediaTypeCandidate.MajorType == MediaTypes.Video && mediaTypeCandidate.SubType == MediaSubTypes.YUY2) {
        VideoInfoHeader header = (VideoInfoHeader)Marshal.PtrToStructure(mediaTypeCandidate.FormatPtr, typeof(VideoInfoHeader));
        if (header.BmiHeader.Width == 1280 && header.BmiHeader.Height == 720) {
            width = header.BmiHeader.Width;
            height = header.BmiHeader.Height;
            if (mediaType != null)
                mediaType.Dispose();
            mediaType = mediaTypeCandidate;
        } else
            mediaTypeCandidate.Dispose();
    } else
        mediaTypeCandidate.Dispose();
}
streamConfig.SetFormat(mediaType);
And it works, but I do not see the image generated by this code:
uint pcount = (uint)(_capGrabber.Width * _capGrabber.Height * PixelFormats.Bgr32.BitsPerPixel / 8);
// Create a file mapping
_section = CreateFileMapping(new IntPtr(-1), IntPtr.Zero, 0x04, 0, pcount, null);
_map = MapViewOfFile(_section, 0xF001F, 0, 0, pcount);
// Get the bitmap
BitmapSource = Imaging.CreateBitmapSourceFromMemorySection(_section, _capGrabber.Width,
_capGrabber.Height, PixelFormats.Bgr32, _capGrabber.Width * PixelFormats.Bgr32.BitsPerPixel / 8, 0) as InteropBitmap;
_capGrabber.Map = _map;
// Invoke event
if (NewBitmapReady != null)
{
    NewBitmapReady(this, null);
}
This is because the media subtype is YUY2. How can I add a converter to this code? I have read something about a color converter that can be added to the IGraphBuilder. How does that work?
I would not expect CreateBitmapSourceFromMemorySection to accept anything other than flavors of RGB. It is even more unlikely that it accepts a YUY2 media type, so you need the DirectShow pipeline to convert the video stream to RGB before you export it as a managed bitmap/imaging object.
To achieve this, you typically add a Sample Grabber filter initialized to the 24-bit RGB subtype and let DirectShow add the necessary converters automatically.
See detailed explanation and code snippets here: DirectShow: Examples for Using SampleGrabber for Grabbing a Frame and...
AMMediaType media = new AMMediaType();
media.majorType = MediaType.Video;
media.subType = MediaSubType.RGB24;
media.formatPtr = IntPtr.Zero;
hr = sampGrabber.SetMediaType(media);
DsError.ThrowExceptionForHR(hr);
DsUtils.FreeAMMediaType(media);
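Once the grabber is connected, the format DirectShow actually negotiated can be read back and used for the buffer math, rather than assuming a size. A sketch using the DirectShow.NET interop (names may differ slightly in the wrapper used in the question):

// Query the media type negotiated on the grabber's input pin
AMMediaType connected = new AMMediaType();
hr = sampGrabber.GetConnectedMediaType(connected);
DsError.ThrowExceptionForHR(hr);

// For RGB24 the format block is a VideoInfoHeader
VideoInfoHeader vih = (VideoInfoHeader)Marshal.PtrToStructure(connected.formatPtr, typeof(VideoInfoHeader));
int width = vih.BmiHeader.Width;   // use these instead of assuming 1280x720
int height = vih.BmiHeader.Height;
DsUtils.FreeAMMediaType(connected);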