I have audio samples extracted through NAudio, and I know the parameters:
channels
bytes per sample
sample rate
How can I play these samples using the .NET API or another .NET library?
Here is the code:
openFileDialog1.ShowDialog();
using (var reader = new Mp3FileReader(openFileDialog1.FileName))
{
    var pcmLength = (int)reader.Length;
    var _leftBuffer = new byte[pcmLength / 2];
    var buffer = new byte[pcmLength];
    var bytesRead = reader.Read(buffer, 0, pcmLength);
    int index = 0;
    for (int i = 0; i < bytesRead; i += 4)
    {
        // extracting only the left channel
        _leftBuffer[index] = buffer[i];
        index++;
        _leftBuffer[index] = buffer[i + 1];
        index++;
    }
    // How to play _leftBuffer (single channel, 2 bytes per sample, 44100 samples per second)?
}
First, you need to implement IWaveProvider or use one of the IWaveProvider implementations that come with NAudio, such as WaveProvider16. Next, initialize a WaveOut object with your IWaveProvider using WaveOut.Init(IWaveProvider provider), and finally call WaveOut.Play().
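For example, here is a minimal sketch that plays the mono buffer from the question using NAudio's RawSourceWaveStream and WaveOutEvent (assuming 16-bit samples at 44100 Hz; the blocking wait is only there to keep the sketch alive, a real app would handle the PlaybackStopped event instead):
var waveFormat = new WaveFormat(44100, 16, 1); // 44.1 kHz, 16-bit, mono
using (var source = new RawSourceWaveStream(new MemoryStream(_leftBuffer), waveFormat))
using (var waveOut = new WaveOutEvent())
{
    waveOut.Init(source);
    waveOut.Play();
    // keep the output device alive until the buffer has finished playing
    while (waveOut.PlaybackState == PlaybackState.Playing)
    {
        System.Threading.Thread.Sleep(100);
    }
}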
Yes, I have found a solution: A low-level audio player in C#.
Full working code:
public partial class Form1 : Form
{
    private byte[] _leftBuffer;
    private BiQuadFilter _leftFilter;
    private BiQuadFilter _rightFilter;

    public Form1()
    {
        InitializeComponent();
    }

    private void button1_Click(object sender, EventArgs e)
    {
        openFileDialog1.ShowDialog();
        using (var reader = new Mp3FileReader(openFileDialog1.FileName))
        {
            var pcmLength = (int)reader.Length;
            _leftBuffer = new byte[pcmLength / 2];
            var buffer = new byte[pcmLength];
            var bytesRead = reader.Read(buffer, 0, pcmLength);
            int index = 0;
            for (int i = 0; i < bytesRead; i += 4)
            {
                _leftBuffer[index] = buffer[i];
                index++;
                _leftBuffer[index] = buffer[i + 1];
                index++;
            }
            var player = new WaveLib.WaveOutPlayer(-1, new WaveLib.WaveFormat(44100, 16, 1), _leftBuffer.Length, 1, (data, size) =>
            {
                byte[] b = _leftBuffer;
                System.Runtime.InteropServices.Marshal.Copy(b, 0, data, size);
            });
        }
    }
}
Is there a way to convert any audio file to ogg with .NET Core?
I figured out how to convert from ogg to wav with NAudio.Vorbis:
using (var vorbis = new VorbisWaveReader(inputFile))
{
    WaveFileWriter.CreateWaveFile(outputFile, vorbis);
}
But I could not find a way to convert any audio file to ogg.
Does NAudio provide the functionality I require, or are there other libraries better suited?
You could try this library:
Vorbis Encoder
(this is on NuGet; the linked GitHub project provides the source and the following encoding example)
using System;
using System.IO;

namespace OggVorbisEncoder.Example
{
    public class Encoder
    {
        private const int SampleSize = 1024;

        [STAThread]
        private static void Main()
        {
            var stdin = new FileStream(@"unencoded.raw", FileMode.Open, FileAccess.Read);
            var stdout = new FileStream(@"encoded.ogg", FileMode.Create, FileAccess.Write);

            // StripWavHeader(stdin);

            // Stores all the static vorbis bitstream settings
            var info = VorbisInfo.InitVariableBitRate(2, 44100, 0.1f);

            // set up our packet->stream encoder
            var serial = new Random().Next();
            var oggStream = new OggStream(serial);

            // =========================================================
            // HEADER
            // =========================================================
            // Vorbis streams begin with three headers; the initial header (with
            // most of the codec setup parameters) which is mandated by the Ogg
            // bitstream spec. The second header holds any comment fields. The
            // third header holds the bitstream codebook.
            var headerBuilder = new HeaderPacketBuilder();

            var comments = new Comments();
            comments.AddTag("ARTIST", "TEST");

            var infoPacket = headerBuilder.BuildInfoPacket(info);
            var commentsPacket = headerBuilder.BuildCommentsPacket(comments);
            var booksPacket = headerBuilder.BuildBooksPacket(info);

            oggStream.PacketIn(infoPacket);
            oggStream.PacketIn(commentsPacket);
            oggStream.PacketIn(booksPacket);

            // Flush to force audio data onto its own page per the spec
            OggPage page;
            while (oggStream.PageOut(out page, true))
            {
                stdout.Write(page.Header, 0, page.Header.Length);
                stdout.Write(page.Body, 0, page.Body.Length);
            }

            // =========================================================
            // BODY (Audio Data)
            // =========================================================
            var processingState = ProcessingState.Create(info);

            var buffer = new float[info.Channels][];
            buffer[0] = new float[SampleSize];
            buffer[1] = new float[SampleSize];

            var readbuffer = new byte[SampleSize * 4];
            while (!oggStream.Finished)
            {
                var bytes = stdin.Read(readbuffer, 0, readbuffer.Length);
                if (bytes == 0)
                {
                    processingState.WriteEndOfStream();
                }
                else
                {
                    var samples = bytes / 4;
                    for (var i = 0; i < samples; i++)
                    {
                        // uninterleave samples
                        buffer[0][i] = (short)((readbuffer[i * 4 + 1] << 8) | (0x00ff & readbuffer[i * 4])) / 32768f;
                        buffer[1][i] = (short)((readbuffer[i * 4 + 3] << 8) | (0x00ff & readbuffer[i * 4 + 2])) / 32768f;
                    }

                    processingState.WriteData(buffer, samples);
                }

                OggPacket packet;
                while (!oggStream.Finished
                       && processingState.PacketOut(out packet))
                {
                    oggStream.PacketIn(packet);

                    while (!oggStream.Finished
                           && oggStream.PageOut(out page, false))
                    {
                        stdout.Write(page.Header, 0, page.Header.Length);
                        stdout.Write(page.Body, 0, page.Body.Length);
                    }
                }
            }

            stdin.Close();
            stdout.Close();
        }

        /// <summary>
        /// We cheat on the WAV header; we just bypass the header and never
        /// verify that it matches 16bit/stereo/44.1kHz. This is just an
        /// example, after all.
        /// </summary>
        private static void StripWavHeader(BinaryReader stdin)
        {
            var tempBuffer = new byte[6];
            for (var i = 0; (i < 30) && (stdin.Read(tempBuffer, 0, 2) > 0); i++)
                if ((tempBuffer[0] == 'd') && (tempBuffer[1] == 'a'))
                {
                    stdin.Read(tempBuffer, 0, 6);
                    break;
                }
        }
    }
}
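To get from "any audio file" to the raw 16-bit/44.1 kHz/stereo PCM that this example consumes, one option is to decode and resample with NAudio first. The following is a minimal sketch, assuming NAudio's MediaFoundationReader and MediaFoundationResampler are available (Windows only, and "input.mp3" is a placeholder); the bytes it produces can be fed into the encoding loop above in place of reading from unencoded.raw:
// requires the NAudio package (NAudio.Wave namespace)
using (var reader = new MediaFoundationReader("input.mp3")) // decodes mp3, aac, wma, wav, ...
using (var resampler = new MediaFoundationResampler(reader, new WaveFormat(44100, 16, 2)))
{
    var pcm = new byte[SampleSize * 4]; // same frame size as the example's readbuffer
    int read;
    while ((read = resampler.Read(pcm, 0, pcm.Length)) > 0)
    {
        // hand pcm[0..read) to the encoder body loop instead of stdin.Read(readbuffer, ...)
    }
}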
I have a problem turning a RandomAccessStream into a float array. The float array contains NaN values, and I can't tell whether they come from the stream, the byte array, or the conversion to floats. Performance and quality are important here, so if there is a better way to do this, let me know.
At last count I was getting 122 NaNs.
Thanks.
private async void button_Click(object sender, RoutedEventArgs e)
{
    string text = "this is text";
    SpeechSynthesizer synthesizer = new SpeechSynthesizer();
    SpeechSynthesisStream synthesisStream = await synthesizer.SynthesizeTextToStreamAsync(text);

    Stopwatch watch = new Stopwatch();
    watch.Start();

    ProcessStream(synthesisStream.CloneStream());

    watch.Stop();
    // Performance is important
    Debug.WriteLine(watch.Elapsed);
}

private async void ProcessStream(IRandomAccessStream stream)
{
    // Create a buffer (somewhere to put the stream)
    byte[] bytes = new byte[stream.Size];

    // Add stream data to the buffer (the commented-out ReadAsync gives the same result)
    // IBuffer x = await stream.ReadAsync(bytes.AsBuffer(), (uint)stream.Size, InputStreamOptions.None);
    using (DataReader reader = new DataReader(stream))
    {
        await reader.LoadAsync((uint)stream.Size);
        reader.ReadBytes(bytes);
    }

    // Convert the buffer (bytes) to a float array
    float[] floatArray = MainPage.ConvertByteToFloat(bytes.ToArray());

    int nanCount = 0;
    for (var index = 0; index < floatArray.Length; index++)
    {
        float value = floatArray[index];
        if (float.IsNaN(value))
        {
            nanCount++;
        }
    }
    Debug.WriteLine("Nan count: " + nanCount);
}

public static float[] ConvertByteToFloat(byte[] array)
{
    float[] floatArr = new float[array.Length / 4];
    for (int i = 0; i < floatArr.Length; i++)
    {
        if (BitConverter.IsLittleEndian)
        {
            Array.Reverse(array, i * 4, 4);
        }
        floatArr[i] = BitConverter.ToSingle(array, i * 4);
    }
    return floatArr;
}
Found the answer at this SO post.
Basically, I did not know that the stream stores its audio data as 16-bit PCM samples rather than 32-bit floats, so interpreting every four bytes as a float produced the NaNs.
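For reference, a minimal sketch of the corrected conversion, assuming the payload is little-endian 16-bit PCM and any WAV header bytes have already been skipped (the helper name is illustrative, not from the original code):
public static float[] ConvertPcm16ToFloat(byte[] array)
{
    // two bytes per sample; normalize each 16-bit sample to the range [-1, 1]
    float[] floatArr = new float[array.Length / 2];
    for (int i = 0; i < floatArr.Length; i++)
    {
        short sample = BitConverter.ToInt16(array, i * 2);
        floatArr[i] = sample / 32768f;
    }
    return floatArr;
}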
I'm trying to create an application that sends and receives audio. I have two BufferedWaveProviders.
For the first one I do:
private void AsioOut_AudioAvailable(object sender, AsioAudioAvailableEventArgs e)
{
    float[] sourceAudio = new float[e.SamplesPerBuffer * e.InputBuffers.Length];
    e.GetAsInterleavedSamples(sourceAudio);
    float[] proccesedAudio = new float[settings.BufferSize];
    byte[] result = new byte[settings.BufferSize * 4];
    byte[] sendingAudio = new byte[e.SamplesPerBuffer * 4];
    for (int j = 1, q = 0; j < sourceAudio.Length; j += 2, q++)
        proccesedAudio[q] = sourceAudio[j];

    // ...audio is being processed by VSTHost...

    result.CopyTo(sendingAudio, 0);
    if (connection.IsConnected)
        Task.Run(() => connection.Send(sendingAudio)); // processed audio is sent over the socket
    inputAudioBufferedWaveProvider.AddSamples(result, 0, result.Length);
}
For the second one I do:
private void AudioReceiving()
{
    while (isAudioReceiving)
    {
        incomingBytes = connection.AudioReceiving(); // socket receives bytes
        incomingAudioBufferedWaveProvider.AddSamples(incomingBytes, 0, incomingBytes.Length);
    }
}
Those two BufferedWaveProviders are mixed by a MixingSampleProvider:
mixingProvider = new MixingSampleProvider(WaveFormat.CreateIeeeFloatWaveFormat(Convert.ToInt32(settings.SampleRate), 2));
mixingProvider.AddMixerInput(inputAudioPanningProvider);
mixingProvider.AddMixerInput(incomingAudioPanningProvider);
mixingProvider.ReadFully = true;
asioOut.InitRecordAndPlayback(new SampleToWaveProvider(mixingProvider), 2, Convert.ToInt32(settings.SampleRate));
Three to four minutes after starting the application I get an exception ("Buffer full"). I think the problem is with the buffer that is being filled by the bytes received from the network.
What am I doing wrong?
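For what it's worth, NAudio's BufferedWaveProvider throws "Buffer full" once more data is queued than its internal buffer can hold, which matches the network side outrunning ASIO playback. A minimal sketch of a common mitigation, with illustrative sizes (configure the provider before the first AddSamples call):
// let the provider silently discard new samples instead of throwing when full
incomingAudioBufferedWaveProvider.BufferDuration = TimeSpan.FromSeconds(10);
incomingAudioBufferedWaveProvider.DiscardOnBufferOverflow = true;

// or: only queue new data while the backlog stays reasonable
if (incomingAudioBufferedWaveProvider.BufferedBytes < incomingAudioBufferedWaveProvider.BufferLength / 2)
{
    incomingAudioBufferedWaveProvider.AddSamples(incomingBytes, 0, incomingBytes.Length);
}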
I have a server-side app written in C:
struct recv_packet
{
    int magic;
    int code;
    int length;
    char *body;
};

char send_buff[1024+1] = "";
ZeroMemory(&send_buff, 1024);
memset(send_buff, 'A', 1024);
//send_buff[1024] = '\0';

recv_packet rcv_pkt = { 0 };
rcv_pkt.magic = MAGIC;
rcv_pkt.code = 0;
rcv_pkt.length = strlen(send_buff);
rcv_pkt.body = send_buff;

int size = sizeof(rcv_pkt.magic) + sizeof(rcv_pkt.code) + sizeof(rcv_pkt.length) + 1024+1;
if (send(ClientSocket, (char *)&rcv_pkt, size, 0) == SOCKET_ERROR)
{
    printf("Error %d\n", WSAGetLastError());
    closesocket(ClientSocket);
    WSACleanup();
    return 1;
}
On the other side I grab this packet like this:
public struct recv_packet
{
    public int magic;
    public int code;
    public int length;
    public byte[] body;
};

public Form1()
{
    InitializeComponent();
}

private void button1_Click(object sender, EventArgs e)
{
    int port = 4000;
    TcpClient client = new TcpClient("127.0.0.1", 4000);
    NetworkStream nws = client.GetStream();
    BinaryWriter bw = new BinaryWriter(nws);
    BinaryReader br = new BinaryReader(nws);
    byte[] buff = new byte[512];
    send_packet pkt = new send_packet();
    pkt.magic = magic;
    pkt.cmd = (int)command.MOVE_MOUSE;
    while (true)
    {
        bw.Write(pkt.magic);
        bw.Write(pkt.cmd);
        //br.Read(buff, 0, 512);
        recv_packet rcv_pkt = new recv_packet();
        rcv_pkt.magic = br.ReadInt32();
        rcv_pkt.code = br.ReadInt32();
        rcv_pkt.length = br.ReadInt32();
        rcv_pkt.body = br.ReadBytes(rcv_pkt.length);
        //string str = rcv_pkt.length.ToString();
        string str = System.Text.Encoding.Default.GetString(rcv_pkt.body);
        MessageBox.Show(str);
    }
}
So I expect that body will contain only 'A' (byte value 65), but instead I get garbage in it.
Why could this happen? Thank you for your time.
As I understood it, there are a few ways of resolving this problem. One of them is to redeclare the struct slightly and then create a buffer where all the structure elements are laid out one by one. So the solution looks like this:
char send_buff[1024+1] = "";
ZeroMemory(&send_buff, 1025);
memset(send_buff, 'A', 1024);
recv_packet *rcv_pkt = (recv_packet *)malloc(sizeof(recv_packet)+1024+1);
//recv_packet rcv_pkt = { 0 };
rcv_pkt->magic = MAGIC;
rcv_pkt->code = 0;
rcv_pkt->length = strlen(send_buff);
memcpy(rcv_pkt->body, send_buff, 1025);
int size = sizeof(rcv_pkt->magic) + sizeof(rcv_pkt->code) + sizeof(rcv_pkt->length) + 1024 + 1;
//printf("%d", size);
//getchar();
//return 0;
//if (send(ClientSocket, rcv_pkt.body, rcv_pkt.length, 0) == SOCKET_ERROR)
if (send(ClientSocket, (char *)rcv_pkt, size, 0) == SOCKET_ERROR)
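For reference, a hypothetical C# sketch of the flat byte layout the reader above expects: three little-endian 32-bit integers followed by exactly length body bytes. The original C version produced garbage because sending &rcv_pkt transmits the value of the body pointer rather than the bytes it points to, so packing everything into one contiguous buffer, as in the fix above, is the key point. BuildPacket is an illustrative helper, not part of the original code:
using System.IO;

static byte[] BuildPacket(int magic, int code, byte[] body)
{
    using (var ms = new MemoryStream())
    using (var bw = new BinaryWriter(ms))
    {
        bw.Write(magic);        // read as rcv_pkt.magic
        bw.Write(code);         // read as rcv_pkt.code
        bw.Write(body.Length);  // read as rcv_pkt.length
        bw.Write(body);         // read by br.ReadBytes(rcv_pkt.length)
        return ms.ToArray();
    }
}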
(Apologies if this is a duplicate ... I posted but saw no evidence that it actually made it to the forum.)
I've been trying to get SlimDX DirectSound working. Here's the code I have. It fills the secondary buffer from a wav file and then, in a thread loop, alternately refills the lower and upper halves of the buffer.
It plays the first load of the buffer fine. The AutoResetEvents fire when they should, and the lower half and then the upper half of the buffer are repopulated (verified with Debug statements). But playback does not continue after the first load of the buffer, so somehow the repopulation of the buffer isn't taking effect.
Ideas?
(I'm using DirectSound because it's the only way I've found to select the GUID of the audio device that I want to use. I'm open to other .NET-friendly approaches.)
private void PlaySound(Guid soundCardGuid, string audioFile) {
    DirectSound ds = new DirectSound(soundCardGuid);
    ds.SetCooperativeLevel(this.Handle, CooperativeLevel.Priority);

    WaveFormat format = new WaveFormat();
    format.BitsPerSample = 16;
    format.BlockAlignment = 4;
    format.Channels = 2;
    format.FormatTag = WaveFormatTag.Pcm;
    format.SamplesPerSecond = 44100;
    format.AverageBytesPerSecond = format.SamplesPerSecond * format.BlockAlignment;

    SoundBufferDescription desc = new SoundBufferDescription();
    desc.Format = format;
    desc.Flags = BufferFlags.GlobalFocus;
    desc.SizeInBytes = 8 * format.AverageBytesPerSecond;
    PrimarySoundBuffer pBuffer = new PrimarySoundBuffer(ds, desc);

    SoundBufferDescription desc2 = new SoundBufferDescription();
    desc2.Format = format;
    desc2.Flags = BufferFlags.GlobalFocus | BufferFlags.ControlPositionNotify | BufferFlags.GetCurrentPosition2;
    desc2.SizeInBytes = 8 * format.AverageBytesPerSecond;
    SecondarySoundBuffer sBuffer1 = new SecondarySoundBuffer(ds, desc2);

    NotificationPosition[] notifications = new NotificationPosition[2];
    notifications[0].Offset = desc2.SizeInBytes / 2 + 1;
    notifications[1].Offset = desc2.SizeInBytes - 1;
    notifications[0].Event = new AutoResetEvent(false);
    notifications[1].Event = new AutoResetEvent(false);
    sBuffer1.SetNotificationPositions(notifications);

    byte[] bytes1 = new byte[desc2.SizeInBytes / 2];
    byte[] bytes2 = new byte[desc2.SizeInBytes];

    Stream stream = File.Open(audioFile, FileMode.Open);

    Thread fillBuffer = new Thread(() => {
        int readNumber = 1;
        int bytesRead;

        bytesRead = stream.Read(bytes2, 0, desc2.SizeInBytes);
        sBuffer1.Write<byte>(bytes2, 0, LockFlags.None);
        sBuffer1.Play(0, PlayFlags.None);

        while (true) {
            if (bytesRead == 0) { break; }

            notifications[0].Event.WaitOne();
            bytesRead = stream.Read(bytes1, 0, bytes1.Length);
            sBuffer1.Write<byte>(bytes1, 0, LockFlags.None);

            if (bytesRead == 0) { break; }

            notifications[1].Event.WaitOne();
            bytesRead = stream.Read(bytes1, 0, bytes1.Length);
            sBuffer1.Write<byte>(bytes1, desc2.SizeInBytes / 2, LockFlags.None);
        }

        stream.Close();
        stream.Dispose();
    });
    fillBuffer.Start();
}
You haven't set it to loop on the play buffer. Change your code to:
sBuffer1.Play(0, PlayFlags.Looping);