Is there a way to convert any audio file to OGG with .NET Core?
I figured out how to convert from OGG to WAV with NAudio.Vorbis:
using (var vorbis = new VorbisWaveReader(inputFile))
{
WaveFileWriter.CreateWaveFile(outputFile, vorbis);
}
But I could not find a way to convert any audio file to OGG.
Does NAudio provide the functionality I require, or are there other libraries better suited?
You could try this library:
Vorbis Encoder
(This is on NuGet; the linked GitHub project provides the source and the following encoding example.)
using System;
using System.IO;
namespace OggVorbisEncoder.Example
{
public class Encoder
{
private const int SampleSize = 1024;
[STAThread]
private static void Main()
{
var stdin = new FileStream(@"unencoded.raw", FileMode.Open, FileAccess.Read);
var stdout = new FileStream(@"encoded.ogg", FileMode.Create, FileAccess.Write);
// StripWavHeader(stdin);
// Stores all the static vorbis bitstream settings
var info = VorbisInfo.InitVariableBitRate(2, 44100, 0.1f);
// set up our packet->stream encoder
var serial = new Random().Next();
var oggStream = new OggStream(serial);
// =========================================================
// HEADER
// =========================================================
// Vorbis streams begin with three headers; the initial header (with
// most of the codec setup parameters) which is mandated by the Ogg
// bitstream spec. The second header holds any comment fields. The
// third header holds the bitstream codebook.
var headerBuilder = new HeaderPacketBuilder();
var comments = new Comments();
comments.AddTag("ARTIST", "TEST");
var infoPacket = headerBuilder.BuildInfoPacket(info);
var commentsPacket = headerBuilder.BuildCommentsPacket(comments);
var booksPacket = headerBuilder.BuildBooksPacket(info);
oggStream.PacketIn(infoPacket);
oggStream.PacketIn(commentsPacket);
oggStream.PacketIn(booksPacket);
// Flush to force audio data onto its own page per the spec
OggPage page;
while (oggStream.PageOut(out page, true))
{
stdout.Write(page.Header, 0, page.Header.Length);
stdout.Write(page.Body, 0, page.Body.Length);
}
// =========================================================
// BODY (Audio Data)
// =========================================================
var processingState = ProcessingState.Create(info);
var buffer = new float[info.Channels][];
buffer[0] = new float[SampleSize];
buffer[1] = new float[SampleSize];
var readbuffer = new byte[SampleSize*4];
while (!oggStream.Finished)
{
var bytes = stdin.Read(readbuffer, 0, readbuffer.Length);
if (bytes == 0)
{
processingState.WriteEndOfStream();
}
else
{
var samples = bytes/4;
for (var i = 0; i < samples; i++)
{
// uninterleave samples
buffer[0][i] = (short) ((readbuffer[i*4 + 1] << 8) | (0x00ff & readbuffer[i*4]))/32768f;
buffer[1][i] = (short) ((readbuffer[i*4 + 3] << 8) | (0x00ff & readbuffer[i*4 + 2]))/32768f;
}
processingState.WriteData(buffer, samples);
}
OggPacket packet;
while (!oggStream.Finished
&& processingState.PacketOut(out packet))
{
oggStream.PacketIn(packet);
while (!oggStream.Finished
&& oggStream.PageOut(out page, false))
{
stdout.Write(page.Header, 0, page.Header.Length);
stdout.Write(page.Body, 0, page.Body.Length);
}
}
}
stdin.Close();
stdout.Close();
}
/// <summary>
/// We cheat on the WAV header; we just bypass the header and never
/// verify that it matches 16-bit/stereo/44.1kHz. This is just an
/// example, after all.
/// </summary>
private static void StripWavHeader(BinaryReader stdin)
{
var tempBuffer = new byte[6];
for (var i = 0; (i < 30) && (stdin.Read(tempBuffer, 0, 2) > 0); i++)
if ((tempBuffer[0] == 'd') && (tempBuffer[1] == 'a'))
{
stdin.Read(tempBuffer, 0, 6);
break;
}
}
}
}
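Note that the example above encodes raw 16-bit/44.1 kHz/stereo PCM, so to handle "any audio file" you still need a decode step in front of it. As a minimal sketch (assuming NAudio is referenced; MediaFoundationResampler is Windows-only, and the file names are placeholders), you could produce that raw PCM first:
// requires the NAudio.Wave and System.IO namespaces
// Decode any NAudio-supported input (mp3, wav, aiff, ...) into the raw
// 16-bit / 44.1 kHz / stereo PCM that the encoder example expects.
using (var reader = new AudioFileReader("input.mp3"))
using (var resampler = new MediaFoundationResampler(reader, new WaveFormat(44100, 16, 2)))
using (var raw = File.Create("unencoded.raw"))
{
    var buffer = new byte[resampler.WaveFormat.AverageBytesPerSecond];
    int read;
    while ((read = resampler.Read(buffer, 0, buffer.Length)) > 0)
        raw.Write(buffer, 0, read);
}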
I'm trying to parse a binary file as fast as possible. So this is what I first tried to do:
using (FileStream filestream = path.OpenRead()) {
using (var d = new GZipStream(filestream, CompressionMode.Decompress)) {
using (MemoryStream m = new MemoryStream()) {
d.CopyTo(m);
m.Position = 0;
using (BinaryReaderBigEndian b = new BinaryReaderBigEndian(m)) {
while (b.BaseStream.Position != b.BaseStream.Length) {
UInt32 value = b.ReadUInt32();
} } } } }
Where the BinaryReaderBigEndian class is implemented as follows:
public class BinaryReaderBigEndian : BinaryReader {
public BinaryReaderBigEndian(Stream stream) : base(stream) { }
public override UInt32 ReadUInt32() {
var x = base.ReadBytes(4);
Array.Reverse(x);
return BitConverter.ToUInt32(x, 0);
} }
Then, I tried to get a performance improvement using ReadOnlySpan instead of MemoryStream. So, I tried doing:
using (FileStream filestream = path.OpenRead()) {
using (var d = new GZipStream(filestream, CompressionMode.Decompress)) {
using (MemoryStream m = new MemoryStream()) {
d.CopyTo(m);
int position = 0;
ReadOnlySpan<byte> stream = new ReadOnlySpan<byte>(m.ToArray());
while (position != stream.Length) {
UInt32 value = stream.ReadUInt32(position);
position += 4;
} } } }
Where the BinaryReaderBigEndian class changed to:
public static class BinaryReaderBigEndian {
public static UInt32 ReadUInt32(this ReadOnlySpan<byte> stream, int start) {
var data = stream.Slice(start, 4).ToArray();
Array.Reverse(data);
return BitConverter.ToUInt32(data, 0);
} }
But, unfortunately, I didn't notice any improvement. So, where am I going wrong?
I did some measurements of your code on my computer (Intel Q9400, 8 GiB RAM, SSD disk, Win10 x64 Home, .NET Framework 4.7.2, tested with a 15 MB (when unpacked) file) with these results:
No-Span version: 520 ms
Span version: 720 ms
So the Span version is actually slower! Why? Because new ReadOnlySpan<byte>(m.ToArray()) performs an additional copy of the whole file, and ReadUInt32() also performs many slicings of the Span (slicing is cheap, but not free). Since you performed more work, you can't expect performance to be any better just because you used Span.
So can we do better? Yes. It turns out that the slowest part of your code is actually the garbage collection caused by the 4-byte arrays repeatedly allocated by the .ToArray() calls in the ReadUInt32() method. You can avoid this by implementing ReadUInt32() yourself. It's pretty easy and it also eliminates the need for Span slicing. You can also replace new ReadOnlySpan<byte>(m.ToArray()) with new ReadOnlySpan<byte>(m.GetBuffer()).Slice(0, (int)m.Length), which performs cheap slicing instead of copying the whole file. So now the code looks like this:
public static void Read(FileInfo path)
{
using (FileStream filestream = path.OpenRead())
{
using (var d = new GZipStream(filestream, CompressionMode.Decompress))
{
using (MemoryStream m = new MemoryStream())
{
d.CopyTo(m);
int position = 0;
ReadOnlySpan<byte> stream = new ReadOnlySpan<byte>(m.GetBuffer()).Slice(0, (int)m.Length);
while (position != stream.Length)
{
UInt32 value = stream.ReadUInt32(position);
position += 4;
}
}
}
}
}
public static class BinaryReaderBigEndian
{
public static UInt32 ReadUInt32(this ReadOnlySpan<byte> stream, int start)
{
UInt32 res = 0;
for (int i = 0; i < 4; i++)
{
res = (res << 8) | (((UInt32)stream[start + i]) & 0xff);
}
return res;
}
}
With these changes I went from 720 ms down to 165 ms (4x faster). Sounds great, doesn't it? But we can do even better: we can completely avoid the MemoryStream copy, and inline and further optimize ReadUInt32():
public static void Read(FileInfo path)
{
using (FileStream filestream = path.OpenRead())
{
using (var d = new GZipStream(filestream, CompressionMode.Decompress))
{
var buffer = new byte[64 * 1024];
do
{
int bufferDataLength = FillBuffer(d, buffer);
if (bufferDataLength % 4 != 0)
throw new Exception("Stream length not divisible by 4");
if (bufferDataLength == 0)
break;
for (int i = 0; i < bufferDataLength; i += 4)
{
uint value = unchecked(
(((uint)buffer[i]) << 24)
| (((uint)buffer[i + 1]) << 16)
| (((uint)buffer[i + 2]) << 8)
| (((uint)buffer[i + 3]) << 0));
}
} while (true);
}
}
}
private static int FillBuffer(Stream stream, byte[] buffer)
{
int read = 0;
int totalRead = 0;
do
{
read = stream.Read(buffer, totalRead, buffer.Length - totalRead);
totalRead += read;
} while (read > 0 && totalRead < buffer.Length);
return totalRead;
}
And now it takes less than 90 ms (8x faster than the original!). And without Span! Span is great in situations where it allows slicing and avoids an array copy, but it won't improve performance just because you blindly use it. After all, Span is designed to have performance characteristics on par with Array, not better (and only on runtimes that have special support for it, such as .NET Core 2.1).
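For reference, a minimal Stopwatch-style harness of the kind you could use to reproduce such measurements (the file path here is just a placeholder; Read is the method shown above):
var path = new FileInfo(@"C:\temp\testdata.bin.gz"); // hypothetical test file
var sw = System.Diagnostics.Stopwatch.StartNew();
Read(path); // run the version you want to measure
sw.Stop();
Console.WriteLine($"Read took {sw.ElapsedMilliseconds} ms");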
It took me hours of trying to figure out how to mimic the exact decompression in this project: https://github.com/crimsoncantab/aok-hotkeys/blob/master/modules/hkizip.py but to no avail.
If you go to this website: http://aokhotkeys.appspot.com/ you can click any preset and download it to see for yourself.
The decompression is supposed to turn a .hki file into a text file.
This is my attempt, which may not be as accurate as the Python project linked above:
private string ZlibCodecDecompress(byte[] compressed)
{
int outputSize = 2048;
byte[] output = new Byte[outputSize];
// If you have a ZLIB stream, set this to true. If you have
// a bare DEFLATE stream, set this to false.
bool expectRfc1950Header = false;
using (MemoryStream ms = new MemoryStream())
{
ZlibCodec compressor = new ZlibCodec();
compressor.InitializeInflate(expectRfc1950Header);
compressor.InputBuffer = compressed;
compressor.AvailableBytesIn = compressed.Length;
compressor.NextIn = 0;
compressor.CompressLevel = Ionic.Zlib.CompressionLevel.Level8;
compressor.OutputBuffer = output;
foreach (var f in new FlushType[] { FlushType.None, FlushType.Finish })
{
int bytesToWrite = 0;
do
{
compressor.AvailableBytesOut = outputSize;
compressor.NextOut = 0;
compressor.Inflate(f);
bytesToWrite = outputSize - compressor.AvailableBytesOut;
if (bytesToWrite > 0)
ms.Write(output, 0, bytesToWrite);
}
while ((f == FlushType.None && (compressor.AvailableBytesIn != 0 || compressor.AvailableBytesOut == 0)) ||
(f == FlushType.Finish && bytesToWrite != 0));
}
compressor.EndInflate();
return UTF8Encoding.UTF8.GetString(ms.ToArray());
}
}
This method returns: "Ionic.Zlib.ZlibException: Bad state"
I'd appreciate any help.
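For comparison, here is a much simpler sketch using the built-in DeflateStream, assuming the .hki payload really is a bare DEFLATE stream (which is what expectRfc1950Header = false implies); this is an assumption, not a verified match for the Python code:
// requires System.IO, System.IO.Compression and System.Text
private static string DeflateDecompress(byte[] compressed)
{
    using (var input = new MemoryStream(compressed))
    using (var deflate = new DeflateStream(input, CompressionMode.Decompress))
    using (var output = new MemoryStream())
    {
        // inflate the whole payload into memory and decode it as UTF-8 text
        deflate.CopyTo(output);
        return Encoding.UTF8.GetString(output.ToArray());
    }
}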
Hi, I am trying to read a file one byte at a time in reverse order. So far I have only managed to read the file from beginning to end and write it to another file.
I need to be able to read the file from the end to the beginning and write it to another file.
This is what I have so far:
string fileName = Console.ReadLine();
using (FileStream file = new FileStream(fileName ,FileMode.Open , FileAccess.Read))
{
//file.Seek(endOfFile, SeekOrigin.End);
int bytes;
using (FileStream newFile = new FileStream("newsFile.txt" , FileMode.Create , FileAccess.Write))
{
while ((bytes = file.ReadByte()) >= 0)
{
Console.WriteLine(bytes.ToString());
newFile.WriteByte((byte)bytes);
}
}
}
I know that I have to use the Seek method on the FileStream, and that gets me to the end of the file. I already did that in the commented portion of the code, but I do not know how to read the file in the while loop now.
How can I achieve this?
string fileName = Console.ReadLine();
using (FileStream file = new FileStream(fileName, FileMode.Open, FileAccess.Read))
{
byte[] output = new byte[file.Length]; // reversed file
// read the file backwards using SeekOrigin.Current
//
long offset;
file.Seek(0, SeekOrigin.End);
for (offset = 0; offset < file.Length; offset++)
{
file.Seek(-1, SeekOrigin.Current);
output[offset] = (byte)file.ReadByte();
file.Seek(-1, SeekOrigin.Current);
}
// write entire reversed file array to new file
//
File.WriteAllBytes("newsFile.txt", output);
}
You could do it by reading one byte at a time, or you could read a larger buffer, write it to the output file in reverse, and continue like that until you've reached the beginning of the file. For example:
string inputFilename = "inputFile.txt";
string outputFilename = "outputFile.txt";
using (var ofile = File.OpenWrite(outputFilename))
{
using (var ifile = File.OpenRead(inputFilename))
{
int bufferSize = 4096;
byte[] buffer = new byte[bufferSize];
long filePos = ifile.Length;
do
{
long newPos = Math.Max(0, filePos - bufferSize);
int bytesToRead = (int)(filePos - newPos);
ifile.Seek(newPos, SeekOrigin.Begin);
int bytesRead = ifile.Read(buffer, 0, bytesToRead);
// write the buffer to the output file, in reverse
for (int i = bytesRead-1; i >= 0; --i)
{
ofile.WriteByte(buffer[i]);
}
filePos = newPos;
} while (filePos > 0);
}
}
An obvious optimization would be to reverse the buffer after you've read it, and then write it in one whole chunk to the output file.
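As a rough sketch of that optimization (reusing the variable names from the loop above), the per-byte inner loop would be replaced with an in-place reverse and a single write:
// reverse just the portion of the buffer that was actually read,
// then write it to the output in one call
Array.Reverse(buffer, 0, bytesRead);
ofile.Write(buffer, 0, bytesRead);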
And if you know that the file will fit into memory, it's really easy:
var buffer = File.ReadAllBytes(inputFilename);
// now, reverse the buffer
int i = 0;
int j = buffer.Length-1;
while (i < j)
{
byte b = buffer[i];
buffer[i] = buffer[j];
buffer[j] = b;
++i;
--j;
}
// and write it
File.WriteAllBytes(outputFilename, buffer);
If the file is small (fits in your RAM) then this would work:
public static IEnumerable<byte> Reverse(string inputFilename)
{
var bytes = File.ReadAllBytes(inputFilename);
Array.Reverse(bytes);
foreach (var b in bytes)
{
yield return b;
}
}
Usage:
foreach (var b in Reverse("smallfile.dat"))
{
}
If the file is large (bigger than your RAM) then this would work:
using (var inputFile = File.OpenRead("bigfile.dat"))
using (var inputFileReversed = new ReverseStream(inputFile))
using (var binaryReader = new BinaryReader(inputFileReversed))
{
while (binaryReader.BaseStream.Position != binaryReader.BaseStream.Length)
{
var b = binaryReader.ReadByte();
}
}
It uses the ReverseStream class which can be found here.
I have audio samples extracted through NAudio, and I know these parameters:
channels
bytes per sample
sample rate
How can I play those samples using the .NET API or another .NET library?
Here is the code:
openFileDialog1.ShowDialog();
using (var reader = new Mp3FileReader(openFileDialog1.FileName))
{
var pcmLength = (int)reader.Length;
var _leftBuffer = new byte[pcmLength / 2];
var buffer = new byte[pcmLength];
var bytesRead = reader.Read(buffer, 0, pcmLength);
int index = 0;
for (int i = 0; i < bytesRead; i += 4)
{
//extracting only left channel
_leftBuffer[index] = buffer[i];
index++;
_leftBuffer[index] = buffer[i + 1];
index++;
}
// How to play _leftBuffer (single channel, 2 bytes per sample, 44100 samples per second)
}
First, you need to implement IWaveProvider or use one of the IWaveProvider implementations that come with NAudio, such as WaveProvider16. Next, initialize a WaveOut object with your IWaveProvider using WaveOut.Init(IWaveProvider provider), and finally call WaveOut.Play().
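As a minimal sketch of that approach for the buffer in the question (assuming _leftBuffer holds mono 16-bit/44.1 kHz PCM; RawSourceWaveStream and WaveOutEvent are used here as ready-made NAudio pieces instead of a hand-written IWaveProvider):
// requires NAudio.Wave, System.IO and System.Threading
var format = new WaveFormat(44100, 16, 1); // 44.1 kHz, 16-bit, mono
using (var source = new RawSourceWaveStream(new MemoryStream(_leftBuffer), format))
using (var waveOut = new WaveOutEvent())
{
    waveOut.Init(source);
    waveOut.Play();
    // keep the thread alive until playback finishes
    while (waveOut.PlaybackState == PlaybackState.Playing)
        Thread.Sleep(100);
}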
YES, I have found a solution: A low-level audio player in C#
Full working code:
public partial class Form1 : Form
{
private byte[] _leftBuffer;
private BiQuadFilter _leftFilter;
private BiQuadFilter _rightFilter;
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, EventArgs e)
{
openFileDialog1.ShowDialog();
using (var reader = new Mp3FileReader(openFileDialog1.FileName))
{
var pcmLength = (int)reader.Length;
_leftBuffer = new byte[pcmLength / 2];
var buffer = new byte[pcmLength];
var bytesRead = reader.Read(buffer, 0, pcmLength);
int index = 0;
for (int i = 0; i < bytesRead; i += 4)
{
_leftBuffer[index] = buffer[i];
index++;
_leftBuffer[index] = buffer[i + 1];
index++;
}
var player = new WaveLib.WaveOutPlayer(-1, new WaveLib.WaveFormat(44100, 16, 1), _leftBuffer.Length, 1, (data, size) =>
{
byte[] b = _leftBuffer;
System.Runtime.InteropServices.Marshal.Copy(b, 0, data, size);
});
}
}
}
What is the best method to replace a sequence of bytes in a binary file with another sequence of the same length? The binary files will be pretty large, about 50 MB, and should not be loaded into memory at once.
Update: I do not know the location of the bytes which need to be replaced; I need to find them first.
Assuming you're trying to replace a known section of the file.
Open a FileStream with read/write access
Seek to the right place
Overwrite existing data
Sample code coming...
public static void ReplaceData(string filename, int position, byte[] data)
{
using (Stream stream = File.Open(filename, FileMode.Open))
{
stream.Position = position;
stream.Write(data, 0, data.Length);
}
}
If you're effectively trying to do a binary version of a string.Replace (e.g. "always replace bytes { 51, 20, 34 } with { 20, 35, 15 }") then it's rather harder. As a quick description of what you'd do:
Allocate a buffer of at least the size of data you're interested in
Repeatedly read into the buffer, scanning for the data
If you find a match, seek back to the right place (e.g. stream.Position -= buffer.Length - indexWithinBuffer;) and overwrite the data
Sounds simple so far... but the tricky bit is if the data starts near the end of the buffer. You need to remember all potential matches and how far you've matched so far, so that if you get a match when you read the next buffer's-worth, you can detect it.
There are probably ways of avoiding this trickiness, but I wouldn't like to try to come up with them offhand :)
EDIT: Okay, I've got an idea which might help...
Keep a buffer which is at least twice as big as you need
Repeatedly:
Copy the second half of the buffer into the first half
Fill the second half of the buffer from the file
Search throughout the whole buffer for the data you're looking for
That way at some point, if the data is present, it will be completely within the buffer.
You'd need to be careful about where the stream was in order to get back to the right place, but I think this should work. It would be trickier if you were trying to find all matches, but at least the first match should be reasonably simple...
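A rough sketch of that double-buffer scan (a hypothetical helper that only reports the offset of the first match and does no replacing):
// requires System and System.IO
static long FindPattern(Stream stream, byte[] pattern)
{
    // each half must be at least as long as the pattern so a match can
    // never span more than one "slide" of the buffer
    int half = Math.Max(4096, pattern.Length);
    byte[] buffer = new byte[half * 2];
    long bufferStart = 0; // file offset of buffer[0]
    int filled = stream.Read(buffer, 0, buffer.Length);
    while (true)
    {
        // search the whole buffer for the pattern
        for (int i = 0; i + pattern.Length <= filled; i++)
        {
            int j = 0;
            while (j < pattern.Length && buffer[i + j] == pattern[j]) j++;
            if (j == pattern.Length) return bufferStart + i; // found
        }
        if (filled < buffer.Length) return -1; // end of file, no match
        // slide: copy the second half over the first half, then refill the second half
        Array.Copy(buffer, half, buffer, 0, half);
        bufferStart += half;
        filled = half + stream.Read(buffer, half, half);
    }
}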
My solution:
/// <summary>
/// Copy data from a file to an other, replacing search term, ignoring case.
/// </summary>
/// <param name="originalFile"></param>
/// <param name="outputFile"></param>
/// <param name="searchTerm"></param>
/// <param name="replaceTerm"></param>
private static void ReplaceTextInBinaryFile(string originalFile, string outputFile, string searchTerm, string replaceTerm)
{
byte b;
//UpperCase bytes to search
byte[] searchBytes = Encoding.UTF8.GetBytes(searchTerm.ToUpper());
//LowerCase bytes to search
byte[] searchBytesLower = Encoding.UTF8.GetBytes(searchTerm.ToLower());
//Temporary bytes during found loop
byte[] bytesToAdd = new byte[searchBytes.Length];
//Search length
int searchBytesLength = searchBytes.Length;
//First Upper char
byte searchByte0 = searchBytes[0];
//First Lower char
byte searchByte0Lower = searchBytesLower[0];
//Replace with bytes
byte[] replaceBytes = Encoding.UTF8.GetBytes(replaceTerm);
int counter = 0;
using (FileStream inputStream = File.OpenRead(originalFile)) {
//input length
long srcLength = inputStream.Length;
using (BinaryReader inputReader = new BinaryReader(inputStream)) {
using (FileStream outputStream = File.OpenWrite(outputFile)) {
using (BinaryWriter outputWriter = new BinaryWriter(outputStream)) {
for (int nSrc = 0; nSrc < srcLength; ++nSrc)
//first byte
if ((b = inputReader.ReadByte()) == searchByte0
|| b == searchByte0Lower) {
bytesToAdd[0] = b;
int nSearch = 1;
//next bytes
for (; nSearch < searchBytesLength; ++nSearch)
//get byte, save it and test
if ((b = bytesToAdd[nSearch] = inputReader.ReadByte()) != searchBytes[nSearch]
&& b != searchBytesLower[nSearch]) {
break;//fail
}
//Avoid overflow. No need, in my case, because no chance to see searchTerm at the end.
//else if (nSrc + nSearch >= srcLength)
// break;
if (nSearch == searchBytesLength) {
//success
++counter;
outputWriter.Write(replaceBytes);
nSrc += nSearch - 1;
}
else {
//failed, add saved bytes
outputWriter.Write(bytesToAdd, 0, nSearch + 1);
nSrc += nSearch;
}
}
else
outputWriter.Write(b);
}
}
}
}
Console.WriteLine("ReplaceTextInBinaryFile.counter = " + counter);
}
You can use my BinaryUtility to search and replace one or more bytes without loading the entire file into memory like this:
var searchAndReplace = new List<Tuple<byte[], byte[]>>()
{
Tuple.Create(
BitConverter.GetBytes((UInt32)0xDEADBEEF),
BitConverter.GetBytes((UInt32)0x01234567)),
Tuple.Create(
BitConverter.GetBytes((UInt32)0xAABBCCDD),
BitConverter.GetBytes((UInt16)0xAFFE)),
};
using(var reader =
new BinaryReader(new FileStream(@"C:\temp\data.bin", FileMode.Open)))
{
using(var writer =
new BinaryWriter(new FileStream(@"C:\temp\result.bin", FileMode.Create)))
{
BinaryUtility.Replace(reader, writer, searchAndReplace);
}
}
BinaryUtility code:
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
public static class BinaryUtility
{
public static IEnumerable<byte> GetByteStream(BinaryReader reader)
{
const int bufferSize = 1024;
byte[] buffer;
do
{
buffer = reader.ReadBytes(bufferSize);
foreach (var d in buffer) { yield return d; }
} while (bufferSize == buffer.Length);
}
public static void Replace(BinaryReader reader, BinaryWriter writer, IEnumerable<Tuple<byte[], byte[]>> searchAndReplace)
{
foreach (byte d in Replace(GetByteStream(reader), searchAndReplace)) { writer.Write(d); }
}
public static IEnumerable<byte> Replace(IEnumerable<byte> source, IEnumerable<Tuple<byte[], byte[]>> searchAndReplace)
{
foreach (var s in searchAndReplace)
{
source = Replace(source, s.Item1, s.Item2);
}
return source;
}
public static IEnumerable<byte> Replace(IEnumerable<byte> input, IEnumerable<byte> from, IEnumerable<byte> to)
{
var fromEnumerator = from.GetEnumerator();
fromEnumerator.MoveNext();
int match = 0;
foreach (var data in input)
{
if (data == fromEnumerator.Current)
{
match++;
if (fromEnumerator.MoveNext()) { continue; }
foreach (byte d in to) { yield return d; }
match = 0;
fromEnumerator.Reset();
fromEnumerator.MoveNext();
continue;
}
if (0 != match)
{
foreach (byte d in from.Take(match)) { yield return d; }
match = 0;
fromEnumerator.Reset();
fromEnumerator.MoveNext();
}
yield return data;
}
if (0 != match)
{
foreach (byte d in from.Take(match)) { yield return d; }
}
}
}
public static void BinaryReplace(string sourceFile, byte[] sourceSeq, string targetFile, byte[] targetSeq)
{
FileStream sourceStream = File.OpenRead(sourceFile);
FileStream targetStream = File.Create(targetFile);
try
{
int b;
long foundSeqOffset = -1;
int searchByteCursor = 0;
while ((b=sourceStream.ReadByte()) != -1)
{
if (sourceSeq[searchByteCursor] == b)
{
if (searchByteCursor == sourceSeq.Length - 1)
{
targetStream.Write(targetSeq, 0, targetSeq.Length);
searchByteCursor = 0;
foundSeqOffset = -1;
}
else
{
if (searchByteCursor == 0)
{
foundSeqOffset = sourceStream.Position - 1;
}
++searchByteCursor;
}
}
else
{
if (searchByteCursor == 0)
{
targetStream.WriteByte((byte) b);
}
else
{
targetStream.WriteByte(sourceSeq[0]);
sourceStream.Position = foundSeqOffset + 1;
searchByteCursor = 0;
foundSeqOffset = -1;
}
}
}
}
finally
{
sourceStream.Dispose();
targetStream.Dispose();
}
}
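Hypothetical usage of the method above (file names and byte values are just placeholders):
// stream source.bin into target.bin, replacing every occurrence
// of { 0xDE, 0xAD } with { 0xBE, 0xEF }
BinaryReplace("source.bin", new byte[] { 0xDE, 0xAD },
              "target.bin", new byte[] { 0xBE, 0xEF });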