Voice is not recognized with Microsoft Speech - C#

I tried implementing a tutorial for Microsoft Speech Recognition. I get no errors, but the voice is still not recognized. The code is:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Microsoft.Speech.Recognition;
namespace WindowsFormsApplication3
{
public partial class Form1 : Form
{
//SpeechRecognizer recognizer;
SpeechRecognitionEngine sre;
public Form1()
{
InitializeComponent();
sre = new SpeechRecognitionEngine();
sre.SetInputToWaveFile(@"c:\Test\Colors.wav");
Console.WriteLine("here");
// Create a simple grammar that recognizes "red", "green", or "blue".
Choices colors = new Choices();
colors.Add(new string[] { "red", "green", "blue" });
Console.WriteLine("here");
// Create a GrammarBuilder object and append the Choices object.
GrammarBuilder gb = new GrammarBuilder();
gb.Append(colors);
// Create the Grammar instance and load it into the speech recognition engine.
Grammar g = new Grammar(gb);
sre.LoadGrammar(g);
// Register a handler for the SpeechRecognized event.
// Start recognition.
sre.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
sre.Recognize();
Console.WriteLine("here");
}
private void Form1_Load(object sender, EventArgs e)
{
}
// Create a simple handler for the SpeechRecognized event.
void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
Console.WriteLine("here");
MessageBox.Show("Speech recognized: " + e.Result.Text);
}
}
}
Kindly help me sort this out! I have no idea why it is not working, and I am new to C# and Visual Studio.
PS: While running the program, I also get messages in the output window like the following:
The thread '<No Name>' (0x674) has exited with code 0 (0x0).
The thread '<No Name>' (0x1ee0) has exited with code 0 (0x0).
The thread '<No Name>' (0xf8) has exited with code 0 (0x0).
The thread '<No Name>' (0x760) has exited with code 0 (0x0).
The thread 'vshost.RunParkingWindow' (0x1184) has exited with code 0 (0x0).

Try this code; it works for me:
public partial class MainWindow : Window
{
SpeechRecognitionEngine _recognizer;
SpeechSynthesizer sre = new SpeechSynthesizer();
int count = 1;
public MainWindow()
{
InitializeComponent();
Initialize();
}
private void Initialize()
{
try
{
var culture = new CultureInfo("en-US");
_recognizer = new SpeechRecognitionEngine(culture);
_recognizer.SetInputToDefaultAudioDevice();
_recognizer.LoadGrammar(GetGrammer());
_recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(_recognizer_SpeechRecognized);
_recognizer.RecognizeAsync(RecognizeMode.Multiple);
sre.SelectVoiceByHints(VoiceGender.Male, VoiceAge.Child);
sre.Rate = -2;
}
catch (Exception ex)
{
System.Windows.MessageBox.Show(ex.InnerException.Message);
}
}
private static Grammar GetGrammer()
{
var choices = new Choices();
//add custom commands
choices.Add(File.ReadAllLines(@"Commands.txt"));
//to add the letters to the dictionary
choices.Add(Enum.GetNames(typeof(Keys)).ToArray());
var grammer = new Grammar(new GrammarBuilder(choices));
return grammer;
}
void _recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
string speech = e.Result.Text;
//to type letters in open application like notepad
if (Enum.GetNames(typeof(Keys)).Contains(speech))
{
try
{ //send the string to the application
SendKeys.SendWait("{" + speech + "}");
}
catch (ArgumentException)
{
}
}
//handle custom commands
switch (speech)
{
case "Hello":
sre.Speak("Goodmorning ");
break;
case "Notepad":
System.Diagnostics.Process.Start("Notepad");
break;
case "Maximize":
this.WindowState = System.Windows.WindowState.Maximized;
break;
case "Minimize":
this.WindowState = System.Windows.WindowState.Minimized;
break;
case "Restore":
this.WindowState = System.Windows.WindowState.Normal;
break;
case "Close":
Close();
break;
}
}
}
You would also need to create a .txt file to load the grammar from, with the commands each on a single line, like below:
Notepad
Close
Minimize
Maximize
Open
Hello
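Going back to the wave-file approach in the original question, here is a minimal sketch of that path, assuming c:\Test\Colors.wav exists and actually contains one of the colour words, and that a recognizer for the chosen culture is installed (Microsoft.Speech needs a separately installed language pack; System.Speech.Recognition uses the in-box desktop recognizer with the same API surface). Note the verbatim string prefix is @, not #, and the synchronous Recognize() call returns null when nothing in the file matched the grammar.
using System;
using Microsoft.Speech.Recognition; // or System.Speech.Recognition
class WaveFileDemo
{
    static void Main()
    {
        // Assumption: an en-US recognizer is installed for this engine.
        using (var sre = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US")))
        {
            var colors = new Choices("red", "green", "blue");
            sre.LoadGrammar(new Grammar(new GrammarBuilder(colors)));
            sre.SetInputToWaveFile(@"c:\Test\Colors.wav"); // verbatim string: @, not #

            // Recognize() blocks until the file is processed and returns null on no match.
            RecognitionResult result = sre.Recognize();
            Console.WriteLine(result == null ? "Nothing recognized" : "Recognized: " + result.Text);
        }
    }
}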

Related

SpeechRecognitionEngine seems to be getting no audio input

I am trying to use SpeechRecognitionEngine in a C# application under Windows 11.
I have tried the default microphone array that works with Cortana, a USB microphone, and a Bluetooth one. They all show they are working in Settings -> Sound.
I cannot get any of the events to fire, including SpeechDetected and AudioStateChanged.
Here is my code based on the Microsoft example:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Speech.Recognition;
using System.Diagnostics;
namespace Speech_Test
{
public partial class Form1 : Form
{
SpeechRecognitionEngine recognizer = null;
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
try
{
recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-GB"));
Choices services = new Choices(new string[] { "restaurants", "hotels", "gas stations" });
Choices cities = new Choices(new string[] { "Seattle", "Boston", "Dallas" });
GrammarBuilder findServices = new GrammarBuilder("Find");
findServices.Append(services);
findServices.Append("near");
findServices.Append(cities);
Grammar servicesGrammar = new Grammar(findServices);
recognizer.LoadGrammarAsync(servicesGrammar);
recognizer.SpeechDetected +=
new EventHandler<SpeechDetectedEventArgs>(recognizer_SpeechDetected);
recognizer.AudioStateChanged +=
new EventHandler<AudioStateChangedEventArgs>(recognizer_AudioStateChanged);
// Add a handler for the speech recognized event.
recognizer.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
recognizer.SpeechRecognitionRejected +=
new EventHandler<SpeechRecognitionRejectedEventArgs>(recognizer_SpeechRejected);
// Configure the input to the speech recognizer.
recognizer.SetInputToDefaultAudioDevice();
recognizer.RecognizeAsync(RecognizeMode.Multiple);
//recognizer.EmulateRecognize("Find restaurants near Dallas");
}
catch (Exception ex)
{
Debug.WriteLine(ex.InnerException.Message);
}
}
// Handle the AudioStateChanged event.
private void recognizer_AudioStateChanged(object sender, AudioStateChangedEventArgs e)
{
Debug.WriteLine("The new audio state is: " + e.AudioState);
}
private void recognizer_SpeechDetected(object sender, SpeechDetectedEventArgs e)
{
Console.WriteLine(" Speech detected at AudioPosition = {0}", e.AudioPosition);
}
private void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
Debug.WriteLine("Recognized text: " + e.Result.Text);
}
private void recognizer_SpeechRejected(object sender, SpeechRecognitionRejectedEventArgs e)
{
Debug.WriteLine("Rejected text: " + e.Result.Text);
}
}
}
Any suggestions on how to find out why audio is not getting through?
Thanks
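Not an answer, but one way to narrow this down is to check which recognizers are installed and to bypass the microphone entirely with emulated input: if the emulated phrase raises SpeechRecognized while real speech never does, the grammar is fine and the problem is on the audio side (for example, the Windows 11 microphone privacy setting for desktop apps). A sketch, with hypothetical placement at the end of Form1_Load above:
// 1) List the recognizers the engine can see.
foreach (RecognizerInfo info in SpeechRecognitionEngine.InstalledRecognizers())
{
    Debug.WriteLine("Recognizer: " + info.Name + " (" + info.Culture + ")");
}
// 2) Bypass the microphone; if this raises SpeechRecognized, the grammar works
//    and only the audio path is failing.
recognizer.EmulateRecognizeAsync("Find restaurants near Dallas");
// 3) Check what the engine reports about its input.
Debug.WriteLine("AudioState: " + recognizer.AudioState + ", AudioLevel: " + recognizer.AudioLevel);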

SAPI does not implement phonetic alphabet selection. Speech Command App

In my speech command app, I am loading files from an external source, processing that data, and loading it into a list of possible commands for execution.
When I run the app, I get this message in the console:
Main.exe Information: 0: SAPI does not implement phonetic alphabet selection.
I tried solutions such as adding
gram.Culture = New System.Globalization.CultureInfo("en-GB")
I think this is either outdated or does not work in this type of WPF application.
Any tips?
Code:
{
InitializeComponent();
}
SpeechSynthesizer synth = new SpeechSynthesizer();
PromptBuilder builder = new PromptBuilder();
SpeechRecognitionEngine recog = new SpeechRecognitionEngine();
private System.Windows.Input.Key k;
private int vkey;
private Choices cmd;
private void Button_Click_1(object sender, RoutedEventArgs e)
{
b1.IsEnabled = false;
Choices list = new Choices();
cmd = Singleton.getInstance().getChoices();
list.Add(new string[] { "hello", "test", "it works", "sup", "windows", "grenade" });
Grammar gr = new Grammar(new GrammarBuilder(cmd));
try
{
recog.RequestRecognizerUpdate();
recog.LoadGrammar(gr);
recog.SpeechRecognized += Recog_SpeechRecognized;
recog.SetInputToDefaultAudioDevice();
recog.RecognizeAsync(RecognizeMode.Multiple);
}
catch
{
return;
}
}
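For what it's worth, the "SAPI does not implement phonetic alphabet selection" line is normally just an informational trace rather than an error. The snippet quoted above is VB syntax; in C#, pinning both the engine and the grammar to the same culture would look roughly like the sketch below (reusing recog and cmd from the code above, and assuming an en-GB recognizer is installed):
var culture = new System.Globalization.CultureInfo("en-GB");
// The culture must be passed to the engine's constructor; it cannot be changed afterwards.
recog = new SpeechRecognitionEngine(culture);
// GrammarBuilder exposes a settable Culture property that is used when the Grammar is built.
var builder = new GrammarBuilder(cmd) { Culture = culture };
Grammar gr = new Grammar(builder);
recog.LoadGrammar(gr);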

How to convert speech to text?

I am trying to develop the following functionality:
The first task, converting text to voice - DONE
The second task, converting voice to text - getting an issue
The third task, implementing both of these on the given chat board where an AI chat already exists
I am using the following code to get text from voice/speech.
I am getting a result, but it is not what I want.
Please check the code snippet below.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Speech.Recognition;
using System.Speech.Synthesis;
namespace StartingWithSpeechRecognition
{
class Program
{
static SpeechRecognitionEngine _recognizer = null;
static ManualResetEvent manualResetEvent = null;
static void Main(string[] args)
{
manualResetEvent = new ManualResetEvent(false);
Console.WriteLine("To recognize speech, and write 'test' to the console, press 0");
Console.WriteLine("To recognize speech and make sure the computer speaks to you, press 1");
Console.WriteLine("To emulate speech recognition, press 2");
Console.WriteLine("To recognize speech using Choices and GrammarBuilder.Append, press 3");
Console.WriteLine("To recognize speech using a DictationGrammar, press 4");
Console.WriteLine("To get a prompt building example, press 5");
ConsoleKeyInfo pressedKey = Console.ReadKey(true);
char keychar = pressedKey.KeyChar;
Console.WriteLine("You pressed '{0}'", keychar);
switch (keychar)
{
case '0':
RecognizeSpeechAndWriteToConsole();
break;
case '1':
RecognizeSpeechAndMakeSureTheComputerSpeaksToYou();
break;
case '2':
EmulateRecognize();
break;
case '3':
SpeechRecognitionWithChoices();
break;
case '4':
SpeechRecognitionWithDictationGrammar();
break;
case '5':
PromptBuilding();
break;
default:
Console.WriteLine("You didn't press 0, 1, 2, 3, 4, or 5!");
Console.WriteLine("Press any key to continue . . .");
Console.ReadKey(true);
Environment.Exit(0);
break;
}
if (keychar != '5')
{
manualResetEvent.WaitOne();
}
if (_recognizer != null)
{
_recognizer.Dispose();
}
Console.WriteLine("Press any key to continue . . .");
Console.ReadKey(true);
}
#region Recognize speech and write to console
static void RecognizeSpeechAndWriteToConsole()
{
_recognizer = new SpeechRecognitionEngine();
_recognizer.LoadGrammar(new Grammar(new GrammarBuilder("test"))); // load a "test" grammar
_recognizer.LoadGrammar(new Grammar(new GrammarBuilder("exit"))); // load a "exit" grammar
_recognizer.SpeechRecognized += _recognizeSpeechAndWriteToConsole_SpeechRecognized; // if speech is recognized, call the specified method
_recognizer.SpeechRecognitionRejected += _recognizeSpeechAndWriteToConsole_SpeechRecognitionRejected; // if recognized speech is rejected, call the specified method
_recognizer.SetInputToDefaultAudioDevice(); // set the input to the default audio device
_recognizer.RecognizeAsync(RecognizeMode.Multiple); // recognize speech asynchronous
}
static void _recognizeSpeechAndWriteToConsole_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
if (e.Result.Text == "test")
{
Console.WriteLine("test");
}
else if (e.Result.Text == "exit")
{
manualResetEvent.Set();
}
}
static void _recognizeSpeechAndWriteToConsole_SpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
{
Console.WriteLine("Speech rejected. Did you mean:");
foreach (RecognizedPhrase r in e.Result.Alternates)
{
Console.WriteLine(" " + r.Text);
}
}
#endregion
#region Recognize speech and make sure the computer speaks to you (text to speech)
static void RecognizeSpeechAndMakeSureTheComputerSpeaksToYou()
{
_recognizer = new SpeechRecognitionEngine();
_recognizer.LoadGrammar(new Grammar(new GrammarBuilder("hello computer"))); // load a "hello computer" grammar
_recognizer.SpeechRecognized += _recognizeSpeechAndMakeSureTheComputerSpeaksToYou_SpeechRecognized; // if speech is recognized, call the specified method
_recognizer.SpeechRecognitionRejected += _recognizeSpeechAndMakeSureTheComputerSpeaksToYou_SpeechRecognitionRejected;
_recognizer.SetInputToDefaultAudioDevice(); // set the input to the default audio device
_recognizer.RecognizeAsync(RecognizeMode.Multiple); // recognize speech asynchronous
}
static void _recognizeSpeechAndMakeSureTheComputerSpeaksToYou_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
if (e.Result.Text == "hello computer")
{
SpeechSynthesizer speechSynthesizer = new SpeechSynthesizer();
speechSynthesizer.Speak("hello user");
speechSynthesizer.Dispose();
}
manualResetEvent.Set();
}
static void _recognizeSpeechAndMakeSureTheComputerSpeaksToYou_SpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
{
if (e.Result.Alternates.Count == 0)
{
Console.WriteLine("No candidate phrases found.");
return;
}
Console.WriteLine("Speech rejected. Did you mean:");
foreach (RecognizedPhrase r in e.Result.Alternates)
{
Console.WriteLine(" " + r.Text);
}
}
#endregion
#region Emulate speech recognition
static void EmulateRecognize()
{
_recognizer = new SpeechRecognitionEngine();
_recognizer.LoadGrammar(new Grammar(new GrammarBuilder("emulate speech"))); // load "emulate speech" grammar
_recognizer.SpeechRecognized += _emulateRecognize_SpeechRecognized;
_recognizer.EmulateRecognize("emulate speech");
}
static void _emulateRecognize_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
if (e.Result.Text == "emulate speech")
{
Console.WriteLine("Speech was emulated!");
}
manualResetEvent.Set();
}
#endregion
#region Speech recognition with Choices and GrammarBuilder.Append
static void SpeechRecognitionWithChoices()
{
_recognizer = new SpeechRecognitionEngine();
GrammarBuilder grammarBuilder = new GrammarBuilder();
grammarBuilder.Append("I"); // add "I"
grammarBuilder.Append(new Choices("like", "dislike")); // load "like" & "dislike"
grammarBuilder.Append(new Choices("dogs", "cats", "birds", "snakes", "fishes", "tigers", "lions", "snails", "elephants")); // add animals
_recognizer.LoadGrammar(new Grammar(grammarBuilder)); // load grammar
_recognizer.SpeechRecognized += speechRecognitionWithChoices_SpeechRecognized;
_recognizer.SetInputToDefaultAudioDevice(); // set input to default audio device
_recognizer.RecognizeAsync(RecognizeMode.Multiple); // recognize speech
}
static void speechRecognitionWithChoices_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
Console.WriteLine("Do you really " + e.Result.Words[1].Text + " " + e.Result.Words[2].Text + "?");
manualResetEvent.Set();
}
#endregion
#region Speech recognition with DictationGrammar
static void SpeechRecognitionWithDictationGrammar()
{
_recognizer = new SpeechRecognitionEngine();
_recognizer.LoadGrammar(new Grammar(new GrammarBuilder("exit")));
_recognizer.LoadGrammar(new DictationGrammar());
_recognizer.SpeechRecognized += speechRecognitionWithDictationGrammar_SpeechRecognized;
_recognizer.SetInputToDefaultAudioDevice();
_recognizer.RecognizeAsync(RecognizeMode.Multiple);
}
static void speechRecognitionWithDictationGrammar_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
if (e.Result.Text == "exit")
{
manualResetEvent.Set();
return;
}
Console.WriteLine("You said: " + e.Result.Text);
}
#endregion
#region Prompt building
static void PromptBuilding()
{
PromptBuilder builder = new PromptBuilder();
builder.StartSentence();
builder.AppendText("This is a prompt building example.");
builder.EndSentence();
builder.StartSentence();
builder.AppendText("Now, there will be a break of 2 seconds.");
builder.EndSentence();
builder.AppendBreak(new TimeSpan(0, 0, 2));
builder.StartStyle(new PromptStyle(PromptVolume.ExtraSoft));
builder.AppendText("This text is spoken extra soft.");
builder.EndStyle();
builder.StartStyle(new PromptStyle(PromptRate.Fast));
builder.AppendText("This text is spoken fast.");
builder.EndStyle();
SpeechSynthesizer synthesizer = new SpeechSynthesizer();
synthesizer.Speak(builder);
synthesizer.Dispose();
}
#endregion
}
}
If this is the wrong way, then please suggest the right way; any reference link or tutorial would be highly appreciated.
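A side note on the dictation path in the code above: the desktop dictation engine often returns low-confidence guesses, and inspecting Confidence and Alternates in the handler usually shows why the recognized text looks wrong. A sketch of the case '4' handler with that logging added (same System.Speech API, nothing new assumed):
static void speechRecognitionWithDictationGrammar_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    if (e.Result.Text == "exit")
    {
        manualResetEvent.Set();
        return;
    }
    // Confidence is a float between 0 and 1; low values mean the engine mostly guessed.
    Console.WriteLine("You said: {0} (confidence {1:0.00})", e.Result.Text, e.Result.Confidence);
    foreach (RecognizedPhrase alternate in e.Result.Alternates)
    {
        Console.WriteLine("  alternate: " + alternate.Text);
    }
}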
System.Speech.Recognition is an old API.
I think you have to use the Google Speech API: https://cloud.google.com/speech/docs/basics or the MS Bing Speech API: https://azure.microsoft.com/en-us/services/cognitive-services/speech/
I preferred the Google API. Here is a very small example:
using Google.Apis.Auth.OAuth2;
using Google.Cloud.Speech.V1;
using Grpc.Auth;
using System;
var speech = SpeechClient.Create( channel );
var response = speech.Recognize( new RecognitionConfig()
{
Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
SampleRateHertz = 16000,
LanguageCode = "hu",
}, RecognitionAudio.FromFile( "888.wav" ) );
foreach ( var result in response.Results )
{
foreach ( var alternative in result.Alternatives )
{
Console.WriteLine( alternative.Transcript );
}
}
But you can find more samples:
https://cloud.google.com/speech/docs/samples
Regards
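One note on the snippet above: channel is never defined there. With older Google.Cloud.Speech.V1 packages it was a Grpc.Core channel built from service-account credentials; with more recent versions you can usually drop it entirely and let SpeechClient.Create() pick up credentials from the GOOGLE_APPLICATION_CREDENTIALS environment variable. A sketch under that assumption (the package version and credential setup are my assumptions, not part of the answer above):
using Google.Cloud.Speech.V1;
using System;

class GoogleSpeechSketch
{
    static void Main()
    {
        // Assumes GOOGLE_APPLICATION_CREDENTIALS points at a service-account key file.
        var speech = SpeechClient.Create();
        var response = speech.Recognize(new RecognitionConfig
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
            SampleRateHertz = 16000,
            LanguageCode = "en-US", // assumption: English audio; the answer above used "hu"
        }, RecognitionAudio.FromFile("888.wav"));
        foreach (var result in response.Results)
            foreach (var alternative in result.Alternatives)
                Console.WriteLine(alternative.Transcript);
    }
}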

C# SelectVoice not changing in windows application but does in console

So I am trying to change a voice in C# with the System.Speech.Synthesis library. It works for me when I try the code in console mode. However, in a Windows application it does not change the voice and gives no errors. Here is the code of the Windows application, which works aside from the voice change.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading;
using System.Windows.Forms;
using System.Speech.Synthesis;
using System.Speech.Recognition;
namespace JarvisRev1
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
this.button1.Click += new EventHandler(button1_Click);
this.button2.Click += new EventHandler(button2_Click);
this.button3.Click += new EventHandler(button3_Click);
foreach (InstalledVoice voice in sSynth.GetInstalledVoices())
{
cbVoice.Items.Add(voice.VoiceInfo.Name);
}
}
SpeechSynthesizer sSynth = new SpeechSynthesizer();
PromptBuilder pBuilder = new PromptBuilder();
SpeechRecognitionEngine sRecognize = new SpeechRecognitionEngine();
private void button1_Click(object sender, EventArgs e)
{
pBuilder.ClearContent();
pBuilder.AppendText(textBox1.Text);
sSynth.SelectVoice("IVONA 2 Brian");
sSynth.SpeakAsync(pBuilder);
}
private void button2_Click(object sender, EventArgs e)
{
button2.Enabled = false;
button3.Enabled = true;
Choices sList = new Choices();
sList.Add(new string[] { "hello", "test", "it works", "how", "are", "you", "today", "i", "am", "fine", "exit", "close", "quit", "so", "hello how are you" });
Grammar gr = new Grammar(new GrammarBuilder(sList));
try
{
sRecognize.RequestRecognizerUpdate();
sRecognize.LoadGrammar(gr);
sRecognize.SpeechRecognized += sRecognize_SpeechRecognized;
sRecognize.SetInputToDefaultAudioDevice();
sRecognize.RecognizeAsync(RecognizeMode.Multiple);
sRecognize.Recognize();
}
catch
{
return;
}
}
private void button3_Click(object sender, EventArgs e)
{
sRecognize.RecognizeAsyncStop();
button2.Enabled = true;
button3.Enabled = false;
}
private void sRecognize_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
if (e.Result.Text == "exit")
{
Application.Exit();
}
else
{
textBox1.Text = textBox1.Text + " " + e.Result.Text.ToString();
}
}
}
}
This is the code in console mode that is working for me.
using System;
using System.Speech.Synthesis; // Add reference to System.Speech
class Program
{
static void Main(string[] args)
{
var synth = new SpeechSynthesizer();
synth.SelectVoice("IVONA 2 Brian");
synth.SpeakAsync("For you Sir, Always.");
foreach (var voice in synth.GetInstalledVoices())
{
Console.WriteLine(voice.VoiceInfo.Name);
}
Console.ReadLine();
}
}
I have the same issue when the Microsoft Irina Desktop voice is available on the system. The workaround is to set the voice explicitly in the prompt, e.g.:
using System.Speech.Synthesis;
var synth = new SpeechSynthesizer();
var builder = new PromptBuilder();
builder.StartVoice("Microsoft David Desktop");
builder.AppendText("Hello, World!");
builder.EndVoice();
synth.SpeakAsync(new Prompt(builder));
Since you already use a PromptBuilder, try adding StartVoice and EndVoice calls around the text.
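Applied to the button1_Click handler from the question, the workaround would look roughly like this (assuming the "IVONA 2 Brian" voice really is installed):
private void button1_Click(object sender, EventArgs e)
{
    pBuilder.ClearContent();
    pBuilder.StartVoice("IVONA 2 Brian"); // assumption: this exact voice name is installed
    pBuilder.AppendText(textBox1.Text);
    pBuilder.EndVoice();
    sSynth.SpeakAsync(new Prompt(pBuilder));
}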

Speech Recognition to display pictures from a listbox

I am having an error with this speech recognition: I keep getting "At least one grammar must be loaded before doing a recognition", and I can't get the images to display when I say the corresponding linked name.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Text;
using System.Windows.Forms;
using SpeechLib;
using System.IO;
using System.Speech.Recognition;
using System.Globalization;
namespace SimpleSpeechRecognition
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private SpeechRecognitionEngine recognizer;
private void Form1_Load(object sender, EventArgs e)
{
speechListBox1.Items.Add("Dog");
speechListBox1.Items.Add("Elephant");
speechListBox1.SpeechEnabled = true;
recognizer = new SpeechRecognitionEngine(new CultureInfo("en-GB"));
recognizer.SetInputToDefaultAudioDevice();
Choices choices = new Choices("Dog", "Elephant");
GrammarBuilder m_GrammarBuilder = new GrammarBuilder(choices);
Grammar m_Speech = new Grammar(m_GrammarBuilder);
recognizer.LoadGrammar(m_Speech);
recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
recognizer.RecognizeAsync(RecognizeMode.Multiple);
}
void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
foreach (RecognizedWordUnit word in e.Result.Words)
{
switch (word.Text)
{
case "Dog":
pictureBox1.Image = Image.FromFile("C:\\" + "dog.jpg");
break;
case "Elephant":
pictureBox1.Image = Image.FromFile("C:\\" + "elephant.jpg");
break;
}
}
}
private void speechListBox1_SelectedIndexChanged(object sender, EventArgs e)
{
//MessageBox.Show(speechListBox1.SelectedItems[0].ToString());
SayPhrase(speechListBox1.SelectedItems[0].ToString());
//pictureBox1.Image = Image.FromFile("C:\\" + "dog.jpg");
//pictureBox1.Image = Image.FromFile(((FileInfo)speechListBox1.SelectedItem).FullName);
pictureBox1.Refresh();
}
private void SayPhrase(string PhraseToSay )
{
SpeechVoiceSpeakFlags SpFlags = new SpeechVoiceSpeakFlags();
SpVoice Voice = new SpVoice();
Voice.Speak(PhraseToSay, SpFlags);
}
}
}
The error is self-explanatory:
The speech engine must have a collection of Choices to listen for; however, these need to be built into an appropriate Grammar before the engine can use them.
GrammarBuilder m_GrammarBuilder = new GrammarBuilder(choices);
Grammar m_Speech = new Grammar(m_GrammarBuilder);
Then just load the grammar in:
recognizer.LoadGrammar(m_Speech);
I think that should solve your problem. It is also worth noting that you can unload and load different sets of grammar via the UnloadGrammar() method as well.
Additionally, it is worth initializing the SpeechRecognitionEngine with appropriate culture info. For English (UK) this is:
new SpeechRecognitionEngine(new CultureInfo("en-GB"))
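As a small illustration of the UnloadGrammar() point above, swapping grammars at runtime could look like this sketch (reusing recognizer and m_Speech from the code above; the vehicle grammar is just a hypothetical second set):
// Hypothetical second grammar to switch to.
Choices vehicles = new Choices("Car", "Train");
Grammar vehicleGrammar = new Grammar(new GrammarBuilder(vehicles));

recognizer.RequestRecognizerUpdate();   // ask the engine to pause at a safe point
recognizer.UnloadGrammar(m_Speech);     // stop listening for "Dog" / "Elephant"
recognizer.LoadGrammar(vehicleGrammar); // listen for the new phrases instead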
