Unity has issues with Texture2D and GetPixels at various image sizes, so I’ve created a native plugin [UnityImageNativePlugin] to handle loading BMP, GIF, JPG, and PNG images.
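Here’s a minimal sketch of how a native loader like this can be bound from C#. The entry point names and the RGBA32 output format are assumptions for illustration; the actual exports in [UnityImageNativePlugin] may differ.
using System;
using System.Runtime.InteropServices;
using UnityEngine;
public class NativeImageLoader : MonoBehaviour
{
// hypothetical exports for illustration; the real plugin API may differ
[DllImport("UnityImageNativePlugin")]
private static extern IntPtr LoadImageFromFile(string path, out int width, out int height);
[DllImport("UnityImageNativePlugin")]
private static extern void FreeImage(IntPtr pixels);
public Texture2D Load(string path)
{
int width;
int height;
IntPtr pixels = LoadImageFromFile(path, out width, out height);
if (IntPtr.Zero == pixels)
{
return null;
}
// copy the raw RGBA32 pixels into a managed buffer
byte[] data = new byte[width * height * 4];
Marshal.Copy(pixels, data, 0, data.Length);
FreeImage(pixels);
// upload the pixels into a Texture2D
Texture2D texture = new Texture2D(width, height, TextureFormat.RGBA32, false);
texture.LoadRawTextureData(data);
texture.Apply();
return texture;
}
}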
Category: Unity
Speech Recognition Tutorial for Unity3D 5
Unity Labs: [Speech Recognition and VR]
Unity Docs: [Windows.Speech.DictationRecognizer]
Unity Bitbucket Source: [Speech to Text Repository]
Unity Asset Store: [Unity Speech-to-Text Package] (free)
Unity Asset Store List: [Speech API]
Mac Support for Chrome Speech Proxy
Compatible with [WebGL Speech Detection] and [WebGL Speech Synthesis]
UNET Part 1 – Setup and Movement Syncing
Unity Chroma SDK Plugin
This week, I’m wrapping up a Unity plugin for Chroma [UnityChromaSDK] that consumes the [Chroma REST API]. I relied on [Swagger.io] to process Swagger definitions and auto-generate a C# 2.0 client that works in Unity 3.5.7 and above.
You can check the REST API version at [localhost:54235].
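For anyone curious what the plumbing looks like without the generated client, here’s a minimal sketch that posts an initialization request to the REST endpoint from a coroutine. The JSON fields are illustrative; the exact schema is in the [Chroma REST API] docs.
using System.Collections;
using System.Collections.Generic;
using System.Text;
using UnityEngine;
public class ChromaRestExample : MonoBehaviour
{
// the port matches the localhost:54235 heartbeat above
private const string URL = "http://localhost:54235/razer/chromasdk";
IEnumerator Start()
{
// illustrative init body; consult the Chroma REST API docs for the exact fields
string json = "{\"title\": \"Test App\", \"description\": \"REST client test\", \"device_supported\": [\"keyboard\"], \"category\": \"application\"}";
Dictionary<string, string> headers = new Dictionary<string, string>();
headers["Content-Type"] = "application/json";
WWW www = new WWW(URL, Encoding.UTF8.GetBytes(json), headers);
yield return www;
if (string.IsNullOrEmpty(www.error))
{
// on success the response includes a session id and session URI
Debug.Log(www.text);
}
else
{
Debug.LogError(www.error);
}
}
}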
Unity: 5.6 is now Available and Completes the Unity 5 Cycle
Unity Contest: Best Tool for EditorVR – $10,000 USD
[Best Tool for EditorVR – $10,000 USD] has a deadline of April 27th, 2017.
Modeling with Speech Commands in Edit Mode in the Unity Editor
Documentation: [UnityWebGLSpeechDetection] [private]
Documentation: [UnityWebGLSpeechSynthesis] [private]
SteamVR: Unity Move/Rotate With Motion Controller Sticks
The [SteamVR] API makes the motion controller touchpad input available.
1. Import the SteamVR Plugin
2. Open the `Assets\SteamVR\InteractionSystem\Samples\Scenes\Interactions_Example.unity` scene
3. Attach `StickManipulation.cs` to the SteamVR `Player`
using UnityEngine;
using Valve.VR;
using Valve.VR.InteractionSystem;
public class StickManipulation : MonoBehaviour
{
private float _mMoveSpeed = 2.5f;
private float _mHorizontalTurnSpeed = 180f;
private float _mVerticalTurnSpeed = 2.5f;
private bool _mInverted = false;
private const float VERTICAL_LIMIT = 60f;
private void OnGUI()
{
Player player = Player.instance;
if (!player)
{
return;
}
EVRButtonId touchPad = EVRButtonId.k_EButton_SteamVR_Touchpad;
if (null != player.leftController)
{
var touchPadVector = player.leftController.GetAxis(touchPad);
GUILayout.Label(string.Format("Left X: {0:F2}, Y: {1:F2}", touchPadVector.x, touchPadVector.y));
}
if (null != player.rightController)
{
var touchPadVector = player.rightController.GetAxis(touchPad);
GUILayout.Label(string.Format("Right X: {0:F2}, Y: {1:F2}", touchPadVector.x, touchPadVector.y));
}
}
float GetAngle(float input)
{
if (input < 0f)
{
return -Mathf.LerpAngle(0, VERTICAL_LIMIT, -input);
}
else if (input > 0f)
{
return Mathf.LerpAngle(0, VERTICAL_LIMIT, input);
}
return 0f;
}
// Update is called once per frame
void Update()
{
Player player = Player.instance;
if (!player)
{
return;
}
EVRButtonId touchPad = EVRButtonId.k_EButton_SteamVR_Touchpad;
if (null != player.leftController)
{
Quaternion orientation = Camera.main.transform.rotation;
var touchPadVector = player.leftController.GetAxis(touchPad);
Vector3 moveDirection = orientation * Vector3.forward * touchPadVector.y + orientation * Vector3.right * touchPadVector.x;
Vector3 pos = player.transform.position;
pos.x += moveDirection.x * _mMoveSpeed * Time.deltaTime;
pos.z += moveDirection.z * _mMoveSpeed * Time.deltaTime;
player.transform.position = pos;
}
if (null != player.rightController)
{
var touchPadVector = player.rightController.GetAxis(touchPad);
// turn the player based on the right touchpad input
Vector3 euler = player.transform.rotation.eulerAngles;
float angle;
if (_mInverted)
{
angle = GetAngle(touchPadVector.y);
}
else
{
angle = GetAngle(-touchPadVector.y);
}
euler.x = Mathf.LerpAngle(euler.x, angle, _mVerticalTurnSpeed * Time.deltaTime);
euler.y += touchPadVector.x * _mHorizontalTurnSpeed * Time.deltaTime;
player.transform.rotation = Quaternion.Euler(euler);
}
}
}
Unity: Jon Hales releases the game Blizz Words
Unity: Speech Detection for Edit Mode
Unity: Getting Started with SteamVR
Generate JSON Classes
A [class generator] for JSON would be handy.
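In the meantime, Unity’s JsonUtility covers the simple cases with hand-written classes, which is exactly what a generator would emit. The sample JSON and class below are made up for illustration.
using System;
using UnityEngine;
// the kind of class a JSON class generator would emit for:
// {"name":"Turret","damage":42,"tags":["laser","plasma"]}
[Serializable]
public class WeaponData
{
public string name;
public int damage;
public string[] tags;
}
public static class WeaponDataExample
{
public static WeaponData Parse(string json)
{
// JsonUtility maps JSON fields to public fields by name
return JsonUtility.FromJson<WeaponData>(json);
}
}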
Free Animated Model – Spider Green
Unity GDC 2017 Keynote
Sector 13: VR Flight Test #2
Performance is running smoothly.
Unity: Speech Detection and Synthesis Together
To be able to call out “fire” and “stop”, I made some edits to the `F3DPlayerTurretController.cs` script.
using UnityEngine;
using System.Collections;
using UnityWebGLSpeechDetection;
namespace Forge3D
{
public class F3DPlayerTurretController : MonoBehaviour
{
RaycastHit hitInfo; // Raycast structure
public F3DTurret turret;
bool isFiring; // Is turret currently in firing state
public F3DFXController fxController;
// reference to the proxy
private ISpeechDetectionPlugin _mSpeechDetectionPlugin = null;
enum FireState
{
IDLE,
DETECTED_FIRE,
FIRE_ONCE,
FIRE_IDLE,
DETECTED_STOP,
STOP_ONCE
}
// detect the word once in all updates
private static FireState _sFireState = FireState.IDLE;
// make sure all turrets detect the async word in their update event
private static bool _sReadyForLateUpdate = false;
// init the speech proxy
private IEnumerator Start()
{
// get the singleton instance
_mSpeechDetectionPlugin = ProxySpeechDetectionPlugin.GetInstance();
// check the reference to the plugin
if (null == _mSpeechDetectionPlugin)
{
Debug.LogError("Proxy Speech Detection Plugin is not set!");
yield break;
}
// wait for plugin to become available
while (!_mSpeechDetectionPlugin.IsAvailable())
{
yield return null;
}
// subscribe to events
_mSpeechDetectionPlugin.AddListenerOnDetectionResult(HandleDetectionResult);
// abort and clear existing words
_mSpeechDetectionPlugin.Abort();
}
// Handler for speech detection events
void HandleDetectionResult(object sender, SpeechDetectionEventArgs args)
{
if (null == args.detectionResult)
{
return;
}
SpeechRecognitionResult[] results = args.detectionResult.results;
if (null == results)
{
return;
}
bool doAbort = false;
foreach (SpeechRecognitionResult result in results)
{
SpeechRecognitionAlternative[] alternatives = result.alternatives;
if (null == alternatives)
{
continue;
}
foreach (SpeechRecognitionAlternative alternative in alternatives)
{
if (string.IsNullOrEmpty(alternative.transcript))
{
continue;
}
string lower = alternative.transcript.ToLower();
Debug.LogFormat("Detected: {0}", lower);
if (lower.Contains("fire"))
{
if (_sFireState == FireState.IDLE)
{
_sFireState = FireState.DETECTED_FIRE;
}
doAbort = true;
}
if (lower.Contains("stop"))
{
if (_sFireState == FireState.FIRE_IDLE)
{
_sFireState = FireState.DETECTED_STOP;
}
doAbort = true;
}
}
}
// abort detection on match for faster matching on words instead of complete sentences
if (doAbort)
{
_mSpeechDetectionPlugin.Abort();
}
}
// make the async detected word, detectable at the start of all the update events
void LateUpdate()
{
if (_sReadyForLateUpdate)
{
_sReadyForLateUpdate = false;
switch (_sFireState)
{
case FireState.DETECTED_FIRE:
_sFireState = FireState.FIRE_ONCE;
break;
case FireState.FIRE_ONCE:
_sFireState = FireState.FIRE_IDLE;
break;
case FireState.DETECTED_STOP:
_sFireState = FireState.STOP_ONCE;
break;
case FireState.STOP_ONCE:
_sFireState = FireState.IDLE;
break;
}
}
}
void Update()
{
CheckForTurn();
CheckForFire();
// After update, use one late update to detect the async word
_sReadyForLateUpdate = true;
}
void CheckForFire()
{
// Fire turret
//if (!isFiring && Input.GetKeyDown(KeyCode.Mouse0))
if (!isFiring && _sFireState == FireState.FIRE_ONCE)
{
isFiring = true;
fxController.Fire();
}
// Stop firing
//if (isFiring && Input.GetKeyUp(KeyCode.Mouse0))
if (isFiring && _sFireState == FireState.STOP_ONCE)
{
isFiring = false;
fxController.Stop();
}
}
// (the rest of the original script is unchanged)
}
}
To be able to call out the names of weapons and to add spoken feedback, I made some edits to the `F3DFXController` script.
using System.Collections;
using System;
using UnityEngine;
using UnityEngine.UI;
using UnityWebGLSpeechDetection;
using UnityWebGLSpeechSynthesis;
namespace Forge3D
{
// Weapon types
public enum F3DFXType
{
Vulcan,
SoloGun,
Sniper,
ShotGun,
Seeker,
RailGun,
PlasmaGun,
PlasmaBeam,
PlasmaBeamHeavy,
LightningGun,
FlameRed,
LaserImpulse
}
public class F3DFXController : MonoBehaviour
{
/// <summary>
/// Voices drop down
/// </summary>
public Dropdown _mDropdownVoices = null;
/// <summary>
/// Reference to the proxy
/// </summary>
private ISpeechDetectionPlugin _mSpeechDetectionPlugin = null;
/// <summary>
/// Reference to the proxy
/// </summary>
private ISpeechSynthesisPlugin _mSpeechSynthesisPlugin = null;
/// <summary>
/// Reference to the supported voices
/// </summary>
private VoiceResult _mVoiceResult = null;
/// <summary>
/// Reference to the utterance, voice, and text to speak
/// </summary>
private SpeechSynthesisUtterance _mSpeechSynthesisUtterance = null;
/// <summary>
/// Track when the utterance is created
/// </summary>
private bool _mUtteranceSet = false;
/// <summary>
/// Track when the voices are created
/// </summary>
private bool _mVoicesSet = false;
enum WeaponState
{
IDLE,
DETECTED_LEFT,
LEFT_ONCE,
DETECTED_RIGHT,
RIGHT_ONCE
}
// detect the word once in all updates
private static WeaponState _sWeaponState = WeaponState.IDLE;
// make sure all turrets detect the async word in their update event
private static bool _sReadyForLateUpdate = false;
// Singleton instance
public static F3DFXController instance;
// init the speech proxy
private IEnumerator Start()
{
// get the singleton instance
_mSpeechDetectionPlugin = ProxySpeechDetectionPlugin.GetInstance();
// check the reference to the plugin
if (null == _mSpeechDetectionPlugin)
{
Debug.LogError("Proxy Speech Detection Plugin is not set!");
yield break;
}
// wait for plugin to become available
while (!_mSpeechDetectionPlugin.IsAvailable())
{
yield return null;
}
_mSpeechSynthesisPlugin = ProxySpeechSynthesisPlugin.GetInstance();
if (null == _mSpeechSynthesisPlugin)
{
Debug.LogError("Proxy Speech Synthesis Plugin is not set!");
yield break;
}
// wait for proxy to become available
while (!_mSpeechSynthesisPlugin.IsAvailable())
{
yield return null;
}
// subscribe to events
_mSpeechDetectionPlugin.AddListenerOnDetectionResult(HandleDetectionResult);
// abort and clear existing words
_mSpeechDetectionPlugin.Abort();
// Get voices from proxy
GetVoices();
// Create an instance of SpeechSynthesisUtterance
_mSpeechSynthesisPlugin.CreateSpeechSynthesisUtterance((utterance) =>
{
//Debug.LogFormat("Utterance created: {0}", utterance._mReference);
_mSpeechSynthesisUtterance = utterance;
// The utterance is set
_mUtteranceSet = true;
// Set the default voice if ready
SetIfReadyForDefaultVoice();
});
}
/// <summary>
/// Get voices from the proxy
/// </summary>
/// <returns></returns>
private void GetVoices()
{
// get voices from the proxy
_mSpeechSynthesisPlugin.GetVoices((voiceResult) =>
{
_mVoiceResult = voiceResult;
// prepare the voices drop down items
SpeechSynthesisUtils.PopulateVoicesDropdown(_mDropdownVoices, _mVoiceResult);
// The voices are set
_mVoicesSet = true;
// Set the default voice if ready
SetIfReadyForDefaultVoice();
});
}
/// <summary>
/// Set the default voice if voices and utterance are ready
/// </summary>
private void SetIfReadyForDefaultVoice()
{
if (_mVoicesSet &&
_mUtteranceSet)
{
// set the default voice
SpeechSynthesisUtils.SetDefaultVoice(_mDropdownVoices);
// enable voices dropdown
SpeechSynthesisUtils.SetInteractable(true, _mDropdownVoices);
Voice voice = SpeechSynthesisUtils.GetVoice(_mVoiceResult, SpeechSynthesisUtils.GetDefaultVoice());
_mSpeechSynthesisPlugin.SetVoice(_mSpeechSynthesisUtterance, voice);
// drop down reference must be set
if (_mDropdownVoices)
{
// set up the drop down change listener
_mDropdownVoices.onValueChanged.AddListener(delegate {
// handle the voice change event, and set the voice on the utterance
SpeechSynthesisUtils.HandleVoiceChanged(_mDropdownVoices,
_mVoiceResult,
_mSpeechSynthesisUtterance,
_mSpeechSynthesisPlugin);
});
}
}
}
/// <summary>
/// Speak the utterance
/// </summary>
private void Speak(string text)
{
if (!_mVoicesSet ||
!_mUtteranceSet)
{
// not ready
return;
}
// Cancel if already speaking
_mSpeechSynthesisPlugin.Cancel();
// Set the text that will be spoken
_mSpeechSynthesisPlugin.SetText(_mSpeechSynthesisUtterance, text);
// Use the plugin to speak the utterance
_mSpeechSynthesisPlugin.Speak(_mSpeechSynthesisUtterance);
}
// Handler for speech detection events
void HandleDetectionResult(object sender, SpeechDetectionEventArgs args)
{
if (null == args.detectionResult)
{
return;
}
SpeechRecognitionResult[] results = args.detectionResult.results;
if (null == results)
{
return;
}
bool doAbort = false;
foreach (SpeechRecognitionResult result in results)
{
SpeechRecognitionAlternative[] alternatives = result.alternatives;
if (null == alternatives)
{
continue;
}
foreach (SpeechRecognitionAlternative alternative in alternatives)
{
if (string.IsNullOrEmpty(alternative.transcript))
{
continue;
}
string lower = alternative.transcript.ToLower();
Debug.LogFormat("Detected: {0}", lower);
if (lower.Contains("left"))
{
if (_sWeaponState == WeaponState.IDLE)
{
_sWeaponState = WeaponState.DETECTED_LEFT;
}
doAbort = true;
break;
}
else if (lower.Contains("right"))
{
if (_sWeaponState == WeaponState.IDLE)
{
_sWeaponState = WeaponState.DETECTED_RIGHT;
}
doAbort = true;
break;
}
else if (lower.Contains("lightning"))
{
if (DefaultFXType != F3DFXType.LightningGun)
{
DefaultFXType = F3DFXType.LightningGun;
Speak(string.Format("{0} is active, sir", DefaultFXType));
}
doAbort = true;
break;
}
else if (lower.Contains("beam"))
{
if (DefaultFXType != F3DFXType.PlasmaBeam)
{
DefaultFXType = F3DFXType.PlasmaBeam;
Speak(string.Format("{0} is active, sir", DefaultFXType));
}
doAbort = true;
break;
}
}
}
// abort detection on match for faster matching on words instead of complete sentences
if (doAbort)
{
_mSpeechDetectionPlugin.Abort();
}
}
// make the async detected word, detectable at the start of all the update events
void LateUpdate()
{
if (_sReadyForLateUpdate)
{
_sReadyForLateUpdate = false;
switch (_sWeaponState)
{
case WeaponState.DETECTED_LEFT:
_sWeaponState = WeaponState.LEFT_ONCE;
break;
case WeaponState.LEFT_ONCE:
_sWeaponState = WeaponState.IDLE;
break;
case WeaponState.DETECTED_RIGHT:
_sWeaponState = WeaponState.RIGHT_ONCE;
break;
case WeaponState.RIGHT_ONCE:
_sWeaponState = WeaponState.IDLE;
break;
}
}
}
void Update()
{
// Switch weapon types using speech commands (previously keyboard keys)
//if (Input.GetKeyDown(KeyCode.RightArrow))
if (_sWeaponState == WeaponState.RIGHT_ONCE)
NextWeapon();
//else if (Input.GetKeyDown(KeyCode.LeftArrow))
if (_sWeaponState == WeaponState.LEFT_ONCE)
PrevWeapon();
// After update, use one late update to detect the async word
_sReadyForLateUpdate = true;
}
// (the rest of the original script is unchanged)
}
}
Unity: OSVR Rendering Plugin For Unity
Dependencies:
* (32-bit) Includes: Add `C:\Program Files\OSVR\SDK\x86\include`
* (64-bit) Includes: Add `C:\Program Files\OSVR\SDK\x64\include`
* (32-bit) Libraries: Add `C:\Program Files\OSVR\SDK\x86\lib`
* (64-bit) Libraries: Add `C:\Program Files\OSVR\SDK\x64\lib`
* Add Library: osvrRenderManager.lib
[Boost C++ Libraries] [binaries]
* (32-bit) Includes: Add `C:\local\boost_1_63_0_x86`
* (64-bit) Includes: Add `C:\local\boost_1_63_0_x64`
* (32-bit) Libraries: Add `C:\local\boost_1_63_0_x86\libs`
* (64-bit) Libraries: Add `C:\local\boost_1_63_0_x64\libs`
* Includes: Add `C:\local\glew-2.0.0\include`
* (32-bit) Libraries: Add `C:\local\glew-2.0.0\lib\Release\Win32`
* (64-bit) Libraries: Add `C:\local\glew-2.0.0\lib\Release\x64`
SteamVR: Hide the floor mat
Question: [How do you hide the floor mat]?
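A minimal sketch of one approach, assuming the floor mat refers to the play area rectangle drawn by the SteamVR_PlayArea component on the `[CameraRig]` prefab: disable its renderer at startup.
using UnityEngine;
public class HideFloorMat : MonoBehaviour
{
private void Start()
{
// assumes the floor mat is the play area rendered by SteamVR_PlayArea
SteamVR_PlayArea playArea = FindObjectOfType<SteamVR_PlayArea>();
if (null == playArea)
{
return;
}
MeshRenderer meshRenderer = playArea.GetComponent<MeshRenderer>();
if (null != meshRenderer)
{
meshRenderer.enabled = false;
}
}
}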
Unity: WebGL Speech Synthesis
My WebGL Speech Synthesis package has been accepted into the Unity Asset Store.
Unity: SteamVR Center View
The Unity [SteamVR Plugin] has an API to center the VR view. It’s great when VR apps have a key mapped to do this.
using UnityEngine;
using Valve.VR;
public class SteamVRRecenter : MonoBehaviour
{
// Keep the script around
private void Start()
{
DontDestroyOnLoad(gameObject);
}
// Poll input every frame; FixedUpdate can miss key events
void Update()
{
if (Input.GetKeyUp(KeyCode.L))
{
var system = OpenVR.System;
if (system != null)
{
system.ResetSeatedZeroPose();
}
}
}
}
Unity: Webcam Project
The Unity API makes it possible to render a webcam feed onto a texture.
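Here’s a minimal sketch using WebCamTexture, attached to any object with a Renderer. Using the default device is an assumption; a real project would let the user pick from WebCamTexture.devices.
using UnityEngine;
public class WebcamDisplay : MonoBehaviour
{
private WebCamTexture _mWebCamTexture = null;
private void Start()
{
// use the default camera device and stream it to this object's material
_mWebCamTexture = new WebCamTexture();
GetComponent<Renderer>().material.mainTexture = _mWebCamTexture;
_mWebCamTexture.Play();
}
private void OnDestroy()
{
// release the camera when the object goes away
if (null != _mWebCamTexture)
{
_mWebCamTexture.Stop();
}
}
}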
Unity: Switch Weapons With Speech
Unity: Drive Turrets With Speech
The [Sci-Fi Effects] asset comes with some great-looking turrets and effects.
I used the [WebGL Speech Detection] package to add speech commands.
And to make speech work in the Unity editor, I added the [Chrome Speech Proxy].
To make Speech Detection work in the Turret example, I made some edits to the `F3DPlayerTurretController.cs` script.
// reference to the proxy
private ProxySpeechDetectionPlugin _mProxySpeechDetectionPlugin = null;
enum FireState
{
IDLE,
DETECTED_FIRE,
FIRE_ONCE,
FIRE_IDLE,
DETECTED_STOP,
STOP_ONCE
}
// detect the word once in all updates
private static FireState _sFireState = FireState.IDLE;
// make sure all turrets detect the async word in their update event
private static bool _sReadyForLateUpdate = false;
// init the speech proxy
private IEnumerator Start()
{
while (null == WebGLSpeechDetectionPlugin.GetInstance() ||
null == ProxySpeechDetectionPlugin.GetInstance() ||
!ProxySpeechDetectionPlugin.GetInstance().IsAvailable())
{
yield return null;
}
// reference to the plugin
WebGLSpeechDetectionPlugin plugin = WebGLSpeechDetectionPlugin.GetInstance();
// subscribe to events
plugin.OnDetectionResult += HandleDetectionResult;
// reference to the proxy
_mProxySpeechDetectionPlugin = ProxySpeechDetectionPlugin.GetInstance();
// abort and clear existing words
_mProxySpeechDetectionPlugin.Abort();
}
// Handler for speech detection events
void HandleDetectionResult(object sender, WebGLSpeechDetectionPlugin.SpeechDetectionEventArgs args)
{
if (null == args.detectionResult)
{
return;
}
WebGLSpeechDetectionPlugin.SpeechRecognitionResult[] results = args.detectionResult.results;
if (null == results)
{
return;
}
bool doAbort = false;
foreach (WebGLSpeechDetectionPlugin.SpeechRecognitionResult result in results)
{
WebGLSpeechDetectionPlugin.SpeechRecognitionAlternative[] alternatives = result.alternatives;
if (null == alternatives)
{
continue;
}
foreach (WebGLSpeechDetectionPlugin.SpeechRecognitionAlternative alternative in alternatives)
{
if (string.IsNullOrEmpty(alternative.transcript))
{
continue;
}
string lower = alternative.transcript.ToLower();
Debug.LogFormat("Detected: {0}", lower);
if (lower.Contains("fire"))
{
if (_sFireState == FireState.IDLE)
{
_sFireState = FireState.DETECTED_FIRE;
}
doAbort = true;
}
if (lower.Contains("stop"))
{
if (_sFireState == FireState.FIRE_IDLE)
{
_sFireState = FireState.DETECTED_STOP;
}
doAbort = true;
}
}
}
// abort detection on match for faster matching on words instead of complete sentences
if (doAbort)
{
_mProxySpeechDetectionPlugin.Abort();
}
}
// make the async detected word, detectable at the start of all the update events
void LateUpdate()
{
if (_sReadyForLateUpdate)
{
_sReadyForLateUpdate = false;
switch (_sFireState)
{
case FireState.DETECTED_FIRE:
_sFireState = FireState.FIRE_ONCE;
break;
case FireState.FIRE_ONCE:
_sFireState = FireState.FIRE_IDLE;
break;
case FireState.DETECTED_STOP:
_sFireState = FireState.STOP_ONCE;
break;
case FireState.STOP_ONCE:
_sFireState = FireState.IDLE;
break;
}
}
}
void Update()
{
CheckForTurn();
CheckForFire();
// After update, use one late update to detect the async word
_sReadyForLateUpdate = true;
}
void CheckForFire()
{
// Fire turret
//if (!isFiring && Input.GetKeyDown(KeyCode.Mouse0))
if (!isFiring && _sFireState == FireState.FIRE_ONCE)
{
isFiring = true;
fxController.Fire();
}
// Stop firing
//if (isFiring && Input.GetKeyUp(KeyCode.Mouse0))
if (isFiring && _sFireState == FireState.STOP_ONCE)
{
isFiring = false;
fxController.Stop();
}
}
To be able to call out the names of weapons, I made some edits to the `F3DFXController` script.
// reference to the proxy
private ProxySpeechDetectionPlugin _mProxySpeechDetectionPlugin = null;
enum WeaponState
{
IDLE,
DETECTED_LEFT,
LEFT_ONCE,
DETECTED_RIGHT,
RIGHT_ONCE
}
// detect the word once in all updates
private static WeaponState _sWeaponState = WeaponState.IDLE;
// make sure all turrets detect the async word in their update event
private static bool _sReadyForLateUpdate = false;
// Singleton instance
public static F3DFXController instance;
// init the speech proxy
private IEnumerator Start()
{
while (null == WebGLSpeechDetectionPlugin.GetInstance() ||
null == ProxySpeechDetectionPlugin.GetInstance() ||
!ProxySpeechDetectionPlugin.GetInstance().IsAvailable())
{
yield return null;
}
// reference to the plugin
WebGLSpeechDetectionPlugin plugin = WebGLSpeechDetectionPlugin.GetInstance();
// subscribe to events
plugin.OnDetectionResult += HandleDetectionResult;
// reference to the proxy
_mProxySpeechDetectionPlugin = ProxySpeechDetectionPlugin.GetInstance();
// abort and clear existing words
_mProxySpeechDetectionPlugin.Abort();
}
// Handler for speech detection events
void HandleDetectionResult(object sender, WebGLSpeechDetectionPlugin.SpeechDetectionEventArgs args)
{
if (null == args.detectionResult)
{
return;
}
WebGLSpeechDetectionPlugin.SpeechRecognitionResult[] results = args.detectionResult.results;
if (null == results)
{
return;
}
bool doAbort = false;
foreach (WebGLSpeechDetectionPlugin.SpeechRecognitionResult result in results)
{
WebGLSpeechDetectionPlugin.SpeechRecognitionAlternative[] alternatives = result.alternatives;
if (null == alternatives)
{
continue;
}
foreach (WebGLSpeechDetectionPlugin.SpeechRecognitionAlternative alternative in alternatives)
{
if (string.IsNullOrEmpty(alternative.transcript))
{
continue;
}
string lower = alternative.transcript.ToLower();
Debug.LogFormat("Detected: {0}", lower);
if (lower.Contains("left"))
{
if (_sWeaponState == WeaponState.IDLE)
{
_sWeaponState = WeaponState.DETECTED_LEFT;
}
doAbort = true;
}
else if (lower.Contains("right"))
{
if (_sWeaponState == WeaponState.IDLE)
{
_sWeaponState = WeaponState.DETECTED_RIGHT;
}
doAbort = true;
}
else if (lower.Contains("lightning"))
{
DefaultFXType = F3DFXType.LightningGun;
doAbort = true;
}
else if (lower.Contains("beam"))
{
DefaultFXType = F3DFXType.PlasmaBeam;
doAbort = true;
}
}
}
// abort detection on match for faster matching on words instead of complete sentences
if (doAbort)
{
_mProxySpeechDetectionPlugin.Abort();
}
}
// make the async detected word, detectable at the start of all the update events
void LateUpdate()
{
if (_sReadyForLateUpdate)
{
_sReadyForLateUpdate = false;
switch (_sWeaponState)
{
case WeaponState.DETECTED_LEFT:
_sWeaponState = WeaponState.LEFT_ONCE;
break;
case WeaponState.LEFT_ONCE:
_sWeaponState = WeaponState.IDLE;
break;
case WeaponState.DETECTED_RIGHT:
_sWeaponState = WeaponState.RIGHT_ONCE;
break;
case WeaponState.RIGHT_ONCE:
_sWeaponState = WeaponState.IDLE;
break;
}
}
}
void Update()
{
// Switch weapon types using speech commands (previously keyboard keys)
//if (Input.GetKeyDown(KeyCode.RightArrow))
if (_sWeaponState == WeaponState.RIGHT_ONCE)
NextWeapon();
//else if (Input.GetKeyDown(KeyCode.LeftArrow))
if (_sWeaponState == WeaponState.LEFT_ONCE)
PrevWeapon();
// After update, use one late update to detect the async word
_sReadyForLateUpdate = true;
}
Chrome Speech Proxy
The [Chrome Speech Proxy] uses the Chrome browser’s Speech API to do real-time speech detection without any quotas. This makes speech detection available on Windows and in the Unity editor.
