Question: [How do you hide the floor mat]?
Allegorithmic: Substance Painter 2.5
Unity: WebGL Speech Synthesis
My WebGL Speech Synthesis package has been accepted into the Unity Asset Store.
Unity: SteamVR Center View
The Unity [SteamVR Plugin] has an API to center the VR view. It’s great when VR apps have a key mapped to do this.
using UnityEngine;
using Valve.VR;
public class SteamVRRecenter : MonoBehaviour
{
    // Keep the script alive across scene loads so recentering always works.
    private void Start()
    {
        DontDestroyOnLoad(gameObject);
    }

    // Poll for the recenter key once per rendered frame.
    // Fix: input polling moved from FixedUpdate to Update. FixedUpdate runs on
    // the physics timestep and may execute zero or several times per rendered
    // frame, so per-frame input events such as Input.GetKeyUp can be missed
    // or handled more than once there.
    void Update()
    {
        if (Input.GetKeyUp(KeyCode.L))
        {
            // Recenter the seated-mode origin on the current head pose.
            var system = OpenVR.System;
            if (system != null)
            {
                system.ResetSeatedZeroPose();
            }
        }
    }
}
Live Illustration with Jennet Liaw and Rob Generette III – AdobeLive
Live Illustration with Kyle Webster (KyleBrush) – AdobeLive
Unity: Webcam Project
The Unity API makes it possible to show a web camera on a texture.
CRS-10 | Falcon 9 First Stage Landing
Unity: Switch Weapons With Speech
Unity: Drive Turrets With Speech
The [Sci-Fi Effects] asset comes with some great-looking turrets and effects.
I used the [WebGL Speech Detection] package to add speech commands.
And to make speech work in the Unity editor, I added the [Chrome Speech Proxy].
To make Speech Detection work in the Turret example, I made some edits to the `F3DPlayerTurretController.cs` script.
// reference to the proxy, assigned by Start() once the plugin reports available
private ProxySpeechDetectionPlugin _mProxySpeechDetectionPlugin = null;
// One-shot state machine driven by async speech results:
// IDLE -> DETECTED_FIRE -> FIRE_ONCE -> FIRE_IDLE -> DETECTED_STOP -> STOP_ONCE -> IDLE
// (transitions advance one step per LateUpdate pass)
enum FireState
{
IDLE,
DETECTED_FIRE,
FIRE_ONCE,
FIRE_IDLE,
DETECTED_STOP,
STOP_ONCE
}
// detect the word once in all updates (static: shared by every turret instance)
private static FireState _sFireState = FireState.IDLE;
// make sure all turrets detect the async word in their update event
private static bool _sReadyForLateUpdate = false;
// Coroutine: block until the speech plugin stack is usable, then wire up
// the detection callback and flush any stale captured speech.
private IEnumerator Start()
{
    for (;;)
    {
        WebGLSpeechDetectionPlugin detection = WebGLSpeechDetectionPlugin.GetInstance();
        ProxySpeechDetectionPlugin proxy = ProxySpeechDetectionPlugin.GetInstance();
        if (detection != null &&
            proxy != null &&
            proxy.IsAvailable())
        {
            // Listen for recognition results.
            detection.OnDetectionResult += HandleDetectionResult;
            // Cache the proxy so matches can abort in-flight detection later.
            _mProxySpeechDetectionPlugin = proxy;
            // Drop any words captured before startup finished.
            _mProxySpeechDetectionPlugin.Abort();
            yield break;
        }
        // Not ready yet -- try again next frame.
        yield return null;
    }
}
// Handler for speech detection events.
// Scans every alternative transcript for the "fire"/"stop" keywords and
// advances the shared FireState machine; aborts detection on a match so
// single words are recognized without waiting for a full sentence.
void HandleDetectionResult(object sender, WebGLSpeechDetectionPlugin.SpeechDetectionEventArgs args)
{
    if (null == args.detectionResult)
    {
        return;
    }
    WebGLSpeechDetectionPlugin.SpeechRecognitionResult[] results = args.detectionResult.results;
    if (null == results)
    {
        return;
    }
    bool doAbort = false;
    foreach (WebGLSpeechDetectionPlugin.SpeechRecognitionResult result in results)
    {
        WebGLSpeechDetectionPlugin.SpeechRecognitionAlternative[] alternatives = result.alternatives;
        if (null == alternatives)
        {
            continue;
        }
        foreach (WebGLSpeechDetectionPlugin.SpeechRecognitionAlternative alternative in alternatives)
        {
            if (string.IsNullOrEmpty(alternative.transcript))
            {
                continue;
            }
            // Fix: invariant-culture lowering -- plain ToLower() is
            // culture-sensitive (e.g. the Turkish dotless-i would break
            // the "fire" match under some locales).
            string lower = alternative.transcript.ToLowerInvariant();
            Debug.LogFormat("Detected: {0}", lower);
            if (lower.Contains("fire"))
            {
                // Only trigger from IDLE so one utterance fires exactly once.
                if (_sFireState == FireState.IDLE)
                {
                    _sFireState = FireState.DETECTED_FIRE;
                }
                doAbort = true;
            }
            if (lower.Contains("stop"))
            {
                // "stop" only makes sense while the turret is fire-idle.
                if (_sFireState == FireState.FIRE_IDLE)
                {
                    _sFireState = FireState.DETECTED_STOP;
                }
                doAbort = true;
            }
        }
    }
    // abort detection on match for faster matching on words instead of complete sentences.
    // Fix: guard the proxy reference -- this handler is subscribed in Start()
    // before _mProxySpeechDetectionPlugin is assigned, so an early event
    // could otherwise hit a null reference.
    if (doAbort && _mProxySpeechDetectionPlugin != null)
    {
        _mProxySpeechDetectionPlugin.Abort();
    }
}
// Promote the asynchronously detected word one state per frame so every
// turret's next Update sees each one-shot state exactly once.
void LateUpdate()
{
    if (!_sReadyForLateUpdate)
    {
        return;
    }
    _sReadyForLateUpdate = false;
    if (_sFireState == FireState.DETECTED_FIRE)
    {
        _sFireState = FireState.FIRE_ONCE;
    }
    else if (_sFireState == FireState.FIRE_ONCE)
    {
        _sFireState = FireState.FIRE_IDLE;
    }
    else if (_sFireState == FireState.DETECTED_STOP)
    {
        _sFireState = FireState.STOP_ONCE;
    }
    else if (_sFireState == FireState.STOP_ONCE)
    {
        _sFireState = FireState.IDLE;
    }
}
// Per-frame turret logic: aim first, then apply any one-shot speech command.
void Update()
{
    CheckForTurn();
    CheckForFire();
    // After update, use one late update to detect the async word
    _sReadyForLateUpdate = true;
}
// Start or stop the turret effects based on the one-shot speech state.
// (Replaces the original mouse-button checks, kept here as history.)
void CheckForFire()
{
    // "fire" was detected this frame and we are not already firing -> begin.
    //if (!isFiring && Input.GetKeyDown(KeyCode.Mouse0))
    if (_sFireState == FireState.FIRE_ONCE && !isFiring)
    {
        isFiring = true;
        fxController.Fire();
    }
    // "stop" was detected this frame while firing -> cease fire.
    //if (isFiring && Input.GetKeyUp(KeyCode.Mouse0))
    if (_sFireState == FireState.STOP_ONCE && isFiring)
    {
        isFiring = false;
        fxController.Stop();
    }
}
To be able to call out the names of weapons, I made some edits to the `F3DFXController` script.
// reference to the proxy, assigned by Start() once the plugin reports available
private ProxySpeechDetectionPlugin _mProxySpeechDetectionPlugin = null;
// One-shot state machine for the weapon-switch words ("left"/"right");
// each *_ONCE state is visible for exactly one frame (see LateUpdate).
enum WeaponState
{
IDLE,
DETECTED_LEFT,
LEFT_ONCE,
DETECTED_RIGHT,
RIGHT_ONCE
}
// detect the word once in all updates (static: shared across instances)
private static WeaponState _sWeaponState = WeaponState.IDLE;
// make sure all turrets detect the async word in their update event
private static bool _sReadyForLateUpdate = false;
// Singleton instance
public static F3DFXController instance;
// Coroutine: poll once per frame until the speech plugin stack is usable,
// then subscribe for results and clear any stale captured words.
private IEnumerator Start()
{
    WebGLSpeechDetectionPlugin detection = WebGLSpeechDetectionPlugin.GetInstance();
    ProxySpeechDetectionPlugin proxy = ProxySpeechDetectionPlugin.GetInstance();
    while (detection == null || proxy == null || !proxy.IsAvailable())
    {
        yield return null;
        detection = WebGLSpeechDetectionPlugin.GetInstance();
        proxy = ProxySpeechDetectionPlugin.GetInstance();
    }
    // Receive recognition results from the detection plugin.
    detection.OnDetectionResult += HandleDetectionResult;
    // Keep the proxy around so keyword matches can abort in-flight detection.
    _mProxySpeechDetectionPlugin = proxy;
    // Flush anything heard before startup finished.
    _mProxySpeechDetectionPlugin.Abort();
}
// Handler for speech detection events.
// Matches weapon-switch words: "left"/"right" arm the one-shot WeaponState
// machine, while "lightning"/"beam" select a weapon type immediately.
void HandleDetectionResult(object sender, WebGLSpeechDetectionPlugin.SpeechDetectionEventArgs args)
{
    if (null == args.detectionResult)
    {
        return;
    }
    WebGLSpeechDetectionPlugin.SpeechRecognitionResult[] results = args.detectionResult.results;
    if (null == results)
    {
        return;
    }
    bool doAbort = false;
    foreach (WebGLSpeechDetectionPlugin.SpeechRecognitionResult result in results)
    {
        WebGLSpeechDetectionPlugin.SpeechRecognitionAlternative[] alternatives = result.alternatives;
        if (null == alternatives)
        {
            continue;
        }
        foreach (WebGLSpeechDetectionPlugin.SpeechRecognitionAlternative alternative in alternatives)
        {
            if (string.IsNullOrEmpty(alternative.transcript))
            {
                continue;
            }
            // Fix: invariant-culture lowering -- plain ToLower() is
            // culture-sensitive and can break keyword matches under
            // some system locales (e.g. Turkish dotless-i).
            string lower = alternative.transcript.ToLowerInvariant();
            Debug.LogFormat("Detected: {0}", lower);
            if (lower.Contains("left"))
            {
                // Arm the one-shot state only from IDLE.
                if (_sWeaponState == WeaponState.IDLE)
                {
                    _sWeaponState = WeaponState.DETECTED_LEFT;
                }
                doAbort = true;
            }
            else if (lower.Contains("right"))
            {
                if (_sWeaponState == WeaponState.IDLE)
                {
                    _sWeaponState = WeaponState.DETECTED_RIGHT;
                }
                doAbort = true;
            }
            else if (lower.Contains("lightning"))
            {
                // Direct weapon selection -- no state machine needed.
                DefaultFXType = F3DFXType.LightningGun;
                doAbort = true;
            }
            else if (lower.Contains("beam"))
            {
                DefaultFXType = F3DFXType.PlasmaBeam;
                doAbort = true;
            }
        }
    }
    // abort detection on match for faster matching on words instead of complete sentences.
    // Fix: guard the proxy reference -- this handler is subscribed in Start()
    // before _mProxySpeechDetectionPlugin is assigned, so an early event
    // could otherwise hit a null reference.
    if (doAbort && _mProxySpeechDetectionPlugin != null)
    {
        _mProxySpeechDetectionPlugin.Abort();
    }
}
// Advance the weapon-word state machine one step per frame so the *_ONCE
// states are observable for exactly one Update pass.
void LateUpdate()
{
    if (!_sReadyForLateUpdate)
    {
        return;
    }
    _sReadyForLateUpdate = false;
    if (_sWeaponState == WeaponState.DETECTED_LEFT)
    {
        _sWeaponState = WeaponState.LEFT_ONCE;
    }
    else if (_sWeaponState == WeaponState.LEFT_ONCE)
    {
        _sWeaponState = WeaponState.IDLE;
    }
    else if (_sWeaponState == WeaponState.DETECTED_RIGHT)
    {
        _sWeaponState = WeaponState.RIGHT_ONCE;
    }
    else if (_sWeaponState == WeaponState.RIGHT_ONCE)
    {
        _sWeaponState = WeaponState.IDLE;
    }
}
// Switch weapon types when a one-shot speech state is active this frame.
// Fix: braces added to the single-statement ifs (always-brace convention;
// the unbraced form is fragile when lines are added later).
void Update()
{
    // Switch weapon types using speech instead of the arrow keys.
    // NOTE(review): "left" advances via NextWeapon() and "right" goes back
    // via PrevWeapon(), which inverts the original commented-out arrow-key
    // mapping (RightArrow -> NextWeapon). Confirm this direction is intended.
    //if (Input.GetKeyDown(KeyCode.RightArrow))
    if (_sWeaponState == WeaponState.LEFT_ONCE)
    {
        NextWeapon();
    }
    //else if (Input.GetKeyDown(KeyCode.LeftArrow))
    if (_sWeaponState == WeaponState.RIGHT_ONCE)
    {
        PrevWeapon();
    }
    // After update, use one late update to detect the async word
    _sReadyForLateUpdate = true;
}
Chrome Speech Proxy
The [Chrome Speech Proxy] uses the Chrome Browser for the Speech API to do real-time speech detection without any quotas. This makes Speech Detection available on Windows and in the Unity editor.
Certificates: Generating a Self-Signed Certificate for HTTPS
A certificate can be generated to work for [Httplistener with https support].
Space Station Hosts First Hangout
[Carrying the Fire: An Astronaut’s Journeys] by Michael Collins.
Emotiv: Community SDK
Emotiv has a free [Community SDK] for interacting with the Insight and Epoc headsets.
The developer community hangs out on the [forums] and in the [G+ Community].
Unity: CEF Proxy
I used a proxy with a custom build of [CEF] to send speech data to the [Word Detection Unity Plugin].
Copy a recent TestApp build from `Branch 2704` (Windows 64-bit) from [Chromium Embedded Framework 3 Builds] into the `CefGlue.Demo.WinFormsProxy` output folder to get all the dependent libraries.
Or download a build of the [SpeechProxy].
Quick Start Guide:
1. Command-line arguments are used to enable speech detection
CefGlue.Demo.WinFormsProxy.exe --enable-speech-input
2. Use the help menu to [acquire keys] for the Speech API
3. Enable the `Speech API Private API`
4. The Speech API has a `queries per day` quota.
5. Assign the Speech API keys
6. Open the [Chrome Speech Demo] to verify that the Speech API is working
7. The [Google Developers Console] shows traffic hitting the Speech API.
8. The Unity Proxy Speech Detection Plugin communicates with the Speech Proxy (email support@theylovegames.com to get access to the beta)
9. Use a proxy port that matches the port in Unity
10. The proxy URL should be pointed at the HTTPS proxy script https://theylovegames.com/speech_proxy/ when connecting to Unity.
Udemy: How to setup a SQLite Database in an Android App
Udemy has a great course that covers how to use SQLite databases.
Unreal: Built-in JS Support
It looks like the existing Unreal build system can include JS files.
ModuleRules: [HTML5JS.Build.cs]
Sample JS: [HTML5JavaScriptFx.js]
C++ hooks: [HTML5JavaScriptFx.h]
Chromium Embedded Framework
The [Chromium Embedded Framework (CEF)] is a simple framework for embedding Chromium-based browsers in other applications.
[Xilium.CefGlue] is a .NET/Mono binding for The Chromium Embedded Framework (CEF).
[Chromium Embedded Framework 3 Builds]
The CEF [Add support for the Web Speech API] feature requires that [api keys] are generated to work and are capped at [50 requests].
[Cloud Speech Discussion Group]
Forums: [UnityCEF]
Forums: [CEF Speech API]:
It is NOT possible to get additional quota for Chrome’s Speech API. Look at the [Cloud Speech API] instead.
Do NOT post to any Chromium groups/mailing lists for questions about the Speech API.
Source: [fork]
Adobe Fuse CC Tutorial – Create custom 3D characters, bring them into Photoshop CC 2015
Raspberry PI 2 with Leap
The Leap Motion Controller requires ARM-9 or better, so to make it work with the Raspberry Pi 2 I used a proxy HTTP server. The Raspberry Pi 2 controls servos based on the data from the Leap; each finger on the Leap controls a different servo.
The project in action:
Details about the code:
The proxy sends JSON data for the finger rotations (in degrees).
{
"thumb": 27.815885630721692,
"index": 8.8111549114070726,
"middle": 16.216426372741033,
"ring": 29.267951404867844,
"pinky": 86.043786182477533
}
The script: `LeapServos.py`
#!/usr/bin/env python
# LeapServos.py -- Python 2 script (urllib2, print statements).
# Polls a Leap Motion HTTP proxy for finger angles and drives hobby servos
# on a Raspberry Pi 2 via software PWM.
import RPi.GPIO as GPIO
import datetime
import time
import urllib2
import threading
import json
# BOARD-numbered pins for the two wired servos.
servo_pin2 = 18
servo_pin3 = 22
# 60 degrees / 0.1 seconds (servo movement rate; not read elsewhere in this file)
servo_speed = 0.1
GPIO.setwarnings(False)
# Use physical (BOARD) pin numbering.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(servo_pin2, GPIO.OUT)
GPIO.setup(servo_pin3, GPIO.OUT)
# Timestamps used by the main loop to compute per-iteration deltaTime.
last_time = datetime.datetime.now()
current_time = datetime.datetime.now()
accuracy = 0.01
# Current commanded angle per finger, in degrees (90 = neutral).
rotation1 = 90
rotation2 = 90
rotation3 = 90
rotation4 = 90
rotation5 = 90
# Remaining movement-time per servo; values >= 100 act as a
# "target changed" flag (see compare()/update()).
timeRotation1 = 0
timeRotation2 = 0
timeRotation3 = 0
timeRotation4 = 0
timeRotation5 = 0
# Desired angle per finger, written by the polling thread (newTarget).
targetRotation1 = 90
targetRotation2 = 90
targetRotation3 = 90
targetRotation4 = 90
targetRotation5 = 90
# 50 Hz PWM channels for the two wired servos.
pulse2 = GPIO.PWM(servo_pin2, 50)
pulse3 = GPIO.PWM(servo_pin3, 50)
# Next time the (mostly commented-out) debug log may fire.
logTime = datetime.datetime.now()
# Cleared on shutdown so the polling thread exits.
stayAlive = True
def getRotation(rotation):
    # Quantize a Leap finger angle (degrees) into one of three servo
    # targets: fully bent (180), halfway (90), or straight (0).
    if rotation > 90.0:
        return 180
    return 90 if rotation > 45.0 else 0
def newTarget():
    # Background-thread body: polls the Leap HTTP proxy in a tight loop and
    # stores a quantized target angle for each finger until stayAlive clears.
    # NOTE(review): the proxy address is hard-coded; confirm 192.168.2.97 is
    # the machine running the Leap proxy on your network.
    global targetRotation1
    global targetRotation2
    global targetRotation3
    global targetRotation4
    global targetRotation5
    while (stayAlive):
        # Blocking fetch of the JSON finger-angle payload (Python 2 urllib2).
        content = urllib2.urlopen("http://192.168.2.97").read()
        #print (content)
        jsonObject = json.loads(content)
        #print (jsonObject)
        #for key in jsonObject:
        # print (key)
        #print ("index: " + str(jsonObject["index"]))
        # Quantize each finger angle to 0/90/180 for the servo controller.
        targetRotation1 = getRotation(jsonObject["thumb"])
        targetRotation2 = getRotation(jsonObject["index"])
        targetRotation3 = getRotation(jsonObject["middle"])
        targetRotation4 = getRotation(jsonObject["ring"])
        targetRotation5 = getRotation(jsonObject["pinky"])
        # Yield the GIL without delaying the next poll.
        time.sleep(0)
    print ("Thread complete.")
    return
# Start the proxy-polling thread before entering the control loop below.
webThread = threading.Thread(target=newTarget, args=(), kwargs={})
webThread.start()
def log():
    # Rate-limited debug hook: re-arms itself at most every half second.
    # The actual print statements are commented out; only the throttle
    # timestamp is maintained.
    global logTime
    global timeRotation2
    global timeRotation3
    global targetRotation2
    global targetRotation3
    if logTime < datetime.datetime.now():
        # Push the next allowed log time 0.5 s into the future.
        logTime = datetime.datetime.now() + datetime.timedelta(0, 0.5)
        #print "****"
        #print ("2: TargetRotation: " + str(targetRotation2) + " Time: "+str(timeRotation2))
        #print ("3: TargetRotation: " + str(targetRotation3) + " Time: "+str(timeRotation3))
    return
def reset(pwm):
    # Center the servo: 7.5% duty at 50 Hz corresponds to the 90-degree
    # neutral pulse. Hold it for half a second, then cut the signal so the
    # servo stops actively driving (0% duty = no pulses).
    pwm.start(7.5)
    pwm.ChangeDutyCycle(7.5)
    time.sleep(0.5)
    pwm.ChangeDutyCycle(0)
    return
def compare(timeRotation, rotation, targetRotation):
    """Advance the movement timer for one servo.

    Returns the new timer value:
      * 0.25 when the timer was in the "target changed" state (>= 100),
        starting a fixed 0.25 s movement window;
      * the timer minus the elapsed frame time while still counting down
        toward an unchanged target;
      * the timer unchanged once it has already expired (< 0);
      * 100 (the "target changed" flag) when the target differs from the
        current rotation.
    """
    # deltaTime is recomputed by the main loop on every iteration.
    global deltaTime
    if timeRotation >= 100:
        return 0.25
    if rotation == targetRotation:
        if timeRotation >= 0.0:
            # Still moving: count down by the frame's elapsed time.
            return timeRotation - deltaTime.total_seconds()
        # Timer expired; nothing to do.
        return timeRotation
    # Fix: use the print() form, which behaves identically here under both
    # Python 2 and Python 3 (the original Python-2-only print statement is a
    # syntax error on Python 3); also dropped a stray trailing semicolon.
    print("targetRotation changed.")
    return 100
def update(pulse, timeRotation, targetRotation):
    # Apply the timer state to one PWM channel and return the rotation the
    # servo is now being driven toward.
    duty = 0
    if timeRotation >= 100:
        # Target just changed: choose the duty cycle for the requested angle
        # (7.5% = 90 deg, 2.5% = 0 deg, 12.5% = 180 deg) and start driving.
        if targetRotation == 90:
            duty = 7.5
        elif targetRotation == 0:
            duty = 2.5
        else:
            duty = 12.5
        print ("Cycle: "+str(duty))
        pulse.ChangeDutyCycle(duty)
    elif timeRotation >= 0.0 and ((timeRotation - deltaTime.total_seconds()) <= 0.0):
        # Movement window has just elapsed: stop sending pulses.
        pulse.ChangeDutyCycle(0)
        print ("Cycle: "+str(duty))
    return targetRotation
# Main control loop: neutralize both servos, then repeatedly advance the
# per-servo timers from the proxy-fed targets until Ctrl+C.
# (Python 2 script: print statements and urllib2 above.)
try:
    reset(pulse2)
    reset(pulse3)
    time.sleep(1)
    print "setup complete"
    while True:
        # Compute the elapsed wall-clock time for this iteration.
        last_time = current_time
        current_time = datetime.datetime.now()
        deltaTime = current_time - last_time;
        log()
        # Advance the timers and drive the two wired servos (pins 18/22).
        timeRotation2 = compare(timeRotation2, rotation2, targetRotation2)
        timeRotation3 = compare(timeRotation3, rotation3, targetRotation3)
        rotation2 = update(pulse2, timeRotation2, targetRotation2);
        rotation3 = update(pulse3, timeRotation3, targetRotation3);
        time.sleep(0);
except KeyboardInterrupt:
    # Ctrl+C: signal the polling thread to exit, then release the GPIO pins.
    print '\r\nProgram shutdown.'
    stayAlive = False
    time.sleep(1)
    GPIO.cleanup();
    print '\r\nProgam complete.'
Unreal: Speech Plugins
I created repositories to hold some new Unreal projects.
Documentation: [UnrealHTML5SpeechDetection] [private repo]
Research:
Default Local Build: [localhost:8000]
[Unreal: HTML5 – Getting Started]
[How to reduce HTML5 package size?]
Files Required for Final Deployment ----------------------------------- *.js.gz - compressed JavaScript files. *.data - compressed game content. *.mem - compressed memory initialization file. *.html - uncompressed landing page. *.symbols - uncompressed symbols, if necessary.
[HTML5: Call C++ UFUNCTION from Webpage via JavaScript]
[HTML5Platform.Automation.cs] includes [GameX.html.template]
[Connecting C++ and HTML5] [info]
Video: [Getting started with Emscripten – Transpiling C / C++ to JavaScript / HTML5]
Issues:
Pull Request: [Add support for including project and plugin JS]
Lumberyard: How to Make Lumberyard Better?
Amazon Prime: Sleep Sounds
Adobe Creative Cloud: How to Mix like a Pro with Remix and Essential Sound Panel
Steam: SteamVR Plugin for Razer Hydra
[SteamVR Plugin for Razer Hydra]
** Caution: Be sure to close any Game, UE4, Unreal Launcher, SteamVR, and Unity processes that are running before installing.
Source: [steamvr_driver_hydra]
Install "SteamVR" - steam://install/250820 Install "Sixense SDK for the Razer Hydra" - steam://install/42300 Install "SteamVR Driver for Razer Hydra" - steam://install/491380










