月別アーカイブ: 4月 2018

Watson Unity Assistant (旧 Conversation)

標準

元記事>「Watson×Unity!初心者でもできる、VR 空間で Unity ちゃんとおしゃべりアプリ!」

https://www.ibm.com/developerworks/jp/cloud/library/unity/

新しいUnity-SDKに対応させるときのメモ。

旧 Watson Conversation →新 Watson Assistant

大きく変更がないならば、こちらの方のアドバイスに従って、SampleAssistant.csの書き換えで対応する。

インスペクターに入力するVersion Dateは、https://www.ibm.com/watson/developercloud/assistant/api/v1/curl.html?curl#list-examples

このあたりを見ると、出てくる (APIのバージョン)
2018/4/21では、2018-02-16だそうであるので、入力は、2018-02-16

===========

//WatsonAssistant.cs

using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using IBM.Watson.DeveloperCloud.Services.Assistant.v1;
using IBM.Watson.DeveloperCloud.Connection;
using IBM.Watson.DeveloperCloud.Utilities;

// Sends a single hard-coded utterance to the Watson Assistant (v1) service on
// startup and logs the detected intents and the text response.
// Credentials, version date and workspace ID are entered in the Inspector.
public class WatsonAssistant : MonoBehaviour
{
    #region PLEASE SET THESE VARIABLES IN THE INSPECTOR
    [SerializeField]
    private string _username;
    [SerializeField]
    private string _password;
    [SerializeField]
    private string _url;
    [SerializeField]
    private string _versionDate;
    [SerializeField]
    private string _workspaceId;
    #endregion

    // Watson Assistant service client, created in Start().
    private Assistant m_Conversation;
    // Sample utterance sent once at startup ("Good morning").
    private string m_Input = "おはよう";

    // Use this for initialization: create the service and send one message.
    void Start()
    {
        // Create credential and instantiate service
        Credentials credentials = new Credentials(_username, _password, _url);

        m_Conversation = new Assistant(credentials);
        Debug.Log("User: " + m_Input);

        // The Assistant API requires an explicit version date (e.g. "2018-02-16").
        m_Conversation.VersionDate = _versionDate;

        // Build the message payload: { "input": { "text": "<utterance>" } }
        Dictionary<string, object> input = new Dictionary<string, object>();
        input.Add("text", m_Input);
        MessageRequest messageRequest = new MessageRequest()
        {
            Input = input
        };
        m_Conversation.Message(OnMessage, OnFail, _workspaceId, messageRequest);
    }

    // Success callback: logs each detected intent with its confidence,
    // then concatenates and logs the service's output text.
    void OnMessage(object resp, Dictionary<string, object> customData)
    {
        if (resp is Dictionary<string, object>)
        {
            Dictionary<string, object> dic_resp = (Dictionary<string, object>)resp;

            foreach (object o in (List<object>)dic_resp["intents"])
            {
                Dictionary<string, object> dic_intent = (Dictionary<string, object>)o;
                Debug.Log("intent: " + dic_intent["intent"] + ", confidence: " + dic_intent["confidence"]);
            }

            // "output"."text" is a list of response fragments; join them into one string.
            Dictionary<string, object> dic_output = (Dictionary<string, object>)dic_resp["output"];
            string res = "";
            foreach (object o in (List<object>)dic_output["text"])
            {
                res += o.ToString();
            }
            Debug.Log("response: " + res);
        }
    }

    // Failure callback for the Message request.
    // (Fixed: the message previously named "SampleConversation", a leftover
    // from the sample this class was copied from.)
    private void OnFail(RESTConnector.Error error, Dictionary<string, object> customData)
    {
        Debug.Log("WatsonAssistant.OnFail() Error received: " + error.ToString());
    }

    // Update is called once per frame (intentionally empty).
    void Update()
    {
    }
}

 

広告

Watson Unity TextToSpeech

標準

元記事>「Watson×Unity!初心者でもできる、VR 空間で Unity ちゃんとおしゃべりアプリ!」

https://www.ibm.com/developerworks/jp/cloud/library/unity/

に対し、Unity-SDKのバージョンが変わったため、SampleTextToSpeech.csの書き換えが必要になった。

(SpeechToTextに関しては、前記事参照)

こんな感じでどうでしょう?

※サービス資格情報は、Unityのほうのインスペクターで入力します

//SampleTextToSpeech.cs

using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using IBM.Watson.DeveloperCloud.Services.TextToSpeech.v1;
using IBM.Watson.DeveloperCloud.Connection;
using IBM.Watson.DeveloperCloud.Utilities;

// Exercises the Watson Text to Speech service: synthesizes a Japanese test
// sentence to audio, then lists the available voices and fetches one voice.
// Credentials are entered in the Inspector.
public class SampleTextToSpeech : MonoBehaviour {

    #region PLEASE SET THESE VARIABLES IN THE INSPECTOR
    [SerializeField]
    private string _username;
    [SerializeField]
    private string _password;
    [SerializeField]
    private string _url;
    #endregion

    // Text to Speech service client, created in Start().
    TextToSpeech m_textToSpeech;
    // Japanese test sentence ("Good morning. I can read kanji too.").
    string m_TestString = "おはようございます。漢字も読めます。";

    // Flags that gate the Examples() coroutine between the three API calls.
    private bool m_synthesizeTested = false;
    private bool m_getVoicesTested = false;
    private bool m_getVoiceTested = false;


    // Use this for initialization

    void Start()
    {
        // Create credential and instantiate service
        Credentials credentials = new Credentials(_username, _password, _url);

        m_textToSpeech = new TextToSpeech(credentials);
        Runnable.Run(Examples());
    }

    // Runs the three API examples in sequence, waiting for each callback.
    private IEnumerator Examples()
    {
        // Synthesize
        Debug.Log("Attempting synthesize.");
        // m_textToSpeech.Voice = VoiceType.en_US_Allison;
        m_textToSpeech.Voice = VoiceType.ja_JP_Emi;

        m_textToSpeech.ToSpeech(HandleToSpeechCallback, OnFail, m_TestString, true);
        while (!m_synthesizeTested)
            yield return null;

        // Get Voices
        Debug.Log("Attempting to get voices.");
        m_textToSpeech.GetVoices(OnGetVoices, OnFail);
        while (!m_getVoicesTested)
            yield return null;

        // Get Voice
        Debug.Log(string.Format("Attempting to get voice {0}.", VoiceType.ja_JP_Emi));
        m_textToSpeech.GetVoice(OnGetVoice, OnFail, VoiceType.ja_JP_Emi);

        while (!m_getVoiceTested)
            yield return null;

    }

    // Synthesize callback: play the returned clip and release the coroutine.
    // (Fixed: m_synthesizeTested was never set, so Examples() waited forever
    // and the GetVoices/GetVoice examples never ran.)
    void HandleToSpeechCallback(AudioClip clip, Dictionary<string, object> customData = null)
    {
        PlayClip(clip);
        m_synthesizeTested = true;
    }

    // Plays a clip through a temporary AudioSource object that destroys
    // itself once the clip has finished.
    private void PlayClip(AudioClip clip)
    {
        if (Application.isPlaying && clip != null)
        {
            GameObject audioObject = new GameObject("AudioObject");
            AudioSource source = audioObject.AddComponent<AudioSource>();
            source.spatialBlend = 0.0f; // 2D (non-spatialized) playback
            source.loop = false;
            source.clip = clip;
            source.Play();

            GameObject.Destroy(audioObject, clip.length);
        }
    }

    // GetVoices callback. (Fixed: the log previously printed a literal "{0}".)
    private void OnGetVoices(Voices voices, Dictionary<string, object> customData = null)
    {
        Debug.Log("Text to Speech - Get voices response: " + voices);
        m_getVoicesTested = true;
    }

    // GetVoice callback. (Fixed: the log previously printed a literal "{0}".)
    private void OnGetVoice(Voice voice, Dictionary<string, object> customData = null)
    {
        Debug.Log("Text to Speech - Get voice response: " + voice);
        m_getVoiceTested = true;
    }

    // Failure callback shared by all three requests.
    // (Fixed: the log previously printed a literal "{0}" and dropped the error.)
    private void OnFail(RESTConnector.Error error, Dictionary<string, object> customData)
    {
        Debug.Log("Error received: " + error.ToString());
    }

    // Update is called once per frame (intentionally empty).
    void Update () {

    }

}

Watson Unity -New Version

標準

元記事>「Watson×Unity!初心者でもできる、VR 空間で Unity ちゃんとおしゃべりアプリ!」

https://www.ibm.com/developerworks/jp/cloud/library/unity/

こちら、WatsonのUnitySDKに対し、Unity 上でWatsonのコンフィギュレーションエディターが出ないという仕様に変わってしまったことへの対処方法をあれこれ検索していましたら、先に解決してくれた方がいますので、リンクさせてもらいます

「developerWorks記事「Watson×Unity!初心者でもできる、VR 空間で Unity ちゃんとおしゃべりアプリ!」を試してみる」

https://qiita.com/yamachan360/items/911204e57957bd4bbef0

1.Unity-SDKのダウンロード

https://github.com/watson-developer-cloud/unity-sdk/releases/

↑のページからリリースされている、Unity-SDKの最新バージョンをダウンロードする(2018/4/21現在で v2.2.2)

sourceファイルをダウンロードし、展開して、フォルダ名をWatsonに変更しておく。

シーンに適当なオブジェクト(Unityちゃんなど)を表示し、オーディオソースコンポーネントを追加。

2.SampleSpeechToText.csの書き換え

上記 yamachan360様のページからSampleSpeechToText.csをコンポーネントとしてオブジェクトに追加。IBM Cloudのサービス資格情報を自分のものに入れ替えて、実行。

SampleSpeechToText.cs
//SampleSpeechToText.cs

using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using IBM.Watson.DeveloperCloud.Services.SpeechToText.v1;
using IBM.Watson.DeveloperCloud.Connection;
using IBM.Watson.DeveloperCloud.Utilities;

// Records 3 seconds of microphone audio on startup, plays it back, and sends
// it to the Watson Speech to Text service (Japanese broadband model).
// Service credentials are entered in the Inspector.
public class SampleSpeechToText : MonoBehaviour
{
    #region PLEASE SET THESE VARIABLES IN THE INSPECTOR
    [SerializeField]
    private string cre_id;
    [SerializeField]
    private string cre_pw;
    [SerializeField]
    private string cre_url;
    #endregion

    // Optional clip assignable in the Inspector (not used by this sample).
    // Fixed: `new AudioClip()` used the obsolete constructor — Unity AudioClips
    // must be created via AudioClip.Create or assigned in the Inspector.
    [SerializeField]
    private AudioClip m_AudioClip;
    //private SpeechToText m_SpeechToText = new SpeechToText();
    private SpeechToText m_SpeechToText;

    // Use this for initialization (coroutine so we can wait while recording).
    IEnumerator Start()
    {
  //      string cre_id = "4934xxxx-xxxx-xxxx-xxxx-xxxxxxxxxx99";                  // 資格情報より
  //      string cre_pw = "MxxxxxxxxxxE";                                          // 資格情報より
  //      string cre_url = "https://stream.watsonplatform.net/speech-to-text/api"; // 資格情報より
        Credentials credentials = new Credentials(cre_id, cre_pw, cre_url);
        m_SpeechToText = new SpeechToText(credentials);
        m_SpeechToText.Keywords = new string[] { "ibm" };
        m_SpeechToText.KeywordsThreshold = 0.1f;

        // 音声をマイクから 3 秒間取得する (buffer is 10 s; we stop after 3 s)
        Debug.Log("Start record"); //集音開始
        var audioSource = GetComponent<AudioSource>();
        audioSource.clip = Microphone.Start(null, true, 10, 44100);
        audioSource.loop = false;
        audioSource.spatialBlend = 0.0f; // 2D (non-spatialized) playback
        yield return new WaitForSeconds(3f);
        Microphone.End(null); //集音終了
        Debug.Log("Finish record");

        // ためしに録音内容を再生してみる
        audioSource.Play();

        // SpeechToText を日本語指定して、録音音声をテキストに変換
        m_SpeechToText.RecognizeModel = "ja-JP_BroadbandModel";
        //m_SpeechToText.Recognize(HandleOnRecognize, audioSource.clip);
        m_SpeechToText.Recognize(HandleOnRecognize, OnFail, audioSource.clip);
    }

    // Recognition callback: log every alternative transcript with its
    // final/interim status and confidence.
    void HandleOnRecognize(SpeechRecognitionEvent result, Dictionary<string, object> customData)
    {
        if (result != null && result.results.Length > 0)
        {
            foreach (var res in result.results)
            {
                foreach (var alt in res.alternatives)
                {
                    string text = alt.transcript;
                    Debug.Log(string.Format("{0} ({1}, {2:0.00})\n", text, res.final ? "Final" : "Interim", alt.confidence));
                }
            }
        }
    }

    // Failure callback for the Recognize request.
    private void OnFail(RESTConnector.Error error, Dictionary<string, object> customData)
    {
        Debug.Log("SampleSpeechToText.OnFail() Error received: " + error.ToString());
    }

    // Update is called once per frame (intentionally empty).
    void Update()
    {

    }
}

資格情報フィールドの部分(cre_id / cre_pw / cre_url の #region ブロック)は、本家GithubのExampleSpeechToText.cs の29行目~36行目の、

 #region PLEASE SET THESE VARIABLES IN THE INSPECTOR
 [SerializeField]
 private string _username;
 [SerializeField]
 private string _password;
 [SerializeField]
 private string _url;
 #endregion

これを使うと、インスペクターのほうで、サービス資格情報を入力することが出来る。