The Android Dev website provides an example of speech input using the built-in Google speech-input activity. That activity shows a pre-configured pop-up with a mic and passes its results back via onActivityResult().
My question: is there a way to use the SpeechRecognizer class directly to do speech input without showing the canned activity? That would let me build my own activity for voice input.
Here is code using the SpeechRecognizer class (sourced from here and here):
import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.view.View.OnClickListener;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.widget.Button;
import android.widget.TextView;
import java.util.ArrayList;
import android.util.Log;

public class VoiceRecognitionTest extends Activity implements OnClickListener
{
    private TextView mText;
    private SpeechRecognizer sr;
    private static final String TAG = "MyStt3Activity";

    @Override
    public void onCreate(Bundle savedInstanceState)
    {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.main);
        Button speakButton = (Button) findViewById(R.id.btn_speak);
        mText = (TextView) findViewById(R.id.textView1);
        speakButton.setOnClickListener(this);
        sr = SpeechRecognizer.createSpeechRecognizer(this);
        sr.setRecognitionListener(new listener());
    }

    class listener implements RecognitionListener
    {
        public void onReadyForSpeech(Bundle params)
        {
            Log.d(TAG, "onReadyForSpeech");
        }
        public void onBeginningOfSpeech()
        {
            Log.d(TAG, "onBeginningOfSpeech");
        }
        public void onRmsChanged(float rmsdB)
        {
            Log.d(TAG, "onRmsChanged");
        }
        public void onBufferReceived(byte[] buffer)
        {
            Log.d(TAG, "onBufferReceived");
        }
        public void onEndOfSpeech()
        {
            Log.d(TAG, "onEndOfSpeech");
        }
        public void onError(int error)
        {
            Log.d(TAG, "error " + error);
            mText.setText("error " + error);
        }
        public void onResults(Bundle results)
        {
            String str = "";
            Log.d(TAG, "onResults " + results);
            ArrayList<String> data = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
            for (int i = 0; i < data.size(); i++)
            {
                Log.d(TAG, "result " + data.get(i));
                str += data.get(i);
            }
            mText.setText("results: " + String.valueOf(data.size()));
        }
        public void onPartialResults(Bundle partialResults)
        {
            Log.d(TAG, "onPartialResults");
        }
        public void onEvent(int eventType, Bundle params)
        {
            Log.d(TAG, "onEvent " + eventType);
        }
    }

    public void onClick(View v) {
        if (v.getId() == R.id.btn_speak)
        {
            Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
            intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
            intent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, "voice.recognition.test");
            intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, 5);
            sr.startListening(intent);
            Log.i(TAG, "startListening called");
        }
    }
}
Define main.xml with a button, and grant the RECORD_AUDIO permission in the manifest.
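For reference, a minimal sketch of what that could look like. The widget IDs (btn_speak, textView1) come from the Java code above; the rest of the layout is an assumption:

<!-- res/layout/main.xml: sketch containing the two views the sample looks up -->
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:orientation="vertical">

    <Button
        android:id="@+id/btn_speak"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:text="Speak" />

    <TextView
        android:id="@+id/textView1"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content" />
</LinearLayout>

And in AndroidManifest.xml:

<uses-permission android:name="android.permission.RECORD_AUDIO" />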
Also be sure to request the appropriate permission from the user at runtime. I got stuck with an error 9 return value, INSUFFICIENT_PERMISSIONS, even though the proper RECORD_AUDIO permission was listed in the manifest. Following the sample code here, I got proper responses from the speech recognizer once the permission had been obtained from the user.
For example, I added this block to my activity's onCreate(), though it could go somewhere else in the UI flow, as long as it runs before the SpeechRecognizer methods are called:
protected void onCreate(Bundle savedInstanceState) {
    ...
    if (ContextCompat.checkSelfPermission(this,
            Manifest.permission.RECORD_AUDIO)
            != PackageManager.PERMISSION_GRANTED) {
        // Should we show an explanation?
        if (ActivityCompat.shouldShowRequestPermissionRationale(this,
                Manifest.permission.RECORD_AUDIO)) {
            // Show an explanation to the user *asynchronously* -- don't block
            // this thread waiting for the user's response! After the user
            // sees the explanation, try again to request the permission.
        } else {
            // No explanation needed, we can request the permission.
            ActivityCompat.requestPermissions(this,
                    new String[]{Manifest.permission.RECORD_AUDIO},
                    527);
            // The request code is an app-defined int constant; the callback
            // method gets the result of the request. (In this example I just
            // punched in the value 527.)
        }
    }
    ...
}
Then, provide the callback method in your activity for the permission request:
@Override
public void onRequestPermissionsResult(int requestCode,
        String permissions[], int[] grantResults) {
    switch (requestCode) {
        case 527: {
            // If the request is cancelled, the result arrays are empty.
            if (grantResults.length > 0
                    && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
                // Permission was granted; do the speech-related task.
            } else {
                // Permission denied; disable the functionality that
                // depends on this permission.
            }
            return;
        }
        // Other 'case' lines to check for any other
        // permissions this app might request.
    }
}
One other thing I had to change in preetha's sample code above is where the result text is retrieved in the onResults() method. To get the actual text of the recognized speech (rather than its size, which the original code prints), either print the value of the constructed string str or get one of the values in the ArrayList (data). For instance:

mText.setText(data.get(0));
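Put together, a minimal sketch of the corrected onResults(), using the fields from the sample above with a null check added as a precaution:

public void onResults(Bundle results)
{
    ArrayList<String> data = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
    if (data != null && !data.isEmpty())
    {
        // Show the top recognition hypothesis instead of the list size.
        mText.setText(data.get(0));
    }
}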
You can use SpeechRecognizer, though I am not aware of any sample code for it beyond this previous SO question. However, it is new to API level 8 (Android 2.2), and is therefore not widely usable as of this writing.
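If you need to guard against devices where it is unavailable, SpeechRecognizer.isRecognitionAvailable() reports whether any recognition service is present. A minimal sketch, reusing the sr field from the earlier sample:

// Check for a recognition service before creating the recognizer.
if (SpeechRecognizer.isRecognitionAvailable(this)) {
    sr = SpeechRecognizer.createSpeechRecognizer(this);
} else {
    // No recognition service installed: disable or hide the voice feature.
}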
You can do it like this:
import android.app.Activity
import androidx.appcompat.app.AppCompatActivity
import android.os.Bundle
import kotlinx.android.synthetic.main.activity_main.*
import android.widget.Toast
import android.content.ActivityNotFoundException
import android.speech.RecognizerIntent
import android.content.Intent
class MainActivity : AppCompatActivity() {

    private val REQ_CODE = 100

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)
        speak.setOnClickListener {
            val intent = Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH)
            intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                    RecognizerIntent.LANGUAGE_MODEL_FREE_FORM)
            intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "ar-JO") // or Locale.getDefault()
            intent.putExtra(RecognizerIntent.EXTRA_PROMPT, "Need to speak")
            try {
                startActivityForResult(intent, REQ_CODE)
            } catch (a: ActivityNotFoundException) {
                Toast.makeText(applicationContext,
                        "Sorry, your device is not supported",
                        Toast.LENGTH_SHORT).show()
            }
        }
    }

    override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent?) {
        super.onActivityResult(requestCode, resultCode, data)
        when (requestCode) {
            REQ_CODE -> {
                if (resultCode == Activity.RESULT_OK && data != null) {
                    val result = data
                            .getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS)
                    println("result: $result")
                    text.text = result?.get(0) ?: ""
                }
            }
        }
    }
}
And the layout looks like this:
<?xml version="1.0" encoding="utf-8"?>
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:app="http://schemas.android.com/apk/res-auto"
    xmlns:tools="http://schemas.android.com/tools"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    tools:context=".MainActivity">

    <LinearLayout
        android:layout_width="match_parent"
        android:gravity="center"
        android:layout_height="match_parent">

        <TextView
            android:id="@+id/text"
            android:textSize="30sp"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content" />
    </LinearLayout>

    <LinearLayout
        android:layout_width="wrap_content"
        android:layout_alignParentBottom="true"
        android:layout_centerInParent="true"
        android:orientation="vertical"
        android:layout_height="wrap_content">

        <ImageView
            android:id="@+id/speak"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:background="?selectableItemBackground"
            android:src="@android:drawable/ic_btn_speak_now" />
    </LinearLayout>
</RelativeLayout>
The other way you are asking about is a bit longer, but it gives you more control and doesn't bother you with the Google Assistant dialog:
1- First, you need to grant the permissions in the Manifest file:
<uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.RECORD_AUDIO" />
2- Second, consolidating all of the answers above, create a RecognitionListener class:

import android.content.Context
import android.os.Bundle
import android.speech.RecognitionListener
import android.speech.SpeechRecognizer
import android.util.Log
import android.widget.Toast

private val TAG = "Driver-Assistant"
class Listener(context: Context): RecognitionListener {

    private var ctx = context

    override fun onReadyForSpeech(params: Bundle?) {
        Log.d(TAG, "onReadyForSpeech")
    }

    override fun onRmsChanged(rmsdB: Float) {
        Log.d(TAG, "onRmsChanged")
    }

    override fun onBufferReceived(buffer: ByteArray?) {
        Log.d(TAG, "onBufferReceived")
    }

    override fun onPartialResults(partialResults: Bundle?) {
        Log.d(TAG, "onPartialResults")
    }

    override fun onEvent(eventType: Int, params: Bundle?) {
        Log.d(TAG, "onEvent")
    }

    override fun onBeginningOfSpeech() {
        Toast.makeText(ctx, "Speech started", Toast.LENGTH_LONG).show()
    }

    override fun onEndOfSpeech() {
        Toast.makeText(ctx, "Speech finished", Toast.LENGTH_LONG).show()
    }

    override fun onError(error: Int) {
        // These codes correspond to the SpeechRecognizer.ERROR_* constants.
        val string = when (error) {
            6 -> "No speech input"
            4 -> "Server sends error status"
            8 -> "RecognitionService busy."
            7 -> "No recognition result matched."
            1 -> "Network operation timed out."
            2 -> "Other network related errors."
            9 -> "Insufficient permissions"
            5 -> "Other client side errors."
            3 -> "Audio recording error."
            else -> "unknown!!"
        }
        Toast.makeText(ctx, "Sorry, an error occurred: $string", Toast.LENGTH_LONG).show()
    }

    override fun onResults(results: Bundle?) {
        Log.d(TAG, "onResults $results")
        val data = results!!.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION)
        display.text = data!![0]
    }
}
3- Then, in your main activity, you need to define a SpeechRecognizer, attach the listener above to it, and remember to ask for the runtime permission. Everything together is shown below:

lateinit var sr: SpeechRecognizer
lateinit var display: TextView

class MainActivity : AppCompatActivity() {

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)

        display = text

        if (ContextCompat.checkSelfPermission(this,
                        Manifest.permission.RECORD_AUDIO)
                != PackageManager.PERMISSION_GRANTED) {
            if (ActivityCompat.shouldShowRequestPermissionRationale(this,
                            Manifest.permission.RECORD_AUDIO)) {
                // Show a rationale to the user here if needed.
            } else {
                ActivityCompat.requestPermissions(this,
                        arrayOf(Manifest.permission.RECORD_AUDIO),
                        527)
            }
        }

        sr = SpeechRecognizer.createSpeechRecognizer(this)
        sr.setRecognitionListener(Listener(this))

        speak.setOnClickListener {
            val intent = Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH)
            intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                    RecognizerIntent.LANGUAGE_MODEL_FREE_FORM)
            intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "ar-JO") // or Locale.getDefault()
            sr.startListening(intent)
        }
    }

    override fun onRequestPermissionsResult(requestCode: Int, permissions: Array<out String>, grantResults: IntArray) {
        super.onRequestPermissionsResult(requestCode, permissions, grantResults)
        when (requestCode) {
            527 -> if (grantResults.isNotEmpty()
                    && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
                Toast.makeText(this, "Permission granted", Toast.LENGTH_SHORT).show()
            } else {
                Toast.makeText(this, "Permission not granted", Toast.LENGTH_SHORT).show()
            }
        }
    }
}
package com.android.example.speechtxt;

import androidx.appcompat.app.AppCompatActivity;
import androidx.core.content.ContextCompat;
import android.Manifest;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.provider.Settings;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.view.MotionEvent;
import android.view.View;
import android.widget.RelativeLayout;
import android.widget.Toast;
import java.util.ArrayList;
import java.util.Locale;
public class MainActivity extends AppCompatActivity {

    private RelativeLayout relativeLayout;
    private SpeechRecognizer speechRecognizer;
    private Intent speechintent;
    String keeper = "";

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        checkVoiceCommandPermission();

        relativeLayout = findViewById(R.id.touchscr);
        speechRecognizer = SpeechRecognizer.createSpeechRecognizer(getApplicationContext());
        speechintent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        speechintent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        speechintent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.getDefault());

        speechRecognizer.setRecognitionListener(new RecognitionListener() {
            @Override
            public void onReadyForSpeech(Bundle params) {
            }

            @Override
            public void onBeginningOfSpeech() {
            }

            @Override
            public void onRmsChanged(float rmsdB) {
            }

            @Override
            public void onBufferReceived(byte[] buffer) {
            }

            @Override
            public void onEndOfSpeech() {
            }

            @Override
            public void onError(int error) {
            }

            @Override
            public void onResults(Bundle results) {
                ArrayList<String> speakedStringArray = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
                if (speakedStringArray != null) {
                    keeper = speakedStringArray.get(0);
                    Toast.makeText(getApplicationContext(), "" + keeper, Toast.LENGTH_SHORT).show();
                }
            }

            @Override
            public void onPartialResults(Bundle partialResults) {
            }

            @Override
            public void onEvent(int eventType, Bundle params) {
            }
        });

        // Push-to-talk: listen while the layout is pressed, stop on release.
        relativeLayout.setOnTouchListener(new View.OnTouchListener() {
            @Override
            public boolean onTouch(View v, MotionEvent event) {
                switch (event.getAction()) {
                    case MotionEvent.ACTION_DOWN:
                        speechRecognizer.startListening(speechintent);
                        keeper = "";
                        break;
                    case MotionEvent.ACTION_UP:
                        speechRecognizer.stopListening();
                        break;
                }
                return false;
            }
        });
    }

    private void checkVoiceCommandPermission() {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
            if (!(ContextCompat.checkSelfPermission(MainActivity.this, Manifest.permission.RECORD_AUDIO) == PackageManager.PERMISSION_GRANTED)) {
                // Send the user to this app's settings screen to grant the permission.
                Intent intent = new Intent(Settings.ACTION_APPLICATION_DETAILS_SETTINGS, Uri.parse("package:" + getPackageName()));
                startActivity(intent);
                finish();
            }
        }
    }
}
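One detail worth adding to any of these samples: a SpeechRecognizer holds a connection to the recognition service, so it should be released when the activity goes away. A minimal sketch, using the field name from the last example:

@Override
protected void onDestroy() {
    super.onDestroy();
    if (speechRecognizer != null) {
        // Free the connection to the recognition service.
        speechRecognizer.destroy();
    }
}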