英文:
Text To Speech not working properly in Android Studio
问题
package com.maitreyastudio.ai;
import androidx.annotation.NonNull;
import androidx.appcompat.app.AppCompatActivity;
import androidx.core.app.ActivityCompat;
import androidx.core.content.ContextCompat;
import android.Manifest;
import android.annotation.SuppressLint;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.location.Address;
import android.location.Geocoder;
import android.location.Location;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.provider.Settings;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.speech.tts.TextToSpeech;
import android.view.MotionEvent;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
public class MainActivity extends AppCompatActivity {
private Button btnRecognize;
private SpeechRecognizer speechRecognizer;
private TextToSpeech textToSpeech;
private EditText ET_ShowRecognized;
private Intent intent;
private String ET_ShowRecognizedText;
private String ProcessingText;
@SuppressLint({"SetTextI18n", "ClickableViewAccessibility", "MissingPermission"})
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
ActivityCompat.requestPermissions(this, new String[]{Manifest.permission.RECORD_AUDIO, Manifest.permission.WRITE_EXTERNAL_STORAGE, Manifest.permission.READ_EXTERNAL_STORAGE, Manifest.permission.ACCESS_FINE_LOCATION}, PackageManager.PERMISSION_GRANTED);
ET_ShowRecognized = findViewById(R.id.ET_ShowRecognized);
btnRecognize = findViewById(R.id.btnRecognize);
textToSpeech = new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
@Override
public void onInit(int i) {
if (i == TextToSpeech.SUCCESS) {
textToSpeech.setLanguage(Locale.ENGLISH);
}
}
});
textToSpeech.speak("Hi you successfully ran me.", TextToSpeech.QUEUE_FLUSH, null, null);
intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
speechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
speechRecognizer.setRecognitionListener(new RecognitionListener() {
@Override
public void onReadyForSpeech(Bundle bundle) {}
@Override
public void onBeginningOfSpeech() {}
@Override
public void onRmsChanged(float v) {}
@Override
public void onBufferReceived(byte[] bytes) {}
@Override
public void onEndOfSpeech() {}
@Override
public void onError(int i) {}
@Override
public void onResults(Bundle bundle) {
ArrayList<String> matches = bundle.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
if (matches != null) {
ET_ShowRecognized.setText(matches.get(0));
process();
}
}
@Override
public void onPartialResults(Bundle bundle) {}
@Override
public void onEvent(int i, Bundle bundle) {}
});
btnRecognize.setOnTouchListener(new View.OnTouchListener() {
@Override
public boolean onTouch(View view, MotionEvent motionEvent) {
switch (motionEvent.getAction()) {
case MotionEvent.ACTION_UP:
speechRecognizer.stopListening();
break;
case MotionEvent.ACTION_DOWN:
ET_ShowRecognized.setText(null);
ET_ShowRecognized.setText("Listening...");
speechRecognizer.startListening(intent);
break;
default:
break;
}
return false;
}
});
textToSpeech.speak("Hi! Seems good to meet you.", TextToSpeech.QUEUE_FLUSH, null, null);
}
public void process() {
ProcessingText = ET_ShowRecognized.getText().toString().toLowerCase();
switch (ProcessingText) {
case "hello":
textToSpeech.speak("Hello! Hope all is going fine.", TextToSpeech.QUEUE_FLUSH, null, null);
break;
case "hi":
textToSpeech.speak("Hi! I hope all is well.", TextToSpeech.QUEUE_FLUSH, null, null);
break;
case "what is your name":
textToSpeech.speak("My name is assistant.", TextToSpeech.QUEUE_FLUSH, null, null);
break;
case "bye":
finish();
System.exit(0);
default:
textToSpeech.speak(ProcessingText, TextToSpeech.QUEUE_FLUSH, null, null);
break;
}
}
}
XML code is not translated, as requested.
英文:
I have recently created a simple project in android studio using speech recognition and text to speech but the problem is that the text to speech doesn't speak the given line on starting the app for the first time but after this event it works very properly. Like when I start the app in the below code I have added the line to welcome the user but tts doesn't speak and then when I press the recognize button the app recognizes the sentence and also speaks correctly. Why is it so? It seems quite weird. I am providing the code below please do check it and tell me if I am making some mistake as soon as possible.
This is my Java code:
package com.maitreyastudio.ai;
import androidx.annotation.NonNull;
import androidx.appcompat.app.AppCompatActivity;
import androidx.core.app.ActivityCompat;
import androidx.core.content.ContextCompat;
import android.Manifest;
import android.annotation.SuppressLint;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.location.Address;
import android.location.Geocoder;
import android.location.Location;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.provider.Settings;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.speech.tts.TextToSpeech;
import android.view.MotionEvent;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.GridLayout;
import android.widget.TextView;
import com.chaquo.python.PyObject;
import com.chaquo.python.Python;
import com.chaquo.python.android.AndroidPlatform;
import com.google.android.gms.location.FusedLocationProviderClient;
import com.google.android.gms.tasks.OnCompleteListener;
import com.google.android.gms.tasks.OnSuccessListener;
import com.google.android.gms.tasks.Task;
import org.w3c.dom.Text;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import static android.Manifest.permission.ACCESS_FINE_LOCATION;
import static android.Manifest.permission.READ_EXTERNAL_STORAGE;
import static android.Manifest.permission.RECORD_AUDIO;
import static android.Manifest.permission.WRITE_EXTERNAL_STORAGE;
public class MainActivity extends AppCompatActivity {
private Button btnRecognize;
private SpeechRecognizer speechRecognizer;
private TextToSpeech textToSpeech;
private EditText ET_ShowRecognized;
String locality;
private Intent intent;
private String ET_ShowRecognizedText;
private String ProcessingText;
private ArrayList voices;
private FusedLocationProviderClient fusedLocationProviderClient;
Geocoder geocoder;
/*Python py;
PyObject pyobj;
PyObject obj;
String currentDate;
String currentTime;*/
@SuppressLint({"SetTextI18n", "ClickableViewAccessibility", "MissingPermission"})
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
ActivityCompat.requestPermissions(this, new String[]{RECORD_AUDIO, WRITE_EXTERNAL_STORAGE, READ_EXTERNAL_STORAGE, ACCESS_FINE_LOCATION}, PackageManager.PERMISSION_GRANTED);
ET_ShowRecognized = findViewById(R.id.ET_ShowRecognized);
btnRecognize = findViewById(R.id.btnRecognize);
/*fusedLocationProviderClient.getLastLocation().addOnCompleteListener(new OnCompleteListener<Location>() {
@Override
public void onComplete(@NonNull Task<Location> task) {
Location location = task.getResult();
if(location != null){
geocoder = new Geocoder(MainActivity.this, Locale.getDefault());
try {
List<Address> address = geocoder.getFromLocation(location.getLatitude(), location.getLongitude(), 1);
locality = address.get(0).getLocality();
} catch (IOException e) {
;
}
}
}
});
if(!Python.isStarted()){
Python.start(new AndroidPlatform(this));
}
py = Python.getInstance();
pyobj = py.getModule("WolframAlpha");
obj = pyobj.callAttr("main", locality);*/
textToSpeech = new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
@Override
public void onInit(int i) {
if (i == TextToSpeech.SUCCESS) {
textToSpeech.setLanguage(Locale.ENGLISH);
}
}
});
textToSpeech.speak("Hi you succesfully ran me.", TextToSpeech.QUEUE_FLUSH, null, null);
//currentDate = new SimpleDateFormat("dd-MM-yyyy", Locale.getDefault()).format(new Date());
//currentTime = new SimpleDateFormat("HH:mm:ss", Locale.getDefault()).format(new Date());
//textToSpeech.speak("Hi! I am your personal assistant. Today date is something something ", TextToSpeech.QUEUE_FLUSH, null, null);
//Speak("Today's weather forecast for the current location is " + obj.toString());
intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
speechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
speechRecognizer.setRecognitionListener(new RecognitionListener() {
@Override
public void onReadyForSpeech(Bundle bundle) {
}
@Override
public void onBeginningOfSpeech() {
}
@Override
public void onRmsChanged(float v) {
}
@Override
public void onBufferReceived(byte[] bytes) {
}
@Override
public void onEndOfSpeech() {
}
@Override
public void onError(int i) {
}
@Override
public void onResults(Bundle bundle) {
ArrayList<String> mathches = bundle.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
if (mathches != null) {
ET_ShowRecognized.setText(mathches.get(0));
process();
}
}
@Override
public void onPartialResults(Bundle bundle) {
}
@Override
public void onEvent(int i, Bundle bundle) {
}
});
btnRecognize.setOnTouchListener(new View.OnTouchListener() {
@Override
public boolean onTouch(View view, MotionEvent motionEvent) {
switch (motionEvent.getAction()) {
case MotionEvent.ACTION_UP:
speechRecognizer.stopListening();
break;
case MotionEvent.ACTION_DOWN:
ET_ShowRecognized.setText(null);
ET_ShowRecognized.setText("Listening...");
speechRecognizer.startListening(intent);
break;
default:
break;
}
return false;
}
});
textToSpeech.speak("Hi! Seems good to meet you.", TextToSpeech.QUEUE_FLUSH, null, null);
}
public void process() {
ProcessingText = ET_ShowRecognized.getText().toString().toLowerCase();
switch (ProcessingText) {
case ("hello"):
textToSpeech.speak("Hello! Hope all is going fine.", TextToSpeech.QUEUE_FLUSH, null, null);
break;
case ("hi"):
textToSpeech.speak("Hi! I hope all is well.", TextToSpeech.QUEUE_FLUSH, null, null);
break;
case ("what is your name"):
textToSpeech.speak("My name is assistant.", TextToSpeech.QUEUE_FLUSH, null, null);
break;
case ("bye"):
finish();
System.exit(0);
default:
textToSpeech.speak(ProcessingText, TextToSpeech.QUEUE_FLUSH, null, null);
break;
}
}
}
And this is my XML code
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".MainActivity">
<Button
android:id="@+id/btnRecognize"
style="@style/Widget.AppCompat.Button"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:text="@string/recognize"
app:layout_constraintBottom_toBottomOf="parent"
app:layout_constraintEnd_toEndOf="parent"
app:layout_constraintStart_toStartOf="parent"
app:layout_constraintTop_toTopOf="parent" />
<EditText
android:id="@+id/ET_ShowRecognized"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:ems="10"
android:inputType="textPersonName"
android:hint="@string/you_will_see_recognized_text_here"
app:layout_constraintBottom_toTopOf="@+id/btnRecognize"
app:layout_constraintEnd_toEndOf="parent"
app:layout_constraintStart_toStartOf="parent"
app:layout_constraintTop_toTopOf="parent" />
</androidx.constraintlayout.widget.ConstraintLayout>
Please help me as soon as possible.
Thank You
答案1
得分: 0
你可以尝试通过创建一个应用程序类并在其中初始化你的TextToSpeech引擎,这样它将在应用程序启动时开始初始化。
public class MyApp extends Application {
private static MyApp instance = null;
private TextToSpeech t1;
public static MyApp getInstance() {
return instance;
}
@Override
public void onCreate() {
super.onCreate();
instance = this;
t1 = new TextToSpeech(this, new TextToSpeech.OnInitListener() {
@Override
public void onInit(int i) {
if (i == TextToSpeech.SUCCESS) {
}
}
});
}
public TextToSpeech getT1() {
return t1;
}
}
然后在你的活动中使用它。
英文:
You can try by creating an application class and initialize your textToSpeech engine in it so that it starts initializing on app launch.
public class MyApp extends Application {
private static MyApp instance = null;
private TextToSpeech t1;
public static MyApp getInstance() {
return instance;
}
@Override
public void onCreate() {
super.onCreate();
instance = this;
t1 = new TextToSpeech(this, new TextToSpeech.OnInitListener() {
@Override
public void onInit(int i) {
if (i == TextToSpeech.SUCCESS) {
}
}
});
}
public TextToSpeech getT1() {
return t1;
}
}
and than use that in your activity.
答案2
得分: 0
以下是翻译好的部分:
之前没有成功的原因与DB377所说的原因相同。这是因为TTS的初始化是异步的,只有在该过程完成时才会调用onInit。执行不一定会逐行进行。
您可以按照以下方式更改您的代码:
// 这部分首先运行
textToSpeech = new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
// 这部分第三个运行!
@Override
public void onInit(int i) {
if (i == TextToSpeech.SUCCESS) {
textToSpeech.setLanguage(Locale.ENGLISH);
// 新位置
textToSpeech.speak("你好,你成功地运行了我。", TextToSpeech.QUEUE_FLUSH, null, null);
}
}
});
// 老位置(这部分第二个运行)
英文:
The reason it didn't work before is the same reason DB377 said. It's because the initialization of the tts is asynchronous and onInit is only called when that process is complete. Execution doesn't necessarily happen line-by-line.
You can change your code as follows:
// THIS RUNS FIRST
textToSpeech = new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
// THIS RUNS THIRD!
@Override
public void onInit(int i) {
if (i == TextToSpeech.SUCCESS) {
textToSpeech.setLanguage(Locale.ENGLISH);
// NEW LOCATION
textToSpeech.speak("Hi you succesfully ran me.", TextToSpeech.QUEUE_FLUSH, null, null);
}
}
});
// OLD LOCATION (THIS RUNS SECOND)
答案3
得分: 0
// 这对我有效:
// 在这里添加你的文本转语音代码
// 创建一个 Handler 对象以在延迟后执行一些代码
Handler handler = new Handler();
handler.postDelayed(new Runnable() {
@Override
public void run() {
// 在延迟后运行的代码,对我来说是在另一个 handler 中
handler.post(runnable);
}
}, 3000); // 3秒延迟
英文:
// This worked for me:
// Your TextToSpeech here
// Create a Handler object to run some code after a delay
Handler handler = new Handler();
handler.postDelayed(new Runnable() {
@Override
public void run() {
// Your code to run after a delay. in my case in another handler
handler.post(runnable);
}
}, 3000); // 3 seconds delay
通过集体智慧和协作来改善编程学习和解决问题的方式。致力于成为全球开发者共同参与的知识库,让每个人都能够通过互相帮助和分享经验来进步。
评论