英文:
Microphone not working while sharing screen with system audio using PeerJS (WebRTC)
问题
我们尝试了共享屏幕音频。在共享屏幕时,麦克风和屏幕共享音频不能同时工作。当系统音频开启时,麦克风不起作用。如果麦克风打开,系统音频也无法工作。请解释一下问题出在哪里。
以下是代码:
// Starts screen sharing and swaps the outgoing VIDEO track on the existing
// peer connection.
// NOTE(review): the reported problem originates here — only the video track
// is replaced; the audio track captured by getDisplayMedia({ audio: true })
// is never attached to the peer connection, so system audio and the mic are
// never sent together.
function startScreenShare() {
if (screenSharing) {
stopScreenSharing()
}
// Request screen video plus system audio.
// NOTE(review): `mediaSource` is a non-standard legacy constraint — confirm
// target browsers; standard getDisplayMedia ignores it.
navigator.mediaDevices.getDisplayMedia(
{ video: { mediaSource: "screen" }, audio: true }
).then((stream) => {
setScreenSharingStream(stream);
screenStream = stream;
let videoTrack = screenStream.getVideoTracks()[0];
// Fired when the user stops sharing via the browser's own UI.
videoTrack.onended = () => {
console.log('Hiii')
stopScreenSharing()
}
if (peer) {
// Finds the sender by video kind only — the display stream's audio
// track is silently dropped at this point.
let sender = currentPeer.peerConnection.getSenders().find(function (s) {
return s.track.kind == videoTrack.kind;
})
sender.replaceTrack(videoTrack)
screenSharing = true
}
console.log(screenStream)
})
}
我们尝试了共享屏幕音频。在共享屏幕时,麦克风和屏幕共享音频不能同时工作。当系统音频开启时,麦克风不起作用。如果麦克风打开,系统音频也无法工作。
我想要麦克风和系统音频能够与共享屏幕一起播放。
英文:
We have tried to share screen audio. When sharing the screen, the microphone and the screen-share (system) audio do not work together: the mic does not work when system audio is on, and system audio does not work when the mic is on. Please explain what the issue is.
Here is code:
// Starts screen sharing and swaps the outgoing VIDEO track on the existing
// peer connection.
// NOTE(review): the reported problem originates here — only the video track
// is replaced; the audio track captured by getDisplayMedia({ audio: true })
// is never attached to the peer connection, so system audio and the mic are
// never sent together.
function startScreenShare() {
if (screenSharing) {
stopScreenSharing()
}
// Request screen video plus system audio.
// NOTE(review): `mediaSource` is a non-standard legacy constraint — confirm
// target browsers; standard getDisplayMedia ignores it.
navigator.mediaDevices.getDisplayMedia(
{ video: { mediaSource: "screen" }, audio: true }
).then((stream) => {
setScreenSharingStream(stream);
screenStream = stream;
let videoTrack = screenStream.getVideoTracks()[0];
// Fired when the user stops sharing via the browser's own UI.
videoTrack.onended = () => {
console.log('Hiii')
stopScreenSharing()
}
if (peer) {
// Finds the sender by video kind only — the display stream's audio
// track is silently dropped at this point.
let sender = currentPeer.peerConnection.getSenders().find(function (s) {
return s.track.kind == videoTrack.kind;
})
sender.replaceTrack(videoTrack)
screenSharing = true
}
console.log(screenStream)
})
}
We have tried to share screen audio. When sharing the screen, the microphone and the screen-share (system) audio do not work together: the mic does not work when system audio is on, and system audio does not work when the mic is on.
I want mic and system audio to play along with share screen.
答案1
得分: 1
// Capture the screen (video + system audio) FIRST.
// FIX: the original snippet called screenStream1.addTrack(...) BEFORE the
// `const screenStream1` declaration, which throws a TDZ ReferenceError.
// The addTrack mutation was also redundant — mixing is done below with the
// Web Audio API, which only reads screenStream1.getAudioTracks()[0].
const screenStream1 = await navigator.mediaDevices.getDisplayMedia({
  video: {
    cursor: "always"
  },
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    sampleRate: 44100
  }
});
// Then capture the microphone separately.
const audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });

/**
 * Mixes the microphone stream and the screen share's system-audio track
 * into a single audio stream using the Web Audio API.
 * @param {MediaStream} audioStream - microphone-only stream
 * @param {MediaStream} screenStream1 - display stream whose first audio track is system audio
 * @returns {Promise<MediaStream>} stream containing one mixed audio track
 */
async function mergeTracks(audioStream, screenStream1) {
  const audioContext = new AudioContext();

  // Wrap just the system-audio track so createMediaStreamSource does not
  // pick up the display stream's video track.
  const systemAudioOnly = new MediaStream();
  systemAudioOnly.addTrack(screenStream1.getAudioTracks()[0]);

  const micSource = audioContext.createMediaStreamSource(audioStream);
  const sysSource = audioContext.createMediaStreamSource(systemAudioOnly);
  const dest = audioContext.createMediaStreamDestination();

  // One gain node per source; 0.8 leaves headroom so the mix does not clip.
  const micGain = audioContext.createGain();
  const sysGain = audioContext.createGain();
  micGain.gain.value = 0.8;
  sysGain.gain.value = 0.8;

  // FIX: connect each source exactly ONCE, through its gain node. The
  // original also connected each source directly to `dest`, doubling the
  // signal and defeating the 0.8 attenuation.
  micSource.connect(micGain).connect(dest);
  sysSource.connect(sysGain).connect(dest);

  return dest.stream;
}

var screenStream = screenStream1;
// FIX: declare instead of assigning to an implicit global.
const Stream = await mergeTracks(audioStream, screenStream);
console.log('currentPeer---------', currentPeer);

if (peer && currentPeer) {
  // Replace the outgoing video track with the screen capture...
  const videoTrack = screenStream.getVideoTracks()[0];
  const sender = currentPeer.peerConnection.getSenders().find((s) => s.track.kind === videoTrack.kind);
  sender.replaceTrack(videoTrack);
  // ...and the outgoing audio track with the mic + system-audio mix.
  const audioTrack = Stream.getAudioTracks()[0];
  const audioSender = currentPeer.peerConnection.getSenders().find((s) => s.track.kind === audioTrack.kind);
  audioSender.replaceTrack(audioTrack);
  screenSharing = true;
}
英文:
Re-Solved:
// Capture the screen (video + system audio) FIRST.
// FIX: the original snippet called screenStream1.addTrack(...) BEFORE the
// `const screenStream1` declaration, which throws a TDZ ReferenceError.
// The addTrack mutation was also redundant — mixing is done below with the
// Web Audio API, which only reads screenStream1.getAudioTracks()[0].
const screenStream1 = await navigator.mediaDevices.getDisplayMedia({
  video: {
    cursor: "always"
  },
  audio: {
    echoCancellation: true,
    noiseSuppression: true,
    sampleRate: 44100
  }
});
// Then capture the microphone separately.
const audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });

/**
 * Mixes the microphone stream and the screen share's system-audio track
 * into a single audio stream using the Web Audio API.
 * @param {MediaStream} audioStream - microphone-only stream
 * @param {MediaStream} screenStream1 - display stream whose first audio track is system audio
 * @returns {Promise<MediaStream>} stream containing one mixed audio track
 */
async function mergeTracks(audioStream, screenStream1) {
  const audioContext = new AudioContext();

  // Wrap just the system-audio track so createMediaStreamSource does not
  // pick up the display stream's video track.
  const systemAudioOnly = new MediaStream();
  systemAudioOnly.addTrack(screenStream1.getAudioTracks()[0]);

  const micSource = audioContext.createMediaStreamSource(audioStream);
  const sysSource = audioContext.createMediaStreamSource(systemAudioOnly);
  const dest = audioContext.createMediaStreamDestination();

  // One gain node per source; 0.8 leaves headroom so the mix does not clip.
  const micGain = audioContext.createGain();
  const sysGain = audioContext.createGain();
  micGain.gain.value = 0.8;
  sysGain.gain.value = 0.8;

  // FIX: connect each source exactly ONCE, through its gain node. The
  // original also connected each source directly to `dest`, doubling the
  // signal and defeating the 0.8 attenuation.
  micSource.connect(micGain).connect(dest);
  sysSource.connect(sysGain).connect(dest);

  return dest.stream;
}

var screenStream = screenStream1;
// FIX: declare instead of assigning to an implicit global.
const Stream = await mergeTracks(audioStream, screenStream);
console.log('currentPeer---------', currentPeer);

if (peer && currentPeer) {
  // Replace the outgoing video track with the screen capture...
  const videoTrack = screenStream.getVideoTracks()[0];
  const sender = currentPeer.peerConnection.getSenders().find((s) => s.track.kind === videoTrack.kind);
  sender.replaceTrack(videoTrack);
  // ...and the outgoing audio track with the mic + system-audio mix.
  const audioTrack = Stream.getAudioTracks()[0];
  const audioSender = currentPeer.peerConnection.getSenders().find((s) => s.track.kind === audioTrack.kind);
  audioSender.replaceTrack(audioTrack);
  screenSharing = true;
}
通过集体智慧和协作来改善编程学习和解决问题的方式。致力于成为全球开发者共同参与的知识库,让每个人都能够通过互相帮助和分享经验来进步。
评论