Preface
The previous two articles covered the basics of web audio; now let's use those APIs to build a simple live-streaming demo:
Web audio stream forwarding: AudioNode
Web audio stream forwarding: audio sources
Principle
- Video: capture video frames one by one, convert each frame to a base64 data URL and forward it; on the receiving end, set the base64 string as the src of an img element and keep replacing it to form a video.
- Audio: capture raw audio sample frames as binary data and forward them; on the receiving end, play the raw PCM data directly.
Capture and streaming
- Accessing the camera and microphone requires HTTPS.
- navigator.getUserMedia is deprecated; use navigator.mediaDevices.getUserMedia instead, with a compatibility shim where older browsers must be supported (see the sketch below).
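A minimal compatibility sketch along the lines of the well-known MDN polyfill; it wraps the legacy callback-based, vendor-prefixed APIs in a Promise:
//Compatibility sketch: fall back to the legacy, prefixed getUserMedia if needed
if (navigator.mediaDevices === undefined) {
    navigator.mediaDevices = {};
}
if (navigator.mediaDevices.getUserMedia === undefined) {
    navigator.mediaDevices.getUserMedia = function(constraints) {
        let getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
        if (!getUserMedia) {
            return Promise.reject(new Error('getUserMedia is not supported in this browser'));
        }
        //Wrap the callback-style legacy API in a Promise
        return new Promise(function(resolve, reject) {
            getUserMedia.call(navigator, constraints, resolve, reject);
        });
    };
}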
//Get the audio/video stream
mediaDevices = navigator.mediaDevices.getUserMedia({audio: true, video: { width: 320, height: 240 }});
mediaDevices.then(stream => {
    //Play the video stream in the video element
    video.srcObject = stream;
    video.play();
    //Feed the audio stream into an AudioNode to capture sample data
    let source = audioCtx.createMediaStreamSource(stream),
        recorder = audioCtx.createScriptProcessor(2048, 1, 1);
    source.connect(recorder);
    recorder.connect(audioCtx.destination);
    recorder.onaudioprocess = function(ev){
        //Capture mono (single-channel) sample data
        let inputBuffer = ev.inputBuffer.getChannelData(0);
        //Send the current video frame as a base64 data URL
        ws.send(canvas.toDataURL('image/jpeg'));
        //Send the raw PCM audio data
        ws.send(inputBuffer.buffer);
    };
});
video.onplay = function(){
    //Draw the video onto the canvas
    interval = setInterval(function(){
        ctx.drawImage(video, 0, 0);
    }, 30);
};
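Note that ScriptProcessorNode is deprecated in the Web Audio spec; the modern replacement is AudioWorkletNode. A rough sketch of the same capture step with a worklet, assuming a hypothetical module file capture-processor.js (worklets process 128-frame blocks rather than 2048, so the receiving side would need to match):
//capture-processor.js: post each mono 128-frame input block to the main thread
class CaptureProcessor extends AudioWorkletProcessor {
    process(inputs) {
        if (inputs[0] && inputs[0][0]) {
            //Copy the block; the underlying buffer is reused between calls
            this.port.postMessage(inputs[0][0].slice(0));
        }
        return true;
    }
}
registerProcessor('capture-processor', CaptureProcessor);

//Main thread (inside an async function), replacing the ScriptProcessor setup above:
await audioCtx.audioWorklet.addModule('capture-processor.js');
let workletNode = new AudioWorkletNode(audioCtx, 'capture-processor');
source.connect(workletNode);
workletNode.port.onmessage = ev => ws.send(ev.data.buffer);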
Receiving the stream
Buffer the received data briefly before starting playback, for a smoother user experience.
let ws = new WebSocket("wss://192.168.3.102"),
    imgChuncks = [],
    audioChuncks = [],
    img = null;
//How binary messages are delivered; the default is Blob
ws.binaryType = 'arraybuffer';
ws.onmessage = function(evt) {
    if(evt.data.byteLength === undefined) {
        //A text message: the base64-encoded image frame
        imgChuncks.push(evt.data);
    }else{
        //A binary message: raw PCM audio data
        audioChuncks.push(new Float32Array(evt.data));
    }
    //Start playback once a few chunks are buffered
    if(!img && audioChuncks.length > 2){
        myplay();
    }
};
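One thing a real player would add on top of this: the buffers grow without bound if the receiver falls behind, which both leaks memory and accumulates latency. A minimal cap, as a sketch (the threshold of 50 chunks is an arbitrary choice, not from the original code):
//Sketch: drop the oldest chunks once the backlog exceeds a fixed cap
const MAX_CHUNKS = 50;
if (audioChuncks.length > MAX_CHUNKS) {
    audioChuncks.splice(0, audioChuncks.length - MAX_CHUNKS);
    imgChuncks.splice(0, imgChuncks.length - MAX_CHUNKS);
}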
Processing the stream
//Play back the received audio and video
function myplay(){
    //Create an img element to display the base64 frames
    img = new Image();
    document.body.appendChild(img);
    //Build the playback graph; the buffer source is never started and only
    //gives the ScriptProcessorNode a silent input to pull from
    let source = audioCtx.createBufferSource(),
        recorder = audioCtx.createScriptProcessor(2048, 1, 1);
    source.connect(recorder);
    recorder.connect(audioCtx.destination);
    recorder.onaudioprocess = function(ev){
        //Swap the img src to produce the video effect
        let frame = imgChuncks.shift();
        if(frame) img.src = frame;
        //Write the buffered PCM into the output (silence when the buffer runs dry)
        ev.outputBuffer.copyToChannel(audioChuncks.shift() || new Float32Array(2048), 0, 0);
    };
}
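One caveat this demo glosses over: copyToChannel assumes the sender and receiver AudioContexts run at the same sampleRate; if they differ, playback is pitch-shifted. A naive linear-interpolation resampler sketch (srcRate would have to be sent by the broadcaster, e.g. once at connection time; it is not part of the protocol above):
//Sketch: linear-interpolation resampling from the sender's rate to the local rate
function resample(samples, srcRate, dstRate) {
    if (srcRate === dstRate) return samples;
    let ratio = srcRate / dstRate,
        outLength = Math.round(samples.length / ratio),
        out = new Float32Array(outLength);
    for (let i = 0; i < outLength; i++) {
        let pos = i * ratio,
            left = Math.floor(pos),
            right = Math.min(left + 1, samples.length - 1),
            frac = pos - left;
        //Interpolate between the two nearest source samples
        out[i] = samples[left] * (1 - frac) + samples[right] * frac;
    }
    return out;
}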
Notes
- This is only a sample program; no optimization has been done.
- When testing, plug headphones into the speaker output, or put the speaker and microphone in different rooms. There is no echo cancellation or clipping handling, so otherwise the feedback loop is painful to listen to.
- Generate your own HTTPS certificate for testing (see the command below).
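One common way to generate a self-signed key and certificate (the file names match what servers.js below expects; any equivalent openssl invocation works):
openssl req -x509 -newkey rsa:2048 -nodes -days 365 -keyout key.pem -out key-cert.pem -subj "/CN=192.168.3.102"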
Complete code
index.html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, user-scalable=no">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title></title>
<link rel="stylesheet" href="">
<style type="text/css" media="screen">
video, canvas {
background-color: #e9e9e9;
margin:0 auto;
display: block;
}
body {
text-align: center;
}
video {
display: none;
}
</style>
</head>
<body>
<canvas width="320px" height="240px">
</canvas>
<video src="" width="320px" height="240px" controls muted></video>
<button type="button" class="start">Start</button>
</body>
<script type="text/javascript">
let ws = new WebSocket("wss://192.168.3.102"),
    imgChuncks = [],
    audioChuncks = [],
    img = null;
//How binary messages are delivered; the default is Blob
ws.binaryType = 'arraybuffer';
ws.onmessage = function(evt) {
    if(evt.data.byteLength === undefined) {
        //A text message: the base64-encoded image frame
        imgChuncks.push(evt.data);
    }else{
        //A binary message: raw PCM audio data
        audioChuncks.push(new Float32Array(evt.data));
    }
    //Start playback once a few chunks are buffered
    if(!img && audioChuncks.length > 2){
        myplay();
    }
};
//Play back the received audio and video
function myplay(){
    //Create an img element to display the base64 frames
    img = new Image();
    document.body.appendChild(img);
    //Build the playback graph; the buffer source is never started and only
    //gives the ScriptProcessorNode a silent input to pull from
    let source = audioCtx.createBufferSource(),
        recorder = audioCtx.createScriptProcessor(2048, 1, 1);
    source.connect(recorder);
    recorder.connect(audioCtx.destination);
    recorder.onaudioprocess = function(ev){
        //Swap the img src to produce the video effect
        let frame = imgChuncks.shift();
        if(frame) img.src = frame;
        //Write the buffered PCM into the output (silence when the buffer runs dry)
        ev.outputBuffer.copyToChannel(audioChuncks.shift() || new Float32Array(2048), 0, 0);
    };
}
let video = document.querySelector('video'),
    start = document.querySelector('.start'),
    canvas = document.querySelector('canvas'),
    ctx = canvas.getContext('2d'),
    audioCtx = new (window.AudioContext || window.webkitAudioContext)(),
    interval = null,
    mediaDevices = null;
//Click Start
start.onclick = function(){
    //Get the audio/video stream
    mediaDevices = navigator.mediaDevices.getUserMedia({audio: true, video: { width: 320, height: 240 }});
    mediaDevices.then(stream => {
        //Play the video stream in the video element
        video.srcObject = stream;
        video.play();
        //Feed the audio stream into an AudioNode to capture sample data
        let source = audioCtx.createMediaStreamSource(stream),
            recorder = audioCtx.createScriptProcessor(2048, 1, 1);
        source.connect(recorder);
        recorder.connect(audioCtx.destination);
        recorder.onaudioprocess = function(ev){
            //Capture mono (single-channel) sample data
            let inputBuffer = ev.inputBuffer.getChannelData(0);
            //Send the current video frame as a base64 data URL
            ws.send(canvas.toDataURL('image/jpeg'));
            //Send the raw PCM audio data
            ws.send(inputBuffer.buffer);
        };
    });
};
video.onplay = function(){
    //Draw the video onto the canvas
    interval = setInterval(function(){
        ctx.drawImage(video, 0, 0);
    }, 30);
};
</script>
</html>
servers.js
let https = require('https'),
    fs = require('fs'),
    WebSocket = require('ws'),
    options = {
        key: fs.readFileSync('./key.pem'),
        cert: fs.readFileSync('./key-cert.pem')
    },
    server = https.createServer(options, function(req, res){
        fs.readFile('./index.html', function(err, data){
            res.writeHead(200, {'Content-Type': 'text/html'});
            res.end(data);
        });
    }).listen(443, function(){
        console.log('Server started successfully');
    });
const wss = new WebSocket.Server({server});
wss.on('connection', (ws) => {
    ws.on('message', function(data) {
        //Relay every message verbatim to all other connected clients
        wss.clients.forEach(function each(client) {
            if (client.readyState === WebSocket.OPEN && client !== ws) {
                client.send(data);
            }
        });
    });
});
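To try the demo (assuming Node.js is installed and the certificate files from the note above are in place):
npm install ws
node servers.js
Then open https://192.168.3.102 in two browsers on the LAN; the one that clicks Start becomes the broadcaster, the other plays the stream.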