websocket探究其与语音、图片的能力

时间:2019-10-07 08:25来源:网页制作
websocket探究其与语音、图片的力量 2015/12/26 · JavaScript· 3 评论 ·websocket 初稿出处:AlloyTeam    提及websocket想比大家不会目生,假若目生的话也没提到,一句话归纳 “WebSocket protocol是

websocket探究其与语音、图片的力量

2015/12/26 · JavaScript · 3 评论 · websocket

初稿出处: AlloyTeam   

提及websocket想比大家不会目生,假若目生的话也没提到,一句话归纳

“WebSocket protocol 是HTML5一种新的协商。它实现了浏览器与服务器全双工通讯”

WebSocket相相比守旧这几个服务器推本事几乎好了太多,大家能够挥手向comet和长轮询这么些手艺说拜拜啦,庆幸大家生存在装有HTML5的时日~

那篇小说大家将分三有个别探索websocket

第一是websocket的科普使用,其次是完全自身创设服务器端websocket,最后是至关心重视要介绍利用websocket制作的八个demo,传输图片和在线语音聊天室,let’s go

一、websocket常见用法

此地介绍三种自己觉着大面积的websocket达成……(潜心:本文营造在node上下文意况

1、socket.io

先给demo

JavaScript

var http = require('http'); var io = require('socket.io'); var server = http.createServer(function(req, res) { res.writeHeader(200, {'content-type': 'text/html;charset="utf-8"'}); res.end(); }).listen(8888); var socket =.io.listen(server); socket.sockets.on('connection', function(socket) { socket.emit('xxx', {options}); socket.on('xxx', function(data) { // do someting }); });

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
var http = require('http');
var io = require('socket.io');

// Minimal HTTP server for socket.io to attach to; it answers every request
// with an empty 200 so a browser can load the page that opens the socket.
var server = http.createServer(function(req, res) {
    res.writeHeader(200, {'content-type': 'text/html;charset="utf-8"'});
    res.end();
}).listen(8888);

// Bug fix: the original read `var socket =.io.listen(server);`, which is a
// syntax error — attach socket.io to the HTTP server via the module handle.
var socket = io.listen(server);

socket.sockets.on('connection', function(socket) {
    // Placeholder event name/payload from the article; `{options}` assumes a
    // variable named `options` exists in scope.
    socket.emit('xxx', {options});

    socket.on('xxx', function(data) {
        // do something
    });
});

信任领会websocket的同班不容许不亮堂socket.io,因为socket.io太著名了,也很棒,它自个儿对过期、握手等都做了拍卖。笔者估摸那也是完结websocket使用最多的章程。socket.io最最最卓绝的少数就是文雅降级,当浏览器不补助websocket时,它会在中间高雅降级为长轮询等,客商和开垦者是无需关切具体达成的,很方便。

只是职业是有两面性的,socket.io因为它的宏观也拉动了坑的地点,最重大的就是臃肿,它的包裹也给多少拉动了非常多的通信冗余,况且温婉降级这一独到之处,也随同浏览器标准化的实行稳步失去了高大

Chrome Supported in version 4+
Firefox Supported in version 4+
Internet Explorer Supported in version 10+
Opera Supported in version 10+
Safari Supported in version 5+

在这里不是批评说socket.io倒霉,已经被淘汰了,而是有时候大家也可以思考部分别的的兑现~

 

2、http模块

正要说了socket.io臃肿,那今后就来讲说便捷的,首先demo

JavaScript

var http = require(‘http’); var server = http.createServer(); server.on(‘upgrade’, function(req) { console.log(req.headers); }); server.listen(8888);

1
2
3
4
5
6
// Bug fix: the original used typographic quotes (‘http’), which are not valid
// JavaScript string delimiters and would not parse.
var http = require('http');
var server = http.createServer();

// A WebSocket handshake arrives as an HTTP Upgrade request; logging the
// headers shows Sec-WebSocket-Key and friends.
server.on('upgrade', function(req) {
    console.log(req.headers);
});

server.listen(8888);

比较粗略的兑现,其实socket.io内部对websocket也是如此达成的,不过前面帮大家封装了有的handle管理,这里大家也足以和谐去丰盛,给出两张socket.io中的源码图

图片 1

图片 2

 

3、ws模块

前边有个例子会用到,这里就提一下,后边具体看~

 

二、本身落成一套server端websocket

赶巧说了三种普及的websocket完毕格局,今后咱们想想,对于开辟者来讲

websocket相对于守旧http数据交互形式以来,扩大了服务器推送的风云,客户端接收到事件再开展相应处理,开拓起来差别并非太大呀

那是因为那个模块已经帮大家将数量帧深入分析此处的坑都填好了,第二部分我们将尝试自个儿创设一套简便的服务器端websocket模块

谢谢次碳酸钴的切磋援助,自家在那边这有的只是轻巧说下,假设对此风野趣好奇的请百度【web技能商量所】

和谐形成服务器端websocket首要有两点,几个是应用net模块接受数据流,还也许有一个是相对来讲官方的帧结构图剖析数据,达成这两部分就早已成功了整整的平底专门的职业

第一给贰个客商端发送websocket握手报文的抓包内容

客商端代码很轻巧

JavaScript

ws = new WebSocket("ws://127.0.0.1:8888");

1
ws = new WebSocket("ws://127.0.0.1:8888");

图片 3

服务器端要针对这个key做验证,就是将key加上一个特定的字符串后做一次sha1运算,再将其结果转换为base64送回去

JavaScript

var crypto = require('crypto'); var WS = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'; require('net').createServer(function(o) { var key; o.on('data',function(e) { if(!key) { // 获取发送过来的KEY key = e.toString().match(/Sec-WebSocket-Key: (.+)/)[1]; // 连接上WS这么些字符串,并做二遍sha1运算,最终调换来Base64 key = crypto.createHash('sha1').update(key+WS).digest('base64'); // 输出重回给客商端的数据,那一个字段都以必需的 o.write('HTTP/1.1 101 Switching Protocolsrn'); o.write('Upgrade: websocketrn'); o.write('Connection: Upgradern'); // 那一个字段带上服务器管理后的KEY o.write('Sec-WebSocket-Accept: '+key+'rn'); // 输出空行,使HTTP头结束 o.write('rn'); } }); }).listen(8888);

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
var crypto = require('crypto');
// Magic GUID defined by RFC 6455 for the Sec-WebSocket-Accept computation.
var WS = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11';

require('net').createServer(function(o) {
    var key;
    o.on('data', function(e) {
        if (!key) {
            // Extract the Sec-WebSocket-Key header the client sent.
            // Bug fix: `(.+)` also captured the trailing '\r' of the CRLF
            // header terminator; exclude CR/LF from the capture.
            key = e.toString().match(/Sec-WebSocket-Key: ([^\r\n]+)/)[1];
            // Append the magic GUID, SHA-1 it, and base64-encode the digest.
            key = crypto.createHash('sha1').update(key + WS).digest('base64');
            // Bug fix: the scraped article lost the backslashes — HTTP
            // headers must be terminated with CRLF ('\r\n'), not 'rn'.
            o.write('HTTP/1.1 101 Switching Protocols\r\n');
            o.write('Upgrade: websocket\r\n');
            o.write('Connection: Upgrade\r\n');
            // Echo the computed accept key back to the client.
            o.write('Sec-WebSocket-Accept: ' + key + '\r\n');
            // Blank line ends the HTTP header section.
            o.write('\r\n');
        }
    });
}).listen(8888);

这么握手部分就早就成功了,后边便是数据帧深入分析与变化的活了

先看下官方提供的帧结构暗中提示图

图片 4

简短介绍下

FIN为是或不是终止的标志

RSV为保留位,默认为0

opcode标记数据类型,是不是分片,是还是不是二进制分析,心跳包等等

付给一张opcode对应图

图片 5

MASK是或不是选取掩码

Payload len和前边extend payload length表示数据长度,这么些是最麻烦的

PayloadLen唯有7位,换来无符号整型的话唯有0到127的取值,这么小的数值当然无法描述异常的大的数额,由此规定当数码长度小于或等于125时候它才作为数据长度的描述,若是这些值为126,则时候背后的多个字节来存款和储蓄数据长度,假如为127则用后边四个字节来囤积数据长度

Masking-key掩码

下边贴出剖析数据帧的代码

JavaScript

function decodeDataFrame(e) { var i = 0, j,s, frame = { FIN: e[i] >> 7, Opcode: e[i++] & 15, Mask: e[i] >> 7, PayloadLength: e[i++] & 0x7F }; if(frame.PayloadLength === 126) { frame.PayloadLength = (e[i++] << 8) + e[i++]; } if(frame.PayloadLength === 127) { i += 4; frame.PayloadLength = (e[i++] << 24) + (e[i++] << 16) + (e[i++] << 8)

  • e[i++]; } if(frame.Mask) { frame.MaskingKey = [e[i++], e[i++], e[i++], e[i++]]; for(j = 0, s = []; j < frame.PayloadLength; j++) { s.push(e[i+j] ^ frame.MaskingKey[j%4]); } } else { s = e.slice(i, i+frame.PayloadLength); } s = new Buffer(s); if(frame.Opcode === 1) { s = s.toString(); } frame.PayloadData = s; return frame; }
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
/**
 * Parse one WebSocket data frame (RFC 6455 §5.2) from a byte buffer.
 *
 * @param {Buffer} e - raw bytes of a single complete frame.
 * @returns {{FIN:number, Opcode:number, Mask:number, PayloadLength:number,
 *            MaskingKey?:number[], PayloadData:(Buffer|string)}}
 *          Text frames (opcode 1) yield a UTF-8 string payload, everything
 *          else yields a Buffer.
 */
function decodeDataFrame(e) {
    var i = 0, j, s,
        frame = {
            FIN: e[i] >> 7,               // final-fragment flag (top bit of byte 0)
            Opcode: e[i++] & 15,          // low nibble: 1=text, 2=binary, 8=close, ...
            Mask: e[i] >> 7,              // client-to-server frames must be masked
            PayloadLength: e[i++] & 0x7F  // 7-bit length, or the 126/127 escapes
        };

    // 126 => real length is in the next 2 bytes (big-endian).
    if (frame.PayloadLength === 126) {
        frame.PayloadLength = (e[i++] << 8) + e[i++];
    }

    // 127 => real length is in the next 8 bytes; the high 4 bytes are skipped,
    // so this implementation only supports payloads smaller than 2^32 bytes.
    if (frame.PayloadLength === 127) {
        i += 4;
        frame.PayloadLength = (e[i++] << 24) + (e[i++] << 16) + (e[i++] << 8) + e[i++];
    }

    if (frame.Mask) {
        // 4-byte masking key, then XOR-unmask every payload byte.
        frame.MaskingKey = [e[i++], e[i++], e[i++], e[i++]];

        for (j = 0, s = []; j < frame.PayloadLength; j++) {
            s.push(e[i + j] ^ frame.MaskingKey[j % 4]);
        }
    } else {
        s = e.slice(i, i + frame.PayloadLength);
    }

    // Buffer.from replaces the deprecated new Buffer() constructor.
    s = Buffer.from(s);

    // Text frames are decoded to a UTF-8 string for the caller's convenience.
    if (frame.Opcode === 1) {
        s = s.toString();
    }

    frame.PayloadData = s;
    return frame;
}

接下来是变化数据帧的

JavaScript

function encodeDataFrame(e) { var s = [], o = new Buffer(e.PayloadData), l = o.length; s.push((e.FIN << 7) + e.Opcode); if(l < 126) { s.push(l); } else if(l < 0x10000) { s.push(126, (l&0xFF00) >> 8, l&0xFF); } else { s.push(127, 0, 0, 0, 0, (l&0xFF000000) >> 24, (l&0xFF0000) >> 16, (l&0xFF00) >> 8, l&0xFF); } return Buffer.concat([new Buffer(s), o]); }

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
/**
 * Build an unmasked server-to-client WebSocket frame (RFC 6455 §5.2).
 *
 * @param {{FIN:number, Opcode:number, PayloadData:(string|Buffer)}} e
 * @returns {Buffer} header + payload ready to write to the socket.
 */
function encodeDataFrame(e) {
    var s = [],
        // Buffer.from replaces the deprecated new Buffer() constructor.
        o = Buffer.from(e.PayloadData),
        l = o.length;

    // Byte 0: FIN flag in the top bit, opcode in the low nibble.
    s.push((e.FIN << 7) + e.Opcode);

    if (l < 126) {
        // Small payload: the length fits directly in the 7-bit field.
        s.push(l);
    } else if (l < 0x10000) {
        // 126 escape: 16-bit big-endian extended length follows.
        s.push(126, (l & 0xFF00) >> 8, l & 0xFF);
    } else {
        // 127 escape: 64-bit extended length; the high 4 bytes are written
        // as zero, limiting payloads to under 2^32 bytes.
        s.push(127, 0, 0, 0, 0, (l & 0xFF000000) >> 24, (l & 0xFF0000) >> 16, (l & 0xFF00) >> 8, l & 0xFF);
    }

    return Buffer.concat([Buffer.from(s), o]);
}

都以根据帧结构暗中表示图上的去管理,在此间不细讲,文章首要在下某些,倘若对那块感兴趣的话能够运动web本领商讨所~

 

三、websocket传输图片和websocket语音聊天室

正片环节到了,那篇文章最主要的照旧展现一下websocket的部分用到情况

1、传输图片

我们先思量传输图片的手续是什么,首先服务器收到到客商端央浼,然后读取图片文件,将二进制数据转发给顾客端,客商端如哪里理?当然是使用FileReader对象了

先给客商端代码

JavaScript

var ws = new WebSocket("ws://xxx.xxx.xxx.xxx:8888"); ws.onopen = function(){ console.log("握手成功"); }; ws.onmessage = function(e) { var reader = new FileReader(); reader.onload = function(event) { var contents = event.target.result; var a = new Image(); a.src = contents; document.body.appendChild(a); } reader.readAsDataUCR-VL(e.data); };

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
// Client side of the image demo: every binary WebSocket message is one raw
// image; FileReader converts the Blob into a data: URL shown as an <img>.
var ws = new WebSocket("ws://xxx.xxx.xxx.xxx:8888");
 
ws.onopen = function(){
    console.log("握手成功");
};
 
ws.onmessage = function(e) {
    var reader = new FileReader();
    reader.onload = function(event) {
        // event.target.result is a base64 data: URL usable directly as a src.
        var contents = event.target.result;
        var a = new Image();
        a.src = contents;
        document.body.appendChild(a);
    }
    // e.data is a Blob because the server sends opcode-2 (binary) frames.
    reader.readAsDataURL(e.data);
};

接收到消息后调用readAsDataURL,直接把图片的base64地址添加到页面中

转到服务器端代码

JavaScript

fs.readdir("skyland", function(err, files) { if(err) { throw err; } for(var i = 0; i < files.length; i++) { fs.readFile('skyland/' + files[i], function(err, data) { if(err) { throw err; } o.write(encodeImgFrame(data)); }); } }); function encodeImgFrame(buf) { var s = [], l = buf.length, ret = []; s.push((1 << 7) + 2); if(l < 126) { s.push(l); } else if(l < 0x10000) { s.push(126, (l&0xFF00) >> 8, l&0xFF); } else { s.push(127, 0, 0, 0, 0, (l&0xFF000000) >> 24, (l&0xFF0000) >> 16, (l&0xFF00) >> 8, l&0xFF); } return Buffer.concat([new Buffer(s), buf]); }

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
// Read every file in ./skyland and push each one to the connected client as
// a binary WebSocket frame.
// NOTE(review): `o` (the client socket) and encodeImgFrame come from
// surrounding code not shown in this snippet.
fs.readdir("skyland", function(err, files) {
if(err) {
throw err;
}
for(var i = 0; i < files.length; i++) {
fs.readFile('skyland/' + files[i], function(err, data) {
if(err) {
throw err;
}
 
o.write(encodeImgFrame(data));
});
}
});
 
/**
 * Frame a binary buffer as one unmasked WebSocket frame with FIN=1 and
 * opcode 2 (Binary Frame), so the client will not attempt toString() on it.
 *
 * @param {Buffer} buf - raw image bytes.
 * @returns {Buffer} frame header + payload.
 */
function encodeImgFrame(buf) {
    // Removed the unused `ret` local from the original.
    var s = [],
        l = buf.length;

    // 0x82 = FIN bit set + binary opcode.
    s.push((1 << 7) + 2);

    if (l < 126) {
        s.push(l);
    } else if (l < 0x10000) {
        // 16-bit big-endian extended length.
        s.push(126, (l & 0xFF00) >> 8, l & 0xFF);
    } else {
        // 64-bit length field with the high 4 bytes zeroed (< 2^32 only).
        s.push(127, 0, 0, 0, 0, (l & 0xFF000000) >> 24, (l & 0xFF0000) >> 16, (l & 0xFF00) >> 8, l & 0xFF);
    }

    // Buffer.from replaces the deprecated new Buffer() constructor.
    return Buffer.concat([Buffer.from(s), buf]);
}

注意s.push((1 << 7) + 2)这一句,这里直接把opcode写死为2(Binary Frame),这样客户端接收到数据后不会尝试进行toString,否则会报错~

代码很简短,在此地向大家大饱眼福一下websocket传输图片的进程怎么着

测验非常多张图纸,总共8.24M

日常说来静态财富服务器要求20s左右(服务器较远)

cdn需要2.8s左右

这大家的websocket格局呢??!

答案是一致供给20s左右,是还是不是很失望……速度便是慢在传输上,并非服务器读取图片,本机上同一的图形资源,1s左右得以成功……那样看来数据流也无力回天冲破距离的限制升高传输速度

上边大家来探视websocket的另一个用法~

 

用websocket搭建语音聊天室

先来收拾一下语音聊天室的机能

顾客步入频道随后从Mike风输入音频,然后发送给后台转载给频道里面包车型地铁别的人,别的人接收到消息举办广播

看起来困难在八个地点,第二个是节奏的输入,第二是收取到多少流进行播报

先说音频的输入,这里运用了HTML5的getUserMedia方法,但是注意了,本条情势上线是有红磡的,最终说,先贴代码

JavaScript

if (navigator.getUserMedia) { navigator.getUserMedia( { audio: true }, function (stream) { var rec = new SRecorder(stream); recorder = rec; }) }

1
2
3
4
5
6
7
8
// Request microphone-only access ({audio: true}); on user approval, wrap the
// MediaStream in an SRecorder and store it in the outer `recorder` variable.
if (navigator.getUserMedia) {
    navigator.getUserMedia(
        { audio: true },
        function (stream) {
            var rec = new SRecorder(stream);
            recorder = rec;
        })
}

首先个参数是{audio: true},只启用音频,然后创设了一个SRecorder对象,后续的操作基本上都在那些指标上海展览中心开。此时假诺代码运转在地方的话浏览器应该提示您是还是不是启用Mike风输入,分明现在就开发银行了

接下去大家看下SRecorder构造函数是什么,给出首要的有些

JavaScript

var SRecorder = function(stream) { …… var context = new AudioContext(); var audioInput = context.createMediaStreamSource(stream); var recorder = context.createScriptProcessor(4096, 1, 1); …… }

1
2
3
4
5
6
7
// Abridged SRecorder constructor (the "……" lines are elided by the article);
// shows the Web Audio capture pipeline being set up:
// mic MediaStream -> source node -> ScriptProcessor (4096-sample buffer,
// 1 input channel, 1 output channel).
var SRecorder = function(stream) {
    ……
   var context = new AudioContext();
    var audioInput = context.createMediaStreamSource(stream);
    var recorder = context.createScriptProcessor(4096, 1, 1);
    ……
}

AudioContext是一个音频上下文对象,有做过声音过滤处理的同学应该知道“一段音频到达扬声器进行播放之前,半路对其进行拦截,于是我们就获取到了音频数据,这个拦截工作是由window.AudioContext来做的,我们所有对音频的操作都基于这个对象”,我们可以通过AudioContext创建不同的AudioNode节点,然后添加滤镜播放特别的声音

录音原理同样,大家也需求走奥迪oContext,可是多了一步对迈克风音频输入的吸取上,实际不是像之前管理音频一下用ajax央浼音频的ArrayBuffer对象再decode,迈克风的承受供给用到createMediaStreamSource方法,注意那些参数正是getUserMedia方法第3个参数的参数

加以createScriptProcessor方法,它官方的解释是:

Creates a ScriptProcessorNode, which can be used for direct audio processing via JavaScript.

——————

席卷下就是以此方式是选取JavaScript去管理音频搜聚操作

毕竟到点子收集了!胜利就在前方!

接下去让我们把话筒的输入和韵律收罗相连起来

JavaScript

audioInput.connect(recorder); recorder.connect(context.destination);

1
2
// Wire the audio graph: the mic source feeds the ScriptProcessor, and the
// processor is connected on to the context's final destination.
audioInput.connect(recorder);
recorder.connect(context.destination);

context.destination官方解释如下

The destination property of the AudioContext interface returns an AudioDestinationNoderepresenting the final destination of all audio in the context.

——————

context.destination重返代表在遇到中的音频的结尾目标地。

好,到了此时,大家还亟需贰个监听音频收集的事件

JavaScript

recorder.onaudioprocess = function (e) { audioData.input(e.inputBuffer.getChannelData(0)); }

1
2
3
// Audio-processing callback: append each captured chunk (channel 0 samples)
// to the audioData accumulator for later WAV encoding.
recorder.onaudioprocess = function (e) {
    audioData.input(e.inputBuffer.getChannelData(0));
}

audioData是三个目的,那个是在英特网找的,笔者就加了三个clear方法因为前面会用到,首要有格外encodeWAV方法十分的赞,外人进行了累累的音频压缩和优化,那个最后会陪伴完整的代码一齐贴出来

此时总体客商走入频道随后从Mike风输入音频环节就早就做到啦,下边就该是向劳动器端发送音频流,稍微有一点点蛋疼的来了,刚才大家说了,websocket通过opcode不相同可以象征回去的数码是文件依旧二进制数据,而笔者辈onaudioprocess中input进去的是数组,最后播放声音须要的是Blob,{type: ‘audio/wav’}的靶子,那样我们就务供给在发送此前将数组转换到WAV的Blob,此时就用到了上边说的encodeWAV方法

服务器如同比非常粗略,只要转载就行了

地面测量试验确实能够,只是天坑来了!将次第跑在服务器上时候调用getUserMedia方法提醒笔者不能够不在多个平安的景况,也便是索要https,那表示ws也非得换来wss……为此服务器代码就未有接纳大家本身包装的拉手、分析和编码了,代码如下

JavaScript

var https = require('https'); var fs = require('fs'); var ws = require('ws'); var userMap = Object.create(null); var options = { key: fs.readFileSync('./privatekey.pem'), cert: fs.readFileSync('./certificate.pem') }; var server = https.createServer(options, function(req, res) { res.writeHead({ 'Content-Type' : 'text/html' }); fs.readFile('./testaudio.html', function(err, data) { if(err) { return ; } res.end(data); }); }); var wss = new ws.Server({server: server}); wss.on('connection', function(o) { o.on('message', function(message) { if(message.indexOf('user') === 0) { var user = message.split(':')[1]; userMap[user] = o; } else { for(var u in userMap) { userMap[u].send(message); } } }); }); server.listen(8888);

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
var https = require('https');
var fs = require('fs');
var ws = require('ws');

// Map of user name -> live socket; acts as the single broadcast "channel".
var userMap = Object.create(null);

// getUserMedia requires a secure context, so the page (and therefore the
// WebSocket) must be served over HTTPS/WSS.
var options = {
    key: fs.readFileSync('./privatekey.pem'),
    cert: fs.readFileSync('./certificate.pem')
};

var server = https.createServer(options, function(req, res) {
    // Bug fix: writeHead requires the status code as its first argument;
    // the original passed only the headers object.
    res.writeHead(200, {
        'Content-Type' : 'text/html'
    });

    fs.readFile('./testaudio.html', function(err, data) {
        if (err) {
            // Best effort: silently drop the response if the page is missing.
            return;
        }

        res.end(data);
    });
});

var wss = new ws.Server({server: server});

wss.on('connection', function(o) {
    o.on('message', function(message) {
        if (message.indexOf('user') === 0) {
            // "user:<name>" registers the sender in the channel.
            var user = message.split(':')[1];
            userMap[user] = o;
        } else {
            // Anything else (audio blobs) is relayed to every registered user.
            for (var u in userMap) {
                userMap[u].send(message);
            }
        }
    });
});

server.listen(8888);

代码仍旧相当的粗略的,使用https模块,然后用了起来说的ws模块,userMap是模拟的频段,只兑现转载的主干职能

行使ws模块是因为它拾叁分https完毕wss实在是太有利了,和逻辑代码0争持

https的搭建在那边就不提了,重假如亟需私钥、CSOdyssey证书具名和证件文件,感兴趣的同学可以了然下(不过不了解的话在现网遇到也用持续getUserMedia……)

上边是总体的前端代码

JavaScript

var a = document.getElementById('a'); var b = document.getElementById('b'); var c = document.getElementById('c'); navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia; var gRecorder = null; var audio = document.querySelector('audio'); var door = false; var ws = null; b.onclick = function() { if(a.value === '') { alert('请输入顾客名'); return false; } if(!navigator.getUserMedia) { alert('抱歉您的器材无立陶宛共和国(Republic of Lithuania)语音聊天'); return false; } SRecorder.get(function (rec) { gRecorder = rec; }); ws = new WebSocket("wss://x.x.x.x:8888"); ws.onopen = function() { console.log('握手成功'); ws.send('user:' + a.value); }; ws.onmessage = function(e) { receive(e.data); }; document.onkeydown = function(e) { if(e.keyCode === 65) { if(!door) { gRecorder.start(); door = true; } } }; document.onkeyup = function(e) { if(e.keyCode === 65) { if(door) { ws.send(gRecorder.getBlob()); gRecorder.clear(); gRecorder.stop(); door = false; } } } } c.onclick = function() { if(ws) { ws.close(); } } var SRecorder = function(stream) { config = {}; config.sampleBits = config.smapleBits || 8; config.sampleRate = config.sampleRate || (44100 / 6); var context = new 奥迪(Audi)oContext(); var audioInput = context.createMediaStreamSource(stream); var recorder = context.createScriptProcessor(4096, 1, 1); var audioData = { size: 0 //录音文件长度 , buffer: [] //录音缓存 , input萨姆pleRate: context.sampleRate //输入采集样品率 , inputSampleBits: 16 //输入采集样品数位 8, 16 , output萨姆pleRate: config.sampleRate //输出采样率 , oututSampleBits: config.sampleBits //输出采集样品数位 8, 16 , clear: function() { this.buffer = []; this.size = 0; } , input: function (data) { this.buffer.push(new Float32Array(data)); this.size += data.length; } , compress: function () { //合併压缩 //合并 var data = new Float32Array(this.size); var offset = 0; for (var i = 0; i < this.buffer.length; i++) { data.set(this.buffer[i], offset); offset += this.buffer[i].length; } //压缩 var compression = parseInt(this.inputSampleRate / this.outputSampleRate); var length 
= data.length / compression; var result = new Float32Array(length); var index = 0, j = 0; while (index < length) { result[index] = data[j]; j += compression; index++; } return result; } , encodeWAV: function () { var sampleRate = Math.min(this.inputSampleRate, this.outputSampleRate); var sampleBits = Math.min(this.inputSampleBits, this.oututSampleBits); var bytes = this.compress(); var dataLength = bytes.length * (sampleBits / 8); var buffer = new ArrayBuffer(44 + dataLength); var data = new DataView(buffer); var channelCount = 1;//单声道 var offset = 0; var writeString = function (str) { for (var i = 0; i < str.length; i++) { data.setUint8(offset + i, str.charCodeAt(i)); } }; // 财富沟通文件标记符 writeString('牧马人IFF'); offset += 4; // 下个地点开首到文件尾总字节数,即文件大小-8 data.setUint32(offset, 36 + dataLength, true); offset += 4; // WAV文件表明 writeString('WAVE'); offset += 4; // 波形格式标记 writeString('fmt '); offset += 4; // 过滤字节,平常为 0x10 = 16 data.setUint32(offset, 16, true); offset += 4; // 格式种类 (PCM格局采集样品数据) data.setUint16(offset, 1, true); offset += 2; // 通道数 data.setUint16(offset, channelCount, true); offset += 2; // 采样率,每秒样本数,表示种种通道的播报速度 data.setUint32(offset, sampleRate, true); offset += 4; // 波形数据传输率 (每秒平均字节数) 单声道×每秒数据位数×每样本数据位/8 data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true); offset += 4; // 快数据调节数 采集样品叁次占用字节数 单声道×每样本的数目位数/8 data.setUint16(offset, channelCount * (sampleBits / 8), true); offset += 2; // 每样本数量位数 data.setUint16(offset, sampleBits, true); offset += 2; // 数据标记符 writeString('data'); offset += 4; // 采集样品数据总的数量,即数据总大小-44 data.setUint32(offset, dataLength, true); offset += 4; // 写入采样数据 if (sampleBits === 8) { for (var i = 0; i < bytes.length; i++, offset++) { var s = Math.max(-1, Math.min(1, bytes[i])); var val = s < 0 ? 
s * 0x8000 : s * 0x7FFF; val = parseInt(255 / (65535 / (val + 32768))); data.setInt8(offset, val, true); } } else { for (var i = 0; i < bytes.length; i++, offset += 2) { var s = Math.max(-1, Math.min(1, bytes[i])); data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true); } } return new Blob([data], { type: 'audio/wav' }); } }; this.start = function () { audioInput.connect(recorder); recorder.connect(context.destination); } this.stop = function () { recorder.disconnect(); } this.getBlob = function () { return audioData.encodeWAV(); } this.clear = function() { audioData.clear(); } recorder.onaudioprocess = function (e) { audioData.input(e.inputBuffer.getChannelData(0)); } }; SRecorder.get = function (callback) { if (callback) { if (navigator.getUserMedia) { navigator.getUserMedia( { audio: true }, function (stream) { var rec = new SRecorder(stream); callback(rec); }) } } } function receive(e) { audio.src = window.URL.createObjectURL(e); }

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
// DOM handles: `a` = user-name input, `b` = join button, `c` = leave button
// (presumed from the handlers below — TODO confirm against the HTML).
var a = document.getElementById('a');
var b = document.getElementById('b');
var c = document.getElementById('c');
 
// Prefix shim for older WebKit browsers.
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia;
 
var gRecorder = null;                        // active SRecorder instance
var audio = document.querySelector('audio'); // playback element for received clips
var door = false;                            // push-to-talk latch: true while recording
var ws = null;                               // WebSocket to the relay server
 
// "Join" button: validate the user name, obtain a recorder, open the secure
// WebSocket, and wire push-to-talk on the "A" key (keyCode 65).
b.onclick = function() {
    if(a.value === '') {
        alert('请输入用户名');
        return false;
    }
    if(!navigator.getUserMedia) {
        alert('抱歉您的设备无法语音聊天');
        return false;
    }
 
    // Asynchronously acquires mic access and stores the recorder globally.
    SRecorder.get(function (rec) {
        gRecorder = rec;
    });
 
    // getUserMedia only works in a secure context, hence wss://.
    ws = new WebSocket("wss://x.x.x.x:8888");
 
    ws.onopen = function() {
        console.log('握手成功');
        // Register this user on the server's broadcast channel.
        ws.send('user:' + a.value);
    };
 
    ws.onmessage = function(e) {
        receive(e.data);
    };
 
    // Key down: start recording once; `door` prevents auto-repeat restarts.
    document.onkeydown = function(e) {
        if(e.keyCode === 65) {
            if(!door) {
                gRecorder.start();
                door = true;
            }
        }
    };
 
    // Key up: send the recorded WAV blob, then reset the recorder state.
    document.onkeyup = function(e) {
        if(e.keyCode === 65) {
            if(door) {
                ws.send(gRecorder.getBlob());
                gRecorder.clear();
                gRecorder.stop();
                door = false;
            }
        }
    }
}
 
// "Leave" button: tear down the chat connection if one was opened.
c.onclick = function() {
    var sock = ws;
    if (sock) {
        sock.close();
    }
}
 
// Push-to-talk recorder built on Web Audio: captures mic samples via a
// ScriptProcessorNode, buffers them, and encodes a mono WAV Blob on demand.
var SRecorder = function(stream) {
    // NOTE(review): `config` is assigned without var/let, so it leaks to the
    // global scope — likely unintentional; left unchanged here.
    config = {};
 
    // NOTE(review): `smapleBits` is a typo (and `config` was just created
    // empty), so sampleBits always falls back to 8 and sampleRate to 7350 Hz.
    config.sampleBits = config.smapleBits || 8;
    config.sampleRate = config.sampleRate || (44100 / 6);
 
    // Audio graph: mic stream -> source node -> ScriptProcessor (4096-sample
    // buffer, 1 input channel, 1 output channel).
    var context = new AudioContext();
    var audioInput = context.createMediaStreamSource(stream);
    var recorder = context.createScriptProcessor(4096, 1, 1);
 
    var audioData = {
        size: 0          // total samples buffered so far
        , buffer: []     // recording cache: one Float32Array chunk per audio event
        , inputSampleRate: context.sampleRate    // input (device) sample rate
        , inputSampleBits: 16       // input sample depth: 8 or 16
        , outputSampleRate: config.sampleRate    // output sample rate
        , oututSampleBits: config.sampleBits       // output sample depth: 8 or 16 (sic: "outut")
        , clear: function() {
            this.buffer = [];
            this.size = 0;
        }
        , input: function (data) {
            // Copy each incoming chunk; the source buffer is reused by the node.
            this.buffer.push(new Float32Array(data));
            this.size += data.length;
        }
        , compress: function () { // merge buffered chunks, then downsample
            // Merge all cached chunks into one contiguous Float32Array.
            var data = new Float32Array(this.size);
            var offset = 0;
            for (var i = 0; i < this.buffer.length; i++) {
                data.set(this.buffer[i], offset);
                offset += this.buffer[i].length;
            }
            // Decimate: keep every `compression`-th sample (no filtering).
            var compression = parseInt(this.inputSampleRate / this.outputSampleRate);
            var length = data.length / compression;
            var result = new Float32Array(length);
            var index = 0, j = 0;
            while (index < length) {
                result[index] = data[j];
                j += compression;
                index++;
            }
            return result;
        }
        , encodeWAV: function () {
            // Build a 44-byte RIFF/WAVE header followed by PCM sample data.
            var sampleRate = Math.min(this.inputSampleRate, this.outputSampleRate);
            var sampleBits = Math.min(this.inputSampleBits, this.oututSampleBits);
            var bytes = this.compress();
            var dataLength = bytes.length * (sampleBits / 8);
            var buffer = new ArrayBuffer(44 + dataLength);
            var data = new DataView(buffer);
 
            var channelCount = 1; // mono
            var offset = 0;
 
            var writeString = function (str) {
                for (var i = 0; i < str.length; i++) {
                    data.setUint8(offset + i, str.charCodeAt(i));
                }
            };
 
            // RIFF chunk identifier
            writeString('RIFF'); offset += 4;
            // bytes from the next address to end of file, i.e. file size - 8
            data.setUint32(offset, 36 + dataLength, true); offset += 4;
            // WAVE format marker
            writeString('WAVE'); offset += 4;
            // fmt sub-chunk marker
            writeString('fmt '); offset += 4;
            // fmt chunk size, normally 0x10 = 16
            data.setUint32(offset, 16, true); offset += 4;
            // audio format category (1 = PCM)
            data.setUint16(offset, 1, true); offset += 2;
            // channel count
            data.setUint16(offset, channelCount, true); offset += 2;
            // sample rate: samples per second per channel
            data.setUint32(offset, sampleRate, true); offset += 4;
            // byte rate: channels x sampleRate x bits-per-sample / 8
            data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true); offset += 4;
            // block align: bytes per sample frame = channels x bits / 8
            data.setUint16(offset, channelCount * (sampleBits / 8), true); offset += 2;
            // bits per sample
            data.setUint16(offset, sampleBits, true); offset += 2;
            // data sub-chunk identifier
            writeString('data'); offset += 4;
            // data chunk size, i.e. total size - 44
            data.setUint32(offset, dataLength, true); offset += 4;
            // Write samples, clamped to [-1, 1] and scaled to integer range.
            if (sampleBits === 8) {
                for (var i = 0; i < bytes.length; i++, offset++) {
                    var s = Math.max(-1, Math.min(1, bytes[i]));
                    var val = s < 0 ? s * 0x8000 : s * 0x7FFF;
                    val = parseInt(255 / (65535 / (val + 32768)));
                    // NOTE(review): setInt8 takes no endianness flag; the
                    // third argument here is ignored.
                    data.setInt8(offset, val, true);
                }
            } else {
                for (var i = 0; i < bytes.length; i++, offset += 2) {
                    var s = Math.max(-1, Math.min(1, bytes[i]));
                    data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
                }
            }
 
            return new Blob([data], { type: 'audio/wav' });
        }
    };
 
    // Start recording: wiring the graph makes onaudioprocess begin firing.
    this.start = function () {
        audioInput.connect(recorder);
        recorder.connect(context.destination);
    }
 
    // Stop recording by detaching the processor from the graph.
    this.stop = function () {
        recorder.disconnect();
    }
 
    // Encode everything buffered so far into a WAV Blob.
    this.getBlob = function () {
        return audioData.encodeWAV();
    }
 
    // Drop all buffered samples.
    this.clear = function() {
        audioData.clear();
    }
 
    // Append each captured chunk (channel 0) to the accumulator.
    recorder.onaudioprocess = function (e) {
        audioData.input(e.inputBuffer.getChannelData(0));
    }
};
 
// Factory: requests microphone access and hands a new SRecorder to the
// callback once the user grants permission. Silently does nothing when
// `callback` is missing or getUserMedia is unavailable.
SRecorder.get = function (callback) {
    if (callback) {
        if (navigator.getUserMedia) {
            navigator.getUserMedia(
                { audio: true },
                function (stream) {
                    var rec = new SRecorder(stream);
                    callback(rec);
                })
        }
    }
}
 
/**
 * Play back a received audio Blob by pointing the page's <audio> element
 * at a freshly created object URL for it.
 */
function receive(blob) {
    var clipUrl = window.URL.createObjectURL(blob);
    audio.src = clipUrl;
}

注意:按住a键说话,放开a键发送

投机有品味不开关实时对讲,通过setInterval发送,但意识杂音有一些重,效果糟糕,这几个须求encodeWAV再一层的包装,多去除碰到杂音的意义,本身挑选了更上一层楼便利的按钮说话的形式

 

那篇小说里第一展望了websocket的现在,然后根据专门的工作大家和好尝试解析和浮动数据帧,对websocket有了更加深一步的摸底

末尾经过八个demo见到了websocket的潜在的力量,关于语音聊天室的demo涉及的较广,未有接触过奥迪oContext对象的同班最棒先领悟下奥迪oContext

小提及此地就终止啦~有何样主见和难点款待我们提议来一齐商量搜求~

 

1 赞 11 收藏 3 评论

图片 6

编辑:网页制作 本文来源:websocket搜求其与话音

关键词: