web服务版智能语音对话

在前几篇的基础上,我们有了语音识别,语音合成,智能机器人,那么我们是不是可以创建一个可以实时对话的机器人了?

当然可以!

一,web版智能对话

前提:你得会flask和websocket

1 ,创建flask项目

#!/usr/bin/env python
# -*- coding:utf8 -*-
"""HTTP side of the voice-chat demo: serves the page and generated audio files."""

import os
from flask import Flask, render_template, send_file

app = Flask(__name__)


@app.route("/index")
def index():
    """Serve the voice-chat demo page (templates/index1.html)."""
    return render_template("index1.html")


@app.route("/get_audio/<file_path>/<file_name>")
def get_audio(file_path, file_name):
    """Return an audio file so the browser's <audio> tag can play it.

    Both segments come straight from the URL, i.e. from untrusted input,
    so reject any directory-traversal attempt before touching the disk.
    """
    if ".." in file_path or ".." in file_name or os.path.isabs(file_path):
        return "invalid path", 400
    new_file_path = os.path.join(file_path, file_name)
    return send_file(new_file_path)


if __name__ == '__main__':
    # NOTE: the original used typographic quotes (‘__main__‘), which is a
    # SyntaxError in Python.
    app.run("127.0.0.1", 8000, debug=True)

index1.html(注意:要与上面 render_template("index1.html") 中的文件名一致):用的时候粘贴过去就行!

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Title</title>

</head>
<body>
<audio src="" autoplay controls id="player"></audio>  <!-- 该标签在有src之后,autoplay属性控制着自动播放-->

<button onclick="start_reco()">录制消息</button>
<br>
<button onclick="stop_reco()">发送语音消息</button>

</body>
<script src="../static/Recorder.js"></script>
<script type="application/javascript">
    var serv = "http://127.0.0.1:8000";
    var ws_serv = "ws://127.0.0.1:8080/ws";

    var get_music = serv + "/get_audio/";
    var ws = new WebSocket(ws_serv);
    // 拿到后端返回的回答的语音文件的路径,再去请求该文件,自动播放
    ws.onmessage = function (event) {
        document.getElementById("player").src = get_music + event.data;
    };

    var reco = null;
    // 老版 Safari 只有带 webkit 前缀的 AudioContext,做个兼容
    var audio_context = new (window.AudioContext || window.webkitAudioContext)();

    function create_stream(user_media) {
        // 创建一个 流 的容器,存放二进制语音
        var stream_input = audio_context.createMediaStreamSource(user_media);
        reco = new Recorder(stream_input);
    }

    // 优先使用现代的 mediaDevices API;navigator.getUserMedia 已废弃,
    // 新版浏览器里已经移除,只留作旧浏览器的兜底。
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia({audio: true})
            .then(create_stream)
            .catch(function (err) {
                console.log(err);
            });
    } else {
        navigator.getUserMedia = (navigator.getUserMedia ||
            navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia ||
            navigator.msGetUserMedia);
        navigator.getUserMedia({audio: true}, create_stream, function (err) {
            console.log(err);
        });
    }

    function start_reco() {
        reco.record();  // 把语音的二进制文件保存在 流 中
    }

    function stop_reco() {
        reco.stop();  // 停止存入
        get_audio();
        reco.clear();  // 把 流 清空,方便第二次使用
    }

    function get_audio() {  // 获取一个wav语音文件
        reco.exportWAV(function (wav_file) {
            // wav_file = Blob对象,直接通过 WebSocket 发送给后端
            ws.send(wav_file);
        });
    }

</script>
</html>

里面依赖的 Recorder.js 文件,用于把音频的二进制数据存储在流那个容器中,用的时候粘贴过去就行。

(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.Recorder = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module ‘"+o+"‘");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
"use strict";

module.exports = require("./recorder").Recorder;

},{"./recorder":2}],2:[function(require,module,exports){
‘use strict‘;

var _createClass = (function () {
    function defineProperties(target, props) {
        for (var i = 0; i < props.length; i++) {
            var descriptor = props[i];descriptor.enumerable = descriptor.enumerable || false;descriptor.configurable = true;if ("value" in descriptor) descriptor.writable = true;Object.defineProperty(target, descriptor.key, descriptor);
        }
    }return function (Constructor, protoProps, staticProps) {
        if (protoProps) defineProperties(Constructor.prototype, protoProps);if (staticProps) defineProperties(Constructor, staticProps);return Constructor;
    };
})();

Object.defineProperty(exports, "__esModule", {
    value: true
});
exports.Recorder = undefined;

var _inlineWorker = require(‘inline-worker‘);

var _inlineWorker2 = _interopRequireDefault(_inlineWorker);

function _interopRequireDefault(obj) {
    return obj && obj.__esModule ? obj : { default: obj };
}

function _classCallCheck(instance, Constructor) {
    if (!(instance instanceof Constructor)) {
        throw new TypeError("Cannot call a class as a function");
    }
}

var Recorder = exports.Recorder = (function () {
    function Recorder(source, cfg) {
        var _this = this;

        _classCallCheck(this, Recorder);

        this.config = {
            bufferLen: 4096,
            numChannels: 2,
            mimeType: ‘audio_pcm/wav‘
        };
        this.recording = false;
        this.callbacks = {
            getBuffer: [],
            exportWAV: []
        };

        Object.assign(this.config, cfg);
        this.context = source.context;
        this.node = (this.context.createScriptProcessor || this.context.createJavaScriptNode).call(this.context, this.config.bufferLen, this.config.numChannels, this.config.numChannels);

        this.node.onaudioprocess = function (e) {
            if (!_this.recording) return;

            var buffer = [];
            for (var channel = 0; channel < _this.config.numChannels; channel++) {
                buffer.push(e.inputBuffer.getChannelData(channel));
            }
            _this.worker.postMessage({
                command: ‘record‘,
                buffer: buffer
            });
        };

        source.connect(this.node);
        this.node.connect(this.context.destination); //this should not be necessary

        var self = {};
        this.worker = new _inlineWorker2.default(function () {
            var recLength = 0,
                recBuffers = [],
                sampleRate = undefined,
                numChannels = undefined;

            self.onmessage = function (e) {
                switch (e.data.command) {
                    case ‘init‘:
                        init(e.data.config);
                        break;
                    case ‘record‘:
                        record(e.data.buffer);
                        break;
                    case ‘exportWAV‘:
                        exportWAV(e.data.type);
                        break;
                    case ‘getBuffer‘:
                        getBuffer();
                        break;
                    case ‘clear‘:
                        clear();
                        break;
                }
            };

            function init(config) {
                sampleRate = config.sampleRate;
                numChannels = config.numChannels;
                initBuffers();
            }

            function record(inputBuffer) {
                for (var channel = 0; channel < numChannels; channel++) {
                    recBuffers[channel].push(inputBuffer[channel]);
                }
                recLength += inputBuffer[0].length;
            }

            function exportWAV(type) {
                var buffers = [];
                for (var channel = 0; channel < numChannels; channel++) {
                    buffers.push(mergeBuffers(recBuffers[channel], recLength));
                }
                var interleaved = undefined;
                if (numChannels === 2) {
                    interleaved = interleave(buffers[0], buffers[1]);
                } else {
                    interleaved = buffers[0];
                }
                var dataview = encodeWAV(interleaved);
                var audioBlob = new Blob([dataview], { type: type });

                self.postMessage({ command: ‘exportWAV‘, data: audioBlob });
            }

            function getBuffer() {
                var buffers = [];
                for (var channel = 0; channel < numChannels; channel++) {
                    buffers.push(mergeBuffers(recBuffers[channel], recLength));
                }
                self.postMessage({ command: ‘getBuffer‘, data: buffers });
            }

            function clear() {
                recLength = 0;
                recBuffers = [];
                initBuffers();
            }

            function initBuffers() {
                for (var channel = 0; channel < numChannels; channel++) {
                    recBuffers[channel] = [];
                }
            }

            function mergeBuffers(recBuffers, recLength) {
                var result = new Float32Array(recLength);
                var offset = 0;
                for (var i = 0; i < recBuffers.length; i++) {
                    result.set(recBuffers[i], offset);
                    offset += recBuffers[i].length;
                }
                return result;
            }

            function interleave(inputL, inputR) {
                var length = inputL.length + inputR.length;
                var result = new Float32Array(length);

                var index = 0,
                    inputIndex = 0;

                while (index < length) {
                    result[index++] = inputL[inputIndex];
                    result[index++] = inputR[inputIndex];
                    inputIndex++;
                }
                return result;
            }

            function floatTo16BitPCM(output, offset, input) {
                for (var i = 0; i < input.length; i++, offset += 2) {
                    var s = Math.max(-1, Math.min(1, input[i]));
                    output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
                }
            }

            function writeString(view, offset, string) {
                for (var i = 0; i < string.length; i++) {
                    view.setUint8(offset + i, string.charCodeAt(i));
                }
            }

            function encodeWAV(samples) {
                var buffer = new ArrayBuffer(44 + samples.length * 2);
                var view = new DataView(buffer);

                /* RIFF identifier */
                writeString(view, 0, ‘RIFF‘);
                /* RIFF chunk length */
                view.setUint32(4, 36 + samples.length * 2, true);
                /* RIFF type */
                writeString(view, 8, ‘WAVE‘);
                /* format chunk identifier */
                writeString(view, 12, ‘fmt ‘);
                /* format chunk length */
                view.setUint32(16, 16, true);
                /* sample format (raw) */
                view.setUint16(20, 1, true);
                /* channel count */
                view.setUint16(22, numChannels, true);
                /* sample rate */
                view.setUint32(24, sampleRate, true);
                /* byte rate (sample rate * block align) */
                view.setUint32(28, sampleRate * 4, true);
                /* block align (channel count * bytes per sample) */
                view.setUint16(32, numChannels * 2, true);
                /* bits per sample */
                view.setUint16(34, 16, true);
                /* data chunk identifier */
                writeString(view, 36, ‘data‘);
                /* data chunk length */
                view.setUint32(40, samples.length * 2, true);

                floatTo16BitPCM(view, 44, samples);

                return view;
            }
        }, self);

        this.worker.postMessage({
            command: ‘init‘,
            config: {
                sampleRate: this.context.sampleRate,
                numChannels: this.config.numChannels
            }
        });

        this.worker.onmessage = function (e) {
            var cb = _this.callbacks[e.data.command].pop();
            if (typeof cb == ‘function‘) {
                cb(e.data.data);
            }
        };
    }

    _createClass(Recorder, [{
        key: ‘record‘,
        value: function record() {
            this.recording = true;
        }
    }, {
        key: ‘stop‘,
        value: function stop() {
            this.recording = false;
        }
    }, {
        key: ‘clear‘,
        value: function clear() {
            this.worker.postMessage({ command: ‘clear‘ });
        }
    }, {
        key: ‘getBuffer‘,
        value: function getBuffer(cb) {
            cb = cb || this.config.callback;
            if (!cb) throw new Error(‘Callback not set‘);

            this.callbacks.getBuffer.push(cb);

            this.worker.postMessage({ command: ‘getBuffer‘ });
        }
    }, {
        key: ‘exportWAV‘,
        value: function exportWAV(cb, mimeType) {
            mimeType = mimeType || this.config.mimeType;
            cb = cb || this.config.callback;
            if (!cb) throw new Error(‘Callback not set‘);

            this.callbacks.exportWAV.push(cb);

            this.worker.postMessage({
                command: ‘exportWAV‘,
                type: mimeType
            });
        }
    }], [{
        key: ‘forceDownload‘,
        value: function forceDownload(blob, filename) {
            var url = (window.URL || window.webkitURL).createObjectURL(blob);
            var link = window.document.createElement(‘a‘);
            link.href = url;
            link.download = filename || ‘output.wav‘;
            var click = document.createEvent("Event");
            click.initEvent("click", true, true);
            link.dispatchEvent(click);
        }
    }]);

    return Recorder;
})();

exports.default = Recorder;

},{"inline-worker":3}],3:[function(require,module,exports){
"use strict";

module.exports = require("./inline-worker");
},{"./inline-worker":4}],4:[function(require,module,exports){
(function (global){
"use strict";

var _createClass = (function () { function defineProperties(target, props) { for (var key in props) { var prop = props[key]; prop.configurable = true; if (prop.value) prop.writable = true; } Object.defineProperties(target, props); } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; })();

var _classCallCheck = function (instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } };

var WORKER_ENABLED = !!(global === global.window && global.URL && global.Blob && global.Worker);

var InlineWorker = (function () {
  function InlineWorker(func, self) {
    var _this = this;

    _classCallCheck(this, InlineWorker);

    if (WORKER_ENABLED) {
      var functionBody = func.toString().trim().match(/^function\s*\w*\s*\([\w\s,]*\)\s*{([\w\W]*?)}$/)[1];
      var url = global.URL.createObjectURL(new global.Blob([functionBody], { type: "text/javascript" }));

      return new global.Worker(url);
    }

    this.self = self;
    this.self.postMessage = function (data) {
      setTimeout(function () {
        _this.onmessage({ data: data });
      }, 0);
    };

    setTimeout(function () {
      func.call(self);
    }, 0);
  }

  _createClass(InlineWorker, {
    postMessage: {
      value: function postMessage(data) {
        var _this = this;

        setTimeout(function () {
          _this.self.onmessage({ data: data });
        }, 0);
      }
    }
  });

  return InlineWorker;
})();

module.exports = InlineWorker;
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{}]},{},[1])(1)
});

Recorder.js

2 ,在index页面内置ws,实时对话

#!/usr/bin/env python
# -*- coding:utf8 -*-
"""WebSocket side of the voice-chat demo: receive a WAV question, answer with
the file name of the synthesized reply so the front end can fetch and play it."""

import uuid
import os
from flask_ai_demo.nip_demo import get_ret_file
from flask import Flask, request
from geventwebsocket.websocket import WebSocket
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler

app = Flask(__name__)


@app.route("/ws")
def ws():
    user_socket = request.environ.get("wsgi.websocket")  # type:WebSocket
    if user_socket:
        while True:
            try:
                msg = user_socket.receive()
                if msg is None:
                    # receive() returns None once the client disconnects; the
                    # original wrote None to a file, crashed, and `continue`d
                    # forever — a 100%-CPU busy loop. Stop instead.
                    break
                # Persist the question audio under a unique name so concurrent
                # clients can't clobber each other's files.
                q_file_path = os.path.join("question_audio_pcm", f"{uuid.uuid4()}.wav")
                with open(q_file_path, "wb") as f:
                    f.write(msg)
                try:
                    ret_file_name = get_ret_file(q_file_path)
                    # 把生成好的语音文件名称发给前端,让前端请求获取,播放
                    user_socket.send(ret_file_name)
                finally:
                    os.remove(q_file_path)  # 删除生成的语音文件 (always clean up)
            except Exception:
                # Socket closed or processing failed irrecoverably: end the
                # handler rather than silently spinning on the same error.
                break
    # A Flask view must return a response object, not None.
    return ""


if __name__ == '__main__':
    # NOTE: the original used typographic quotes (‘__main__‘) — a SyntaxError.
    http_server = WSGIServer(("127.0.0.1", 8080), app, handler_class=WebSocketHandler)
    http_server.serve_forever()

完成后,我们去访问 http://127.0.0.1:8000/index 页面,就可以和小可爱机器人对话了!!!

原文地址:https://www.cnblogs.com/glh-ty/p/9813641.html

时间: 2024-10-29 05:23:36

web服务版智能语音对话的相关文章

别急着进入智能语音行业,先看看市场趋势

在商业的角度来看待,人工智能还处在于市场化初期的时代,从相关市场方案的复合增长率看,预计会在未来十年保持65%的增长,并于2027年达到万亿美元.人工智能热潮受到科技进步.政府红利以及全球投资者不断的加持.其持续高速发展的核心驱动因素主要有以下3个. 数据规模和深度-数据日益成为公司最有价值的资产,数据生成和收集技术的成熟(记录.图像.音频等),尤其是物联网的普及使得数据可获取的来源呈现几何级增长.对于各行业的公司来说,如何从数据中获取有价值的商业洞察与实现流程优化,从而为企业创造价值将成为必修

处理畅捷通的T+ 12.0版,web服务无故自动停止的问题

用了几个月的畅捷通T+ 12.0版,一直都挺正常,但最近这两周,出现了好几次web服务自动停止的情况,今天抽空仔细看了Windows的日志,发现在半夜2点左右,TPlusProWebService1200这个服务,都会自动停一下,然后再启动,而出现问题的那几次,停了之后就没自动启动,费解,得找畅捷通的技术支持问问了

一场由度秘掀起的智能语音连接O2O大战正在爆发

提到O2O,日前刘旷刚刚在微信朋友圈发布的一条针对百度世界大会的评论引发了好友们的纷纷点赞,我在评论中这样写道:“唱衰O2O的罪魁祸首就是美国资本家,他们先是掏空中国股市.打压中国互联网公司,而后制造舆论唱衰中国的O2O,企图破坏李克强总理提出的互联网+经济战略,我相信只要百度以及中国千千万万的创业者脚踏实地做好连接人与服务,不过度迷恋资本,中国定能实现在互联网+时代超越美国.” 三人成虎,人言可畏呐!美国资本家最擅长的就是制造舆论,引导大批没有主见的中国网民.于是,各种O2O寒冬论谣言四起,而

科大讯飞2014公布会看点二:智能语音装进车载车机!

一直以来,汽车行业的科技创新不胜枚举.早前福特引入SYNC车载多媒体通讯娱乐系统,使得车载声控成为现实:全新一代雪佛兰科鲁兹全面升级MyLink智能车载互联系统,具备语音识别能力:丰田汽车在新款车型中陆续增加语音对话型系统"Agent". 纵观汽车行业的未来发展,车企厂商越来越重视人机交互体验,动口不动手的驾车生活离我们已不再遥远.8月20日下午,科大讯飞将在北京国家会议中心举行以"语音点亮生活"的主题公布会,用语音技术升级车载车机的智能化体验,让语音交互在汽车应用

科大讯飞2014发布会看点二:智能语音装进车载车机!

一直以来,汽车行业的科技创新不胜枚举.早前福特引入SYNC车载多媒体通讯娱乐系统,使得车载声控成为现实:全新一代雪佛兰科鲁兹全面升级MyLink智能车载互联系统,具备语音识别能力:丰田汽车在新款车型中陆续加入语音对话型系统"Agent". 纵观汽车行业的未来发展,车企厂商越来越重视人机交互体验,动口不动手的驾车生活离我们已不再遥远.8月20日下午,科大讯飞将在北京国家会议中心举行以"语音点亮生活"的主题发布会,用语音技术升级车载车机的智能化体验,让语音交互在汽车应用

智能语音机器人市场对手如此多,微服网络如何更胜一筹

在传统电销行业中,销售是一个需要庞大服务人员的工作,电话作为一种快捷方便获取信息的通讯工具,更是必不可少,如今市场上的电话机器人就是基于语音识别技术的一种应用产品.智能以服务低端劳力密集型企业为宗旨,帮助企业突破人工客服效率低.工作时间长的瓶颈,从而帮助企业扩大规模和提升业绩. 随着人工智能技术日趋成熟,机器人.智能语音技术落地应用逐渐成熟.越来越多企业家投入其中,为解决电销行业问题,创办智能语音机器人,深圳微服网络信息技术有限公司就是智能语音领域的企业之一,多年呼叫中心经验,引领智能呼叫时代.

简单剖析智能语音交互技术

机器学习和自然语言处理技术的进步为语音与人工智能的交互提供了可能.人们可以通过对话获得信息,并与机器互动,而机器将不再只存在于科幻小说中.语音交互是未来的发展方向.智能扬声器是语音交互着陆的第一代产品. 以市面上面流行的智能电话机器人为例,他的AI模块主要包含了4部分自动语音识别(Automatic Speech Recognition, ASR),自然语言理解(Natural Language Understanding, NLU),自然语言生成(Natural Language Genera

Web服务之Nginx浅析

一.Nginx 简介: nginx [engine x]是Igor Sysoev编写的一个高性能的HTTP和反向代理服务器,另外它也可以作为邮件代理服务器. 在大多数情况下都是用来做静态web服务器和反向代理服务器,在作为反向代理服务器的时候,Nginx可以对后端的real server做负载均衡,基于应用层的负载均衡,但是他仅支持一些常见的协议,如:http.mysql.ftp.smtp. 特性: Nginx是一款面向性能设计的HTTP服务器,相较于Apache.lighttpd具有占有内存少

Web服务之Nginx反向代理与负载均衡

一.代理 正向代理: 正向代理是一个位于客户端和目标服务器之间的服务器,为了从目标服务器取得内容,客户端向代理发送一个请求并指定目标服务器,然后代理向目标服务器转交请求并将获得的内容返回给客户端.客户端必须要进行一些特别的设置才能使用正向代理. 作用: 访问无法访问的服务器(翻墙,懂得) 加速访问目标服务器(链路加速) Cache缓存(访问加速) 实现客户端访问授权 隐藏访问者 反向代理: 反向代理(Reverse Proxy)方式是指以代理服务器来接受internet上的连接请求,然后将请求转