HTML5音频实践活动总结(Preact)

2021-02-22 02:56 jianzhan

获得 PCM 数据信息

解决 PCM 数据信息

Float32 转 Int16

ArrayBuffer 转 Base64

PCM 文档播发

重取样

PCM 转 MP3

PCM 转 WAV

短时动能测算

Web Worker提升特性

声频储存(IndexedDB)

WebView 打开 WebRTC

获得 PCM 数据信息

查询 DEMO

https://github.com/deepkolos/pc-pcm-wave

样例编码:

// Acquire a microphone MediaStream via WebRTC.
const mediaStream = await window.navigator.mediaDevices.getUserMedia({
    audio: {
		// sampleRate: 44100, // sample rate — has no effect, resample manually instead
        channelCount: 1, // number of channels
        // echoCancellation: true, // echo cancellation
        // noiseSuppression: true, // noise suppression — works well in practice
    },
})
const audioContext = new window.AudioContext()
// The context's real capture rate (the requested one above is ignored).
const inputSampleRate = audioContext.sampleRate
const mediaNode = audioContext.createMediaStreamSource(mediaStream)

// Fallback for old WebKit naming of createScriptProcessor.
if (!audioContext.createScriptProcessor) {
	audioContext.createScriptProcessor = audioContext.createJavaScriptNode
}
// Create a script-processor node (4096-sample frames, 1 in / 1 out channel).
const jsNode = audioContext.createScriptProcessor(4096, 1, 1)
jsNode.connect(audioContext.destination)
jsNode.onaudioprocess = (e) => {
    // e.inputBuffer.getChannelData(0) (left)
    // for stereo, the right channel is e.inputBuffer.getChannelData(1)
}
mediaNode.connect(jsNode)

扼要步骤以下:

start=>start: 刚开始
getUserMedia=>operation: 获得MediaStream
audioContext=>operation: 建立AudioContext
scriptNode=>operation: 建立scriptNode并关系AudioContext
onaudioprocess=>operation: 设定onaudioprocess并解决数据信息
end=>end: 完毕

start->getUserMedia->audioContext->scriptNode->onaudioprocess->end

终止录制只必须把 audioContext 挂载的 node 卸载便可,随后把储存的每帧数据信息合拼便可产出 PCM 数据信息

// Tear down the recording graph: disconnect both nodes and drop the
// processing callback so no further audio frames are delivered.
jsNode.disconnect()
mediaNode.disconnect()
jsNode.onaudioprocess = null

PCM 数据信息解决

根据 WebRTC 获得的 PCM 数据信息文件格式是 Float32 的, 假如是双安全通道音频的话, 还必须提升合拼安全通道

// Accumulated per-frame PCM chunks for the left / right channels.
const leftDataList = [];
const rightDataList = [];

// Collect one frame of Float32 PCM per channel. slice(0) copies the
// samples, because the underlying buffer is reused between callbacks.
function onAudioProcess(event) {
  const { inputBuffer } = event;
  leftDataList.push(inputBuffer.getChannelData(0).slice(0));
  rightDataList.push(inputBuffer.getChannelData(1).slice(0));
}

// 交叉式合拼上下声道的数据信息
function interleaveLeftAndRight(left, right) {
  let totalLength = left.length + right.length;
  let data = new Float32Array(totalLength);
  for (let i = 0; i < left.length; i++) {
    let k = i * 2;
    data[k] = left[i];
    data[k + 1] = right[i];
  }
  return data;
}

Float32 转 Int16

// Convert Float32 samples in [-1, 1] to signed 16-bit PCM:
// positive values scale by 0x7fff (32767), the rest by 0x8000 (32768).
const float32 = new Float32Array(1)
const toPcm16 = (x) => (x > 0 ? x * 0x7fff : x * 0x8000)
const int16 = Int16Array.from(float32.map(toPcm16))

arrayBuffer 转 Base64

留意: 在访问器上有个 btoa() 涵数也是能够变换为 Base64 可是键入主要参数务必为标识符串, 假如传送 buffer 主要参数会先被 toString() 随后再 Base64 , 应用 ffplay 播发反编码序列化的 Base64 , 会较为吱吱声

应用 base64-arraybuffer 便可进行

import { encode } from 'base64-arraybuffer'

// Demo: quantize Float32 PCM to Int16, then Base64-encode the raw
// bytes. encode() works on the ArrayBuffer directly, unlike btoa()
// which only accepts strings.
const float32 = new Float32Array(1)
const toPcm16 = (x) => (x > 0 ? x * 0x7fff : x * 0x8000)
const int16 = Int16Array.from(float32.map(toPcm16))
console.log(encode(int16.buffer))

认证 Base64 是不是正确, 能够在 node 下把产出的 Base64 变换为 Int16 的 PCM 文档, 随后应用 FFPlay 播发, 看看声频是不是一切正常播发

PCM 文档播发

# 单安全通道 取样率:16000 Int16
ffplay -f s16le -ar 16k -ac 1 test.pcm

# 双安全通道 取样率:48000 Float32
ffplay -f f32le -ar 48000 -ac 2 test.pcm

重取样/调剂取样率

尽管 getUserMedia 主要参数可设定取样率, 可是在全新Chrome也不起效, 因此必须手动式做个重取样

// The sampleRate constraint is ignored (even in recent Chrome), so the
// stream must be resampled manually afterwards.
const mediaStream = await window.navigator.mediaDevices.getUserMedia({
    audio: {
    	// sampleRate: 44100, // sample rate — setting it has no effect
        channelCount: 1, // number of channels
        // echoCancellation: true, // echo cancellation
        // noiseSuppression: true, // noise suppression — works well in practice
    },
})

应用 wave-resampler 便可进行

import { resample } from 'wave-resampler'

// Downsample the captured PCM from the device rate to the target rate.
const inputSampleRate =  44100
const outputSampleRate = 16000
const resampledBuffers = resample(
    // expects the per-frame onAudioProcess buffers merged into one array
	mergeArray(audioBuffers),
	inputSampleRate,
	outputSampleRate,
)

PCM 转 MP3

import { Mp3Encoder } from 'lamejs'

// Encode mono Int16 PCM to MP3 with lamejs, feeding the encoder in
// chunks whose size is a multiple of 576 (the MP3 granule size).
const kbps = 128 // target bitrate — was an undefined reference in the original
let mp3buf
const mp3Data = [] // identifier was garbled as "mp三data" in the original
const sampleBlockSize = 576 * 10 // working chunk size, must be a multiple of 576
const mp3Encoder = new Mp3Encoder(1, outputSampleRate, kbps)
const samples = float32ToInt16(
  audioBuffers,
  inputSampleRate,
  outputSampleRate,
)

let remaining = samples.length
// `remaining > 0` (the original used >= 0, which encoded one trailing empty chunk)
for (let i = 0; remaining > 0; i += sampleBlockSize) {
  const left = samples.subarray(i, i + sampleBlockSize)
  mp3buf = mp3Encoder.encodeBuffer(left)
  mp3Data.push(new Int8Array(mp3buf))
  remaining -= sampleBlockSize
}

// flush() emits the final frames still buffered inside the encoder
mp3Data.push(new Int8Array(mp3Encoder.flush()))
console.log(mp3Data)

// 专用工具涵数
// Merge the per-frame Float32 buffers, resample to the target rate,
// and quantize to Int16 PCM (positive samples scale by 0x7fff,
// the rest by 0x8000).
function float32ToInt16(audioBuffers, inputSampleRate, outputSampleRate) {
  const resampled = resample(
    // per-frame onAudioProcess buffers merged into one contiguous array
    mergeArray(audioBuffers),
    inputSampleRate,
    outputSampleRate,
  )
  return Int16Array.from(
    resampled.map(x => (x > 0 ? x * 0x7fff : x * 0x8000)),
  )
}

应用 lamejs 便可, 可是体积较大(160+KB), 假如沒有储存要求可以使用 WAV 文件格式

> ls -alh
-rwxrwxrwx 1 root root  95K  4月 22 12:45 12s.mp3*
-rwxrwxrwx 1 root root 1.1M  4月 22 12:44 12s.wav*
-rwxrwxrwx 1 root root 235K  4月 22 12:41 30s.mp3*
-rwxrwxrwx 1 root root 2.6M  4月 22 12:40 30s.wav*
-rwxrwxrwx 1 root root  63K  4月 22 12:49 8s.mp3*
-rwxrwxrwx 1 root root 689K  4月 22 12:48 8s.wav*

PCM 转 WAV

// Concatenate a list of Float32Array frames into one contiguous buffer.
// Sizes the result from the actual chunk lengths: the original assumed
// every chunk matched list[0].length, which threw when a later chunk
// was longer, zero-padded when one was shorter, and crashed on an
// empty list.
function mergeArray(list) {
  const length = list.reduce((total, chunk) => total + chunk.length, 0)
  const data = new Float32Array(length)
  let offset = 0
  for (const chunk of list) {
    data.set(chunk, offset)
    offset += chunk.length
  }
  return data
}

// Write an ASCII string into the DataView, one byte per character,
// starting at `offset`.
function writeUTFBytes(view, offset, string) {
  let i = 0
  while (i < string.length) {
    view.setUint8(offset + i, string.charCodeAt(i))
    i += 1
  }
}

// Wrap Float32 samples in [-1, 1] in a 44-byte WAV (RIFF) header as
// 16-bit little-endian PCM, returning the complete file as an ArrayBuffer.
// Fixes vs. the original:
//  - format chunk id is the 4-byte 'fmt ' (trailing space), not 3-byte 'fmt'
//  - RIFF chunk size is 36 + dataLength (file size minus 8), not 44 + dataLength
//  - byte rate accounts for the channel count (sampleRate * blockAlign)
function createWavBuffer(audioData, sampleRate = 44100, channels = 1) {
  const WAV_HEAD_SIZE = 44
  const dataByteLength = audioData.length * 2 // 16-bit = 2 bytes per sample
  const buffer = new ArrayBuffer(dataByteLength + WAV_HEAD_SIZE)
  // A DataView is needed to write explicit little-endian fields.
  const view = new DataView(buffer)

  // Local ASCII writer so this block is self-contained.
  const writeAscii = (offset, text) => {
    for (let i = 0; i < text.length; i++) {
      view.setUint8(offset + i, text.charCodeAt(i))
    }
  }

  // --- RIFF chunk descriptor ---
  writeAscii(0, 'RIFF')
  // chunk size = total file size - 8 (the 'RIFF' id and this size field)
  view.setUint32(4, 36 + dataByteLength, true)
  writeAscii(8, 'WAVE')

  // --- fmt sub-chunk (16 bytes, uncompressed PCM) ---
  writeAscii(12, 'fmt ') // id is exactly 4 bytes, including the space
  view.setUint32(16, 16, true) // fmt chunk length for PCM
  view.setUint16(20, 1, true) // audio format 1 = linear PCM
  view.setUint16(22, channels, true)
  view.setUint32(24, sampleRate, true)
  const blockAlign = channels * 2 // bytes per sample frame
  view.setUint32(28, sampleRate * blockAlign, true) // byte rate
  view.setUint16(32, blockAlign, true)
  view.setUint16(34, 16, true) // bits per sample

  // --- data sub-chunk ---
  writeAscii(36, 'data')
  view.setUint32(40, dataByteLength, true)

  // Quantize Float32 samples to little-endian Int16.
  let index = 44
  const volume = 1
  const { length } = audioData
  for (let i = 0; i < length; i++) {
    view.setInt16(index, audioData[i] * (0x7fff * volume), true)
    index += 2
  }
  return buffer
}

// Pass the per-frame onAudioProcess buffers merged into one array.
createWavBuffer(mergeArray(audioBuffers))

WAV 基础上是 PCM 再加1些声频信息内容

简易的短时动能测算

// Short-time energy: the sum of squared samples over consecutive
// 256-sample windows. A trailing partial window is also emitted.
function shortTimeEnergy(audioData) {
  const energy = []
  let windowSum = 0
  const total = audioData.length
  for (let i = 0; i < total; i++) {
    windowSum += audioData[i] ** 2
    if ((i + 1) % 256 === 0) {
      // window complete: record it and start the next one
      energy.push(windowSum)
      windowSum = 0
    } else if (i === total - 1) {
      // flush the final, partial window
      energy.push(windowSum)
    }
  }
  return energy
}

因为测算結果有会因机器设备的音频增益差别较大, 测算出数据信息也较大, 因此应用比值简易区别人声和噪声

查询 DEMO

// Empirical peak-to-average energy ratio separating voice from noise.
const NoiseVoiceWatershedWave = 2.3
// Energy per 256-sample window for the current frame's left channel.
const energy = shortTimeEnergy(e.inputBuffer.getChannelData(0).slice(0))
const avg = energy.reduce((a, b) => a + b) / energy.length

// A pronounced energy peak relative to the average indicates speech.
const nextState = Math.max(...energy) / avg > NoiseVoiceWatershedWave ? 'voice' : 'noise'

Web Worker 提升特性

声频数据信息数据信息量较大, 因此可使用 Web Worker 开展提升, 不卡 UI 进程

在 Webpack 新项目里 Web Worker 较为简易, 安裝 worker-loader 便可

preact.config.js

// preact.config.js — register worker-loader for any *.worker.js module.
// `inline: true` embeds the worker as a blob so no extra chunk file is
// emitted by the build.
export default (config, env, helpers) => {
    config.module.rules.push({
        test: /\.worker\.js$/,
        use: { loader: 'worker-loader', options: { inline: true } },
      })
}

recorder.worker.js

// Worker entry point: receive PCM data from the main thread, do the
// heavy conversion (MP3 / Base64 / WAV, etc.) off the UI thread, and
// post the result back.
self.addEventListener('message', event => {
  console.log(event.data)
  // convert to MP3 / Base64 / WAV, etc.
  const output = ''
  self.postMessage(output)
}) // the original was missing this closing parenthesis — a syntax error

应用 Worker

// Convert PCM frames to MP3 on a Web Worker so the UI thread stays
// responsive. Resolves with the worker's output; rejects on a worker
// error (the original attached no error handler, so a failing worker
// left the promise pending forever).
async function toMP3(audioBuffers, inputSampleRate, outputSampleRate = 16000) {
  const { default: Worker } = await import('./recorder.worker')
  const worker = new Worker()
  // Simple one-shot usage; a recorder could create the worker once at
  // instantiation time, or use several instances for concurrent jobs.

  return new Promise((resolve, reject) => {
    // Attach handlers before posting so no message can be missed.
    worker.onmessage = event => resolve(event.data)
    worker.onerror = reject
    worker.postMessage({
      audioBuffers,
      inputSampleRate,
      outputSampleRate,
      type: 'mp3',
    })
  })
}

声频的储存

访问器长久化存储的地区有 LocalStorage 和 IndexedDB , 在其中 LocalStorage 较为常见, 可是只能存储标识符串, 而 IndexedDB 可立即存储 Blob , 因此优先选择挑选 IndexedDB ,应用 LocalStorage 则必须转 Base64 体积可能更大

因此以便防止占有客户太多室内空间, 因此挑选MP3文件格式开展储存

> ls -alh
-rwxrwxrwx 1 root root  95K  4月 22 12:45 12s.mp3*
-rwxrwxrwx 1 root root 1.1M  4月 22 12:44 12s.wav*
-rwxrwxrwx 1 root root 235K  4月 22 12:41 30s.mp3*
-rwxrwxrwx 1 root root 2.6M  4月 22 12:40 30s.wav*
-rwxrwxrwx 1 root root  63K  4月 22 12:49 8s.mp3*
-rwxrwxrwx 1 root root 689K  4月 22 12:48 8s.wav*

IndexedDB 简易封裝以下, 熟习后台管理的同学能够找个 ORM 库便捷数据信息读写能力

const indexedDB =
  window.indexedDB ||
  window.webkitIndexedDB ||
  window.mozIndexedDB ||
  window.OIndexedDB ||
  window.msIndexedDB

const IDBTransaction =
  window.IDBTransaction ||
  window.webkitIDBTransaction ||
  window.OIDBTransaction ||
  window.msIDBTransaction

const readWriteMode =
  typeof IDBTransaction.READ_WRITE === 'undefined'
    ? 'readwrite'
    : IDBTransaction.READ_WRITE

const dbVersion = 1
const storeDefault = 'mp3'

let dbLink

// Open (and lazily create) the 'audio' database, creating the given
// object store on first use. The connection is cached in `dbLink`.
function initDB(store) {
  return new Promise((resolve, reject) => {
    // Reuse the cached connection; `return` so we don't fall through
    // and open a second connection (the original was missing it).
    if (dbLink) return resolve(dbLink)

    // Create/open database
    const request = indexedDB.open('audio', dbVersion)

    request.onsuccess = () => {
      const db = request.result

      db.onerror = event => {
        reject(event)
      }

      if (db.version === dbVersion) {
        dbLink = db // cache for subsequent calls (the original never set it here)
        resolve(db)
      }
    }

    request.onerror = event => {
      reject(event)
    }

    // Fired when the database is first created or its version is bumped.
    request.onupgradeneeded = event => {
      dbLink = event.target.result
      const { transaction } = event.target

      if (!dbLink.objectStoreNames.contains(store)) {
        dbLink.createObjectStore(store)
      }

      // Wait for the upgrade transaction to finish before resolving,
      // so the store is actually available to be populated.
      transaction.oncomplete = () => {
        resolve(dbLink)
      }
    }
  })
}

// Persist a Blob under `name` in the given object store (default 'mp3').
export const writeIDB = async (name, blob, store = storeDefault) => {
  const db = await initDB(store)
  const tx = db.transaction([store], readWriteMode)

  return new Promise((resolve, reject) => {
    const request = tx.objectStore(store).put(blob, name)
    request.onsuccess = event => resolve(event)
    request.onerror = event => reject(event)
    // commit() is not implemented everywhere; call it only when present
    tx.commit && tx.commit()
  })
}

// Look up the value stored under `name`; resolves with the value
// (undefined when the key is absent).
export const readIDB = async (name, store = storeDefault) => {
  const db = await initDB(store)
  const tx = db.transaction([store], readWriteMode)

  return new Promise((resolve, reject) => {
    const request = tx.objectStore(store).get(name)
    request.onsuccess = event => resolve(event.target.result)
    request.onerror = event => reject(event)
    // commit() is not implemented everywhere; call it only when present
    tx.commit && tx.commit()
  })
}

// Delete every record in the given object store.
export const clearIDB = async (store = storeDefault) => {
  const db = await initDB(store)
  const tx = db.transaction([store], readWriteMode)

  return new Promise((resolve, reject) => {
    const request = tx.objectStore(store).clear()
    request.onsuccess = event => resolve(event)
    request.onerror = event => reject(event)
    // commit() is not implemented everywhere; call it only when present
    tx.commit && tx.commit()
  })
}

WebView 打开 WebRTC

见 WebView WebRTC not working

webView.setWebChromeClient(new WebChromeClient(){
	// Grant WebRTC permission requests (microphone/camera) coming from
	// page content; without this, getUserMedia inside the WebView is
	// silently denied. PermissionRequest requires API 21 (Lollipop).
	@TargetApi(Build.VERSION_CODES.LOLLIPOP)
	@Override
	public void onPermissionRequest(final PermissionRequest request) {
		request.grant(request.getResources());
	}
});

到此这篇有关HTML5音频实践活动总结(Preact)的文章内容就详细介绍到这了,更多有关html5音频內容请检索脚本制作之家之前的文章内容或再次访问下面的有关文章内容,期待大伙儿之后多多适用脚本制作之家!