update: polish the audio match (song recognition) API and add a demo page

binaryify 2023-09-10 15:30:59 +08:00
parent 37bad1cc33
commit 70db662026
11 changed files with 1941 additions and 7 deletions

View File

@@ -1,5 +1,6 @@
module.exports = {
root: true,
ignorePatterns: ['public/'],
parserOptions: {
parser: 'babel-eslint',
ecmaVersion: 2018,

View File

@@ -1,4 +1,12 @@
# Changelog
### 4.12.0 | 2023.09.10
- Polished the audio match (song recognition) API and added a demo page
- NMTID cookie is now added dynamically #1792
- Pinned the weapi UA (user agent)
### 4.11.3 | 2023.09.09
- Unified handling of the `code` field in responses

View File

@@ -274,6 +274,7 @@
256. Podcast audio upload
257. Verification API - QR code generation
258. Verification API - QR code check
259. Audio match (song recognition)
## Installation
@@ -4245,6 +4246,19 @@ qrCodeStatus:10,detailReason:0 QR code scanned and the phone number matches
qrCodeStatus:20,detailReason:0 verification succeeded
qrCodeStatus:21,detailReason:0 QR code has expired
### Audio match (song recognition)
Description: this API identifies the song in an uploaded audio file or in audio captured from the microphone. For a complete calling example see `/audio_match_demo/index.html` (project file: `public/audio_match_demo/index.html`) and the sketch below.
**Endpoint:** `/audio/match`
**Required parameters:**
`duration`: length of the audio clip, in seconds
`audioFP`: audio fingerprint; see the project's demo for how to generate it
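A minimal calling sketch, assuming the API server is running locally on port 3000 (the address the demo page uses) and that `audioFP` has already been generated client-side with the demo's `GenerateFP` helper:

```js
// Sketch only: query /audio/match with a precomputed fingerprint.
const duration = 5 // seconds of audio covered by the fingerprint
const audioFP = '...' // placeholder; produce it as in public/audio_match_demo/index.html

const params = new URLSearchParams({ duration, audioFP })
fetch(`http://localhost:3000/audio/match?${params}`)
  .then((res) => res.json())
  .then((json) => {
    // The demo reads matches from data.result; each entry carries the matched
    // song plus the offset (startTime, in milliseconds) where it matched.
    for (const match of json.data.result || []) {
      console.log(`${match.song.name} - ${match.song.album.name} (${match.startTime / 1000}s)`)
    }
  })
```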
## Accessing this documentation offline
This documentation is also a Progressive Web App (PWA): a serviceWorker has been added, so it can be read offline.
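Offline support of this kind normally comes down to registering a service worker from the docs page; the snippet below is a generic illustration only, since the documentation site's actual registration code and worker script name are not part of this commit.

```js
// Generic illustration; '/service-worker.js' is a hypothetical path.
if ('serviceWorker' in navigator) {
  navigator.serviceWorker
    .register('/service-worker.js')
    .then((reg) => console.log('serviceWorker registered, scope:', reg.scope))
    .catch((err) => console.error('serviceWorker registration failed:', err))
}
```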

interface.d.ts vendored (7 lines changed)
View File

@@ -244,6 +244,13 @@ export function artists(
params: { id: string | number } & RequestBaseConfig,
): Promise<Response>
export function audio_match(
params: {
duration: string | number
audioFP: string | number
} & RequestBaseConfig,
): Promise<Response>
export function avatar_upload(
params: ImageUploadConfig & RequestBaseConfig,
): Promise<Response>
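The new `audio_match` typing mirrors the HTTP route. A hedged usage sketch, assuming the package's usual pattern of exposing every route as an importable async function that resolves to a `Response` with a `body` field:

```js
// Sketch only: call the route function in-process instead of over HTTP.
// duration and audioFP follow the same semantics as the HTTP parameters.
const { audio_match } = require('NeteaseCloudMusicApi')

audio_match({ duration: 5, audioFP: '...' })
  .then((res) => console.log(res.body))
  .catch(console.error)
```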

View File

@@ -1,14 +1,20 @@
const realData =
'eJx10mtIU2EcBvDtnCwNMfO2klUSmSQ5ZugKW/v/0TIjJVdhDStbXpqXrhY5Kwhtrcwiut9VSqMUMxX6IFqsD92sD1YgWGHRBcowKrpnPa/v+drg4flt572ds2PQ6XQut7MwJ940w2TOyS0pzF+/BV/MJrNO+3TVLOHUzKx5iw3/H5uZ7yxegct3tTl7Cr6QEa0gZ/dZOFsvfe5YHe1D+yFZxpncqEj/cCdwoirdVxHNnZrX3xygU5g7Eh6I9uOx8Ch4y9FQjlKkDz1pYrFXIJLUOovFGcYivqJgXqaXDqu7Rzc0XzmZxG81B/fF8wRVusn2jN5rDnwca8tFhyAJP4L4qiI9vX8cWzEmVKzT/46qxNpIdZOZz2HNcHhSkZ3D4AjYFpfGFkX6+dB+FvcSBe/SWbkLPVnEOJ1DFelXxVVci/Wj4TsBLhrQ/LGoaU4HxsTA28L76Cc8Dfau/U6F6FgkyBDDJar0g8tesmOvOHioWeXXmme6l3MLbIIre6wciU5E2t/k8WVxHfHvuUWXsH4SPCv1NW1Cz0aivgYO34vw1AEvi3MlIw0xHl6JNVPEGW41UJsqPaXYYTuEnotMdHwYfv7CFR/i+aXmrY5wrlSkEwr+0EJ0GvLmdw4/RS9Amj93UAbGZMIF40ezE3PtcG/yBWrT3L6oh66hFyMXK4xsUKT7aufzapxnFTwiNc3Wis5Bdm+OYCvmOuHj/ZeoQPOI00PUrUjXpG+kMFU61tFFDvQaZOn5DH4mzoLw4Hsaj14rzu/K4jF66fSWTnJinW3wBvcveqjZN3iFjKp0qKuF1mi21keST3NtTcbwu1eG3Dussr9eemljLIco0tVH7HwA493wOr+FlIjfy+GvkR4uwfjt4v/6G8K3NX8K38lt6B1ISa+Bv2O8Fy69foZOovci2S4Lr1aku4P9OEWVTt9wgMQ7exgJ8JXyI0W694WFyuBjcH75XyrEXsfhg+ZSvqZIf/Lct8Wp0md2tJN4PifEfjcm8gu02Ptbj459eum8eg8bFWlLXTb/A+uo9bM='
function createRandomString(len) {
const str = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
let result = ''
for (let i = len; i > 0; --i)
result += str[Math.floor(Math.random() * str.length)]
return result
}
module.exports = (query, request) => {
query.cookie.os = 'pc'
const data = {
algorithmCode: 'shazam_v2',
times: 1,
sessionId: 'C999431ACDC84EDBB984763654E6F8D7',
duration: 3.3066249999999995,
sessionId: createRandomString(16),
duration: Number(query.duration),
from: 'recognize-song',
rawdata: realData,
decrypt: '1',
rawdata: query.audioFP,
}
return request('POST', `https://music.163.com/api/music/audio/match`, data, {
crypto: 'weapi',

View File

@@ -1,6 +1,6 @@
{
"name": "NeteaseCloudMusicApi",
"version": "4.11.3",
"version": "4.12.0",
"description": "网易云音乐 NodeJS 版 API",
"scripts": {
"start": "node app.js",

File diff suppressed because it is too large

Binary file not shown.

View File

@@ -0,0 +1,184 @@
<!DOCTYPE html>
<head>
<style>
* {
font-family: sans-serif;
}
pre {
font-family: monospace;
}
a {
font-family: sans-serif;
}
audio {
width: 100%;
}
canvas {
width: 100%;
height: 0;
transition: all linear 0.1s;
}
.canvas-active {
height: 15vh;
}
pre {
overflow: scroll;
}
</style>
</head>
<body>
<h1>听歌识曲 Demo (Credit: <a href="https://github.com/mos9527/ncm-afp" target="_blank">https://github.com/mos9527/ncm-afp</a>)</h1>
<p>Usage:</p>
<li>Select your audio file through "Choose File" picker</li>
<li>Seek to a point where your music should sound the most distinct</li>
<li>Hit the "Clip" button and wait for the results!</li>
<p>Sorry if your music somehow sounds 100x awful here, since everything is in <i>telephone quality</i> and that's what <i>they</i>'re using :/</p>
<audio id="audio" controls autoplay></audio>
<canvas id="canvas"></canvas>
<button id="invoke">Clip</button>
<input type="file" name="picker" accept="*" id="file">
<hr>
<label for="use-mic">Listen from microphone</label>
<input type="checkbox" name="use-mic" id="usemic">
<hr>
<pre id="logs"></pre>
</body>
<script type="module">
import { InstantiateRuntime , GenerateFP } from './afp.js'
const duration = 5
let audioCtx, recorderNode, micSourceNode
let audioBuffer,bufferHealth
let runtime = InstantiateRuntime()
let audio = document.getElementById('audio')
let file = document.getElementById('file')
let clip = document.getElementById('invoke')
let usemic = document.getElementById('usemic')
let canvas = document.getElementById('canvas')
let canvasCtx = canvas.getContext('2d')
let logs = document.getElementById('logs')
logs.write = line => logs.innerHTML += line + '\n'
function RecorderCallback(channelL){
let sampleBuffer = new Float32Array(channelL.subarray(0, duration * 8000))
let FP = GenerateFP(sampleBuffer)
logs.write(`[index] Generated FP ${FP}`)
logs.write('[index] Now querying, please wait...')
fetch(
'http://localhost:3000/audio/match?' +
new URLSearchParams(Object.assign({
audioFP: FP,
duration: duration
}))
).then(resp => resp.json()).then(resp => {
if (!resp.data.result){
return logs.write('[index] Query failed with no results.')
}
logs.write(`[index] Query complete. Results=${resp.data.result.length}`)
for (var song of resp.data.result) {
logs.write(
`<a target="_blank" href="https://music.163.com/song?id=${song.song.id}">${song.song.name} - ${song.song.album.name} (${song.startTime / 1000}s)</a>`
)
}
})
}
function InitAudioCtx(){
// AFP.wasm can't do it with anything other than 8KHz
audioCtx = new AudioContext({ 'sampleRate': 8000 })
if (audioCtx.state == 'suspended')
return false
let audioNode = audioCtx.createMediaElementSource(audio)
audioCtx.audioWorklet.addModule('rec.js').then(() => {
recorderNode = new AudioWorkletNode(audioCtx, 'timed-recorder')
audioNode.connect(recorderNode) // recorderNode doesn't output anything
audioNode.connect(audioCtx.destination)
recorderNode.port.onmessage = event => {
switch (event.data.message) {
case 'finished':
RecorderCallback(event.data.recording)
clip.innerHTML = 'Clip'
clip.disabled = false
canvas.classList.remove('canvas-active')
break
case 'bufferhealth':
clip.innerHTML = `${(duration * (1-event.data.health)).toFixed(2)}s`
bufferHealth = event.data.health
audioBuffer = event.data.recording
break
default:
logs.write(event.data.message)
}
}
// Attempt to get user's microphone and connect it to the AudioContext.
navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: false,
autoGainControl: false,
noiseSuppression: false,
latency: 0,
},
}).then(micStream=>{
micSourceNode = audioCtx.createMediaStreamSource(micStream);
micSourceNode.connect(recorderNode)
usemic.checked = true
logs.write('[rec.js] Microphone attached.')
});
});
return true
}
runtime.then(() => logs.write('[index] Wasm module loaded.'))
clip.addEventListener('click', event => {
recorderNode.port.postMessage({
message: 'start', duration: duration
})
clip.disabled = true
canvas.classList.add('canvas-active')
})
usemic.addEventListener('change',event=>{
if (!usemic.checked)
micSourceNode.disconnect(recorderNode)
else
micSourceNode.connect(recorderNode)
})
file.addEventListener('change', event => {
file.files[0].arrayBuffer().then(
async buffer => {
await runtime
logs.write(`[index] File ${file.files[0].name} loaded.`)
audio.src = window.URL.createObjectURL(new Blob([buffer]))
clip.disabled = false
})
})
function UpdateCanvas(){
let w = canvas.clientWidth, h = canvas.clientHeight
canvas.width = w,canvas.height = h
canvasCtx.fillStyle = 'rgba(0,0,0,0)';
canvasCtx.fillRect(0, 0, w,h);
if (audioBuffer){
canvasCtx.fillStyle = 'black';
for (var x=0;x<w * bufferHealth;x++){
var y = audioBuffer[Math.ceil((x / w) * audioBuffer.length)]
var z = Math.abs(y) * h / 2
canvasCtx.fillRect(x,h / 2 - (y > 0 ? z : 0),1,z)
}
}
requestAnimationFrame(UpdateCanvas)
}
UpdateCanvas()
let requestCtx = setInterval(()=>{
try {
if (InitAudioCtx()) { // Put this here so we don't have to deal with the 'user did not interact' thing
clearInterval(requestCtx)
logs.write('[rec.js] Audio Context started.')
}
} catch {
// Fail silently
}
},100)
</script>

View File

@@ -0,0 +1,49 @@
/* AudioWorkletProcessor must be initialized as a module (i.e. a separate file)
* Ref : https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor
*/
class TimedRecorder extends AudioWorkletProcessor {
constructor(options) {
super()
this.max_length = 0
this.recbuffer = new Float32Array()
this.recording = false
this.buf_index = 0
this.port.onmessage = (event) => {
switch (event.data.message) {
case 'start':
this.max_length = event.data.duration * 8000
this.recbuffer = new Float32Array(this.max_length)
this.buf_index = 0
this.recording = true
this.port.postMessage({ message: '[rec.js] Recording started' })
break
}
}
}
process(inputs) {
// Only take care of channel 0 (Left)
if (this.recording) {
let channelL = inputs[0][0]
this.port.postMessage({
message: 'bufferhealth',
health: this.buf_index / this.max_length,
recording: this.recbuffer,
})
if (this.buf_index + channelL.length > this.max_length) {
this.port.postMessage({ message: '[rec.js] Recording finished' })
this.recording = false
this.buf_index = 0
this.port.postMessage({
message: 'finished',
recording: this.recbuffer,
})
} else {
this.recbuffer.set(channelL, this.buf_index)
this.buf_index += channelL.length
}
}
return true
}
}
registerProcessor('timed-recorder', TimedRecorder)

View File

@@ -17,7 +17,8 @@
<li>1. <a href="./search?keywords=海阔天空">搜索</a></li>
<li>2. <a href="./comment/music?id=186016&limit=1">歌曲评论</a></li>
<li>3. <a href="./dj/program?rid=336355127">电台节目</a></li>
<li>4. <a href="/qrlogin.html">二维码登录</a></li>
<li>4. <a href="./qrlogin.html">二维码登录</a></li>
<li>4. <a href="./audio_match_demo/index.html">听歌识曲</a></li>
</ul>
<style>
html,