點擊藍字 ╳ 關(guān)注我們
巴延興
深圳開鴻數(shù)字產(chǎn)業(yè)發(fā)展有限公司
資深OS框架開發(fā)工程師
一、簡介
二、目錄
audio_framework
├── frameworks
│ ├── js #js 接口
│ │ └── napi
│ │ └── audio_renderer #audio_renderer NAPI接口
│ │ ├── include
│ │ │ ├── audio_renderer_callback_napi.h
│ │ │ ├── renderer_data_request_callback_napi.h
│ │ │ ├── renderer_period_position_callback_napi.h
│ │ │ └── renderer_position_callback_napi.h
│ │ └── src
│ │ ├── audio_renderer_callback_napi.cpp
│ │ ├── audio_renderer_napi.cpp
│ │ ├── renderer_data_request_callback_napi.cpp
│ │ ├── renderer_period_position_callback_napi.cpp
│ │ └── renderer_position_callback_napi.cpp
│ └── native #native 接口
│ └── audiorenderer
│ ├── BUILD.gn
│ ├── include
│ │ ├── audio_renderer_private.h
│ │ └── audio_renderer_proxy_obj.h
│ ├── src
│ │ ├── audio_renderer.cpp
│ │ └── audio_renderer_proxy_obj.cpp
│ └── test
│ └── example
│ └── audio_renderer_test.cpp
├── interfaces
│ ├── inner_api #native實現(xiàn)的接口
│ │ └── native
│ │ └── audiorenderer #audio渲染本地實現(xiàn)的接口定義
│ │ └── include
│ │ └── audio_renderer.h
│ └── kits #js調(diào)用的接口
│ └── js
│ └── audio_renderer #audio渲染NAPI接口的定義
│ └── include
│ └── audio_renderer_napi.h
└── services #服務(wù)端
└── audio_service
├── BUILD.gn
├── client #IPC調(diào)用中的proxy端
│ ├── include
│ │ ├── audio_manager_proxy.h
│ │ ├── audio_service_client.h
│ └── src
│ ├── audio_manager_proxy.cpp
│ ├── audio_service_client.cpp
└── server #IPC調(diào)用中的server端
├── include
│ └── audio_server.h
└── src
├── audio_manager_stub.cpp
    └── audio_server.cpp
三、音頻渲染總體流程

四、Native接口使用
bool TestPlayback(int argc, char *argv[]) const
{
FILE* wavFile = fopen(path, "rb");
//讀取wav文件頭信息
size_t bytesRead = fread(&wavHeader, 1, headerSize, wavFile);
//設(shè)置AudioRenderer參數(shù)
AudioRendererOptions rendererOptions = {};
rendererOptions.streamInfo.encoding = AudioEncodingType::ENCODING_PCM;
rendererOptions.streamInfo.samplingRate = static_cast(wavHeader.SamplesPerSec);
rendererOptions.streamInfo.format = GetSampleFormat(wavHeader.bitsPerSample);
rendererOptions.streamInfo.channels = static_cast(wavHeader.NumOfChan);
rendererOptions.rendererInfo.contentType = contentType;
rendererOptions.rendererInfo.streamUsage = streamUsage;
rendererOptions.rendererInfo.rendererFlags = 0;
//創(chuàng)建AudioRender實例
unique_ptr audioRenderer = AudioRenderer::Create(rendererOptions);
shared_ptr cb1 = make_shared();
//設(shè)置音頻渲染回調(diào)
ret = audioRenderer->SetRendererCallback(cb1);
//InitRender方法主要調(diào)用了audioRenderer實例的Start方法,啟動音頻渲染
if (!InitRender(audioRenderer)) {
AUDIO_ERR_LOG("AudioRendererTest: Init render failed");
fclose(wavFile);
return false;
}
//StartRender方法主要是讀取wavFile文件的數(shù)據(jù),然后通過調(diào)用audioRenderer實例的Write方法進行播放
if (!StartRender(audioRenderer, wavFile)) {
AUDIO_ERR_LOG("AudioRendererTest: Start render failed");
fclose(wavFile);
return false;
}
//停止渲染
if (!audioRenderer->Stop()) {
AUDIO_ERR_LOG("AudioRendererTest: Stop failed");
}
//釋放渲染
if (!audioRenderer->Release()) {
AUDIO_ERR_LOG("AudioRendererTest: Release failed");
}
//關(guān)閉wavFile
fclose(wavFile);
return true;
}五、調(diào)用流程

std::unique_ptr AudioRenderer::Create(const std::string cachePath,
const AudioRendererOptions &rendererOptions, const AppInfo &appInfo)
{
ContentType contentType = rendererOptions.rendererInfo.contentType;
StreamUsage streamUsage = rendererOptions.rendererInfo.streamUsage;
AudioStreamType audioStreamType = AudioStream::GetStreamType(contentType, streamUsage);
auto audioRenderer = std::make_unique(audioStreamType, appInfo);
if (!cachePath.empty()) {
AUDIO_DEBUG_LOG("Set application cache path");
audioRenderer->SetApplicationCachePath(cachePath);
}
audioRenderer->rendererInfo_.contentType = contentType;
audioRenderer->rendererInfo_.streamUsage = streamUsage;
audioRenderer->rendererInfo_.rendererFlags = rendererOptions.rendererInfo.rendererFlags;
AudioRendererParams params;
params.sampleFormat = rendererOptions.streamInfo.format;
params.sampleRate = rendererOptions.streamInfo.samplingRate;
params.channelCount = rendererOptions.streamInfo.channels;
params.encodingType = rendererOptions.streamInfo.encoding;
if (audioRenderer->SetParams(params) != SUCCESS) {
AUDIO_ERR_LOG("SetParams failed in renderer");
audioRenderer = nullptr;
return nullptr;
}
return audioRenderer;
}int32_t AudioRendererPrivate::SetRendererCallback(const std::shared_ptr &callback)
{
RendererState state = GetStatus();
if (state == RENDERER_NEW || state == RENDERER_RELEASED) {
return ERR_ILLEGAL_STATE;
}
if (callback == nullptr) {
return ERR_INVALID_PARAM;
}
// Save reference for interrupt callback
if (audioInterruptCallback_ == nullptr) {
return ERROR;
}
std::shared_ptr cbInterrupt =
std::static_pointer_cast(audioInterruptCallback_);
cbInterrupt->SaveCallback(callback);
// Save and Set reference for stream callback. Order is important here.
if (audioStreamCallback_ == nullptr) {
audioStreamCallback_ = std::make_shared();
if (audioStreamCallback_ == nullptr) {
return ERROR;
}
}
std::shared_ptr cbStream =
std::static_pointer_cast(audioStreamCallback_);
cbStream->SaveCallback(callback);
(void)audioStream_->SetStreamCallback(audioStreamCallback_);
return SUCCESS;
}
// Starts playback: activates the audio interrupt with the policy service
// (so other streams can be paused/ducked), then starts the underlying stream.
bool AudioRendererPrivate::Start(StateChangeCmdType cmdType) const
{
    AUDIO_INFO_LOG("AudioRenderer::Start");
    // NOTE(review): state is read but not used in this excerpt.
    RendererState state = GetStatus();
    // Pick the interrupt descriptor matching the configured interrupt mode.
    // NOTE(review): the two case enumerators were stripped by the article's
    // HTML extraction (both read "case InterruptMode:"); restored as
    // SHARE_MODE / INDEPENDENT_MODE — confirm against the InterruptMode enum.
    AudioInterrupt audioInterrupt;
    switch (mode_) {
        case InterruptMode::SHARE_MODE:
            audioInterrupt = sharedInterrupt_;
            break;
        case InterruptMode::INDEPENDENT_MODE:
            audioInterrupt = audioInterrupt_;
            break;
        default:
            break;
    }
    AUDIO_INFO_LOG("AudioRenderer: %{public}d, streamType: %{public}d, sessionID: %{public}d",
        mode_, audioInterrupt.streamType, audioInterrupt.sessionID);
    if (audioInterrupt.streamType == STREAM_DEFAULT || audioInterrupt.sessionID == INVALID_SESSION_ID) {
        return false;
    }
    int32_t ret = AudioPolicyManager::GetInstance().ActivateAudioInterrupt(audioInterrupt);
    if (ret != 0) {
        AUDIO_ERR_LOG("AudioRendererPrivate::ActivateAudioInterrupt Failed");
        return false;
    }
    return audioStream_->StartAudioStream(cmdType);
}
// Starts the underlying audio stream: asks the service client to start the PA
// stream, re-bases the timestamp, and in callback render/capture mode spins up
// the dedicated writer/reader thread before marking the stream RUNNING.
bool AudioStream::StartAudioStream(StateChangeCmdType cmdType)
{
// NOTE(review): ret and retCode are not checked in this excerpt.
int32_t ret = StartStream(cmdType);
// Re-base the monotonic timestamp so position queries restart from "now".
resetTime_ = true;
int32_t retCode = clock_gettime(CLOCK_MONOTONIC, &baseTimestamp_);
// In callback mode the framework pulls/pushes data on its own thread.
// NOTE(review): "WriteCbTheadLoop" spelling is as in the original source.
if (renderMode_ == RENDER_MODE_CALLBACK) {
isReadyToWrite_ = true;
writeThread_ = std::make_unique<std::thread>(&AudioStream::WriteCbTheadLoop, this);
} else if (captureMode_ == CAPTURE_MODE_CALLBACK) {
isReadyToRead_ = true;
readThread_ = std::make_unique<std::thread>(&AudioStream::ReadCbThreadLoop, this);
}
// First read/write after a (re)start triggers one-time work (e.g. prebuffer).
isFirstRead_ = true;
isFirstWrite_ = true;
state_ = RUNNING;
AUDIO_INFO_LOG("StartAudioStream SUCCESS");
// Report the RUNNING transition to the stream tracker, if one is attached.
if (audioStreamTracker_) {
AUDIO_DEBUG_LOG("AudioStream:Calling Update tracker for Running");
audioStreamTracker_->UpdateTracker(sessionId_, state_, rendererInfo_, capturerInfo_);
}
return true;
}
// Uncorks the PulseAudio stream and blocks on the threaded mainloop until the
// cork operation completes; streamCmdStatus is set by PAStreamStartSuccessCb.
int32_t AudioServiceClient::StartStream(StateChangeCmdType cmdType)
{
    // NOTE(review): error and state are unused in this excerpt.
    int error;
    // NOTE(review): the template argument was stripped by the article's HTML
    // extraction; restored as lock_guard<mutex> — confirm the mutex type
    // against audio_service_client.cpp.
    lock_guard<mutex> lockdata(dataMutex);
    pa_operation *operation = nullptr;
    pa_threaded_mainloop_lock(mainLoop);
    pa_stream_state_t state = pa_stream_get_state(paStream);
    streamCmdStatus = 0;
    stateChangeCmdType_ = cmdType;
    // cork=0 means "uncork", i.e. start playback.
    operation = pa_stream_cork(paStream, 0, PAStreamStartSuccessCb, (void *)this);
    // Wait on the mainloop until the cork operation finishes; the success
    // callback signals the mainloop and fills in streamCmdStatus.
    while (pa_operation_get_state(operation) == PA_OPERATION_RUNNING) {
        pa_threaded_mainloop_wait(mainLoop);
    }
    pa_operation_unref(operation);
    pa_threaded_mainloop_unlock(mainLoop);
    if (!streamCmdStatus) {
        AUDIO_ERR_LOG("Stream Start Failed");
        ResetPAAudioClient();
        return AUDIO_CLIENT_START_STREAM_ERR;
    } else {
        AUDIO_INFO_LOG("Stream Started Successfully");
        return AUDIO_CLIENT_SUCCESS;
    }
}
// Forwards the caller's buffer to the owned audio stream for rendering and
// reports how many bytes the stream consumed.
int32_t AudioRendererPrivate::Write(uint8_t *buffer, size_t bufferSize)
{
    const auto bytesWritten = audioStream_->Write(buffer, bufferSize);
    return bytesWritten;
}
// Writes one application buffer to the stream. On the very first write after
// start, a prebuffer is rendered first. Returns the byte count consumed, or
// ERR_WRITE_FAILED on error.
size_t AudioStream::Write(uint8_t *buffer, size_t buffer_size)
{
    // Initialized defensively; WriteStream is expected to set it on all paths.
    int32_t writeError = 0;
    StreamBuffer stream;
    stream.buffer = buffer;
    stream.bufferLen = buffer_size;
    isWriteInProgress_ = true;
    if (isFirstWrite_) {
        if (RenderPrebuf(stream.bufferLen)) {
            // Fix: clear the in-progress flag before bailing out — the
            // original left isWriteInProgress_ stuck at true on this path.
            isWriteInProgress_ = false;
            return ERR_WRITE_FAILED;
        }
        isFirstWrite_ = false;
    }
    size_t bytesWritten = WriteStream(stream, writeError);
    isWriteInProgress_ = false;
    if (writeError != 0) {
        AUDIO_ERR_LOG("WriteStream fail,writeError:%{public}d", writeError);
        return ERR_WRITE_FAILED;
    }
    return bytesWritten;
}
// Copies the caller's data into the client-side audio cache and, once the
// cache is full, flushes the whole cache to the PulseAudio stream under the
// threaded-mainloop lock. Returns the number of caller bytes consumed;
// pError reports the PA write status.
size_t AudioServiceClient::WriteStream(const StreamBuffer &stream, int32_t &pError)
{
size_t cachedLen = WriteToAudioCache(stream);
// Cache not full yet: nothing is sent downstream this call.
if (!acache.isFull) {
pError = error;
return cachedLen;
}
pa_threaded_mainloop_lock(mainLoop);
const uint8_t *buffer = acache.buffer.get();
size_t length = acache.totalCacheSize;
// Push the entire cache to the PA stream.
error = PaWriteStream(buffer, length);
acache.readIndex += acache.totalCacheSize;
acache.isFull = false;
// NOTE(review): length is size_t so (length >= 0) is always true, and
// acache.isFull was just set to false — both subconditions are redundant.
if (!error && (length >= 0) && !acache.isFull) {
uint8_t *cacheBuffer = acache.buffer.get();
uint32_t offset = acache.readIndex;
uint32_t size = (acache.writeIndex - acache.readIndex);
// Compact any unsent tail of the cache back to its start.
if (size > 0) {
if (memcpy_s(cacheBuffer, acache.totalCacheSize, cacheBuffer + offset, size)) {
AUDIO_ERR_LOG("Update cache failed");
pa_threaded_mainloop_unlock(mainLoop);
pError = AUDIO_CLIENT_WRITE_STREAM_ERR;
return cachedLen;
}
AUDIO_INFO_LOG("rearranging the audio cache");
}
acache.readIndex = 0;
acache.writeIndex = 0;
// Cache is empty again: absorb whatever part of the caller's buffer
// did not fit in the first pass.
if (cachedLen < stream.bufferLen) {
StreamBuffer str;
str.buffer = stream.buffer + cachedLen;
str.bufferLen = stream.bufferLen - cachedLen;
AUDIO_DEBUG_LOG("writing pending data to audio cache: %{public}d", str.bufferLen);
cachedLen += WriteToAudioCache(str);
}
}
pa_threaded_mainloop_unlock(mainLoop);
pError = error;
return cachedLen;
}
六、總結(jié)
原文標題:OpenHarmony 3.2 Beta Audio——音頻渲染
文章出處:【微信公眾號:OpenAtom OpenHarmony】歡迎添加關注!文章轉載請注明出處。
聲明:本文內容及配圖由入駐作者撰寫或者入駐合作網站授權轉載。文章觀點僅代表作者本人,不代表電子發燒友網立場。文章及其配圖僅供工程師學習之用,如有內容侵權或者其他違規問題,請聯系本站處理。
舉報投訴
-
鴻蒙
+關(guān)注
關(guān)注
60文章
3012瀏覽量
46154 -
OpenHarmony
+關(guān)注
關(guān)注
33文章
3970瀏覽量
21339
原文標(biāo)題:OpenHarmony 3.2 Beta Audio——音頻渲染
文章出處:【微信號:gh_e4f28cfa3159,微信公眾號:OpenAtom OpenHarmony】歡迎添加關(guān)注!文章轉(zhuǎn)載請注明出處。
發(fā)布評論請先 登錄
相關(guān)推薦
熱點推薦
通過對數字音頻信號進行數學運算和算法處理的高性能Audio DSP-DU562
高性能 Audio DSP(音頻數字信號處理器)的核心工作原理是通過對數字音頻信號進行數學運算和算法處理,實現音質優化、噪聲抑制、空間增強等效果。
藍牙5.3 經(jīng)典音頻 + LE Audio,一顆模塊兼顧兩種生態(tài)
用藍牙連手機聽歌、打電話,大家早已習(xí)以為常;近幾年 LE Audio 冒頭,多設(shè)備同步、低延遲、廣播音頻成了新賣點。問題來了:經(jīng)典藍牙生態(tài)一時半會退不了,LE Audio 又確實香,選誰?——其實
LE Audio融合BLE雙模重塑藍牙音頻生態(tài)的革命性技術(shù)
在藍牙技術(shù)誕生后的第28個年頭,一場由LE Audio(Low Energy Audio,低功耗音頻)引發(fā)的音頻技術(shù)革命正在席卷全球。這項由藍牙技術(shù)聯(lián)盟(Bluetooth SIG)于
LE Audio藍牙模塊方案:重塑無線音頻新體驗
? 在無線音頻技術(shù)日新月異的今天,藍牙模塊作為連接設(shè)備的核心組件,其性能與功能直接決定了用戶體驗的優(yōu)劣。近期,基于LE Audio標(biāo)準(zhǔn)的新一代藍牙模塊方案橫空出世,以其卓越的技術(shù)特性和廣泛的應(yīng)用場
探索 AURIX? 音頻應(yīng)用套件:硬件設(shè)計與網(wǎng)絡(luò)音頻應(yīng)用剖析
的 AURIX? 音頻應(yīng)用套件(Audio Application Kit)為音頻開發(fā)者提供了強大的工具。本文將深入剖析該套件的硬件設(shè)計和網(wǎng)絡(luò)音頻應(yīng)用,帶領(lǐng)大家了解其特點和技術(shù)細節(jié)。
MERUS? EVAL_AUDIO_MA12070_B 和 EVAL_AUDIO_MA12070P_B評估板使用指南
MERUS? EVAL_AUDIO_MA12070_B 和 EVAL_AUDIO_MA12070P_B評估板使用指南 一、前言 在音頻放大器設(shè)計領(lǐng)域,一款性能優(yōu)良的評估板能為工程師們節(jié)省大量的時間
EVAL_AUDIO_MA2304xNS_B評估板使用指南:音頻放大器設(shè)計的得力助手
EVAL_AUDIO_MA2304xNS_B評估板使用指南:音頻放大器設(shè)計的得力助手 作為電子工程師,在音頻放大器設(shè)計領(lǐng)域不斷探索時,一款優(yōu)質(zhì)的評估板能為我們的工作帶來極大便利。今天就來詳細介紹一下
藍牙模塊低功耗革命:LE Audio多通道音頻技術(shù)詳解(TWS同步/家庭影院/VR音效)
一、引言 隨著科技的飛速發(fā)展,藍牙技術(shù)作為無線傳輸?shù)馁撸呀?jīng)深入到我們的日常生活中。從最初的數(shù)據(jù)傳輸,到后來的音頻傳輸,再到如今的藍牙LE Audio(低功耗音頻)技術(shù)的問世,藍牙不斷刷新著我們
藍牙模塊低功耗新突破:LE Audio技術(shù)詳解(LC3編解碼/多設(shè)備串流/廣播音頻)
Audio是藍牙技術(shù)聯(lián)盟(SIG)在2020年推出的全新音頻技術(shù)標(biāo)準(zhǔn),以低功耗藍牙5.2為基礎(chǔ),采用ISOC(isochronous)架構(gòu),引入了創(chuàng)新的LC3音頻編碼算法,具有更低的延遲和更高的傳輸質(zhì)量,同時
請問STM32如何移植Audio框架?
最近在學(xué)習(xí)音頻解碼,想用一下Audio框架。
1、這個該如何移植到自己創(chuàng)建的BSP并對接到device框架中?看了官方移植文檔沒有對沒有對該部分的描述。
2、我只想實現(xiàn)一個簡單的播放功能,只用一個DAC芯片(比如CS4344)是否就能達到我的需求?
發(fā)表于 09-25 07:17
XR空間音頻革命:蘋果、三星推出新技術(shù),ASAF成Vision Pro最佳搭檔
Audio)格式:Apple Spatial Audio Format(ASAF,蘋果空間音頻格式),可以用來打造真正沉浸式的音頻體驗。 ? ASAF 通過確保使用聲學(xué)提示來
LE-Audio是什么?
近年來,隨著藍牙技術(shù)的快速發(fā)展,無線通信領(lǐng)域的應(yīng)用變得越來越廣泛。然而,在對音頻質(zhì)量和功耗不斷追求的同時,藍牙技術(shù)也需要不斷創(chuàng)新和改進。在這方面,LE-Audio(低功耗音頻)作為一項新興技術(shù)
發(fā)表于 06-28 21:32
開源鴻蒙6.0Beta1版本發(fā)布!觸覺智能將率先適配RK3566/RK3568/RK3576等芯片平臺芯片
開放原子開源鴻蒙(OpenAtomOpenHarmony,簡稱“開源鴻蒙”或“OpenHarmony”)6.0Beta1版本正式發(fā)布。相比5.1.0Release版本進一步增強ArkUI組件能力
藍牙LE Audio技術(shù)簡介和優(yōu)勢分析
藍牙LE Audio,也稱為低功耗音頻(Bluetooth Low Energy Audio),是藍牙技術(shù)家族中的最新成員,專門為音頻傳輸而設(shè)計。它繼承了藍牙低功耗(Bluetooth
OpenHarmony 3.2 Beta Audio——音頻渲染

評論