no message

This commit is contained in:
gem
2025-02-18 15:21:31 +08:00
commit 2d133e56d7
1980 changed files with 465595 additions and 0 deletions

View File

@@ -0,0 +1,614 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "2d/renderer/Batcher2d.h"
#include "application/ApplicationManager.h"
#include "base/TypeDef.h"
#include "core/Root.h"
#include "core/scene-graph/Scene.h"
#include "editor-support/MiddlewareManager.h"
#include "renderer/pipeline/Define.h"
#include "scene/Pass.h"
namespace cc {
// Default constructor: delegates to the Root* overload with nullptr so the
// batcher resolves the Root singleton itself (see Batcher2d(Root*)).
Batcher2d::Batcher2d() : Batcher2d(nullptr) {
}
// Main constructor: caches the Root, its gfx::Device and the StencilManager
// singleton. A nullptr root falls back to Root::getInstance().
// The draw-batch pool allocates with ccnew / frees with delete and is
// pre-sized with 10 elements.
Batcher2d::Batcher2d(Root* root)
: _drawBatchPool([]() { return ccnew scene::DrawBatch2D(); }, [](auto* obj) { delete obj; }, 10U) {
    if (root == nullptr) {
        root = Root::getInstance();
    }
    _root = root;
    _device = _root->getDevice();
    _stencilManager = StencilManager::getInstance();
}
// Destructor: releases everything this batcher owns manually — the pooled
// DrawBatch2D objects, the descriptor sets created in getDescriptorSet(),
// and any batches still held in _batches. The mask-clear model is returned
// to the Root and the mask mesh is destroyed; IntrusivePtr members are then
// reset so the references are dropped.
Batcher2d::~Batcher2d() { // NOLINT
    _drawBatchPool.destroy();
    // Descriptor sets in the cache were created via createDescriptorSet()
    // and are owned here (see getDescriptorSet()).
    for (auto iter : _descriptorSetCache) {
        delete iter.second;
    }
    for (auto* drawBatch : _batches) {
        delete drawBatch;
    }
    _attributes.clear();
    if (_maskClearModel != nullptr) {
        // Models are created by Root (createClearModel), so Root destroys them.
        Root::getInstance()->destroyModel(_maskClearModel);
        _maskClearModel = nullptr;
    }
    if (_maskModelMesh != nullptr) {
        _maskModelMesh->destroy();
        _maskModelMesh = nullptr;
    }
    _maskClearMtl = nullptr;
    _maskAttributes.clear();
}
// Registers (or replaces) the mesh-buffer list for one accessor id, taking
// ownership of the vector by move. Looked up later by getMeshBuffer().
void Batcher2d::syncMeshBuffersToNative(uint16_t accId, ccstd::vector<UIMeshBuffer*>&& buffers) {
    _meshBuffersMap[accId] = std::move(buffers);
}
// Returns the UIMeshBuffer registered for (accId, bufferId) via
// syncMeshBuffersToNative(), or nullptr when either id is unknown.
// Fix: the previous implementation used operator[] — which default-inserts
// an empty vector for an unknown accId — and then indexed the vector
// unchecked, which is undefined behavior for an out-of-range bufferId.
UIMeshBuffer* Batcher2d::getMeshBuffer(uint16_t accId, uint16_t bufferId) { // NOLINT(bugprone-easily-swappable-parameters)
    auto iter = _meshBuffersMap.find(accId);
    if (iter == _meshBuffersMap.end() || bufferId >= iter->second.size()) {
        return nullptr;
    }
    return iter->second[bufferId];
}
// Lazily resolves and caches the gfx device from the Root singleton.
gfx::Device* Batcher2d::getDevice() {
    if (_device != nullptr) {
        return _device;
    }
    _device = Root::getInstance()->getDevice();
    return _device;
}
// Intentionally empty: descriptor sets are updated eagerly in
// getDescriptorSet()/updateLocalDescriptorSet(); kept as a hook called from
// uploadBuffers().
void Batcher2d::updateDescriptorSet() {
}
// Stores the UI root nodes to traverse each frame (weak references, owned
// by the scene graph); consumed by fillBuffersAndMergeBatches().
void Batcher2d::syncRootNodesToNative(ccstd::vector<Node*>&& rootNodes) {
    _rootNodeArr = std::move(rootNodes);
}
// Per-frame entry point: walks every UI root node, batching draw infos as it
// goes, then hands each root's newly produced batches to that root's render
// scene. `index` tracks where this root's batches start inside _batches.
void Batcher2d::fillBuffersAndMergeBatches() {
    size_t index = 0;
    for (auto* rootNode : _rootNodeArr) {
        // _batches will add by generateBatch
        walk(rootNode, 1);
        // Flush the batch still open after the walk finishes.
        generateBatch(_currEntity, _currDrawInfo);
        auto* scene = rootNode->getScene()->getRenderScene();
        size_t const count = _batches.size();
        for (size_t i = index; i < count; i++) {
            scene->addBatch(_batches.at(i));
        }
        index = count;
    }
}
// Depth-first traversal of the UI hierarchy. For each node with a
// RenderEntity it propagates opacity, dispatches each RenderDrawInfo to the
// appropriate handler, and recurses into children unless the subtree is
// fully transparent or the entity is CROSSED (rendered elsewhere).
void Batcher2d::walk(Node* node, float parentOpacity) { // NOLINT(misc-no-recursion)
    if (!node->isActiveInHierarchy()) {
        return;
    }
    bool breakWalk = false;
    auto* entity = static_cast<RenderEntity*>(node->getUserData());
    if (entity) {
        if (entity->getColorDirty()) {
            // Cascade opacity: parent opacity * node opacity * color alpha.
            float localOpacity = entity->getLocalOpacity();
            float localColorAlpha = entity->getColorAlpha();
            entity->setOpacity(parentOpacity * localOpacity * localColorAlpha);
            entity->setColorDirty(false);
            // Color changed, so vertex colors must be refilled this frame.
            entity->setVBColorDirty(true);
        }
        if (math::isEqualF(entity->getOpacity(), 0)) {
            // Fully transparent: skip rendering and the whole subtree.
            breakWalk = true;
        } else if (entity->isEnabled()) {
            uint32_t size = entity->getRenderDrawInfosSize();
            for (uint32_t i = 0; i < size; i++) {
                auto* drawInfo = entity->getRenderDrawInfoAt(i);
                handleDrawInfo(entity, drawInfo, node);
            }
            entity->setVBColorDirty(false);
        }
        if (entity->getRenderEntityType() == RenderEntityType::CROSSED) {
            breakWalk = true;
        }
    }
    if (!breakWalk) {
        const auto& children = node->getChildren();
        float thisOpacity = entity ? entity->getOpacity() : parentOpacity;
        for (const auto& child : children) {
            // we should find parent opacity recursively upwards if it doesn't have an entity.
            walk(child, thisOpacity);
        }
    }
    // post assembler: after the subtree is done, close any mask this entity
    // opened (only relevant while a mask is active on the stencil stack).
    if (_stencilManager->getMaskStackSize() > 0 && entity && entity->isEnabled()) {
        handlePostRender(entity);
    }
}
// Called after an entity's subtree has been walked: if the entity is a mask,
// flush the open batch, reset batching state and pop the mask from the
// stencil manager.
void Batcher2d::handlePostRender(RenderEntity* entity) {
    if (!entity->getIsMask()) {
        return;
    }
    generateBatch(_currEntity, _currDrawInfo);
    resetRenderStates();
    _stencilManager->exitMask();
}
// Handles a COMP draw info: decides whether it can merge into the batch
// currently being accumulated, flushes the previous batch when it cannot,
// and copies this draw info's vertices/colors/indices into the mesh buffer.
CC_FORCE_INLINE void Batcher2d::handleComponentDraw(RenderEntity* entity, RenderDrawInfo* drawInfo, Node* node) {
    ccstd::hash_t dataHash = drawInfo->getDataHash();
    if (drawInfo->getIsMeshBuffer()) {
        // A dataHash of 0 forces the merge check below to fail, so mesh-buffer
        // draw infos always get their own batch.
        dataHash = 0;
    }
    // may slow
    bool isMask = entity->getIsMask();
    if (isMask) {
        // Mask subComp
        insertMaskBatch(entity);
    } else {
        entity->setEnumStencilStage(_stencilManager->getStencilStage());
    }
    auto tempStage = static_cast<StencilStage>(entity->getStencilStage());
    // Batch break conditions: data hash, material or stencil stage changed.
    if (_currHash != dataHash || dataHash == 0 || _currMaterial != drawInfo->getMaterial() || _currStencilStage != tempStage) {
        // Generate a batch if not batching
        generateBatch(_currEntity, _currDrawInfo);
        if (!drawInfo->getIsMeshBuffer()) {
            UIMeshBuffer* buffer = drawInfo->getMeshBuffer();
            if (_currMeshBuffer != buffer) {
                // New shared mesh buffer: restart the index window from its
                // current write position.
                _currMeshBuffer = buffer;
                _indexStart = _currMeshBuffer->getIndexOffset();
            }
        }
        // Adopt this draw info as the new batching state.
        _currHash = dataHash;
        _currMaterial = drawInfo->getMaterial();
        _currStencilStage = tempStage;
        _currLayer = entity->getNode()->getLayer();
        _currEntity = entity;
        _currDrawInfo = drawInfo;
        _currTexture = drawInfo->getTexture();
        _currSampler = drawInfo->getSampler();
        if (_currSampler == nullptr) {
            _currSamplerHash = 0;
        } else {
            _currSamplerHash = _currSampler->getHash();
        }
    }
    if (!drawInfo->getIsMeshBuffer()) {
        // Refill vertex positions only when the transform or the vertex data
        // actually changed this frame.
        if (node->getChangedFlags() || drawInfo->getVertDirty()) {
            fillVertexBuffers(entity, drawInfo);
            drawInfo->setVertDirty(false);
        }
        if (entity->getVBColorDirty()) {
            fillColors(entity, drawInfo);
        }
        fillIndexBuffers(drawInfo);
    }
    if (isMask) {
        _stencilManager->enableMask();
    }
}
// Handles a MODEL draw info: models never merge with 2D batches, so the
// pending batch is flushed first, then one DrawBatch2D is emitted per
// sub-model with the stencil state resolved for this entity.
CC_FORCE_INLINE void Batcher2d::handleModelDraw(RenderEntity* entity, RenderDrawInfo* drawInfo) {
    generateBatch(_currEntity, _currDrawInfo);
    resetRenderStates();
    // stencil stage
    gfx::DepthStencilState* depthStencil = nullptr;
    ccstd::hash_t dssHash = 0;
    Material* renderMat = drawInfo->getMaterial();
    bool isMask = entity->getIsMask();
    if (isMask) {
        // Mask Comp
        insertMaskBatch(entity);
    } else {
        entity->setEnumStencilStage(_stencilManager->getStencilStage());
    }
    StencilStage entityStage = entity->getEnumStencilStage();
    depthStencil = _stencilManager->getDepthStencilState(entityStage, renderMat);
    dssHash = _stencilManager->getStencilHash(entityStage);
    // Model
    auto* model = drawInfo->getModel();
    if (model == nullptr) return;
    // Keep the model's transform/UBO data in sync for this frame.
    auto stamp = CC_CURRENT_ENGINE()->getTotalFrames();
    model->updateTransform(stamp);
    model->updateUBOs(stamp);
    const auto& subModelList = model->getSubModels();
    for (const auto& submodel : subModelList) {
        auto* curdrawBatch = _drawBatchPool.alloc();
        curdrawBatch->setVisFlags(entity->getNode()->getLayer());
        curdrawBatch->setModel(model);
        curdrawBatch->setInputAssembler(submodel->getInputAssembler());
        curdrawBatch->setDescriptorSet(submodel->getDescriptorSet());
        curdrawBatch->fillPass(renderMat, depthStencil, dssHash, &(submodel->getPatches()));
        _batches.push_back(curdrawBatch);
    }
    if (isMask) {
        _stencilManager->enableMask();
    }
}
// Handles a MIDDLEWARE draw info (spine/dragonbones etc.): merges with the
// previous middleware draw when everything matches and the index ranges are
// contiguous; otherwise flushes and starts a new pending batch.
CC_FORCE_INLINE void Batcher2d::handleMiddlewareDraw(RenderEntity* entity, RenderDrawInfo* drawInfo) {
    auto layer = entity->getNode()->getLayer();
    Material* material = drawInfo->getMaterial();
    auto* texture = drawInfo->getTexture();
    auto* sampler = drawInfo->getSampler();
    auto* meshBuffer = drawInfo->getMeshBuffer();
    // check for merge draw: same texture/buffer/material/layer, neither draw
    // uses a local descriptor set, and this draw's indices start exactly
    // where the previous draw's indices end.
    auto enableBatch = !entity->getUseLocal();
    if (enableBatch && _currTexture == texture && _currMeshBuffer == meshBuffer && !_currEntity->getUseLocal() && material->getHash() == _currMaterial->getHash() && drawInfo->getIndexOffset() == _currDrawInfo->getIndexOffset() + _currDrawInfo->getIbCount() && layer == _currLayer) {
        // Extend the previous draw's index count instead of emitting a batch.
        auto ibCount = _currDrawInfo->getIbCount();
        _currDrawInfo->setIbCount(ibCount + drawInfo->getIbCount());
    } else {
        generateBatch(_currEntity, _currDrawInfo);
        _currLayer = layer;
        _currMaterial = material;
        _currTexture = texture;
        _currMeshBuffer = meshBuffer;
        _currEntity = entity;
        _currDrawInfo = drawInfo;
        // Hash 0 prevents a following COMP draw from merging into this batch.
        _currHash = 0;
    }
}
// Handles a SUB_NODE draw info: recurse into the detached sub-tree, seeding
// it with this entity's accumulated opacity.
CC_FORCE_INLINE void Batcher2d::handleSubNode(RenderEntity* entity, RenderDrawInfo* drawInfo) { // NOLINT
    auto* subNode = drawInfo->getSubNode();
    if (subNode == nullptr) {
        return;
    }
    walk(subNode, entity->getOpacity());
}
// Dispatches one draw info to the handler matching its type. Unknown types
// are ignored, matching the original switch's default case.
CC_FORCE_INLINE void Batcher2d::handleDrawInfo(RenderEntity* entity, RenderDrawInfo* drawInfo, Node* node) { // NOLINT(misc-no-recursion)
    CC_ASSERT(entity);
    CC_ASSERT(drawInfo);
    const RenderDrawInfoType drawInfoType = drawInfo->getEnumDrawInfoType();
    if (drawInfoType == RenderDrawInfoType::COMP) {
        handleComponentDraw(entity, drawInfo, node);
    } else if (drawInfoType == RenderDrawInfoType::MODEL) {
        handleModelDraw(entity, drawInfo);
    } else if (drawInfoType == RenderDrawInfoType::MIDDLEWARE) {
        handleMiddlewareDraw(entity, drawInfo);
    } else if (drawInfoType == RenderDrawInfoType::SUB_NODE) {
        handleSubNode(entity, drawInfo);
    }
}
// Flushes the currently accumulated draw state into one DrawBatch2D.
// No-ops when there is nothing pending (null drawInfo / no material);
// middleware draws are delegated to generateBatchForMiddleware().
void Batcher2d::generateBatch(RenderEntity* entity, RenderDrawInfo* drawInfo) {
    if (drawInfo == nullptr) {
        return;
    }
    if (drawInfo->getEnumDrawInfoType() == RenderDrawInfoType::MIDDLEWARE) {
        generateBatchForMiddleware(entity, drawInfo);
        return;
    }
    if (_currMaterial == nullptr) {
        return;
    }
    gfx::InputAssembler* ia = nullptr;
    uint32_t indexOffset = 0;
    uint32_t indexCount = 0;
    if (drawInfo->getIsMeshBuffer()) {
        // Todo MeshBuffer RenderData
        // Draw info owns its own IA; remember it so uploadBuffers()/reset()
        // can upload and recycle it later.
        ia = drawInfo->requestIA(getDevice());
        indexOffset = drawInfo->getIndexOffset();
        indexCount = drawInfo->getIbCount();
        _meshRenderDrawInfo.emplace_back(drawInfo);
    } else {
        // Shared mesh buffer: the batch covers [_indexStart, current offset).
        UIMeshBuffer* currMeshBuffer = drawInfo->getMeshBuffer();
        currMeshBuffer->setDirty(true);
        ia = currMeshBuffer->requireFreeIA(getDevice());
        indexCount = currMeshBuffer->getIndexOffset() - _indexStart;
        if (ia == nullptr) {
            return;
        }
        indexOffset = _indexStart;
        _indexStart = currMeshBuffer->getIndexOffset();
    }
    _currMeshBuffer = nullptr;
    // stencilStage
    gfx::DepthStencilState* depthStencil = nullptr;
    ccstd::hash_t dssHash = 0;
    StencilStage entityStage = entity->getEnumStencilStage();
    depthStencil = _stencilManager->getDepthStencilState(entityStage, _currMaterial);
    dssHash = _stencilManager->getStencilHash(entityStage);
    auto* curdrawBatch = _drawBatchPool.alloc();
    curdrawBatch->setVisFlags(_currLayer);
    curdrawBatch->setInputAssembler(ia);
    curdrawBatch->setFirstIndex(indexOffset);
    curdrawBatch->setIndexCount(indexCount);
    curdrawBatch->fillPass(_currMaterial, depthStencil, dssHash);
    const auto& pass = curdrawBatch->getPasses().at(0);
    if (entity->getUseLocal()) {
        // Per-entity descriptor set holding the entity's world transform.
        drawInfo->updateLocalDescriptorSet(entity->getRenderTransform(), pass->getLocalSetLayout());
        curdrawBatch->setDescriptorSet(drawInfo->getLocalDes());
    } else {
        // Shared, cached descriptor set keyed by texture + sampler.
        curdrawBatch->setDescriptorSet(getDescriptorSet(_currTexture, _currSampler, pass->getLocalSetLayout()));
    }
    _batches.push_back(curdrawBatch);
}
// Emits one DrawBatch2D for a middleware draw info (spine/dragonbones).
// Called from generateBatch(); the batching state (_currLayer etc.) was set
// by handleMiddlewareDraw() for this same draw info.
// Fix: removed the unused local `layer` — it was computed from the entity
// but never read (visFlags comes from _currLayer, which handleMiddlewareDraw
// assigned from the same node).
void Batcher2d::generateBatchForMiddleware(RenderEntity* entity, RenderDrawInfo* drawInfo) {
    auto* material = drawInfo->getMaterial();
    auto* texture = drawInfo->getTexture();
    auto* sampler = drawInfo->getSampler();
    auto* meshBuffer = drawInfo->getMeshBuffer();
    // set meshbuffer offset: advance the buffer's write position past this
    // draw's index range if it isn't already beyond it.
    auto indexOffset = drawInfo->getIndexOffset();
    auto indexCount = drawInfo->getIbCount();
    indexOffset += indexCount;
    if (meshBuffer->getIndexOffset() < indexOffset) {
        meshBuffer->setIndexOffset(indexOffset);
    }
    meshBuffer->setDirty(true);
    gfx::InputAssembler* ia = meshBuffer->requireFreeIA(getDevice());
    // stencilstage
    auto stencilStage = _stencilManager->getStencilStage();
    gfx::DepthStencilState* depthStencil = _stencilManager->getDepthStencilState(stencilStage, material);
    ccstd::hash_t dssHash = _stencilManager->getStencilHash(stencilStage);
    auto* curdrawBatch = _drawBatchPool.alloc();
    curdrawBatch->setVisFlags(_currLayer);
    curdrawBatch->setInputAssembler(ia);
    curdrawBatch->setFirstIndex(drawInfo->getIndexOffset());
    curdrawBatch->setIndexCount(drawInfo->getIbCount());
    curdrawBatch->fillPass(material, depthStencil, dssHash);
    const auto& pass = curdrawBatch->getPasses().at(0);
    if (entity->getUseLocal()) {
        drawInfo->updateLocalDescriptorSet(entity->getNode(), pass->getLocalSetLayout());
        curdrawBatch->setDescriptorSet(drawInfo->getLocalDes());
    } else {
        curdrawBatch->setDescriptorSet(getDescriptorSet(texture, sampler, pass->getLocalSetLayout()));
    }
    _batches.push_back(curdrawBatch);
    // make sure next generateBatch return.
    resetRenderStates();
    _currMeshBuffer = nullptr;
}
// Clears the accumulated batching state so the next draw info starts a fresh
// batch. Note: _currMeshBuffer, _currHash and _currStencilStage are handled
// separately by callers that need them reset.
void Batcher2d::resetRenderStates() {
    _currMaterial = nullptr;
    _currTexture = nullptr;
    _currSampler = nullptr;
    _currSamplerHash = 0;
    _currLayer = 0;
    _currEntity = nullptr;
    _currDrawInfo = nullptr;
}
// Returns a descriptor set bound to (texture, sampler), cached by a hash of
// the texture pointer and the sampler hash (seed 2). Cache hits rebind and
// force-update; misses create a new set owned by _descriptorSetCache (freed
// in the destructor / releaseDescriptorSetCache()).
// Fix: `textureHash` was declared uninitialized at function scope although
// it is only used inside the texture branch — now scoped and const there.
gfx::DescriptorSet* Batcher2d::getDescriptorSet(gfx::Texture* texture, gfx::Sampler* sampler, const gfx::DescriptorSetLayout* dsLayout) {
    ccstd::hash_t hash = 2;
    if (texture != nullptr) {
        size_t const textureHash = boost::hash_value(texture);
        ccstd::hash_combine(hash, textureHash);
    }
    if (sampler != nullptr) {
        ccstd::hash_combine(hash, sampler->getHash());
    }
    auto iter = _descriptorSetCache.find(hash);
    if (iter != _descriptorSetCache.end()) {
        if (texture != nullptr && sampler != nullptr) {
            iter->second->bindTexture(static_cast<uint32_t>(pipeline::ModelLocalBindings::SAMPLER_SPRITE), texture);
            iter->second->bindSampler(static_cast<uint32_t>(pipeline::ModelLocalBindings::SAMPLER_SPRITE), sampler);
        }
        iter->second->forceUpdate();
        return iter->second;
    }
    _dsInfo.layout = dsLayout;
    auto* ds = getDevice()->createDescriptorSet(_dsInfo);
    if (texture != nullptr && sampler != nullptr) {
        ds->bindTexture(static_cast<uint32_t>(pipeline::ModelLocalBindings::SAMPLER_SPRITE), texture);
        ds->bindSampler(static_cast<uint32_t>(pipeline::ModelLocalBindings::SAMPLER_SPRITE), sampler);
    }
    ds->update();
    _descriptorSetCache.emplace(hash, ds);
    return ds;
}
// Deletes and evicts the cached descriptor set for (texture, sampler).
// The key must be computed exactly as in getDescriptorSet() (seed 2,
// texture pointer hash, sampler hash).
// Fixes: `textureHash` scoped into the branch that uses it, and the map
// entry is erased through the iterator instead of re-hashing the key.
void Batcher2d::releaseDescriptorSetCache(gfx::Texture* texture, gfx::Sampler* sampler) {
    ccstd::hash_t hash = 2;
    if (texture != nullptr) {
        size_t const textureHash = boost::hash_value(texture);
        ccstd::hash_combine(hash, textureHash);
    }
    if (sampler != nullptr) {
        ccstd::hash_combine(hash, sampler->getHash());
    }
    auto iter = _descriptorSetCache.find(hash);
    if (iter != _descriptorSetCache.end()) {
        delete iter->second;
        _descriptorSetCache.erase(iter);
    }
}
// Marks the batcher as initialized; always succeeds.
bool Batcher2d::initialize() {
    _isInit = true;
    return true;
}
// Per-frame update: rebuilds all batches from the UI trees, then clears the
// transient batching state.
void Batcher2d::update() {
    fillBuffersAndMergeBatches();
    resetRenderStates();
}
// Pushes this frame's vertex/index data to the GPU: per-draw-info buffers
// collected by generateBatch(), then every registered shared UIMeshBuffer
// (which is reset after upload). Skipped entirely when no batch was built.
void Batcher2d::uploadBuffers() {
    if (_batches.empty()) {
        return;
    }
    for (auto& meshRenderData : _meshRenderDrawInfo) {
        meshRenderData->uploadBuffers();
    }
    for (auto& map : _meshBuffersMap) {
        for (auto& buffer : map.second) {
            buffer->uploadBuffers();
            buffer->reset();
        }
    }
    updateDescriptorSet();
}
// End-of-frame cleanup: returns batches to the pool, recycles the input
// assemblers of both per-draw-info meshes and shared mesh buffers, and
// clears the transient batching cursors.
void Batcher2d::reset() {
    for (auto& batch : _batches) {
        batch->clear();
        _drawBatchPool.free(batch);
    }
    _batches.clear();
    for (auto& meshRenderData : _meshRenderDrawInfo) {
        meshRenderData->resetMeshIA();
    }
    _meshRenderDrawInfo.clear();
    // meshDataArray
    for (auto& map : _meshBuffersMap) {
        for (auto& buffer : map.second) {
            if (buffer) {
                buffer->resetIA();
            }
        }
    }
    // meshBuffer cannot clear because it is not transported at every frame.
    _currMeshBuffer = nullptr;
    _indexStart = 0;
    _currHash = 0;
    _currLayer = 0;
    _currMaterial = nullptr;
    _currTexture = nullptr;
    _currSampler = nullptr;
    // stencilManager
}
// Starts a stencil mask for `entity`: flushes the pending batch, attaches
// the shared stencil-clear model to the entity's node, and emits a batch
// per sub-model that clears the stencil area, then enters the mask level.
// Fix: _maskClearModel was dereferenced (setNode/setTransform) BEFORE the
// null check further down; the dereference is now guarded. The stencil
// push/clear sequencing is unchanged.
void Batcher2d::insertMaskBatch(RenderEntity* entity) {
    generateBatch(_currEntity, _currDrawInfo);
    resetRenderStates();
    createClearModel();
    if (_maskClearModel != nullptr) {
        _maskClearModel->setNode(entity->getNode());
        _maskClearModel->setTransform(entity->getNode());
    }
    _stencilManager->pushMask();
    auto stage = _stencilManager->clear(entity);
    gfx::DepthStencilState* depthStencil = nullptr;
    ccstd::hash_t dssHash = 0;
    if (_maskClearMtl != nullptr) {
        depthStencil = _stencilManager->getDepthStencilState(stage, _maskClearMtl);
        dssHash = _stencilManager->getStencilHash(stage);
    }
    // Model
    if (_maskClearModel == nullptr) return;
    auto stamp = CC_CURRENT_ENGINE()->getTotalFrames();
    _maskClearModel->updateTransform(stamp);
    _maskClearModel->updateUBOs(stamp);
    const auto& subModelList = _maskClearModel->getSubModels();
    for (const auto& submodel : subModelList) {
        auto* curdrawBatch = _drawBatchPool.alloc();
        curdrawBatch->setVisFlags(entity->getNode()->getLayer());
        curdrawBatch->setModel(_maskClearModel);
        curdrawBatch->setInputAssembler(submodel->getInputAssembler());
        curdrawBatch->setDescriptorSet(submodel->getDescriptorSet());
        curdrawBatch->fillPass(_maskClearMtl, depthStencil, dssHash, &(submodel->getPatches()));
        _batches.push_back(curdrawBatch);
    }
    _stencilManager->enterLevel(entity);
}
// Lazily builds the model used to clear the stencil buffer under a mask:
// a fullscreen quad (4 position-only vertices, 2 triangles) rendered with
// the builtin "default-clear-stencil" material.
void Batcher2d::createClearModel() {
    if (_maskClearModel == nullptr) {
        _maskClearMtl = BuiltinResMgr::getInstance()->get<Material>(ccstd::string("default-clear-stencil"));
        _maskClearModel = Root::getInstance()->createModel<scene::Model>();
        uint32_t stride = 12; // vfmt: position only, 3 floats per vertex
        auto* vertexBuffer = _device->createBuffer({
            gfx::BufferUsageBit::VERTEX | gfx::BufferUsageBit::TRANSFER_DST,
            gfx::MemoryUsageBit::DEVICE,
            4 * stride,
            stride,
        });
        // Clip-space corners covering the whole screen.
        const float vertices[] = {-1, -1, 0, 1, -1, 0, -1, 1, 0, 1, 1, 0};
        vertexBuffer->update(vertices);
        auto* indexBuffer = _device->createBuffer({
            gfx::BufferUsageBit::INDEX | gfx::BufferUsageBit::TRANSFER_DST,
            gfx::MemoryUsageBit::DEVICE,
            6 * sizeof(uint16_t),
            sizeof(uint16_t),
        });
        const uint16_t indices[] = {0, 2, 1, 2, 1, 3};
        indexBuffer->update(indices);
        gfx::BufferList vbReference;
        vbReference.emplace_back(vertexBuffer);
        _maskModelMesh = ccnew RenderingSubMesh(vbReference, _maskAttributes, _primitiveMode, indexBuffer);
        _maskModelMesh->setSubMeshIdx(0);
        _maskClearModel->initSubModel(0, _maskModelMesh, _maskClearMtl);
    }
}
} // namespace cc

View File

@@ -0,0 +1,202 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "2d/renderer/RenderDrawInfo.h"
#include "2d/renderer/RenderEntity.h"
#include "2d/renderer/UIMeshBuffer.h"
#include "base/Macros.h"
#include "base/Ptr.h"
#include "base/TypeDef.h"
#include "core/assets/Material.h"
#include "core/memop/Pool.h"
#include "renderer/gfx-base/GFXTexture.h"
#include "renderer/gfx-base/states/GFXSampler.h"
#include "scene/DrawBatch2D.h"
namespace cc {
class Root;
using UIMeshBufferArray = ccstd::vector<UIMeshBuffer*>;
using UIMeshBufferMap = ccstd::unordered_map<uint16_t, UIMeshBufferArray>;
class Batcher2d final {
public:
Batcher2d();
explicit Batcher2d(Root* root);
~Batcher2d();
void syncMeshBuffersToNative(uint16_t accId, ccstd::vector<UIMeshBuffer*>&& buffers);
bool initialize();
void update();
void uploadBuffers();
void reset();
void syncRootNodesToNative(ccstd::vector<Node*>&& rootNodes);
void releaseDescriptorSetCache(gfx::Texture* texture, gfx::Sampler* sampler);
UIMeshBuffer* getMeshBuffer(uint16_t accId, uint16_t bufferId);
gfx::Device* getDevice();
inline ccstd::vector<gfx::Attribute>* getDefaultAttribute() { return &_attributes; }
void updateDescriptorSet();
void fillBuffersAndMergeBatches();
void walk(Node* node, float parentOpacity);
void handlePostRender(RenderEntity* entity);
void handleDrawInfo(RenderEntity* entity, RenderDrawInfo* drawInfo, Node* node);
void handleComponentDraw(RenderEntity* entity, RenderDrawInfo* drawInfo, Node* node);
void handleModelDraw(RenderEntity* entity, RenderDrawInfo* drawInfo);
void handleMiddlewareDraw(RenderEntity* entity, RenderDrawInfo* drawInfo);
void handleSubNode(RenderEntity* entity, RenderDrawInfo* drawInfo);
void generateBatch(RenderEntity* entity, RenderDrawInfo* drawInfo);
void generateBatchForMiddleware(RenderEntity* entity, RenderDrawInfo* drawInfo);
void resetRenderStates();
private:
bool _isInit = false;
inline void fillIndexBuffers(RenderDrawInfo* drawInfo) { // NOLINT(readability-convert-member-functions-to-static)
uint16_t* ib = drawInfo->getIDataBuffer();
UIMeshBuffer* buffer = drawInfo->getMeshBuffer();
uint32_t indexOffset = buffer->getIndexOffset();
uint16_t* indexb = drawInfo->getIbBuffer();
uint32_t indexCount = drawInfo->getIbCount();
memcpy(&ib[indexOffset], indexb, indexCount * sizeof(uint16_t));
indexOffset += indexCount;
buffer->setIndexOffset(indexOffset);
}
inline void fillVertexBuffers(RenderEntity* entity, RenderDrawInfo* drawInfo) { // NOLINT(readability-convert-member-functions-to-static)
Node* node = entity->getNode();
const Mat4& matrix = node->getWorldMatrix();
uint8_t stride = drawInfo->getStride();
uint32_t size = drawInfo->getVbCount() * stride;
float* vbBuffer = drawInfo->getVbBuffer();
for (int i = 0; i < size; i += stride) {
Render2dLayout* curLayout = drawInfo->getRender2dLayout(i);
// make sure that the layout of Vec3 is three consecutive floats
static_assert(sizeof(Vec3) == 3 * sizeof(float));
// cast to reduce value copy instructions
reinterpret_cast<Vec3*>(vbBuffer + i)->transformMat4(curLayout->position, matrix);
}
}
inline void setIndexRange(RenderDrawInfo* drawInfo) { // NOLINT(readability-convert-member-functions-to-static)
UIMeshBuffer* buffer = drawInfo->getMeshBuffer();
uint32_t indexOffset = drawInfo->getIndexOffset();
uint32_t indexCount = drawInfo->getIbCount();
indexOffset += indexCount;
if (buffer->getIndexOffset() < indexOffset) {
buffer->setIndexOffset(indexOffset);
}
}
inline void fillColors(RenderEntity* entity, RenderDrawInfo* drawInfo) { // NOLINT(readability-convert-member-functions-to-static)
Color temp = entity->getColor();
uint8_t stride = drawInfo->getStride();
uint32_t size = drawInfo->getVbCount() * stride;
float* vbBuffer = drawInfo->getVbBuffer();
uint32_t offset = 0;
for (int i = 0; i < size; i += stride) {
offset = i + 5;
vbBuffer[offset++] = static_cast<float>(temp.r) / 255.0F;
vbBuffer[offset++] = static_cast<float>(temp.g) / 255.0F;
vbBuffer[offset++] = static_cast<float>(temp.b) / 255.0F;
vbBuffer[offset++] = entity->getOpacity();
}
}
void insertMaskBatch(RenderEntity* entity);
void createClearModel();
gfx::DescriptorSet* getDescriptorSet(gfx::Texture* texture, gfx::Sampler* sampler, const gfx::DescriptorSetLayout* dsLayout);
StencilManager* _stencilManager{nullptr};
// weak reference
Root* _root{nullptr};
// weak reference
ccstd::vector<Node*> _rootNodeArr;
// manage memory manually
ccstd::vector<scene::DrawBatch2D*> _batches;
memop::Pool<scene::DrawBatch2D> _drawBatchPool;
// weak reference
gfx::Device* _device{nullptr}; // use getDevice()
// weak reference
RenderEntity* _currEntity{nullptr};
// weak reference
RenderDrawInfo* _currDrawInfo{nullptr};
// weak reference
UIMeshBuffer* _currMeshBuffer{nullptr};
uint32_t _indexStart{0};
ccstd::hash_t _currHash{0};
uint32_t _currLayer{0};
StencilStage _currStencilStage{StencilStage::DISABLED};
// weak reference
Material* _currMaterial{nullptr};
// weak reference
gfx::Texture* _currTexture{nullptr};
// weak reference
gfx::Sampler* _currSampler{nullptr};
ccstd::hash_t _currSamplerHash{0};
// weak reference
ccstd::vector<RenderDrawInfo*> _meshRenderDrawInfo;
// manage memory manually
ccstd::unordered_map<ccstd::hash_t, gfx::DescriptorSet*> _descriptorSetCache;
gfx::DescriptorSetInfo _dsInfo;
UIMeshBufferMap _meshBuffersMap;
// DefaultAttribute
ccstd::vector<gfx::Attribute> _attributes{
gfx::Attribute{gfx::ATTR_NAME_POSITION, gfx::Format::RGB32F},
gfx::Attribute{gfx::ATTR_NAME_TEX_COORD, gfx::Format::RG32F},
gfx::Attribute{gfx::ATTR_NAME_COLOR, gfx::Format::RGBA32F},
};
// Mask use
IntrusivePtr<scene::Model> _maskClearModel;
IntrusivePtr<Material> _maskClearMtl;
IntrusivePtr<RenderingSubMesh> _maskModelMesh;
ccstd::vector<gfx::Attribute> _maskAttributes{
gfx::Attribute{gfx::ATTR_NAME_POSITION, gfx::Format::RGB32F},
};
gfx::PrimitiveMode _primitiveMode{gfx::PrimitiveMode::TRIANGLE_LIST};
CC_DISALLOW_COPY_MOVE_ASSIGN(Batcher2d);
};
} // namespace cc

View File

@@ -0,0 +1,136 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "2d/renderer/RenderDrawInfo.h"
#include "2d/renderer/Batcher2d.h"
#include "base/TypeDef.h"
#include "core/Root.h"
#include "renderer/gfx-base/GFXDevice.h"
namespace cc {
// File-scope scratch state shared by all RenderDrawInfo instances: gDsInfo
// is reused when creating local descriptor sets, matrixData stages the
// UBOLocal upload. Not reentrant; presumably draw-info updates run on a
// single thread — TODO confirm.
static gfx::DescriptorSetInfo gDsInfo;
static float matrixData[pipeline::UBOLocal::COUNT] = {0.F};
// Copies the 16 floats of mat.m into `out` starting at element `ofs`.
void mat4ToFloatArray(const cc::Mat4& mat, float* out, index_t ofs = 0) {
    memcpy(out + ofs, mat.m, 16 * sizeof(float));
}
// Constructor: exposes _drawInfoAttrs to the script layer through a shared
// array buffer so JS writes land directly in the native struct.
RenderDrawInfo::RenderDrawInfo() {
    _attrSharedBufferActor.initialize(&_drawInfoAttrs, sizeof(_drawInfoAttrs));
}
// Destructor: releases GPU buffers, the IA and the local descriptor-set
// bundle via destroy().
RenderDrawInfo::~RenderDrawInfo() {
    destroy();
}
// Re-resolves the shared UIMeshBuffer this draw info writes into, using the
// accessor/buffer ids currently stored in _drawInfoAttrs.
void RenderDrawInfo::changeMeshBuffer() {
    CC_ASSERT(Root::getInstance()->getBatcher2D());
    _meshBuffer = Root::getInstance()->getBatcher2D()->getMeshBuffer(_drawInfoAttrs._accId, _drawInfoAttrs._bufferId);
}
// Returns (creating on first use) the input assembler owned by this draw
// info. Only valid for COMP draw infos flagged as mesh buffers.
gfx::InputAssembler* RenderDrawInfo::requestIA(gfx::Device* device) {
    CC_ASSERT(_drawInfoAttrs._isMeshBuffer && _drawInfoAttrs._drawInfoType == RenderDrawInfoType::COMP);
    return initIAInfo(device);
}
// Uploads this draw info's vertex and index data into its own GPU buffers,
// resizing them to fit. Only valid for mesh-buffer COMP draw infos.
void RenderDrawInfo::uploadBuffers() {
    CC_ASSERT(_drawInfoAttrs._isMeshBuffer && _drawInfoAttrs._drawInfoType == RenderDrawInfoType::COMP);
    if (_drawInfoAttrs._vbCount == 0 || _drawInfoAttrs._ibCount == 0) return;
    // 9 floats per vertex — matches Batcher2d's default attribute layout.
    uint32_t size = _drawInfoAttrs._vbCount * 9 * sizeof(float); // magic Number
    gfx::Buffer* vBuffer = _ia->getVertexBuffers()[0];
    vBuffer->resize(size);
    vBuffer->update(_vDataBuffer);
    gfx::Buffer* iBuffer = _ia->getIndexBuffer();
    // 2 bytes per index (uint16_t).
    uint32_t iSize = _drawInfoAttrs._ibCount * 2;
    iBuffer->resize(iSize);
    iBuffer->update(_iDataBuffer);
}
// Hook called from Batcher2d::reset() for mesh-buffer draw infos; currently
// only validates the draw-info type — the owned IA is reused across frames.
void RenderDrawInfo::resetMeshIA() { // NOLINT(readability-make-member-function-const)
    CC_ASSERT(_drawInfoAttrs._isMeshBuffer && _drawInfoAttrs._drawInfoType == RenderDrawInfoType::COMP);
}
// Releases owned GPU resources: the vertex/index buffers and IA (held by
// IntrusivePtr, dropped by assigning nullptr) and the heap-allocated local
// descriptor-set bundle.
void RenderDrawInfo::destroy() {
    _vb = nullptr;
    _ib = nullptr;
    _ia = nullptr;
    if (_localDSBF) {
        CC_SAFE_DELETE(_localDSBF->ds);
        CC_SAFE_DELETE(_localDSBF->uboBuf);
        CC_SAFE_DELETE(_localDSBF);
    }
}
// Lazily creates this draw info's own vertex buffer, index buffer and input
// assembler. Buffers start small (3 vertices / 3 indices) and are resized
// to the real data size in uploadBuffers().
gfx::InputAssembler* RenderDrawInfo::initIAInfo(gfx::Device* device) {
    if (!_ia) {
        gfx::InputAssemblerInfo iaInfo = {};
        // 9 floats per vertex — matches Batcher2d's default attribute layout.
        uint32_t vbStride = 9 * sizeof(float); // magic Number
        uint32_t ibStride = sizeof(uint16_t);
        _vb = device->createBuffer({
            gfx::BufferUsageBit::VERTEX | gfx::BufferUsageBit::TRANSFER_DST,
            gfx::MemoryUsageBit::DEVICE | gfx::MemoryUsageBit::HOST,
            vbStride * 3,
            vbStride,
        });
        _ib = device->createBuffer({
            gfx::BufferUsageBit::INDEX | gfx::BufferUsageBit::TRANSFER_DST,
            gfx::MemoryUsageBit::DEVICE | gfx::MemoryUsageBit::HOST,
            ibStride * 3,
            ibStride,
        });
        iaInfo.attributes = *(Root::getInstance()->getBatcher2D()->getDefaultAttribute());
        iaInfo.vertexBuffers.emplace_back(_vb);
        iaInfo.indexBuffer = _ib;
        _ia = device->createInputAssembler(iaInfo);
    }
    return _ia;
}
// Builds (on first use) and refreshes the per-draw local descriptor set:
// binds the sprite texture/sampler, binds the UBOLocal buffer, and uploads
// the node's world matrix into it. Uses the file-scope gDsInfo/matrixData
// scratch state, so this is not reentrant.
void RenderDrawInfo::updateLocalDescriptorSet(Node* transform, const gfx::DescriptorSetLayout* dsLayout) {
    if (_localDSBF == nullptr) {
        _localDSBF = new LocalDSBF();
        auto* device = Root::getInstance()->getDevice();
        gDsInfo.layout = dsLayout;
        _localDSBF->ds = device->createDescriptorSet(gDsInfo);
        _localDSBF->uboBuf = device->createBuffer({
            gfx::BufferUsageBit::UNIFORM | gfx::BufferUsageBit::TRANSFER_DST,
            gfx::MemoryUsageBit::HOST | gfx::MemoryUsageBit::DEVICE,
            pipeline::UBOLocal::SIZE,
            pipeline::UBOLocal::SIZE,
        });
    }
    if (_texture != nullptr && _sampler != nullptr) {
        _localDSBF->ds->bindTexture(static_cast<uint32_t>(pipeline::ModelLocalBindings::SAMPLER_SPRITE), _texture);
        _localDSBF->ds->bindSampler(static_cast<uint32_t>(pipeline::ModelLocalBindings::SAMPLER_SPRITE), _sampler);
    }
    _localDSBF->ds->bindBuffer(pipeline::UBOLocal::BINDING, _localDSBF->uboBuf);
    _localDSBF->ds->update();
    const auto& worldMatrix = transform->getWorldMatrix();
    mat4ToFloatArray(worldMatrix, matrixData, pipeline::UBOLocal::MAT_WORLD_OFFSET);
    _localDSBF->uboBuf->update(matrixData);
}
} // namespace cc

View File

@@ -0,0 +1,300 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "2d/renderer/UIMeshBuffer.h"
#include "base/Ptr.h"
#include "base/Macros.h"
#include "base/TypeDef.h"
#include "bindings/utils/BindingUtils.h"
#include "core/ArrayBuffer.h"
#include "core/assets/Material.h"
#include "core/scene-graph/Node.h"
#include "math/Color.h"
#include "math/Vec2.h"
#include "math/Vec3.h"
#include "math/Vec4.h"
#include "renderer/gfx-base/states/GFXSampler.h"
#include "scene/Model.h"
namespace cc {
// Per-vertex layout of the 2D vertex stream shared with the script side:
// position (xyz), uv, color — tightly packed floats (see getRender2dLayout()).
struct Render2dLayout {
    Vec3 position;
    Vec2 uv;
    Vec4 color;
};

// Discriminates what a RenderDrawInfo describes; also selects which member
// of RenderDrawInfo's trailing union is active.
enum class RenderDrawInfoType : uint8_t {
    COMP,       // regular 2D component: vertex data in a shared buffer
    MODEL,      // wraps a scene::Model
    MIDDLEWARE, // middleware-produced draw data — presumably spine/dragonbones; TODO confirm
    SUB_NODE,   // defers to a sub node's render data
};

// Per-draw local descriptor set paired with its UBO backing buffer.
struct LocalDSBF {
    gfx::DescriptorSet* ds;
    gfx::Buffer* uboBuf;
};
class Batcher2d;
// Native mirror of one 2D draw command. Depending on _drawInfoType it either
// carries raw vertex/index streams (COMP), wraps a scene::Model (MODEL),
// middleware data (MIDDLEWARE), or a sub node (SUB_NODE). Most pointers held
// here are weak references owned by the script side or by other systems.
// _drawInfoAttrs is exposed to JS through a shared array buffer
// (_attrSharedBufferActor), so its layout must not change casually.
class RenderDrawInfo final {
public:
    RenderDrawInfo();
    ~RenderDrawInfo();

    // --- _drawInfoAttrs accessors; uint32_t overloads exist for the bindings ---
    inline uint32_t getDrawInfoType() const { return static_cast<uint32_t>(_drawInfoAttrs._drawInfoType); }
    inline void setDrawInfoType(uint32_t type) {
        _drawInfoAttrs._drawInfoType = static_cast<RenderDrawInfoType>(type);
    }
    inline uint16_t getAccId() const { return _drawInfoAttrs._accId; }
    inline void setAccId(uint16_t id) {
        _drawInfoAttrs._accId = id;
    }
    inline uint16_t getBufferId() const { return _drawInfoAttrs._bufferId; }
    inline void setBufferId(uint16_t bufferId) {
        _drawInfoAttrs._bufferId = bufferId;
    }
    inline uint32_t getVertexOffset() const { return _drawInfoAttrs._vertexOffset; }
    inline void setVertexOffset(uint32_t vertexOffset) {
        _drawInfoAttrs._vertexOffset = vertexOffset;
    }
    inline uint32_t getIndexOffset() const { return _drawInfoAttrs._indexOffset; }
    inline void setIndexOffset(uint32_t indexOffset) {
        _drawInfoAttrs._indexOffset = indexOffset;
    }
    inline uint32_t getVbCount() const { return _drawInfoAttrs._vbCount; }
    inline void setVbCount(uint32_t vbCount) {
        _drawInfoAttrs._vbCount = vbCount;
    }
    inline uint32_t getIbCount() const { return _drawInfoAttrs._ibCount; }
    inline void setIbCount(uint32_t ibCount) {
        _drawInfoAttrs._ibCount = ibCount;
    }
    inline bool getVertDirty() const { return _drawInfoAttrs._vertDirty; }
    inline void setVertDirty(bool val) {
        _drawInfoAttrs._vertDirty = val;
    }
    inline ccstd::hash_t getDataHash() const { return _drawInfoAttrs._dataHash; }
    inline void setDataHash(ccstd::hash_t dataHash) {
        _drawInfoAttrs._dataHash = dataHash;
    }
    inline bool getIsMeshBuffer() const { return _drawInfoAttrs._isMeshBuffer; }
    inline void setIsMeshBuffer(bool isMeshBuffer) {
        _drawInfoAttrs._isMeshBuffer = isMeshBuffer;
    }
    inline uint8_t getStride() const { return _drawInfoAttrs._stride; }
    inline void setStride(uint8_t stride) {
        _drawInfoAttrs._stride = stride;
    }

    // --- weak-referenced resources used to build/render the batch ---
    inline Material* getMaterial() const { return _material; }
    inline void setMaterial(Material* material) {
        _material = material;
    }
    inline void setMeshBuffer(UIMeshBuffer* meshBuffer) {
        _meshBuffer = meshBuffer;
    }
    inline UIMeshBuffer* getMeshBuffer() const {
        return _meshBuffer;
    }
    inline float* getVDataBuffer() const {
        return _vDataBuffer;
    }
    inline void setVDataBuffer(float* vDataBuffer) {
        _vDataBuffer = vDataBuffer;
    }
    inline uint16_t* getIDataBuffer() const {
        return _iDataBuffer;
    }
    inline void setIDataBuffer(uint16_t* iDataBuffer) {
        _iDataBuffer = iDataBuffer;
    }
    inline gfx::Texture* getTexture() const {
        return _texture;
    }
    inline void setTexture(gfx::Texture* texture) {
        _texture = texture;
    }
    inline gfx::Sampler* getSampler() const {
        return _sampler;
    }
    inline void setSampler(gfx::Sampler* sampler) {
        _sampler = sampler;
    }
    inline float* getVbBuffer() const {
        return _vbBuffer;
    }
    inline void setVbBuffer(float* vbBuffer) {
        _vbBuffer = vbBuffer;
    }
    inline uint16_t* getIbBuffer() const {
        return _ibBuffer;
    }
    inline void setIbBuffer(uint16_t* ibBuffer) {
        _ibBuffer = ibBuffer;
    }

    // --- union accessors: only valid for the matching _drawInfoType ---
    inline scene::Model* getModel() const {
        CC_ASSERT_EQ(_drawInfoAttrs._drawInfoType, RenderDrawInfoType::MODEL);
        return _model;
    }
    inline void setModel(scene::Model* model) {
        CC_ASSERT_EQ(_drawInfoAttrs._drawInfoType, RenderDrawInfoType::MODEL);
        // Extra runtime guard in release builds where the assert is a no-op.
        if (_drawInfoAttrs._drawInfoType == RenderDrawInfoType::MODEL) {
            _model = model;
        }
    }
    inline Node* getSubNode() const {
        CC_ASSERT_EQ(_drawInfoAttrs._drawInfoType, RenderDrawInfoType::SUB_NODE);
        return _subNode;
    }
    inline void setSubNode(Node* node) {
        CC_ASSERT_EQ(_drawInfoAttrs._drawInfoType, RenderDrawInfoType::SUB_NODE);
        _subNode = node;
    }

    void changeMeshBuffer();
    inline RenderDrawInfoType getEnumDrawInfoType() const { return _drawInfoAttrs._drawInfoType; }

    // Adopts the script-shared vertex stream (COMP, non-mesh-buffer mode only).
    inline void setRender2dBufferToNative(uint8_t* buffer) { // NOLINT(bugprone-easily-swappable-parameters)
        CC_ASSERT(_drawInfoAttrs._drawInfoType == RenderDrawInfoType::COMP && !_drawInfoAttrs._isMeshBuffer);
        _sharedBuffer = buffer;
    }
    // Reinterprets the shared stream at a float offset as one vertex record.
    inline Render2dLayout* getRender2dLayout(uint32_t dataOffset) const {
        CC_ASSERT(_drawInfoAttrs._drawInfoType == RenderDrawInfoType::COMP && !_drawInfoAttrs._isMeshBuffer);
        return reinterpret_cast<Render2dLayout*>(_sharedBuffer + dataOffset * sizeof(float));
    }
    inline se::Object* getAttrSharedBufferForJS() const { return _attrSharedBufferActor.getSharedArrayBufferObject(); }

    gfx::InputAssembler* requestIA(gfx::Device* device);
    void uploadBuffers();
    void resetMeshIA();

    inline gfx::DescriptorSet* getLocalDes() { return _localDSBF->ds; }
    void updateLocalDescriptorSet(Node* transform, const gfx::DescriptorSetLayout* dsLayout);

    // Returns the draw info to its freshly-constructed state; releases owned
    // GPU objects through destroy() and clears all weak references.
    inline void resetDrawInfo() {
        destroy();

        _drawInfoAttrs._bufferId = 0;
        _drawInfoAttrs._accId = 0;
        _drawInfoAttrs._vertexOffset = 0;
        _drawInfoAttrs._indexOffset = 0;
        _drawInfoAttrs._vbCount = 0;
        _drawInfoAttrs._ibCount = 0;
        _drawInfoAttrs._stride = 0;
        _drawInfoAttrs._dataHash = 0;
        _drawInfoAttrs._vertDirty = false;
        _drawInfoAttrs._isMeshBuffer = false;
        _vbBuffer = nullptr;
        _ibBuffer = nullptr;
        _vDataBuffer = nullptr;
        _iDataBuffer = nullptr;
        _material = nullptr;
        _texture = nullptr;
        _sampler = nullptr;
        _subNode = nullptr;
        _model = nullptr;
        _sharedBuffer = nullptr;
    }

private:
    CC_DISALLOW_COPY_MOVE_ASSIGN(RenderDrawInfo);
    void destroy();

    gfx::InputAssembler* initIAInfo(gfx::Device* device);

    // Packed attribute block mirrored to JS via _attrSharedBufferActor.
    struct DrawInfoAttrs {
        RenderDrawInfoType _drawInfoType{RenderDrawInfoType::COMP};
        bool _vertDirty{false};
        bool _isMeshBuffer{false};
        uint8_t _stride{0};
        uint16_t _bufferId{0};
        uint16_t _accId{0};
        uint32_t _vertexOffset{0};
        uint32_t _indexOffset{0};
        uint32_t _vbCount{0};
        uint32_t _ibCount{0};
        ccstd::hash_t _dataHash{0};
    } _drawInfoAttrs{};

    bindings::NativeMemorySharedToScriptActor _attrSharedBufferActor;
    // weak reference
    Material* _material{nullptr};
    // weak reference
    float* _vDataBuffer{nullptr};
    // weak reference
    uint16_t* _iDataBuffer{nullptr};
    // weak reference
    UIMeshBuffer* _meshBuffer{nullptr};
    // weak reference
    gfx::Texture* _texture{nullptr};
    // weak reference
    gfx::Sampler* _sampler{nullptr};
    // weak reference
    float* _vbBuffer{nullptr};
    // weak reference
    uint16_t* _ibBuffer{nullptr};
    // Active member selected by _drawInfoAttrs._drawInfoType (all weak).
    union {
        Node* _subNode{nullptr};
        scene::Model* _model;
        uint8_t* _sharedBuffer;
    };
    LocalDSBF* _localDSBF{nullptr};
    // ia — owned GPU objects for the mesh-buffer path
    IntrusivePtr<gfx::InputAssembler> _ia;
    IntrusivePtr<gfx::Buffer> _vb;
    IntrusivePtr<gfx::Buffer> _ib;
};
} // namespace cc

View File

@@ -0,0 +1,115 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "2d/renderer/RenderEntity.h"
#include "2d/renderer/Batcher2d.h"
#include "bindings/utils/BindingUtils.h"
namespace cc {
// Constructs the entity and explicitly activates the proper union member:
// STATIC entities use a fixed in-place array of draw infos, everything else
// a vector of externally-owned pointers. The attribute layout is exposed to
// the script side through a shared array buffer.
RenderEntity::RenderEntity(RenderEntityType type) : _renderEntityType(type) {
    if (type == RenderEntityType::STATIC) {
        // Placement-new: union members are never constructed implicitly.
        ccnew_placement(&_staticDrawInfos) std::array<RenderDrawInfo, RenderEntity::STATIC_DRAW_INFO_CAPACITY>();
    } else {
        ccnew_placement(&_dynamicDrawInfos) ccstd::vector<RenderDrawInfo*>();
    }
    _entitySharedBufferActor.initialize(&_entityAttrLayout, sizeof(EntityAttrLayout));
}
// Destroys whichever union member the constructor placement-constructed
// (union members never run destructors implicitly).
// Fix: removed the stray ';' after the function body (empty declaration).
RenderEntity::~RenderEntity() {
    if (_renderEntityType == RenderEntityType::STATIC) {
        _staticDrawInfos.~array();
    } else {
        _dynamicDrawInfos.~vector();
    }
}
// Appends a draw info (not owned) to a dynamic entity.
void RenderEntity::addDynamicRenderDrawInfo(RenderDrawInfo* drawInfo) {
    CC_ASSERT_NE(_renderEntityType, RenderEntityType::STATIC);
    _dynamicDrawInfos.push_back(drawInfo);
}
// Overwrites the draw info at `index`; out-of-range indices are ignored.
void RenderEntity::setDynamicRenderDrawInfo(RenderDrawInfo* drawInfo, uint32_t index) {
    CC_ASSERT_NE(_renderEntityType, RenderEntityType::STATIC);
    if (index >= _dynamicDrawInfos.size()) {
        return; // out of range: leave existing entries untouched
    }
    _dynamicDrawInfos[index] = drawInfo;
}
// Drops the last dynamic draw info, if any.
void RenderEntity::removeDynamicRenderDrawInfo() {
    CC_ASSERT_NE(_renderEntityType, RenderEntityType::STATIC);
    if (!_dynamicDrawInfos.empty()) {
        _dynamicDrawInfos.pop_back(); // warning: memory leaking & crash
    }
}
// Removes every dynamic draw-info pointer (the pointees are owned elsewhere
// and are NOT deleted here).
void RenderEntity::clearDynamicRenderDrawInfos() {
    CC_ASSERT_NE(_renderEntityType, RenderEntityType::STATIC);
    _dynamicDrawInfos.clear();
}

// Resets each in-use static draw info back to its pristine state and marks
// the static array as empty.
void RenderEntity::clearStaticRenderDrawInfos() {
    CC_ASSERT_EQ(_renderEntityType, RenderEntityType::STATIC);
    for (uint32_t i = 0; i < _staticDrawInfoSize; i++) {
        RenderDrawInfo& drawInfo = _staticDrawInfos[i];
        drawInfo.resetDrawInfo();
    }
    _staticDrawInfoSize = 0;
}
// Attaches this entity to `node` through the node's user-data slot,
// detaching from the previously attached node first. nullptr detaches only.
void RenderEntity::setNode(Node* node) {
    Node* previous = _node;
    if (previous != nullptr) {
        previous->setUserData(nullptr);
    }
    _node = node;
    if (node != nullptr) {
        node->setUserData(this);
    }
}
// Stores the node whose transform drives rendering (weak reference).
void RenderEntity::setRenderTransform(Node* renderTransform) {
    _renderTransform = renderTransform;
}
// Returns the draw info at `index`, or nullptr when out of range.
RenderDrawInfo* RenderEntity::getDynamicRenderDrawInfo(uint32_t index) {
    CC_ASSERT_NE(_renderEntityType, RenderEntityType::STATIC);
    return index < _dynamicDrawInfos.size() ? _dynamicDrawInfos[index] : nullptr;
}
// Direct access to the dynamic draw-info list (dynamic entities only).
ccstd::vector<RenderDrawInfo*>& RenderEntity::getDynamicRenderDrawInfos() {
    CC_ASSERT_NE(_renderEntityType, RenderEntityType::STATIC);
    return _dynamicDrawInfos;
}

// Sets how many entries of the static array are in use; must not exceed
// STATIC_DRAW_INFO_CAPACITY.
void RenderEntity::setStaticDrawInfoSize(uint32_t size) {
    CC_ASSERT(_renderEntityType == RenderEntityType::STATIC && size <= RenderEntity::STATIC_DRAW_INFO_CAPACITY);
    _staticDrawInfoSize = size;
}

// Pointer into the static array; `index` must be below the in-use size.
RenderDrawInfo* RenderEntity::getStaticRenderDrawInfo(uint32_t index) {
    CC_ASSERT(_renderEntityType == RenderEntityType::STATIC && index < _staticDrawInfoSize);
    return &(_staticDrawInfos[index]);
}

// Direct access to the whole static array (static entities only).
std::array<RenderDrawInfo, RenderEntity::STATIC_DRAW_INFO_CAPACITY>& RenderEntity::getStaticRenderDrawInfos() {
    CC_ASSERT_EQ(_renderEntityType, RenderEntityType::STATIC);
    return _staticDrawInfos;
}
} // namespace cc

View File

@@ -0,0 +1,158 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <array>
#include "2d/renderer/RenderDrawInfo.h"
#include "2d/renderer/StencilManager.h"
#include "base/Macros.h"
#include "base/TypeDef.h"
#include "bindings/utils/BindingUtils.h"
#include "core/ArrayBuffer.h"
#include "core/scene-graph/Node.h"
namespace cc {
class Batcher2d;
// Storage strategy for an entity's draw infos: STATIC uses a fixed in-place
// array, the others a vector of externally-owned pointers.
enum class RenderEntityType : uint8_t {
    STATIC,
    DYNAMIC,
    CROSSED,
};

// Mask participation of an entity (see the getIsMask/getIsSubMask/
// getIsMaskInverted helpers for how these group together).
enum class MaskMode : uint8_t {
    NONE,
    MASK,               // entity defines a mask
    MASK_INVERTED,      // entity defines an inverted mask
    MASK_NODE,          // entity is a sub node of a mask
    MASK_NODE_INVERTED  // entity is a sub node of an inverted mask
};

// Attribute block shared with the script side via a shared array buffer;
// NOTE(review): presumably mirrored by a TS-side layout — keep field order
// and sizes in sync.
struct EntityAttrLayout {
    float localOpacity{1.0F};
    uint8_t colorR{255};
    uint8_t colorG{255};
    uint8_t colorB{255};
    uint8_t colorA{255};
    uint8_t maskMode{0};      // holds a MaskMode value
    uint8_t colorDirtyBit{1}; // nonzero -> color needs re-sync
    uint8_t enabledIndex{0};  // nonzero -> entity enabled
    uint8_t useLocal{0};      // nonzero -> use local descriptor set
};
// Native per-UI-renderable record attached to a Node through its user-data
// slot. Holds either a fixed array (STATIC) or a vector (other types) of
// RenderDrawInfo in a union — the constructor placement-constructs the
// active member and the destructor destroys it explicitly.
class RenderEntity final : public Node::UserData {
public:
    static constexpr uint32_t STATIC_DRAW_INFO_CAPACITY = 4;

    explicit RenderEntity(RenderEntityType type);
    ~RenderEntity() override;

    void addDynamicRenderDrawInfo(RenderDrawInfo* drawInfo);
    void setDynamicRenderDrawInfo(RenderDrawInfo* drawInfo, uint32_t index);
    void removeDynamicRenderDrawInfo();
    void clearDynamicRenderDrawInfos();
    void clearStaticRenderDrawInfos();

    // Mask classification derived from _entityAttrLayout.maskMode.
    inline bool getIsMask() const {
        return static_cast<MaskMode>(_entityAttrLayout.maskMode) == MaskMode::MASK || static_cast<MaskMode>(_entityAttrLayout.maskMode) == MaskMode::MASK_INVERTED;
    }
    inline bool getIsSubMask() const {
        return static_cast<MaskMode>(_entityAttrLayout.maskMode) == MaskMode::MASK_NODE || static_cast<MaskMode>(_entityAttrLayout.maskMode) == MaskMode::MASK_NODE_INVERTED;
    }
    inline bool getIsMaskInverted() const {
        return static_cast<MaskMode>(_entityAttrLayout.maskMode) == MaskMode::MASK_INVERTED || static_cast<MaskMode>(_entityAttrLayout.maskMode) == MaskMode::MASK_NODE_INVERTED;
    }
    inline bool getUseLocal() const { return _entityAttrLayout.useLocal; }
    inline void setUseLocal(bool useLocal) {
        _entityAttrLayout.useLocal = useLocal;
    }

    inline Node* getNode() const { return _node; }
    void setNode(Node* node);
    inline Node* getRenderTransform() const { return _renderTransform; }
    void setRenderTransform(Node* renderTransform);

    // Stencil stage; uint32_t overloads exist for the script bindings.
    inline uint32_t getStencilStage() const { return static_cast<uint32_t>(_stencilStage); }
    inline void setStencilStage(uint32_t stage) {
        _stencilStage = static_cast<StencilStage>(stage);
    }
    inline StencilStage getEnumStencilStage() const { return _stencilStage; }
    inline void setEnumStencilStage(StencilStage stage) {
        _stencilStage = stage;
    }

    inline RenderEntityType getRenderEntityType() const { return _renderEntityType; };

    // Static-array management (STATIC entities only).
    inline uint32_t getStaticDrawInfoSize() const { return _staticDrawInfoSize; };
    void setStaticDrawInfoSize(uint32_t size);
    RenderDrawInfo* getStaticRenderDrawInfo(uint32_t index);
    std::array<RenderDrawInfo, RenderEntity::STATIC_DRAW_INFO_CAPACITY>& getStaticRenderDrawInfos();
    RenderDrawInfo* getDynamicRenderDrawInfo(uint32_t index);
    ccstd::vector<RenderDrawInfo*>& getDynamicRenderDrawInfos();

    inline se::Object* getEntitySharedBufferForJS() const { return _entitySharedBufferActor.getSharedArrayBufferObject(); }

    // Color/opacity state mirrored from the shared attribute block.
    inline bool getColorDirty() const { return _entityAttrLayout.colorDirtyBit != 0; }
    inline void setColorDirty(bool dirty) { _entityAttrLayout.colorDirtyBit = dirty ? 1 : 0; }
    inline bool getVBColorDirty() const { return _vbColorDirty; }
    inline void setVBColorDirty(bool vbColorDirty) { _vbColorDirty = vbColorDirty; }
    inline Color getColor() const { return Color(_entityAttrLayout.colorR, _entityAttrLayout.colorG, _entityAttrLayout.colorB, _entityAttrLayout.colorA); }
    inline float getColorAlpha() const { return static_cast<float>(_entityAttrLayout.colorA) / 255.F; }
    inline float getLocalOpacity() const { return _entityAttrLayout.localOpacity; }
    inline float getOpacity() const { return _opacity; }
    inline void setOpacity(float opacity) { _opacity = opacity; }
    inline bool isEnabled() const { return _entityAttrLayout.enabledIndex != 0; }

    // Type-agnostic draw-info access used by the batcher.
    inline uint32_t getRenderDrawInfosSize() const {
        return _renderEntityType == RenderEntityType::STATIC ? _staticDrawInfoSize : static_cast<uint32_t>(_dynamicDrawInfos.size());
    }
    inline RenderDrawInfo* getRenderDrawInfoAt(uint32_t index) {
        return _renderEntityType == RenderEntityType::STATIC ? &(_staticDrawInfos[index]) : _dynamicDrawInfos[index];
    }

private:
    CC_DISALLOW_COPY_MOVE_ASSIGN(RenderEntity);

    // weak reference
    Node* _node{nullptr};
    // weak reference
    Node* _renderTransform{nullptr};
    EntityAttrLayout _entityAttrLayout;
    float _opacity{1.0F};
    bindings::NativeMemorySharedToScriptActor _entitySharedBufferActor;
    // Active member selected by _renderEntityType (see ctor/dtor).
    union {
        std::array<RenderDrawInfo, RenderEntity::STATIC_DRAW_INFO_CAPACITY> _staticDrawInfos;
        ccstd::vector<RenderDrawInfo*> _dynamicDrawInfos;
    };
    StencilStage _stencilStage{StencilStage::DISABLED};
    RenderEntityType _renderEntityType{RenderEntityType::STATIC};
    uint8_t _staticDrawInfoSize{0};
    bool _vbColorDirty{true};
};
} // namespace cc

View File

@@ -0,0 +1,164 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "StencilManager.h"
#include "2d/renderer/RenderEntity.h"
namespace cc {
namespace {
// Lazily created, intentionally never-freed singleton.
// NOTE(review): creation is not thread-safe; assumed to be first touched
// from a single (render/main) thread — confirm.
StencilManager* instance = nullptr;
} // namespace

// Returns the process-wide StencilManager, creating it on first use.
// Fix: use ccnew for consistency with the rest of this file's allocations.
StencilManager* StencilManager::getInstance() {
    if (instance == nullptr) {
        instance = ccnew StencilManager();
    }
    return instance;
}
// Frees every cached DepthStencilState.
// Fix: iterate by reference — the original copied each pair, so
// CC_SAFE_DELETE nulled the copy's pointer, not the one stored in the map
// (and paid a copy per element; see clang-tidy performance-for-range-copy).
StencilManager::~StencilManager() {
    for (auto& pair : _cacheStateMap) {
        CC_SAFE_DELETE(pair.second);
    }
    for (auto& pair : _cacheStateMapWithDepth) {
        CC_SAFE_DELETE(pair.second);
    }
}
// Picks the clear stage matching the entity's inverted-mask flag.
StencilStage StencilManager::clear(RenderEntity* entity) { // NOLINT(readability-convert-member-functions-to-static)
    return entity->getIsMaskInverted() ? StencilStage::CLEAR_INVERTED : StencilStage::CLEAR;
}
// Moves the entity into the enter-level stage matching its inverted-mask flag.
void StencilManager::enterLevel(RenderEntity* entity) { // NOLINT(readability-convert-member-functions-to-static)
    entity->setEnumStencilStage(entity->getIsMaskInverted() ? StencilStage::ENTER_LEVEL_INVERTED : StencilStage::ENTER_LEVEL);
}
// Returns a cached (or newly built) DepthStencilState for the given stencil
// stage. When a material with at least one pass is supplied, the depth part
// (test/write/func) is inherited from the first pass and the state is cached
// in a separate, depth-aware map; the cache key packs depth flags, stage and
// current mask depth into one uint32_t.
// Fixes: drop the bogus static_cast<uint32_t> when assigning to the bool
// depthWrite; insert into the cache with emplace instead of building a
// temporary pair bound to a const reference.
gfx::DepthStencilState* StencilManager::getDepthStencilState(StencilStage stage, Material* mat) {
    uint32_t key = 0;
    bool depthTest = false;
    bool depthWrite = false;
    gfx::ComparisonFunc depthFunc = gfx::ComparisonFunc::LESS;
    auto* cacheMap = &_cacheStateMap;
    if (mat && !mat->getPasses()->empty()) {
        // Inherit the depth configuration of the material's first pass.
        IntrusivePtr<scene::Pass>& pass = mat->getPasses()->at(0);
        const gfx::DepthStencilState* dss = pass->getDepthStencilState();
        uint32_t depthTestValue = 0;
        uint32_t depthWriteValue = 0;
        if (dss->depthTest) {
            depthTestValue = 1;
        }
        if (dss->depthWrite) {
            depthWriteValue = 1;
        }
        key = (depthTestValue) | (depthWriteValue << 1) | (static_cast<uint32_t>(dss->depthFunc) << 2) | (static_cast<uint32_t>(stage) << 6) | (_maskStackSize << 9);
        depthTest = dss->depthTest;
        depthWrite = dss->depthWrite;
        depthFunc = dss->depthFunc;
        cacheMap = &_cacheStateMapWithDepth;
    } else {
        key = ((static_cast<uint32_t>(stage)) << 16) | (_maskStackSize);
    }
    auto iter = cacheMap->find(key);
    if (iter != cacheMap->end()) {
        return iter->second;
    }

    // Cache miss: refresh _stencilPattern for this stage, then materialize it.
    setDepthStencilStateFromStage(stage);
    auto* depthStencilState = ccnew gfx::DepthStencilState();
    depthStencilState->depthTest = depthTest;
    depthStencilState->depthWrite = depthWrite;
    depthStencilState->depthFunc = depthFunc;
    // Front and back faces share the same stencil configuration.
    depthStencilState->stencilTestFront = _stencilPattern.stencilTest;
    depthStencilState->stencilFuncFront = _stencilPattern.func;
    depthStencilState->stencilReadMaskFront = _stencilPattern.stencilMask;
    depthStencilState->stencilWriteMaskFront = _stencilPattern.writeMask;
    depthStencilState->stencilFailOpFront = _stencilPattern.failOp;
    depthStencilState->stencilZFailOpFront = _stencilPattern.zFailOp;
    depthStencilState->stencilPassOpFront = _stencilPattern.passOp;
    depthStencilState->stencilRefFront = _stencilPattern.ref;
    depthStencilState->stencilTestBack = _stencilPattern.stencilTest;
    depthStencilState->stencilFuncBack = _stencilPattern.func;
    depthStencilState->stencilReadMaskBack = _stencilPattern.stencilMask;
    depthStencilState->stencilWriteMaskBack = _stencilPattern.writeMask;
    depthStencilState->stencilFailOpBack = _stencilPattern.failOp;
    depthStencilState->stencilZFailOpBack = _stencilPattern.zFailOp;
    depthStencilState->stencilPassOpBack = _stencilPattern.passOp;
    depthStencilState->stencilRefBack = _stencilPattern.ref;

    cacheMap->emplace(key, depthStencilState);
    return depthStencilState;
}
// Rebuilds _stencilPattern for the given stage. zFailOp/passOp are never
// touched here, so they keep their defaults; for stages without a dedicated
// case (e.g. EXIT_LEVEL) only the test flag is enabled and the rest of the
// pattern is left as-is — exactly like the original if/else chain.
void StencilManager::setDepthStencilStateFromStage(StencilStage stage) {
    StencilEntity& pattern = _stencilPattern;
    switch (stage) {
        case StencilStage::DISABLED:
            pattern.stencilTest = false;
            pattern.func = gfx::ComparisonFunc::ALWAYS;
            pattern.failOp = gfx::StencilOp::KEEP;
            pattern.stencilMask = pattern.writeMask = 0xffff;
            pattern.ref = 1;
            break;
        case StencilStage::ENABLED:
            pattern.stencilTest = true;
            pattern.func = gfx::ComparisonFunc::EQUAL;
            pattern.failOp = gfx::StencilOp::KEEP;
            pattern.stencilMask = pattern.ref = getStencilRef();
            pattern.writeMask = getWriteMask();
            break;
        case StencilStage::CLEAR:
            pattern.stencilTest = true;
            pattern.func = gfx::ComparisonFunc::NEVER;
            pattern.failOp = gfx::StencilOp::ZERO;
            pattern.writeMask = getWriteMask();
            pattern.stencilMask = getWriteMask();
            pattern.ref = getWriteMask();
            break;
        case StencilStage::CLEAR_INVERTED:
        case StencilStage::ENTER_LEVEL:
            pattern.stencilTest = true;
            pattern.func = gfx::ComparisonFunc::NEVER;
            pattern.failOp = gfx::StencilOp::REPLACE;
            pattern.writeMask = pattern.stencilMask = pattern.ref = getWriteMask();
            break;
        case StencilStage::ENTER_LEVEL_INVERTED:
            pattern.stencilTest = true;
            pattern.func = gfx::ComparisonFunc::NEVER;
            pattern.failOp = gfx::StencilOp::ZERO;
            pattern.writeMask = pattern.stencilMask = pattern.ref = getWriteMask();
            break;
        default:
            // e.g. EXIT_LEVEL: stencil stays enabled, pattern otherwise unchanged.
            pattern.stencilTest = true;
            break;
    }
}
// Sets the current stage from a raw index coming from the script bindings.
void StencilManager::setStencilStage(uint32_t stageIndex) {
    _stage = static_cast<StencilStage>(stageIndex);
}
} // namespace cc

View File

@@ -0,0 +1,140 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <stack>
#include "base/Macros.h"
#include "base/TypeDef.h"
#include "core/ArrayBuffer.h"
#include "core/assets/Material.h"
#include "renderer/gfx-base/GFXDef-common.h"
#include "scene/Pass.h"
namespace cc {
class RenderEntity;
// Lifecycle of the 2D stencil buffer while walking the UI hierarchy.
enum class StencilStage : uint8_t {
    // Stencil disabled
    DISABLED = 0,
    // Clear stencil buffer
    CLEAR = 1,
    // Entering a new level, should handle new stencil
    ENTER_LEVEL = 2,
    // In content
    ENABLED = 3,
    // Exiting a level, should restore old stencil or disable
    EXIT_LEVEL = 4,
    // Clear stencil buffer & USE INVERTED
    CLEAR_INVERTED = 5,
    // Entering a new level & USE INVERTED
    ENTER_LEVEL_INVERTED = 6
};

// Face-agnostic stencil configuration; applied identically to front and back
// faces when building a gfx::DepthStencilState (see getDepthStencilState).
struct StencilEntity {
    uint32_t stencilTest{0};   // 0/1: stencil test enabled
    gfx::ComparisonFunc func{gfx::ComparisonFunc::ALWAYS};
    uint32_t stencilMask{0};   // read mask
    uint32_t writeMask{0};
    gfx::StencilOp failOp{gfx::StencilOp::KEEP};
    gfx::StencilOp zFailOp{gfx::StencilOp::KEEP};
    gfx::StencilOp passOp{gfx::StencilOp::KEEP};
    uint32_t ref{0};           // stencil reference value
};
// Tracks the current mask nesting depth of the 2D UI and derives/caches the
// gfx::DepthStencilState for each (stage, depth[, material depth flags])
// combination. Accessed as a process-wide singleton via getInstance().
class StencilManager final {
public:
    static StencilManager* getInstance();

    StencilManager() = default;
    ~StencilManager();

    inline StencilStage getStencilStage() const { return _stage; }

    gfx::DepthStencilState* getDepthStencilState(StencilStage stage, Material* mat = nullptr);
    void setDepthStencilStateFromStage(StencilStage stage);

    // Mask-stack depth bookkeeping (one level per nested mask).
    inline uint32_t getMaskStackSize() const { return _maskStackSize; }
    inline void setMaskStackSize(uint32_t size) {
        _maskStackSize = size;
    }
    inline void pushMask() {
        ++_maskStackSize;
    }
    StencilStage clear(RenderEntity* entity);
    void enterLevel(RenderEntity* entity);
    inline void enableMask() {
        _stage = StencilStage::ENABLED;
    }
    // Pops a mask level; re-disables the stencil when the stack empties.
    inline void exitMask() {
        if (_maskStackSize == 0) {
            return;
        }
        --_maskStackSize;
        if (_maskStackSize == 0) {
            _stage = StencilStage::DISABLED;
        } else {
            _stage = StencilStage::ENABLED;
        }
    }
    // Bit reserved for the current (innermost) mask level.
    // NOTE(review): when _maskStackSize == 0 this shifts by -1, which is
    // undefined behavior — callers appear expected to push a mask first; confirm.
    inline uint32_t getWriteMask() const {
        return 1 << (_maskStackSize - 1);
    }
    inline uint32_t getExitWriteMask() const {
        return 1 << _maskStackSize;
    }
    // All bits of every active mask level set (2^depth - 1).
    inline uint32_t getStencilRef() const {
        uint32_t result = 0;
        for (uint32_t i = 0; i < _maskStackSize; i++) {
            result += (1 << i);
        }
        return result;
    }
    inline uint32_t getStencilHash(StencilStage stage) const {
        return ((static_cast<uint32_t>(stage)) << 8) | _maskStackSize;
    }

    void setStencilStage(uint32_t stageIndex);

private:
    CC_DISALLOW_COPY_MOVE_ASSIGN(StencilManager);

    StencilEntity _stencilPattern;
    ArrayBuffer::Ptr _stencilSharedBuffer;

    StencilStage _stage{StencilStage::DISABLED};
    uint32_t _maskStackSize{0};

    // Cached states, keyed by packed stage/depth (and depth flags for the
    // material-aware map); owned here and freed in the destructor.
    ccstd::unordered_map<uint32_t, gfx::DepthStencilState*> _cacheStateMap;
    ccstd::unordered_map<uint32_t, gfx::DepthStencilState*> _cacheStateMapWithDepth;
};
} // namespace cc

View File

@@ -0,0 +1,178 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "2d/renderer/UIMeshBuffer.h"

#include <cstdint>
#include <utility>

#include "renderer/gfx-base/GFXDevice.h"
namespace cc {
static uint32_t getAttributesStride(ccstd::vector<gfx::Attribute>& attrs) {
uint32_t stride = 0;
for (auto& attr : attrs) {
const auto& info = gfx::GFX_FORMAT_INFOS[static_cast<uint32_t>(attr.format)];
stride += info.size;
}
return stride;
}
// Releases all owned resources (see destroy()).
UIMeshBuffer::~UIMeshBuffer() {
    destroy();
}

// Adopts the external vertex stream pointer; whether it is freed in
// destroy() depends on _needDeleteVData.
void UIMeshBuffer::setVData(float* vData) {
    _vData = vData;
}

// Adopts the external index stream pointer; freed together with _vData
// when _needDeleteVData is set.
void UIMeshBuffer::setIData(uint16_t* iData) {
    _iData = iData;
}
// Stores the vertex attribute set and derives the vertex stride from it.
// When `needCreateLayout` is true a heap-allocated layout block is owned by
// this buffer; otherwise the layout is expected to arrive later via
// syncSharedBufferToNative().
// Fix: `attrs` is an rvalue reference, but the original copied it into
// _attributes; compute the stride first (while attrs is intact), then move.
void UIMeshBuffer::initialize(ccstd::vector<gfx::Attribute>&& attrs, bool needCreateLayout) {
    _vertexFormatBytes = getAttributesStride(attrs);
    _attributes = std::move(attrs);
    if (needCreateLayout) {
        _meshBufferLayout = new MeshBufferLayout();
    }
    _needDeleteLayout = needCreateLayout;
}
void UIMeshBuffer::reset() {
setIndexOffset(0);
_dirty = false;
}
// Intentionally empty: the single IA is cached for the buffer's lifetime
// (see createNewIA), so there is currently nothing to reset.
void UIMeshBuffer::resetIA() {
}

// Releases everything this buffer may own: GPU buffers and IA (via
// IntrusivePtr reset), the optionally-owned CPU streams, and the
// optionally-owned layout block.
void UIMeshBuffer::destroy() {
    reset();
    _attributes.clear();
    _vb = nullptr;
    _ib = nullptr;
    if (_needDeleteVData) {
        // NOTE(review): _vData/_iData look like array allocations; if they come
        // from new[], these should be delete[] — confirm at the allocation site.
        delete _vData;
        delete _iData;
    }
    _vData = nullptr;
    _iData = nullptr;
    // Destroy InputAssemblers
    _ia = nullptr;
    if (_needDeleteLayout) {
        CC_SAFE_DELETE(_meshBufferLayout);
    }
}
// Marks the native-side dirty flag (distinct from the shared layout's
// dirtyMark, which is written through setDirty(bool)).
void UIMeshBuffer::setDirty() {
    _dirty = true;
}

// Returns the (lazily created, cached) input assembler for this buffer.
gfx::InputAssembler* UIMeshBuffer::requireFreeIA(gfx::Device* device) {
    return createNewIA(device);
}
// Uploads the CPU-side vertex/index streams into the GPU buffers backing the
// IA, growing them when the current data does not fit. No-op when there is
// no layout, no written bytes, nothing dirty, or no IA yet.
// Fixes: bind the IA's vertex-buffer list by const reference instead of
// copying it every call, and express the index-byte computation with
// sizeof(uint16_t) instead of a magic `* 2`.
void UIMeshBuffer::uploadBuffers() {
    uint32_t byteOffset = getByteOffset();
    bool dirty = getDirty();
    if (_meshBufferLayout == nullptr || byteOffset == 0 || !dirty || !_ia) {
        return;
    }
    uint32_t indexCount = getIndexOffset();

    const auto& vBuffers = _ia->getVertexBuffers();
    if (!vBuffers.empty()) {
        gfx::Buffer* vBuffer = vBuffers[0];
        if (byteOffset > vBuffer->getSize()) {
            vBuffer->resize(byteOffset);
        }
        vBuffer->update(_vData);
    }
    gfx::Buffer* iBuffer = _ia->getIndexBuffer();
    const auto ibBytes = static_cast<uint32_t>(indexCount * sizeof(uint16_t));
    if (ibBytes > iBuffer->getSize()) {
        iBuffer->resize(ibBytes);
    }
    iBuffer->update(_iData);
    setDirty(false);
}
// Currently unused: IAs are not pooled, so there is nothing to recycle.
void UIMeshBuffer::recycleIA(gfx::InputAssembler* ia) {
}

// Lazily creates the GPU vertex/index buffers and the input assembler on
// first use, then returns the cached instance. Buffers start small (room
// for 3 elements) and are grown by uploadBuffers() as needed.
gfx::InputAssembler* UIMeshBuffer::createNewIA(gfx::Device* device) {
    if (!_ia) {
        uint32_t vbStride = _vertexFormatBytes;
        uint32_t ibStride = sizeof(uint16_t);
        gfx::InputAssemblerInfo iaInfo = {};
        _vb = device->createBuffer({
            gfx::BufferUsageBit::VERTEX | gfx::BufferUsageBit::TRANSFER_DST,
            gfx::MemoryUsageBit::DEVICE | gfx::MemoryUsageBit::HOST,
            vbStride * 3,
            vbStride,
        });
        _ib = device->createBuffer({
            gfx::BufferUsageBit::INDEX | gfx::BufferUsageBit::TRANSFER_DST,
            gfx::MemoryUsageBit::DEVICE | gfx::MemoryUsageBit::HOST,
            ibStride * 3,
            ibStride,
        });
        iaInfo.attributes = _attributes;
        iaInfo.vertexBuffers.emplace_back(_vb);
        iaInfo.indexBuffer = _ib;
        _ia = device->createInputAssembler(iaInfo);
    }
    return _ia;
}
// Adopts the script-shared memory block and maps the layout view onto it.
void UIMeshBuffer::syncSharedBufferToNative(uint32_t* buffer) {
    _sharedBuffer = buffer;
    parseLayout();
}

// Reinterprets the head of the shared buffer as the layout (no ownership).
void UIMeshBuffer::parseLayout() {
    _meshBufferLayout = reinterpret_cast<MeshBufferLayout*>(_sharedBuffer);
}

// The following setters write straight into the shared layout block and
// require _meshBufferLayout to be valid.
void UIMeshBuffer::setByteOffset(uint32_t byteOffset) {
    _meshBufferLayout->byteOffset = byteOffset;
}

void UIMeshBuffer::setVertexOffset(uint32_t vertexOffset) {
    _meshBufferLayout->vertexOffset = vertexOffset;
}

void UIMeshBuffer::setIndexOffset(uint32_t indexOffset) {
    _meshBufferLayout->indexOffset = indexOffset;
}

// const: only the shared layout block is mutated, not this object; mirrors
// the dirty state to the script side.
void UIMeshBuffer::setDirty(bool dirty) const {
    _meshBufferLayout->dirtyMark = dirty ? 1 : 0;
}
} // namespace cc

View File

@@ -0,0 +1,100 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "base/Ptr.h"
#include "base/Macros.h"
#include "base/TypeDef.h"
#include "renderer/gfx-base/GFXInputAssembler.h"
#include "renderer/gfx-base/GFXDef-common.h"
#include "renderer/gfx-base/GFXBuffer.h"
namespace cc {
// Per-buffer header shared between the script side and native
// (see UIMeshBuffer::parseLayout). Field order and sizes must stay in sync
// with the script-side layout.
struct MeshBufferLayout {
    uint32_t byteOffset;   // bytes of vertex data written so far (used to size uploads)
    uint32_t vertexOffset; // vertices written so far — NOTE(review): not read in this file, confirm semantics
    uint32_t indexOffset;  // indices written so far (x2 = index bytes)
    uint32_t dirtyMark;    // non-zero when the buffer needs re-upload (bool as uint32)
};
/**
 * Native-side 2D mesh buffer: holds the CPU vertex/index arrays, the GPU
 * vertex/index buffers plus the input assembler used to draw them, and a
 * small layout header (MeshBufferLayout) shared with the script side.
 */
class UIMeshBuffer final {
public:
    UIMeshBuffer() = default;
    ~UIMeshBuffer();

    // CPU-side vertex data (interleaved floats).
    inline float* getVData() const { return _vData; }
    void setVData(float* vData);
    // CPU-side index data (16-bit indices).
    inline uint16_t* getIData() const { return _iData; }
    void setIData(uint16_t* iData);

    // Sets the vertex attribute layout. `needCreateLayout` presumably allocates
    // a native MeshBufferLayout when no script-shared one exists — TODO confirm in .cpp.
    void initialize(ccstd::vector<gfx::Attribute>&& attrs, bool needCreateLayout = false);
    void reset();
    void destroy();
    void setDirty();
    // Uploads the CPU arrays into the GPU buffers when dirty, resizing as needed.
    void uploadBuffers();
    // Binds the script-side shared buffer and parses its layout header.
    void syncSharedBufferToNative(uint32_t* buffer);
    void resetIA();
    void recycleIA(gfx::InputAssembler* ia); // currently a no-op (single persistent IA)
    void parseLayout();
    gfx::InputAssembler* requireFreeIA(gfx::Device* device);
    // Lazily creates (once) and returns the IA together with its GPU buffers.
    gfx::InputAssembler* createNewIA(gfx::Device* device);

    // The accessors below read/write the shared MeshBufferLayout directly and
    // assume _meshBufferLayout has been set (initialize/parseLayout).
    inline uint32_t getByteOffset() const { return _meshBufferLayout->byteOffset; }
    void setByteOffset(uint32_t byteOffset);
    inline uint32_t getVertexOffset() const { return _meshBufferLayout->vertexOffset; }
    void setVertexOffset(uint32_t vertexOffset);
    inline uint32_t getIndexOffset() const { return _meshBufferLayout->indexOffset; }
    void setIndexOffset(uint32_t indexOffset);
    inline bool getDirty() const { return _meshBufferLayout->dirtyMark != 0; }
    void setDirty(bool dirty) const;

    inline const ccstd::vector<gfx::Attribute>& getAttributes() const {
        return _attributes;
    }

protected:
    CC_DISALLOW_COPY_MOVE_ASSIGN(UIMeshBuffer);

private:
    float* _vData{nullptr};                       // CPU vertex storage (ownership flagged by _needDeleteVData)
    uint16_t* _iData{nullptr};                    // CPU index storage
    MeshBufferLayout* _meshBufferLayout{nullptr}; // view over _sharedBuffer (or a native allocation)
    uint32_t* _sharedBuffer{nullptr};             // raw script-shared memory, not owned
    uint32_t _vertexFormatBytes{0};               // bytes per vertex for the current attribute set
    uint32_t _initVDataCount{0};
    uint32_t _initIDataCount{0};
    ccstd::vector<gfx::Attribute> _attributes;
    IntrusivePtr<gfx::InputAssembler> _ia;
    IntrusivePtr<gfx::Buffer> _vb;
    IntrusivePtr<gfx::Buffer> _ib;
    bool _dirty{false};
    bool _needDeleteVData{false};  // presumably: _vData allocated natively and must be freed — TODO confirm
    bool _needDeleteLayout{false}; // presumably: layout header allocated natively — TODO confirm
};
} // namespace cc

View File

@@ -0,0 +1,156 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "UIModelProxy.h"
#include "2d/renderer/RenderEntity.h"
#include "core/assets/RenderingSubMesh.h"
namespace cc {
// Caches the GFX device once; all buffer creation below goes through it.
UIModelProxy::UIModelProxy() {
    _device = Root::getInstance()->getDevice();
}
// Releases the model and the sub meshes created for it (see destroy()).
UIModelProxy::~UIModelProxy() {
    destroy();
}
// Creates the scene model bound to `node` and remembers the node so later
// calls can reach its RenderEntity.
void UIModelProxy::initModel(Node* node) {
    _node = node;
    auto* model = Root::getInstance()->createModel<scene::Model>();
    model->setNode(node);
    model->setTransform(node);
    _model = model;
}
// Ensures the model owns one sub model per dynamic draw info on the node's
// RenderEntity, creating GPU vertex/index buffers and a RenderingSubMesh for
// every missing slot.
void UIModelProxy::activeSubModels() {
    if (_model == nullptr) return;
    auto* entity = static_cast<RenderEntity*>(_node->getUserData());
    auto drawInfoSize = entity->getDynamicRenderDrawInfos().size();
    auto subModelSize = _model->getSubModels().size();
    if (drawInfoSize > subModelSize) {
        for (size_t i = subModelSize; i < drawInfoSize; i++) {
            // Re-check against the live sub model list: initSubModel below grows it.
            if (_model->getSubModels().size() <= i) {
                RenderDrawInfo* drawInfo = entity->getDynamicRenderDrawInfo(static_cast<uint32_t>(i));
                if (drawInfo == nullptr) {
                    // NOTE(review): bails out of the whole loop on a null entry,
                    // leaving later slots uncreated — confirm this is intended.
                    return;
                }
                // Worst-case capacity: 65535 vertices (16-bit index range), _stride bytes each.
                auto* vertexBuffer = _device->createBuffer({
                    gfx::BufferUsageBit::VERTEX | gfx::BufferUsageBit::TRANSFER_DST,
                    gfx::MemoryUsageBit::DEVICE,
                    65535 * _stride,
                    _stride,
                });
                auto* indexBuffer = _device->createBuffer({
                    gfx::BufferUsageBit::INDEX | gfx::BufferUsageBit::TRANSFER_DST,
                    gfx::MemoryUsageBit::DEVICE,
                    65535 * sizeof(uint16_t) * 2,
                    sizeof(uint16_t),
                });
                gfx::BufferList vbReference;
                vbReference.emplace_back(vertexBuffer);
                auto* renderMesh = ccnew RenderingSubMesh(vbReference, _attributes, _primitiveMode, indexBuffer);
                renderMesh->setSubMeshIdx(0);
                _model->initSubModel(static_cast<index_t>(i), renderMesh, drawInfo->getMaterial());
                _graphicsUseSubMeshes.emplace_back(renderMesh);
            }
        }
    }
}
// Copies each draw info's CPU vertex/index data into the matching sub model's
// GPU buffers and updates the IA counts; finally hangs the model off the
// first draw info so the batcher renders through it (see "hack" note below).
void UIModelProxy::uploadData() {
    auto* entity = static_cast<RenderEntity*>(_node->getUserData());
    const auto& drawInfos = entity->getDynamicRenderDrawInfos();
    const auto& subModelList = _model->getSubModels();
    for (size_t i = 0; i < drawInfos.size(); i++) {
        auto* drawInfo = drawInfos[i];
        auto* ia = subModelList.at(i)->getInputAssembler();
        // Skip empty draw infos — nothing to upload or draw.
        if (drawInfo->getVertexOffset() <= 0 || drawInfo->getIndexOffset() <= 0) continue;
        gfx::BufferList vBuffers = ia->getVertexBuffers();
        if (!vBuffers.empty()) {
            auto size = drawInfo->getVertexOffset() * _stride;
            // Resize is unconditional; the size check was deliberately disabled.
            // if (size > vBuffers[0]->getSize()) {
            vBuffers[0]->resize(size);
            // }
            vBuffers[0]->update(drawInfo->getVDataBuffer()); // vdata
        }
        ia->setVertexCount(drawInfo->getVertexOffset()); // count
        gfx::Buffer* iBuffer = ia->getIndexBuffer();
        auto size = drawInfo->getIndexOffset() * 2; // 2 bytes per 16-bit index
        // if (size > iBuffer->getSize()) {
        iBuffer->resize(size);
        // }
        iBuffer->update(drawInfo->getIDataBuffer()); // idata
        ia->setIndexCount(drawInfo->getIndexOffset()); // indexCount
        // drawInfo->setModel(_model); // hack, render by model
    }
    if (!drawInfos.empty()) {
        drawInfos[0]->setModel(_model);
    }
}
// Releases the model through the Root that created it, destroys every sub
// mesh created for graphics rendering, and drops all model references.
void UIModelProxy::destroy() {
    if (_model != nullptr) {
        Root::getInstance()->destroyModel(_model);
        _model = nullptr;
    }
    for (size_t idx = 0; idx < _graphicsUseSubMeshes.size(); ++idx) {
        _graphicsUseSubMeshes[idx]->destroy();
        _graphicsUseSubMeshes[idx] = nullptr;
    }
    _graphicsUseSubMeshes.clear();
    _models.clear();
}
// No-op placeholder: nothing is reset here in the current implementation.
void UIModelProxy::clear() {
}
// for ui model
// Records an externally-owned model so attachDrawInfo() can pair it with a
// draw info later.
void UIModelProxy::updateModels(scene::Model* model) {
    _models.push_back(model);
}
// Pairs each recorded model with the entity's draw info at the same slot;
// does nothing unless the counts match one-to-one.
void UIModelProxy::attachDrawInfo() {
    auto* entity = static_cast<RenderEntity*>(_node->getUserData());
    auto& drawInfos = entity->getDynamicRenderDrawInfos();
    const size_t total = _models.size();
    if (drawInfos.size() != total) {
        return;
    }
    for (size_t idx = 0; idx < total; ++idx) {
        drawInfos[idx]->setModel(_models[idx]);
    }
}
// Stores the node whose RenderEntity supplies the draw infos (UIModel path).
void UIModelProxy::attachNode(Node* node) {
    _node = node;
}
// Drops all recorded models (raw, non-owning pointers — nothing is destroyed).
void UIModelProxy::clearModels() {
    _models.clear();
}
} // namespace cc

View File

@@ -0,0 +1,67 @@
/****************************************************************************
Copyright (c) 2019-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "base/Macros.h"
#include "core/Root.h"
#include "scene/Model.h"
namespace cc {
/**
 * Bridges a UI node's RenderEntity draw infos to a scene::Model so 2D
 * content (e.g. Graphics) can be rendered through the model pipeline.
 */
class UIModelProxy final {
public:
    UIModelProxy();
    ~UIModelProxy();

    void initModel(Node* node);
    // Creates one sub model (with GPU buffers) per dynamic draw info.
    void activeSubModels();
    // Uploads CPU vertex/index data of every draw info into its sub model.
    void uploadData();
    void destroy();
    void clear();
    inline scene::Model* getModel() const { return _model; }

    // For UIModel
    void updateModels(scene::Model* models);
    void attachDrawInfo();
    void attachNode(Node* node);
    void clearModels();

protected:
    CC_DISALLOW_COPY_MOVE_ASSIGN(UIModelProxy);

private:
    Node* _node{nullptr}; // not owned
    IntrusivePtr<scene::Model> _model;
    ccstd::vector<IntrusivePtr<RenderingSubMesh>> _graphicsUseSubMeshes{};
    // For UIModel
    ccstd::vector<scene::Model*> _models{}; // not owned
    gfx::Device* _device{nullptr};
    // Bytes per vertex: position RGB32F (12) + color RGBA32F (16) + a_dist R32F (4).
    uint32_t _stride{32};
    ccstd::vector<gfx::Attribute> _attributes{
        gfx::Attribute{gfx::ATTR_NAME_POSITION, gfx::Format::RGB32F},
        gfx::Attribute{gfx::ATTR_NAME_COLOR, gfx::Format::RGBA32F},
        gfx::Attribute{"a_dist", gfx::Format::R32F},
    };
    gfx::PrimitiveMode _primitiveMode{gfx::PrimitiveMode::TRIANGLE_LIST};
};
} // namespace cc

1485
cocos/3d/assets/Mesh.cpp Normal file

File diff suppressed because it is too large Load Diff

500
cocos/3d/assets/Mesh.h Normal file
View File

@@ -0,0 +1,500 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "3d/assets/Morph.h"
#include "3d/assets/MorphRendering.h"
#include "base/std/optional.h"
#include "core/assets/Asset.h"
#include "core/geometry/AABB.h"
#include "math/Mat4.h"
#include "math/Vec3.h"
#include "primitive/PrimitiveDefine.h"
#include "renderer/gfx-base/GFXDef.h"
namespace cc {
class Skeleton;
class RenderingSubMesh;
/**
* @en Mesh asset
* @zh 网格资源。
*/
class Mesh : public Asset {
public:
    using Super = Asset;

    using IBufferView = IMeshBufferView;

    /**
     * @en Vertex bundle, it describes a set of interleaved vertex attributes and their values.
     * @zh 顶点块。顶点块描述了一组**交错排列**interleaved的顶点属性并存储了顶点属性的实际数据。<br>
     * 交错排列是指在实际数据的缓冲区中,每个顶点的所有属性总是依次排列,并总是出现在下一个顶点的所有属性之前。
     */
    struct IVertexBundle {
        ccstd::optional<uint8_t> _padding; // NOTE: avoid jsb cache map
        /**
         * @en The actual value for all vertex attributes.
         * You must use DataView to access the data.
         * @zh 所有顶点属性的实际数据块。
         * 你必须使用 DataView 来读取数据。
         * 因为不能保证所有属性的起始偏移都按 TypedArray 要求的字节对齐。
         */
        IBufferView view;
        /**
         * @en All attributes included in the bundle
         * @zh 包含的所有顶点属性。
         */
        gfx::AttributeList attributes;
    };

    // Buffer views locating the optional cluster (meshlet) data of a sub mesh.
    // NOTE(review): the semantics of the four views are inferred from their
    // names only — confirm against the mesh serializer.
    struct IMeshCluster {
        IBufferView clusterView;
        IBufferView triangleView;
        IBufferView vertexView;
        IBufferView coneView;
    };

    /**
     * @en Sub mesh contains a list of primitives with the same type (Point, Line or Triangle)
     * @zh 子网格。子网格由一系列相同类型的图元组成(例如点、线、面等)。
     */
    struct ISubMesh {
        /**
         * @en The vertex bundle references used by the sub mesh.
         * @zh 此子网格引用的顶点块,索引至网格的顶点块数组。
         */
        ccstd::vector<uint32_t> vertexBundelIndices;
        /**
         * @en The primitive mode of the sub mesh
         * @zh 此子网格的图元类型。
         */
        gfx::PrimitiveMode primitiveMode;
        /**
         * @en The index data of the sub mesh
         * @zh 此子网格使用的索引数据。
         */
        ccstd::optional<IBufferView> indexView;
        /**
         * @en The joint map index in [[IStruct.jointMaps]]. Could be absent
         * @zh 此子网格使用的关节索引映射表在 [[IStruct.jointMaps]] 中的索引。
         * 如未定义或指向的映射表不存在,则默认 VB 内所有关节索引数据直接对应骨骼资源数据。
         */
        ccstd::optional<uint32_t> jointMapIndex;
        // Optional cluster (meshlet) data for this sub mesh.
        ccstd::optional<IMeshCluster> cluster;
    };

    /**
     * @en The info use to create dynamic mesh.
     * @zh 描述了创建动态网格需要的预分配信息。
     */
    struct IDynamicInfo {
        /**
         * @en max submesh count
         * @zh 最大子模型个数。
         */
        uint32_t maxSubMeshes{0U};
        /**
         * @en max submesh vertex count
         * @zh 子模型最大顶点个数。
         */
        uint32_t maxSubMeshVertices{0U};
        /**
         * @en max submesh index count
         * @zh 子模型最大索引个数。
         */
        uint32_t maxSubMeshIndices{0U};
    };

    /**
     * @en The structure use to create dynamic mesh.
     * @zh 描述了创建动态网格的结构。
     */
    struct IDynamicStruct {
        /**
         * @en dynamic mesh info
         * @zh 动态模型信息。
         */
        IDynamicInfo info;
        /**
         * @en dynamic submesh bounds
         * @zh 动态子模型包围盒。
         */
        ccstd::vector<geometry::AABB> bounds;
    };

    /**
     * @en The structure of the mesh
     * @zh 描述了网格的结构。
     */
    struct IStruct {
        /**
         * @en All vertex bundles of the mesh
         * @zh 此网格所有的顶点块。
         */
        ccstd::vector<IVertexBundle> vertexBundles;
        /**
         * @en All sub meshes
         * @zh 此网格的所有子网格。
         */
        ccstd::vector<ISubMesh> primitives;
        /**
         * @en The minimum position of all vertices in the mesh
         * @zh (各分量都)小于等于此网格任何顶点位置的最大位置。
         */
        ccstd::optional<Vec3> minPosition;
        inline const ccstd::optional<Vec3> &getMinPosition() const { return minPosition; } // For JSB binding only
        inline void setMinPosition(const ccstd::optional<Vec3> &v) { minPosition = v; }    // For JSB binding only
        /**
         * @en The maximum position of all vertices in the mesh
         * @zh (各分量都)大于等于此网格任何顶点位置的最小位置。
         */
        ccstd::optional<Vec3> maxPosition;
        inline const ccstd::optional<Vec3> &getMaxPosition() const { return maxPosition; } // For JSB binding only
        inline void setMaxPosition(const ccstd::optional<Vec3> &v) { maxPosition = v; }    // For JSB binding only
        /**
         * @en The joint index map list.
         * @zh 此网格使用的关节索引映射关系列表,数组长度应为子模型中实际使用到的所有关节,
         * 每个元素都对应一个原骨骼资源里的索引,按子模型 VB 内的实际索引排列。
         */
        ccstd::optional<ccstd::vector<ccstd::vector<index_t>>> jointMaps;
        /**
         * @en The morph information of the mesh
         * @zh 网格的形变数据
         */
        ccstd::optional<Morph> morph;
        /**
         * @en The specific data of the dynamic mesh
         * @zh 动态网格特有数据
         */
        ccstd::optional<IDynamicStruct> dynamic;
        // Flags recording how the serialized mesh data was processed.
        // NOTE(review): exact decode pipeline for these flags lives in Mesh.cpp — confirm there.
        ccstd::optional<bool> encoded;
        ccstd::optional<bool> compressed;
        ccstd::optional<bool> quantized;
    };

    struct ICreateInfo {
        /**
         * @en Mesh structure
         * @zh 网格结构。
         */
        IStruct structInfo;
        /**
         * @en Mesh binary data
         * @zh 网格二进制数据。
         */
        Uint8Array data;
    };

    Mesh() = default;
    ~Mesh() override;

    ccstd::any getNativeAsset() const override;
    void setNativeAsset(const ccstd::any &obj) override;

    // Replaces the mesh's binary blob, wrapping `data` in a Uint8Array view.
    void setAssetData(ArrayBuffer *data) {
        _data = Uint8Array(data);
    }
    // Returns the underlying ArrayBuffer of the mesh's binary data.
    ArrayBuffer *getAssetData() const {
        return _data.buffer();
    }

    /**
     * @en The sub meshes count of the mesh.
     * @zh 此网格的子网格数量。
     * @deprecated Please use [[renderingSubMeshes.length]] instead
     */
    uint32_t getSubMeshCount() const;

    /**
     * @en The minimum position of all vertices in the mesh
     * @zh (各分量都)小于等于此网格任何顶点位置的最大位置。
     * @deprecated Please use [[struct.minPosition]] instead
     */
    const Vec3 *getMinPosition() const;

    /**
     * @en The maximum position of all vertices in the mesh
     * @zh (各分量都)大于等于此网格任何顶点位置的最大位置。
     * @deprecated Please use [[struct.maxPosition]] instead
     */
    const Vec3 *getMaxPosition() const;

    /**
     * @en The struct of the mesh
     * @zh 此网格的结构。
     */
    inline const IStruct &getStruct() const {
        return _struct;
    }
    inline void setStruct(const IStruct &input) {
        _struct = input;
    }

    inline Uint8Array &getData() {
        return _data;
    }
    inline void setData(const Uint8Array &data) {
        _data = data;
    }

    /**
     * @en The hash of the mesh
     * @zh 此网格的哈希值。
     */
    ccstd::hash_t getHash();

    /**
     * @en Set the hash of the mesh
     * @zh 设置此网格的哈希值。
     */
    void setHash(ccstd::hash_t hash) { _hash = hash; }

    using JointBufferIndicesType = ccstd::vector<index_t>;
    /**
     * The index of the joint buffer of all sub meshes in the joint map buffers
     */
    const JointBufferIndicesType &getJointBufferIndices();

    using RenderingSubMeshList = ccstd::vector<IntrusivePtr<RenderingSubMesh>>;
    /**
     * @en The sub meshes for rendering. Mesh could be split into different sub meshes for rendering.
     * @zh 此网格创建的渲染网格。
     */
    inline const RenderingSubMeshList &getRenderingSubMeshes() {
        initialize(); // lazily builds the rendering sub meshes on first access
        return _renderingSubMeshes;
    }

    void onLoaded() override {
        initialize();
    }

    void initialize();

    /**
     * @en Destroy the mesh and release all related GPU resources
     * @zh 销毁此网格,并释放它占有的所有 GPU 资源。
     */
    bool destroy() override {
        destroyRenderingMesh();
        return Super::destroy();
    }

    /**
     * @en Release all related GPU resources
     * @zh 释放此网格占有的所有 GPU 资源。
     */
    void destroyRenderingMesh();

    /**
     * @en Reset the struct and data of the mesh
     * @zh 重置此网格的结构和数据。
     * @param struct The new struct
     * @param data The new data
     * @deprecated Will be removed in v3.0.0, please use [[reset]] instead
     */
    void assign(const IStruct &structInfo, const Uint8Array &data);

    /**
     * @en Reset the mesh with mesh creation information
     * @zh 重置此网格。
     * @param info Mesh creation information including struct and data
     */
    void reset(ICreateInfo &&info);

    using BoneSpaceBounds = ccstd::vector<IntrusivePtr<geometry::AABB>>;
    /**
     * @en Get [[AABB]] bounds in the skeleton's bone space
     * @zh 获取骨骼变换空间内下的 [[AABB]] 包围盒
     * @param skeleton
     */
    BoneSpaceBounds getBoneSpaceBounds(Skeleton *skeleton);

    /**
     * @en Merge the given mesh into the current mesh
     * @zh 合并指定的网格到此网格中。
     * @param mesh The mesh to be merged
     * @param worldMatrix The world matrix of the given mesh
     * @param [validate=false] Whether to validate the mesh
     * @returns Check the mesh state and return the validation result.
     */
    bool merge(Mesh *mesh, const Mat4 *worldMatrix = nullptr, bool validate = false);

    /**
     * @en Validation for whether the given mesh can be merged into the current mesh.
     * To pass the validation, it must satisfy either of these two requirements:
     * - When the current mesh have no data
     * - When the two mesh have the same vertex bundle count, the same sub meshes count, and the same sub mesh layout.
     *
     * Same mesh layout means:
     * - They have the same primitive type and reference to the same amount vertex bundle with the same indices.
     * - And they all have or don't have index view
     * @zh 验证指定网格是否可以合并至当前网格。
     *
     * 当满足以下条件之一时,指定网格可以合并至当前网格:
     * - 当前网格无数据而待合并网格有数据;
     * - 它们的顶点块数目相同且对应顶点块的布局一致,并且它们的子网格数目相同且对应子网格的布局一致。
     *
     * 两个顶点块布局一致当且仅当:
     * - 它们具有相同数量的顶点属性且对应的顶点属性具有相同的属性格式。
     *
     * 两个子网格布局一致,当且仅当:
     * - 它们具有相同的图元类型并且引用相同数量、相同索引的顶点块;并且,
     * - 要么都需要索引绘制,要么都不需要索引绘制。
     * @param mesh The other mesh to be validated
     */
    bool validateMergingMesh(Mesh *mesh);

    /**
     * @en Read the requested attribute of the given sub mesh
     * @zh 读取子网格的指定属性。
     * @param primitiveIndex Sub mesh index
     * @param attributeName Attribute name
     * @returns Return null if not found or can't read, otherwise, will create a large enough typed array to contain all data of the attribute,
     * the array type will match the data type of the attribute.
     */
    TypedArray readAttribute(index_t primitiveIndex, const char *attributeName);

    /**
     * @en Read the requested attribute of the given sub mesh and fill into the given buffer.
     * @zh 读取子网格的指定属性到目标缓冲区中。
     * @param primitiveIndex Sub mesh index
     * @param attributeName Attribute name
     * @param buffer The target array buffer
     * @param stride Byte distance between two attributes in the target buffer
     * @param offset The offset of the first attribute in the target buffer
     * @returns Return false if failed to access attribute, return true otherwise.
     */
    bool copyAttribute(index_t primitiveIndex, const char *attributeName, ArrayBuffer *buffer, uint32_t stride, uint32_t offset);

    /**
     * @en Read the indices data of the given sub mesh
     * @zh 读取子网格的索引数据。
     * @param primitiveIndex Sub mesh index
     * @returns Return null if not found or can't read, otherwise, will create a large enough typed array to contain all indices data,
     * the array type will use the corresponding stride size.
     */
    IBArray readIndices(index_t primitiveIndex);

    /**
     * @en Read the indices data of the given sub mesh and fill into the given array
     * @zh 读取子网格的索引数据到目标数组中。
     * @param primitiveIndex Sub mesh index
     * @param outputArray The target output array
     * @returns Return false if failed to access the indices data, return true otherwise.
     */
    bool copyIndices(index_t primitiveIndex, TypedArray &outputArray);

    /**
     * @en Read the format by attributeName of submesh
     * @zh 根据属性名读取子网格的属性信息。
     * @param primitiveIndex @en Sub mesh index @zh 子网格索引
     * @param attributeName @en Attribute name @zh 属性名称
     * @returns @en Return null if failed to read format, return the format otherwise. @zh 读取失败返回 null 否则返回 format
     */
    const gfx::FormatInfo *readAttributeFormat(index_t primitiveIndex, const char *attributeName);

    /**
     * @en update dynamic sub mesh geometry
     * @zh 更新动态子网格的几何数据
     * @param primitiveIndex: sub mesh index
     * @param geometry: sub mesh geometry data
     */
    void updateSubMesh(index_t primitiveIndex, const IDynamicGeometry &geometry);

    /**
     * @en Set whether the data of this mesh could be accessed (read or wrote), it could be used only for static mesh
     * @zh 设置此网格的数据是否可被存取,此接口只针对静态网格资源生效
     * @param allowDataAccess @en Indicate whether the data of this mesh could be accessed (read or wrote) @zh 是否允许存取网格数据
     */
    void setAllowDataAccess(bool allowDataAccess);

    /**
     * @en Get whether the data of this mesh could be read or wrote
     * @zh 获取此网格的数据是否可被存取
     * @return @en whether the data of this mesh could be accessed (read or wrote) @zh 此网格的数据是否可被存取
     */
    inline bool isAllowDataAccess() const { return _allowDataAccess; }

private:
    // Callback receiving the vertex bundle and the attribute index located by accessAttribute.
    using AccessorType = std::function<void(const IVertexBundle &vertexBundle, int32_t iAttribute)>;

    // Locates `attributeName` inside the given sub mesh and invokes `accessor` on it.
    void accessAttribute(index_t primitiveIndex, const char *attributeName, const AccessorType &accessor);

    // Creates the GPU vertex buffers for every vertex bundle from the raw data blob.
    gfx::BufferList createVertexBuffers(gfx::Device *gfxDevice, ArrayBuffer *data);
    // Converts the serialized vertex data if needed before upload.
    // NOTE(review): conversion conditions (encoded/compressed/quantized?) live in Mesh.cpp — confirm.
    void tryConvertVertexData();

    void initDefault(const ccstd::optional<ccstd::string> &uuid) override;
    // Frees the CPU-side copy of the mesh data.
    void releaseData();

    static TypedArray createTypedArrayWithGFXFormat(gfx::Format format, uint32_t count);

public:
    IntrusivePtr<MorphRendering> morphRendering;

private:
    IStruct _struct;                 // mesh structure description (bundles, sub meshes, bounds, ...)
    ccstd::hash_t _hash{0U};         // cached hash; computed on demand by getHash()
    Uint8Array _data;                // the binary vertex/index blob
    bool _initialized{false};        // set once initialize() has built the rendering sub meshes
    bool _allowDataAccess{true};
    bool _isMeshDataUploaded{false}; // whether _data has been uploaded to GPU buffers — NOTE(review): confirm in Mesh.cpp
    RenderingSubMeshList _renderingSubMeshes;
    // Bone-space bounds cache, keyed per skeleton — NOTE(review): key is presumably the skeleton hash; confirm.
    ccstd::unordered_map<uint64_t, BoneSpaceBounds> _boneSpaceBounds;
    JointBufferIndicesType _jointBufferIndices;

    friend class MeshDeserializer;

    CC_DISALLOW_COPY_MOVE_ASSIGN(Mesh);
};
} // namespace cc

98
cocos/3d/assets/Morph.h Normal file
View File

@@ -0,0 +1,98 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "3d/assets/Types.h"
namespace cc {
// A view into the mesh's shared binary data blob.
// NOTE(review): field semantics inferred from buffer-view usage (byte offset /
// byte length / element count / bytes per element) — confirm against Mesh.cpp.
struct IMeshBufferView {
    uint32_t offset{0};
    uint32_t length{0};
    uint32_t count{0};
    uint32_t stride{0};
};
/**
* @en Morph target contains all displacements data of each vertex attribute like position and normal.
* @zh 形变目标数据包含网格顶点属性在形变下的变化值,可能包含位移、法线等属性
*/
struct MorphTarget {
    /**
     * Displacement of each target attribute.
     * Presumably one buffer view per morphed attribute, parallel to
     * SubMeshMorph::attributes — confirm against the morph reader.
     */
    ccstd::vector<IMeshBufferView> displacements;
};
/**
* @en Sub mesh morph data describes all morph targets for one sub mesh,
* including attributes in each morph target, morph targets data and weights corresponding each targets.
* @zh 子网格形变数据描述一个子网格下所有的形变目标数据,包含顶点形变属性,形变目标数据和对应每个形变目标的权重。
*/
struct SubMeshMorph {
    /**
     * Attributes to morph.
     */
    ccstd::vector<ccstd::string> attributes;
    /**
     * Targets.
     */
    ccstd::vector<MorphTarget> targets;
    /**
     * Initial weights of each target.
     * When absent, the common weights on [[Morph]] apply (see Morph::weights).
     */
    ccstd::optional<MeshWeightsType> weights;
};
/**
* @en Mesh morph data structure to describe the sub meshes data of all sub meshes,
* it also contains all sub mesh morphs, global weights configuration and target names.
* Normally the global weights configuration should be identical to the sub mesh morph weights,
* but if not, the global weights in morph is less prioritized.
* @zh 网格的形变数据结构,包含所有子网格形变数据,全局的权重配置和所有形变目标名称。
* 一般来说,全局权重配置和子网格形变数据中保持一致,但如果有差异,以子网格形变数据中的权重配置为准。
*/
struct Morph {
    /**
     * Morph data of each sub-mesh.
     * One optional entry per sub mesh; nullopt presumably means that sub mesh has no morph.
     */
    ccstd::vector<ccstd::optional<SubMeshMorph>> subMeshMorphs;
    /**
     * Common initial weights of each sub-mesh.
     * Less prioritized than per-sub-mesh weights (see the struct comment above).
     */
    ccstd::optional<MeshWeightsType> weights;
    /**
     * Name of each target of each sub-mesh morph.
     * This field is only meaningful if every sub-mesh has the same number of targets.
     */
    ccstd::optional<ccstd::vector<ccstd::string>> targetNames;
};
} // namespace cc

View File

@@ -0,0 +1,731 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "3d/assets/MorphRendering.h"
#include <memory>
#include "3d/assets/Mesh.h"
#include "3d/assets/Morph.h"
#include "base/RefCounted.h"
#include "core/DataView.h"
#include "core/TypedArray.h"
#include "core/assets/ImageAsset.h"
#include "core/assets/RenderingSubMesh.h"
#include "core/assets/Texture2D.h"
#include "platform/Image.h"
#include "renderer/pipeline/Define.h"
#include "scene/Pass.h"
namespace cc {
// Factory entry point: builds the standard morph-rendering implementation for `mesh`.
MorphRendering *createMorphRendering(Mesh *mesh, gfx::Device *gfxDevice) {
    return ccnew StdMorphRendering(mesh, gfxDevice);
}
/**
* The instance of once sub-mesh morph rendering.
*/
class SubMeshMorphRenderingInstance : public RefCounted {
public:
    ~SubMeshMorphRenderingInstance() override = default;

    /**
     * Set weights of each morph target.
     * @param weights The weights.
     */
    virtual void setWeights(const ccstd::vector<float> &weights) = 0;

    /**
     * Asks the define overrides needed to do the rendering.
     */
    virtual ccstd::vector<scene::IMacroPatch> requiredPatches() = 0;

    /**
     * Adapts the descriptor set to apply the rendering.
     * @param descriptorSet The descriptor set to patch with morph resources.
     */
    virtual void adaptPipelineState(gfx::DescriptorSet *descriptorSet) = 0;

    /**
     * Destroy this instance.
     */
    virtual void destroy() = 0;
};
/**
* Describes how to render a sub-mesh morph.
*/
class SubMeshMorphRendering : public RefCounted {
public:
    ~SubMeshMorphRendering() override = default;

    /**
     * Creates a rendering instance.
     * The caller owns the returned object (ref-counted).
     */
    virtual SubMeshMorphRenderingInstance *createInstance() = 0;
};
namespace {
/**
 * True if force to use cpu computing based sub-mesh rendering.
 */
// constexpr: this is a compile-time constant (usable in constant expressions),
// not merely a runtime-initialized const.
constexpr bool PREFER_CPU_COMPUTING = false;
/**
 * CPU-writable texture used to carry per-vertex morph data to the shader:
 * a Float32 view over a CPU buffer backs a Texture2D that is re-uploaded via
 * updatePixels(). Call initialize() before getTexture()/updatePixels().
 */
class MorphTexture final : public RefCounted {
public:
    MorphTexture() = default;
    ~MorphTexture() override = default;

    /**
     * Gets the GFX texture.
     * Only valid after initialize() has been called.
     */
    gfx::Texture *getTexture() {
        return _textureAsset->getGFXTexture();
    }

    /**
     * Gets the GFX sampler (owned by the device's sampler cache, not by us).
     */
    gfx::Sampler *getSampler() {
        return _sampler;
    }

    /**
     * Value view: Float32 window over the texture's CPU-side pixel storage.
     */
    Float32Array &getValueView() {
        return _valueView;
    }

    /**
     * Destroy the texture. Release its GPU resources.
     * Safe to call even if initialize() never ran (then there is nothing to release).
     */
    void destroy() {
        if (_textureAsset) { // fix: _textureAsset is null before initialize(); avoid a crash
            _textureAsset->destroy();
        }
        // Samplers allocated from `samplerLib` are not required and
        // should not be destroyed.
        // _sampler.destroy();
    }

    /**
     * Update the pixels content to `valueView`.
     * Requires initialize() to have been called.
     */
    void updatePixels() {
        _textureAsset->uploadData(_arrayBuffer->getData());
    }

    void initialize(gfx::Device *gfxDevice, uint32_t width, uint32_t height, uint32_t pixelBytes, bool /*useFloat32Array*/, PixelFormat pixelFormat) {
        // CPU backing store; _valueView aliases it so callers write floats directly.
        _arrayBuffer = ccnew ArrayBuffer(width * height * pixelBytes);
        _valueView = Float32Array(_arrayBuffer);
        auto *imageAsset = ccnew ImageAsset();
        IMemoryImageSource source{_arrayBuffer, false, width, height, pixelFormat};
        imageAsset->setNativeAsset(source);
        _textureAsset = ccnew Texture2D();
        // Point sampling, no mips, clamped: the texels are raw data, not an image;
        // any filtering would corrupt the values read back in the shader.
        _textureAsset->setFilters(Texture2D::Filter::NEAREST, Texture2D::Filter::NEAREST);
        _textureAsset->setMipFilter(Texture2D::Filter::NONE);
        _textureAsset->setWrapMode(Texture2D::WrapMode::CLAMP_TO_EDGE, Texture2D::WrapMode::CLAMP_TO_EDGE, Texture2D::WrapMode::CLAMP_TO_EDGE);
        _textureAsset->setImage(imageAsset);
        if (nullptr == _textureAsset->getGFXTexture()) {
            CC_LOG_WARNING("Unexpected: failed to create morph texture?");
        }
        _sampler = gfxDevice->getSampler(_textureAsset->getSamplerInfo());
    }

private:
    IntrusivePtr<Texture2D> _textureAsset; // null until initialize()
    gfx::Sampler *_sampler{nullptr};       // cached sampler, owned by the device
    ArrayBuffer::Ptr _arrayBuffer;         // CPU pixel storage shared with the image asset
    Float32Array _valueView;               // float view over _arrayBuffer

    CC_DISALLOW_COPY_MOVE_ASSIGN(MorphTexture);
};
// GPU path: one morphed vertex attribute — its name plus the texture that
// carries its per-vertex displacement data.
struct GpuMorphAttribute {
    ccstd::string attributeName;
    IntrusivePtr<MorphTexture> morphTexture;
};
// CPU path: displacement data kept as float arrays instead of textures.
struct CpuMorphAttributeTarget {
    Float32Array displacements;
};

using CpuMorphAttributeTargetList = ccstd::vector<CpuMorphAttributeTarget>;

// One morphed attribute on the CPU path: the attribute name plus one
// displacement array per morph target.
struct CpuMorphAttribute {
    ccstd::string name;
    CpuMorphAttributeTargetList targets;
};
// Factory descriptor returned by createVec4TextureFactory():
// the chosen texture dimensions plus a callable that creates textures of that size.
struct Vec4TextureFactory {
    uint32_t width{0};
    uint32_t height{0};
    std::function<MorphTexture *()> create{nullptr};
};
/**
 * Decides the smallest power-of-two texture size holding at least `nPixels` pixels.
 * The decided width and height have the following characteristics:
 * - both are powers of 2;
 * - if they differ, the width is the larger one;
 * - the width is guaranteed to be a multiple of 4.
 * @param nPixels Least pixel capacity.
 * @param pWidth  Receives the chosen width (set to 0 on failure).
 * @param pHeight Receives the chosen height (set to 0 on failure).
 * @return False when either output pointer is null; true otherwise.
 */
bool bestSizeToHavePixels(uint32_t nPixels, uint32_t *pWidth, uint32_t *pHeight) {
    if (pWidth == nullptr || pHeight == nullptr) {
        if (pWidth != nullptr) {
            *pWidth = 0;
        }
        if (pHeight != nullptr) {
            *pHeight = 0;
        }
        return false;
    }
    // Force at least 5 pixels so the resulting width is >= 4 (a multiple of 4).
    const uint32_t capacity = (nPixels < 5) ? 5 : nPixels;
    const uint32_t pot = pipeline::nextPow2(capacity);
    const auto exponent = static_cast<uint32_t>(std::log2(pot));
    const uint32_t heightExp = exponent >> 1;
    // For an odd exponent the width takes the extra factor of 2, keeping width >= height.
    const uint32_t widthExp = heightExp + (exponent & 1);
    *pWidth = 1 << widthExp;
    *pHeight = 1 << heightExp;
    return true;
}
/**
 * Vertex-texture-fetch needs `gl_vertexId` to index per-vertex data, but
 * WebGL 1.0 does not provide it (WebGL 2.0 does). Enable the dedicated
 * vertex-id channel on the sub mesh so the data can still be addressed.
 * @param mesh         Owner mesh.
 * @param subMeshIndex Index of the sub mesh to patch.
 * @param gfxDevice    Device used to create the channel.
 */
void enableVertexId(Mesh *mesh, uint32_t subMeshIndex, gfx::Device *gfxDevice) {
    const auto &renderingSubMeshes = mesh->getRenderingSubMeshes();
    renderingSubMeshes[subMeshIndex]->enableVertexIdChannel(gfxDevice);
}
/**
 * Builds a factory for morph textures able to hold `vec4Capacity` vec4 values.
 * When float sampling is supported, one RGBA32F pixel stores a whole vec4;
 * otherwise four RGBA8888 pixels are used per vec4.
 * @param gfxDevice    Device used for the feature query and texture creation.
 * @param vec4Capacity Capacity in vec4 units.
 */
Vec4TextureFactory createVec4TextureFactory(gfx::Device *gfxDevice, uint32_t vec4Capacity) {
    const bool floatSampling = static_cast<uint32_t>(gfxDevice->getFormatFeatures(gfx::Format::RGBA32F) & gfx::FormatFeature::SAMPLED_TEXTURE) != 0;

    uint32_t pixelsNeeded = 0;
    uint32_t bytesPerPixel = 0;
    PixelFormat format = PixelFormat::RGBA8888;
    bool useFloat32Array = false;
    if (floatSampling) {
        // One 16-byte RGBA32F pixel per vec4.
        pixelsNeeded = vec4Capacity;
        bytesPerPixel = 16;
        format = Texture2D::PixelFormat::RGBA32F;
        useFloat32Array = true;
    } else {
        // Four 4-byte RGBA8888 pixels per vec4.
        pixelsNeeded = 4 * vec4Capacity;
        bytesPerPixel = 4;
        format = Texture2D::PixelFormat::RGBA8888;
        useFloat32Array = false;
    }

    uint32_t width = 0;
    uint32_t height = 0;
    bestSizeToHavePixels(pixelsNeeded, &width, &height);
    CC_ASSERT_GE(width * height, pixelsNeeded);

    Vec4TextureFactory factory;
    factory.width = width;
    factory.height = height;
    factory.create = [gfxDevice, width, height, bytesPerPixel, useFloat32Array, format]() -> MorphTexture * {
        // The created texture is held by an IntrusivePtr inside GpuMorphAttribute.
        auto *texture = ccnew MorphTexture();
        texture->initialize(gfxDevice, width, height, bytesPerPixel, useFloat32Array, format);
        return texture;
    };
    return factory;
}
/**
 * Provides the access to morph related uniforms (the UBOMorph block).
 * Values are staged in a CPU-side buffer and uploaded to the GPU buffer on commit().
 */
class MorphUniforms final : public RefCounted {
public:
    MorphUniforms(gfx::Device *gfxDevice, uint32_t targetCount) {
        _targetCount = targetCount;
        // CPU staging buffer mirroring the UBOMorph layout.
        _localBuffer = ccnew DataView(ccnew ArrayBuffer(pipeline::UBOMorph::SIZE));
        _remoteBuffer = gfxDevice->createBuffer(gfx::BufferInfo{
            gfx::BufferUsageBit::UNIFORM | gfx::BufferUsageBit::TRANSFER_DST,
            gfx::MemoryUsageBit::HOST | gfx::MemoryUsageBit::DEVICE,
            pipeline::UBOMorph::SIZE,
            pipeline::UBOMorph::SIZE,
        });
    }
    ~MorphUniforms() override {
        delete _localBuffer;
    }
    // Releases the GPU buffer; the CPU staging buffer is freed by the destructor.
    void destroy() {
        _remoteBuffer->destroy();
    }
    gfx::Buffer *getBuffer() const {
        return _remoteBuffer;
    }
    // Stages the morph target weights: each weight is a 4-byte float written
    // sequentially starting at OFFSET_OF_WEIGHTS.
    void setWeights(const ccstd::vector<float> &weights) {
        CC_ASSERT_EQ(weights.size(), _targetCount);
        for (size_t iWeight = 0; iWeight < weights.size(); ++iWeight) {
            _localBuffer->setFloat32(static_cast<uint32_t>(pipeline::UBOMorph::OFFSET_OF_WEIGHTS + 4 * iWeight), weights[iWeight]);
        }
    }
    // Stages the dimensions of the displacement texture.
    void setMorphTextureInfo(float width, float height) {
        _localBuffer->setFloat32(pipeline::UBOMorph::OFFSET_OF_DISPLACEMENT_TEXTURE_WIDTH, width);
        _localBuffer->setFloat32(pipeline::UBOMorph::OFFSET_OF_DISPLACEMENT_TEXTURE_HEIGHT, height);
    }
    // Stages the sub mesh vertex count (stored as a float in the UBO).
    void setVerticesCount(uint32_t count) {
        _localBuffer->setFloat32(pipeline::UBOMorph::OFFSET_OF_VERTICES_COUNT, static_cast<float>(count));
    }
    // Uploads the staged CPU buffer to the GPU uniform buffer.
    void commit() {
        ArrayBuffer *buffer = _localBuffer->buffer();
        _remoteBuffer->update(buffer->getData(), buffer->byteLength());
    }
private:
    uint32_t _targetCount{0};
    DataView *_localBuffer{nullptr}; // owned; deleted in the destructor
    IntrusivePtr<gfx::Buffer> _remoteBuffer;
};
// CPU-based morph blending: target displacements are blended on the CPU each
// time the weights change, then uploaded as a precomputed texture.
class CpuComputing final : public SubMeshMorphRendering {
public:
    explicit CpuComputing(Mesh *mesh, uint32_t subMeshIndex, const Morph *morph, gfx::Device *gfxDevice);
    SubMeshMorphRenderingInstance *createInstance() override;
    // Read-only access to the per-attribute target displacement views.
    const ccstd::vector<CpuMorphAttribute> &getData() const;
private:
    ccstd::vector<CpuMorphAttribute> _attributes;
    gfx::Device *_gfxDevice{nullptr};
};
// GPU-based morph blending: all target displacements are packed once into
// textures; the shader blends them using per-instance weights from UBOMorph.
class GpuComputing final : public SubMeshMorphRendering {
public:
    explicit GpuComputing(Mesh *mesh, uint32_t subMeshIndex, const Morph *morph, gfx::Device *gfxDevice);
    SubMeshMorphRenderingInstance *createInstance() override;
    void destroy();
private:
    gfx::Device *_gfxDevice{nullptr};
    const SubMeshMorph *_subMeshMorph{nullptr};
    uint32_t _textureWidth{0};
    uint32_t _textureHeight{0};
    ccstd::vector<GpuMorphAttribute> _attributes; // shared by all rendering instances
    uint32_t _verticesCount{0};
    friend class GpuComputingRenderingInstance;
};
// Rendering instance for CPU-based morph blending. Whenever the weights change,
// the displacements of all targets are blended on the CPU and the result is
// uploaded to one texture per morphed attribute.
class CpuComputingRenderingInstance final : public SubMeshMorphRenderingInstance {
public:
    explicit CpuComputingRenderingInstance(CpuComputing *owner, uint32_t nVertices, gfx::Device *gfxDevice) {
        // NOTE: _owner is released via the mesh's destroy(), which calls this instance's destroy().
        _owner = owner;
        _morphUniforms = ccnew MorphUniforms(gfxDevice, 0 /* TODO? */);
        auto vec4TextureFactory = createVec4TextureFactory(gfxDevice, nVertices);
        _morphUniforms->setMorphTextureInfo(static_cast<float>(vec4TextureFactory.width), static_cast<float>(vec4TextureFactory.height));
        _morphUniforms->commit();
        // One blend-result texture per morphed attribute.
        for (const auto &attributeMorph : _owner->getData()) {
            auto *morphTexture = vec4TextureFactory.create();
            _attributes.emplace_back(GpuMorphAttribute{attributeMorph.name, morphTexture});
        }
    }
    // Blends all target displacements (weighted sum) into each attribute's
    // texture and uploads the result.
    void setWeights(const ccstd::vector<float> &weights) override {
        for (size_t iAttribute = 0; iAttribute < _attributes.size(); ++iAttribute) {
            const auto &myAttribute = _attributes[iAttribute];
            Float32Array &valueView = myAttribute.morphTexture->getValueView();
            const auto &attributeMorph = _owner->getData()[iAttribute];
            CC_ASSERT(weights.size() == attributeMorph.targets.size());
            for (size_t iTarget = 0; iTarget < attributeMorph.targets.size(); ++iTarget) {
                const auto &targetDisplacements = attributeMorph.targets[iTarget].displacements;
                const float weight = weights[iTarget];
                const uint32_t nVertices = targetDisplacements.length() / 3; // 3 floats (xyz) per vertex
                // The first target overwrites the buffer (also clearing any previous
                // blend); subsequent targets accumulate, skipping near-zero weights.
                if (iTarget == 0) {
                    for (uint32_t iVertex = 0; iVertex < nVertices; ++iVertex) {
                        valueView[4 * iVertex + 0] = targetDisplacements[3 * iVertex + 0] * weight;
                        valueView[4 * iVertex + 1] = targetDisplacements[3 * iVertex + 1] * weight;
                        valueView[4 * iVertex + 2] = targetDisplacements[3 * iVertex + 2] * weight;
                    }
                } else if (std::fabs(weight) >= std::numeric_limits<float>::epsilon()) {
                    for (uint32_t iVertex = 0; iVertex < nVertices; ++iVertex) {
                        valueView[4 * iVertex + 0] += targetDisplacements[3 * iVertex + 0] * weight;
                        valueView[4 * iVertex + 1] += targetDisplacements[3 * iVertex + 1] * weight;
                        valueView[4 * iVertex + 2] += targetDisplacements[3 * iVertex + 2] * weight;
                    }
                }
            }
            myAttribute.morphTexture->updatePixels();
        }
    }
    // CC_MORPH_PRECOMPUTED: the blending already happened on the CPU.
    ccstd::vector<scene::IMacroPatch> requiredPatches() override {
        return {
            {"CC_MORPH_TARGET_USE_TEXTURE", true},
            {"CC_MORPH_PRECOMPUTED", true},
        };
    }
    // Binds each attribute's blend texture (binding chosen by attribute name)
    // plus the morph UBO.
    void adaptPipelineState(gfx::DescriptorSet *descriptorSet) override {
        for (const auto &attribute : _attributes) {
            const auto &attributeName = attribute.attributeName;
            ccstd::optional<uint32_t> binding;
            if (attributeName == gfx::ATTR_NAME_POSITION) {
                binding = uint32_t{pipeline::POSITIONMORPH::BINDING};
            } else if (attributeName == gfx::ATTR_NAME_NORMAL) {
                binding = uint32_t{pipeline::NORMALMORPH::BINDING};
            } else if (attributeName == gfx::ATTR_NAME_TANGENT) {
                binding = uint32_t{pipeline::TANGENTMORPH::BINDING};
            } else {
                CC_LOG_WARNING("Unexpected attribute!");
            }
            if (binding.has_value()) {
                descriptorSet->bindSampler(binding.value(), attribute.morphTexture->getSampler());
                descriptorSet->bindTexture(binding.value(), attribute.morphTexture->getTexture());
            }
        }
        descriptorSet->bindBuffer(pipeline::UBOMorph::BINDING, _morphUniforms->getBuffer());
        descriptorSet->update();
    }
    void destroy() override {
        CC_SAFE_DESTROY(_morphUniforms);
        // Unlike the GPU path, these textures are owned per instance.
        for (auto &myAttribute : _attributes) {
            CC_SAFE_DESTROY(myAttribute.morphTexture);
        }
    }
private:
    ccstd::vector<GpuMorphAttribute> _attributes; // per-instance blend-result textures
    IntrusivePtr<CpuComputing> _owner;
    IntrusivePtr<MorphUniforms> _morphUniforms;
};
// Rendering instance for GPU-based morph blending. The displacement textures
// are shared with (and owned by) the GpuComputing owner; only the weights
// in the morph UBO are per instance.
class GpuComputingRenderingInstance final : public SubMeshMorphRenderingInstance {
public:
    explicit GpuComputingRenderingInstance(GpuComputing *owner, gfx::Device *gfxDevice) {
        _owner = owner;
        _morphUniforms = ccnew MorphUniforms(gfxDevice, static_cast<uint32_t>(_owner->_subMeshMorph->targets.size()));
        _morphUniforms->setMorphTextureInfo(static_cast<float>(_owner->_textureWidth), static_cast<float>(_owner->_textureHeight));
        _morphUniforms->setVerticesCount(_owner->_verticesCount);
        _morphUniforms->commit();
        // Borrow the owner's attribute textures; see destroy() below.
        _attributes = &_owner->_attributes;
    }
    // Only the UBO weights need updating; the shader performs the blend.
    void setWeights(const ccstd::vector<float> &weights) override {
        _morphUniforms->setWeights(weights);
        _morphUniforms->commit();
    }
    ccstd::vector<scene::IMacroPatch> requiredPatches() override {
        return {
            {"CC_MORPH_TARGET_USE_TEXTURE", true},
        };
    }
    // Binds each attribute's displacement texture (binding chosen by attribute
    // name) plus the morph UBO.
    void adaptPipelineState(gfx::DescriptorSet *descriptorSet) override {
        for (const auto &attribute : *_attributes) {
            const auto &attributeName = attribute.attributeName;
            ccstd::optional<uint32_t> binding;
            if (attributeName == gfx::ATTR_NAME_POSITION) {
                binding = uint32_t{pipeline::POSITIONMORPH::BINDING};
            } else if (attributeName == gfx::ATTR_NAME_NORMAL) {
                binding = uint32_t{pipeline::NORMALMORPH::BINDING};
            } else if (attributeName == gfx::ATTR_NAME_TANGENT) {
                binding = uint32_t{pipeline::TANGENTMORPH::BINDING};
            } else {
                CC_LOG_WARNING("Unexpected attribute!");
            }
            if (binding.has_value()) {
                descriptorSet->bindSampler(binding.value(), attribute.morphTexture->getSampler());
                descriptorSet->bindTexture(binding.value(), attribute.morphTexture->getTexture());
            }
        }
        descriptorSet->bindBuffer(pipeline::UBOMorph::BINDING, _morphUniforms->getBuffer());
        descriptorSet->update();
    }
    void destroy() override {
        // Intentionally empty: the textures belong to GpuComputing.
    }
private:
    ccstd::vector<GpuMorphAttribute> *_attributes{nullptr}; // non-owning; points into _owner
    IntrusivePtr<GpuComputing> _owner;
    IntrusivePtr<MorphUniforms> _morphUniforms;
};
// Collects, for every morphed attribute, a Float32Array view into the mesh
// data blob for each target's displacements. No data is copied here.
CpuComputing::CpuComputing(Mesh *mesh, uint32_t subMeshIndex, const Morph *morph, gfx::Device *gfxDevice) {
    _gfxDevice = gfxDevice;
    const auto &subMeshMorph = morph->subMeshMorphs[subMeshIndex].value();
    enableVertexId(mesh, subMeshIndex, gfxDevice);

    const size_t attributeCount = subMeshMorph.attributes.size();
    const size_t targetCount = subMeshMorph.targets.size();
    _attributes.reserve(attributeCount);
    for (size_t iAttribute = 0; iAttribute < attributeCount; ++iAttribute) {
        CpuMorphAttribute attribute;
        attribute.name = subMeshMorph.attributes[iAttribute];
        attribute.targets.resize(targetCount);
        for (size_t iTarget = 0; iTarget < targetCount; ++iTarget) {
            const Mesh::IBufferView &displacementsView = subMeshMorph.targets[iTarget].displacements[iAttribute];
            attribute.targets[iTarget].displacements = Float32Array(mesh->getData().buffer(),
                                                                    mesh->getData().byteOffset() + displacementsView.offset,
                                                                    displacementsView.count);
        }
        _attributes.emplace_back(attribute);
    }
}
SubMeshMorphRenderingInstance *CpuComputing::createInstance() {
    // Every target shares the same vertex count; derive it from the first
    // target's displacement view (3 floats per vertex).
    const uint32_t vertexCount = _attributes[0].targets[0].displacements.length() / 3;
    return ccnew CpuComputingRenderingInstance(this, vertexCount, _gfxDevice);
}
// Read-only access to the per-attribute morph target displacement views.
const ccstd::vector<CpuMorphAttribute> &CpuComputing::getData() const {
    return _attributes;
}
// Packs all target displacements of every morphed attribute into textures once,
// target-major: target i occupies vec4 offsets [nVertices * i, nVertices * (i + 1)).
GpuComputing::GpuComputing(Mesh *mesh, uint32_t subMeshIndex, const Morph *morph, gfx::Device *gfxDevice) {
    _gfxDevice = gfxDevice;
    const auto &subMeshMorph = morph->subMeshMorphs[subMeshIndex].value();
    _subMeshMorph = &subMeshMorph;
    // assertIsNonNullable(subMeshMorph);
    enableVertexId(mesh, subMeshIndex, gfxDevice);
    uint32_t nVertices = mesh->getStruct().vertexBundles[mesh->getStruct().primitives[subMeshIndex].vertexBundelIndices[0]].view.count;
    _verticesCount = nVertices;
    auto nTargets = static_cast<uint32_t>(subMeshMorph.targets.size());
    // One vec4 per (vertex, target) pair.
    uint32_t vec4Required = nVertices * nTargets;
    auto vec4TextureFactory = createVec4TextureFactory(gfxDevice, vec4Required);
    _textureWidth = vec4TextureFactory.width;
    _textureHeight = vec4TextureFactory.height;
    // Creates texture for each attribute.
    uint32_t attributeIndex = 0;
    _attributes.reserve(subMeshMorph.attributes.size());
    for (const auto &attributeName : subMeshMorph.attributes) {
        auto *vec4Tex = vec4TextureFactory.create();
        Float32Array &valueView = vec4Tex->getValueView();
        // if (DEV) { // Make it easy to view texture in profilers...
        //     for (let i = 0; i < valueView.length / 4; ++i) {
        //         valueView[i * 4 + 3] = 1.0;
        //     }
        // }
        uint32_t morphTargetIndex = 0;
        for (const auto &morphTarget : subMeshMorph.targets) {
            const auto &displacementsView = morphTarget.displacements[attributeIndex];
            Float32Array displacements(mesh->getData().buffer(),
                                       mesh->getData().byteOffset() + displacementsView.offset,
                                       displacementsView.count);
            const uint32_t displacementsOffset = (nVertices * morphTargetIndex) * 4;
            // Copy the xyz displacement into the rgb channels; the w channel is left untouched.
            for (uint32_t iVertex = 0; iVertex < nVertices; ++iVertex) {
                valueView[displacementsOffset + 4 * iVertex + 0] = displacements[3 * iVertex + 0];
                valueView[displacementsOffset + 4 * iVertex + 1] = displacements[3 * iVertex + 1];
                valueView[displacementsOffset + 4 * iVertex + 2] = displacements[3 * iVertex + 2];
            }
            ++morphTargetIndex;
        }
        vec4Tex->updatePixels();
        _attributes.emplace_back(GpuMorphAttribute{attributeName, vec4Tex});
        ++attributeIndex;
    }
}
SubMeshMorphRenderingInstance *GpuComputing::createInstance() {
    // The instance shares this object's displacement textures; only the
    // weights (UBO) are per instance.
    auto *instance = ccnew GpuComputingRenderingInstance(this, _gfxDevice);
    return instance;
}
void GpuComputing::destroy() {
for (auto &attribute : _attributes) {
attribute.morphTexture->destroy();
}
}
} // namespace
// Per-MeshRenderer morph rendering instance: holds one sub-mesh rendering
// instance per primitive that has morph data, and forwards calls to it.
class StdMorphRenderingInstance : public MorphRenderingInstance {
public:
    explicit StdMorphRenderingInstance(StdMorphRendering *owner) {
        _owner = owner;
        const size_t subMeshCount = _owner->_mesh->getStruct().primitives.size();
        _subMeshInstances.resize(subMeshCount, nullptr);
        for (size_t iSubMesh = 0; iSubMesh < subMeshCount; ++iSubMesh) {
            auto &rendering = _owner->_subMeshRenderings[iSubMesh];
            if (rendering != nullptr) {
                _subMeshInstances[iSubMesh] = rendering->createInstance();
            }
        }
    }
    ~StdMorphRenderingInstance() override = default;

    void setWeights(index_t subMeshIndex, const MeshWeightsType &weights) override {
        auto &instance = _subMeshInstances[subMeshIndex];
        if (instance) {
            instance->setWeights(weights);
        }
    }

    void adaptPipelineState(index_t subMeshIndex, gfx::DescriptorSet *descriptorSet) override {
        auto &instance = _subMeshInstances[subMeshIndex];
        if (instance) {
            instance->adaptPipelineState(descriptorSet);
        }
    }

    ccstd::vector<scene::IMacroPatch> requiredPatches(index_t subMeshIndex) override {
        CC_ASSERT(_owner->_mesh->getStruct().morph.has_value());
        const auto &subMeshMorphOpt = _owner->_mesh->getStruct().morph.value().subMeshMorphs[subMeshIndex];
        auto *subMeshRenderingInstance = _subMeshInstances[subMeshIndex].get();
        if (subMeshRenderingInstance == nullptr || !subMeshMorphOpt.has_value()) {
            return {};
        }
        const auto &subMeshMorph = subMeshMorphOpt.value();
        ccstd::vector<scene::IMacroPatch> patches{
            {"CC_USE_MORPH", true},
            {"CC_MORPH_TARGET_COUNT", static_cast<int32_t>(subMeshMorph.targets.size())}};
        // Emit a HAS_<X> macro for each morphed vertex attribute present.
        auto addAttributePatch = [&patches, &subMeshMorph](const auto &attrName, const char *macroName) {
            auto iter = std::find(subMeshMorph.attributes.begin(), subMeshMorph.attributes.end(), attrName);
            if (iter != subMeshMorph.attributes.end()) {
                patches.emplace_back(scene::IMacroPatch{macroName, true});
            }
        };
        addAttributePatch(gfx::ATTR_NAME_POSITION, "CC_MORPH_TARGET_HAS_POSITION");
        addAttributePatch(gfx::ATTR_NAME_NORMAL, "CC_MORPH_TARGET_HAS_NORMAL");
        addAttributePatch(gfx::ATTR_NAME_TANGENT, "CC_MORPH_TARGET_HAS_TANGENT");
        // Append the patches the concrete (CPU/GPU) rendering instance needs.
        for (auto &renderingInstancePatch : subMeshRenderingInstance->requiredPatches()) {
            patches.emplace_back(renderingInstancePatch);
        }
        return patches;
    }

    void destroy() override {
        for (auto &subMeshInstance : _subMeshInstances) {
            if (subMeshInstance != nullptr) {
                subMeshInstance->destroy();
            }
        }
    }

private:
    IntrusivePtr<StdMorphRendering> _owner;
    ccstd::vector<IntrusivePtr<SubMeshMorphRenderingInstance>> _subMeshInstances;
};
// Builds one sub-mesh morph rendering per primitive that has morph data,
// choosing between CPU and GPU blending per sub mesh.
StdMorphRendering::StdMorphRendering(Mesh *mesh, gfx::Device *gfxDevice) {
    _mesh = mesh;
    const auto &structInfo = _mesh->getStruct();
    if (!structInfo.morph.has_value()) {
        return;
    }
    const auto &morph = structInfo.morph.value();
    const size_t subMeshCount = structInfo.primitives.size();
    _subMeshRenderings.resize(subMeshCount, nullptr);
    for (size_t iSubMesh = 0; iSubMesh < subMeshCount; ++iSubMesh) {
        const auto &subMeshMorphHolder = morph.subMeshMorphs[iSubMesh];
        if (!subMeshMorphHolder.has_value()) {
            continue;
        }
        // GPU blending is limited by the UBO's weight capacity; fall back to
        // CPU blending when there are too many targets (or when explicitly preferred).
        const bool useCpuComputing = PREFER_CPU_COMPUTING ||
                                     subMeshMorphHolder.value().targets.size() > pipeline::UBOMorph::MAX_MORPH_TARGET_COUNT;
        SubMeshMorphRendering *rendering = nullptr;
        if (useCpuComputing) {
            rendering = ccnew CpuComputing(_mesh, static_cast<uint32_t>(iSubMesh), &morph, gfxDevice);
        } else {
            rendering = ccnew GpuComputing(_mesh, static_cast<uint32_t>(iSubMesh), &morph, gfxDevice);
        }
        _subMeshRenderings[iSubMesh] = rendering;
    }
}
// Out-of-line defaulted destructor: keeps destruction of the IntrusivePtr members in this TU.
StdMorphRendering::~StdMorphRendering() = default;
MorphRenderingInstance *StdMorphRendering::createInstance() {
    // Each MeshRenderer gets its own instance with independent weights/state.
    return ccnew StdMorphRenderingInstance(this);
}
} // namespace cc

View File

@@ -0,0 +1,114 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "3d/assets/MorphRendering.h"
#include "3d/assets/Types.h"
#include "base/Ptr.h"
#include "scene/Define.h"
namespace cc {
class SubMeshMorphRendering;
class Mesh;
namespace gfx {
class Device;
class DescriptorSet;
} // namespace gfx
/**
 * @en The instance of [[MorphRendering]] for dedicated control in the mesh renderer.
 * The root [[MorphRendering]] is owned by [[Mesh]] asset, each [[MeshRenderer]] can have its own morph rendering instance.
 * @zh 用于网格渲染器中独立控制 [[MorphRendering]] 的实例。原始 [[MorphRendering]] 被 [[Mesh]] 资源持有,每个 [[MeshRenderer]] 都持有自己的形变网格渲染实例。
 */
class MorphRenderingInstance : public RefCounted {
public:
    ~MorphRenderingInstance() override = default;
    /**
     * Sets weights of targets of specified sub mesh.
     * @param subMeshIndex Index of the sub mesh whose weights are set.
     * @param weights      One weight per morph target.
     */
    virtual void setWeights(index_t subMeshIndex, const MeshWeightsType &weights) = 0;
    /**
     * Adapts pipeline state to do the rendering (binds morph textures/buffers).
     * @param subMeshIndex  Index of the sub mesh being rendered.
     * @param descriptorSet Descriptor set to bind into.
     */
    virtual void adaptPipelineState(index_t subMeshIndex, gfx::DescriptorSet *descriptorSet) = 0;
    /**
     * Shader macro patches required to render the specified sub mesh.
     */
    virtual ccstd::vector<scene::IMacroPatch> requiredPatches(index_t subMeshIndex) = 0;
    /**
     * Destroy the rendering instance.
     */
    virtual void destroy() = 0;
};
/**
 * @en Interface for classes which control the rendering of morph resources.
 * @zh 支持形变网格渲染的基类。
 */
class MorphRendering : public RefCounted {
public:
    ~MorphRendering() override = default;
    // Creates a per-renderer instance with its own weights and pipeline state.
    virtual MorphRenderingInstance *createInstance() = 0;
};
/**
 * @en Standard morph rendering class, it supports both GPU and CPU based morph blending.
 * If sub mesh morph targets count is less than [[pipeline.UBOMorph.MAX_MORPH_TARGET_COUNT]], then GPU based blending is enabled.
 * Each of the sub-mesh morph has its own [[MorphRenderingInstance]],
 * its morph target weights, render pipeline state and strategy of morph blending are controlled separately.
 * @zh 标准形变网格渲染类,它同时支持 CPU 和 GPU 的形变混合计算。
 * 如果子网格形变目标数量少于 [[pipeline.UBOMorph.MAX_MORPH_TARGET_COUNT]],那么就会使用基于 GPU 的形变混合计算。
 * 每个子网格形变都使用自己独立的 [[MorphRenderingInstance]],它的形变目标权重、渲染管线状态和形变混合计算策略都是独立控制的。
 */
class StdMorphRendering final : public MorphRendering {
public:
    explicit StdMorphRendering(Mesh *mesh, gfx::Device *gfxDevice);
    ~StdMorphRendering() override;
    MorphRenderingInstance *createInstance() override;
private:
    Mesh *_mesh{nullptr}; // non-owning back reference to the mesh asset
    ccstd::vector<IntrusivePtr<SubMeshMorphRendering>> _subMeshRenderings; // one entry per primitive; null when it has no morph
    CC_DISALLOW_COPY_MOVE_ASSIGN(StdMorphRendering);
    friend class StdMorphRenderingInstance;
};
/**
* @en Create morph rendering from mesh which contains morph targets data.
* @zh 从包含形变对象的网格资源中创建形变网格渲染对象。
* @param mesh @en The mesh to create morph rendering from. @zh 用于创建形变网格渲染对象的原始网格资源。
* @param gfxDevice @en The device instance acquired from [[Root]]. @zh 设备对象实例,可以从 [[Root]] 获取。
*/
MorphRendering *createMorphRendering(Mesh *mesh, gfx::Device *gfxDevice);
} // namespace cc

View File

@@ -0,0 +1,72 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "3d/assets/Skeleton.h"
#include <iomanip>
#include <sstream>
#include "base/std/hash/hash.h"
namespace cc {
/**
 * Gets the inverse bind pose matrices, computing and caching them on first access.
 * @return One inverse matrix per entry of the bind pose list, in the same order.
 */
const ccstd::vector<Mat4> &Skeleton::getInverseBindposes() {
    if (!_invBindposes.has_value()) {
        _invBindposes = ccstd::vector<Mat4>{};
        auto &invBindposes = _invBindposes.value();
        invBindposes.reserve(_bindposes.size()); // one inverse per bind pose; avoid re-allocations
        for (const auto &bindpose : _bindposes) {
            invBindposes.emplace_back(bindpose.getInversed());
        }
    }
    return *_invBindposes;
}
ccstd::hash_t Skeleton::getHash() {
    // hashes should already be computed offline, but if not, make one
    // NOTE: the textual format below (std::fixed, 2 decimals, space separated,
    // one matrix per line, seed 666) determines the resulting hash value —
    // changing any part of it changes the hash produced for the same bind poses.
    if (!_hash) {
        std::stringstream sstr;
        for (const auto &ibm : _bindposes) {
            sstr << std::fixed << std::setprecision(2)
                 << ibm.m[0] << " " << ibm.m[1] << " " << ibm.m[2] << " " << ibm.m[3] << " "
                 << ibm.m[4] << " " << ibm.m[5] << " " << ibm.m[6] << " " << ibm.m[7] << " "
                 << ibm.m[8] << " " << ibm.m[9] << " " << ibm.m[10] << " " << ibm.m[11] << " "
                 << ibm.m[12] << " " << ibm.m[13] << " " << ibm.m[14] << " " << ibm.m[15] << "\n";
        }
        ccstd::string str{sstr.str()};
        ccstd::hash_t seed = 666;
        ccstd::hash_range(seed, str.begin(), str.end());
        _hash = seed;
    }
    return _hash;
}
bool Skeleton::destroy() {
    //cjh TODO: (legacyCC.director.root?.dataPoolManager as DataPoolManager)?.releaseSkeleton(this);
    return Super::destroy();
}

// A skeleton is usable only when it has both joints and bind poses.
bool Skeleton::validate() const {
    return !_joints.empty() && !_bindposes.empty();
}
} // namespace cc

View File

@@ -0,0 +1,87 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "core/assets/Asset.h"
namespace cc {
/**
 * @en The skeleton asset. It stores the path related to [[SkinnedMeshRenderer.skinningRoot]] of all bones and its bind pose matrix.
 * @zh 骨骼资源。骨骼资源记录了每个关节(相对于 [[SkinnedMeshRenderer.skinningRoot]])的路径以及它的绑定姿势矩阵。
 */
class Skeleton final : public Asset {
public:
    using Super = Asset;
    Skeleton() = default;
    ~Skeleton() override = default;
    /**
     * @en The path of all bones, the length always equals the length of [[bindposes]]
     * @zh 所有关节的路径。该数组的长度始终与 [[bindposes]] 的长度相同。
     */
    inline const ccstd::vector<ccstd::string> &getJoints() const {
        return _joints;
    }
    inline void setJoints(const ccstd::vector<ccstd::string> &value) {
        _joints = value;
    }
    /**
     * @en The bind poses matrix of all bones, the length always equals the length of [[joints]]
     * @zh 所有关节的绑定姿势矩阵。该数组的长度始终与 [[joints]] 的长度相同。
     */
    const ccstd::vector<Mat4> &getBindposes() const {
        return _bindposes;
    }
    void setBindposes(const ccstd::vector<Mat4> &value) {
        _bindposes = value;
    }
    /**
     * @en Gets the inverse bind poses matrix
     * @zh 获取反向绑定姿势矩阵
     */
    const ccstd::vector<Mat4> &getInverseBindposes();
    /**
     * @en Gets the hash of the skeleton asset
     * @zh 获取骨骼资源的哈希值
     */
    ccstd::hash_t getHash();
    void setHash(ccstd::hash_t hash) { _hash = hash; }
    bool destroy() override;
    bool validate() const override;
private:
    ccstd::vector<ccstd::string> _joints;                  // joint paths relative to the skinning root
    ccstd::vector<Mat4> _bindposes;                        // bind pose matrices, parallel to _joints
    ccstd::optional<ccstd::vector<Mat4>> _invBindposes;    // lazily computed cache; see getInverseBindposes()
    ccstd::hash_t _hash{0U};                               // 0 means "not yet computed"; see getHash()
};
} // namespace cc

60
cocos/3d/assets/Types.h Normal file
View File

@@ -0,0 +1,60 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <cstdint>
#include "base/std/container/string.h"
#include "base/std/optional.h"
#include "core/TypedArray.h"
namespace cc {
// Per-sub-mesh morph target weights: one float per target.
using MeshWeightsType = ccstd::vector<float>;
/**
 * @en Array views for index buffer
 * @zh 允许存储索引的数组视图
 */
using IBArray = ccstd::variant<ccstd::monostate, Uint8Array, Uint16Array, Uint32Array>;
/**
 * Reads the index at `idx` from whichever typed-array alternative `arr` holds,
 * converted to T. Returns 0 when the variant is empty (monostate).
 * Alternatives are probed in the original order: Uint16, Uint32, Uint8.
 */
template <typename T>
T getIBArrayValue(const IBArray &arr, uint32_t idx) {
    if (const auto *u16 = ccstd::get_if<Uint16Array>(&arr)) {
        return static_cast<T>((*u16)[idx]);
    }
    if (const auto *u32 = ccstd::get_if<Uint32Array>(&arr)) {
        return static_cast<T>((*u32)[idx]);
    }
    if (const auto *u8 = ccstd::get_if<Uint8Array>(&arr)) {
        return static_cast<T>((*u8)[idx]);
    }
    return 0;
}
} // namespace cc

125
cocos/3d/misc/Buffer.cpp Normal file
View File

@@ -0,0 +1,125 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "3d/misc/Buffer.h"
#include "base/std/variant.h"
namespace cc {
namespace {
// Maps a GFX format component type to the DataView accessor name prefix
// ("Uint" / "Int" / "Float"); normalized types reuse the integer accessors.
// Made const: the table is read-only (only looked up via find()).
const ccstd::unordered_map<gfx::FormatType, ccstd::string> typeMap{
    {gfx::FormatType::UNORM, "Uint"},
    {gfx::FormatType::SNORM, "Int"},
    {gfx::FormatType::UINT, "Uint"},
    {gfx::FormatType::INT, "Int"},
    {gfx::FormatType::UFLOAT, "Float"},
    {gfx::FormatType::FLOAT, "Float"},
};
// Builds a DataView accessor type name such as "Uint8" or "Float32" from a
// GFX format description; unknown component types default to "Uint".
ccstd::string getDataViewType(const gfx::FormatInfo &info) {
    auto iter = typeMap.find(info.type);
    const ccstd::string prefix = (iter != typeMap.end()) ? iter->second : "Uint";
    // info.size is the total byte size of all components; derive bits per component.
    const uint32_t bitsPerComponent = info.size / info.count * 8;
    return prefix + std::to_string(bitsPerComponent);
}
} // namespace
using DataVariant = ccstd::variant<ccstd::monostate, int32_t, float>;
using MapBufferCallback = std::function<DataVariant(const DataVariant &cur, uint32_t idx, const DataView &view)>;
/**
 * Iterates over the components of `target` interpreted with `format`, invokes
 * `callback` for each component value and writes the result into the output view.
 *
 * @param target   Source data view to read from.
 * @param callback Receives (currentValue, componentIndex, sourceView) and returns the new value.
 * @param aFormat  Element format; defaults to R32F.
 * @param aOffset  Byte offset of the first element; defaults to 0.
 * @param aLength  Byte length to process; defaults to the remainder of `target`.
 * @param aStride  Byte stride between elements; defaults to the format's size.
 * @param out      Optional destination; when null, a copy of `target`'s range is used and returned.
 * @return The view the results were written to.
 */
DataView mapBuffer(DataView &target,
                   const MapBufferCallback &callback,
                   ccstd::optional<gfx::Format> aFormat,
                   ccstd::optional<uint32_t> aOffset,
                   ccstd::optional<uint32_t> aLength,
                   ccstd::optional<uint32_t> aStride,
                   DataView *out) {
    const gfx::Format format = aFormat.has_value() ? aFormat.value() : gfx::Format::R32F;
    const uint32_t offset = aOffset.has_value() ? aOffset.value() : 0;
    const uint32_t length = aLength.has_value() ? aLength.value() : target.byteLength() - offset;
    uint32_t stride = aStride.has_value() ? aStride.value() : 0;
    DataView dataView;
    if (out == nullptr) {
        // No explicit destination: operate on a copy of the source range.
        out = &dataView;
        dataView.assign(target.buffer()->slice(target.byteOffset(), target.byteOffset() + target.byteLength()));
    }
    const auto &info = gfx::GFX_FORMAT_INFOS[static_cast<int32_t>(format)];
    if (stride == 0) {
        stride = info.size;
    }
    static const ccstd::string SET_PREFIX{"set"};
    static const ccstd::string GET_PREFIX{"get"};
    const bool isFloat = info.type == gfx::FormatType::FLOAT || info.type == gfx::FormatType::UFLOAT;
    DataView::IntWritter intWritter = nullptr;
    DataView::ReaderVariant intReader;
    if (!isFloat) {
        const ccstd::string viewType = getDataViewType(info);
        intWritter = DataView::intWritterMap[SET_PREFIX + viewType];
        intReader = DataView::intReaderMap[GET_PREFIX + viewType];
    }
    const uint32_t componentBytesLength = info.size / info.count;
    const uint32_t nSeg = length / stride; // integer division already truncates; std::floor was redundant
    for (uint32_t iSeg = 0; iSeg < nSeg; ++iSeg) {
        const uint32_t x = offset + stride * iSeg;
        for (uint32_t iComponent = 0; iComponent < info.count; ++iComponent) {
            const uint32_t y = x + componentBytesLength * iComponent;
            if (isFloat) {
                const float cur = target.getFloat32(y);
                auto dataVariant = callback(cur, iComponent, target);
                if (ccstd::holds_alternative<float>(dataVariant)) {
                    out->setFloat32(y, ccstd::get<float>(dataVariant));
                } else {
                    CC_LOG_ERROR("mapBuffer, wrong data type, expect float");
                }
            } else {
                const int32_t cur = target.readInt(intReader, y);
                // iComponent is usually more useful than y
                auto dataVariant = callback(cur, iComponent, target);
                if (ccstd::holds_alternative<int32_t>(dataVariant)) {
                    // FIX: write into the output view; previously this wrote back into
                    // `target`, mutating the source while the returned copy stayed unmodified.
                    (out->*intWritter)(y, ccstd::get<int32_t>(dataVariant));
                } else {
                    CC_LOG_ERROR("mapBuffer, wrong data type, expect int32_t");
                }
            }
        }
    }
    // FIX: return the view actually written. Previously the local `dataView`
    // was always returned, which is empty when the caller supplied `out`.
    return *out;
}
} // namespace cc

125
cocos/3d/misc/Buffer.h Normal file
View File

@@ -0,0 +1,125 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <cmath>
#include "base/std/optional.h"
#include "core/DataView.h"
#include "renderer/gfx-base/GFXDef.h"
namespace cc {
// Writes every element of `data` into `target`, laid out as `format` elements
// starting at `offset` with `stride` bytes between consecutive elements.
// Default params behave just like a plain, compact Float32Array.
template <typename T>
void writeBuffer(DataView &target,
                 const ccstd::vector<T> &data,
                 const gfx::Format &format = gfx::Format::R32F,
                 uint32_t offset = 0,
                 uint32_t stride = 0) {
    const gfx::FormatInfo &info = gfx::GFX_FORMAT_INFOS[static_cast<uint32_t>(format)];
    if (stride == 0) {
        // Compact layout: one format element directly after another.
        stride = info.size;
    }
    const uint32_t componentBytesLength = info.size / info.count;
    // Number of whole format elements `data` can fill; trailing partial data is ignored.
    const auto nSeg = static_cast<uint32_t>(floor(data.size() / info.count));
    const uint32_t bytes = info.size / info.count * 8; // bits per component
    for (uint32_t iSeg = 0; iSeg < nSeg; ++iSeg) {
        uint32_t x = offset + stride * iSeg;
        for (uint32_t iComponent = 0; iComponent < info.count; ++iComponent) {
            // Byte position of this component inside the interleaved buffer.
            const uint32_t y = x + componentBytesLength * iComponent;
            // default Little-Endian
            switch (info.type) {
                case gfx::FormatType::UINT:
                case gfx::FormatType::UNORM:
                    switch (bytes) {
                        case 8:
                            target.setUint8(y, static_cast<uint8_t>(data[info.count * iSeg + iComponent]));
                            break;
                        case 16:
                            target.setUint16(y, static_cast<uint16_t>(data[info.count * iSeg + iComponent]));
                            break;
                        case 32:
                            target.setUint32(y, static_cast<uint32_t>(data[info.count * iSeg + iComponent]));
                            break;
                        default:
                            CC_ABORT();
                            break;
                    }
                    break;
                case gfx::FormatType::INT:
                case gfx::FormatType::SNORM:
                    switch (bytes) {
                        case 8:
                            target.setInt8(y, static_cast<int8_t>(data[info.count * iSeg + iComponent]));
                            break;
                        case 16:
                            target.setInt16(y, static_cast<int16_t>(data[info.count * iSeg + iComponent]));
                            break;
                        case 32:
                            target.setInt32(y, static_cast<int32_t>(data[info.count * iSeg + iComponent]));
                            break;
                        default:
                            CC_ABORT();
                            break;
                    }
                    break;
                case gfx::FormatType::UFLOAT:
                case gfx::FormatType::FLOAT:
                    // NOTE(review): all three widths are written through setFloat32,
                    // so the 8- and 16-bit cases emit 4 bytes into a 1/2-byte
                    // component slot — presumably DataView has no half-float
                    // writer. Confirm this is intended for sub-32-bit float formats.
                    switch (bytes) {
                        case 8:
                            target.setFloat32(y, static_cast<float>(data[info.count * iSeg + iComponent]));
                            break;
                        case 16:
                            target.setFloat32(y, static_cast<float>(data[info.count * iSeg + iComponent]));
                            break;
                        case 32:
                            target.setFloat32(y, static_cast<float>(data[info.count * iSeg + iComponent]));
                            break;
                        default:
                            CC_ABORT();
                            break;
                    }
                    break;
                default:
                    CC_ABORT();
                    break;
            }
        }
    }
}
// Component value exchanged with the mapBuffer callback (int32 or float).
using DataVariant = ccstd::variant<ccstd::monostate, int32_t, float>;
// Invoked once per component with the current value, the component index and
// the source view; returns the value to store.
using MapBufferCallback = std::function<DataVariant(const DataVariant &cur, uint32_t idx, const DataView &view)>;
// Maps a region of `target` through `callback`, component by component.
// When `out` is null the result is written into a copy of `target`'s buffer.
DataView mapBuffer(DataView &target,
                   const MapBufferCallback &callback,
                   ccstd::optional<gfx::Format> aFormat,
                   ccstd::optional<uint32_t> aOffset,
                   ccstd::optional<uint32_t> aLength,
                   ccstd::optional<uint32_t> aStride,
                   DataView *out);
} // namespace cc

View File

@@ -0,0 +1,63 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "3d/misc/BufferBlob.h"
#include "core/TypedArray.h"
namespace cc {
// Pads the blob so the next added buffer starts at a multiple of `align`.
// `align == 0` (or an already-aligned length) is a no-op.
void BufferBlob::setNextAlignment(uint32_t align) {
    if (align == 0) {
        return;
    }
    const uint32_t misalign = _length % align;
    if (misalign == 0) {
        return;
    }
    const uint32_t pad = align - misalign;
    _arrayBufferOrPaddings.emplace_back(pad);
    _length += pad;
}
// Appends `arrayBuffer` to the blob and returns the byte offset at which it
// will begin inside the combined buffer.
uint32_t BufferBlob::addBuffer(ArrayBuffer *arrayBuffer) {
    const uint32_t startOffset = _length;
    _length += arrayBuffer->byteLength();
    _arrayBufferOrPaddings.emplace_back(arrayBuffer);
    return startOffset;
}
// Flattens all recorded buffers into one contiguous buffer; padding entries
// only advance the write cursor, leaving zero-initialized gaps.
ArrayBuffer::Ptr BufferBlob::getCombined() {
    Int8Array combined(_length);
    uint32_t writeOffset = 0;
    for (const auto &entry : _arrayBufferOrPaddings) {
        if (const auto *padding = ccstd::get_if<uint32_t>(&entry)) {
            writeOffset += *padding;
        } else if (const auto *buf = ccstd::get_if<ArrayBuffer::Ptr>(&entry)) {
            combined.set(*buf, writeOffset);
            writeOffset += (*buf)->byteLength();
        }
    }
    return combined.buffer();
}
} // namespace cc

View File

@@ -0,0 +1,47 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "base/std/variant.h"
#include "cocos/base/std/container/vector.h"
#include "core/ArrayBuffer.h"
namespace cc {
/**
 * Accumulates ArrayBuffers (with optional alignment paddings) and merges
 * them into a single contiguous buffer.
 */
class BufferBlob {
public:
    // Appends padding (if needed) so the next added buffer starts at a
    // multiple of `align`; `align == 0` is a no-op.
    void setNextAlignment(uint32_t align);
    // Appends `arrayBuffer` and returns the byte offset where it will live
    // inside the combined buffer.
    uint32_t addBuffer(ArrayBuffer *arrayBuffer);
    // Total byte length accumulated so far (buffers plus paddings).
    inline uint32_t getLength() const { return _length; }
    // Copies all buffers (skipping paddings) into one contiguous buffer.
    ArrayBuffer::Ptr getCombined();
private:
    // Each entry is either a buffer or a padding byte count.
    ccstd::vector<ccstd::variant<ccstd::monostate, ArrayBuffer::Ptr, uint32_t>> _arrayBufferOrPaddings;
    uint32_t _length{0};
};
} // namespace cc

View File

@@ -0,0 +1,483 @@
/****************************************************************************
Copyright (c) 2022-2023 Xiamen Yaji Software Co., Ltd.
https://www.cocos.com/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "3d/misc/CreateMesh.h"
#include <zlib.h>
#include <algorithm>
#include "3d/misc/Buffer.h"
#include "3d/misc/BufferBlob.h"
#include "core/ArrayBuffer.h"
#include "core/DataView.h"
#include "core/assets/RenderingSubMesh.h"
#include "meshopt/meshoptimizer.h"
#include "renderer/gfx-base/GFXDef-common.h"
namespace cc {
namespace {
// Fallback vertex attribute descriptions used when the geometry does not
// override them via IGeometry::attributes.
gfx::AttributeList defAttrs = {
    gfx::Attribute{gfx::ATTR_NAME_POSITION, gfx::Format::RGB32F},
    gfx::Attribute{gfx::ATTR_NAME_NORMAL, gfx::Format::RGB32F},
    gfx::Attribute{gfx::ATTR_NAME_TEX_COORD, gfx::Format::RG32F},
    gfx::Attribute{gfx::ATTR_NAME_TANGENT, gfx::Format::RGBA32F},
    gfx::Attribute{gfx::ATTR_NAME_COLOR, gfx::Format::RGBA32F},
};
} // namespace
// Creates (or refills) a static mesh from `geometry`. Reuses `out` when the
// caller supplies one, otherwise allocates a fresh Mesh.
Mesh *MeshUtils::createMesh(const IGeometry &geometry, Mesh *out /*= nullptr*/, const ICreateMeshOptions &options /*= {}*/) {
    Mesh *mesh = (out != nullptr) ? out : ccnew Mesh();
    mesh->reset(createMeshInfo(geometry, options));
    return mesh;
}
Mesh::ICreateInfo MeshUtils::createMeshInfo(const IGeometry &geometry, const ICreateMeshOptions &options /* = {}*/) {
// Collect attributes and calculate length of result vertex buffer.
gfx::AttributeList attributes;
uint32_t stride = 0;
struct Channel {
uint32_t offset{0};
ccstd::vector<float> data; // float?
gfx::Attribute attribute;
};
ccstd::vector<Channel> channels;
uint32_t vertCount = 0;
const gfx::Attribute *attr = nullptr;
ccstd::vector<float> positions(geometry.positions);
if (!positions.empty()) {
attr = nullptr;
if (geometry.attributes.has_value()) {
for (const auto &att : geometry.attributes.value()) {
if (att.name == gfx::ATTR_NAME_POSITION) {
attr = &att;
break;
}
}
}
if (attr == nullptr) {
attr = &defAttrs[0];
}
attributes.emplace_back(*attr);
const auto &info = gfx::GFX_FORMAT_INFOS[static_cast<uint32_t>(attr->format)];
vertCount = std::max(vertCount, static_cast<uint32_t>(std::floor(positions.size() / info.count)));
channels.emplace_back(Channel{stride, positions, *attr});
stride += info.size;
}
if (geometry.normals.has_value() && !geometry.normals.value().empty()) {
attr = nullptr;
if (geometry.attributes.has_value()) {
for (const auto &att : geometry.attributes.value()) {
if (att.name == gfx::ATTR_NAME_NORMAL) {
attr = &att;
break;
}
}
}
if (attr == nullptr) {
attr = &defAttrs[1];
}
attributes.emplace_back(*attr);
const auto &info = gfx::GFX_FORMAT_INFOS[static_cast<uint32_t>(attr->format)];
vertCount = std::max(vertCount, static_cast<uint32_t>(std::floor(geometry.normals->size() / info.count)));
channels.emplace_back(Channel{stride, geometry.normals.value(), *attr});
stride += info.size;
}
if (geometry.uvs.has_value() && !geometry.uvs.value().empty()) {
attr = nullptr;
if (geometry.attributes.has_value()) {
for (const auto &att : geometry.attributes.value()) {
if (att.name == gfx::ATTR_NAME_TEX_COORD) {
attr = &att;
break;
}
}
}
if (attr == nullptr) {
attr = &defAttrs[2];
}
attributes.emplace_back(*attr);
const auto &info = gfx::GFX_FORMAT_INFOS[static_cast<uint32_t>(attr->format)];
vertCount = std::max(vertCount, static_cast<uint32_t>(std::floor(geometry.uvs->size() / info.count)));
channels.emplace_back(Channel{stride, geometry.uvs.value(), *attr});
stride += info.size;
}
if (geometry.tangents.has_value() && !geometry.tangents.value().empty()) {
attr = nullptr;
if (geometry.attributes.has_value()) {
for (const auto &att : geometry.attributes.value()) {
if (att.name == gfx::ATTR_NAME_TANGENT) {
attr = &att;
break;
}
}
}
if (attr == nullptr) {
attr = &defAttrs[3];
}
attributes.emplace_back(*attr);
const auto &info = gfx::GFX_FORMAT_INFOS[static_cast<uint32_t>(attr->format)];
vertCount = std::max(vertCount, static_cast<uint32_t>(std::floor(geometry.tangents->size() / info.count)));
channels.emplace_back(Channel{stride, geometry.tangents.value(), *attr});
stride += info.size;
}
if (geometry.colors.has_value() && !geometry.colors.value().empty()) {
attr = nullptr;
if (geometry.attributes.has_value()) {
for (const auto &att : geometry.attributes.value()) {
if (att.name == gfx::ATTR_NAME_COLOR) {
attr = &att;
break;
}
}
}
if (attr == nullptr) {
attr = &defAttrs[4];
}
attributes.emplace_back(*attr);
const auto &info = gfx::GFX_FORMAT_INFOS[static_cast<uint32_t>(attr->format)];
vertCount = std::max(vertCount, static_cast<uint32_t>(std::floor(geometry.colors->size() / info.count)));
channels.emplace_back(Channel{stride, geometry.colors.value(), *attr});
stride += info.size;
}
if (geometry.customAttributes.has_value()) {
for (const auto &ca : geometry.customAttributes.value()) {
const auto &info = gfx::GFX_FORMAT_INFOS[static_cast<uint32_t>(attr->format)];
attributes.emplace_back(ca.attr);
vertCount = std::max(vertCount, static_cast<uint32_t>(std::floor(ca.values.size() / info.count)));
channels.emplace_back(Channel{stride, ca.values, ca.attr});
stride += info.size;
}
}
// Use this to generate final merged buffer.
BufferBlob bufferBlob;
// Fill vertex buffer.
auto *vertexBuffer = ccnew ArrayBuffer(vertCount * stride);
DataView vertexBufferView(vertexBuffer);
for (const auto &channel : channels) {
writeBuffer(vertexBufferView, channel.data, channel.attribute.format, channel.offset, stride);
}
bufferBlob.setNextAlignment(0);
Mesh::IVertexBundle vertexBundle;
Mesh::IBufferView buffferView;
buffferView.offset = bufferBlob.getLength();
buffferView.length = static_cast<uint32_t>(vertexBuffer->byteLength());
buffferView.count = vertCount;
buffferView.stride = stride;
vertexBundle.attributes = attributes;
vertexBundle.view = buffferView;
bufferBlob.addBuffer(vertexBuffer);
// Fill index buffer.
ArrayBuffer::Ptr indexBuffer;
uint32_t idxCount = 0;
const uint32_t idxStride = 2;
if (geometry.indices.has_value()) {
const ccstd::vector<uint32_t> &indices = geometry.indices.value();
idxCount = static_cast<uint32_t>(indices.size());
indexBuffer = ccnew ArrayBuffer(idxStride * idxCount);
DataView indexBufferView(indexBuffer);
writeBuffer(indexBufferView, indices, gfx::Format::R16UI);
}
// Create primitive.
Mesh::ISubMesh primitive;
primitive.vertexBundelIndices = {0};
primitive.primitiveMode = geometry.primitiveMode.has_value() ? geometry.primitiveMode.value() : gfx::PrimitiveMode::TRIANGLE_LIST;
if (indexBuffer) {
bufferBlob.setNextAlignment(idxStride);
Mesh::IBufferView bufferView;
bufferView.offset = bufferBlob.getLength();
bufferView.length = indexBuffer->byteLength();
bufferView.count = idxCount;
bufferView.stride = idxStride;
primitive.indexView = bufferView;
bufferBlob.addBuffer(indexBuffer);
}
ccstd::optional<Vec3> minPosition = geometry.minPos;
if (!minPosition.has_value() && options.calculateBounds.has_value() && options.calculateBounds.value()) {
minPosition = Vec3(std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity());
for (uint32_t iVertex = 0; iVertex < vertCount; ++iVertex) {
Vec3::min(minPosition.value(), Vec3(positions[iVertex * 3 + 0], positions[iVertex * 3 + 1], positions[iVertex * 3 + 2]), &minPosition.value());
}
}
ccstd::optional<Vec3> maxPosition = geometry.maxPos;
if (!maxPosition.has_value() && options.calculateBounds.has_value() && options.calculateBounds.value()) {
maxPosition = Vec3(-std::numeric_limits<float>::infinity(), -std::numeric_limits<float>::infinity(), -std::numeric_limits<float>::infinity());
for (uint32_t iVertex = 0; iVertex < vertCount; ++iVertex) {
Vec3::max(maxPosition.value(), Vec3(positions[iVertex * 3 + 0], positions[iVertex * 3 + 1], positions[iVertex * 3 + 2]), &maxPosition.value());
}
}
// Create mesh struct
Mesh::IStruct meshStruct;
meshStruct.vertexBundles = {vertexBundle};
meshStruct.primitives = {primitive};
if (minPosition.has_value()) {
meshStruct.minPosition = minPosition.value();
}
if (maxPosition.has_value()) {
meshStruct.maxPosition = maxPosition.value();
}
Mesh::ICreateInfo createInfo;
createInfo.structInfo = std::move(meshStruct);
createInfo.data = Uint8Array(bufferBlob.getCombined());
return createInfo;
}
// Bytes needed to round `length` up to a multiple of `align`.
// Returns 0 when already aligned or when `align` is 0.
static inline uint32_t getPadding(uint32_t length, uint32_t align) {
    if (align == 0U) {
        return 0U;
    }
    const uint32_t remainder = length % align;
    return remainder == 0U ? 0U : align - remainder;
}
// Creates (or refills) a dynamic mesh sized by `options`, initializes it and
// uploads `geometry` into sub mesh `primitiveIndex`.
Mesh *MeshUtils::createDynamicMesh(index_t primitiveIndex, const IDynamicGeometry &geometry, Mesh *out /*= nullptr*/, const ICreateDynamicMeshOptions &options /*= {}*/) {
    Mesh *mesh = (out != nullptr) ? out : ccnew Mesh();
    mesh->reset(MeshUtils::createDynamicMeshInfo(geometry, options));
    mesh->initialize();
    mesh->updateSubMesh(primitiveIndex, geometry);
    return mesh;
}
// Builds a Mesh::ICreateInfo whose buffers are sized for the worst case
// (options.maxSubMeshes / maxSubMeshVertices / maxSubMeshIndices), so the
// mesh data can be rewritten later without reallocation. Each provided
// attribute gets its own (non-interleaved) vertex stream.
Mesh::ICreateInfo MeshUtils::createDynamicMeshInfo(const IDynamicGeometry &geometry, const ICreateDynamicMeshOptions &options /* = {}*/) {
    gfx::AttributeList attributes;
    uint32_t stream = 0U;
    // One stream per attribute that the geometry actually provides.
    if (!geometry.positions.empty()) {
        attributes.push_back({gfx::ATTR_NAME_POSITION, gfx::Format::RGB32F, false, stream++, false, 0U});
    }
    if (geometry.normals.has_value() && !geometry.normals.value().empty()) {
        attributes.push_back({gfx::ATTR_NAME_NORMAL, gfx::Format::RGB32F, false, stream++, false, 0U});
    }
    if (geometry.uvs.has_value() && !geometry.uvs.value().empty()) {
        attributes.push_back({gfx::ATTR_NAME_TEX_COORD, gfx::Format::RG32F, false, stream++, false, 0U});
    }
    if (geometry.tangents.has_value() && !geometry.tangents.value().empty()) {
        attributes.push_back({gfx::ATTR_NAME_TANGENT, gfx::Format::RGBA32F, false, stream++, false, 0U});
    }
    if (geometry.colors.has_value() && !geometry.colors.value().empty()) {
        attributes.push_back({gfx::ATTR_NAME_COLOR, gfx::Format::RGBA32F, false, stream++, false, 0U});
    }
    if (geometry.customAttributes.has_value()) {
        for (const auto &ca : geometry.customAttributes.value()) {
            // Custom attributes keep their description but are re-assigned to
            // the next free stream slot.
            auto attr = ca.attr;
            attr.stream = stream++;
            attributes.emplace_back(attr);
        }
    }
    ccstd::vector<Mesh::IVertexBundle> vertexBundles;
    ccstd::vector<Mesh::ISubMesh> primitives;
    // Running byte offset into the single backing data blob.
    uint32_t dataSize = 0U;
    for (auto i = 0U; i < options.maxSubMeshes; i++) {
        Mesh::ISubMesh primitive;
        primitive.primitiveMode = geometry.primitiveMode.has_value() ? geometry.primitiveMode.value() : gfx::PrimitiveMode::TRIANGLE_LIST;
        // add vertex buffers: one bundle per attribute, sized for the maximum
        // vertex count; `count` starts at 0 and is filled on update.
        for (const auto &attr : attributes) {
            const auto &formatInfo = gfx::GFX_FORMAT_INFOS[static_cast<uint32_t>(attr.format)];
            uint32_t vertexBufferSize = options.maxSubMeshVertices * formatInfo.size;
            Mesh::IBufferView vertexView = {
                dataSize,
                vertexBufferSize,
                0U,
                formatInfo.size};
            Mesh::IVertexBundle vertexBundle = {
                0U,
                vertexView,
                {attr}};
            const auto vertexBundleIndex = static_cast<uint32_t>(vertexBundles.size());
            primitive.vertexBundelIndices.emplace_back(vertexBundleIndex);
            vertexBundles.emplace_back(vertexBundle);
            dataSize += vertexBufferSize;
        }
        // add index buffer: 16- or 32-bit depending on which index list the
        // geometry carries; no view at all when there are no indices.
        uint32_t stride = 0U;
        if (geometry.indices16.has_value() && !geometry.indices16.value().empty()) {
            stride = sizeof(uint16_t);
        } else if (geometry.indices32.has_value() && !geometry.indices32.value().empty()) {
            stride = sizeof(uint32_t);
        }
        if (stride > 0U) {
            // Align the index view to its element size before reserving it.
            dataSize += getPadding(dataSize, stride);
            uint32_t indexBufferSize = options.maxSubMeshIndices * stride;
            Mesh::IBufferView indexView = {
                dataSize,
                indexBufferSize,
                0U,
                stride};
            primitive.indexView = indexView;
            dataSize += indexBufferSize;
        }
        primitives.emplace_back(primitive);
    }
    Mesh::IDynamicInfo dynamicInfo = {options.maxSubMeshes,
                                      options.maxSubMeshVertices,
                                      options.maxSubMeshIndices};
    Mesh::IDynamicStruct dynamicStruct;
    dynamicStruct.info = dynamicInfo;
    dynamicStruct.bounds.resize(options.maxSubMeshes);
    for (auto &bound : dynamicStruct.bounds) {
        // Bounds start invalid; they are filled in later when sub meshes get data.
        bound.setValid(false);
    }
    Mesh::IStruct meshStruct;
    meshStruct.vertexBundles = vertexBundles;
    meshStruct.primitives = primitives;
    meshStruct.dynamic = std::move(dynamicStruct);
    Mesh::ICreateInfo createInfo;
    createInfo.structInfo = std::move(meshStruct);
    createInfo.data = Uint8Array(dataSize);
    return createInfo;
}
/**
 * Decompresses the zlib-deflated mesh payload in `data` in place.
 * The output buffer is sized from the sum of all buffer views (plus one
 * stride each as slack) described by `structInfo`.
 */
void MeshUtils::inflateMesh(const Mesh::IStruct &structInfo, Uint8Array &data) {
    uLongf uncompressedSize = 0U;
    for (const auto &prim : structInfo.primitives) {
        if (prim.indexView.has_value()) {
            uncompressedSize += prim.indexView->length + prim.indexView->stride;
        }
        if (prim.cluster.has_value()) {
            uncompressedSize += prim.cluster->vertexView.length + prim.cluster->vertexView.stride;
            uncompressedSize += prim.cluster->triangleView.length + prim.cluster->triangleView.stride;
            uncompressedSize += prim.cluster->clusterView.length + prim.cluster->clusterView.stride;
            uncompressedSize += prim.cluster->coneView.length + prim.cluster->coneView.stride;
        }
    }
    for (const auto &vb : structInfo.vertexBundles) {
        uncompressedSize += vb.view.length + vb.view.stride;
    }
    auto uncompressedData = Uint8Array(static_cast<uint32_t>(uncompressedSize));
    const auto res = uncompress(uncompressedData.buffer()->getData(), &uncompressedSize, data.buffer()->getData(), data.byteLength());
    // Fix: the zlib result was previously ignored; surface failures instead of
    // silently handing back a garbage buffer (mirrors decodeMesh's handling).
    if (res != Z_OK) {
        assert(false && "failed to inflate mesh");
    }
    // `uncompressedSize` now holds the actual byte count written by zlib.
    data = Uint8Array(uncompressedData.buffer(), 0, static_cast<uint32_t>(uncompressedSize));
}
/**
 * Decodes meshoptimizer-encoded vertex and index buffers in place, rebuilding
 * `data` from the decoded bytes and repointing every buffer view in
 * `structInfo` at the new layout.
 */
void MeshUtils::decodeMesh(Mesh::IStruct &structInfo, Uint8Array &data) {
    BufferBlob bufferBlob;
    for (auto &bundle : structInfo.vertexBundles) {
        auto &view = bundle.view;
        // Decoded size of this bundle; `view.length` is the encoded size.
        auto bound = view.count * view.stride;
        auto *buffer = ccnew ArrayBuffer(bound);
        auto vertex = Uint8Array(data.buffer(), view.offset, view.length);
        int res = meshopt_decodeVertexBuffer(buffer->getData(), view.count, view.stride, vertex.buffer()->getData() + vertex.byteOffset(), view.length);
        if (res < 0) {
            assert(false && "failed to decode vertex buffer");
        }
        bufferBlob.setNextAlignment(view.stride);
        // Fix: dropped an unused `Mesh::IVertexBundle vertexBundle;` local that
        // was declared here but never read or written.
        Mesh::IBufferView buffferView;
        buffferView.offset = bufferBlob.getLength();
        buffferView.length = bound;
        buffferView.count = view.count;
        buffferView.stride = view.stride;
        bufferBlob.addBuffer(buffer);
        bundle.view = buffferView;
    }
    for (auto &primitive : structInfo.primitives) {
        if (!primitive.indexView.has_value()) {
            continue;
        }
        auto view = *primitive.indexView;
        auto bound = view.count * view.stride;
        auto *buffer = ccnew ArrayBuffer(bound);
        auto index = DataView(data.buffer(), view.offset, view.length);
        int res = meshopt_decodeIndexBuffer(buffer->getData(), view.count, view.stride, index.buffer()->getData() + index.byteOffset(), view.length);
        if (res < 0) {
            assert(false && "failed to decode index buffer");
        }
        bufferBlob.setNextAlignment(view.stride);
        Mesh::IBufferView buffferView;
        buffferView.offset = bufferBlob.getLength();
        buffferView.length = bound;
        buffferView.count = view.count;
        buffferView.stride = view.stride;
        bufferBlob.addBuffer(buffer);
        primitive.indexView = buffferView;
    }
    // Swap the encoded payload for the decoded, re-packed one.
    data = Uint8Array(bufferBlob.getCombined());
}
} // namespace cc

101
cocos/3d/misc/CreateMesh.h Normal file
View File

@@ -0,0 +1,101 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "base/std/optional.h"
#include "3d/assets/Mesh.h"
#include "primitive/PrimitiveDefine.h"
namespace cc {
/// Options for MeshUtils::createMesh / createMeshInfo.
struct ICreateMeshOptions {
    /**
     * @en calculate mesh's aabb or not
     * @zh 是否计算模型的包围盒。
     */
    ccstd::optional<bool> calculateBounds;
};
/// Options for MeshUtils::createDynamicMesh / createDynamicMeshInfo; these
/// fix the worst-case sizes the dynamic mesh's buffers are allocated for.
struct ICreateDynamicMeshOptions {
    /**
     * @en max submesh count
     * @zh 最大子模型个数。
     */
    uint32_t maxSubMeshes{1U};
    /**
     * @en max submesh vertex count
     * @zh 子模型最大顶点个数。
     */
    uint32_t maxSubMeshVertices{1024U};
    /**
     * @en max submesh index count
     * @zh 子模型最大索引个数。
     */
    uint32_t maxSubMeshIndices{1024U};
};
/**
 * @en mesh utility class, use to create mesh.
 * @zh 网格工具类,用于创建网格。
 */
class MeshUtils {
public:
    /**
     * @en create a static mesh.
     * @zh 创建一个静态网格。
     */
    static Mesh *createMesh(const IGeometry &geometry, Mesh *out = nullptr, const ICreateMeshOptions &options = {});
    /**
     * @en create a static mesh ICreateInfo.
     * @zh 创建一个静态网格ICreateInfo。
     */
    static Mesh::ICreateInfo createMeshInfo(const IGeometry &geometry, const ICreateMeshOptions &options = {});
    /**
     * @en create a dynamic mesh.
     * @zh 创建一个动态网格。
     */
    static Mesh *createDynamicMesh(index_t primitiveIndex, const IDynamicGeometry &geometry, Mesh *out = nullptr, const ICreateDynamicMeshOptions &options = {});
    /**
     * @en create a dynamic mesh ICreateInfo.
     * @zh 创建一个动态网格ICreateInfo。
     */
    static Mesh::ICreateInfo createDynamicMeshInfo(const IDynamicGeometry &geometry, const ICreateDynamicMeshOptions &options = {});
    /**
     * @en in-place mesh data transforms: decompress the zlib-deflated payload
     * (inflateMesh), decode meshoptimizer-encoded buffers (decodeMesh), and
     * dequantize attribute data (dequantizeMesh — implementation not in this
     * view; presumably reverses attribute quantization, verify at definition).
     */
    static void inflateMesh(const Mesh::IStruct &structInfo, Uint8Array &data);
    static void decodeMesh(Mesh::IStruct &structInfo, Uint8Array &data);
    static void dequantizeMesh(Mesh::IStruct &structInfo, Uint8Array &data);
};
} // namespace cc

View File

@@ -0,0 +1,258 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "3d/models/BakedSkinningModel.h"
#include "3d/assets/Mesh.h"
//#include "3d/skeletal-animation/DataPoolManager.h"
#include "core/Root.h"
#include "scene/Model.h"
#include "scene/SubModel.h"
namespace {
// Sampler for the baked joint animation texture: point filtering, no mips,
// clamped addressing — texel fetches must never blend neighboring joints.
const cc::gfx::SamplerInfo JOINT_TEXTURE_SAMPLER_INFO{
    cc::gfx::Filter::POINT,
    cc::gfx::Filter::POINT,
    cc::gfx::Filter::NONE,
    cc::gfx::Address::CLAMP,
    cc::gfx::Address::CLAMP,
    cc::gfx::Address::CLAMP,
};
// Shader macro patches appended for every baked-skinning sub model
// (see getMacroPatches).
ccstd::vector<cc::scene::IMacroPatch> myPatches{
    {"CC_USE_SKINNING", true},
    {"CC_USE_BAKED_ANIMATION", true}};
// Instanced attribute name carrying the per-instance animation frame info.
const ccstd::string INST_JOINT_ANIM_INFO = "a_jointAnimInfo";
} // namespace
namespace cc {
// Marks the model as baked-skinning and reserves the 4-float joint texture
// info slot (filled by applyJointTexture: width, joint count, pixel offset,
// 1/width).
BakedSkinningModel::BakedSkinningModel()
//, _dataPoolManager(Root::getInstance()->getDataPoolManager())
{
    _type = Model::Type::BAKED_SKINNING;
    _jointMedium.jointTextureInfo.reset(4);
    // JSB uses _dataPoolManager in JS and the data is synchronized by syncDataForJS & syncAnimInfoForJS
    // _jointMedium.animInfo = _dataPoolManager->jointAnimationInfo->getData();
}
// Releases joint-medium resources (baked bounds, skinning buffer, joint
// texture) before tearing down the base model.
void BakedSkinningModel::destroy() {
    // CC_SAFE_DELETE(uploadedAnim);
    _jointMedium.boundsInfo.clear();
    if (_jointMedium.buffer != nullptr) {
        CC_SAFE_DESTROY_NULL(_jointMedium.buffer);
    }
    if (_jointMedium.texture.has_value()) {
        CC_SAFE_DELETE(_jointMedium.texture.value());
    }
    // Clear the (now deleted) texture reference from the medium.
    applyJointTexture(ccstd::nullopt);
    Super::destroy();
}
// Binds the skeleton/mesh pair and lazily creates the skinning UBO.
// NOTE(review): _skeleton/_mesh are assigned before the null check, so a
// partial bind (e.g. mesh == nullptr) still overwrites them — confirm intended.
void BakedSkinningModel::bindSkeleton(Skeleton *skeleton, Node *skinningRoot, Mesh *mesh) {
    _skeleton = skeleton;
    _mesh = mesh;
    if (skeleton == nullptr || skinningRoot == nullptr || mesh == nullptr) return;
    setTransform(skinningRoot);
    // JSB uses _dataPoolManager in JS and the data is synchronized by syncDataForJS & syncAnimInfoForJS
    // _jointMedium.animInfo = _dataPoolManager->jointAnimationInfo->getData(skinningRoot->getUuid());
    if (_jointMedium.buffer == nullptr) {
        // Uniform + transfer-destination buffer sized for the skinning UBO.
        _jointMedium.buffer = _device->createBuffer({
            gfx::BufferUsageBit::UNIFORM | gfx::BufferUsageBit::TRANSFER_DST,
            gfx::MemoryUsageBit::DEVICE,
            pipeline::UBOSkinning::size,
            pipeline::UBOSkinning::size,
        });
    }
}
// After the regular transform update, replaces the world bounds with the
// pre-baked skeleton bound of the current animation frame (in world space).
void BakedSkinningModel::updateTransform(uint32_t stamp) {
    Super::updateTransform(stamp);
    if (!_isUploadedAnim) {
        return;
    }
    IAnimInfo &animInfo = _jointMedium.animInfo;
    geometry::AABB *skelBound = nullptr;
    const float *curFrame = animInfo.curFrame;
    // float curFrame = info.data[0];
    // Current frame index, rounded to the nearest baked frame.
    auto index = static_cast<index_t>(std::roundf(*curFrame));
    if (!_jointMedium.boundsInfo.empty() && index < _jointMedium.boundsInfo.size()) {
        // NOTE(review): assumes boundsInfo[index] holds a value here — confirm
        // the baked bounds are always populated before upload.
        skelBound = &_jointMedium.boundsInfo[index].value();
    }
    if (_worldBounds && skelBound != nullptr) {
        Node *node = getTransform();
        // Transform the baked local-space bound into world space.
        skelBound->transform(node->getWorldMatrix(), _worldBounds);
        _worldBoundsDirty = true;
    }
}
// Pushes the current animation frame to the GPU: instanced sub-models receive it
// via a per-instance attribute, non-instanced ones via the anim-info buffer.
void BakedSkinningModel::updateUBOs(uint32_t stamp) {
    Super::updateUBOs(stamp);
    IAnimInfo &info = _jointMedium.animInfo;
    const int idx = _instAnimInfoIdx;
    const float *curFrame = info.curFrame;
    bool hasNonInstancingPass = false;
    for (const auto &subModel : _subModels) {
        if (idx >= 0) {
            // Instancing path: frame index lives in the instanced attribute views.
            auto &views = subModel->getInstancedAttributeBlock().views[idx];
            setTypedArrayValue(views, 0, *curFrame);
        } else {
            hasNonInstancingPass = true;
        }
    }
    const uint32_t frameDataBytes = info.frameDataBytes;
    // Only upload when the JS side marked the frame data dirty.
    if (hasNonInstancingPass && *info.dirtyForJSB != 0) {
        info.buffer->update(curFrame, frameDataBytes);
        *info.dirtyForJSB = 0;
    }
}
// Applies (or clears) a joint texture handle: stores it in the medium, refreshes
// the jointTextureInfo vector, the per-instance attributes, and every
// sub-model's descriptor-set texture binding.
void BakedSkinningModel::applyJointTexture(const ccstd::optional<IJointTextureHandle *> &texture) {
    auto oldTex = _jointMedium.texture;
    // FIX: compare the handle pointers themselves. The previous code compared
    // &oldTex.value() with &texture.value() — the address of a local copy's
    // payload vs the argument's payload — which is always "different".
    if (oldTex.has_value() && texture.has_value() && (oldTex.value() != texture.value())) {
        // _dataPoolManager->jointTexturePool->releaseHandle(oldTex.value());
    }
    _jointMedium.texture = texture;
    if (!texture.has_value()) {
        return;
    }
    auto *textureHandle = texture.value();
    auto *buffer = _jointMedium.buffer.get();
    auto &jointTextureInfo = _jointMedium.jointTextureInfo;
    jointTextureInfo[0] = static_cast<float>(textureHandle->handle.texture->getWidth());
    jointTextureInfo[1] = static_cast<float>(_skeleton->getJoints().size());
    jointTextureInfo[2] = static_cast<float>(textureHandle->pixelOffset) + 0.1F; // guard against floor() underflow
    jointTextureInfo[3] = 1 / jointTextureInfo[0]; // reciprocal width, used by the shader for texel lookup
    updateInstancedJointTextureInfo();
    if (buffer != nullptr) {
        buffer->update(&jointTextureInfo[0], jointTextureInfo.byteLength());
    }
    auto *tex = textureHandle->handle.texture;
    for (const auto &subModel : _subModels) {
        auto *descriptorSet = subModel->getDescriptorSet();
        descriptorSet->bindTexture(pipeline::JOINTTEXTURE::BINDING, tex);
    }
}
// Returns the base-class macro patches with the baked-skinning patches appended.
ccstd::vector<scene::IMacroPatch> BakedSkinningModel::getMacroPatches(index_t subModelIndex) {
    auto result = Super::getMacroPatches(subModelIndex);
    result.reserve(result.size() + myPatches.size());
    for (const auto &patch : myPatches) {
        result.emplace_back(patch);
    }
    return result;
}
// Binds the skinning UBO, the animation-info buffer, and (if present) the baked
// joint texture + point sampler into a sub-model's local descriptor set.
void BakedSkinningModel::updateLocalDescriptors(index_t subModelIndex, gfx::DescriptorSet *descriptorSet) {
    Super::updateLocalDescriptors(subModelIndex, descriptorSet);
    gfx::Buffer *buffer = _jointMedium.buffer;
    auto &texture = _jointMedium.texture;
    const IAnimInfo &animInfo = _jointMedium.animInfo;
    descriptorSet->bindBuffer(pipeline::UBOSkinningTexture::BINDING, buffer);
    descriptorSet->bindBuffer(pipeline::UBOSkinningAnimation::BINDING, animInfo.buffer);
    if (texture.has_value()) {
        // Point sampling: joint matrices must be read exactly, never filtered.
        auto *sampler = _device->getSampler(JOINT_TEXTURE_SAMPLER_INFO);
        descriptorSet->bindTexture(pipeline::JOINTTEXTURE::BINDING, texture.value()->handle.texture);
        descriptorSet->bindSampler(pipeline::JOINTTEXTURE::BINDING, sampler);
    }
}
// Re-resolves where the per-instance joint-anim-info attribute landed after the
// attribute layout changed, then refreshes its contents.
void BakedSkinningModel::updateInstancedAttributes(const ccstd::vector<gfx::Attribute> &attributes, scene::SubModel *subModel) {
    Super::updateInstancedAttributes(attributes, subModel);
    const index_t animInfoSlot = subModel->getInstancedAttributeIndex(INST_JOINT_ANIM_INFO);
    _instAnimInfoIdx = animInfoSlot;
    updateInstancedJointTextureInfo();
}
void BakedSkinningModel::updateInstancedJointTextureInfo() {
const auto &jointTextureInfo = _jointMedium.jointTextureInfo;
const IAnimInfo &animInfo = _jointMedium.animInfo;
const index_t idx = _instAnimInfoIdx;
for (const auto &subModel : _subModels) {
auto &views = subModel->getInstancedAttributeBlock().views;
if (idx >= 0 && !views.empty()) {
auto &view = views[idx];
setTypedArrayValue(view, 0, *animInfo.curFrame); //NOTE: curFrame is only used in JSB.
setTypedArrayValue(view, 1, jointTextureInfo[1]);
setTypedArrayValue(view, 2, jointTextureInfo[2]);
}
}
}
// Wires the JS-side animation-info storage (buffer, frame pointer, dirty flag)
// into the native joint medium so both sides share the same memory.
void BakedSkinningModel::syncAnimInfoForJS(gfx::Buffer *buffer, const Float32Array &data, Uint8Array &dirty) {
    auto &info = _jointMedium.animInfo;
    info.buffer = buffer;
    info.curFrame = &data[0];
    info.frameDataBytes = data.byteLength();
    info.dirtyForJSB = &dirty[0];
}
// One-shot sync of all baked-skinning state computed on the JS side: per-frame
// bounds, model bounds, joint texture info vector, the joint texture itself,
// and the shared animation frame data.
void BakedSkinningModel::syncDataForJS(const ccstd::vector<ccstd::optional<geometry::AABB>> &boundsInfo,
                                       const ccstd::optional<geometry::AABB> &modelBound,
                                       float jointTextureInfo0,
                                       float jointTextureInfo1,
                                       float jointTextureInfo2,
                                       float jointTextureInfo3,
                                       gfx::Texture *tex,
                                       const Float32Array &animInfoData) {
    _jointMedium.boundsInfo = boundsInfo;
    if (modelBound.has_value()) {
        const geometry::AABB &modelBounldValue = modelBound.value();
        _modelBounds->set(modelBounldValue.center, modelBounldValue.halfExtents);
    } else {
        _modelBounds = nullptr;
    }
    _jointMedium.jointTextureInfo[0] = jointTextureInfo0;
    _jointMedium.jointTextureInfo[1] = jointTextureInfo1;
    _jointMedium.jointTextureInfo[2] = jointTextureInfo2;
    _jointMedium.jointTextureInfo[3] = jointTextureInfo3;
    // Share the JS-side frame array directly; no copy is made.
    _jointMedium.animInfo.curFrame = &animInfoData[0];
    _jointMedium.animInfo.frameDataBytes = animInfoData.byteLength();
    // Replace any previous handle wrapper before adopting the new texture.
    if (_jointMedium.texture.has_value()) {
        delete _jointMedium.texture.value();
        _jointMedium.texture = ccstd::nullopt;
    }
    IJointTextureHandle *textureInfo = IJointTextureHandle::createJoinTextureHandle();
    textureInfo->handle.texture = tex;
    _jointMedium.texture = textureInfo;
    updateInstancedJointTextureInfo();
    auto *buffer = _jointMedium.buffer.get();
    if (buffer != nullptr) {
        buffer->update(&_jointMedium.jointTextureInfo[0], _jointMedium.jointTextureInfo.byteLength());
    }
    // Rebind the new texture on every sub-model's descriptor set.
    for (const auto &subModel : _subModels) {
        auto *descriptorSet = subModel->getDescriptorSet();
        descriptorSet->bindTexture(pipeline::JOINTTEXTURE::BINDING, tex);
    }
}
} // namespace cc

View File

@@ -0,0 +1,101 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <utility>
#include "3d/assets/Skeleton.h"
#include "3d/models/MorphModel.h"
#include "3d/skeletal-animation/SkeletalAnimationUtils.h"
#include "gfx-base/GFXDef-common.h"
namespace cc {
namespace gfx {
class Texture;
}
class DataPoolManager;
// Aggregates all GPU/CPU resources for a baked-skinning model.
struct BakedJointInfo {
    IntrusivePtr<gfx::Buffer> buffer;                         // UBO holding jointTextureInfo
    Float32Array jointTextureInfo;                            // [width, jointCount, pixelOffset, 1/width]
    ccstd::optional<IJointTextureHandle *> texture;           // baked joint matrices texture (owned)
    IAnimInfo animInfo;                                       // frame index shared with JS
    ccstd::vector<ccstd::optional<geometry::AABB>> boundsInfo; // per-frame skeleton bounds
};
/**
 * Model whose skeletal animation has been pre-baked into a joint texture.
 * Per-frame cost is reduced to selecting a frame index; data is produced on
 * the JS side and mirrored in via the syncXXXForJS entry points.
 */
class BakedSkinningModel final : public MorphModel {
public:
    using Super = MorphModel;
    BakedSkinningModel();
    ~BakedSkinningModel() override = default;
    void destroy() override;
    ccstd::vector<scene::IMacroPatch> getMacroPatches(index_t subModelIndex) override;
    void updateLocalDescriptors(index_t subModelIndex, gfx::DescriptorSet *descriptorSet) override;
    void updateTransform(uint32_t stamp) override;
    void updateUBOs(uint32_t stamp) override;
    void updateInstancedAttributes(const ccstd::vector<gfx::Attribute> &attributes, scene::SubModel *subModel) override;
    // Refreshes the per-instance joint-anim-info attribute on all sub-models.
    void updateInstancedJointTextureInfo();
    // void uploadAnimation(AnimationClip *anim); // TODO(xwx): AnimationClip not define
    // Associates this model with a skeleton/mesh rooted at skinningRoot.
    void bindSkeleton(Skeleton *skeleton, Node *skinningRoot, Mesh *mesh);
    // Copies the given bounds into the model bounds (no-op on nullptr).
    inline void updateModelBounds(geometry::AABB *modelBounds) {
        if (modelBounds == nullptr) {
            return;
        }
        _modelBounds->setValid(true);
        _modelBounds->set(modelBounds->getCenter(), modelBounds->getHalfExtents());
    }
    // JSB sync entry points: share JS-side typed arrays / buffers with native.
    void syncAnimInfoForJS(gfx::Buffer *buffer, const Float32Array &data, Uint8Array &dirty);
    void syncDataForJS(const ccstd::vector<ccstd::optional<geometry::AABB>> &boundsInfo,
                       const ccstd::optional<geometry::AABB> &modelBound,
                       float jointTextureInfo0,
                       float jointTextureInfo1,
                       float jointTextureInfo2,
                       float jointTextureInfo3,
                       gfx::Texture *tex,
                       const Float32Array &animInfoData);
    void setUploadedAnimForJS(bool value) { _isUploadedAnim = value; }
protected:
    void applyJointTexture(const ccstd::optional<IJointTextureHandle *> &texture);
private:
    BakedJointInfo _jointMedium;
    index_t _instAnimInfoIdx{CC_INVALID_INDEX}; // slot of INST_JOINT_ANIM_INFO, or CC_INVALID_INDEX
    // IntrusivePtr<DataPoolManager> _dataPoolManager;
    IntrusivePtr<Skeleton> _skeleton;
    IntrusivePtr<Mesh> _mesh;
    // AnimationClip* uploadedAnim;
    bool _isUploadedAnim{false};
    CC_DISALLOW_COPY_MOVE_ASSIGN(BakedSkinningModel);
};
} // namespace cc

View File

@@ -0,0 +1,64 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "3d/models/MorphModel.h"
namespace cc {
// Returns morph patches (if a morph rendering instance is attached) followed by
// the base-class patches; otherwise just the base-class patches.
ccstd::vector<scene::IMacroPatch> MorphModel::getMacroPatches(index_t subModelIndex) {
    auto basePatches = Super::getMacroPatches(subModelIndex);
    if (!_morphRenderingInstance) {
        return basePatches;
    }
    ccstd::vector<scene::IMacroPatch> morphPatches = _morphRenderingInstance->requiredPatches(subModelIndex);
    if (morphPatches.empty()) {
        return basePatches;
    }
    if (!basePatches.empty()) {
        morphPatches.reserve(morphPatches.size() + basePatches.size());
        morphPatches.insert(morphPatches.end(), basePatches.begin(), basePatches.end());
    }
    return morphPatches;
}
// Initializes a sub-model, routing the material through launderMaterial() first.
void MorphModel::initSubModel(index_t idx, RenderingSubMesh *subMeshData, Material *mat) {
    Material *laundered = launderMaterial(mat);
    Super::initSubModel(idx, subMeshData, laundered);
}
// Tears down the base model, then drops the morph rendering instance
// (the IntrusivePtr releases its reference on assignment).
void MorphModel::destroy() {
    Super::destroy();
    _morphRenderingInstance = nullptr; //minggo: should delete it?
}
// Replaces a sub-model's material, routing it through launderMaterial() first.
void MorphModel::setSubModelMaterial(index_t idx, Material *mat) {
    Material *laundered = launderMaterial(mat);
    Super::setSubModelMaterial(idx, laundered);
}
// Updates local descriptors, letting the morph rendering instance adapt the
// pipeline state when one is attached.
void MorphModel::updateLocalDescriptors(index_t subModelIndex, gfx::DescriptorSet *descriptorSet) {
    Super::updateLocalDescriptors(subModelIndex, descriptorSet);
    if (!_morphRenderingInstance) {
        return;
    }
    _morphRenderingInstance->adaptPipelineState(subModelIndex, descriptorSet);
}
} // namespace cc

View File

@@ -0,0 +1,57 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <set>
#include "3d/assets/MorphRendering.h"
#include "scene/Define.h"
#include "scene/Model.h"
namespace cc {
/**
 * Model that optionally delegates to a MorphRenderingInstance for blend-shape
 * rendering. With no instance attached it behaves exactly like scene::Model.
 */
class MorphModel : public scene::Model {
public:
    using Super = scene::Model;
    MorphModel() = default;
    ~MorphModel() override = default;
    CC_DISALLOW_COPY_MOVE_ASSIGN(MorphModel);
    ccstd::vector<scene::IMacroPatch> getMacroPatches(index_t subModelIndex) override;
    void initSubModel(index_t idx, RenderingSubMesh *subMeshData, Material *mat) override;
    void destroy() override;
    void setSubModelMaterial(index_t idx, Material *mat) override;
    inline void setMorphRendering(MorphRenderingInstance *morphRendering) { _morphRenderingInstance = morphRendering; }
protected:
    void updateLocalDescriptors(index_t subModelIndex, gfx::DescriptorSet *descriptorSet) override;
private:
    // Currently the identity; kept as an extension point for material rewriting.
    inline Material *launderMaterial(Material *material) { return material; } //NOLINT(readability-convert-member-functions-to-static)
    IntrusivePtr<MorphRenderingInstance> _morphRenderingInstance;
};
} // namespace cc

View File

@@ -0,0 +1,359 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "3d/models/SkinningModel.h"
#include <utility>
#include "3d/assets/Mesh.h"
#include "3d/assets/Skeleton.h"
#include "core/platform/Debug.h"
#include "core/scene-graph/Node.h"
#include "renderer/gfx-base/GFXBuffer.h"
#include "scene/Pass.h"
#include "scene/RenderScene.h"
// Dimensions of the real-time joint texture: 256 joints wide, 3 rows — each
// joint contributes a 3x4 matrix, one vec4 row per texture row.
const uint32_t REALTIME_JOINT_TEXTURE_WIDTH = 256;
const uint32_t REALTIME_JOINT_TEXTURE_HEIGHT = 3;
namespace {
// For every joint map that references `targetJoint`, records the map index in
// `outBuffers` and the joint's position within that map in `outIndices`
// (the two vectors stay parallel).
void getRelevantBuffers(ccstd::vector<index_t> &outIndices, ccstd::vector<int32_t> &outBuffers, const ccstd::vector<ccstd::vector<int32_t>> &jointMaps, int32_t targetJoint) {
    // size_t counters: avoid the signed/unsigned comparison against .size().
    for (size_t i = 0; i < jointMaps.size(); i++) {
        index_t index = CC_INVALID_INDEX;
        for (size_t j = 0; j < jointMaps[i].size(); j++) {
            if (jointMaps[i][j] == targetJoint) {
                index = static_cast<index_t>(j);
                break;
            }
        }
        if (index >= 0) {
            outBuffers.emplace_back(static_cast<int32_t>(i));
            outIndices.emplace_back(index);
        }
    }
}
// Shader macro patch sets shared by all skinning models: UBO path vs
// real-time joint-texture path.
ccstd::vector<cc::scene::IMacroPatch> uniformPatches{{"CC_USE_SKINNING", true}, {"CC_USE_REAL_TIME_JOINT_TEXTURE", false}};
ccstd::vector<cc::scene::IMacroPatch> texturePatches{{"CC_USE_SKINNING", true}, {"CC_USE_REAL_TIME_JOINT_TEXTURE", true}};
} // namespace
namespace cc {
// Constructs a real-time skinning model (joint matrices recomputed every frame).
SkinningModel::SkinningModel() {
    _type = Model::Type::SKINNING;
}
// Frees CPU joint arrays, GPU buffers, and the real-time joint texture.
SkinningModel::~SkinningModel() {
    releaseData();
}
// Unbinds the skeleton (releasing per-joint transforms), frees owned data,
// then runs the base Model teardown.
void SkinningModel::destroy() {
    bindSkeleton(nullptr, nullptr, nullptr);
    releaseData();
    Super::destroy();
}
// (Re)binds a skeleton/mesh pair: clears previous joint bindings, chooses the
// uniform vs real-time-texture path based on joint count, allocates buffers,
// and builds a JointInfo entry per resolvable joint. Passing nulls only clears.
void SkinningModel::bindSkeleton(Skeleton *skeleton, Node *skinningRoot, Mesh *mesh) {
    for (const JointInfo &joint : _joints) {
        deleteTransform(joint.target);
    }
    _bufferIndices.clear();
    _joints.clear();
    if (!skeleton || !skinningRoot || !mesh) return;
    auto jointCount = static_cast<uint32_t>(skeleton->getJoints().size());
    // Too many joints for the uniform block -> fall back to the joint texture.
    _realTimeTextureMode = pipeline::SkinningJointCapacity::jointUniformCapacity < jointCount;
    setTransform(skinningRoot);
    auto boneSpaceBounds = mesh->getBoneSpaceBounds(skeleton);
    const auto &jointMaps = mesh->getStruct().jointMaps;
    // One buffer per joint map, or a single buffer when the mesh has none.
    ensureEnoughBuffers((jointMaps.has_value() && !jointMaps->empty()) ? static_cast<uint32_t>(jointMaps->size()) : 1);
    _bufferIndices = mesh->getJointBufferIndices();
    initRealTimeJointTexture();
    for (index_t index = 0; index < skeleton->getJoints().size(); ++index) {
        geometry::AABB *bound = boneSpaceBounds[index];
        auto *target = skinningRoot->getChildByPath(skeleton->getJoints()[index]);
        // Joints without bounds or a scene-graph target contribute nothing.
        if (!bound || !target) continue;
        auto *transform = cc::getTransform(target, skinningRoot);
        const Mat4 &bindPose = skeleton->getBindposes()[index];
        ccstd::vector<index_t> indices;
        ccstd::vector<index_t> buffers;
        if (!jointMaps.has_value()) {
            indices.emplace_back(index);
            buffers.emplace_back(0);
        } else {
            // Find every joint map (and the slot within it) that uses this joint.
            getRelevantBuffers(indices, buffers, jointMaps.value(), index);
        }
        JointInfo jointInfo;
        jointInfo.bound = bound;
        jointInfo.target = target;
        jointInfo.bindpose = bindPose;
        jointInfo.transform = transform;
        jointInfo.buffers = std::move(buffers);
        jointInfo.indices = std::move(indices);
        _joints.emplace_back(std::move(jointInfo));
    }
}
// Recomputes the model/world bounds as the union of all joint-space bounds
// transformed by each joint's current world matrix.
void SkinningModel::updateTransform(uint32_t stamp) {
    auto *root = getTransform();
    if (root->getChangedFlags() || root->isTransformDirty()) {
        root->updateWorldTransform();
        _localDataUpdated = true;
    }
    Vec3 v3Min{INFINITY, INFINITY, INFINITY};
    Vec3 v3Max{-INFINITY, -INFINITY, -INFINITY};
    geometry::AABB ab1; // scratch: joint bound in world space
    Vec3 v31;
    Vec3 v32;
    for (JointInfo &jointInfo : _joints) {
        auto &transform = jointInfo.transform;
        Mat4 worldMatrix = cc::getWorldMatrix(transform, static_cast<int32_t>(stamp));
        jointInfo.bound->transform(worldMatrix, &ab1);
        ab1.getBoundary(&v31, &v32);
        Vec3::min(v3Min, v31, &v3Min);
        Vec3::max(v3Max, v32, &v3Max);
    }
    if (_modelBounds && _modelBounds->isValid() && _worldBounds) {
        geometry::AABB::fromPoints(v3Min, v3Max, _modelBounds);
        // Model bounds are kept in root-local space; derive world bounds from them.
        _modelBounds->transform(root->getWorldMatrix(), _worldBounds);
        _worldBoundsDirty = true;
    }
}
// Recomputes every joint's skinning matrix (world * bindpose), scatters it into
// the CPU-side arrays, then uploads either to the UBOs or the joint texture.
void SkinningModel::updateUBOs(uint32_t stamp) {
    Super::updateUBOs(stamp);
    uint32_t bIdx = 0;
    Mat4 mat4;
    for (const JointInfo &jointInfo : _joints) {
        Mat4::multiply(jointInfo.transform->world, jointInfo.bindpose, &mat4);
        // A joint may appear in several joint maps; write it into each buffer
        // at its per-map slot (indices[bIdx] pairs with buffers[bIdx]).
        for (uint32_t buffer : jointInfo.buffers) {
            uploadJointData(jointInfo.indices[bIdx] * 12, mat4, _dataArray[buffer]);
            bIdx++;
        }
        bIdx = 0;
    }
    if (_realTimeTextureMode) {
        updateRealTimeJointTextureBuffer();
    } else {
        bIdx = 0;
        for (gfx::Buffer *buffer : _buffers) {
            buffer->update(_dataArray[bIdx], buffer->getSize());
            bIdx++;
        }
    }
}
// Initializes a sub-model using the joint-mapped vertex buffers, then restores
// the original buffers on the shared sub-mesh (the swap is deliberately scoped
// to the Super::initSubModel call).
void SkinningModel::initSubModel(index_t idx, RenderingSubMesh *subMeshData, Material *mat) {
    const auto &original = subMeshData->getVertexBuffers();
    auto &iaInfo = subMeshData->getIaInfo();
    iaInfo.vertexBuffers = subMeshData->getJointMappedBuffers();
    Super::initSubModel(idx, subMeshData, mat);
    iaInfo.vertexBuffers = original;
}
// Returns the skinning macro patches (uniform vs real-time-texture variant)
// prepended to the base-class patches.
ccstd::vector<scene::IMacroPatch> SkinningModel::getMacroPatches(index_t subModelIndex) {
    auto patches = Super::getMacroPatches(subModelIndex);
    // Reference the file-scope patch list instead of copying it.
    const auto &myPatches = _realTimeTextureMode ? texturePatches : uniformPatches;
    if (!patches.empty()) {
        patches.reserve(myPatches.size() + patches.size());
        patches.insert(std::begin(patches), std::begin(myPatches), std::end(myPatches));
        return patches;
    }
    return myPatches;
}
// Packs a matrix into 12 floats at dst[base..base+11]: the 3x3 rotation/scale
// block plus the translation stored in elements 3, 7 and 11 (row-major 3x4).
void SkinningModel::uploadJointData(uint32_t base, const Mat4 &mat, float *dst) {
    float *out = dst + base;
    memcpy(reinterpret_cast<void *>(out), mat.m, sizeof(float) * 12);
    out[3] = mat.m[12];
    out[7] = mat.m[13];
    out[11] = mat.m[14];
}
// Binds either the skinning UBO or the real-time joint texture for the
// sub-model, selected by the buffer index the mesh assigned to it.
void SkinningModel::updateLocalDescriptors(index_t submodelIdx, gfx::DescriptorSet *descriptorset) {
    Super::updateLocalDescriptors(submodelIdx, descriptorset);
    uint32_t idx = _bufferIndices[submodelIdx];
    if (!_realTimeTextureMode) {
        gfx::Buffer *buffer = _buffers[idx];
        if (buffer) {
            descriptorset->bindBuffer(pipeline::UBOSkinning::BINDING, buffer);
        }
    } else {
        bindRealTimeJointTexture(idx, descriptorset);
    }
}
// Real-time skinning is incompatible with instancing/batching; warn when the
// first pass requests any batching scheme, then defer to the base class.
void SkinningModel::updateInstancedAttributes(const ccstd::vector<gfx::Attribute> &attributes, scene::SubModel *subModel) {
    auto *pass = subModel->getPass(0);
    if (pass->getBatchingScheme() != scene::BatchingSchemes::NONE) {
        // TODO(holycanvas): #9203 better to print the complete path instead of only the current node
        debug::warnID(3936, getNode()->getName());
        // FIX: the old message said "is none" while the warning fires precisely
        // when the batching scheme is NOT none.
        CC_LOG_WARNING("pass batchingScheme is not none, %s", getNode()->getName().c_str());
    }
    Super::updateInstancedAttributes(attributes, subModel);
}
// Resizes the per-joint-map storage to `count` entries. In UBO mode this also
// (re)creates the GPU buffers; in real-time-texture mode only the CPU float
// arrays are allocated (the texture is created in initRealTimeJointTexture).
void SkinningModel::ensureEnoughBuffers(uint32_t count) {
    if (!_buffers.empty()) {
        for (gfx::Buffer *buffer : _buffers) {
            CC_SAFE_DESTROY(buffer);
        }
        _buffers.clear();
    }
    if (!_dataArray.empty()) {
        for (auto *data : _dataArray) {
            CC_SAFE_DELETE_ARRAY(data);
        }
        _dataArray.clear();
    }
    _dataArray.resize(count);
    if (!_realTimeTextureMode) {
        _buffers.resize(count);
        uint32_t length = pipeline::UBOSkinning::count; // floats per skinning UBO
        for (uint32_t i = 0; i < count; i++) {
            _buffers[i] = _device->createBuffer({
                gfx::BufferUsageBit::UNIFORM | gfx::BufferUsageBit::TRANSFER_DST,
                gfx::MemoryUsageBit::HOST | gfx::MemoryUsageBit::DEVICE,
                pipeline::UBOSkinning::size,
                pipeline::UBOSkinning::size,
            });
            _dataArray[i] = new float[length];
            memset(_dataArray[i], 0, sizeof(float) * length);
        }
    } else {
        // 4 floats per texel, one full joint texture per joint map.
        uint32_t length = 4 * REALTIME_JOINT_TEXTURE_WIDTH * REALTIME_JOINT_TEXTURE_HEIGHT;
        for (uint32_t i = 0; i < count; i++) {
            if (_dataArray[i] == nullptr) {
                _dataArray[i] = new float[length];
                memset(_dataArray[i], 0, sizeof(float) * length);
            }
        }
    }
}
// Creates one GPU texture per joint map plus a shared staging buffer for the
// real-time joint-texture path; no-op when in UBO mode.
void SkinningModel::initRealTimeJointTexture() {
    CC_SAFE_DELETE(_realTimeJointTexture);
    if (!_realTimeTextureMode) return;
    _realTimeJointTexture = ccnew RealTimeJointTexture;
    auto *device = gfx::Device::getInstance();
    uint32_t texWidth = REALTIME_JOINT_TEXTURE_WIDTH;
    uint32_t texHeight = REALTIME_JOINT_TEXTURE_HEIGHT;
    gfx::Format textureFormat = gfx::Format::RGBA32F;
    gfx::FormatFeature formatFeature = device->getFormatFeatures(gfx::Format::RGBA32F);
    if (!(formatFeature & gfx::FormatFeature::SAMPLED_TEXTURE)) {
        // No float-texture sampling: fall back to RGBA8, 4x wider so the byte
        // count per row stays the same.
        textureFormat = gfx::Format::RGBA8;
        texWidth = texWidth * 4;
    }
    uint32_t length = 4 * REALTIME_JOINT_TEXTURE_WIDTH * REALTIME_JOINT_TEXTURE_HEIGHT;
    const size_t count = _dataArray.size();
    for (size_t i = 0; i < count; i++) {
        gfx::TextureInfo textureInfo;
        textureInfo.width = texWidth;
        textureInfo.height = texHeight;
        textureInfo.usage = gfx::TextureUsageBit::STORAGE | gfx::TextureUsageBit::SAMPLED | gfx::TextureUsageBit::TRANSFER_SRC | gfx::TextureUsageBit::TRANSFER_DST;
        textureInfo.format = textureFormat;
        IntrusivePtr<gfx::Texture> texture = device->createTexture(textureInfo);
        _realTimeJointTexture->textures.push_back(texture);
    }
    // NOTE(review): raw new — presumably RealTimeJointTexture frees `buffer` in
    // its destructor; confirm against its declaration.
    _realTimeJointTexture->buffer = new float[length];
}
// Binds the idx-th real-time joint texture with a point/clamp sampler; silently
// returns when no texture exists for that index.
void SkinningModel::bindRealTimeJointTexture(uint32_t idx, gfx::DescriptorSet *descriptorset) {
    if (_realTimeJointTexture->textures.size() < idx + 1) return;
    gfx::Texture *texture = _realTimeJointTexture->textures[idx];
    if (texture) {
        // Point sampling: joint matrices must be read exactly, never filtered.
        gfx::SamplerInfo info{
            gfx::Filter::POINT,
            gfx::Filter::POINT,
            gfx::Filter::NONE,
            gfx::Address::CLAMP,
            gfx::Address::CLAMP,
            gfx::Address::CLAMP,
        };
        auto *device = gfx::Device::getInstance();
        auto *sampler = device->getSampler(info);
        descriptorset->bindTexture(pipeline::REALTIMEJOINTTEXTURE::BINDING, texture);
        descriptorset->bindSampler(pipeline::REALTIMEJOINTTEXTURE::BINDING, sampler);
    }
}
// Re-packs each joint map's 12-floats-per-joint array into the 3-row texture
// layout (row r holds vec4 r of every joint's matrix) and uploads it.
void SkinningModel::updateRealTimeJointTextureBuffer() {
    uint32_t bIdx = 0;
    uint32_t width = REALTIME_JOINT_TEXTURE_WIDTH;
    uint32_t height = REALTIME_JOINT_TEXTURE_HEIGHT;
    for (const auto &texture : _realTimeJointTexture->textures) {
        auto *buffer = _realTimeJointTexture->buffer;
        auto *dst = buffer;
        auto *src = _dataArray[bIdx];
        uint32_t count = width;
        for (uint32_t i = 0; i < count; i++) {
            // Each memcpy moves one vec4 (16 bytes); joint i's three rows land
            // at columns i of texture rows 0, 1 and 2.
            dst = buffer + (4 * i);
            memcpy(dst, src, 16);
            src = src + 4;
            dst = buffer + (4 * (i + width));
            memcpy(dst, src, 16);
            src = src + 4;
            dst = buffer + 4 * (i + 2 * width);
            memcpy(dst, src, 16);
            src = src + 4;
        }
        uint32_t buffOffset = 0;
        gfx::TextureSubresLayers layer;
        gfx::Offset texOffset;
        gfx::Extent extent{width, height, 1};
        gfx::BufferTextureCopy region{
            buffOffset,
            width,
            height,
            texOffset,
            extent,
            layer};
        auto *device = gfx::Device::getInstance();
        device->copyBuffersToTexture(reinterpret_cast<const uint8_t *const *>(&buffer), texture, &region, 1);
        bIdx++;
    }
}
// Frees CPU-side joint arrays, the real-time joint texture wrapper, and all
// GPU skinning buffers. Safe to call repeatedly.
void SkinningModel::releaseData() {
    for (auto *data : _dataArray) {
        CC_SAFE_DELETE_ARRAY(data);
    }
    _dataArray.clear();
    CC_SAFE_DELETE(_realTimeJointTexture);
    for (gfx::Buffer *buffer : _buffers) {
        CC_SAFE_DESTROY(buffer);
    }
    _buffers.clear();
}
} // namespace cc

View File

@@ -0,0 +1,85 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <utility>
#include "3d/models/MorphModel.h"
#include "base/std/container/array.h"
#include "core/animation/SkeletalAnimationUtils.h"
#include "math/Mat4.h"
#include "renderer/gfx-base/GFXDef-common.h"
#include "renderer/pipeline/Define.h"
namespace cc {
class Skeleton;
namespace geometry {
class AABB;
}
// Per-joint binding for real-time skinning.
struct JointInfo {
    geometry::AABB *bound{nullptr};        // joint-space bounds from the mesh
    Node *target{nullptr};                 // scene-graph node driving this joint
    Mat4 bindpose;                         // inverse bind matrix
    IntrusivePtr<IJointTransform> transform; // cached transform chain to the skinning root
    ccstd::vector<index_t> buffers;        // joint maps referencing this joint (parallel to `indices`)
    ccstd::vector<index_t> indices;        // slot within each referencing joint map
};
/**
 * Model whose joint matrices are recomputed on the CPU every frame. Uses a
 * skinning UBO when the joint count fits the uniform capacity, otherwise a
 * "real-time" joint texture.
 */
class SkinningModel final : public MorphModel {
public:
    using Super = MorphModel;
    SkinningModel();
    ~SkinningModel() override;
    void updateLocalDescriptors(index_t submodelIdx, gfx::DescriptorSet *descriptorset) override;
    void updateTransform(uint32_t stamp) override;
    void updateUBOs(uint32_t stamp) override;
    void destroy() override;
    void initSubModel(index_t idx, RenderingSubMesh *subMeshData, Material *mat) override;
    ccstd::vector<scene::IMacroPatch> getMacroPatches(index_t subModelIndex) override;
    void updateInstancedAttributes(const ccstd::vector<gfx::Attribute> &attributes, scene::SubModel *subModel) override;
    // Associates this model with a skeleton/mesh rooted at skinningRoot.
    void bindSkeleton(Skeleton *skeleton, Node *skinningRoot, Mesh *mesh);
private:
    // Packs a matrix as 12 floats (3x4, translation in elements 3/7/11) at dst[base].
    static void uploadJointData(uint32_t base, const Mat4 &mat, float *dst);
    void ensureEnoughBuffers(uint32_t count);
    void updateRealTimeJointTextureBuffer();
    void initRealTimeJointTexture();
    void bindRealTimeJointTexture(uint32_t idx, gfx::DescriptorSet *descriptorset);
    void releaseData();
    ccstd::vector<index_t> _bufferIndices;           // per-sub-model joint buffer index
    ccstd::vector<IntrusivePtr<gfx::Buffer>> _buffers; // skinning UBOs (UBO mode only)
    ccstd::vector<JointInfo> _joints;
    ccstd::vector<float *> _dataArray;               // CPU-side joint matrices, one array per joint map
    bool _realTimeTextureMode = false;               // true when joints exceed UBO capacity
    RealTimeJointTexture *_realTimeJointTexture = nullptr;
    CC_DISALLOW_COPY_MOVE_ASSIGN(SkinningModel);
};
} // namespace cc

View File

@@ -0,0 +1,513 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "3d/skeletal-animation/SkeletalAnimationUtils.h"
#include "3d/assets/Mesh.h"
#include "core/scene-graph/Node.h"
#include "renderer/pipeline/Define.h"
namespace {
const float INF = std::numeric_limits<float>::infinity();
// Picks the joint-texture pixel format: full-precision RGBA32F when the device
// can sample it, otherwise RGBA8 (callers compensate with a wider texture).
cc::gfx::Format selectJointsMediumFormat(cc::gfx::Device *device) {
    if (static_cast<uint32_t>(device->getFormatFeatures(cc::gfx::Format::RGBA32F) & cc::gfx::FormatFeature::SAMPLED_TEXTURE)) {
        return cc::gfx::Format::RGBA32F;
    }
    return cc::gfx::Format::RGBA8;
}
// Linear Blending Skinning
// Writes the 3x4 skinning matrix as three vec4 rows at out[base..base+11],
// translation in elements 3/7/11. `out` is taken by value — Float32Array is a
// view over a shared buffer, so writes still reach the caller's data.
void uploadJointDataLBS(cc::Float32Array out, uint32_t base, const cc::Mat4 &mat, bool /*firstBone*/) {
    out[base + 0] = mat.m[0];
    out[base + 1] = mat.m[1];
    out[base + 2] = mat.m[2];
    out[base + 3] = mat.m[12];
    out[base + 4] = mat.m[4];
    out[base + 5] = mat.m[5];
    out[base + 6] = mat.m[6];
    out[base + 7] = mat.m[13];
    out[base + 8] = mat.m[8];
    out[base + 9] = mat.m[9];
    out[base + 10] = mat.m[10];
    out[base + 11] = mat.m[14];
}
// File-scope scratch storage for uploadJointDataDQS; NOT thread-safe —
// skeletal uploads are assumed to run on a single thread.
cc::Quaternion dq0; // reference rotation for hemisphere/sign consistency
cc::Quaternion dq1; // dual part of the dual quaternion
cc::Vec3 v31;       // translation
cc::Quaternion qt1; // rotation
cc::Vec3 v32;       // scale
// 4D dot product of two quaternions (used for sign consistency in DQS).
float dot(const cc::Quaternion &a, const cc::Quaternion &b) {
    const float xy = a.x * b.x + a.y * b.y;
    const float zw = a.z * b.z + a.w * b.w;
    return xy + zw;
}
// Scales every component of `a` by `b`, writing the result into `out`.
void multiplyScalar(const cc::Quaternion &a, float b, cc::Quaternion *out) {
    out->x = b * a.x;
    out->y = b * a.y;
    out->z = b * a.z;
    out->w = b * a.w;
}
// Dual Quaternion Skinning
// Decomposes the matrix into rotation/translation/scale, converts to a dual
// quaternion (with sign made consistent with the first bone), and writes
// 11 floats at out[base]: real quat, dual quat, scale. Uses file-scope
// scratch globals — single-threaded only.
void uploadJointDataDQS(cc::Float32Array out, uint32_t base, cc::Mat4 &mat, bool firstBone) {
    cc::Mat4::toRTS(mat, &qt1, &v31, &v32);
    // // sign consistency
    if (firstBone) {
        dq0 = qt1;
    } else if (dot(dq0, qt1) < 0) {
        // Flip to the same hemisphere as the first bone to avoid blending artifacts.
        multiplyScalar(qt1, -1, &qt1);
    }
    // conversion
    dq1.x = v31.x;
    dq1.y = v31.y;
    dq1.z = v31.z;
    dq1.w = 0;
    // dual part = 0.5 * t * q (t as a pure quaternion)
    multiplyScalar(dq1 * qt1, 0.5, &dq1);
    // upload
    out[base + 0] = qt1.x;
    out[base + 1] = qt1.y;
    out[base + 2] = qt1.z;
    out[base + 3] = qt1.w;
    out[base + 4] = dq1.x;
    out[base + 5] = dq1.y;
    out[base + 6] = dq1.z;
    out[base + 7] = dq1.w;
    out[base + 8] = v32.x;
    out[base + 9] = v32.y;
    out[base + 10] = v32.z;
}
// change here and cc-skinning.chunk to use other skinning algorithms
constexpr auto UPLOAD_JOINT_DATA = uploadJointDataLBS;
#if CC_EDITOR
const uint32_t MINIMUM_JOINT_TEXTURE_SIZE = 2040;
#else
const uint32_t MINIMUM_JOINT_TEXTURE_SIZE = 480; // have to be multiples of 12
#endif
uint32_t roundUpTextureSize(uint32_t targetLength, uint32_t formatSize) {
double formatScale = 4 / std::sqrt(formatSize);
return static_cast<uint32_t>(std::ceil(std::max(MINIMUM_JOINT_TEXTURE_SIZE * formatScale, static_cast<double>(targetLength)) / 12) * 12);
}
const cc::gfx::SamplerInfo JOINT_TEXTURE_SAMPLER_INFO{
cc::gfx::Filter::POINT,
cc::gfx::Filter::POINT,
cc::gfx::Filter::NONE,
cc::gfx::Address::CLAMP,
cc::gfx::Address::CLAMP,
cc::gfx::Address::CLAMP,
};
cc::Mat4 *getWorldTransformUntilRoot(cc::Node *target, cc::Node *root, cc::Mat4 *outMatrix) {
outMatrix->setIdentity();
cc::Mat4 mat4;
while (target != root) {
cc::Mat4::fromRTS(target->getRotation(), target->getPosition(), target->getScale(), &mat4);
cc::Mat4::multiply(*outMatrix, mat4, outMatrix);
target = target->getParent();
}
return outMatrix;
}
} // namespace
namespace cc {
// Creates the default and custom texture-buffer pools with the best texel
// format the device supports; 48 bytes per joint (12 floats) determines how
// many texels one joint occupies.
JointTexturePool::JointTexturePool(gfx::Device *device) {
    _device = device;
    const gfx::Format mediumFormat = selectJointsMediumFormat(_device);
    _formatSize = gfx::GFX_FORMAT_INFOS[static_cast<uint32_t>(mediumFormat)].size;
    _pixelsPerJoint = 48 / _formatSize;

    ITextureBufferPoolInfo defaultPoolInfo;
    defaultPoolInfo.format = mediumFormat;
    defaultPoolInfo.roundUpFn = roundUpType{roundUpTextureSize};
    _pool = ccnew TextureBufferPool(device);
    _pool->initialize(defaultPoolInfo);

    ITextureBufferPoolInfo customPoolCfg;
    customPoolCfg.format = mediumFormat;
    customPoolCfg.roundUpFn = roundUpType{roundUpTextureSize};
    _customPool = ccnew TextureBufferPool(device);
    _customPool->initialize(customPoolCfg);
}
// Destroys the default pool and drops all cached texture handles.
// NOTE(review): _customPool is deliberately left untouched here (registered
// custom chunk layouts outlive a clear) — confirm this asymmetry is intended.
void JointTexturePool::clear() {
    CC_SAFE_DESTROY(_pool);
    _textureBuffers.clear();
}
// Pre-creates fixed chunks in the custom pool for baked joint textures and
// records which skeleton/clip hash pairs map to each chunk.
void JointTexturePool::registerCustomTextureLayouts(const ccstd::vector<ICustomJointTextureLayout> &layouts) {
    for (const auto &layout : layouts) {
        uint32_t length = layout.textureLength;
        const bool hasFloatTexture = static_cast<uint32_t>(_device->getFormatFeatures(cc::gfx::Format::RGBA32F) & cc::gfx::FormatFeature::SAMPLED_TEXTURE) != 0;
        if (!hasFloatTexture) {
            // The RGBA8 fallback packs fewer bytes per texel, so reserve twice
            // the length.
            length *= 2;
        }
        const uint32_t chunkIdx = _customPool->createChunk(length);
        for (const auto &content : layout.contents) {
            const auto skeletonHash = content.skeleton;
            _chunkIdxMap[skeletonHash] = chunkIdx; // include default pose too
            for (const auto &clipHash : content.clips) {
                _chunkIdxMap[skeletonHash ^ clipHash] = chunkIdx;
            }
        }
    }
}
/**
 * Returns (lazily building) the joint texture for the skeleton's default pose
 * and records the mesh's bounds under that pose on the handle.
 * Returns an empty optional when no texture could be produced.
 */
ccstd::optional<IJointTextureHandle *> JointTexturePool::getDefaultPoseTexture(Skeleton *skeleton, Mesh *mesh, Node *skinningRoot) {
    ccstd::hash_t hash = skeleton->getHash() ^ 0; // may not equal to skeleton.hash
    ccstd::optional<IJointTextureHandle *> texture;
    auto cached = _textureBuffers.find(hash);
    if (cached != _textureBuffers.end()) {
        texture = cached->second;
    }
    const ccstd::vector<ccstd::string> &joints = skeleton->getJoints();
    const ccstd::vector<Mat4> &bindPoses = skeleton->getBindposes();
    Float32Array textureBuffer;
    bool buildTexture = false;
    auto jointCount = static_cast<uint32_t>(joints.size());
    // Look the chunk mapping up once; it also decides which pool owns the handle.
    auto chunkIt = _chunkIdxMap.find(hash);
    const bool usesCustomChunk = chunkIt != _chunkIdxMap.end();
    if (!texture.has_value()) {
        uint32_t bufSize = jointCount * 12; // 12 floats per joint (LBS)
        ITextureBufferHandle handle;
        if (usesCustomChunk) {
            handle = _customPool->alloc(bufSize * Float32Array::BYTES_PER_ELEMENT, chunkIt->second);
        } else {
            handle = _pool->alloc(bufSize * Float32Array::BYTES_PER_ELEMENT);
            // NOTE(review): this early-out means skeletons without a registered
            // custom chunk never get a default pose texture, and the allocation
            // above is abandoned. The TS original only bails out when the
            // allocation fails — confirm whether this should fall through.
            return texture;
        }
        IJointTextureHandle *textureHandle = IJointTextureHandle::createJoinTextureHandle();
        textureHandle->pixelOffset = handle.start / _formatSize;
        textureHandle->refCount = 1;
        textureHandle->clipHash = 0; // 0 marks the default pose
        textureHandle->skeletonHash = skeleton->getHash();
        textureHandle->readyToBeDeleted = false;
        textureHandle->handle = handle;
        texture = textureHandle;
        textureBuffer = Float32Array(bufSize);
        buildTexture = true;
    } else {
        texture.value()->refCount++;
    }
    geometry::AABB ab1;
    Mat4 mat4;
    Vec3 v34;
    Vec3 v33;
    // Min starts at +INF and max at -INF so the first joint bound initializes
    // both. (Fix: the original seeded the minimum with -INF, so it never moved.)
    Vec3 v3Min(INF, INF, INF);
    Vec3 v3Max(-INF, -INF, -INF);
    auto boneSpaceBounds = mesh->getBoneSpaceBounds(skeleton);
    for (uint32_t j = 0, offset = 0; j < jointCount; ++j, offset += 12) {
        auto *node = skinningRoot->getChildByPath(joints[j]);
        // Missing joint nodes fall back to the inverse bindpose (renders the
        // mesh as authored).
        Mat4 mat = node ? *getWorldTransformUntilRoot(node, skinningRoot, &mat4) : skeleton->getInverseBindposes()[j];
        if (j < boneSpaceBounds.size()) {
            auto *bound = boneSpaceBounds[j].get();
            bound->transform(mat, &ab1);
            ab1.getBoundary(&v33, &v34);
            Vec3::min(v3Min, v33, &v3Min);
            Vec3::max(v3Max, v34, &v3Max);
        }
        if (buildTexture) {
            if (node != nullptr) {
                Mat4::multiply(mat, bindPoses[j], &mat);
            }
            // Use the configured upload routine so this stays in sync with
            // cc-skinning.chunk (see UPLOAD_JOINT_DATA above).
            UPLOAD_JOINT_DATA(textureBuffer, offset, node ? mat : Mat4::IDENTITY, j == 0);
        }
    }
    // Fix: compute the pose bound BEFORE storing it in the map. The original
    // copied an empty vector into the map and then wrote through bounds[0] on
    // the empty local vector — out-of-bounds UB and a never-populated entry.
    ccstd::vector<geometry::AABB> bounds(1);
    geometry::AABB::fromPoints(v3Min, v3Max, &bounds[0]);
    texture.value()->bounds[static_cast<uint32_t>(mesh->getHash())] = bounds;
    if (buildTexture) {
        // Fix: upload through the pool that owns the handle; the original always
        // used _pool even though this point is only reached for custom-chunk
        // allocations (the non-custom branch returns early above).
        if (usesCustomChunk) {
            _customPool->update(texture.value()->handle, textureBuffer.buffer());
        } else {
            _pool->update(texture.value()->handle, textureBuffer.buffer());
        }
        _textureBuffers[hash] = texture.value();
    }
    return texture;
}
// TODO(xwx): need to implement this function after define AnimationClip
// ccstd::optional<IJointTextureHandle> JointTexturePool::getSequencePoseTexture(Skeleton *skeleton,AnimationClip *clip, Mesh *mesh, Node *skinningRoot) {
// uint64_t hash = skeleton->getHash() ^ clip->getHash();
// ccstd::optional<IJointTextureHandle> texture;
// if (_textureBuffers.find(hash) != _textureBuffers.end()) {
// texture = _textureBuffers[hash];
// if (texture->bounds.find(mesh->getHash()) != texture->bounds.end()) {
// texture->refCount++;
// return texture;
// }
// }
// const ccstd::vector<ccstd::string> &joints = skeleton->getJoints();
// const ccstd::vector<Mat4> & bindPoses = skeleton->getBindposes();
// // const clipData = SkelAnimDataHub.getOrExtract(clip);
// // const { frames } = clipData;
// Float32Array textureBuffer;
// bool buildTexture = false;
// uint32_t jointCount = joints.size();
// if (!texture.has_value()) {
// uint32_t bufSize = jointCount * 12;
// ITextureBufferHandle handle;
// if (_chunkIdxMap.find(hash) != _chunkIdxMap.end()) {
// handle = _customPool->alloc(bufSize * sizeof(Float32Array), _chunkIdxMap[hash]); // TODO(xwx): Float32Array.BYTES_PER_ELEMENT == sizeof(Float32Array) ?
// } else {
// handle = _pool->alloc(bufSize * sizeof(Float32Array));
// return texture;
// }
// // auto animInfos = createAnimInfos(skeleton, clip, skinningRoot); // TODO(xwx): createAnimInfos not implement
// texture = IJointTextureHandle{
// .pixelOffset = handle.start / _formatSize,
// .refCount = 1,
// .clipHash = 0,
// .skeletonHash = skeleton->getHash(),
// .readyToBeDeleted = false,
// .handle = handle,
// // .animInfos = animInfos // TODO(xwx)
// };
// textureBuffer.resize(bufSize);
// buildTexture = true;
// } else {
// texture->refCount++;
// }
// auto boneSpaceBounds = mesh->getBoneSpaceBounds(skeleton);
// ccstd::vector<geometry::AABB> bounds;
// texture->bounds[mesh->getHash()] = bounds;
// // for (uint32_t f = 0; f < frames; ++f) { // TODO(xwx): frames not define
// // bounds.emplace_back(geometry::AABB(INF, INF, INF, -INF, -INF, -INF));
// // }
// // TODO(xwx) : need to implement when define animInfos
// // for (uint32_t f = 0, offset = 0; f < frames; ++f) {
// // auto bound = bounds[f];
// // for (uint32_t j = 0; j < jointCount; ++j, offset += 12) {
// // const {
// // curveData,
// // downstream,
// // bindposeIdx,
// // bindposeCorrection,
// // } = texture.animInfos ![j];
// // let mat : Mat4;
// // let transformValid = true;
// // if (curveData && downstream) { // curve & static two-way combination
// // mat = Mat4.multiply(m4_1, curveData[f], downstream);
// // } else if (curveData) { // there is a curve directly controlling the joint
// // mat = curveData[f];
// // } else if (downstream) { // fallback to default pose if no animation curve can be found upstream
// // mat = downstream;
// // } else { // bottom line: render the original mesh as-is
// // mat = skeleton.inverseBindposes[bindposeIdx];
// // transformValid = false;
// // }
// // if (j < boneSpaceBounds.size()) {
// // auto bound = boneSpaceBounds[j];
// // auto tarnsform = bindposeCorrection ? Mat4::multiply(mat, bindposeCorrection, &m42) : mat; // TODO(xwx): mat not define
// // ab1.getBoundary(&v33, &v34);
// // Vec3::min(bound.center, v33, &bound.center);
// // Vec3::max(bound.halfExtents, v34, &bound.halfExtents);
// // }
// // if (buildTexture) {
// // if (transformValid) {
// // Mat4::multiply(mat, bindPoses[bindposIdx], &m41);
// // UPLOAD_JOINT_DATA(textureBuffer, offset, transformValid ? m41 : Mat4::IDENTITY, j == 0);
// // }
// // }
// // }
// // AABB::fromPoints(bound.center, bound.halfExtents, &bound);
// // }
// if (buildTexture) {
// // _pool->update(texture->handle, textureBuffer.buffer); // TODO(xwx): ArrayBuffer not implemented
// _textureBuffers[hash] = texture.value();
// }
// return texture;
// }
// }
/**
 * Drops one reference to `handle`; when the count reaches zero and the handle
 * is flagged for deletion, frees its pool allocation and (if it is still the
 * registered entry) removes and deletes it.
 */
void JointTexturePool::releaseHandle(IJointTextureHandle *handle) {
    if (handle->refCount > 0) {
        handle->refCount--;
    }
    if (handle->refCount == 0 && handle->readyToBeDeleted) {
        ccstd::hash_t hash = handle->skeletonHash ^ handle->clipHash;
        if (_chunkIdxMap.find(hash) != _chunkIdxMap.end()) {
            _customPool->free(handle->handle);
        } else {
            _pool->free(handle->handle);
        }
        // Fix: use find() — the original `_textureBuffers[hash]` default-inserts
        // a null entry whenever the hash is absent (e.g. after releaseSkeleton
        // already erased it), silently growing the map.
        auto it = _textureBuffers.find(hash);
        if (it != _textureBuffers.end() && it->second == handle) {
            _textureBuffers.erase(it);
            CC_SAFE_DELETE(handle);
        }
        // NOTE(review): when the map no longer holds this handle, the object is
        // freed from the pool but never deleted — confirm ownership in that path.
    }
}
/**
 * Flags every cached texture of `skeleton` for deletion; still-referenced
 * handles only lose their map entry (so re-allocation with the same asset
 * works), unreferenced ones are released immediately.
 */
void JointTexturePool::releaseSkeleton(Skeleton *skeleton) {
    // Fix: both branches below erase from _textureBuffers, which invalidates
    // the iterator of a direct range-for over the map (UB in the original).
    // Snapshot the matching handles first, then mutate the map.
    ccstd::vector<IJointTextureHandle *> matched;
    for (const auto &entry : _textureBuffers) {
        if (entry.second->skeletonHash == skeleton->getHash()) {
            matched.push_back(entry.second);
        }
    }
    for (auto *handle : matched) {
        handle->readyToBeDeleted = true;
        if (handle->refCount > 0) {
            // delete handle record immediately so new allocations with the same asset could work
            _textureBuffers.erase(handle->skeletonHash ^ handle->clipHash);
        } else {
            releaseHandle(handle);
        }
    }
}
// TODO(xwx): AnimationClip not define
// public releaseAnimationClip(clip: AnimationClip) {
// const it = this._textureBuffers.values();
// let res = it.next();
// while (!res.done) {
// const handle = res.value;
// if (handle.clipHash == = clip.hash) {
// handle.readyToBeDeleted = true;
// if (handle.refCount) {
// // delete handle record immediately so new allocations with the same asset could work
// this._textureBuffers.delete(handle.skeletonHash ^ handle.clipHash);
// } else {
// this.releaseHandle(handle);
// }
// }
// res = it.next();
// }
// }
// TODO(xwx): AnimationClip not define
// private _createAnimInfos (skeleton: Skeleton, clip: AnimationClip, skinningRoot: Node) {
// const animInfos: IInternalJointAnimInfo[] = [];
// const { joints, bindposes } = skeleton;
// const jointCount = joints.length;
// const clipData = SkelAnimDataHub.getOrExtract(clip);
// for (let j = 0; j < jointCount; j++) {
// let animPath = joints[j];
// let source = clipData.joints[animPath];
// let animNode = skinningRoot.getChildByPath(animPath);
// let downstream: Mat4 | undefined;
// let correctionPath: string | undefined;
// while (!source) {
// const idx = animPath.lastIndexOf('/');
// animPath = animPath.substring(0, idx);
// source = clipData.joints[animPath];
// if (animNode) {
// if (!downstream) { downstream = ccnew Mat4(); }
// Mat4.fromRTS(m4_1, animNode.rotation, animNode.position, animNode.scale);
// Mat4.multiply(downstream, m4_1, downstream);
// animNode = animNode.parent;
// } else { // record the nearest curve path if no downstream pose is present
// correctionPath = animPath;
// }
// if (idx < 0) { break; }
// }
// // the default behavior, just use the bindpose for current joint directly
// let bindposeIdx = j;
// let bindposeCorrection: Mat4 | undefined;
// /**
// * It is regularly observed that developers may choose to delete the whole
// * skeleton node tree for skinning models that only use baked animations
// * as an effective optimization strategy (substantial improvements on both
// * package size and runtime efficiency).
// *
// * This becomes troublesome in some cases during baking though, e.g. when a
// * skeleton joint node is not directly controlled by any animation curve,
// * but its parent nodes are. Due to lack of proper downstream default pose,
// * the joint transform can not be calculated accurately.
// *
// * We address this issue by employing some pragmatic approximation.
// * Specifically, by multiplying the bindpose of the joint corresponding to
// * the nearest curve, instead of the actual target joint. This effectively
// * merges the skinning influence of the 'incomplete' joint into its nearest
// * parent with accurate transform data.
// * It gives more visually-plausible results compared to the naive approach
// * for most cases we've covered.
// */
// if (correctionPath !== undefined && source) {
// // just use the previous joint if the exact path is not found
// bindposeIdx = j - 1;
// for (let t = 0; t < jointCount; t++) {
// if (joints[t] === correctionPath) {
// bindposeIdx = t;
// bindposeCorrection = ccnew Mat4();
// Mat4.multiply(bindposeCorrection, bindposes[t], skeleton.inverseBindposes[j]);
// break;
// }
// }
// }
// animInfos.push({
// curveData: source && source.transforms, downstream, bindposeIdx, bindposeCorrection,
// });
// }
// return animInfos;
// }
// Stores the device used to create per-node animation UBO buffers in getData().
JointAnimationInfo::JointAnimationInfo(gfx::Device *device)
: _device(device) {
}
/**
 * Returns the cached animation info for `nodeID`, creating the GPU uniform
 * buffer and its zero-initialized CPU mirror on first use.
 */
IAnimInfo JointAnimationInfo::getData(const ccstd::string &nodeID) {
    auto cached = _pool.find(nodeID);
    if (cached != _pool.end()) {
        return cached->second;
    }
    auto *buffer = _device->createBuffer(gfx::BufferInfo{
        gfx::BufferUsageBit::UNIFORM | gfx::BufferUsageBit::TRANSFER_DST,
        gfx::MemoryUsageBit::HOST | gfx::MemoryUsageBit::DEVICE,
        pipeline::UBOSkinningAnimation::SIZE,
        pipeline::UBOSkinningAnimation::SIZE,
    });
    // Fix: size the CPU mirror to match the UBO. The original default-constructed
    // (empty) Float32Array handed a null/empty backing store to buffer->update()
    // and left switchClip()'s data[0] write out of range.
    Float32Array data(pipeline::UBOSkinningAnimation::SIZE / sizeof(float));
    buffer->update(data.buffer()->getData());
    IAnimInfo info;
    info.buffer = buffer;
    info.data = data;
    info.dirty = false;
    _pool[nodeID] = info;
    return info;
}
/**
 * Destroys the cached anim info for `nodeID`, releasing its GPU buffer.
 * No-op when the node has no cached entry.
 */
void JointAnimationInfo::destroy(const ccstd::string &nodeID) {
    // Single lookup: reuse the iterator instead of find + operator[] + erase
    // (three hash lookups in the original).
    auto iter = _pool.find(nodeID);
    if (iter != _pool.end()) {
        CC_SAFE_DESTROY_AND_DELETE(iter->second.buffer);
        _pool.erase(iter);
    }
}
// Resets the animation state for a clip switch (-1 sentinel in slot 0), pushes
// the CPU mirror to the GPU buffer immediately, and clears the dirty flag.
// NOTE(review): assumes info.data holds at least one element — see getData().
const IAnimInfo &JointAnimationInfo::switchClip(IAnimInfo &info /*, AnimationClip *clip */) {
    // info.currentClip = clip;
    info.data[0] = -1;
    info.buffer->update(info.data.buffer()->getData());
    info.dirty = false;
    return info;
}
/**
 * Destroys every cached per-node GPU buffer and empties the pool.
 */
void JointAnimationInfo::clear() {
    // Fix: iterate by reference — `for (auto pool : _pool)` copied each
    // {string, IAnimInfo} pair (including its Float32Array) per iteration just
    // to destroy the buffer.
    for (auto &entry : _pool) {
        CC_SAFE_DESTROY_AND_DELETE(entry.second.buffer);
    }
    _pool.clear();
}
} // namespace cc

View File

@@ -0,0 +1,154 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "base/std/container/unordered_map.h"
#include "base/std/optional.h"
#include "3d/assets/Skeleton.h"
#include "core/TypedArray.h"
#include "core/geometry/AABB.h"
#include "gfx-base/GFXDef-common.h"
#include "renderer/core/TextureBufferPool.h"
#include "renderer/gfx-base/GFXBuffer.h"
#include "renderer/gfx-base/GFXDevice.h"
namespace cc {
class Node;
class Mesh;
// _chunkIdxMap[key] = skeleton ^ clips[i]
// One entry of a custom joint-texture chunk: a skeleton hash plus the hashes of
// the clips baked alongside it.
struct IChunkContent {
    uint32_t skeleton{0U};
    ccstd::vector<uint32_t> clips;
};
// A pre-sized chunk in the custom texture pool and the skeleton/clip pairs it
// serves (see JointTexturePool::registerCustomTextureLayouts).
struct ICustomJointTextureLayout {
    uint32_t textureLength{0};
    ccstd::vector<IChunkContent> contents;
};
// Per-joint lookup info resolved once per animation bake.
struct IInternalJointAnimInfo {
    ccstd::optional<Mat4> downstream;               // downstream default pose, if present
    ccstd::optional<ccstd::vector<Mat4>> curveData; // the nearest animation curve, if present
    index_t bindposeIdx{0};                         // index of the actual bindpose to use
    ccstd::optional<Mat4> bindposeCorrection;       // correction factor from the original bindpose
};
// CPU-side record of one allocated region inside a pooled joint texture.
// Instances are heap-allocated via createJoinTextureHandle() and shared via
// refCount (see JointTexturePool::releaseHandle) — hence the private ctor.
class IJointTextureHandle {
public:
    uint32_t pixelOffset{0}; // first texel of this region within the pooled texture
    uint32_t refCount{0};    // owners; freed once 0 and readyToBeDeleted
    ccstd::hash_t clipHash{0U};     // 0 for the default pose
    ccstd::hash_t skeletonHash{0U};
    bool readyToBeDeleted{false};
    ITextureBufferHandle handle; // backing allocation in the TextureBufferPool
    // mesh hash -> bounds for this pose/clip on that mesh
    ccstd::unordered_map<uint32_t, ccstd::vector<geometry::AABB>> bounds;
    ccstd::optional<ccstd::vector<IInternalJointAnimInfo>> animInfos;
    static IJointTextureHandle *createJoinTextureHandle() {
        return ccnew IJointTextureHandle();
    }

private:
    IJointTextureHandle() = default;
};
// Allocates and caches joint textures (per skeleton / per clip) out of two
// TextureBufferPools: a general-purpose pool and a pre-chunked custom pool for
// registered baked layouts.
class JointTexturePool : public RefCounted {
public:
    JointTexturePool() = default;
    explicit JointTexturePool(gfx::Device *device);
    ~JointTexturePool() override = default;
    // Number of texels one joint occupies (48 bytes / texel size).
    inline uint32_t getPixelsPerJoint() const { return _pixelsPerJoint; }
    void clear();
    void registerCustomTextureLayouts(const ccstd::vector<ICustomJointTextureLayout> &layouts);
    /**
     * @en
     * Get joint texture for the default pose.
     * @zh
     * 获取默认姿势的骨骼贴图。
     */
    ccstd::optional<IJointTextureHandle *> getDefaultPoseTexture(Skeleton *skeleton, Mesh *mesh, Node *skinningRoot);
    /**
     * @en
     * Get joint texture for the specified animation clip.
     * @zh
     * 获取指定动画片段的骨骼贴图。
     */
    // ccstd::optional<IJointTextureHandle> getSequencePoseTexture(Skeleton *skeleton, AnimationClip *clip, Mesh *mesh, Node *skinningRoot);
    void releaseHandle(IJointTextureHandle *handle);
    void releaseSkeleton(Skeleton *skeleton);
    // void releaseAnimationClip (AnimationClip* clip); // TODO(xwx): AnimationClip not define

private:
    // const IInternalJointAnimInfo &createAnimInfos(Skeleton *skeleton, AnimationClip *clip, Node *skinningRoot); // TODO(xwx): AnimationClip not define
    gfx::Device *_device{nullptr};
    IntrusivePtr<TextureBufferPool> _pool;       // general-purpose allocations
    // (skeletonHash ^ clipHash) -> live texture handle
    ccstd::unordered_map<ccstd::hash_t, IJointTextureHandle *> _textureBuffers;
    uint32_t _formatSize{0};     // bytes per texel of the chosen format
    uint32_t _pixelsPerJoint{0};
    IntrusivePtr<TextureBufferPool> _customPool; // pre-chunked baked layouts
    ccstd::unordered_map<ccstd::hash_t, index_t> _chunkIdxMap; // hash -> chunkIdx
    CC_DISALLOW_COPY_MOVE_ASSIGN(JointTexturePool);
};
// Per-node skeletal animation state shared with the render pipeline: a GPU
// uniform buffer plus its CPU mirror (uploaded via buffer->update()).
struct IAnimInfo {
    gfx::Buffer *buffer{nullptr}; // owned; destroyed by JointAnimationInfo
    Float32Array data;
    const float *curFrame{nullptr}; // Only used in JSB
    uint32_t frameDataBytes{0};     // Only used in JSB
    uint8_t *dirtyForJSB{nullptr};  // Only used in JSB
    bool dirty{false};
};
// Caches one IAnimInfo (GPU buffer + CPU mirror) per scene-node id.
class JointAnimationInfo : public RefCounted {
public:
    JointAnimationInfo() = default;
    explicit JointAnimationInfo(gfx::Device *device);
    ~JointAnimationInfo() override = default;
    // Returns the cached info for nodeID, creating buffer + mirror on first use.
    IAnimInfo getData(const ccstd::string &nodeID = "-1");
    // Releases the GPU buffer for nodeID and forgets the entry.
    void destroy(const ccstd::string &nodeID);
    // Resets info to the "no frame" sentinel and uploads immediately.
    static const IAnimInfo &switchClip(IAnimInfo &info /*, AnimationClip *clip */);
    void clear();

private:
    ccstd::unordered_map<ccstd::string, IAnimInfo> _pool; // per node
    gfx::Device *_device{nullptr};
    CC_DISALLOW_COPY_MOVE_ASSIGN(JointAnimationInfo);
};
} // namespace cc

View File

@@ -0,0 +1,57 @@
/****************************************************************************
Copyright (c) 2021-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "application/ApplicationManager.h"
#include "base/Macros.h"
namespace cc {
// static
// Meyers singleton: the function-local static gives thread-safe lazy
// initialization (C++11) and a stable address for the process lifetime.
ApplicationManager *ApplicationManager::getInstance() {
    static ApplicationManager mgr;
    return &mgr;
}
// Drops every application created via createApplication(): the shared_ptrs in
// _apps are the owning references, so clearing destroys any app no one else
// retains. NOTE(review): name carries a typo ("relese") but is public API.
void ApplicationManager::releseAllApplications() {
    _apps.clear();
}
// Returns the current application, or an empty pointer if it has been
// destroyed. std::weak_ptr::lock() already yields an empty shared_ptr when the
// target expired, so one call covers both branches of the original.
ApplicationManager::ApplicationPtr ApplicationManager::getCurrentApp() const {
    return _currentApp.lock();
}
// Returns the current application, asserting (debug builds) that it is still
// alive; in release builds an expired pointer still yields null from lock().
ApplicationManager::ApplicationPtr ApplicationManager::getCurrentAppSafe() const {
    CC_ASSERT(!_currentApp.expired());
    return _currentApp.lock();
}
} // namespace cc
// C-style teardown hook consumed by the platform layer.
// NOTE(review): the name typo ("destory") is part of the exported interface.
void cocos_destory() { // NOLINT(readability-identifier-naming)
    // Called in the platform layer, because the platform layer is isolated from the application layer.
    // It is the platform layer's job to drive applications and reclaim resources.
    cc::ApplicationManager::getInstance()->releseAllApplications();
}

View File

@@ -0,0 +1,96 @@
/****************************************************************************
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <memory>
#include "application/BaseApplication.h"
#include "base/std/container/vector.h"
namespace cc {
// Process-wide registry of BaseApplication instances. Owns created apps via
// shared_ptr (_apps) and tracks the most recently created one via weak_ptr.
class CC_DLL ApplicationManager {
public:
    static ApplicationManager* getInstance();
    using ApplicationPtr = std::shared_ptr<BaseApplication>;
    /**
     * @brief Generate application entry.
     * Constructs a T (must derive from BaseApplication), forwards the command
     * line, registers it as owned, and makes it the current application.
     */
    template <class T>
    std::enable_if_t<std::is_base_of<BaseApplication, T>::value, ApplicationPtr>
    createApplication(int argc, const char* argv[]) {
        ApplicationPtr app = std::make_shared<T>();
        app->setArgumentsInternal(argc, argv);
        _apps.push_back(app);
        _currentApp = app;
        return app;
    }
    /**
     * @brief Release all generated applications.
     */
    void releseAllApplications();
    /**
     * @brief Get the current application, may get empty.
     */
    ApplicationPtr getCurrentApp() const;
    /**
     * @brief Get the current application, make sure it is not empty.
     * Used to get the engine.
     */
    ApplicationPtr getCurrentAppSafe() const;

private:
    std::weak_ptr<BaseApplication> _currentApp; // non-owning view of the last created app
    ccstd::vector<ApplicationPtr> _apps;        // owning references
};
} // namespace cc
// Convenience accessors for the application singleton and its engine/platform
// interfaces.
#define CC_APPLICATION_MANAGER() cc::ApplicationManager::getInstance()
#define CC_CURRENT_APPLICATION() CC_APPLICATION_MANAGER()->getCurrentApp()
#define CC_CURRENT_APPLICATION_SAFE() CC_APPLICATION_MANAGER()->getCurrentAppSafe()
#define CC_CURRENT_ENGINE() CC_CURRENT_APPLICATION_SAFE()->getEngine()
#define CC_GET_PLATFORM_INTERFACE(intf) CC_CURRENT_ENGINE()->getInterface<intf>()
#define CC_GET_SYSTEM_WINDOW(id) CC_GET_PLATFORM_INTERFACE(cc::ISystemWindowManager)->getWindow(id)
#define CC_GET_MAIN_SYSTEM_WINDOW() CC_GET_SYSTEM_WINDOW(cc::ISystemWindow::mainWindowId) // Assuming the 1st created window is the main system window for now!
#define CC_GET_XR_INTERFACE() BasePlatform::getPlatform()->getInterface<IXRInterface>()
/**
 * @brief Called at the user-defined main entry.
 * Creates the app, runs init() (non-zero init result aborts with -1), then
 * enters run(). Expects `argc`/`argv` in the enclosing scope.
 */
#define CC_START_APPLICATION(className)                                                \
    do {                                                                               \
        auto app = CC_APPLICATION_MANAGER()->createApplication<className>(argc, argv); \
        if (app->init()) {                                                             \
            return -1;                                                                 \
        }                                                                              \
        return app->run(argc, argv);                                                   \
    } while (0)
// Defines cocos_main() (the engine's entry hook) for the given application type.
#define CC_REGISTER_APPLICATION(className)        \
    int cocos_main(int argc, const char** argv) { \
        CC_START_APPLICATION(className);          \
    }

View File

@@ -0,0 +1,80 @@
/****************************************************************************
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <memory>
#include "engine/BaseEngine.h"
namespace cc {
// Abstract lifecycle interface every application implements; driven by the
// platform layer through ApplicationManager.
class CC_DLL BaseApplication {
public:
    virtual ~BaseApplication() = default;
    /**
     * @brief Application initialization
     * @return 0 on success, non-zero on failure (see CC_START_APPLICATION).
     */
    virtual int32_t init() = 0;
    /**
     * @brief Application main business logic.
     */
    virtual int32_t run(int argc,
                        const char **argv) = 0;
    /**
     * @brief Pause the application.
     */
    virtual void pause() = 0;
    /**
     * @brief Resume the application.
     */
    virtual void resume() = 0;
    /**
     * @brief Restart the application.
     */
    virtual void restart() = 0;
    /**
     * @brief Close the application.
     */
    virtual void close() = 0;
    /**
     * @brief Get engine.
     */
    virtual BaseEngine::Ptr getEngine() const = 0;
    /**
     * @brief Get arguments passed to execution file
     */
    virtual const std::vector<std::string> &getArguments() const = 0;

protected:
    /**
     * @brief Set arguments passed to execution file
     * @note setArgumentsInternal needs to be protected since it should only be used internally.
     * ApplicationManager::createApplication is the sole intended caller.
     */
    virtual void setArgumentsInternal(int argc, const char *argv[]) = 0;
    friend class ApplicationManager;
};
} // namespace cc

View File

@@ -0,0 +1,90 @@
/****************************************************************************
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "BaseGame.h"
#include <string>
#include "ApplicationManager.h"
#include "platform/interfaces/modules/ISystemWindowManager.h"
#include "renderer/pipeline/GlobalDescriptorSetManager.h"
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#include "platform/android/adpf_manager.h"
#endif
extern "C" void cc_load_all_plugins(); // NOLINT
namespace cc {
// Boots the game application: global descriptor-set layout, plugins, ADPF on
// Android, main window creation on desktop platforms, optional script
// debugger, base CocosApplication init, then the JS entry scripts.
// Returns 0 on success, non-zero on CocosApplication::init failure.
int BaseGame::init() {
    cc::pipeline::GlobalDSManager::setDescriptorSetLayout();
    cc_load_all_plugins();
#if (CC_PLATFORM == CC_PLATFORM_ANDROID) && CC_SUPPORT_ADPF
    ADPFManager::getInstance().Initialize();
#endif
#if CC_PLATFORM == CC_PLATFORM_WINDOWS || CC_PLATFORM == CC_PLATFORM_LINUX || CC_PLATFORM == CC_PLATFORM_QNX || CC_PLATFORM == CC_PLATFORM_MACOS
    // override default value (-1 means "not configured" in WindowInfo)
    //_windowInfo.x = _windowInfo.x == -1 ? 0 : _windowInfo.x;
    //_windowInfo.y = _windowInfo.y == -1 ? 0 : _windowInfo.y;
    _windowInfo.width = _windowInfo.width == -1 ? 800 : _windowInfo.width;
    _windowInfo.height = _windowInfo.height == -1 ? 600 : _windowInfo.height;
    _windowInfo.flags = _windowInfo.flags == -1 ? cc::ISystemWindow::CC_WINDOW_SHOWN |
                                                      cc::ISystemWindow::CC_WINDOW_RESIZABLE |
                                                      cc::ISystemWindow::CC_WINDOW_INPUT_FOCUS
                                                : _windowInfo.flags;
    // Create the main window exactly once even if init() runs again (restart).
    std::call_once(_windowCreateFlag, [&]() {
        ISystemWindowInfo info;
        info.title = _windowInfo.title;
#if CC_PLATFORM == CC_PLATFORM_WINDOWS
        info.x = _windowInfo.x == -1 ? 50 : _windowInfo.x; // 50 means "move the window a little" for now
        info.y = _windowInfo.y == -1 ? 50 : _windowInfo.y; // same as above
#else
        info.x = _windowInfo.x == -1 ? 0 : _windowInfo.x;
        info.y = _windowInfo.y == -1 ? 0 : _windowInfo.y;
#endif
        info.width = _windowInfo.width;
        info.height = _windowInfo.height;
        info.flags = _windowInfo.flags;
        ISystemWindowManager* windowMgr = CC_GET_PLATFORM_INTERFACE(ISystemWindowManager);
        windowMgr->createWindow(info);
    });
#endif
    if (_debuggerInfo.enabled) {
        setDebugIpAndPort(_debuggerInfo.address, _debuggerInfo.port, _debuggerInfo.pauseOnStart);
    }
    int ret = cc::CocosApplication::init();
    if (ret != 0) {
        return ret;
    }
    setXXTeaKey(_xxteaKey);
    // Bootstraps the JS side: web adapter shims first, then the project entry.
    runScript("jsb-adapter/web-adapter.js");
    runScript("main.js");
    return 0;
}
} // namespace cc

View File

@@ -0,0 +1,55 @@
/****************************************************************************
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <string>
#include "CocosApplication.h"
namespace cc {
class BaseGame : public CocosApplication {
public:
struct DebuggerInfo {
bool enabled{true};
int32_t port{6086};
std::string address{"0.0.0.0"};
bool pauseOnStart{false};
};
struct WindowInfo {
std::string title;
int32_t x{-1};
int32_t y{-1};
int32_t width{-1};
int32_t height{-1};
int32_t flags{-1};
};
BaseGame() = default;
int init() override;
protected:
std::string _xxteaKey;
DebuggerInfo _debuggerInfo;
WindowInfo _windowInfo;
std::once_flag _windowCreateFlag;
};
} // namespace cc

View File

@@ -0,0 +1,192 @@
/****************************************************************************
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "application/CocosApplication.h"
#include "base/Macros.h"
#include "ApplicationManager.h"
#include "cocos/bindings/event/EventDispatcher.h"
#include "cocos/bindings/jswrapper/SeApi.h"
#include "cocos/bindings/manual/jsb_classtype.h"
#include "cocos/bindings/manual/jsb_global.h"
#include "cocos/bindings/manual/jsb_module_register.h"
#include "cocos/engine/BaseEngine.h"
#include "cocos/platform/interfaces/modules/IScreen.h"
#include "cocos/platform/interfaces/modules/ISystemWindowManager.h"
namespace cc {
// Creates the platform engine instance this application drives.
CocosApplication::CocosApplication() {
    _engine = BaseEngine::createEngine();
}

CocosApplication::~CocosApplication() {
    unregisterAllEngineEvents();
}

// Detaches this application from engine status notifications.
// Safe to call even when no listener has been registered yet.
void CocosApplication::unregisterAllEngineEvents() {
    if (_engine != nullptr) {
        _engine->off(_engineEvents);
    }
}
// Initializes engine, engine-status listeners and the script engine.
// Returns 0 on success, -1 when the underlying engine fails to initialize
// (a non-zero return from BaseEngine::init() is treated as failure here).
int CocosApplication::init() {
    if (_engine->init()) {
        return -1;
    }
    // Re-subscribing below; drop any previous listener first.
    unregisterAllEngineEvents();

    _systemWindow = CC_GET_MAIN_SYSTEM_WINDOW();

    // Route engine lifecycle transitions to the virtual on* hooks.
    _engineEvents = _engine->on<BaseEngine::EngineStatusChange>([this](BaseEngine * /*emitter*/, BaseEngine::EngineStatus status) {
        switch (status) {
            case BaseEngine::ON_START:
                this->onStart();
                break;
            case BaseEngine::ON_RESUME:
                this->onResume();
                break;
            case BaseEngine::ON_PAUSE:
                this->onPause();
                break;
            case BaseEngine::ON_CLOSE:
                this->onClose();
                break;
            default:
                CC_ABORT(); // unknown status is a programming error
        }
    });

    se::ScriptEngine *se = se::ScriptEngine::getInstance();

    jsb_init_file_operation_delegate();

    se->setExceptionCallback(
        std::bind(&CocosApplication::handleException, this, // NOLINT(modernize-avoid-bind)
                  std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));

    jsb_register_all_modules();

#if CC_EDITOR
    // In the editor, reuse the host's already-running V8 isolate.
    auto isolate = v8::Isolate::GetCurrent();
    se->start(isolate);
#else
    se->start();
#endif

#if (CC_PLATFORM == CC_PLATFORM_IOS)
    // iOS reports logical points; broadcast the pixel-sized resize once at startup.
    auto logicSize = _systemWindow->getViewSize();
    IScreen *screen = _engine->getInterface<IScreen>();
    float pixelRatio = screen->getDevicePixelRatio();
    uint32_t windowId = _systemWindow->getWindowId();
    events::Resize::broadcast(logicSize.width * pixelRatio, logicSize.height * pixelRatio, windowId);
#endif
    return 0;
}
// Enters the engine main loop; returns its exit code.
// argc/argv are unused here — arguments arrive via setArgumentsInternal().
int32_t CocosApplication::run(int argc, const char **argv) {
    CC_UNUSED_PARAM(argc);
    CC_UNUSED_PARAM(argv);
    return _engine->run();
}

void CocosApplication::pause() {
    _engine->pause();
}

void CocosApplication::resume() {
    _engine->resume();
}

void CocosApplication::restart() {
    _engine->restart();
}

// IMPORTANT!! The method `onClose` listens for the close event, while `close`
// is a JSB binding method meant to close the whole application.
void CocosApplication::close() {
    _systemWindow->closeWindow();
}

BaseEngine::Ptr CocosApplication::getEngine() const {
    return _engine;
}

// Returns the arguments captured by setArgumentsInternal().
const std::vector<std::string> &CocosApplication::getArguments() const {
    return _argv;
}
// Captures the process arguments, replacing any previously stored list.
void CocosApplication::setArgumentsInternal(int argc, const char *argv[]) {
    _argv.assign(argv, argv + argc);
}
void CocosApplication::onStart() {
    // TODO(cc): Handling engine start events
}

void CocosApplication::onPause() {
    // TODO(cc): Handling pause events
}

void CocosApplication::onResume() {
    // TODO(cc): Handling resume events
}

// Engine reported close: shut the engine down.
void CocosApplication::onClose() {
    _engine->close();
}

// Enables the script debugger on the given address/port; when
// isWaitForConnect is true, startup blocks until a debugger attaches.
void CocosApplication::setDebugIpAndPort(const ccstd::string &serverAddr, uint32_t port, bool isWaitForConnect) {
    // Enable debugger here
    jsb_enable_debugger(serverAddr, port, isWaitForConnect);
}

// Executes a script file through the script engine.
void CocosApplication::runScript(const ccstd::string &filePath) {
    jsb_run_script(filePath);
}

// Default script-exception handler: logs location, message and stack.
// Override to forward to a crash-reporting service (e.g. Tencent Bugly).
void CocosApplication::handleException(const char *location, const char *message, const char *stack) {
    // Send exception information to server like Tencent Bugly.
    CC_LOG_ERROR("\nUncaught Exception:\n - location : %s\n - msg : %s\n - detail : \n %s\n", location, message, stack);
}

// Sets the key used to decrypt XXTEA-encrypted script bundles.
void CocosApplication::setXXTeaKey(const ccstd::string &key) {
    jsb_set_xxtea_key(key);
}
#if CC_PLATFORM == CC_PLATFORM_WINDOWS || CC_PLATFORM == CC_PLATFORM_LINUX || CC_PLATFORM == CC_PLATFORM_QNX || CC_PLATFORM == CC_PLATFORM_MACOS
// Creates the main window without an explicit position (desktop platforms only).
void CocosApplication::createWindow(const char *title, int32_t w,
                                    int32_t h, int32_t flags) {
    _systemWindow->createWindow(title, w, h, flags);
}

// Creates the main window at an explicit position (desktop platforms only).
void CocosApplication::createWindow(const char *title,
                                    int32_t x, int32_t y, int32_t w,
                                    int32_t h, int32_t flags) {
    _systemWindow->createWindow(title, x, y, w, h, flags);
}
#endif
} // namespace cc

View File

@@ -0,0 +1,151 @@
/****************************************************************************
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <iostream>
#include "application/BaseApplication.h"
#include "cocos/platform/interfaces/modules/ISystemWindow.h"
namespace cc {
class BaseEngine;
/**
 * @brief Standard application implementation: owns the engine instance,
 * bridges engine lifecycle events to virtual hooks, and hosts the script engine.
 */
class CC_DLL CocosApplication : public BaseApplication {
public:
    CocosApplication();
    ~CocosApplication() override;
    /**
     * @brief Application initialization. Returns 0 on success, -1 on engine failure.
     */
    int32_t init() override;
    /**
     * @brief Application main business logic; runs the engine loop.
     */
    int32_t run(int argc, const char **argv) override;
    /**
     * @brief Pause the application.
     */
    void pause() override;
    /**
     * @brief Resume the application.
     */
    void resume() override;
    /**
     * @brief Restart the application.
     */
    void restart() override;
    /**
     * @brief Close the application (closes the main system window).
     */
    void close() override;
    /**
     * @brief Get the engine instance.
     */
    BaseEngine::Ptr getEngine() const override;
    /**
     * @brief Get arguments passed to the executable.
     */
    const std::vector<std::string> &getArguments() const override;

protected:
    /**
     * @brief Set arguments passed to the executable.
     * @note setArgumentsInternal needs to be protected since it should only be used internally.
     */
    void setArgumentsInternal(int argc, const char *argv[]) override;

public:
    /**
     * @brief Process the engine start event.
     */
    virtual void onStart();
    /**
     * @brief Process pause events.
     */
    virtual void onPause();
    /**
     * @brief Process resume events.
     */
    virtual void onResume();
    /**
     * @brief Process close events (shuts the engine down).
     */
    virtual void onClose();
#if CC_PLATFORM == CC_PLATFORM_WINDOWS || CC_PLATFORM == CC_PLATFORM_LINUX || CC_PLATFORM == CC_PLATFORM_QNX || CC_PLATFORM == CC_PLATFORM_MACOS
    /**
     * @brief Create a window at an explicit position.
     * @param title Window title
     * @param x x-axis coordinate
     * @param y y-axis coordinate
     * @param w Window width
     * @param h Window height
     * @param flags Window flags
     */
    virtual void createWindow(const char *title,
                              int32_t x, int32_t y, int32_t w,
                              int32_t h, int32_t flags);
    /**
     * @brief Create a centered window.
     * @param title Window title
     * @param w Window width
     * @param h Window height
     * @param flags Window flags
     */
    virtual void createWindow(const char *title, int32_t w,
                              int32_t h, int32_t flags);
#endif
    /**
     * @brief Set the debugging server address and port.
     * @param serverAddr Server address.
     * @param port Server port.
     * @param isWaitForConnect Whether startup blocks until a debugger attaches.
     */
    virtual void setDebugIpAndPort(const ccstd::string &serverAddr, uint32_t port, bool isWaitForConnect);
    /**
     * @brief Run a script file.
     * @param filePath Script path.
     */
    virtual void runScript(const ccstd::string &filePath);
    /**
     * @brief Script exception handling (default: logs the exception).
     * @param location Exception location
     * @param message Exception message
     * @param stack Exception stack
     */
    virtual void handleException(const char *location, const char *message, const char *stack);
    virtual void setXXTeaKey(const ccstd::string &key);

private:
    void unregisterAllEngineEvents();

    ISystemWindow *_systemWindow{nullptr}; // non-owning; main window from the window manager
    BaseEngine::Ptr _engine{nullptr};
    BaseEngine::EngineStatusChange::EventID _engineEvents;
    std::vector<std::string> _argv;
};
} // namespace cc

598
cocos/audio/AudioEngine.cpp Normal file
View File

@@ -0,0 +1,598 @@
/****************************************************************************
Copyright (c) 2014-2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "audio/include/AudioEngine.h"
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>
#include "base/Log.h"
#include "base/Utils.h"
#include "base/memory/Memory.h"
#include "base/std/container/queue.h"
#include "platform/FileUtils.h"
#if CC_PLATFORM == CC_PLATFORM_ANDROID || CC_PLATFORM == CC_PLATFORM_OPENHARMONY
// OpenHarmony and Android use the same audio playback module
#include "audio/android/AudioEngine-inl.h"
#elif CC_PLATFORM == CC_PLATFORM_IOS || CC_PLATFORM == CC_PLATFORM_MACOS
#include "audio/apple/AudioEngine-inl.h"
#elif CC_PLATFORM == CC_PLATFORM_WINDOWS || CC_PLATFORM == CC_PLATFORM_OHOS
#include "audio/oalsoft/AudioEngine-soft.h"
#elif CC_PLATFORM == CC_PLATFORM_WINRT
#include "audio/winrt/AudioEngine-winrt.h"
#elif CC_PLATFORM == CC_PLATFORM_LINUX || CC_PLATFORM == CC_PLATFORM_QNX
#include "audio/oalsoft/AudioEngine-soft.h"
#elif CC_PLATFORM == CC_PLATFORM_TIZEN
#include "audio/tizen/AudioEngine-tizen.h"
#endif
#define TIME_DELAY_PRECISION 0.0001
#ifdef ERROR
#undef ERROR
#endif // ERROR
namespace cc {
const int AudioEngine::INVALID_AUDIO_ID = -1;
const float AudioEngine::TIME_UNKNOWN = -1.0F;
//audio file path,audio IDs
ccstd::unordered_map<ccstd::string, ccstd::list<int>> AudioEngine::sAudioPathIDMap;
//profileName,ProfileHelper
ccstd::unordered_map<ccstd::string, AudioEngine::ProfileHelper> AudioEngine::sAudioPathProfileHelperMap;
unsigned int AudioEngine::sMaxInstances = MAX_AUDIOINSTANCES;
AudioEngine::ProfileHelper *AudioEngine::sDefaultProfileHelper = nullptr;
ccstd::unordered_map<int, AudioEngine::AudioInfo> AudioEngine::sAudioIDInfoMap;
AudioEngineImpl *AudioEngine::sAudioEngineImpl = nullptr;
float AudioEngine::sVolumeFactor = 1.0F;
events::EnterBackground::Listener AudioEngine::sOnPauseListenerID;
events::EnterForeground::Listener AudioEngine::sOnResumeListenerID;
ccstd::vector<int> AudioEngine::sBreakAudioID;
AudioEngine::AudioEngineThreadPool *AudioEngine::sThreadPool = nullptr;
bool AudioEngine::sIsEnabled = true;
// All fields start in the "not yet playing" state; duration stays
// TIME_UNKNOWN until first queried via getDuration().
AudioEngine::AudioInfo::AudioInfo()
: filePath(nullptr),
  profileHelper(nullptr),
  volume(1.0F),
  loop(false),
  duration(TIME_UNKNOWN),
  state(AudioState::INITIALIZING) {
}
// Fixed-size worker pool that runs queued audio tasks off the main thread.
// Thread-safe: the queue and stop flag are guarded by _queueMutex.
class AudioEngine::AudioEngineThreadPool {
public:
    // Spins up `threads` workers that block until tasks arrive.
    explicit AudioEngineThreadPool(int threads = 4) {
        for (int index = 0; index < threads; ++index) {
            _workers.emplace_back(std::thread([this]() {
                threadFunc();
            }));
        }
    }

    // Enqueues a task and wakes one waiting worker.
    void addTask(const std::function<void()> &task) {
        {
            std::unique_lock<std::mutex> lk(_queueMutex);
            _taskQueue.emplace(task);
        }
        // Notify outside the lock so the woken worker does not immediately
        // block on the mutex we still hold.
        _taskCondition.notify_one();
    }

    // Signals shutdown and joins all workers. Tasks still queued at
    // destruction time are discarded (same policy as before).
    ~AudioEngineThreadPool() {
        {
            std::unique_lock<std::mutex> lk(_queueMutex);
            _stop = true;
        }
        _taskCondition.notify_all();
        for (auto &&worker : _workers) {
            worker.join();
        }
    }

private:
    void threadFunc() {
        while (true) {
            std::function<void()> task = nullptr;
            {
                std::unique_lock<std::mutex> lk(_queueMutex);
                // Predicate form re-checks the condition on every wakeup,
                // so spurious wakeups cannot slip through.
                _taskCondition.wait(lk, [this]() { return _stop || !_taskQueue.empty(); });
                if (_stop) {
                    break; // shutdown takes priority over remaining tasks
                }
                task = std::move(_taskQueue.front());
                _taskQueue.pop();
            }
            task(); // run outside the lock
        }
    }

    ccstd::vector<std::thread> _workers;
    ccstd::queue<std::function<void()>> _taskQueue;

    std::mutex _queueMutex;
    std::condition_variable _taskCondition;
    bool _stop{};
};
// Tears down the audio subsystem: stops everything, frees the thread pool,
// backend implementation and default profile, and drops lifecycle listeners.
void AudioEngine::end() {
    stopAll();

    // `delete` on a null pointer is a no-op, so no guards are needed.
    delete sThreadPool;
    sThreadPool = nullptr;

    delete sAudioEngineImpl;
    sAudioEngineImpl = nullptr;

    delete sDefaultProfileHelper;
    sDefaultProfileHelper = nullptr;

    sOnPauseListenerID.reset();
    sOnResumeListenerID.reset();
}
// Creates the backend implementation (and, off-Android, the task thread pool)
// on first use. Returns false when the backend fails to initialize.
bool AudioEngine::lazyInit() {
    if (sAudioEngineImpl == nullptr) {
        sAudioEngineImpl = ccnew AudioEngineImpl();
        if (!sAudioEngineImpl || !sAudioEngineImpl->init()) {
            delete sAudioEngineImpl;
            sAudioEngineImpl = nullptr;
            return false;
        }
        // Pause/resume playback together with the app lifecycle.
        sOnPauseListenerID.bind(&onEnterBackground);
        sOnResumeListenerID.bind(&onEnterForeground);
    }

#if (CC_PLATFORM != CC_PLATFORM_ANDROID)
    // The shared task pool is not created on Android.
    if (sAudioEngineImpl && sThreadPool == nullptr) {
        sThreadPool = ccnew AudioEngineThreadPool();
    }
#endif

    return true;
}
// Plays an audio file and returns its audio ID, or INVALID_AUDIO_ID on failure.
// Failure cases: engine disabled, backend init failure, missing file,
// global/per-profile instance limits, or per-profile minimum-delay throttling.
int AudioEngine::play2d(const ccstd::string &filePath, bool loop, float volume, const AudioProfile *profile) {
    int ret = AudioEngine::INVALID_AUDIO_ID;

    do {
        if (!isEnabled()) {
            break;
        }

        if (!lazyInit()) {
            break;
        }

        if (!FileUtils::getInstance()->isFileExist(filePath)) {
            break;
        }

        auto *profileHelper = sDefaultProfileHelper;
        // BUGFIX: sDefaultProfileHelper is only created lazily by
        // getDefaultProfile(), so it may still be nullptr here; guard the
        // dereference before comparing against its embedded profile.
        if (profile != nullptr && (profileHelper == nullptr || profile != &profileHelper->profile)) {
            CC_ASSERT(!profile->name.empty());
            profileHelper = &sAudioPathProfileHelperMap[profile->name];
            profileHelper->profile = *profile;
        }

        if (sAudioIDInfoMap.size() >= sMaxInstances) {
            CC_LOG_INFO("Fail to play %s cause by limited max instance of AudioEngine", filePath.c_str());
            break;
        }
        if (profileHelper) {
            if (profileHelper->profile.maxInstances != 0 && profileHelper->audioIDs.size() >= profileHelper->profile.maxInstances) {
                CC_LOG_INFO("Fail to play %s cause by limited max instance of AudioProfile", filePath.c_str());
                break;
            }
            if (profileHelper->profile.minDelay > TIME_DELAY_PRECISION) {
                auto currTime = std::chrono::high_resolution_clock::now();
                auto delay = static_cast<float>(std::chrono::duration_cast<std::chrono::microseconds>(currTime - profileHelper->lastPlayTime).count()) / 1000000.0;
                // A zero-epoch lastPlayTime means this profile has never played yet.
                if (profileHelper->lastPlayTime.time_since_epoch().count() != 0 && delay <= profileHelper->profile.minDelay) {
                    CC_LOG_INFO("Fail to play %s cause by limited minimum delay", filePath.c_str());
                    break;
                }
            }
        }

        // Clamp the requested volume to [0, 1].
        if (volume < 0.0F) {
            volume = 0.0F;
        } else if (volume > 1.0F) {
            volume = 1.0F;
        }

        ret = sAudioEngineImpl->play2d(filePath, loop, volume);
        if (ret != INVALID_AUDIO_ID) {
            sAudioPathIDMap[filePath].push_back(ret);
            auto it = sAudioPathIDMap.find(filePath);

            auto &audioRef = sAudioIDInfoMap[ret];
            audioRef.volume = volume;
            audioRef.loop = loop;
            // Point at the key stored inside the map so the string outlives this call.
            audioRef.filePath = &it->first;
            audioRef.state = AudioState::PLAYING;

            if (profileHelper) {
                profileHelper->lastPlayTime = std::chrono::high_resolution_clock::now();
                profileHelper->audioIDs.push_back(ret);
            }
            audioRef.profileHelper = profileHelper;
        }
    } while (false);

    return ret;
}
// Changes loop mode for a playing audio instance; no-op for unknown IDs or
// when the mode is already set.
void AudioEngine::setLoop(int audioID, bool loop) {
    auto it = sAudioIDInfoMap.find(audioID);
    if (it != sAudioIDInfoMap.end() && it->second.loop != loop) {
        sAudioEngineImpl->setLoop(audioID, loop);
        it->second.loop = loop;
    }
}

// Sets the per-instance volume (clamped to [0, 1]); the effective backend
// volume is scaled by the global sVolumeFactor.
void AudioEngine::setVolume(int audioID, float volume) {
    auto it = sAudioIDInfoMap.find(audioID);
    if (it != sAudioIDInfoMap.end()) {
        if (volume < 0.0F) {
            volume = 0.0F;
        } else if (volume > 1.0F) {
            volume = 1.0F;
        }

        // Skip the backend call when the stored volume is unchanged.
        if (it->second.volume != volume) {
            sAudioEngineImpl->setVolume(audioID, volume * sVolumeFactor);
            it->second.volume = volume;
        }
    }
}
// Sets the global volume scale (clamped to [0, 1]) and re-applies the
// effective volume to every live audio instance.
void AudioEngine::setVolumeFactor(float factor) {
    sVolumeFactor = (factor < 0.0F) ? 0.0F : ((factor > 1.0F) ? 1.0F : factor);

    for (auto &item : sAudioIDInfoMap) {
        sAudioEngineImpl->setVolume(item.first, item.second.volume * sVolumeFactor);
    }
}
// Pauses one PLAYING instance; other states are left untouched.
void AudioEngine::pause(int audioID) {
    auto infoIter = sAudioIDInfoMap.find(audioID);
    if (infoIter != sAudioIDInfoMap.end() && infoIter->second.state == AudioState::PLAYING) {
        sAudioEngineImpl->pause(audioID);
        infoIter->second.state = AudioState::PAUSED;
    }
}

// Pauses every instance currently in the PLAYING state.
void AudioEngine::pauseAll() {
    for (auto &entry : sAudioIDInfoMap) {
        if (entry.second.state == AudioState::PLAYING) {
            sAudioEngineImpl->pause(entry.first);
            entry.second.state = AudioState::PAUSED;
        }
    }
}

// Resumes one PAUSED instance; other states are left untouched.
void AudioEngine::resume(int audioID) {
    auto infoIter = sAudioIDInfoMap.find(audioID);
    if (infoIter != sAudioIDInfoMap.end() && infoIter->second.state == AudioState::PAUSED) {
        sAudioEngineImpl->resume(audioID);
        infoIter->second.state = AudioState::PLAYING;
    }
}

// Resumes every instance currently in the PAUSED state.
void AudioEngine::resumeAll() {
    for (auto &entry : sAudioIDInfoMap) {
        if (entry.second.state == AudioState::PAUSED) {
            sAudioEngineImpl->resume(entry.first);
            entry.second.state = AudioState::PLAYING;
        }
    }
}
// App went to background: pause everything that is playing and remember the
// IDs so onEnterForeground() can resume exactly those instances.
void AudioEngine::onEnterBackground() {
    auto itEnd = sAudioIDInfoMap.end();
    for (auto it = sAudioIDInfoMap.begin(); it != itEnd; ++it) {
        if (it->second.state == AudioState::PLAYING) {
            sAudioEngineImpl->pause(it->first);
            it->second.state = AudioState::PAUSED;
            sBreakAudioID.push_back(it->first);
        }
    }

#if CC_PLATFORM == CC_PLATFORM_ANDROID || CC_PLATFORM == CC_PLATFORM_OPENHARMONY
    // These backends also need a platform-level pause.
    if (sAudioEngineImpl) {
        sAudioEngineImpl->onPause();
    }
#endif
}

// App came back to foreground: resume only the instances this module paused
// in onEnterBackground() (still-PAUSED check skips ones stopped meanwhile).
void AudioEngine::onEnterForeground() {
    auto itEnd = sBreakAudioID.end();
    for (auto it = sBreakAudioID.begin(); it != itEnd; ++it) {
        auto iter = sAudioIDInfoMap.find(*it);
        if (iter != sAudioIDInfoMap.end() && iter->second.state == AudioState::PAUSED) {
            sAudioEngineImpl->resume(*it);
            iter->second.state = AudioState::PLAYING;
        }
    }
    sBreakAudioID.clear();

#if CC_PLATFORM == CC_PLATFORM_ANDROID || CC_PLATFORM == CC_PLATFORM_OPENHARMONY
    if (sAudioEngineImpl) {
        sAudioEngineImpl->onResume();
    }
#endif
}
// Stops a known audio instance and removes all bookkeeping for it.
void AudioEngine::stop(int audioID) {
    if (sAudioIDInfoMap.find(audioID) != sAudioIDInfoMap.end()) {
        sAudioEngineImpl->stop(audioID);
        remove(audioID);
    }
}

// Erases an audio ID from the info map, its profile helper (if any) and the
// per-path ID list. Does not touch the backend.
void AudioEngine::remove(int audioID) {
    auto infoIter = sAudioIDInfoMap.find(audioID);
    if (infoIter == sAudioIDInfoMap.end()) {
        return;
    }
    if (infoIter->second.profileHelper != nullptr) {
        infoIter->second.profileHelper->audioIDs.remove(audioID);
    }
    sAudioPathIDMap[*infoIter->second.filePath].remove(audioID);
    sAudioIDInfoMap.erase(audioID);
}
// Stops every instance in the backend and clears all bookkeeping maps.
void AudioEngine::stopAll() {
    if (!sAudioEngineImpl) {
        return;
    }
    sAudioEngineImpl->stopAll();
    // Detach every ID from its profile helper before wiping the maps.
    for (auto &entry : sAudioIDInfoMap) {
        if (entry.second.profileHelper) {
            entry.second.profileHelper->audioIDs.remove(entry.first);
        }
    }
    sAudioPathIDMap.clear();
    sAudioIDInfoMap.clear();
}
// Stops every instance playing `filePath` and evicts the file from the
// backend cache.
void AudioEngine::uncache(const ccstd::string &filePath) {
    auto audioIDsIter = sAudioPathIDMap.find(filePath);
    if (audioIDsIter != sAudioPathIDMap.end()) {
        //@Note: For safely iterating elements from the audioID list, we need to copy the list
        // since 'AudioEngine::remove' may be invoked in 'sAudioEngineImpl->stop' synchronously.
        // If this happens, it will break the iteration, and crash will appear on some devices.
        ccstd::list<int> copiedIDs(audioIDsIter->second);

        for (int audioID : copiedIDs) {
            sAudioEngineImpl->stop(audioID);

            auto itInfo = sAudioIDInfoMap.find(audioID);
            if (itInfo != sAudioIDInfoMap.end()) {
                if (itInfo->second.profileHelper) {
                    itInfo->second.profileHelper->audioIDs.remove(audioID);
                }
                sAudioIDInfoMap.erase(audioID);
            }
        }
        sAudioPathIDMap.erase(filePath);
    }

    if (sAudioEngineImpl) {
        sAudioEngineImpl->uncache(filePath);
    }
}
// Stops all audio and clears the backend's entire cache.
void AudioEngine::uncacheAll() {
    if (!sAudioEngineImpl) {
        return;
    }
    stopAll();
    sAudioEngineImpl->uncacheAll();
}

// Returns the audio length in seconds, caching it on first query;
// TIME_UNKNOWN for unknown IDs or audio still initializing.
float AudioEngine::getDuration(int audioID) {
    auto it = sAudioIDInfoMap.find(audioID);
    if (it != sAudioIDInfoMap.end() && it->second.state != AudioState::INITIALIZING) {
        if (it->second.duration == TIME_UNKNOWN) {
            it->second.duration = sAudioEngineImpl->getDuration(audioID);
        }
        return it->second.duration;
    }

    return TIME_UNKNOWN;
}

// Probes a file's duration without playing it; TIME_UNKNOWN when the
// backend is unavailable.
float AudioEngine::getDurationFromFile(const ccstd::string &filePath) {
    lazyInit();
    if (sAudioEngineImpl) {
        return sAudioEngineImpl->getDurationFromFile(filePath);
    }

    return TIME_UNKNOWN;
}

// Seeks to `time` seconds; false for unknown or still-initializing audio.
bool AudioEngine::setCurrentTime(int audioID, float time) {
    auto it = sAudioIDInfoMap.find(audioID);
    if (it != sAudioIDInfoMap.end() && it->second.state != AudioState::INITIALIZING) {
        return sAudioEngineImpl->setCurrentTime(audioID, time);
    }

    return false;
}

float AudioEngine::getCurrentTime(int audioID) {
    auto it = sAudioIDInfoMap.find(audioID);
    if (it != sAudioIDInfoMap.end() && it->second.state != AudioState::INITIALIZING) {
        return sAudioEngineImpl->getCurrentTime(audioID);
    }

    return 0.0F;
}

// Registers a callback fired when the given audio finishes playing.
void AudioEngine::setFinishCallback(int audioID, const std::function<void(int, const ccstd::string &)> &callback) {
    auto it = sAudioIDInfoMap.find(audioID);
    if (it != sAudioIDInfoMap.end()) {
        sAudioEngineImpl->setFinishCallback(audioID, callback);
    }
}

// Caps simultaneous instances; rejects values outside (0, MAX_AUDIOINSTANCES].
bool AudioEngine::setMaxAudioInstance(int maxInstances) {
    if (maxInstances > 0 && maxInstances <= MAX_AUDIOINSTANCES) {
        sMaxInstances = maxInstances;
        return true;
    }

    return false;
}

bool AudioEngine::isLoop(int audioID) {
    auto tmpIterator = sAudioIDInfoMap.find(audioID);
    if (tmpIterator != sAudioIDInfoMap.end()) {
        return tmpIterator->second.loop;
    }

    CC_LOG_INFO("AudioEngine::isLoop-->The audio instance %d is non-existent", audioID);
    return false;
}

float AudioEngine::getVolume(int audioID) {
    auto tmpIterator = sAudioIDInfoMap.find(audioID);
    if (tmpIterator != sAudioIDInfoMap.end()) {
        return tmpIterator->second.volume;
    }

    CC_LOG_INFO("AudioEngine::getVolume-->The audio instance %d is non-existent", audioID);
    return 0.0F;
}

// Returns the instance state, or AudioState::ERROR for unknown IDs.
AudioEngine::AudioState AudioEngine::getState(int audioID) {
    auto tmpIterator = sAudioIDInfoMap.find(audioID);
    if (tmpIterator != sAudioIDInfoMap.end()) {
        return tmpIterator->second.state;
    }

    return AudioState::ERROR;
}
// Returns the profile of a playing instance, or nullptr when the ID is
// unknown or the instance was played without a profile.
AudioProfile *AudioEngine::getProfile(int audioID) {
    auto it = sAudioIDInfoMap.find(audioID);
    // BUGFIX: profileHelper is nullptr for audio played before a default
    // profile existed (see play2d); dereferencing it unchecked was UB.
    if (it != sAudioIDInfoMap.end() && it->second.profileHelper != nullptr) {
        return &it->second.profileHelper->profile;
    }

    return nullptr;
}
// Returns the engine-wide default profile, creating it on first call.
AudioProfile *AudioEngine::getDefaultProfile() {
    if (sDefaultProfileHelper == nullptr) {
        sDefaultProfileHelper = ccnew ProfileHelper();
    }

    return &sDefaultProfileHelper->profile;
}

// Looks up a named profile registered via play2d; nullptr when absent.
AudioProfile *AudioEngine::getProfile(const ccstd::string &name) {
    auto it = sAudioPathProfileHelperMap.find(name);
    if (it != sAudioPathProfileHelperMap.end()) {
        return &it->second.profile;
    }

    return nullptr;
}
// Preloads an audio file into the backend cache. The optional callback is
// invoked with false on failure (engine disabled / file missing); success
// reporting is delegated to the backend.
void AudioEngine::preload(const ccstd::string &filePath, const std::function<void(bool isSuccess)> &callback) {
    if (!isEnabled()) {
        // BUGFIX: callback may be empty; invoking an empty std::function
        // throws std::bad_function_call. Guard it like the branch below does.
        if (callback) {
            callback(false);
        }
        return;
    }

    lazyInit();

    if (sAudioEngineImpl) {
        if (!FileUtils::getInstance()->isFileExist(filePath)) {
            if (callback) {
                callback(false);
            }
            return;
        }

        sAudioEngineImpl->preload(filePath, callback);
    }
}
// Queues a task on the shared audio thread pool (no-op where the pool is
// not created, e.g. when the backend failed to initialize).
void AudioEngine::addTask(const std::function<void()> &task) {
    lazyInit();

    if (sAudioEngineImpl && sThreadPool) {
        sThreadPool->addTask(task);
    }
}

// Number of tracked audio instances (any state, not only PLAYING).
int AudioEngine::getPlayingAudioCount() {
    return static_cast<int>(sAudioIDInfoMap.size());
}
// Globally enables/disables the engine; disabling stops all current audio.
void AudioEngine::setEnabled(bool isEnabled) {
    if (sIsEnabled == isEnabled) {
        return;
    }
    sIsEnabled = isEnabled;
    if (!isEnabled) {
        stopAll();
    }
}

bool AudioEngine::isEnabled() {
    return sIsEnabled;
}
// Returns the PCM header for a cached audio file; a default-constructed
// header when the backend cannot be initialized.
PCMHeader AudioEngine::getPCMHeader(const char *url) {
    // BUGFIX: lazyInit() can fail and leave sAudioEngineImpl null; the old
    // code dereferenced it unconditionally.
    if (!lazyInit() || sAudioEngineImpl == nullptr) {
        return {};
    }
    return sAudioEngineImpl->getPCMHeader(url);
}

// Returns raw PCM samples for one channel; empty when the backend is unavailable.
ccstd::vector<uint8_t> AudioEngine::getOriginalPCMBuffer(const char *url, uint32_t channelID) {
    if (!lazyInit() || sAudioEngineImpl == nullptr) {
        return {};
    }
    return sAudioEngineImpl->getOriginalPCMBuffer(url, channelID);
}
} // namespace cc

View File

@@ -0,0 +1,45 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "AssetFd"
#include "audio/android/AssetFd.h"
#include "audio/android/cutils/log.h"
namespace cc {
// Takes ownership of the given file descriptor; it is closed in the destructor.
AssetFd::AssetFd(int assetFd)
: _assetFd(assetFd) {
}

AssetFd::~AssetFd() {
    ALOGV("~AssetFd: %d", _assetFd);
    // Only positive descriptors are treated as owned (fd 0 is never an asset fd).
    if (_assetFd > 0) {
        ::close(_assetFd);
        _assetFd = 0;
    }
} // removed stray ';' that followed the destructor body
} // namespace cc

View File

@@ -0,0 +1,42 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <unistd.h>
namespace cc {
// RAII wrapper owning a POSIX file descriptor for an asset; the fd is
// closed (when > 0) in the destructor defined in AssetFd.cpp.
class AssetFd {
public:
    // NOTE(review): single-argument ctor is implicit and the class is
    // copyable; copying would double-close the fd in ~AssetFd. Consider
    // `explicit` plus deleted copy operations — verify no caller relies
    // on implicit conversion or copies before changing the interface.
    AssetFd(int assetFd);
    ~AssetFd();

    // Non-owning accessor for the wrapped descriptor.
    inline int getFd() const { return _assetFd; };

private:
    int _assetFd; // owned descriptor; closed by the destructor when > 0
};
} // namespace cc

View File

@@ -0,0 +1,77 @@
/*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include "audio/android/utils/Errors.h"
namespace cc {
// ----------------------------------------------------------------------------
// Abstract producer of audio frames (from AOSP): consumers pull contiguous
// frame buffers with getNextBuffer() and return them with releaseBuffer().
class AudioBufferProvider {
public:
    // IDEA: merge with AudioTrackShared::Buffer, AudioTrack::Buffer, and AudioRecord::Buffer
    // and rename getNextBuffer() to obtainBuffer()

    // View over a run of contiguous frames; `raw` aliases typed pointers.
    struct Buffer {
        Buffer() : raw(NULL), frameCount(0) {}
        union {
            void *raw;
            short *i16;
            int8_t *i8;
        };
        size_t frameCount;
    };

    virtual ~AudioBufferProvider() {}

    // value representing an invalid presentation timestamp
    static const int64_t kInvalidPTS = 0x7FFFFFFFFFFFFFFFLL; // <stdint.h> is too painful

    // pts is the local time when the next sample yielded by getNextBuffer
    // will be rendered.
    // Pass kInvalidPTS if the PTS is unknown or not applicable.
    // On entry:
    //  buffer              != NULL
    //  buffer->raw         unused
    //  buffer->frameCount  maximum number of desired frames
    // On successful return:
    //  status              NO_ERROR
    //  buffer->raw         non-NULL pointer to buffer->frameCount contiguous available frames
    //  buffer->frameCount  number of contiguous available frames at buffer->raw,
    //                      0 < buffer->frameCount <= entry value
    // On error return:
    //  status              != NO_ERROR
    //  buffer->raw         NULL
    //  buffer->frameCount  0
    virtual status_t getNextBuffer(Buffer *buffer, int64_t pts = kInvalidPTS) = 0;

    // Release (a portion of) the buffer previously obtained by getNextBuffer().
    // It is permissible to call releaseBuffer() multiple times per getNextBuffer().
    // On entry:
    //  buffer->frameCount  number of frames to release, must be <= number of frames
    //                      obtained but not yet released
    //  buffer->raw         unused
    // On return:
    //  buffer->frameCount  0; implementation MUST set to zero
    //  buffer->raw         undefined; implementation is PERMITTED to set to any value,
    //                      so if caller needs to continue using this buffer it must
    //                      keep track of the pointer itself
    virtual void releaseBuffer(Buffer *buffer) = 0;
};
// ----------------------------------------------------------------------------
} // namespace cc

View File

@@ -0,0 +1,260 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "AudioDecoder"
#include "audio/android/AudioDecoder.h"
#include "audio/android/AudioResampler.h"
#include "audio/android/PcmBufferProvider.h"
#include <chrono>
#include <thread>
namespace cc {
size_t AudioDecoder::fileRead(void *ptr, size_t size, size_t nmemb, void *datasource) {
    // fread-style callback: copies up to size*nmemb bytes from the decoder's
    // in-memory file data into `ptr`, advancing the read cursor.
    auto *decoder = (AudioDecoder *)datasource;
    const auto requested = (ssize_t)(nmemb * size);
    const auto remaining = (ssize_t)(decoder->_fileData.getSize() - decoder->_fileCurrPos);
    const ssize_t bytesToCopy = std::min(remaining, requested);
    if (bytesToCopy > 0) {
        memcpy(ptr, (unsigned char *)decoder->_fileData.getBytes() + decoder->_fileCurrPos, bytesToCopy);
        decoder->_fileCurrPos += bytesToCopy;
    }
    // ALOGD("File size: %d, After fileRead _fileCurrPos %d", (int)decoder->_fileData.getSize(), decoder->_fileCurrPos);
    return bytesToCopy;
}
// fseek-style callback over the in-memory file data.
// Mirrors fseek(3) semantics: the byte `offset` applies to all three whence
// modes. Fix: the SEEK_END branch previously ignored `offset` entirely and
// always jumped to the end of the data, which broke end-relative seeks with a
// non-zero offset (behavior for the common offset == 0 case is unchanged).
// Always returns 0 (success), matching the original callback contract.
int AudioDecoder::fileSeek(void *datasource, int64_t offset, int whence) {
    AudioDecoder *thiz = (AudioDecoder *)datasource;
    if (whence == SEEK_SET) {
        thiz->_fileCurrPos = static_cast<size_t>(offset);
    } else if (whence == SEEK_CUR) {
        thiz->_fileCurrPos = static_cast<size_t>(thiz->_fileCurrPos + offset);
    } else if (whence == SEEK_END) {
        thiz->_fileCurrPos = static_cast<size_t>(thiz->_fileData.getSize() + offset);
    }
    return 0;
}
// fclose-style callback: intentionally a no-op. The file bytes live in the
// decoder's _fileData member, which is released when the decoder is destroyed.
int AudioDecoder::fileClose(void *datasource) {
return 0;
}
long AudioDecoder::fileTell(void *datasource) {
    // ftell-style callback: reports the current read position within the
    // in-memory file data.
    auto *decoder = (AudioDecoder *)datasource;
    return static_cast<long>(decoder->_fileCurrPos);
}
AudioDecoder::AudioDecoder()
: _fileCurrPos(0), _sampleRate(-1) {
    // Pre-size the shared PCM output buffer so small decodes avoid an
    // immediate reallocation.
    _result.pcmBuffer = std::make_shared<ccstd::vector<char>>();
    _result.pcmBuffer->reserve(4096);
}
AudioDecoder::~AudioDecoder() {
// Trace-level log only; members (shared_ptr buffer, Data) clean up themselves.
ALOGV("~AudioDecoder() %p", this);
}
// Records the source url and the device output sample rate (the resample
// target used by start()); this stage never fails.
bool AudioDecoder::init(const ccstd::string &url, int sampleRate) {
_url = url;
_sampleRate = sampleRate;
return true;
}
// Drives the full pipeline: format-specific decode, then resample to the
// device rate, then mono->stereo interleave. Each stage is timed and logged;
// the first failing stage aborts the run.
bool AudioDecoder::start() {
    auto stepBegin = clockNow();
    bool succeed = decodeToPcm();
    if (!succeed) {
        ALOGE("decodeToPcm (%s) failed!", _url.c_str());
    } else {
        auto stepEnd = clockNow();
        ALOGD("Decoding (%s) to pcm data wasted %fms", _url.c_str(), intervalInMS(stepBegin, stepEnd));
        stepBegin = stepEnd;
        succeed = resample();
        if (!succeed) {
            ALOGE("resample (%s) failed!", _url.c_str());
        } else {
            stepEnd = clockNow();
            ALOGD("Resampling (%s) wasted %fms", _url.c_str(), intervalInMS(stepBegin, stepEnd));
            stepBegin = stepEnd;
            succeed = interleave();
            if (!succeed) {
                ALOGE("interleave (%s) failed!", _url.c_str());
            } else {
                stepEnd = clockNow();
                ALOGD("Interleave (%s) wasted %fms", _url.c_str(), intervalInMS(stepBegin, stepEnd));
            }
        }
    }
    ALOGV_IF(!succeed, "%s returns false, decode (%s)", __FUNCTION__, _url.c_str());
    return succeed;
}
// Converts _result's PCM data to the device output rate (_sampleRate) using
// the Android AudioResampler. No-op when the rates already match.
bool AudioDecoder::resample() {
if (_result.sampleRate == _sampleRate) {
ALOGI("No need to resample since the sample rate (%d) of the decoded pcm data is the same as the device output sample rate",
_sampleRate);
return true;
}
ALOGV("Resample: %d --> %d", _result.sampleRate, _sampleRate);
auto r = _result;
// Expose the decoded buffer as an AudioBufferProvider for the resampler.
PcmBufferProvider provider;
provider.init(r.pcmBuffer->data(), r.numFrames, r.pcmBuffer->size() / r.numFrames);
const int outFrameRate = _sampleRate;
// The resampler accumulates into a stereo int32 buffer
// (2 channels x 4 bytes per sample), regardless of the input channel count.
int outputChannels = 2;
size_t outputFrameSize = outputChannels * sizeof(int32_t);
auto outputFrames = static_cast<size_t>(((int64_t)r.numFrames * outFrameRate) / r.sampleRate);
size_t outputSize = outputFrames * outputFrameSize;
void *outputVAddr = malloc(outputSize);
auto resampler = AudioResampler::create(AUDIO_FORMAT_PCM_16_BIT, r.numChannels, outFrameRate,
AudioResampler::MED_QUALITY);
resampler->setSampleRate(r.sampleRate);
resampler->setVolume(AudioResampler::UNITY_GAIN_FLOAT, AudioResampler::UNITY_GAIN_FLOAT);
memset(outputVAddr, 0, outputSize);
ALOGV("resample() %zu output frames", outputFrames);
// Ovalues holds the per-iteration chunk sizes for the loop below. As written
// it always contains the single value `outputFrames`, so everything is
// resampled in one pass; the chunked structure is kept from the original
// Android test harness this code derives from.
ccstd::vector<int> Ovalues;
if (Ovalues.empty()) {
Ovalues.push_back(static_cast<int>(outputFrames));
}
for (size_t i = 0, j = 0; i < outputFrames;) {
size_t thisFrames = Ovalues[j++];
if (j >= Ovalues.size()) {
j = 0;
}
// Clamp the chunk so we never ask for more frames than remain.
if (thisFrames == 0 || thisFrames > outputFrames - i) {
thisFrames = outputFrames - i;
}
int outFrames = static_cast<int>(resampler->resample(static_cast<int32_t *>(outputVAddr) + outputChannels * i, thisFrames,
&provider));
ALOGV("outFrames: %d", outFrames);
i += thisFrames;
}
ALOGV("resample() complete");
resampler->reset();
ALOGV("reset() complete");
delete resampler;
resampler = nullptr;
// mono takes left channel only (out of stereo output pair)
// stereo and multichannel preserve all channels.
int channels = r.numChannels;
int32_t *out = (int32_t *)outputVAddr;
// Convert the Q4.27 accumulator samples back down to interleaved int16 PCM.
int16_t *convert = (int16_t *)malloc(outputFrames * channels * sizeof(int16_t));
const int volumeShift = 12; // shift requirement for Q4.27 to Q.15
// round to half towards zero and saturate at int16 (non-dithered)
const int roundVal = (1 << (volumeShift - 1)) - 1; // volumePrecision > 0
for (size_t i = 0; i < outputFrames; i++) {
for (int j = 0; j < channels; j++) {
int32_t s = out[i * outputChannels + j] + roundVal; // add offset here
if (s < 0) {
s = (s + 1) >> volumeShift; // round to 0
if (s < -32768) {
s = -32768;
}
} else {
s = s >> volumeShift;
if (s > 32767) {
s = 32767;
}
}
convert[i * channels + j] = int16_t(s);
}
}
// Reset result
_result.numFrames = static_cast<int>(outputFrames);
_result.sampleRate = outFrameRate;
// NOTE(review): reserve() below omits the channel count, so insert() may
// still trigger one reallocation for multi-channel data — harmless (it is
// only a capacity hint), but worth confirming the intent.
auto buffer = std::make_shared<ccstd::vector<char>>();
buffer->reserve(_result.numFrames * _result.bitsPerSample / 8);
buffer->insert(buffer->end(), (char *)convert,
(char *)convert + outputFrames * channels * sizeof(int16_t));
_result.pcmBuffer = buffer;
ALOGV("pcm buffer size: %d", (int)_result.pcmBuffer->size());
free(convert);
free(outputVAddr);
return true;
}
//-----------------------------------------------------------------
// Ensures the decoded PCM data is stereo: mono input is expanded by
// duplicating every 16-bit sample into both channels; stereo input is left
// untouched; any other channel count is rejected.
bool AudioDecoder::interleave() {
    if (_result.numChannels == 2) {
        ALOGI("Audio channel count is 2, no need to interleave");
        return true;
    }
    if (_result.numChannels != 1) {
        ALOGE("Audio channel count (%d) is wrong, interleave only supports converting mono to stereo!", _result.numChannels);
        return false;
    }
    // Mono -> fake stereo: emit each 2-byte sample twice.
    auto stereo = std::make_shared<ccstd::vector<char>>();
    stereo->reserve(_result.pcmBuffer->size() * 2);
    const size_t monoBytes = (size_t)(_result.numFrames * _result.bitsPerSample / 8);
    for (size_t offset = 0; offset < monoBytes; offset += 2) {
        const char lo = _result.pcmBuffer->at(offset);
        const char hi = _result.pcmBuffer->at(offset + 1);
        for (int channel = 0; channel < 2; ++channel) {
            stereo->push_back(lo);
            stereo->push_back(hi);
        }
    }
    _result.numChannels = 2;
    _result.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
    _result.pcmBuffer = stereo;
    return true;
}
} // namespace cc

View File

@@ -0,0 +1,61 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "audio/android/OpenSLHelper.h"
#include "audio/android/PcmData.h"
#include "base/Data.h"
namespace cc {
// Base class for the in-memory audio decoders: subclasses implement
// decodeToPcm() for a concrete format, while start() drives the common
// decode -> resample -> interleave pipeline and getResult() exposes the
// resulting 16-bit PCM data (mono input is expanded to stereo).
class AudioDecoder {
public:
AudioDecoder();
virtual ~AudioDecoder();
// Stores the source url and the device output sample rate (resample target).
virtual bool init(const ccstd::string &url, int sampleRate);
// Runs the full pipeline; returns false if any stage fails.
bool start();
// Returns a copy of the decoded PCM data descriptor.
inline PcmData getResult() { return _result; };
protected:
// Format-specific decoding into _result (implemented by subclasses).
virtual bool decodeToPcm() = 0;
// Converts _result to the device sample rate when the rates differ.
bool resample();
// Duplicates mono samples into a stereo buffer when needed.
bool interleave();
// stdio-style callbacks serving reads/seeks from the in-memory _fileData.
static size_t fileRead(void *ptr, size_t size, size_t nmemb, void *datasource);
static int fileSeek(void *datasource, int64_t offset, int whence);
static int fileClose(void *datasource);
static long fileTell(void *datasource); // NOLINT
ccstd::string _url;   // source path/url of the audio file
PcmData _result;      // decoded (and possibly resampled) PCM output
int _sampleRate;      // device output sample rate requested by the caller
Data _fileData;       // whole input file loaded into memory
size_t _fileCurrPos;  // read cursor used by the file callbacks
};
} // namespace cc

View File

@@ -0,0 +1,75 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "AudioDecoderMp3"
#include "audio/android/AudioDecoderMp3.h"
#include "audio/android/mp3reader.h"
#include "platform/FileUtils.h"
namespace cc {
// Logs construction only; actual setup happens in AudioDecoder::init().
AudioDecoderMp3::AudioDecoderMp3() {
ALOGV("Create AudioDecoderMp3");
}
// Nothing mp3-specific to release; the base class owns all resources.
AudioDecoderMp3::~AudioDecoderMp3() {
}
bool AudioDecoderMp3::decodeToPcm() {
_fileData = FileUtils::getInstance()->getDataFromFile(_url);
if (_fileData.isNull()) {
return false;
}
mp3_callbacks callbacks;
callbacks.read = AudioDecoder::fileRead;
callbacks.seek = AudioDecoder::fileSeek;
callbacks.close = AudioDecoder::fileClose;
callbacks.tell = AudioDecoder::fileTell;
int numChannels = 0;
int sampleRate = 0;
int numFrames = 0;
if (EXIT_SUCCESS == decodeMP3(&callbacks, this, *_result.pcmBuffer, &numChannels, &sampleRate, &numFrames) && numChannels > 0 && sampleRate > 0 && numFrames > 0) {
_result.numChannels = numChannels;
_result.sampleRate = sampleRate;
_result.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
_result.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
_result.channelMask = numChannels == 1 ? SL_SPEAKER_FRONT_CENTER : (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT);
_result.endianness = SL_BYTEORDER_LITTLEENDIAN;
_result.numFrames = numFrames;
_result.duration = 1.0f * numFrames / sampleRate;
ccstd::string info = _result.toString();
ALOGI("Original audio info: %s, total size: %d", info.c_str(), (int)_result.pcmBuffer->size());
return true;
}
ALOGE("Decode MP3 (%s) failed, channels: %d, rate: %d, frames: %d", _url.c_str(), numChannels, sampleRate, numFrames);
return false;
}
} // namespace cc

View File

@@ -0,0 +1,41 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "audio/android/AudioDecoder.h"
namespace cc {
// Decoder for .mp3 files, backed by the bundled mp3reader implementation.
// Instances are created only through AudioDecoderProvider (members are
// protected; the provider is a friend).
class AudioDecoderMp3 : public AudioDecoder {
protected:
AudioDecoderMp3();
virtual ~AudioDecoderMp3();
// Decodes the whole in-memory mp3 file into _result's PCM buffer.
virtual bool decodeToPcm() override;
friend class AudioDecoderProvider;
};
} // namespace cc

View File

@@ -0,0 +1,101 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "AudioDecoderOgg"
#include "audio/android/AudioDecoderOgg.h"
#include "platform/FileUtils.h"
namespace cc {
// Logs construction only; actual setup happens in AudioDecoder::init().
AudioDecoderOgg::AudioDecoderOgg() {
ALOGV("Create AudioDecoderOgg");
}
// Nothing ogg-specific to release; the base class owns all resources.
AudioDecoderOgg::~AudioDecoderOgg() {
}
int AudioDecoderOgg::fseek64Wrap(void *datasource, ogg_int64_t off, int whence) {
    // Adapts vorbisfile's 64-bit seek callback signature onto the shared
    // AudioDecoder::fileSeek implementation.
    return AudioDecoder::fileSeek(datasource, static_cast<long>(off), whence);
}
// Decodes the whole ogg/vorbis file into _result's PCM buffer via tremolo,
// serving reads/seeks from the in-memory _fileData through the AudioDecoder
// callbacks.
// Fixes over the original:
//  - each ov_read() request is clamped to the space remaining in pcmBuffer,
//    so a stream yielding more data than ov_pcm_total() predicted can no
//    longer write past the end of the allocation;
//  - negative ov_read() error codes are no longer accumulated into curPos.
bool AudioDecoderOgg::decodeToPcm() {
    _fileData = FileUtils::getInstance()->getDataFromFile(_url);
    if (_fileData.isNull()) {
        return false;
    }
    ov_callbacks callbacks;
    callbacks.read_func = AudioDecoder::fileRead;
    callbacks.seek_func = AudioDecoderOgg::fseek64Wrap;
    callbacks.close_func = AudioDecoder::fileClose;
    callbacks.tell_func = AudioDecoder::fileTell;
    _fileCurrPos = 0;
    OggVorbis_File vf;
    int ret = ov_open_callbacks(this, &vf, NULL, 0, callbacks);
    if (ret != 0) {
        ALOGE("Open file error, file: %s, ov_open_callbacks return %d", _url.c_str(), ret);
        return false;
    }
    // header
    auto vi = ov_info(&vf, -1);
    uint32_t pcmSamples = (uint32_t)ov_pcm_total(&vf, -1);
    uint32_t bufferSize = pcmSamples * vi->channels * sizeof(short);
    char *pcmBuffer = (char *)malloc(bufferSize);
    memset(pcmBuffer, 0, bufferSize);
    int currentSection = 0;
    long curPos = 0;
    long readBytes = 0;
    do {
        long remaining = (long)bufferSize - curPos;
        if (remaining <= 0) {
            break; // buffer full; don't let ov_read overrun it
        }
        long request = remaining < 4096 ? remaining : 4096;
        readBytes = ov_read(&vf, pcmBuffer + curPos, (int)request, &currentSection);
        if (readBytes > 0) {
            curPos += readBytes;
        }
    } while (readBytes > 0);
    if (curPos > 0) {
        // Insert the full predicted buffer (zero-padded tail, if any), since
        // numFrames below is set to the predicted pcmSamples.
        _result.pcmBuffer->insert(_result.pcmBuffer->end(), pcmBuffer, pcmBuffer + bufferSize);
        _result.numChannels = vi->channels;
        _result.sampleRate = vi->rate;
        _result.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
        _result.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
        _result.channelMask = vi->channels == 1 ? SL_SPEAKER_FRONT_CENTER : (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT);
        _result.endianness = SL_BYTEORDER_LITTLEENDIAN;
        _result.numFrames = pcmSamples;
        _result.duration = 1.0f * pcmSamples / vi->rate;
    } else {
        ALOGE("ov_read returns 0 byte!");
    }
    ov_clear(&vf);
    free(pcmBuffer);
    return (curPos > 0);
}
} // namespace cc

View File

@@ -0,0 +1,44 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "audio/android/AudioDecoder.h"
#include "tremolo/Tremolo/ivorbisfile.h"
namespace cc {
// Decoder for .ogg files, backed by the tremolo vorbis implementation.
// Instances are created only through AudioDecoderProvider (members are
// protected; the provider is a friend).
class AudioDecoderOgg : public AudioDecoder {
protected:
AudioDecoderOgg();
virtual ~AudioDecoderOgg();
// Adapts vorbisfile's 64-bit seek callback onto AudioDecoder::fileSeek.
static int fseek64Wrap(void *datasource, ogg_int64_t off, int whence);
// Decodes the whole in-memory ogg file into _result's PCM buffer.
virtual bool decodeToPcm() override;
friend class AudioDecoderProvider;
};
} // namespace cc

View File

@@ -0,0 +1,78 @@
/****************************************************************************
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "AudioDecoderProvider"
#include "audio/android/AudioDecoderProvider.h"
#include "audio/android/AudioDecoderMp3.h"
#include "audio/android/AudioDecoderOgg.h"
#include "audio/android/AudioDecoderSLES.h"
#include "audio/android/AudioDecoderWav.h"
#include "base/memory/Memory.h"
#include "platform/FileUtils.h"
namespace cc {
// Picks a decoder implementation based on the file extension (.ogg/.mp3/.wav
// use the bundled software decoders; anything else falls back to the OpenSL ES
// system decoder) and returns it fully initialized, or nullptr on failure.
// The caller owns the returned pointer (release via destroyAudioDecoder).
AudioDecoder *AudioDecoderProvider::createAudioDecoder(SLEngineItf engineItf, const ccstd::string &url, int bufferSizeInFrames, int sampleRate, const FdGetterCallback &fdGetterCallback) {
    ccstd::string extension = FileUtils::getInstance()->getFileExtension(url);
    ALOGV("url:%s, extension:%s", url.c_str(), extension.c_str());
    // Shared init-or-destroy step for the software decoders: the three
    // extension branches previously duplicated this block verbatim.
    auto initOrDestroy = [&](AudioDecoder *d) -> AudioDecoder * {
        if (d != nullptr && !d->init(url, sampleRate)) {
            delete d;
            d = nullptr;
        }
        return d;
    };
    if (extension == ".ogg") {
        return initOrDestroy(ccnew AudioDecoderOgg());
    }
    if (extension == ".mp3") {
        return initOrDestroy(ccnew AudioDecoderMp3());
    }
    if (extension == ".wav") {
        return initOrDestroy(ccnew AudioDecoderWav());
    }
    // Unknown extension: let the system (OpenSL ES) decoder try it.
    auto *slesDecoder = ccnew AudioDecoderSLES();
    if (slesDecoder->init(engineItf, url, bufferSizeInFrames, sampleRate, fdGetterCallback)) {
        return slesDecoder;
    }
    delete slesDecoder;
    return nullptr;
}
// Deletes *decoder and nulls the caller's pointer; safe to call with a null
// pointer or an already-null decoder.
void AudioDecoderProvider::destroyAudioDecoder(AudioDecoder **decoder) {
    if (decoder == nullptr || *decoder == nullptr) {
        return;
    }
    delete *decoder;
    *decoder = nullptr;
}
} // namespace cc

View File

@@ -0,0 +1,40 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "audio/android/OpenSLHelper.h"
namespace cc {
class AudioDecoder;
// Factory for AudioDecoder instances: picks the ogg/mp3/wav software decoders
// by file extension and falls back to the OpenSL ES system decoder otherwise.
class AudioDecoderProvider {
public:
// Returns a fully-initialized decoder or nullptr; caller owns the result.
static AudioDecoder *createAudioDecoder(SLEngineItf engineItf, const ccstd::string &url, int bufferSizeInFrames, int sampleRate, const FdGetterCallback &fdGetterCallback);
// Deletes *decoder and nulls the caller's pointer; null-safe.
static void destroyAudioDecoder(AudioDecoder **decoder);
};
} // namespace cc

View File

@@ -0,0 +1,588 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "AudioDecoderSLES"
#include "base/Macros.h"
#include "audio/android/AudioDecoderSLES.h"
#include "platform/FileUtils.h"
#include <mutex>
#include <thread>
namespace cc {
/* Explicitly requesting SL_IID_ANDROIDSIMPLEBUFFERQUEUE and SL_IID_PREFETCHSTATUS
* on the UrlAudioPlayer object for decoding, SL_IID_METADATAEXTRACTION for retrieving the
* format of the decoded audio */
#define NUM_EXPLICIT_INTERFACES_FOR_PLAYER 3
/* Size of the decode buffer queue */
#define NB_BUFFERS_IN_QUEUE 4
/* size of the struct to retrieve the PCM format metadata values: the values we're interested in
* are SLuint32, but it is saved in the data field of a SLMetadataInfo, hence the larger size.
 * Note that this size is queried and displayed at l.452 for demonstration/test purposes.
* */
#define PCM_METADATA_VALUE_SIZE 32
/* used to detect errors likely to have occurred when the OpenSL ES framework fails to open
* a resource, for instance because a file URI is invalid, or an HTTP server doesn't respond.
*/
#define PREFETCHEVENT_ERROR_CANDIDATE (SL_PREFETCHEVENT_STATUSCHANGE | SL_PREFETCHEVENT_FILLLEVELCHANGE)
//-----------------------------------------------------------------
static std::mutex __SLPlayerMutex; //NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
// Converts a frame count into a byte count: frames x bytes-per-sample x channels.
static int toBufferSizeInBytes(int bufferSizeInFrames, int sampleSize, int channelCount) {
    const int bytesPerFrame = sampleSize * channelCount;
    return bufferSizeInFrames * bytesPerFrame;
}
static int BUFFER_SIZE_IN_BYTES = 0; // NOLINT(readability-identifier-naming)
// Logs whether a metadata key lookup succeeded (index == -1 means not found).
static void checkMetaData(int index, const char *key) {
    if (index == -1) {
        ALOGE("Unable to find key %s", key);
        return;
    }
    ALOGV("Key %s is at index %d", key, index);
}
// C-style trampolines registered with OpenSL ES: each callback recovers the
// AudioDecoderSLES instance from the opaque `context` pointer and forwards
// to the corresponding member function.
class SLAudioDecoderCallbackProxy {
public:
//-----------------------------------------------------------------
/* Callback for "prefetch" events, here used to detect audio resource opening errors */
static void prefetchEventCallback(SLPrefetchStatusItf caller, void *context, SLuint32 event) {
auto *thiz = reinterpret_cast<AudioDecoderSLES *>(context);
thiz->prefetchCallback(caller, event);
}
// Buffer-queue callback: a decoded PCM buffer is ready to be consumed.
static void decPlayCallback(CCSLBufferQueueItf queueItf, void *context) {
auto *thiz = reinterpret_cast<AudioDecoderSLES *>(context);
thiz->decodeToPcmCallback(queueItf);
}
// Play-event callback: forwards head-position / end-of-stream events.
static void decProgressCallback(SLPlayItf caller, void *context, SLuint32 event) {
auto *thiz = reinterpret_cast<AudioDecoderSLES *>(context);
thiz->decodeProgressCallback(caller, event);
}
};
// Fix: _pcmData is now initialized to nullptr in the member-initializer list.
// The destructor unconditionally calls free(_pcmData), so destroying an
// instance whose init() never allocated the buffer previously freed an
// uninitialized pointer (undefined behavior). free(nullptr) is well-defined.
AudioDecoderSLES::AudioDecoderSLES()
: _engineItf(nullptr), _playObj(nullptr), _formatQueried(false), _prefetchError(false), _counter(0), _numChannelsKeyIndex(-1), _sampleRateKeyIndex(-1), _bitsPerSampleKeyIndex(-1), _containerSizeKeyIndex(-1), _channelMaskKeyIndex(-1), _endiannessKeyIndex(-1), _eos(false), _bufferSizeInFrames(-1), _assetFd(0), _fdGetterCallback(nullptr), _isDecodingCallbackInvoked(false), _pcmData(nullptr) {
    ALOGV("Create AudioDecoderSLES");
}
AudioDecoderSLES::~AudioDecoderSLES() {
// Destroy the OpenSL player object under the global player mutex; the extra
// scope releases the lock before the remaining cleanup.
{
std::lock_guard<std::mutex> lk(__SLPlayerMutex);
SL_DESTROY_OBJ(_playObj);
}
ALOGV("After destroying SL play object");
// Close the asset file descriptor obtained from _fdGetterCallback, if any.
if (_assetFd > 0) {
ALOGV("Closing assetFd: %d", _assetFd);
::close(_assetFd);
_assetFd = 0;
}
// NOTE(review): _pcmData is allocated in init(); if the object is destroyed
// without a successful init() this frees an unset pointer — confirm the
// constructor initializes it to nullptr.
free(_pcmData);
}
// Runs the base-class setup first; on success, records the OpenSL engine and
// allocates the zero-filled staging area that backs the decode buffer queue
// (NB_BUFFERS_IN_QUEUE buffers of BUFFER_SIZE_IN_BYTES each).
bool AudioDecoderSLES::init(SLEngineItf engineItf, const ccstd::string &url, int bufferSizeInFrames, int sampleRate, const FdGetterCallback &fdGetterCallback) {
    if (!AudioDecoder::init(url, sampleRate)) {
        return false;
    }
    _engineItf = engineItf;
    _bufferSizeInFrames = bufferSizeInFrames;
    _fdGetterCallback = fdGetterCallback;
    // 16-bit stereo: 2 bytes per sample, 2 channels.
    BUFFER_SIZE_IN_BYTES = toBufferSizeInBytes(bufferSizeInFrames, 2, 2);
    const size_t totalBytes = NB_BUFFERS_IN_QUEUE * BUFFER_SIZE_IN_BYTES;
    _pcmData = static_cast<char *>(malloc(totalBytes));
    memset(_pcmData, 0x00, totalBytes);
    return true;
}
bool AudioDecoderSLES::decodeToPcm() {
#if CC_PLATFORM == CC_PLATFORM_ANDROID
SLresult result;
/* Objects this application uses: one audio player */
SLObjectItf player;
/* Interfaces for the audio player */
CCSLBufferQueueItf decBuffQueueItf;
SLPrefetchStatusItf prefetchItf;
SLPlayItf playItf;
SLMetadataExtractionItf mdExtrItf;
/* Source of audio data for the decoding */
SLDataSource decSource;
// decUri & locFd should be defined here
SLDataLocator_URI decUri;
SLDataLocator_AndroidFD locFd;
/* Data sink for decoded audio */
SLDataSink decDest;
SLDataLocator_AndroidSimpleBufferQueue decBuffQueue;
SLDataFormat_PCM pcm;
SLboolean required[NUM_EXPLICIT_INTERFACES_FOR_PLAYER];
SLInterfaceID iidArray[NUM_EXPLICIT_INTERFACES_FOR_PLAYER];
/* Initialize arrays required[] and iidArray[] */
for (int i = 0; i < NUM_EXPLICIT_INTERFACES_FOR_PLAYER; i++) {
required[i] = SL_BOOLEAN_FALSE;
iidArray[i] = SL_IID_NULL;
}
/* ------------------------------------------------------ */
/* Configuration of the player */
/* Request the AndroidSimpleBufferQueue interface */
required[0] = SL_BOOLEAN_TRUE;
iidArray[0] = SL_IID_ANDROIDSIMPLEBUFFERQUEUE;
/* Request the PrefetchStatus interface */
required[1] = SL_BOOLEAN_TRUE;
iidArray[1] = SL_IID_PREFETCHSTATUS;
/* Request the PrefetchStatus interface */
required[2] = SL_BOOLEAN_TRUE;
iidArray[2] = SL_IID_METADATAEXTRACTION;
SLDataFormat_MIME formatMime = {SL_DATAFORMAT_MIME, nullptr, SL_CONTAINERTYPE_UNSPECIFIED};
decSource.pFormat = &formatMime;
if (_url[0] != '/') {
off_t start = 0;
off_t length = 0;
ccstd::string relativePath;
size_t position = _url.find("@assets/");
if (0 == position) {
// "@assets/" is at the beginning of the path and we don't want it
relativePath = _url.substr(strlen("@assets/"));
} else {
relativePath = _url;
}
_assetFd = _fdGetterCallback(relativePath, &start, &length);
if (_assetFd <= 0) {
ALOGE("Failed to open file descriptor for '%s'", _url.c_str());
return false;
}
// configure audio source
locFd = {SL_DATALOCATOR_ANDROIDFD, _assetFd, start, length};
decSource.pLocator = &locFd;
} else {
decUri = {SL_DATALOCATOR_URI, (SLchar *)_url.c_str()}; // NOLINT(google-readability-casting)
decSource.pLocator = &decUri;
}
/* Setup the data sink */
decBuffQueue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
decBuffQueue.numBuffers = NB_BUFFERS_IN_QUEUE;
/* set up the format of the data in the buffer queue */
pcm.formatType = SL_DATAFORMAT_PCM;
// IDEA: valid value required but currently ignored
pcm.numChannels = 2;
pcm.samplesPerSec = SL_SAMPLINGRATE_44_1;
pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
pcm.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
pcm.endianness = SL_BYTEORDER_LITTLEENDIAN;
decDest.pLocator = reinterpret_cast<void *>(&decBuffQueue);
decDest.pFormat = reinterpret_cast<void *>(&pcm);
{
std::lock_guard<std::mutex> lk(__SLPlayerMutex);
/* Create the audio player */
result = (*_engineItf)->CreateAudioPlayer(_engineItf, &player, &decSource, &decDest, NUM_EXPLICIT_INTERFACES_FOR_PLAYER, iidArray, required);
SL_RETURN_VAL_IF_FAILED(result, false, "CreateAudioPlayer failed");
_playObj = player;
/* Realize the player in synchronous mode. */
result = (*player)->Realize(player, SL_BOOLEAN_FALSE);
SL_RETURN_VAL_IF_FAILED(result, false, "Realize failed");
}
/* Get the play interface which is implicit */
result = (*player)->GetInterface(player, SL_IID_PLAY, reinterpret_cast<void *>(&playItf));
SL_RETURN_VAL_IF_FAILED(result, false, "GetInterface SL_IID_PLAY failed");
/* Set up the player callback to get events during the decoding */
// IDEA: currently ignored
result = (*playItf)->SetMarkerPosition(playItf, 2000);
SL_RETURN_VAL_IF_FAILED(result, false, "SetMarkerPosition failed");
result = (*playItf)->SetPositionUpdatePeriod(playItf, 500);
SL_RETURN_VAL_IF_FAILED(result, false, "SetPositionUpdatePeriod failed");
result = (*playItf)->SetCallbackEventsMask(playItf,
SL_PLAYEVENT_HEADATMARKER |
SL_PLAYEVENT_HEADATNEWPOS | SL_PLAYEVENT_HEADATEND);
SL_RETURN_VAL_IF_FAILED(result, false, "SetCallbackEventsMask failed");
result = (*playItf)->RegisterCallback(playItf, SLAudioDecoderCallbackProxy::decProgressCallback,
this);
SL_RETURN_VAL_IF_FAILED(result, false, "RegisterCallback failed");
ALOGV("Play callback registered");
/* Get the buffer queue interface which was explicitly requested */
result = (*player)->GetInterface(player, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
reinterpret_cast<void *>(&decBuffQueueItf));
SL_RETURN_VAL_IF_FAILED(result, false, "GetInterface SL_IID_ANDROIDSIMPLEBUFFERQUEUE failed");
/* Get the prefetch status interface which was explicitly requested */
result = (*player)->GetInterface(player, SL_IID_PREFETCHSTATUS, reinterpret_cast<void *>(&prefetchItf));
SL_RETURN_VAL_IF_FAILED(result, false, "GetInterface SL_IID_PREFETCHSTATUS failed");
/* Get the metadata extraction interface which was explicitly requested */
result = (*player)->GetInterface(player, SL_IID_METADATAEXTRACTION, reinterpret_cast<void *>(&mdExtrItf));
SL_RETURN_VAL_IF_FAILED(result, false, "GetInterface SL_IID_METADATAEXTRACTION failed");
/* ------------------------------------------------------ */
/* Initialize the callback and its context for the decoding buffer queue */
_decContext.playItf = playItf;
_decContext.metaItf = mdExtrItf;
_decContext.pDataBase = reinterpret_cast<int8_t *>(_pcmData);
_decContext.pData = _decContext.pDataBase;
_decContext.size = NB_BUFFERS_IN_QUEUE * BUFFER_SIZE_IN_BYTES;
result = (*decBuffQueueItf)->RegisterCallback(decBuffQueueItf, SLAudioDecoderCallbackProxy::decPlayCallback, this);
SL_RETURN_VAL_IF_FAILED(result, false, "decBuffQueueItf RegisterCallback failed");
/* Enqueue buffers to map the region of memory allocated to store the decoded data */
// ALOGV("Enqueueing buffer ");
for (int i = 0; i < NB_BUFFERS_IN_QUEUE; i++) {
result = (*decBuffQueueItf)->Enqueue(decBuffQueueItf, _decContext.pData, BUFFER_SIZE_IN_BYTES);
SL_RETURN_VAL_IF_FAILED(result, false, "Enqueue failed");
_decContext.pData += BUFFER_SIZE_IN_BYTES;
}
_decContext.pData = _decContext.pDataBase;
/* ------------------------------------------------------ */
/* Initialize the callback for prefetch errors, if we can't open the resource to decode */
result = (*prefetchItf)->RegisterCallback(prefetchItf, SLAudioDecoderCallbackProxy::prefetchEventCallback, this);
SL_RETURN_VAL_IF_FAILED(result, false, "prefetchItf RegisterCallback failed");
result = (*prefetchItf)->SetCallbackEventsMask(prefetchItf, PREFETCHEVENT_ERROR_CANDIDATE);
SL_RETURN_VAL_IF_FAILED(result, false, "prefetchItf SetCallbackEventsMask failed");
/* ------------------------------------------------------ */
/* Prefetch the data so we can get information about the format before starting to decode */
/* 1/ cause the player to prefetch the data */
result = (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_PAUSED);
SL_RETURN_VAL_IF_FAILED(result, false, "SetPlayState SL_PLAYSTATE_PAUSED failed");
/* 2/ block until data has been prefetched */
SLuint32 prefetchStatus = SL_PREFETCHSTATUS_UNDERFLOW;
SLuint32 timeOutIndex = 1000; //cjh time out prefetching after 2s
while ((prefetchStatus != SL_PREFETCHSTATUS_SUFFICIENTDATA) && (timeOutIndex > 0) &&
!_prefetchError) {
std::this_thread::sleep_for(std::chrono::milliseconds(2));
(*prefetchItf)->GetPrefetchStatus(prefetchItf, &prefetchStatus);
timeOutIndex--;
}
if (timeOutIndex == 0 || _prefetchError) {
ALOGE("Failure to prefetch data in time, exiting");
SL_RETURN_VAL_IF_FAILED(SL_RESULT_CONTENT_NOT_FOUND, false,
"Failure to prefetch data in time");
}
/* ------------------------------------------------------ */
/* Display duration */
SLmillisecond durationInMsec = SL_TIME_UNKNOWN;
result = (*playItf)->GetDuration(playItf, &durationInMsec);
SL_RETURN_VAL_IF_FAILED(result, false, "GetDuration failed");
if (durationInMsec == SL_TIME_UNKNOWN) {
ALOGV("Content duration is unknown");
} else {
ALOGV("Content duration is %dms", (int)durationInMsec);
}
/* ------------------------------------------------------ */
/* Display the metadata obtained from the decoder */
// This is for test / demonstration purposes only where we discover the key and value sizes
// of a PCM decoder. An application that would want to directly get access to those values
// can make assumptions about the size of the keys and their matching values (all SLuint32)
SLuint32 itemCount;
result = (*mdExtrItf)->GetItemCount(mdExtrItf, &itemCount);
SLuint32 i;
SLuint32 keySize;
SLuint32 valueSize;
SLMetadataInfo *keyInfo;
SLMetadataInfo *value;
for (i = 0; i < itemCount; i++) {
keyInfo = nullptr;
keySize = 0;
value = nullptr;
valueSize = 0;
result = (*mdExtrItf)->GetKeySize(mdExtrItf, i, &keySize);
SL_RETURN_VAL_IF_FAILED(result, false, "GetKeySize(%d) failed", (int)i);
result = (*mdExtrItf)->GetValueSize(mdExtrItf, i, &valueSize);
SL_RETURN_VAL_IF_FAILED(result, false, "GetValueSize(%d) failed", (int)i);
keyInfo = reinterpret_cast<SLMetadataInfo *>(malloc(keySize));
if (nullptr != keyInfo) {
result = (*mdExtrItf)->GetKey(mdExtrItf, i, keySize, keyInfo);
SL_RETURN_VAL_IF_FAILED(result, false, "GetKey(%d) failed", (int)i);
ALOGV("key[%d] size=%d, name=%s, value size=%d",
(int)i, (int)keyInfo->size, keyInfo->data, (int)valueSize);
/* find out the key index of the metadata we're interested in */
if (!strcmp(reinterpret_cast<char *>(keyInfo->data), ANDROID_KEY_PCMFORMAT_NUMCHANNELS)) {
_numChannelsKeyIndex = i;
} else if (!strcmp(reinterpret_cast<char *>(keyInfo->data), ANDROID_KEY_PCMFORMAT_SAMPLERATE)) {
_sampleRateKeyIndex = i;
} else if (!strcmp(reinterpret_cast<char *>(keyInfo->data), ANDROID_KEY_PCMFORMAT_BITSPERSAMPLE)) {
_bitsPerSampleKeyIndex = i;
} else if (!strcmp(reinterpret_cast<char *>(keyInfo->data), ANDROID_KEY_PCMFORMAT_CONTAINERSIZE)) {
_containerSizeKeyIndex = i;
} else if (!strcmp(reinterpret_cast<char *>(keyInfo->data), ANDROID_KEY_PCMFORMAT_CHANNELMASK)) {
_channelMaskKeyIndex = i;
} else if (!strcmp(reinterpret_cast<char *>(keyInfo->data), ANDROID_KEY_PCMFORMAT_ENDIANNESS)) {
_endiannessKeyIndex = i;
}
free(keyInfo);
}
}
checkMetaData(_numChannelsKeyIndex, ANDROID_KEY_PCMFORMAT_NUMCHANNELS);
checkMetaData(_sampleRateKeyIndex, ANDROID_KEY_PCMFORMAT_SAMPLERATE);
checkMetaData(_bitsPerSampleKeyIndex, ANDROID_KEY_PCMFORMAT_BITSPERSAMPLE);
checkMetaData(_containerSizeKeyIndex, ANDROID_KEY_PCMFORMAT_CONTAINERSIZE);
checkMetaData(_channelMaskKeyIndex, ANDROID_KEY_PCMFORMAT_CHANNELMASK);
checkMetaData(_endiannessKeyIndex, ANDROID_KEY_PCMFORMAT_ENDIANNESS);
/* ------------------------------------------------------ */
/* Start decoding */
result = (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_PLAYING);
SL_RETURN_VAL_IF_FAILED(result, false, "SetPlayState SL_PLAYSTATE_PLAYING failed");
ALOGV("Starting to decode");
/* Decode until the end of the stream is reached */
{
std::unique_lock<std::mutex> autoLock(_eosLock);
while (!_eos) {
_eosCondition.wait(autoLock);
}
}
ALOGV("EOS signaled");
/* ------------------------------------------------------ */
/* End of decoding */
/* Stop decoding */
result = (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_STOPPED);
SL_RETURN_VAL_IF_FAILED(result, false, "SetPlayState SL_PLAYSTATE_STOPPED failed");
ALOGV("Stopped decoding");
/* Destroy the UrlAudioPlayer object */
{
std::lock_guard<std::mutex> lk(__SLPlayerMutex);
SL_DESTROY_OBJ(_playObj);
}
ALOGV("After destroy player ...");
_result.numFrames =
static_cast<int>(_result.pcmBuffer->size() / _result.numChannels / (_result.bitsPerSample / 8));
ccstd::string info = _result.toString();
ALOGI("Original audio info: %s, total size: %d", info.c_str(), (int)_result.pcmBuffer->size());
#endif
return true;
}
//-----------------------------------------------------------------
// Marks the stream as finished and wakes the thread that is blocked in
// decodeToPcm() waiting on _eosCondition.
void AudioDecoderSLES::signalEos() {
    std::lock_guard<std::mutex> guard(_eosLock);
    _eos = true;
    _eosCondition.notify_one();
}
/**
 * Queries the decoded stream's PCM attributes (duration, sample rate, channel
 * count, bits per sample, container size, channel mask, endianness) through
 * the metadata-extraction interface and caches them into _result.
 * Idempotent: returns immediately once _formatQueried is set.
 * Requires the metadata key indices (_sampleRateKeyIndex, ...) to have been
 * discovered beforehand in decodeToPcm().
 */
void AudioDecoderSLES::queryAudioInfo() {
    if (_formatQueried) {
        return;
    }
    SLresult result;
    /* Get duration in callback where we use the callback context for the SLPlayItf*/
    SLmillisecond durationInMsec = SL_TIME_UNKNOWN;
    result = (*_decContext.playItf)->GetDuration(_decContext.playItf, &durationInMsec);
    SL_RETURN_IF_FAILED(result, "decodeProgressCallback,GetDuration failed");
    if (durationInMsec == SL_TIME_UNKNOWN) {
        ALOGV("Content duration is unknown (in dec callback)");
    } else {
        ALOGV("Content duration is %dms (in dec callback)", (int)durationInMsec);
        // SLmillisecond -> seconds
        _result.duration = durationInMsec / 1000.0F;
    }
    /* used to query metadata values */
    // NOTE(review): GetValue() is passed PCM_METADATA_VALUE_SIZE but writes into
    // a stack-allocated SLMetadataInfo; this assumes
    // PCM_METADATA_VALUE_SIZE <= sizeof(SLMetadataInfo) -- TODO confirm against
    // the macro definition to rule out a stack overwrite.
    SLMetadataInfo pcmMetaData;
    result = (*_decContext.metaItf)->GetValue(_decContext.metaItf, _sampleRateKeyIndex, PCM_METADATA_VALUE_SIZE, &pcmMetaData);
    SL_RETURN_IF_FAILED(result, "%s GetValue _sampleRateKeyIndex failed", __FUNCTION__);
    // Note: here we could verify the following:
    //       pcmMetaData->encoding == SL_CHARACTERENCODING_BINARY
    //       pcmMetaData->size == sizeof(SLuint32)
    //       but the call was successful for the PCM format keys, so those conditions are implied
    _result.sampleRate = *reinterpret_cast<SLuint32 *>(pcmMetaData.data);
    result = (*_decContext.metaItf)->GetValue(_decContext.metaItf, _numChannelsKeyIndex, PCM_METADATA_VALUE_SIZE, &pcmMetaData);
    SL_RETURN_IF_FAILED(result, "%s GetValue _numChannelsKeyIndex failed", __FUNCTION__);
    _result.numChannels = *reinterpret_cast<SLuint32 *>(pcmMetaData.data);
    result = (*_decContext.metaItf)->GetValue(_decContext.metaItf, _bitsPerSampleKeyIndex, PCM_METADATA_VALUE_SIZE, &pcmMetaData);
    SL_RETURN_IF_FAILED(result, "%s GetValue _bitsPerSampleKeyIndex failed", __FUNCTION__)
    _result.bitsPerSample = *reinterpret_cast<SLuint32 *>(pcmMetaData.data);
    result = (*_decContext.metaItf)->GetValue(_decContext.metaItf, _containerSizeKeyIndex, PCM_METADATA_VALUE_SIZE, &pcmMetaData);
    SL_RETURN_IF_FAILED(result, "%s GetValue _containerSizeKeyIndex failed", __FUNCTION__)
    _result.containerSize = *reinterpret_cast<SLuint32 *>(pcmMetaData.data);
    result = (*_decContext.metaItf)->GetValue(_decContext.metaItf, _channelMaskKeyIndex, PCM_METADATA_VALUE_SIZE, &pcmMetaData);
    SL_RETURN_IF_FAILED(result, "%s GetValue _channelMaskKeyIndex failed", __FUNCTION__)
    _result.channelMask = *reinterpret_cast<SLuint32 *>(pcmMetaData.data);
    result = (*_decContext.metaItf)->GetValue(_decContext.metaItf, _endiannessKeyIndex, PCM_METADATA_VALUE_SIZE, &pcmMetaData);
    SL_RETURN_IF_FAILED(result, "%s GetValue _endiannessKeyIndex failed", __FUNCTION__)
    _result.endianness = *reinterpret_cast<SLuint32 *>(pcmMetaData.data);
    // All attributes read successfully; never query again.
    _formatQueried = true;
}
// Invoked by OpenSL ES with prefetch status updates. Detects the fatal case
// where an error event arrives while nothing could be buffered (fill level 0
// and still underflowing): flags the failure and unblocks the decoder thread.
void AudioDecoderSLES::prefetchCallback(SLPrefetchStatusItf caller, SLuint32 event) {
    SLpermille fillLevel = 0;
    SLresult result = (*caller)->GetFillLevel(caller, &fillLevel);
    SL_RETURN_IF_FAILED(result, "GetFillLevel failed");
    SLuint32 prefetchStatus = 0;
    //ALOGV("PrefetchEventCallback: received event %u", event);
    result = (*caller)->GetPrefetchStatus(caller, &prefetchStatus);
    SL_RETURN_IF_FAILED(result, "GetPrefetchStatus failed");
    const bool errorEvent = (event & PREFETCHEVENT_ERROR_CANDIDATE) == PREFETCHEVENT_ERROR_CANDIDATE;
    if (errorEvent && fillLevel == 0 && prefetchStatus == SL_PREFETCHSTATUS_UNDERFLOW) {
        ALOGV("PrefetchEventCallback: Error while prefetching data, exiting");
        _prefetchError = true;
        signalEos();
    }
}
/* Callback for "playback" events, i.e. event happening during decoding */
void AudioDecoderSLES::decodeProgressCallback(SLPlayItf caller, SLuint32 event) {
    CC_UNUSED_PARAM(caller);
    if (SL_PLAYEVENT_HEADATEND & event) {
        ALOGV("SL_PLAYEVENT_HEADATEND");
        // If the buffer-queue callback never fired, the decoded data is still
        // sitting untouched in the NB_BUFFERS_IN_QUEUE pre-enqueued buffers:
        // grab the format info and copy every buffer out here instead.
        // NOTE(review): this path assumes _decContext.pData is still at
        // _decContext.pDataBase, which holds exactly because
        // decodeToPcmCallback (the only other writer of pData) was never
        // invoked -- that is what _isDecodingCallbackInvoked guards.
        if (!_isDecodingCallbackInvoked) {
            queryAudioInfo();
            for (int i = 0; i < NB_BUFFERS_IN_QUEUE; ++i) {
                _result.pcmBuffer->insert(_result.pcmBuffer->end(), _decContext.pData,
                                          _decContext.pData + BUFFER_SIZE_IN_BYTES);
                /* Increase data pointer by buffer size */
                _decContext.pData += BUFFER_SIZE_IN_BYTES;
            }
        }
        // Wake the thread blocked in decodeToPcm().
        signalEos();
    }
}
//-----------------------------------------------------------------
/* Callback for decoding buffer queue events */
// One buffer's worth of decoded PCM is available at _decContext.pData:
// append it to the output, hand the same storage back to the decoder, then
// advance (and wrap) the write pointer over the NB_BUFFERS_IN_QUEUE ring.
void AudioDecoderSLES::decodeToPcmCallback(CCSLBufferQueueItf queueItf) {
    _isDecodingCallbackInvoked = true;
    ALOGV("%s ...", __FUNCTION__);
    _counter++;
    SLresult result;
    // IDEA: ??
    // Periodic progress log only, every 1000 callbacks.
    if (_counter % 1000 == 0) {
        SLmillisecond msec;
        result = (*_decContext.playItf)->GetPosition(_decContext.playItf, &msec);
        SL_RETURN_IF_FAILED(result, "%s, GetPosition failed", __FUNCTION__);
        ALOGV("%s called (iteration %d): current position=%d ms", __FUNCTION__, _counter, (int)msec);
    }
    // Copy the decoded bytes out before the slot is re-enqueued below.
    _result.pcmBuffer->insert(_result.pcmBuffer->end(), _decContext.pData,
                              _decContext.pData + BUFFER_SIZE_IN_BYTES);
    // Re-enqueue the same storage so decoding can continue into it.
    result = (*queueItf)->Enqueue(queueItf, _decContext.pData, BUFFER_SIZE_IN_BYTES);
    SL_RETURN_IF_FAILED(result, "%s, Enqueue failed", __FUNCTION__);
    /* Increase data pointer by buffer size */
    _decContext.pData += BUFFER_SIZE_IN_BYTES;
    // Wrap back to the first buffer once the end of the ring is reached.
    if (_decContext.pData >= _decContext.pDataBase + (NB_BUFFERS_IN_QUEUE * BUFFER_SIZE_IN_BYTES)) {
        _decContext.pData = _decContext.pDataBase;
    }
    // Note: adding a sleep here or any sync point is a way to slow down the decoding, or
    // synchronize it with some other event, as the OpenSL ES framework will block until the
    // buffer queue callback return to proceed with the decoding.
#if 0
    /* Example: buffer queue state display */
    SLAndroidSimpleBufferQueueState decQueueState;
    result =(*queueItf)->GetState(queueItf, &decQueueState);
    SL_RETURN_IF_FAILED(result, "decQueueState.GetState failed");
    ALOGV("DecBufferQueueCallback now has _decContext.pData=%p, _decContext.pDataBase=%p, queue: "
          "count=%u playIndex=%u, count: %d",
          _decContext.pData, _decContext.pDataBase, decQueueState.count, decQueueState.index, _counter);
#endif
#if 0
    /* Example: display position in callback where we use the callback context for the SLPlayItf*/
    SLmillisecond posMsec = SL_TIME_UNKNOWN;
    result = (*_decContext.playItf)->GetPosition(_decContext.playItf, &posMsec);
    SL_RETURN_IF_FAILED(result, "decodeToPcmCallback,GetPosition2 failed");
    if (posMsec == SL_TIME_UNKNOWN) {
        ALOGV("Content position is unknown (in dec callback)");
    } else {
        ALOGV("Content position is %ums (in dec callback)",
              posMsec);
    }
#endif
    // Cache the stream's PCM attributes at the first opportunity (no-op after
    // the first successful query).
    queryAudioInfo();
}
} // namespace cc

View File

@@ -0,0 +1,96 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <condition_variable>
#include <mutex>
#include "audio/android/AudioDecoder.h"
#include "audio/android/utils/Compat.h"
namespace cc {
/**
 * Decodes a compressed audio stream to raw PCM by driving an OpenSL ES audio
 * player whose data sink is an Android simple buffer queue.
 * Instances are created by AudioDecoderProvider; decodeToPcm() blocks the
 * calling thread (waiting on _eosCondition) until the whole stream has been
 * decoded or a prefetch error occurs.
 */
class AudioDecoderSLES : public AudioDecoder {
protected:
    AudioDecoderSLES();
    ~AudioDecoderSLES() override;
    // Prepares the decoder. |fdGetterCallback| resolves asset-relative URLs to
    // file descriptors; |url| may also be an absolute path.
    bool init(SLEngineItf engineItf, const ccstd::string &url, int bufferSizeInFrames, int sampleRate, const FdGetterCallback &fdGetterCallback);
    bool decodeToPcm() override;
private:
    // Reads the stream's PCM attributes via the metadata interface; idempotent.
    void queryAudioInfo();
    // Wakes the thread blocked in decodeToPcm() waiting for end-of-stream.
    void signalEos();
    // Buffer-queue callback: one decoded buffer is ready to be copied out.
    void decodeToPcmCallback(CCSLBufferQueueItf queueItf);
    // Prefetch-status callback: detects unrecoverable prefetch errors.
    void prefetchCallback(SLPrefetchStatusItf caller, SLuint32 event);
    // Play-event callback: handles SL_PLAYEVENT_HEADATEND (end of stream).
    void decodeProgressCallback(SLPlayItf caller, SLuint32 event);
    // Engine interface supplied by init(); not destroyed here.
    SLEngineItf _engineItf;
    // The decoding player object; destroyed at the end of decodeToPcm().
    SLObjectItf _playObj;
    /* Local storage for decoded audio data */
    char *_pcmData;
    /* we only want to query / display the PCM format once */
    bool _formatQueried;
    /* Used to signal prefetching failures */
    bool _prefetchError;
    /* to display the number of decode iterations */
    int _counter;
    /* metadata key index for the PCM format information we want to retrieve */
    int _numChannelsKeyIndex;
    int _sampleRateKeyIndex;
    int _bitsPerSampleKeyIndex;
    int _containerSizeKeyIndex;
    int _channelMaskKeyIndex;
    int _endiannessKeyIndex;
    /* to signal to the test app the end of the stream to decode has been reached */
    bool _eos;
    std::mutex _eosLock;
    std::condition_variable _eosCondition;
    /* Structure for passing information to callback function */
    typedef struct CallbackCntxt_ { //NOLINT(modernize-use-using, readability-identifier-naming)
        SLPlayItf playItf;
        SLMetadataExtractionItf metaItf;
        SLuint32 size;     // total ring size in bytes (NB_BUFFERS_IN_QUEUE * BUFFER_SIZE_IN_BYTES)
        SLint8 *pDataBase; // Base address of local audio data storage
        SLint8 *pData;     // Current address of local audio data storage
    } CallbackCntxt;
    CallbackCntxt _decContext;
    // Device buffer-size hint supplied to init().
    int _bufferSizeInFrames;
    // File descriptor for in-APK assets (obtained via _fdGetterCallback).
    // NOTE(review): ownership/closing of this fd is not visible here -- confirm.
    int _assetFd;
    // Resolves asset paths to file descriptors (set in init()).
    FdGetterCallback _fdGetterCallback;
    // Set once decodeToPcmCallback has fired at least once; lets the
    // HEADATEND handler know whether the pre-enqueued buffers were consumed.
    bool _isDecodingCallbackInvoked;
    friend class SLAudioDecoderCallbackProxy;
    friend class AudioDecoderProvider;
};
} // namespace cc

View File

@@ -0,0 +1,106 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "AudioDecoderWav"
#include "audio/android/AudioDecoderWav.h"
#include "audio/common/utils/include/tinysndfile.h"
#include "platform/FileUtils.h"
namespace cc {
using namespace sf; //NOLINT
// Default-constructs the WAV decoder; all work happens in decodeToPcm().
AudioDecoderWav::AudioDecoderWav() {
    ALOGV("Create AudioDecoderWav");
}
AudioDecoderWav::~AudioDecoderWav() = default;
// tinysndfile 'open' callback: the file bytes are already cached on the
// decoder (passed in as |user|), so the decoder itself is the stream handle.
void *AudioDecoderWav::onWavOpen(const char * /*path*/, void *user) {
    return user;
}
// tinysndfile 'seek' callback: forwards to AudioDecoder::fileSeek with the
// offset widened to int64_t.
int AudioDecoderWav::onWavSeek(void *datasource, long offset, int whence) { //NOLINT(google-runtime-int)
    return AudioDecoder::fileSeek(datasource, static_cast<int64_t>(offset), whence);
}
// tinysndfile 'close' callback: nothing to release, the decoder owns the data.
int AudioDecoderWav::onWavClose(void * /*datasource*/) {
    return 0;
}
/**
 * Decodes the WAV file at _url into 16-bit PCM stored in _result.
 * The whole file is read into memory first; tinysndfile then parses it via
 * the in-memory callbacks registered below.
 * @return true on success, false if the file is missing/unparsable/empty or
 *         the sample buffer cannot be allocated.
 */
bool AudioDecoderWav::decodeToPcm() {
    _fileData = FileUtils::getInstance()->getDataFromFile(_url);
    if (_fileData.isNull()) {
        return false;
    }
    SF_INFO info;
    // Route all tinysndfile I/O through our in-memory helpers.
    snd_callbacks cb;
    cb.open = onWavOpen;
    cb.read = AudioDecoder::fileRead;
    cb.seek = onWavSeek;
    cb.close = onWavClose;
    cb.tell = AudioDecoder::fileTell;
    SNDFILE *handle = nullptr;
    bool ret = false;
    do {
        handle = sf_open_read(_url.c_str(), &info, &cb, this);
        if (handle == nullptr) {
            break;
        }
        if (info.frames == 0) {
            break;
        }
        ALOGD("wav info: frames: %d, samplerate: %d, channels: %d, format: %d", info.frames, info.samplerate, info.channels, info.format);
        size_t bufSize = sizeof(int16_t) * info.frames * info.channels;
        auto *buf = static_cast<unsigned char *>(malloc(bufSize));
        if (buf == nullptr) {
            // Allocation can fail for very large files; bail out instead of
            // dereferencing a null buffer below.
            break;
        }
        sf_count_t readFrames = sf_readf_short(handle, reinterpret_cast<int16_t *>(buf), info.frames);
        CC_ASSERT(readFrames == info.frames);
        _result.pcmBuffer->insert(_result.pcmBuffer->end(), buf, buf + bufSize);
        // Describe the PCM produced above; sf_readf_short always yields
        // 16-bit little-endian samples.
        _result.numChannels = info.channels;
        _result.sampleRate = info.samplerate;
        _result.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
        _result.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
        _result.channelMask = _result.numChannels == 1 ? SL_SPEAKER_FRONT_CENTER : (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT);
        _result.endianness = SL_BYTEORDER_LITTLEENDIAN;
        _result.numFrames = info.frames;
        _result.duration = static_cast<float>(1.0F * info.frames / _result.sampleRate); //NOLINT
        free(buf);
        ret = true;
    } while (false);
    if (handle != nullptr) {
        sf_close(handle);
    }
    return ret;
}
} // namespace cc

View File

@@ -0,0 +1,46 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "audio/android/AudioDecoder.h"
namespace cc {
class AudioDecoderWav : public AudioDecoder {
protected:
AudioDecoderWav();
virtual ~AudioDecoderWav();
virtual bool decodeToPcm() override;
static void *onWavOpen(const char *path, void *user);
static int onWavSeek(void *datasource, long offset, int whence);
static int onWavClose(void *datasource);
friend class AudioDecoderProvider;
};
} // namespace cc

View File

@@ -0,0 +1,507 @@
/****************************************************************************
Copyright (c) 2014-2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "AudioEngineImpl"
#include "audio/android/AudioEngine-inl.h"
#include <unistd.h>
// for native asset manager
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#include <android/asset_manager.h>
#include <android/asset_manager_jni.h>
#include <android/log.h>
#endif
#include <sys/types.h>
#include <mutex>
#include <thread>
#include "application/ApplicationManager.h"
#include "audio/include/AudioEngine.h"
#include "base/Log.h"
#include "base/Scheduler.h"
#include "base/UTF8.h"
#include "base/memory/Memory.h"
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#include "platform/android/FileUtils-android.h"
#include "platform/java/jni/JniHelper.h"
#include "platform/java/jni/JniImp.h"
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
#include "cocos/platform/openharmony/FileUtils-OpenHarmony.h"
#endif
#include "audio/android/AudioDecoder.h"
#include "audio/android/AudioDecoderProvider.h"
#include "audio/android/AudioPlayerProvider.h"
#include "audio/android/IAudioPlayer.h"
#include "audio/android/ICallerThreadUtils.h"
#include "audio/android/UrlAudioPlayer.h"
#include "audio/android/cutils/log.h"
#include "engine/EngineEvents.h"
using namespace cc; //NOLINT
// Audio focus values synchronized with which in cocos/platform/android/java/src/com/cocos/lib/CocosNativeActivity.java
namespace {
// Non-owning pointer to the single live engine instance (set/cleared by the
// AudioEngineImpl constructor/destructor).
AudioEngineImpl *gAudioImpl = nullptr;
// Device-preferred output sample rate; refreshed by getAudioInfo().
int outputSampleRate = 44100;
#if CC_PLATFORM == CC_PLATFORM_ANDROID
int bufferSizeInFrames = 192;
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
// TODO(hack) : There is currently a bug in the opensles module,
// so openharmony must configure a fixed size, otherwise the callback will be suspended
int bufferSizeInFrames = 2048;
#endif
// Queries the device's preferred output sample rate and frame count so that
// players can be configured to match the hardware output.
void getAudioInfo() {
#if CC_PLATFORM == CC_PLATFORM_ANDROID
    // Calls android.media.AudioSystem static methods through JNI.
    // NOTE(review): FindClass/GetStaticMethodID results are not null-checked;
    // a failed lookup would crash -- confirm these hidden APIs exist on all
    // supported Android versions.
    JNIEnv * env = JniHelper::getEnv();
    jclass audioSystem = env->FindClass("android/media/AudioSystem");
    jmethodID method = env->GetStaticMethodID(audioSystem, "getPrimaryOutputSamplingRate", "()I");
    outputSampleRate = env->CallStaticIntMethod(audioSystem, method);
    method = env->GetStaticMethodID(audioSystem, "getPrimaryOutputFrameCount", "()I");
    bufferSizeInFrames = env->CallStaticIntMethod(audioSystem, method);
#else
    // In openharmony, setting to 48K does not cause audio delays
    outputSampleRate = 48000;
#endif
}
} // namespace
/**
 * Marshals OpenSL ES worker-thread callbacks back to the thread that created
 * the audio engine (the cocos game thread).
 * Fix: removed the stray semicolons that followed each member-function body
 * (they trigger -Wextra-semi warnings and serve no purpose).
 */
class CallerThreadUtils : public ICallerThreadUtils {
public:
    // Schedules |func| for execution on the cocos scheduler thread.
    void performFunctionInCallerThread(const std::function<void()> &func) override {
        CC_CURRENT_ENGINE()->getScheduler()->performFunctionInCocosThread(func);
    }
    // Returns the id recorded via setCallerThreadId().
    std::thread::id getCallerThreadId() override {
        return _tid;
    }
    // Records the owning thread id; called from the AudioEngineImpl constructor.
    void setCallerThreadId(std::thread::id tid) {
        _tid = tid;
    }
private:
    std::thread::id _tid;
};
static CallerThreadUtils gCallerThreadUtils;
/**
 * Resolves |url| (an asset-relative path) to a readable file descriptor,
 * trying the OBB expansion file first, then the APK asset manager (Android)
 * or the raw-file API (OpenHarmony).
 * @param start [out] byte offset of the asset within the descriptor.
 * @param length [out] byte length of the asset.
 * @return the file descriptor, or a value <= 0 on failure.
 * Fixes: null-check the AAssetManager_open() result before
 * AAsset_openFileDescriptor() (previously crashed on a missing asset), and
 * drop the stray semicolon after the function body.
 */
static int fdGetter(const ccstd::string &url, off_t *start, off_t *length) {
    int fd = -1;
#if CC_PLATFORM == CC_PLATFORM_ANDROID
    if (cc::FileUtilsAndroid::getObbFile() != nullptr) {
        int64_t startV;
        int64_t lenV;
        fd = cc::getObbAssetFileDescriptorJNI(url, &startV, &lenV);
        *start = static_cast<off_t>(startV);
        *length = static_cast<off_t>(lenV);
    }
    if (fd <= 0) {
        auto *asset = AAssetManager_open(cc::FileUtilsAndroid::getAssetManager(), url.c_str(), AASSET_MODE_UNKNOWN);
        if (asset != nullptr) {
            // open asset as file descriptor
            fd = AAsset_openFileDescriptor(asset, start, length);
            AAsset_close(asset);
        }
    }
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
    FileUtilsOpenHarmony* fileUtils = dynamic_cast<FileUtilsOpenHarmony*>(FileUtils::getInstance());
    if(fileUtils) {
        RawFileDescriptor descriptor;
        fileUtils->getRawFileDescriptor(url, descriptor);
        fd = descriptor.fd;
    }
#endif
    if (fd <= 0) {
        ALOGE("Failed to open file descriptor for '%s'", url.c_str());
    }
    return fd;
}
//====================================================
// Constructs the engine wrapper: records the calling (game) thread id so SL
// callbacks can be marshalled back to it, publishes the singleton pointer,
// and queries the device's preferred sample rate / buffer size.
// The OpenSL ES objects themselves are created later, in init().
AudioEngineImpl::AudioEngineImpl()
: _engineObject(nullptr),
  _engineEngine(nullptr),
  _outputMixObject(nullptr),
  _audioPlayerProvider(nullptr),
  _audioIDIndex(0),
  _lazyInitLoop(true) {
    gCallerThreadUtils.setCallerThreadId(std::this_thread::get_id());
    gAudioImpl = this;
    getAudioInfo();
}
// Tears down in reverse order of creation: the player provider (which was
// constructed from the engine and output mix -- see init()) goes first, then
// the output mix, then the engine object itself.
AudioEngineImpl::~AudioEngineImpl() {
    if (_audioPlayerProvider != nullptr) {
        delete _audioPlayerProvider;
        _audioPlayerProvider = nullptr;
    }
    if (_outputMixObject) {
        (*_outputMixObject)->Destroy(_outputMixObject);
    }
    if (_engineObject) {
        (*_engineObject)->Destroy(_engineObject);
    }
    // Clear the singleton pointer published by the constructor.
    gAudioImpl = nullptr;
}
/**
 * Creates and realizes the OpenSL ES engine and output mix, then builds the
 * AudioPlayerProvider used by play2d().
 * @return true when every step succeeds, false otherwise (partial objects are
 *         cleaned up by the destructor).
 * Fix: the output mix requested no optional interfaces through zero-length
 * local arrays, which are a non-standard compiler extension in C++; pass a
 * zero count with null array pointers instead.
 */
bool AudioEngineImpl::init() {
    bool ret = false;
    do {
        // create engine
        auto result = slCreateEngine(&_engineObject, 0, nullptr, 0, nullptr, nullptr);
        if (SL_RESULT_SUCCESS != result) {
            CC_LOG_ERROR("create opensl engine fail");
            break;
        }
        // realize the engine
        result = (*_engineObject)->Realize(_engineObject, SL_BOOLEAN_FALSE);
        if (SL_RESULT_SUCCESS != result) {
            CC_LOG_ERROR("realize the engine fail");
            break;
        }
        // get the engine interface, which is needed in order to create other objects
        result = (*_engineObject)->GetInterface(_engineObject, SL_IID_ENGINE, &_engineEngine);
        if (SL_RESULT_SUCCESS != result) {
            CC_LOG_ERROR("get the engine interface fail");
            break;
        }
        // create output mix (no optional interfaces requested)
        result = (*_engineEngine)->CreateOutputMix(_engineEngine, &_outputMixObject, 0, nullptr, nullptr);
        if (SL_RESULT_SUCCESS != result) {
            CC_LOG_ERROR("create output mix fail");
            break;
        }
        // realize the output mix
        result = (*_outputMixObject)->Realize(_outputMixObject, SL_BOOLEAN_FALSE);
        if (SL_RESULT_SUCCESS != result) {
            CC_LOG_ERROR("realize the output mix fail");
            break;
        }
        _audioPlayerProvider = ccnew AudioPlayerProvider(_engineEngine, _outputMixObject, outputSampleRate, bufferSizeInFrames, fdGetter, &gCallerThreadUtils);
        ret = true;
    } while (false);
    return ret;
}
// Propagates an audio-focus change to every currently active player.
void AudioEngineImpl::setAudioFocusForAllPlayers(bool isFocus) {
    for (const auto &entry : _audioPlayers) {
        auto *player = entry.second;
        player->setAudioFocus(isFocus);
    }
}
/**
 * Starts playback of |filePath|.
 * Allocates a new audio id, obtains a player from the provider, registers a
 * play-event callback that removes the player when it finishes or is stopped,
 * then starts it with the requested loop/volume settings.
 * @return the new audio id, or AudioEngine::INVALID_AUDIO_ID on failure.
 */
int AudioEngineImpl::play2d(const ccstd::string &filePath, bool loop, float volume) {
    ALOGV("play2d, _audioPlayers.size=%d", (int)_audioPlayers.size());
    auto audioId = AudioEngine::INVALID_AUDIO_ID;
    do {
        // init() must have succeeded before any playback is possible.
        if (_engineEngine == nullptr || _audioPlayerProvider == nullptr) {
            break;
        }
        auto fullPath = FileUtils::getInstance()->fullPathForFilename(filePath);
        audioId = _audioIDIndex++;
        auto *player = _audioPlayerProvider->getAudioPlayer(fullPath);
        if (player != nullptr) {
            player->setId(audioId);
            _audioPlayers.insert(std::make_pair(audioId, player));
            // NOTE(review): the lambda captures |player| as a raw pointer; it
            // relies on the player staying alive until its OVER/STOPPED event
            // has been delivered -- confirm with the IAudioPlayer
            // implementations that destroy order guarantees this.
            player->setPlayEventCallback([this, player, filePath](IAudioPlayer::State state) {
                // Only terminal states need cleanup.
                if (state != IAudioPlayer::State::OVER && state != IAudioPlayer::State::STOPPED) {
                    ALOGV("Ignore state: %d", static_cast<int>(state));
                    return;
                }
                int id = player->getId();
                ALOGV("Removing player id=%d, state:%d", id, (int)state);
                // Drop all bookkeeping for this id.
                AudioEngine::remove(id);
                if (_audioPlayers.find(id) != _audioPlayers.end()) {
                    _audioPlayers.erase(id);
                }
                if (_urlAudioPlayersNeedResume.find(id) != _urlAudioPlayersNeedResume.end()) {
                    _urlAudioPlayersNeedResume.erase(id);
                }
                // Fire the user "finish" callback only on natural completion
                // (OVER), never on an explicit stop.
                auto iter = _callbackMap.find(id);
                if (iter != _callbackMap.end()) {
                    if (state == IAudioPlayer::State::OVER) {
                        iter->second(id, filePath);
                    }
                    _callbackMap.erase(iter);
                }
            });
            player->setLoop(loop);
            player->setVolume(volume);
            player->play();
        } else {
            ALOGE("Oops, player is null ...");
            return AudioEngine::INVALID_AUDIO_ID;
        }
        AudioEngine::sAudioIDInfoMap[audioId].state = AudioEngine::AudioState::PLAYING;
    } while (false);
    return audioId;
}
// Sets the playback volume of the player bound to |audioID|.
// Silently does nothing when the id is unknown (already stopped/finished).
void AudioEngineImpl::setVolume(int audioID, float volume) {
    auto found = _audioPlayers.find(audioID);
    if (found == _audioPlayers.end()) {
        return;
    }
    found->second->setVolume(volume);
}
// Toggles looping for the player bound to |audioID|; no-op for unknown ids.
void AudioEngineImpl::setLoop(int audioID, bool loop) {
    auto found = _audioPlayers.find(audioID);
    if (found == _audioPlayers.end()) {
        return;
    }
    found->second->setLoop(loop);
}
// Pauses the player bound to |audioID|; no-op for unknown ids.
void AudioEngineImpl::pause(int audioID) {
    auto found = _audioPlayers.find(audioID);
    if (found == _audioPlayers.end()) {
        return;
    }
    found->second->pause();
}
// Resumes the player bound to |audioID|; no-op for unknown ids.
void AudioEngineImpl::resume(int audioID) {
    auto found = _audioPlayers.find(audioID);
    if (found == _audioPlayers.end()) {
        return;
    }
    found->second->resume();
}
// Stops the player bound to |audioID|; no-op for unknown ids.
// The player's play-event callback performs the map cleanup afterwards.
void AudioEngineImpl::stop(int audioID) {
    auto found = _audioPlayers.find(audioID);
    if (found == _audioPlayers.end()) {
        return;
    }
    found->second->stop();
}
void AudioEngineImpl::stopAll() {
if (_audioPlayers.empty()) {
return;
}
// Create a temporary vector for storing all players since
// p->stop() will trigger _audioPlayers.erase,
// and it will cause a crash as it's already in for loop
ccstd::vector<IAudioPlayer *> players;
players.reserve(_audioPlayers.size());
for (const auto &e : _audioPlayers) {
players.push_back(e.second);
}
for (auto *p : players) {
p->stop();
}
}
// Returns the duration (seconds) of the audio bound to |audioID|,
// or 0.0F when the id is unknown.
float AudioEngineImpl::getDuration(int audioID) {
    auto found = _audioPlayers.find(audioID);
    if (found == _audioPlayers.end()) {
        return 0.0F;
    }
    return found->second->getDuration();
}
// Returns the duration (seconds) of the audio file at |filePath|, or 0.0F
// when the engine is not initialized or the duration cannot be determined.
float AudioEngineImpl::getDurationFromFile(const ccstd::string &filePath) {
    if (_audioPlayerProvider != nullptr) {
        auto fullPath = FileUtils::getInstance()->fullPathForFilename(filePath);
        return _audioPlayerProvider->getDurationFromFile(fullPath);
    }
    // Float literal for consistency with the other float-returning getters
    // (getDuration / getCurrentTime), instead of the implicit int-to-float 0.
    return 0.0F;
}
// Returns the current playback position (seconds) of |audioID|,
// or 0.0F when the id is unknown.
float AudioEngineImpl::getCurrentTime(int audioID) {
    auto found = _audioPlayers.find(audioID);
    if (found == _audioPlayers.end()) {
        return 0.0F;
    }
    return found->second->getPosition();
}
// Seeks the player bound to |audioID| to |time| seconds.
// Returns false when the id is unknown or the player rejects the seek.
bool AudioEngineImpl::setCurrentTime(int audioID, float time) {
    auto found = _audioPlayers.find(audioID);
    if (found == _audioPlayers.end()) {
        return false;
    }
    return found->second->setPosition(time);
}
// Registers (or replaces) the callback invoked when |audioID| plays to completion.
void AudioEngineImpl::setFinishCallback(int audioID, const std::function<void(int, const ccstd::string &)> &callback) {
    auto inserted = _callbackMap.emplace(audioID, callback);
    if (!inserted.second) {
        // An entry already existed for this id — overwrite it.
        inserted.first->second = callback;
    }
}
// Asynchronously decodes |filePath| into the PCM cache.
// |callback| (if set) receives true on success, false on failure or when the
// engine is not initialized.
void AudioEngineImpl::preload(const ccstd::string &filePath, const std::function<void(bool)> &callback) {
    if (_audioPlayerProvider == nullptr) {
        // Engine never initialized — report failure immediately.
        if (callback != nullptr) {
            callback(false);
        }
        return;
    }
    ccstd::string fullPath = FileUtils::getInstance()->fullPathForFilename(filePath);
    _audioPlayerProvider->preloadEffect(fullPath, [callback](bool succeed, const PcmData & /*data*/) {
        if (callback != nullptr) {
            callback(succeed);
        }
    });
}
// Drops the cached PCM data for |filePath|, if any.
void AudioEngineImpl::uncache(const ccstd::string &filePath) {
    if (_audioPlayerProvider == nullptr) {
        return;
    }
    _audioPlayerProvider->clearPcmCache(FileUtils::getInstance()->fullPathForFilename(filePath));
}
// Drops every cached PCM buffer held by the provider.
void AudioEngineImpl::uncacheAll() {
    if (_audioPlayerProvider == nullptr) {
        return;
    }
    _audioPlayerProvider->clearAllPcmCaches();
}
// Called when the app goes to background; delegates to the provider.
void AudioEngineImpl::onPause() {
    if (_audioPlayerProvider == nullptr) {
        return;
    }
    _audioPlayerProvider->pause();
}
// Called when the app returns to foreground; delegates to the provider.
void AudioEngineImpl::onResume() {
    if (_audioPlayerProvider == nullptr) {
        return;
    }
    _audioPlayerProvider->resume();
}
/**
 * Reads the PCM header (frame count, sample rate, channel count, ...) of the
 * audio file at @p url. The provider's PCM cache is consulted first; otherwise
 * the file is decoded once and the decoder destroyed before returning.
 *
 * @param url Path understood by FileUtils.
 * @return A zero-initialized PCMHeader when the file is missing, the engine
 *         is not initialized, or decoding fails.
 */
PCMHeader AudioEngineImpl::getPCMHeader(const char *url) {
    PCMHeader header{};
    ccstd::string fileFullPath = FileUtils::getInstance()->fullPathForFilename(url);
    if (fileFullPath.empty()) {
        CC_LOG_DEBUG("file %s does not exist or failed to load", url);
        return header;
    }
    // Guard against calls before init() succeeded — every other public method
    // checks _audioPlayerProvider, this one dereferenced it unconditionally.
    if (_audioPlayerProvider == nullptr) {
        return header;
    }
    if (_audioPlayerProvider->getPcmHeader(url, header)) {
        CC_LOG_DEBUG("file %s pcm data already cached", url);
        return header;
    }
    AudioDecoder *decoder = AudioDecoderProvider::createAudioDecoder(_engineEngine, fileFullPath, bufferSizeInFrames, outputSampleRate, fdGetter);
    if (decoder == nullptr) {
        CC_LOG_DEBUG("decode %s failed, the file formate might not support", url);
        return header;
    }
    if (!decoder->start()) {
        CC_LOG_DEBUG("[Audio Decoder] Decode failed %s", url);
        // Fix: the decoder was leaked on this early-return path.
        AudioDecoderProvider::destroyAudioDecoder(&decoder);
        return header;
    }
    PcmData data = decoder->getResult();
    // NOTE(review): bytesPerFrame here is bitsPerSample / 8 (bytes per *sample*),
    // while getOriginalPCMBuffer() also multiplies by numChannels — confirm which
    // meaning callers rely on before unifying.
    header.bytesPerFrame = data.bitsPerSample / 8;
    header.channelCount = data.numChannels;
    // Decoder output is reported as 16-bit signed PCM — TODO confirm for all codecs.
    header.dataFormat = AudioDataFormat::SIGNED_16;
    header.sampleRate = data.sampleRate;
    header.totalFrames = data.numFrames;
    AudioDecoderProvider::destroyAudioDecoder(&decoder);
    return header;
}
/**
 * Returns the raw (deinterleaved) PCM samples of a single channel of the audio
 * file at @p url. Decoded data is registered into the provider's cache so a
 * second call does not decode again.
 *
 * @param url       Path understood by FileUtils.
 * @param channelID Zero-based channel to extract; must be < channel count.
 * @return The channel's bytes, or an empty vector when the file is missing,
 *         the engine is not initialized, decoding fails, or channelID is invalid.
 */
ccstd::vector<uint8_t> AudioEngineImpl::getOriginalPCMBuffer(const char *url, uint32_t channelID) {
    ccstd::string fileFullPath = FileUtils::getInstance()->fullPathForFilename(url);
    ccstd::vector<uint8_t> pcmData;
    if (fileFullPath.empty()) {
        CC_LOG_DEBUG("file %s does not exist or failed to load", url);
        return pcmData;
    }
    // Guard against calls before init() succeeded (consistent with the other methods).
    if (_audioPlayerProvider == nullptr) {
        return pcmData;
    }
    PcmData data;
    if (_audioPlayerProvider->getPcmData(url, data)) {
        CC_LOG_DEBUG("file %s pcm data already cached", url);
    } else {
        AudioDecoder *decoder = AudioDecoderProvider::createAudioDecoder(_engineEngine, fileFullPath, bufferSizeInFrames, outputSampleRate, fdGetter);
        if (decoder == nullptr) {
            CC_LOG_DEBUG("decode %s failed, the file formate might not support", url);
            return pcmData;
        }
        if (!decoder->start()) {
            CC_LOG_DEBUG("[Audio Decoder] Decode failed %s", url);
            // Fix: the decoder was leaked on this early-return path.
            AudioDecoderProvider::destroyAudioDecoder(&decoder);
            return pcmData;
        }
        data = decoder->getResult();
        _audioPlayerProvider->registerPcmData(url, data);
        AudioDecoderProvider::destroyAudioDecoder(&decoder);
    }
    const uint32_t channelCount = data.numChannels;
    if (channelID >= channelCount) {
        CC_LOG_ERROR("channelID invalid, total channel count is %d but %d is required", channelCount, channelID);
        return pcmData;
    }
    // bytesPerSample = bitsPerSample / 8, according to 1 byte = 8 bits
    const uint32_t bytesPerFrame = data.numChannels * data.bitsPerSample / 8;
    const uint32_t numFrames = data.numFrames;
    const uint32_t bytesPerChannelInFrame = bytesPerFrame / channelCount;
    pcmData.resize(static_cast<size_t>(bytesPerChannelInFrame) * numFrames);
    uint8_t *dst = pcmData.data();
    const char *src = data.pcmBuffer->data(); // shared ptr
    // Copy this channel's slice out of every interleaved frame.
    // Unsigned index fixes the signed/unsigned comparison of the original loop.
    for (uint32_t frame = 0; frame < numFrames; ++frame) {
        memcpy(dst, src + static_cast<size_t>(frame) * bytesPerFrame + channelID * bytesPerChannelInFrame, bytesPerChannelInFrame);
        dst += bytesPerChannelInFrame;
    }
    return pcmData;
}

View File

@@ -0,0 +1,105 @@
/****************************************************************************
Copyright (c) 2014-2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <SLES/OpenSLES.h>
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#include <SLES/OpenSLES_Android.h>
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
#include <SLES/OpenSLES_Platform.h>
#endif
#include <functional>
#include "audio/include/AudioDef.h"
#include "base/RefCounted.h"
#include "base/Utils.h"
#include "base/std/container/string.h"
#include "base/std/container/unordered_map.h"
#define MAX_AUDIOINSTANCES 13
#define ERRORLOG(msg) log("fun:%s,line:%d,msg:%s", __func__, __LINE__, #msg)
namespace cc {
struct CustomEvent;
class IAudioPlayer;
class AudioPlayerProvider;
class AudioEngineImpl;
/**
 * OpenSL ES based backend of the cocos AudioEngine.
 * Owns the OpenSL engine / output-mix objects and a map of per-audio-id players
 * created through an AudioPlayerProvider.
 *
 * NOTE(review): getOriginalPCMBuffer is declared here with std::vector while its
 * definition uses ccstd::vector — confirm ccstd::vector is an alias of std::vector,
 * otherwise declaration and definition diverge.
 */
class AudioEngineImpl : public RefCounted {
public:
    AudioEngineImpl();
    ~AudioEngineImpl() override;
    // Creates the OpenSL engine, output mix and the AudioPlayerProvider.
    bool init();
    // Starts playback; returns a new audio id or AudioEngine::INVALID_AUDIO_ID.
    int play2d(const ccstd::string &filePath, bool loop, float volume);
    void setVolume(int audioID, float volume);
    void setLoop(int audioID, bool loop);
    void pause(int audioID);
    void resume(int audioID);
    void stop(int audioID);
    void stopAll();
    // Durations / positions are in seconds; unknown ids yield 0.
    float getDuration(int audioID);
    float getDurationFromFile(const ccstd::string &filePath);
    float getCurrentTime(int audioID);
    bool setCurrentTime(int audioID, float time);
    // Callback fired with (audioID, filePath) when playback completes naturally.
    void setFinishCallback(int audioID, const std::function<void(int, const ccstd::string &)> &callback);
    void uncache(const ccstd::string &filePath);
    void uncacheAll();
    // Asynchronous decode into the PCM cache; callback receives success flag.
    void preload(const ccstd::string &filePath, const std::function<void(bool)> &callback);
    // Foreground/background transitions forwarded from the application.
    void onResume();
    void onPause();
    void setAudioFocusForAllPlayers(bool isFocus);
    PCMHeader getPCMHeader(const char *url);
    std::vector<uint8_t> getOriginalPCMBuffer(const char *url, uint32_t channelID);
private:
    // engine interfaces
    SLObjectItf _engineObject;
    SLEngineItf _engineEngine;
    // output mix interfaces
    SLObjectItf _outputMixObject;
    //audioID,AudioInfo
    ccstd::unordered_map<int, IAudioPlayer *> _audioPlayers;
    // Per-id finish callbacks registered via setFinishCallback.
    ccstd::unordered_map<int, std::function<void(int, const ccstd::string &)>> _callbackMap;
    // UrlAudioPlayers which need to resumed while entering foreground
    ccstd::unordered_map<int, IAudioPlayer *> _urlAudioPlayersNeedResume;
    AudioPlayerProvider *_audioPlayerProvider;
    // Monotonically increasing source of new audio ids (never reused).
    int _audioIDIndex;
    bool _lazyInitLoop;
};
} // namespace cc

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,379 @@
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#pragma once
#include <cstdint>
#include <sys/types.h>
#include <pthread.h>
#include "audio/android/AudioBufferProvider.h"
#include "audio/android/AudioResamplerPublic.h"
#include "audio/android/AudioResampler.h"
#include "audio/android/audio.h"
#include "audio/android/utils/Compat.h"
// IDEA: This is actually unity gain, which might not be max in future, expressed in U.12
#define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT
namespace cc {
// ----------------------------------------------------------------------------
/**
 * Software PCM mixer ported from the Android Open Source Project (AudioFlinger).
 * Mixes up to MAX_NUM_TRACKS input tracks (with optional per-track resampling
 * and volume ramping) into a single output buffer via process().
 * Tracks are addressed by integer "names" allocated with getTrackName().
 */
class AudioMixer {
public:
    AudioMixer(size_t frameCount, uint32_t sampleRate,
               uint32_t maxNumTracks = MAX_NUM_TRACKS);
    /*virtual*/ ~AudioMixer(); // non-virtual saves a v-table, restore if sub-classed
    // This mixer has a hard-coded upper limit of 32 active track inputs.
    // Adding support for > 32 tracks would require more than simply changing this value.
    static const uint32_t MAX_NUM_TRACKS = 32;
    // maximum number of channels supported by the mixer
    // This mixer has a hard-coded upper limit of 8 channels for output.
    static const uint32_t MAX_NUM_CHANNELS = 8;
    static const uint32_t MAX_NUM_VOLUMES = 2; // stereo volume only
    // maximum number of channels supported for the content
    static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
    static const uint16_t UNITY_GAIN_INT = 0x1000;
    static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0F;
    enum { // names
        // track names (MAX_NUM_TRACKS units)
        TRACK0 = 0x1000,
        // 0x2000 is unused
        // setParameter targets
        TRACK = 0x3000,
        RESAMPLE = 0x3001,
        RAMP_VOLUME = 0x3002, // ramp to new volume
        VOLUME = 0x3003,      // don't ramp
        TIMESTRETCH = 0x3004,
        // set Parameter names
        // for target TRACK
        CHANNEL_MASK = 0x4000,
        FORMAT = 0x4001,
        MAIN_BUFFER = 0x4002,
        AUX_BUFFER = 0x4003,
        DOWNMIX_TYPE = 0X4004,
        MIXER_FORMAT = 0x4005,       // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
        MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
        // for target RESAMPLE
        SAMPLE_RATE = 0x4100, // Configure sample rate conversion on this track name;
                              // parameter 'value' is the new sample rate in Hz.
                              // Only creates a sample rate converter the first time that
                              // the track sample rate is different from the mix sample rate.
                              // If the new sample rate is the same as the mix sample rate,
                              // and a sample rate converter already exists,
                              // then the sample rate converter remains present but is a no-op.
        RESET = 0x4101,       // Reset sample rate converter without changing sample rate.
                              // This clears out the resampler's input buffer.
        REMOVE = 0x4102,      // Remove the sample rate converter on this track name;
                              // the track is restored to the mix sample rate.
        // for target RAMP_VOLUME and VOLUME (8 channels max)
        // IDEA: use float for these 3 to improve the dynamic range
        VOLUME0 = 0x4200,
        VOLUME1 = 0x4201,
        AUXLEVEL = 0x4210,
        // for target TIMESTRETCH
        PLAYBACK_RATE = 0x4300, // Configure timestretch on this track name;
                                // parameter 'value' is a pointer to the new playback rate.
    };
    // For all APIs with "name": TRACK0 <= name < TRACK0 + MAX_NUM_TRACKS
    // Allocate a track name. Returns new track name if successful, -1 on failure.
    // The failure could be because of an invalid channelMask or format, or that
    // the track capacity of the mixer is exceeded.
    int getTrackName(audio_channel_mask_t channelMask,
                     audio_format_t format, int sessionId);
    // Free an allocated track by name
    void deleteTrackName(int name);
    // Enable or disable an allocated track by name
    void enable(int name);
    void disable(int name);
    void setParameter(int name, int target, int param, void *value);
    void setBufferProvider(int name, AudioBufferProvider *bufferProvider);
    void process(int64_t pts);
    uint32_t trackNames() const { return mTrackNames; }
    size_t getUnreleasedFrames(int name) const;
    static inline bool isValidPcmTrackFormat(audio_format_t format) {
        switch (format) {
            case AUDIO_FORMAT_PCM_8_BIT:
            case AUDIO_FORMAT_PCM_16_BIT:
            case AUDIO_FORMAT_PCM_24_BIT_PACKED:
            case AUDIO_FORMAT_PCM_32_BIT:
            case AUDIO_FORMAT_PCM_FLOAT:
                return true;
            default:
                return false;
        }
    }
private:
    enum {
        // IDEA: this representation permits up to 8 channels
        NEEDS_CHANNEL_COUNT__MASK = 0x00000007, // NOLINT(bugprone-reserved-identifier)
    };
    enum {
        NEEDS_CHANNEL_1 = 0x00000000, // mono
        NEEDS_CHANNEL_2 = 0x00000001, // stereo
        // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
        NEEDS_MUTE = 0x00000100,
        NEEDS_RESAMPLE = 0x00001000,
        NEEDS_AUX = 0x00010000,
    };
    struct state_t;
    struct track_t;
    typedef void (*hook_t)(track_t *t, int32_t *output, size_t numOutFrames, int32_t *temp, int32_t *aux); //NOLINT(modernize-use-using)
    static const int BLOCKSIZE = 16; // 4 cache lines
    // Per-track mixing state: volume (integer and float forms), resampler,
    // buffer providers and format conversion configuration.
    struct track_t {
        uint32_t needs;
        // REFINE: Eventually remove legacy integer volume settings
        union {
            int16_t volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero)
            int32_t volumeRL;
        };
        int32_t prevVolume[MAX_NUM_VOLUMES];
        // 16-byte boundary
        int32_t volumeInc[MAX_NUM_VOLUMES];
        int32_t auxInc;
        int32_t prevAuxLevel;
        // 16-byte boundary
        int16_t auxLevel; // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
        uint16_t frameCount;
        uint8_t channelCount;   // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
        uint8_t unused_padding; // formerly format, was always 16
        uint16_t enabled;       // actually bool
        audio_channel_mask_t channelMask;
        // actual buffer provider used by the track hooks, see DownmixerBufferProvider below
        // for how the Track buffer provider is wrapped by another one when dowmixing is required
        AudioBufferProvider *bufferProvider;
        // 16-byte boundary
        mutable AudioBufferProvider::Buffer buffer; // 8 bytes
        hook_t hook;
        const void *in; // current location in buffer
        // 16-byte boundary
        AudioResampler *resampler;
        uint32_t sampleRate;
        int32_t *mainBuffer;
        int32_t *auxBuffer;
        // 16-byte boundary
        /* Buffer providers are constructed to translate the track input data as needed.
         *
         * REFINE: perhaps make a single PlaybackConverterProvider class to move
         * all pre-mixer track buffer conversions outside the AudioMixer class.
         *
         * 1) mInputBufferProvider: The AudioTrack buffer provider.
         * 2) mReformatBufferProvider: If not NULL, performs the audio reformat to
         *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
         *    requires reformat. For example, it may convert floating point input to
         *    PCM_16_bit if that's required by the downmixer.
         * 3) downmixerBufferProvider: If not NULL, performs the channel remixing to match
         *    the number of channels required by the mixer sink.
         * 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
         *    the downmixer requirements to the mixer engine input requirements.
         * 5) mTimestretchBufferProvider: Adds timestretching for playback rate
         */
        AudioBufferProvider *mInputBufferProvider; // externally provided buffer provider.
        //cjh    PassthruBufferProvider* mReformatBufferProvider; // provider wrapper for reformatting.
        //    PassthruBufferProvider* downmixerBufferProvider; // wrapper for channel conversion.
        //    PassthruBufferProvider* mPostDownmixReformatBufferProvider;
        //    PassthruBufferProvider* mTimestretchBufferProvider;
        int32_t sessionId;
        audio_format_t mMixerFormat;   // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
        audio_format_t mFormat;        // input track format
        audio_format_t mMixerInFormat; // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
        // each track must be converted to this format.
        audio_format_t mDownmixRequiresFormat; // required downmixer format
        // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
        // AUDIO_FORMAT_INVALID if no required format
        float mVolume[MAX_NUM_VOLUMES];     // floating point set volume
        float mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
        float mVolumeInc[MAX_NUM_VOLUMES];  // floating point volume increment
        float mAuxLevel;                    // floating point set aux level
        float mPrevAuxLevel;                // floating point prev aux level
        float mAuxInc;                      // floating point aux increment
        audio_channel_mask_t mMixerChannelMask;
        uint32_t mMixerChannelCount;
        AudioPlaybackRate mPlaybackRate;
        bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
        bool setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
        bool doesResample() const { return resampler != nullptr; }
        void resetResampler() const {
            if (resampler != nullptr) resampler->reset();
        }
        void adjustVolumeRamp(bool aux, bool useFloat = false);
        size_t getUnreleasedFrames() const { return resampler != nullptr ? resampler->getUnreleasedFrames() : 0; };
        status_t prepareForDownmix();
        void unprepareForDownmix();
        status_t prepareForReformat();
        void unprepareForReformat();
        bool setPlaybackRate(const AudioPlaybackRate &playbackRate);
        void reconfigureBufferProviders();
    };
    typedef void (*process_hook_t)(state_t *state, int64_t pts); // NOLINT(modernize-use-using)
    // pad to 32-bytes to fill cache line
    // Whole-mixer state: enabled-track bitmask plus the per-track array.
    struct state_t {
        uint32_t enabledTracks;
        uint32_t needsChanged;
        size_t frameCount;
        process_hook_t hook; // one of process__*, never NULL
        int32_t *outputTemp;
        int32_t *resampleTemp;
        //cjh    NBLog::Writer* mLog;
        int32_t reserved[1];
        // IDEA: allocate dynamically to save some memory when maxNumTracks < MAX_NUM_TRACKS
        track_t tracks[MAX_NUM_TRACKS] __attribute__((aligned(32)));
    };
    // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc.
    uint32_t mTrackNames;// NOLINT(readability-identifier-naming)
    // bitmask of configured track names; ~0 if maxNumTracks == MAX_NUM_TRACKS,
    // but will have fewer bits set if maxNumTracks < MAX_NUM_TRACKS
    const uint32_t mConfiguredNames;// NOLINT(readability-identifier-naming)
    const uint32_t mSampleRate;// NOLINT(readability-identifier-naming)
    //cjh    NBLog::Writer mDummyLog;
public:
    //cjh    void setLog(NBLog::Writer* log);
private:
    state_t mState __attribute__((aligned(32)));// NOLINT(readability-identifier-naming)
    // Call after changing either the enabled status of a track, or parameters of an enabled track.
    // OK to call more often than that, but unnecessary.
    void invalidateState(uint32_t mask);
    bool setChannelMasks(int name,
                         audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
    static void track__genericResample(track_t *t, int32_t *out, size_t numFrames, int32_t *temp, int32_t *aux);// NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    static void track__nop(track_t *t, int32_t *out, size_t numFrames, int32_t *temp, int32_t *aux);// NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    static void track__16BitsStereo(track_t *t, int32_t *out, size_t numFrames, int32_t *temp, int32_t *aux);// NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    static void track__16BitsMono(track_t *t, int32_t *out, size_t numFrames, int32_t *temp, int32_t *aux);// NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    static void volumeRampStereo(track_t *t, int32_t *out, size_t frameCount, int32_t *temp, int32_t *aux);
    static void volumeStereo(track_t *t, int32_t *out, size_t frameCount, int32_t *temp,
                             int32_t *aux);
    static void process__validate(state_t *state, int64_t pts);// NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    static void process__nop(state_t *state, int64_t pts);// NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    static void process__genericNoResampling(state_t *state, int64_t pts);// NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    static void process__genericResampling(state_t *state, int64_t pts);// NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    static void process__OneTrack16BitsStereoNoResampling(state_t *state, int64_t pts);// NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    static int64_t calculateOutputPTS(const track_t &t, int64_t basePTS,
                                      int outputFrameIndex);
    static uint64_t sLocalTimeFreq;
    static pthread_once_t sOnceControl;
    static void sInitRoutine();
    /* multi-format volume mixing function (calls template functions
     * in AudioMixerOps.h). The template parameters are as follows:
     *
     *   MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
     *   USEFLOATVOL (set to true if float volume is used)
     *   ADJUSTVOL   (set to true if volume ramp parameters needs adjustment afterwards)
     *   TO: int32_t (Q4.27) or float
     *   TI: int32_t (Q4.27) or int16_t (Q0.15) or float
     *   TA: int32_t (Q4.27)
     */
    template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
              typename TO, typename TI, typename TA>
    static void volumeMix(TO *out, size_t outFrames,
                          const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t);
    // multi-format process hooks
    template <int MIXTYPE, typename TO, typename TI, typename TA>
    static void process_NoResampleOneTrack(state_t *state, int64_t pts);// NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    // multi-format track hooks
    template <int MIXTYPE, typename TO, typename TI, typename TA>
    static void track__Resample(track_t *t, TO *out, size_t frameCount, TO *temp __unused, TA *aux);// NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    template <int MIXTYPE, typename TO, typename TI, typename TA>
    static void track__NoResample(track_t *t, TO *out, size_t frameCount, TO *temp __unused, TA *aux); // NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
    static void convertMixerFormat(void *out, audio_format_t mixerOutFormat,
                                   void *in, audio_format_t mixerInFormat, size_t sampleCount);
    // hook types
    enum {
        PROCESSTYPE_NORESAMPLEONETRACK,
    };
    enum {
        TRACKTYPE_NOP,
        TRACKTYPE_RESAMPLE,
        TRACKTYPE_NORESAMPLE,
        TRACKTYPE_NORESAMPLEMONO,
    };
    // functions for determining the proper process and track hooks.
    static process_hook_t getProcessHook(int processType, uint32_t channelCount,
                                         audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
    static hook_t getTrackHook(int trackType, uint32_t channelCount,
                               audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
};
// ----------------------------------------------------------------------------
} // namespace cc

View File

@@ -0,0 +1,296 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "AudioMixerController"
#include "audio/android/AudioMixerController.h"
#include <algorithm>
#include "audio/android/AudioMixer.h"
#include "audio/android/OpenSLHelper.h"
#include "audio/android/Track.h"
#include "base/memory/Memory.h"
namespace cc {
// Allocates the 32-byte-aligned mix buffer; the AudioMixer itself is created in init().
AudioMixerController::AudioMixerController(int bufferSizeInFrames, int sampleRate, int channelCount)
: _bufferSizeInFrames(bufferSizeInFrames), _sampleRate(sampleRate), _channelCount(channelCount), _mixer(nullptr), _isPaused(false), _isMixingFrame(false) {
    ALOGV("In the constructor of AudioMixerController!");
    // 2 bytes per sample, one sample per channel per frame.
    const auto byteCount = static_cast<size_t>(bufferSizeInFrames) * 2 * channelCount;
    _mixingBuffer.size = byteCount;
    // Don't use posix_memalign since it was added from API 16, it will crash on Android 2.3
    // Therefore, for a workaround, we uses memalign here.
    _mixingBuffer.buf = memalign(32, byteCount);
    memset(_mixingBuffer.buf, 0, byteCount);
}
// Waits for any in-flight mix iteration, then releases the mixer and the mix buffer.
AudioMixerController::~AudioMixerController() {
    destroy();
    delete _mixer; // deleting nullptr is a no-op
    _mixer = nullptr;
    free(_mixingBuffer.buf);
}
// Creates the underlying AudioMixer at the controller's frame count and rate.
// Returns true on success. Must be called exactly once before mixOneFrame().
bool AudioMixerController::init() {
    _mixer = ccnew AudioMixer(_bufferSizeInFrames, _sampleRate);
    return _mixer != nullptr;
}
// Appends |track| to the active list under the tracks lock.
// Returns false when the track is already active (duplicates are rejected).
bool AudioMixerController::addTrack(Track *track) {
    ALOG_ASSERT(track != nullptr, "Shouldn't pass nullptr to addTrack");
    std::lock_guard<std::mutex> lk(_activeTracksMutex);
    if (std::find(_activeTracks.begin(), _activeTracks.end(), track) != _activeTracks.end()) {
        return false;
    }
    _activeTracks.push_back(track);
    return true;
}
// Erases the first occurrence of |item| from |v| (if present), matching the
// semantics of find-then-erase.
template <typename T>
static void removeItemFromVector(ccstd::vector<T> &v, T item) {
    for (auto iter = v.begin(); iter != v.end(); ++iter) {
        if (*iter == item) {
            v.erase(iter);
            return;
        }
    }
}
// Lazily binds |track| to a mixer track "name" and configures its buffer
// provider, formats, channel masks and initial volume. When the mixer has no
// free name left, the track is queued into |tracksToRemove| instead.
// Caller must hold _activeTracksMutex.
void AudioMixerController::initTrack(Track *track, ccstd::vector<Track *> &tracksToRemove) {
    if (track->isInitialized())
        return;
    uint32_t channelMask = audio_channel_out_mask_from_count(2);
    int32_t name = _mixer->getTrackName(channelMask, AUDIO_FORMAT_PCM_16_BIT,
                                        AUDIO_SESSION_OUTPUT_MIX);
    if (name < 0) {
        // If we could not get the track name, it means that there're MAX_NUM_TRACKS tracks
        // So ignore the new track.
        tracksToRemove.push_back(track);
    } else {
        // The track itself supplies the PCM frames to the mixer.
        _mixer->setBufferProvider(name, track);
        // All mixed output lands in the controller's shared mixing buffer.
        _mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
                             _mixingBuffer.buf);
        _mixer->setParameter(
            name,
            AudioMixer::TRACK,
            AudioMixer::MIXER_FORMAT,
            (void *)(uintptr_t)AUDIO_FORMAT_PCM_16_BIT);
        _mixer->setParameter(
            name,
            AudioMixer::TRACK,
            AudioMixer::FORMAT,
            (void *)(uintptr_t)AUDIO_FORMAT_PCM_16_BIT);
        _mixer->setParameter(
            name,
            AudioMixer::TRACK,
            AudioMixer::MIXER_CHANNEL_MASK,
            (void *)(uintptr_t)channelMask);
        _mixer->setParameter(
            name,
            AudioMixer::TRACK,
            AudioMixer::CHANNEL_MASK,
            (void *)(uintptr_t)channelMask);
        track->setName(name);
        _mixer->enable(name);
        // Apply the track's current L/R volume and clear its dirty flag
        // under the track's volume lock.
        std::lock_guard<std::mutex> lk(track->_volumeDirtyMutex);
        gain_minifloat_packed_t volume = track->getVolumeLR();
        float lVolume = float_from_gain(gain_minifloat_unpack_left(volume));
        float rVolume = float_from_gain(gain_minifloat_unpack_right(volume));
        _mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &lVolume);
        _mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &rVolume);
        track->setVolumeDirty(false);
        track->setInitialized(true);
    }
}
// Mixes one buffer of audio from every active track into _mixingBuffer.
// Runs on the audio output thread: first applies per-track state transitions
// (PLAYING / RESUMED / PAUSED / STOPPED / play-over), then invokes the mixer,
// and finally removes finished/stopped tracks from the active list.
void AudioMixerController::mixOneFrame() {
    // Flag polled by destroy() so teardown waits for the current iteration.
    _isMixingFrame = true;
    _activeTracksMutex.lock();
    auto mixStart = clockNow();
    // Tracks that must leave the active list after this iteration; collected
    // here because erasing inside the loop below would invalidate iteration.
    ccstd::vector<Track *> tracksToRemove;
    tracksToRemove.reserve(_activeTracks.size());
    // FOR TESTING BEGIN
    //        Track* track = _activeTracks[0];
    //
    //        AudioBufferProvider::Buffer buffer;
    //        buffer.frameCount = _bufferSizeInFrames;
    //        status_t r = track->getNextBuffer(&buffer);
    ////        ALOG_ASSERT(buffer.frameCount == _mixing->size / 2, "buffer.frameCount:%d, _mixing->size/2:%d", buffer.frameCount, _mixing->size/2);
    //        if (r == NO_ERROR)
    //        {
    //            ALOGV("getNextBuffer succeed ...");
    //            memcpy(_mixing->buf, buffer.raw, _mixing->size);
    //        }
    //        if (buffer.raw == nullptr)
    //        {
    //            ALOGV("Play over ...");
    //            tracksToRemove.push_back(track);
    //        }
    //        else
    //        {
    //            track->releaseBuffer(&buffer);
    //        }
    //
    //        _mixing->state = BufferState::FULL;
    //        _activeTracksMutex.unlock();
    // FOR TESTING END
    Track::State state;
    // set up the tracks.
    for (auto &&track : _activeTracks) {
        state = track->getState();
        if (state == Track::State::PLAYING) {
            initTrack(track, tracksToRemove);
            int name = track->getName();
            ALOG_ASSERT(name >= 0);
            // Push a pending volume change (if any) to the mixer before processing.
            std::lock_guard<std::mutex> lk(track->_volumeDirtyMutex);
            if (track->isVolumeDirty()) {
                gain_minifloat_packed_t volume = track->getVolumeLR();
                float lVolume = float_from_gain(gain_minifloat_unpack_left(volume));
                float rVolume = float_from_gain(gain_minifloat_unpack_right(volume));
                ALOGV("Track (name: %d)'s volume is dirty, update volume to L: %f, R: %f", name, lVolume, rVolume);
                _mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &lVolume);
                _mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &rVolume);
                track->setVolumeDirty(false);
            }
        } else if (state == Track::State::RESUMED) {
            initTrack(track, tracksToRemove);
            // Only a PAUSED track may be resumed; re-enable it in the mixer.
            if (track->getPrevState() == Track::State::PAUSED) {
                _mixer->enable(track->getName());
                track->setState(Track::State::PLAYING);
            } else {
                ALOGW("Previous state (%d) isn't PAUSED, couldn't resume!", static_cast<int>(track->getPrevState()));
            }
        } else if (state == Track::State::PAUSED) {
            initTrack(track, tracksToRemove);
            // Pausing just disables the mixer track; the name stays allocated.
            if (track->getPrevState() == Track::State::PLAYING || track->getPrevState() == Track::State::RESUMED) {
                _mixer->disable(track->getName());
            } else {
                ALOGW("Previous state (%d) isn't PLAYING, couldn't pause!", static_cast<int>(track->getPrevState()));
            }
        } else if (state == Track::State::STOPPED) {
            if (track->isInitialized()) {
                _mixer->deleteTrackName(track->getName());
            } else {
                ALOGV("Track (%p) hasn't been initialized yet!", track);
            }
            tracksToRemove.push_back(track);
        }
        // A track that consumed all its data either rewinds (loop) or is retired.
        if (track->isPlayOver()) {
            if (track->isLoop()) {
                track->reset();
            } else {
                ALOGV("Play over ...");
                _mixer->deleteTrackName(track->getName());
                tracksToRemove.push_back(track);
                track->setState(Track::State::OVER);
            }
        }
    }
    bool hasAvailableTracks = _activeTracks.size() - tracksToRemove.size() > 0;
    if (hasAvailableTracks) {
        ALOGV_IF(_activeTracks.size() > 8, "More than 8 active tracks: %d", (int)_activeTracks.size());
        _mixer->process(AudioBufferProvider::kInvalidPTS);
    } else {
        ALOGV("Doesn't have enough tracks: %d, %d", (int)_activeTracks.size(), (int)tracksToRemove.size());
    }
    // Remove stopped or playover tracks for active tracks container
    for (auto &&track : tracksToRemove) {
        removeItemFromVector(_activeTracks, track);
        if (track != nullptr && track->onStateChanged != nullptr) {
            track->onStateChanged(Track::State::DESTROYED);
        } else {
            ALOGE("track (%p) was released ...", track);
        }
    }
    _activeTracksMutex.unlock();
    auto mixEnd = clockNow();
    float mixInterval = intervalInMS(mixStart, mixEnd);
    ALOGV_IF(mixInterval > 1.0f, "Mix a frame waste: %fms", mixInterval);
    _isMixingFrame = false;
}
void AudioMixerController::destroy() {
    // Spin until any in-flight mix pass completes, then give the audio
    // consumer a short grace period before the owner tears us down.
    while (_isMixingFrame.load()) {
        usleep(10);
    }
    usleep(2000); // Wait for 2ms more.
}
// Marks the controller paused; mixOneFrame presumably checks this flag
// (not visible here — confirm against the caller).
void AudioMixerController::pause() {
    _isPaused.store(true);
}

// Clears the paused flag so mixing can continue.
void AudioMixerController::resume() {
    _isPaused.store(false);
}
// Returns true when at least one active track is queued (IDLE), playing,
// or freshly resumed. Thread-safe: holds _activeTracksMutex for the scan.
bool AudioMixerController::hasPlayingTacks() {
    std::lock_guard<std::mutex> lk(_activeTracksMutex);
    for (auto &&candidate : _activeTracks) {
        switch (candidate->getState()) {
            case Track::State::IDLE:
            case Track::State::PLAYING:
            case Track::State::RESUMED:
                return true;
            default:
                break;
        }
    }
    // Covers both "no tracks at all" and "no track in an audible state".
    return false;
}
} // namespace cc

View File

@@ -0,0 +1,84 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
#include "audio/android/utils/Errors.h"
#include "base/std/container/vector.h"
namespace cc {
class Track;
class AudioMixer;
/**
 * Mixes all active Tracks into one interleaved pcm output buffer, one frame
 * at a time (mixOneFrame). Playback state changes (pause/resume) are flagged
 * atomically and applied during mixing.
 */
class AudioMixerController {
public:
    // The mixed output for one frame: raw pointer + size in bytes.
    struct OutputBuffer {
        void *buf;
        size_t size;
    };

    AudioMixerController(int bufferSizeInFrames, int sampleRate, int channelCount);
    ~AudioMixerController();

    // Creates the underlying AudioMixer; returns false on failure.
    bool init();

    // Adds a track to the active set; ownership semantics not visible here —
    // confirm against Track's lifecycle in the .cpp.
    bool addTrack(Track *track);

    // True if any active track is IDLE, PLAYING or RESUMED.
    bool hasPlayingTacks();

    void pause();
    void resume();
    inline bool isPaused() const { return _isPaused; };

    // Mixes one frame of all active tracks into the mixing buffer.
    void mixOneFrame();

    // The most recently mixed output frame.
    inline OutputBuffer *current() { return &_mixingBuffer; }

private:
    // Blocks until the current mix pass ends; called during teardown.
    void destroy();
    // Registers a PLAYING/RESUMED/PAUSED track with the mixer on first use.
    void initTrack(Track *track, ccstd::vector<Track *> &tracksToRemove);

private:
    int _bufferSizeInFrames;
    int _sampleRate;
    int _channelCount;

    AudioMixer *_mixer;

    // Guards _activeTracks; taken by both the mixer thread and callers.
    std::mutex _activeTracksMutex;
    ccstd::vector<Track *> _activeTracks;

    OutputBuffer _mixingBuffer;

    std::atomic_bool _isPaused;
    // True while mixOneFrame is executing; destroy() spins on it.
    std::atomic_bool _isMixingFrame;
};
} // namespace cc

View File

@@ -0,0 +1,445 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "audio/android/cutils/log.h"
namespace cc {
/* Behavior of is_same<>::value is true if the types are identical,
* false otherwise. Identical to the STL std::is_same.
*/
/* Compile-time type-equality trait, behaviorally identical to std::is_same:
 * is_same<T, U>::value is true iff T and U name the same type.
 * Implemented via a tiny bool-carrying base so both cases share one shape. */
template <bool B>
struct is_same_result_ {
    static const bool value = B;
};

template <typename T, typename U>
struct is_same : is_same_result_<false> {};

// Partial specialization selected when both arguments are identical.
template <typename T>
struct is_same<T, T> : is_same_result_<true> {};
/* MixMul is a multiplication operator to scale an audio input signal
* by a volume gain, with the formula:
*
* O(utput) = I(nput) * V(olume)
*
* The output, input, and volume may have different types.
* There are 27 variants, of which 14 are actually defined in an
* explicitly templated class.
*
* The following type variables and the underlying meaning:
*
* Output type TO: int32_t (Q4.27) or int16_t (Q.15) or float [-1,1]
* Input signal type TI: int32_t (Q4.27) or int16_t (Q.15) or float [-1,1]
* Volume type TV: int32_t (U4.28) or int16_t (U4.12) or float [-1,1]
*
* For high precision audio, only the <TO, TI, TV> = <float, float, float>
* needs to be accelerated. This is perhaps the easiest form to do quickly as well.
*
* A generic version is NOT defined to catch any mistake of using it.
*/
/* Primary template is declared but never defined: any <TO, TI, TV>
 * combination without an explicit specialization below fails to link,
 * catching accidental instantiations at build time.
 *
 * Fixed-point formats (per the header comment above):
 *   int32_t samples are Q4.27, int16_t samples are Q.15,
 *   int32_t volumes are U4.28, int16_t volumes are U4.12,
 *   floats are normalized to [-1, 1].
 * The shifts below convert between those formats; they are bit-exact and
 * must not be re-expressed. */
template <typename TO, typename TI, typename TV>
TO MixMul(TI value, TV volume);

// Q.15 * U4.12 -> Q4.27 (15 + 12 = 27 fractional bits, no shift needed).
template <>
inline int32_t MixMul<int32_t, int16_t, int16_t>(int16_t value, int16_t volume) {
    return value * volume;
}

// Q4.27 * U4.12 -> Q4.27: pre-shift the sample down 12 to keep 27 frac bits.
template <>
inline int32_t MixMul<int32_t, int32_t, int16_t>(int32_t value, int16_t volume) {
    return (value >> 12) * volume;
}

// Q.15 * U4.28 -> Q4.27: drop 16 volume bits (15 + 28 - 16 = 27).
template <>
inline int32_t MixMul<int32_t, int16_t, int32_t>(int16_t value, int32_t volume) {
    return value * (volume >> 16);
}

// Q4.27 * U4.28 -> Q4.27: shed 12 + 16 = 28 fractional bits before multiply.
template <>
inline int32_t MixMul<int32_t, int32_t, int32_t>(int32_t value, int32_t volume) {
    return (value >> 12) * (volume >> 16);
}

// float * U4.12 -> float: normalize the volume by 2^-12.
template <>
inline float MixMul<float, float, int16_t>(float value, int16_t volume) {
    static const float norm = 1. / (1 << 12);
    return value * volume * norm;
}

// float * U4.28 -> float: normalize the volume by 2^-28.
template <>
inline float MixMul<float, float, int32_t>(float value, int32_t volume) {
    static const float norm = 1. / (1 << 28);
    return value * volume * norm;
}

// float * U4.12 -> Q.15 with saturation via clamp16_from_float.
template <>
inline int16_t MixMul<int16_t, float, int16_t>(float value, int16_t volume) {
    return clamp16_from_float(MixMul<float, float, int16_t>(value, volume));
}

// float * U4.28 -> Q.15 with saturation.
template <>
inline int16_t MixMul<int16_t, float, int32_t>(float value, int32_t volume) {
    return clamp16_from_float(MixMul<float, float, int32_t>(value, volume));
}

// Q.15 * U4.12 -> float: 2^-(15+12) normalizes both operands at once.
template <>
inline float MixMul<float, int16_t, int16_t>(int16_t value, int16_t volume) {
    static const float norm = 1. / (1 << (15 + 12));
    return static_cast<float>(value) * static_cast<float>(volume) * norm;
}

// Q.15 * U4.28 -> float: 1ULL is required — (15 + 28) = 43 overflows 32 bits.
template <>
inline float MixMul<float, int16_t, int32_t>(int16_t value, int32_t volume) {
    static const float norm = 1. / (1ULL << (15 + 28));
    return static_cast<float>(value) * static_cast<float>(volume) * norm;
}

// The four int16 outputs below mix in Q4.27 then shift back to Q.15,
// saturating with clamp16.
template <>
inline int16_t MixMul<int16_t, int16_t, int16_t>(int16_t value, int16_t volume) {
    return clamp16(MixMul<int32_t, int16_t, int16_t>(value, volume) >> 12);
}

template <>
inline int16_t MixMul<int16_t, int32_t, int16_t>(int32_t value, int16_t volume) {
    return clamp16(MixMul<int32_t, int32_t, int16_t>(value, volume) >> 12);
}

template <>
inline int16_t MixMul<int16_t, int16_t, int32_t>(int16_t value, int32_t volume) {
    return clamp16(MixMul<int32_t, int16_t, int32_t>(value, volume) >> 12);
}

template <>
inline int16_t MixMul<int16_t, int32_t, int32_t>(int32_t value, int32_t volume) {
    return clamp16(MixMul<int32_t, int32_t, int32_t>(value, volume) >> 12);
}

/* Required for floating point volume. Some are needed for compilation but
 * are not needed in execution and should be removed from the final build by
 * an optimizing compiler.
 */
template <>
inline float MixMul<float, float, float>(float value, float volume) {
    return value * volume;
}

// Q.15 sample * float volume -> float: normalize the sample by 2^-15.
template <>
inline float MixMul<float, int16_t, float>(int16_t value, float volume) {
    static const float float_from_q_15 = 1. / (1 << 15);
    return value * volume * float_from_q_15;
}

// The next three exist only to satisfy the compiler; reaching them at
// runtime is a programming error, hence LOG_ALWAYS_FATAL.
template <>
inline int32_t MixMul<int32_t, int32_t, float>(int32_t value, float volume) {
    LOG_ALWAYS_FATAL("MixMul<int32_t, int32_t, float> Runtime Should not be here");
    return value * volume;
}

template <>
inline int32_t MixMul<int32_t, int16_t, float>(int16_t value, float volume) {
    LOG_ALWAYS_FATAL("MixMul<int32_t, int16_t, float> Runtime Should not be here");
    static const float u4_12_from_float = (1 << 12);
    return value * volume * u4_12_from_float;
}

template <>
inline int16_t MixMul<int16_t, int16_t, float>(int16_t value, float volume) {
    LOG_ALWAYS_FATAL("MixMul<int16_t, int16_t, float> Runtime Should not be here");
    return clamp16_from_float(MixMul<float, int16_t, float>(value, volume));
}

// float * float -> Q.15 with saturation.
template <>
inline int16_t MixMul<int16_t, float, float>(float value, float volume) {
    return clamp16_from_float(value * volume);
}
/*
* MixAccum is used to add into an accumulator register of a possibly different
* type. The TO and TI types are the same as MixMul.
*/
/* Adds `value` into the aux accumulator `*auxaccum`, converting between
 * formats where a specialization exists. The generic version only supports
 * identical TO/TI and aborts otherwise (caught at runtime, not compile time). */
template <typename TO, typename TI>
inline void MixAccum(TO *auxaccum, TI value) {
    if (!is_same<TO, TI>::value) {
        LOG_ALWAYS_FATAL("MixAccum type not properly specialized: %zu %zu\n",
                         sizeof(TO), sizeof(TI));
    }
    *auxaccum += value;
}

// Q.15 -> float accumulate: normalize by 2^-15.
template <>
inline void MixAccum<float, int16_t>(float *auxaccum, int16_t value) {
    static const float norm = 1. / (1 << 15);
    *auxaccum += norm * value;
}

// Q4.27 -> float accumulate: normalize by 2^-27.
template <>
inline void MixAccum<float, int32_t>(float *auxaccum, int32_t value) {
    static const float norm = 1. / (1 << 27);
    *auxaccum += norm * value;
}

// Q.15 -> Q4.27 accumulate: shift up 12 fractional bits.
template <>
inline void MixAccum<int32_t, int16_t>(int32_t *auxaccum, int16_t value) {
    *auxaccum += value << 12;
}

// float -> Q4.27 accumulate with saturation.
template <>
inline void MixAccum<int32_t, float>(int32_t *auxaccum, float value) {
    *auxaccum += clampq4_27_from_float(value);
}

/* MixMulAux is just like MixMul except it combines with
 * an accumulator operation MixAccum. Note the accumulation uses the raw
 * (pre-volume) input value.
 */
template <typename TO, typename TI, typename TV, typename TA>
inline TO MixMulAux(TI value, TV volume, TA *auxaccum) {
    MixAccum<TA, TI>(auxaccum, value);
    return MixMul<TO, TI, TV>(value, volume);
}
/* MIXTYPE is used to determine how the samples in the input frame
* are mixed with volume gain into the output frame.
* See the volumeRampMulti functions below for more details.
*/
/* Per-frame mixing strategies consumed by volumeRampMulti/volumeMulti below
 * (see the large comment following this enum for the full contracts). */
enum {
    MIXTYPE_MULTI,                  // N-in/N-out, accumulate, per-channel volume
    MIXTYPE_MONOEXPAND,             // 1-in/N-out, accumulate, per-channel volume
    MIXTYPE_MULTI_SAVEONLY,         // N-in/N-out, overwrite (no accumulate)
    MIXTYPE_MULTI_MONOVOL,          // like MULTI but only volume[0] is used
    MIXTYPE_MULTI_SAVEONLY_MONOVOL, // like MULTI_SAVEONLY but only volume[0]
};
/*
* The volumeRampMulti and volumeRamp functions take a MIXTYPE
* which indicates the per-frame mixing and accumulation strategy.
*
* MIXTYPE_MULTI:
* NCHAN represents number of input and output channels.
* TO: int32_t (Q4.27) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
* TV: int32_t (U4.28) or int16_t (U4.12) or float
* vol: represents a volume array.
*
* This accumulates into the out pointer.
*
* MIXTYPE_MONOEXPAND:
* Single input channel. NCHAN represents number of output channels.
* TO: int32_t (Q4.27) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
* TV: int32_t (U4.28) or int16_t (U4.12) or float
* Input channel count is 1.
* vol: represents volume array.
*
* This accumulates into the out pointer.
*
* MIXTYPE_MULTI_SAVEONLY:
* NCHAN represents number of input and output channels.
* TO: int16_t (Q.15) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
* TV: int32_t (U4.28) or int16_t (U4.12) or float
* vol: represents a volume array.
*
* MIXTYPE_MULTI_SAVEONLY does not accumulate into the out pointer.
*
* MIXTYPE_MULTI_MONOVOL:
* Same as MIXTYPE_MULTI, but uses only volume[0].
*
* MIXTYPE_MULTI_SAVEONLY_MONOVOL:
* Same as MIXTYPE_MULTI_SAVEONLY, but uses only volume[0].
*
*/
/* Mixes `frameCount` frames from `in` to `out` while linearly ramping the
 * per-channel volumes: after each frame, vol[i] += volinc[i] (and the aux
 * volume *vola += volainc). MIXTYPE selects accumulate-vs-save and the
 * volume layout; the switch on the compile-time MIXTYPE is resolved by the
 * optimizer. When `aux` is non-NULL, the channel-averaged raw input is also
 * mixed into the aux buffer at volume *vola.
 * Precondition: frameCount > 0 — the do/while runs the body before testing. */
template <int MIXTYPE, int NCHAN,
          typename TO, typename TI, typename TV, typename TA, typename TAV>
inline void volumeRampMulti(TO *out, size_t frameCount,
                            const TI *in, TA *aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc) {
#ifdef ALOGVV
    ALOGVV("volumeRampMulti, MIXTYPE:%d\n", MIXTYPE);
#endif
    if (aux != NULL) {
        do {
            TA auxaccum = 0;
            switch (MIXTYPE) {
                case MIXTYPE_MULTI:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                        vol[i] += volinc[i];
                    }
                    break;
                case MIXTYPE_MONOEXPAND:
                    // Single input sample fanned out to every output channel.
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum);
                        vol[i] += volinc[i];
                    }
                    in++;
                    break;
                case MIXTYPE_MULTI_SAVEONLY:
                    // '=' not '+=': overwrites the output instead of mixing in.
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                        vol[i] += volinc[i];
                    }
                    break;
                case MIXTYPE_MULTI_MONOVOL:
                    // Only vol[0] is applied (and ramped) for all channels.
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
                    }
                    vol[0] += volinc[0];
                    break;
                case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
                    }
                    vol[0] += volinc[0];
                    break;
                default:
                    LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                    break;
            }
            // Average the per-channel accumulation, then mix into aux at *vola.
            auxaccum /= NCHAN;
            *aux++ += MixMul<TA, TA, TAV>(auxaccum, *vola);
            vola[0] += volainc;
        } while (--frameCount);
    } else {
        // Same five strategies without the aux accumulation.
        do {
            switch (MIXTYPE) {
                case MIXTYPE_MULTI:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMul<TO, TI, TV>(*in++, vol[i]);
                        vol[i] += volinc[i];
                    }
                    break;
                case MIXTYPE_MONOEXPAND:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMul<TO, TI, TV>(*in, vol[i]);
                        vol[i] += volinc[i];
                    }
                    in++;
                    break;
                case MIXTYPE_MULTI_SAVEONLY:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
                        vol[i] += volinc[i];
                    }
                    break;
                case MIXTYPE_MULTI_MONOVOL:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMul<TO, TI, TV>(*in++, vol[0]);
                    }
                    vol[0] += volinc[0];
                    break;
                case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ = MixMul<TO, TI, TV>(*in++, vol[0]);
                    }
                    vol[0] += volinc[0];
                    break;
                default:
                    LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                    break;
            }
        } while (--frameCount);
    }
}
/* Non-ramping counterpart of volumeRampMulti: applies constant per-channel
 * volumes `vol` (and constant aux volume `vola`) for all `frameCount` frames.
 * Precondition: frameCount > 0 — the do/while runs the body before testing. */
template <int MIXTYPE, int NCHAN,
          typename TO, typename TI, typename TV, typename TA, typename TAV>
inline void volumeMulti(TO *out, size_t frameCount,
                        const TI *in, TA *aux, const TV *vol, TAV vola) {
#ifdef ALOGVV
    ALOGVV("volumeMulti MIXTYPE:%d\n", MIXTYPE);
#endif
    if (aux != NULL) {
        do {
            TA auxaccum = 0;
            switch (MIXTYPE) {
                case MIXTYPE_MULTI:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                    }
                    break;
                case MIXTYPE_MONOEXPAND:
                    // Single input sample fanned out to every output channel.
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum);
                    }
                    in++;
                    break;
                case MIXTYPE_MULTI_SAVEONLY:
                    // '=' not '+=': overwrites the output instead of mixing in.
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                    }
                    break;
                case MIXTYPE_MULTI_MONOVOL:
                    // Only vol[0] is applied for all channels.
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
                    }
                    break;
                case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum);
                    }
                    break;
                default:
                    LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                    break;
            }
            // Average the per-channel accumulation, then mix into aux at vola.
            auxaccum /= NCHAN;
            *aux++ += MixMul<TA, TA, TAV>(auxaccum, vola);
        } while (--frameCount);
    } else {
        // Same five strategies without the aux accumulation.
        do {
            switch (MIXTYPE) {
                case MIXTYPE_MULTI:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMul<TO, TI, TV>(*in++, vol[i]);
                    }
                    break;
                case MIXTYPE_MONOEXPAND:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMul<TO, TI, TV>(*in, vol[i]);
                    }
                    in++;
                    break;
                case MIXTYPE_MULTI_SAVEONLY:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
                    }
                    break;
                case MIXTYPE_MULTI_MONOVOL:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ += MixMul<TO, TI, TV>(*in++, vol[0]);
                    }
                    break;
                case MIXTYPE_MULTI_SAVEONLY_MONOVOL:
                    for (int i = 0; i < NCHAN; ++i) {
                        *out++ = MixMul<TO, TI, TV>(*in++, vol[0]);
                    }
                    break;
                default:
                    LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE);
                    break;
            }
        } while (--frameCount);
    }
}
} // namespace cc

View File

@@ -0,0 +1,520 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include <memory>
#include "audio/android/PcmData.h"
#include "audio/include/AudioDef.h"
#include "base/Log.h"
#define LOG_TAG "AudioPlayerProvider"
#include <algorithm> // for std::find_if
#include <cstdlib>
#include <utility>
#include "audio/android/AudioDecoder.h"
#include "audio/android/AudioDecoderProvider.h"
#include "audio/android/AudioMixerController.h"
#include "audio/android/AudioPlayerProvider.h"
#include "audio/android/ICallerThreadUtils.h"
#include "audio/android/PcmAudioPlayer.h"
#include "audio/android/PcmAudioService.h"
#include "audio/android/UrlAudioPlayer.h"
#include "audio/android/utils/Utils.h"
#include "base/ThreadPool.h"
#include "base/memory/Memory.h"
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#include <sys/system_properties.h>
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
#include "cocos/platform/FileUtils.h"
#include "cocos/platform/openharmony/FileUtils-OpenHarmony.h"
#endif
#include <algorithm> // for std::find_if
#include <cstdlib>
#include <utility>
namespace cc {
// Returns the platform API level, cached after the first successful query.
// On OpenHarmony it returns INT_MAX so callers always take the >= 17
// (pcm-streaming) path.
static int getSystemAPILevel() {
    static int sSystemApiLevel = -1;
#if CC_PLATFORM == CC_PLATFORM_ANDROID
    // Only positive results are cached; a failed query (<= 0) is retried
    // on the next call.
    if (sSystemApiLevel > 0) {
        return sSystemApiLevel;
    }
    int apiLevel = getSDKVersion();
    if (apiLevel > 0) {
        ALOGD("Android API level: %d", apiLevel);
    } else {
        ALOGE("Fail to get Android API level!");
    }
    sSystemApiLevel = apiLevel;
    return apiLevel;
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
    // TODO(qgh): On the openharmony platform, pcm streaming must be used
    return std::numeric_limits<int>::max();
#endif
}
// Per-extension threshold (in bytes) below which a file is treated as a
// "small" effect and fully decoded to pcm; see isSmallFile().
struct AudioFileIndicator {
    ccstd::string extension;
    int smallSizeIndicator;
};

static AudioFileIndicator gAudioFileIndicator[] = {
    {"default", 128000}, // If we could not handle the audio format, return default value, the position should be first.
    {".wav", 1024000},
    {".ogg", 128000},
    {".mp3", 160000}};
// Builds the provider around an existing OpenSL engine/output mix. On API
// level >= 17 it also spins up the pcm mixing pipeline (AudioMixerController
// + PcmAudioService); older systems fall back to UrlAudioPlayer only.
// The decoding thread pool is a cached pool of 1..8 workers.
AudioPlayerProvider::AudioPlayerProvider(SLEngineItf engineItf, SLObjectItf outputMixObject,
                                         int deviceSampleRate, int bufferSizeInFrames,
                                         const FdGetterCallback &fdGetterCallback, //NOLINT(modernize-pass-by-value)
                                         ICallerThreadUtils *callerThreadUtils)
: _engineItf(engineItf), _outputMixObject(outputMixObject), _deviceSampleRate(deviceSampleRate), _bufferSizeInFrames(bufferSizeInFrames), _fdGetterCallback(fdGetterCallback), _callerThreadUtils(callerThreadUtils), _pcmAudioService(nullptr), _mixController(nullptr), _threadPool(LegacyThreadPool::newCachedThreadPool(1, 8, 5, 2, 2)) {
    ALOGI("deviceSampleRate: %d, bufferSizeInFrames: %d", _deviceSampleRate, _bufferSizeInFrames);
    // Pcm mixing path needs OpenSL ES features available from API 17 on.
    if (getSystemAPILevel() >= 17) {
        _mixController = ccnew AudioMixerController(_bufferSizeInFrames, _deviceSampleRate, 2);
        _mixController->init();
        _pcmAudioService = ccnew PcmAudioService(engineItf, outputMixObject);
        _pcmAudioService->init(_mixController, 2, deviceSampleRate, bufferSizeInFrames * 2);
    }
    ALOG_ASSERT(callerThreadUtils != nullptr, "Caller thread utils parameter should not be nullptr!");
}
// Stops every url player first, then tears down the pcm service, the mixer
// controller, and finally the decoding thread pool.
AudioPlayerProvider::~AudioPlayerProvider() {
    ALOGV("~AudioPlayerProvider()");
    UrlAudioPlayer::stopAll();
    SL_SAFE_DELETE(_pcmAudioService);
    SL_SAFE_DELETE(_mixController);
    SL_SAFE_DELETE(_threadPool);
}
// Returns a player for `audioFilePath`, or nullptr on failure.
// Small files are decoded to pcm (blocking up to 2s for the decode thread)
// and played through the shared mixer; large files get a UrlAudioPlayer.
// Ownership of the returned player follows the project's IAudioPlayer
// contract — not visible here, confirm against callers.
IAudioPlayer *AudioPlayerProvider::getAudioPlayer(const ccstd::string &audioFilePath) {
    // Pcm data decoding by OpenSLES API only supports in API level 17 and later.
    if (getSystemAPILevel() < 17) {
        AudioFileInfo info = getFileInfo(audioFilePath);
        if (info.isValid()) {
            return createUrlAudioPlayer(info);
        }
        return nullptr;
    }
    IAudioPlayer *player = nullptr;
    _pcmCacheMutex.lock();
    auto iter = _pcmCache.find(audioFilePath);
    if (iter != _pcmCache.end()) { // Found pcm cache means it was used to be a PcmAudioService
        // Copy the cached data before unlocking so the iterator is never
        // dereferenced outside the lock.
        PcmData pcmData = iter->second;
        _pcmCacheMutex.unlock();
        player = obtainPcmAudioPlayer(audioFilePath, pcmData);
        ALOGV_IF(player == nullptr, "%s, %d: player is nullptr, path: %s", __FUNCTION__, __LINE__, audioFilePath.c_str());
    } else {
        _pcmCacheMutex.unlock();
        // Check audio file size to determine to use a PcmAudioService or UrlAudioPlayer,
        // generally PcmAudioService is used for playing short audio like game effects while
        // playing background music uses UrlAudioPlayer
        AudioFileInfo info = getFileInfo(audioFilePath);
        if (info.isValid()) {
            if (isSmallFile(info)) {
                // Put an empty lambda to preloadEffect since we only want the future object to get PcmData
                // Shared flags let the decode callback (possibly on another
                // thread) publish its result back to this stack frame.
                auto pcmData = std::make_shared<PcmData>();
                auto isSucceed = std::make_shared<bool>(false);
                auto isReturnFromCache = std::make_shared<bool>(false);
                auto isPreloadFinished = std::make_shared<bool>(false);
                std::thread::id threadId = std::this_thread::get_id();
                void *infoPtr = &info;
                ccstd::string url = info.url;
                preloadEffect(
                    info, [infoPtr, url, threadId, pcmData, isSucceed, isReturnFromCache, isPreloadFinished](bool succeed, PcmData data) {
                        // If the callback is in the same thread as caller's, it means that we found it
                        // in the cache
                        *isReturnFromCache = std::this_thread::get_id() == threadId;
                        *pcmData = std::move(data);
                        *isSucceed = succeed;
                        *isPreloadFinished = true;
                        ALOGV("FileInfo (%p), Set isSucceed flag: %d, path: %s", infoPtr, succeed, url.c_str());
                    },
                    true);
                // Only block if the decode really went asynchronous.
                if (!*isReturnFromCache && !*isPreloadFinished) {
                    std::unique_lock<std::mutex> lck(_preloadWaitMutex);
                    // Wait for 2 seconds for the decoding in sub thread finishes.
                    ALOGV("FileInfo (%p), Waiting preload (%s) to finish ...", &info, audioFilePath.c_str());
                    _preloadWaitCond.wait_for(lck, std::chrono::seconds(2));
                    ALOGV("FileInfo (%p), Waitup preload (%s) ...", &info, audioFilePath.c_str());
                }
                if (*isSucceed) {
                    if (pcmData->isValid()) {
                        player = obtainPcmAudioPlayer(info.url, *pcmData);
                        ALOGV_IF(player == nullptr, "%s, %d: player is nullptr, path: %s", __FUNCTION__, __LINE__, audioFilePath.c_str());
                    } else {
                        ALOGE("pcm data is invalid, path: %s", audioFilePath.c_str());
                    }
                } else {
                    ALOGE("FileInfo (%p), preloadEffect (%s) failed", &info, audioFilePath.c_str());
                }
            } else {
                player = createUrlAudioPlayer(info);
                ALOGV_IF(player == nullptr, "%s, %d: player is nullptr, path: %s", __FUNCTION__, __LINE__, audioFilePath.c_str());
            }
        } else {
            ALOGE("File info is invalid, path: %s", audioFilePath.c_str());
        }
    }
    ALOGV_IF(player == nullptr, "%s, %d return nullptr", __FUNCTION__, __LINE__);
    return player;
}
// Public preload entry point: answers from the pcm cache when possible,
// otherwise kicks off an async decode and delivers the result back on the
// caller's thread via ICallerThreadUtils.
void AudioPlayerProvider::preloadEffect(const ccstd::string &audioFilePath, const PreloadCallback &callback) {
    // Pcm data decoding by OpenSLES API only supports in API level 17 and later.
    if (getSystemAPILevel() < 17) {
        PcmData data;
        callback(true, data);
        return;
    }
    _pcmCacheMutex.lock();
    auto &&iter = _pcmCache.find(audioFilePath);
    if (iter != _pcmCache.end()) {
        ALOGV("preload return from cache: (%s)", audioFilePath.c_str());
        // BUGFIX: copy the cached PcmData while the mutex is still held.
        // Previously the iterator was dereferenced after unlock(), racing
        // with clearPcmCache()/clearAllPcmCaches() on another thread.
        PcmData cached = iter->second;
        _pcmCacheMutex.unlock();
        // Invoke the callback outside the lock so it may safely re-enter.
        callback(true, cached);
        return;
    }
    _pcmCacheMutex.unlock();
    auto info = getFileInfo(audioFilePath);
    preloadEffect(
        info, [this, callback](bool succeed, const PcmData &data) {
            // Hop back to the caller's thread before reporting the result.
            _callerThreadUtils->performFunctionInCallerThread([this, succeed, data, callback]() {
                callback(succeed, data);
            });
        },
        false);
}
// Used internally
void AudioPlayerProvider::preloadEffect(const AudioFileInfo &info, const PreloadCallback &callback, bool isPreloadInPlay2d) {
PcmData pcmData;
if (!info.isValid()) {
callback(false, pcmData);
return;
}
if (isSmallFile(info)) {
ccstd::string audioFilePath = info.url;
// 1. First time check, if it wasn't in the cache, goto 2 step
_pcmCacheMutex.lock();
auto &&iter = _pcmCache.find(audioFilePath);
if (iter != _pcmCache.end()) {
ALOGV("1. Return pcm data from cache, url: %s", info.url.c_str());
_pcmCacheMutex.unlock();
callback(true, iter->second);
return;
}
_pcmCacheMutex.unlock();
{
// 2. Check whether the audio file is being preloaded, if it has been removed from map just now,
// goto step 3
std::lock_guard<std::mutex> lck(_preloadCallbackMutex);
auto &&preloadIter = _preloadCallbackMap.find(audioFilePath);
if (preloadIter != _preloadCallbackMap.end()) {
ALOGV("audio (%s) is being preloaded, add to callback vector!", audioFilePath.c_str());
PreloadCallbackParam param;
param.callback = callback;
param.isPreloadInPlay2d = isPreloadInPlay2d;
preloadIter->second.push_back(std::move(param));
return;
}
// 3. Check it in cache again. If it has been removed from map just now, the file is in
// the cache absolutely.
_pcmCacheMutex.lock();
auto &&iter = _pcmCache.find(audioFilePath);
if (iter != _pcmCache.end()) {
ALOGV("2. Return pcm data from cache, url: %s", info.url.c_str());
_pcmCacheMutex.unlock();
callback(true, iter->second);
return;
}
_pcmCacheMutex.unlock();
PreloadCallbackParam param;
param.callback = callback;
param.isPreloadInPlay2d = isPreloadInPlay2d;
ccstd::vector<PreloadCallbackParam> callbacks;
callbacks.push_back(std::move(param));
_preloadCallbackMap.insert(std::make_pair(audioFilePath, std::move(callbacks)));
}
_threadPool->pushTask([this, audioFilePath](int /*tid*/) {
ALOGV("AudioPlayerProvider::preloadEffect: (%s)", audioFilePath.c_str());
PcmData d;
AudioDecoder *decoder = AudioDecoderProvider::createAudioDecoder(_engineItf, audioFilePath, _bufferSizeInFrames, _deviceSampleRate, _fdGetterCallback);
bool ret = decoder != nullptr && decoder->start();
if (ret) {
d = decoder->getResult();
std::lock_guard<std::mutex> lck(_pcmCacheMutex);
_pcmCache.insert(std::make_pair(audioFilePath, d));
} else {
ALOGE("decode (%s) failed!", audioFilePath.c_str());
}
ALOGV("decode %s", (ret ? "succeed" : "failed"));
std::lock_guard<std::mutex> lck(_preloadCallbackMutex);
auto &&preloadIter = _preloadCallbackMap.find(audioFilePath);
if (preloadIter != _preloadCallbackMap.end()) {
auto &&params = preloadIter->second;
ALOGV("preload (%s) callback count: %d", audioFilePath.c_str(), (int)params.size());
PcmData result = decoder->getResult();
for (auto &&param : params) {
param.callback(ret, result);
if (param.isPreloadInPlay2d) {
_preloadWaitCond.notify_one();
}
}
_preloadCallbackMap.erase(preloadIter);
}
AudioDecoderProvider::destroyAudioDecoder(&decoder);
});
} else {
ALOGV("File (%s) is too large, ignore preload!", info.url.c_str());
callback(true, pcmData);
}
}
// Resolves size/offset (and, for packaged assets, a file descriptor) for
// `audioFilePath`. Returns a default-constructed (invalid) AudioFileInfo on
// failure; callers check info.isValid().
AudioPlayerProvider::AudioFileInfo AudioPlayerProvider::getFileInfo(
    const ccstd::string &audioFilePath) {
    AudioFileInfo info;
    long fileSize = 0; //NOLINT(google-runtime-int)
    off_t start = 0;
    off_t length = 0;
    int assetFd = -1;
#if CC_PLATFORM == CC_PLATFORM_ANDROID
    // Paths not starting with '/' are treated as packaged assets and opened
    // through the fd-getter callback (apk asset fd + offset + length).
    if (audioFilePath[0] != '/') {
        ccstd::string relativePath;
        size_t position = audioFilePath.find("@assets/");
        if (0 == position) {
            // "@assets/" is at the beginning of the path and we don't want it
            relativePath = audioFilePath.substr(strlen("@assets/"));
        } else {
            relativePath = audioFilePath;
        }
        assetFd = _fdGetterCallback(relativePath, &start, &length);
        if (assetFd <= 0) {
            ALOGE("Failed to open file descriptor for '%s'", audioFilePath.c_str());
            return info;
        }
        fileSize = length;
    } else {
        // Absolute path: stat the file size via fseek/ftell; no fd is kept.
        FILE *fp = fopen(audioFilePath.c_str(), "rb");
        if (fp != nullptr) {
            fseek(fp, 0, SEEK_END);
            fileSize = ftell(fp);
            fclose(fp);
        } else {
            return info;
        }
    }
    info.url = audioFilePath;
    // AssetFd wraps the descriptor; presumably it closes it on destruction —
    // confirm against AssetFd's implementation.
    info.assetFd = std::make_shared<AssetFd>(assetFd);
    info.start = start;
    info.length = fileSize;
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
    FileUtilsOpenHarmony* fileUtils = dynamic_cast<FileUtilsOpenHarmony*>(FileUtils::getInstance());
    if(!fileUtils) {
        return info;
    }
    RawFileDescriptor descriptor;
    fileUtils->getRawFileDescriptor(audioFilePath, descriptor);
    info.url = audioFilePath;
    info.assetFd = std::make_shared<AssetFd>(descriptor.fd);
    info.start = descriptor.start;
    info.length = descriptor.length;
#endif
    ALOGV("(%s) file size: %ld", audioFilePath.c_str(), fileSize);
    return info;
}
bool AudioPlayerProvider::isSmallFile(const AudioFileInfo &info) { //NOLINT(readability-convert-member-functions-to-static)
#if CC_PLATFORM == CC_PLATFORM_OPENHARMONY
// TODO(qgh): OpenHarmony system does not support this function yet
return true;
#endif
//REFINE: If file size is smaller than 100k, we think it's a small file. This value should be set by developers.
auto &audioFileInfo = const_cast<AudioFileInfo &>(info);
size_t judgeCount = sizeof(gAudioFileIndicator) / sizeof(gAudioFileIndicator[0]);
size_t pos = audioFileInfo.url.rfind('.');
ccstd::string extension;
if (pos != ccstd::string::npos) {
extension = audioFileInfo.url.substr(pos);
}
auto *iter = std::find_if(std::begin(gAudioFileIndicator), std::end(gAudioFileIndicator),
[&extension](const AudioFileIndicator &judge) -> bool {
return judge.extension == extension;
});
if (iter != std::end(gAudioFileIndicator)) {
// ALOGV("isSmallFile: found: %s: ", iter->extension.c_str());
return info.length < iter->smallSizeIndicator;
}
// ALOGV("isSmallFile: not found return default value");
return info.length < gAudioFileIndicator[0].smallSizeIndicator;
}
// Returns the cached pcm duration for `filePath`, or 0 when the file has not
// been decoded yet. Thread-safe via _pcmCacheMutex.
float AudioPlayerProvider::getDurationFromFile(const ccstd::string &filePath) {
    std::lock_guard<std::mutex> lck(_pcmCacheMutex);
    auto cached = _pcmCache.find(filePath);
    return cached != _pcmCache.end() ? cached->second.duration : 0;
}
// Evicts one entry from the pcm cache; logs a warning for unknown paths.
void AudioPlayerProvider::clearPcmCache(const ccstd::string &audioFilePath) {
    std::lock_guard<std::mutex> lck(_pcmCacheMutex);
    auto entry = _pcmCache.find(audioFilePath);
    if (entry == _pcmCache.end()) {
        ALOGW("Couldn't find the pcm cache: (%s)", audioFilePath.c_str());
        return;
    }
    ALOGV("clear pcm cache: (%s)", audioFilePath.c_str());
    _pcmCache.erase(entry);
}
// Drops every cached pcm buffer at once (e.g. on memory pressure).
void AudioPlayerProvider::clearAllPcmCaches() {
    std::lock_guard<std::mutex> lck(_pcmCacheMutex);
    _pcmCache.clear();
}
// Creates a PcmAudioPlayer primed with already-decoded pcm data.
// Returns nullptr when the pcm data is invalid or allocation fails.
PcmAudioPlayer *AudioPlayerProvider::obtainPcmAudioPlayer(const ccstd::string &url,
                                                          const PcmData &pcmData) {
    if (!pcmData.isValid()) {
        ALOGE("obtainPcmAudioPlayer failed, pcmData isn't valid!");
        return nullptr;
    }
    auto *player = ccnew PcmAudioPlayer(_mixController, _callerThreadUtils);
    if (player != nullptr) {
        player->prepare(url, pcmData);
    }
    return player;
}
// Creates a UrlAudioPlayer that streams directly from a url or asset fd.
// Returns nullptr when the url is empty, allocation fails, or prepare fails.
UrlAudioPlayer *AudioPlayerProvider::createUrlAudioPlayer(
    const AudioPlayerProvider::AudioFileInfo &info) {
    if (info.url.empty()) {
        ALOGE("createUrlAudioPlayer failed, url is empty!");
        return nullptr;
    }
#if CC_PLATFORM == CC_PLATFORM_ANDROID
    // Packaged assets carry a valid fd; plain files/urls use the URI locator.
    SLuint32 locatorType = info.assetFd->getFd() > 0 ? SL_DATALOCATOR_ANDROIDFD : SL_DATALOCATOR_URI;
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
    SLuint32 locatorType = SL_DATALOCATOR_URI;
#endif
    auto *urlPlayer = new (std::nothrow) UrlAudioPlayer(_engineItf, _outputMixObject, _callerThreadUtils);
    // BUGFIX: nothrow-new may return nullptr; the old code called prepare()
    // on it unconditionally, crashing under allocation failure.
    if (urlPlayer == nullptr) {
        ALOGE("createUrlAudioPlayer failed, out of memory!");
        return nullptr;
    }
    bool ret = urlPlayer->prepare(info.url, locatorType, info.assetFd, info.start, info.length);
    if (!ret) {
        SL_SAFE_DELETE(urlPlayer);
    }
    return urlPlayer;
}
void AudioPlayerProvider::pause() {
if (_mixController != nullptr) {
_mixController->pause();
}
if (_pcmAudioService != nullptr) {
_pcmAudioService->pause();
}
}
// Resumes the mixer and the PCM output service after a pause(); either
// member may still be null, mirroring pause().
void AudioPlayerProvider::resume() {
    if (_mixController) {
        _mixController->resume();
    }
    if (_pcmAudioService) {
        _pcmAudioService->resume();
    }
}
// Stores decoded PCM data in the cache keyed by file path. A pre-existing
// entry wins; the new data is ignored in that case.
void AudioPlayerProvider::registerPcmData(const ccstd::string &audioFilePath, PcmData &data) {
    std::lock_guard<std::mutex> cacheGuard(_pcmCacheMutex);
    const bool alreadyCached = _pcmCache.find(audioFilePath) != _pcmCache.end();
    if (alreadyCached) {
        CC_LOG_DEBUG("file %s pcm data is already cached.", audioFilePath.c_str());
        return;
    }
    _pcmCache.emplace(audioFilePath, data);
}
// Fills 'header' from the cached PCM entry for the file, if any.
// Returns false when the file isn't cached; 'header' is untouched then.
bool AudioPlayerProvider::getPcmHeader(const ccstd::string &audioFilePath, PCMHeader &header) {
    std::lock_guard<std::mutex> cacheGuard(_pcmCacheMutex);
    const auto found = _pcmCache.find(audioFilePath);
    if (found == _pcmCache.end()) {
        return false;
    }
    ALOGV("get pcm header from cache, url: %s", audioFilePath.c_str());
    const auto &cached = found->second;
    // On Android, all pcm buffer is resampled to sign16.
    header.bytesPerFrame = cached.bitsPerSample / 8;
    header.channelCount = cached.numChannels;
    header.dataFormat = AudioDataFormat::SIGNED_16;
    header.sampleRate = cached.sampleRate;
    header.totalFrames = cached.numFrames;
    return true;
}
// Copies the cached PCM data for the file into 'data'.
// Returns false when the file isn't cached; 'data' is untouched then.
bool AudioPlayerProvider::getPcmData(const ccstd::string &audioFilePath, PcmData &data) {
    std::lock_guard<std::mutex> cacheGuard(_pcmCacheMutex);
    const auto found = _pcmCache.find(audioFilePath);
    if (found == _pcmCache.end()) {
        return false;
    }
    ALOGV("get pcm buffer from cache, url: %s", audioFilePath.c_str());
    // On Android, all pcm buffer is resampled to sign16.
    data = found->second;
    return true;
}
} // namespace cc

View File

@@ -0,0 +1,122 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <condition_variable>
#include <memory>
#include "audio/android/IAudioPlayer.h"
#include "audio/android/OpenSLHelper.h"
#include "audio/android/PcmData.h"
#include "audio/include/AudioDef.h"
#include "base/std/container/unordered_map.h"
namespace cc {
// Manage PcmAudioPlayer& UrlAudioPlayer
class PcmAudioPlayer;
class PcmAudioService;
class UrlAudioPlayer;
class AudioMixerController;
class ICallerThreadUtils;
class AssetFd;
class LegacyThreadPool;
// Creates the right kind of audio player for a file: small files are decoded
// to PCM, cached, and mixed by PcmAudioPlayer; larger files are streamed by
// an OpenSL ES UrlAudioPlayer. Also owns the PCM cache and preload machinery.
class AudioPlayerProvider {
public:
    AudioPlayerProvider(SLEngineItf engineItf, SLObjectItf outputMixObject, int deviceSampleRate,
                        int bufferSizeInFrames, const FdGetterCallback &fdGetterCallback,
                        ICallerThreadUtils *callerThreadUtils);
    virtual ~AudioPlayerProvider();
    // True when the file already has a decoded PCM buffer in the cache.
    bool isFileCached(const ccstd::string &audioFilePath);
    IAudioPlayer *getAudioPlayer(const ccstd::string &audioFilePath);
    // Copy the cached header / PCM data out; return false when not cached.
    bool getPcmHeader(const ccstd::string &audioFilePath, PCMHeader &header);
    bool getPcmData(const ccstd::string &audioFilePath, PcmData &data);
    // Invoked with (success, decoded data) once preloading finishes.
    using PreloadCallback = std::function<void(bool, PcmData)>;
    void preloadEffect(const ccstd::string &audioFilePath, const PreloadCallback &callback);
    // Registers already-decoded PCM data; an existing entry is kept as-is.
    void registerPcmData(const ccstd::string &audioFilePath, PcmData &data);
    // Duration in seconds for cached files; 0 when the file isn't cached.
    float getDurationFromFile(const ccstd::string &filePath);
    void clearPcmCache(const ccstd::string &audioFilePath);
    void clearAllPcmCaches();
    void pause();
    void resume();
private:
    // Location of an audio file: either a plain url, or an (fd, start,
    // length) window into the apk via assetFd.
    struct AudioFileInfo {
        ccstd::string url;
        std::shared_ptr<AssetFd> assetFd;
        off_t start{};
        // Brace-initialized: isValid() reads 'length', and previously a
        // default-constructed info left it indeterminate ('start' was
        // initialized but 'length' was not).
        off_t length{};
        AudioFileInfo()
        : assetFd(nullptr) {}
        inline bool isValid() const {
            return !url.empty() && length > 0;
        }
    };
    PcmAudioPlayer *obtainPcmAudioPlayer(const ccstd::string &url, const PcmData &pcmData);
    UrlAudioPlayer *createUrlAudioPlayer(const AudioFileInfo &info);
    void preloadEffect(const AudioFileInfo &info, const PreloadCallback &callback, bool isPreloadInPlay2d);
    AudioFileInfo getFileInfo(const ccstd::string &audioFilePath);
    // Heuristic: small files are fully decoded and mixed, big ones streamed.
    bool isSmallFile(const AudioFileInfo &info);
    SLEngineItf _engineItf;
    SLObjectItf _outputMixObject;
    int _deviceSampleRate;
    int _bufferSizeInFrames;
    FdGetterCallback _fdGetterCallback;
    ICallerThreadUtils *_callerThreadUtils;
    // Decoded-PCM cache keyed by file path, guarded by _pcmCacheMutex.
    ccstd::unordered_map<ccstd::string, PcmData> _pcmCache;
    std::mutex _pcmCacheMutex;
    struct PreloadCallbackParam {
        PreloadCallback callback;
        bool isPreloadInPlay2d;
    };
    // Pending preload callbacks per file, guarded by _preloadCallbackMutex.
    ccstd::unordered_map<ccstd::string, ccstd::vector<PreloadCallbackParam>> _preloadCallbackMap;
    std::mutex _preloadCallbackMutex;
    std::mutex _preloadWaitMutex;
    std::condition_variable _preloadWaitCond;
    PcmAudioService *_pcmAudioService;
    AudioMixerController *_mixController;
    LegacyThreadPool *_threadPool;
};
} // namespace cc

View File

@@ -0,0 +1,792 @@
/*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "AudioResampler"
//#define LOG_NDEBUG 0
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <new>
#include "audio/android/cutils/log.h"
#include "audio/android/utils/Utils.h"
//#include <cutils/properties.h>
#include "audio/android/AudioResampler.h"
#include "audio/common/utils/include/primitives.h"
//#include "audio/android/AudioResamplerSinc.h"
#include "audio/android/AudioResamplerCubic.h"
#include "base/memory/Memory.h"
//#include "AudioResamplerDyn.h"
//cjh #ifdef __arm__
// #define ASM_ARM_RESAMP1 // enable asm optimisation for ResamplerOrder1
//#endif
namespace cc {
// ----------------------------------------------------------------------------
// First-order (linear interpolation) resampler for 16-bit mono/stereo PCM;
// created by AudioResampler::create() for LOW_QUALITY.
class AudioResamplerOrder1 : public AudioResampler {
public:
AudioResamplerOrder1(int inChannelCount, int32_t sampleRate) : AudioResampler(inChannelCount, sampleRate, LOW_QUALITY), mX0L(0), mX0R(0) {
}
// Accumulates resampled frames into 'out'; dispatches to the mono or
// stereo int16 loop based on mChannelCount.
virtual size_t resample(int32_t *out, size_t outFrameCount,
AudioBufferProvider *provider);
private:
// number of bits used in interpolation multiply - 15 bits avoids overflow
static const int kNumInterpBits = 15;
// bits to shift the phase fraction down to avoid overflow
static const int kPreInterpShift = kNumPhaseBits - kNumInterpBits;
void init() {}
size_t resampleMono16(int32_t *out, size_t outFrameCount,
AudioBufferProvider *provider);
size_t resampleStereo16(int32_t *out, size_t outFrameCount,
AudioBufferProvider *provider);
#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
void AsmMono16Loop(int16_t *in, int32_t *maxOutPt, int32_t maxInIdx,
size_t &outputIndex, int32_t *out, size_t &inputIndex, int32_t vl, int32_t vr,
uint32_t &phaseFraction, uint32_t phaseIncrement);
void AsmStereo16Loop(int16_t *in, int32_t *maxOutPt, int32_t maxInIdx,
size_t &outputIndex, int32_t *out, size_t &inputIndex, int32_t vl, int32_t vr,
uint32_t &phaseFraction, uint32_t phaseIncrement);
#endif // ASM_ARM_RESAMP1
// Linear interpolation between x0 and x1 by phase fraction f; only the top
// kNumInterpBits bits of the 2.30 fraction take part in the multiply.
static inline int32_t Interp(int32_t x0, int32_t x1, uint32_t f) {
return x0 + (((x1 - x0) * (int32_t)(f >> kPreInterpShift)) >> kNumInterpBits);
}
// Advances the fixed-point read position: adds the increment to the 2.30
// fraction and folds completed whole frames into *index.
static inline void Advance(size_t *index, uint32_t *frac, uint32_t inc) {
*frac += inc;
*index += (size_t)(*frac >> kNumPhaseBits);
*frac &= kPhaseMask;
}
// Last input sample (left/right) of the previously released buffer, kept
// so interpolation can straddle getNextBuffer() boundaries.
int mX0L;
int mX0R;
};
/*static*/
// 2^kNumPhaseBits: converts an (input rate / output rate) ratio into the
// fixed-point phase increment computed in setSampleRate().
const double AudioResampler::kPhaseMultiplier = 1L << AudioResampler::kNumPhaseBits;
// Reports whether the given src_quality is one of the values this build
// recognizes (DEFAULT through VERY_HIGH).
bool AudioResampler::qualityIsSupported(src_quality quality) {
    return quality == DEFAULT_QUALITY ||
           quality == LOW_QUALITY ||
           quality == MED_QUALITY ||
           quality == HIGH_QUALITY ||
           quality == VERY_HIGH_QUALITY;
}
// ----------------------------------------------------------------------------
// One-time init guard and process-wide default resampler quality, consumed
// by create() via pthread_once(&once_control, init_routine).
static pthread_once_t once_control = PTHREAD_ONCE_INIT;
static AudioResampler::src_quality defaultQuality = AudioResampler::DEFAULT_QUALITY;
// pthread_once hook: originally read the "af.resampler.quality" system
// property into defaultQuality; that lookup is disabled in this port, so
// defaultQuality keeps its static initializer (DEFAULT_QUALITY).
void AudioResampler::init_routine() {
// int resamplerQuality = getSystemProperty("af.resampler.quality");
// if (resamplerQuality > 0) {
// defaultQuality = (src_quality) resamplerQuality;
// ALOGD("forcing AudioResampler quality to %d", defaultQuality);
// if (defaultQuality < DEFAULT_QUALITY || defaultQuality > VERY_HIGH_QUALITY) {
// defaultQuality = DEFAULT_QUALITY;
// }
// }
}
// Rough per-instance CPU cost estimate (in MHz) for a quality level; used by
// create() / the destructor to budget the global currentMHz load.
uint32_t AudioResampler::qualityMHz(src_quality quality) {
    switch (quality) {
        case MED_QUALITY:
            return 6;
        case HIGH_QUALITY:
            return 20;
        case VERY_HIGH_QUALITY:
            return 34;
        case DEFAULT_QUALITY:
        case LOW_QUALITY:
        default:
            return 3;
    }
}
static const uint32_t maxMHz = 130; // an arbitrary number that permits 3 VHQ, should be tunable
// Estimated aggregate CPU load of all live resamplers, guarded by 'mutex';
// create() adds qualityMHz() to it and ~AudioResampler() subtracts it.
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t currentMHz = 0;
// Factory: picks the resampler implementation for the requested quality,
// throttling the quality down when the estimated aggregate CPU load
// (currentMHz) would exceed maxMHz. Only LOW_QUALITY (linear) and
// MED_QUALITY (cubic) can actually be constructed in this port; the sinc
// resamplers were removed.
AudioResampler *AudioResampler::create(audio_format_t format, int inChannelCount,
                                       int32_t sampleRate, src_quality quality) {
    bool atFinalQuality;
    if (quality == DEFAULT_QUALITY) {
        // read the resampler default quality property the first time it is needed
        int ok = pthread_once(&once_control, init_routine);
        if (ok != 0) {
            ALOGE("%s pthread_once failed: %d", __func__, ok);
        }
        quality = defaultQuality;
        atFinalQuality = false;
    } else {
        atFinalQuality = true;
    }
    /* if the caller requests DEFAULT_QUALITY and af.resampler.property
     * has not been set, the target resampler quality is set to DYN_MED_QUALITY,
     * and allowed to "throttle" down to DYN_LOW_QUALITY if necessary
     * due to estimated CPU load of having too many active resamplers
     * (the code below the if).
     */
    if (quality == DEFAULT_QUALITY) {
        //cjh quality = DYN_MED_QUALITY;
    }
    // naive implementation of CPU load throttling doesn't account for whether resampler is active
    pthread_mutex_lock(&mutex);
    for (;;) {
        uint32_t deltaMHz = qualityMHz(quality);
        uint32_t newMHz = currentMHz + deltaMHz;
        if ((qualityIsSupported(quality) && newMHz <= maxMHz) || atFinalQuality) {
            ALOGV("resampler load %u -> %u MHz due to delta +%u MHz from quality %d",
                  currentMHz, newMHz, deltaMHz, quality);
            currentMHz = newMHz;
            break;
        }
        // not enough CPU available for proposed quality level, so try next lowest level
        switch (quality) {
            default:
            case LOW_QUALITY:
                atFinalQuality = true;
                break;
            case MED_QUALITY:
                quality = LOW_QUALITY;
                break;
            case HIGH_QUALITY:
                quality = MED_QUALITY;
                break;
            case VERY_HIGH_QUALITY:
                quality = HIGH_QUALITY;
                break;
        }
    }
    pthread_mutex_unlock(&mutex);
    // Initialized to nullptr so the unsupported-quality branches below cannot
    // leave the pointer indeterminate: ALOG_ASSERT compiles out in release
    // builds, which previously made the resampler->init() call below UB for
    // HIGH_QUALITY / VERY_HIGH_QUALITY.
    AudioResampler *resampler = nullptr;
    switch (quality) {
        default:
        case LOW_QUALITY:
            ALOGV("Create linear Resampler");
            LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT, "invalid pcm format");
            resampler = ccnew AudioResamplerOrder1(inChannelCount, sampleRate);
            break;
        case MED_QUALITY:
            ALOGV("Create cubic Resampler");
            LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT, "invalid pcm format");
            resampler = ccnew AudioResamplerCubic(inChannelCount, sampleRate);
            break;
        case HIGH_QUALITY:
            ALOGV("Create HIGH_QUALITY sinc Resampler");
            LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT, "invalid pcm format");
            ALOG_ASSERT(false, "HIGH_QUALITY isn't supported");
            // Cocos2d-x only uses MED_QUALITY, so we could remove Sinc relative files
            // resampler = ccnew AudioResamplerSinc(inChannelCount, sampleRate);
            break;
        case VERY_HIGH_QUALITY:
            ALOGV("Create VERY_HIGH_QUALITY sinc Resampler = %d", quality);
            LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT, "invalid pcm format");
            // Cocos2d-x only uses MED_QUALITY, so we could remove Sinc relative files
            // resampler = ccnew AudioResamplerSinc(inChannelCount, sampleRate, quality);
            ALOG_ASSERT(false, "VERY_HIGH_QUALITY isn't supported");
            break;
    }
    // Fail loudly instead of dereferencing null when an unsupported quality
    // reached this point.
    LOG_ALWAYS_FATAL_IF(resampler == nullptr, "unsupported resampler quality %d", quality);
    // initialize resampler
    resampler->init();
    return resampler;
}
// Validates channel count and sample rate (fatal on bad values) and zeroes
// the carry state shared by all concrete resamplers. 'quality' is recorded
// so the destructor can release the matching CPU budget.
AudioResampler::AudioResampler(int inChannelCount,
                               int32_t sampleRate, src_quality quality) : mChannelCount(inChannelCount),
                                                                          mSampleRate(sampleRate),
                                                                          mInSampleRate(sampleRate),
                                                                          mInputIndex(0),
                                                                          mPhaseFraction(0),
                                                                          mLocalTimeFreq(0),
                                                                          mPTS(AudioBufferProvider::kInvalidPTS),
                                                                          mQuality(quality) {
    const int maxChannels = 2; //cjh quality < DYN_LOW_QUALITY ? 2 : 8;
    const bool channelCountOk = inChannelCount >= 1 && inChannelCount <= maxChannels;
    if (!channelCountOk) {
        LOG_ALWAYS_FATAL("Unsupported sample format %d quality %d channels",
                         quality, inChannelCount);
    }
    if (sampleRate <= 0) {
        LOG_ALWAYS_FATAL("Unsupported sample rate %d Hz", sampleRate);
    }
    // initialize common members
    mVolume[0] = mVolume[1] = 0;
    mBuffer.frameCount = 0;
}
// Gives the CPU budget claimed in create() back under the global lock.
AudioResampler::~AudioResampler() {
    pthread_mutex_lock(&mutex);
    src_quality releasedQuality = getQuality();
    uint32_t releasedMHz = qualityMHz(releasedQuality);
    int32_t remainingMHz = currentMHz - releasedMHz;
    ALOGV("resampler load %u -> %d MHz due to delta -%u MHz from quality %d",
          currentMHz, remainingMHz, releasedMHz, releasedQuality);
    LOG_ALWAYS_FATAL_IF(remainingMHz < 0, "negative resampler load %d MHz", remainingMHz);
    currentMHz = remainingMHz;
    pthread_mutex_unlock(&mutex);
}
// Records the source sample rate and recomputes the fixed-point (2.30) phase
// step, i.e. the fraction of an input frame consumed per output frame.
void AudioResampler::setSampleRate(int32_t inSampleRate) {
    mInSampleRate = inSampleRate;
    const double step = (kPhaseMultiplier * inSampleRate) / mSampleRate;
    mPhaseIncrement = (uint32_t)step;
}
// REFINE: Implement anti-zipper filter
// Stores per-channel gain as U4.12 fixed point (round down), clamped to
// [0, UNITY_GAIN] by clampFloatVol().
void AudioResampler::setVolume(float left, float right) {
    const float gains[2] = {left, right};
    for (int channel = 0; channel < 2; ++channel) {
        mVolume[channel] = u4_12_from_float(clampFloatVol(gains[channel]));
    }
}
// Sets the frequency (ticks per second) of the local media clock used by
// calculateOutputPTS().
void AudioResampler::setLocalTimeFreq(uint64_t freq) {
mLocalTimeFreq = freq;
}
// Records the presentation timestamp of the next output buffer;
// kInvalidPTS disables PTS computation in calculateOutputPTS().
void AudioResampler::setPTS(int64_t pts) {
mPTS = pts;
}
// Maps an output frame index to a presentation timestamp in local-time
// ticks; kInvalidPTS is passed through untouched.
int64_t AudioResampler::calculateOutputPTS(int outputFrameIndex) {
    if (mPTS == AudioBufferProvider::kInvalidPTS) {
        return AudioBufferProvider::kInvalidPTS;
    }
    return mPTS + ((outputFrameIndex * mLocalTimeFreq) / mSampleRate);
}
// Forgets all inter-buffer carry state so the next resample() starts clean.
void AudioResampler::reset() {
    mBuffer.frameCount = 0;
    mPhaseFraction = 0;
    mInputIndex = 0;
}
// ----------------------------------------------------------------------------
// Accumulates linearly-interpolated frames into 'out', dispatching to the
// mono or stereo int16 loop; any other channel count is fatal.
size_t AudioResamplerOrder1::resample(int32_t *out, size_t outFrameCount,
                                      AudioBufferProvider *provider) {
    // should never happen, but we overflow if it does
    // ALOG_ASSERT(outFrameCount < 32767);
    if (mChannelCount == 1) {
        return resampleMono16(out, outFrameCount, provider);
    }
    if (mChannelCount == 2) {
        return resampleStereo16(out, outFrameCount, provider);
    }
    LOG_ALWAYS_FATAL("invalid channel count: %d", mChannelCount);
    return 0;
}
// Linear-interpolation resampling of interleaved stereo int16 input into the
// interleaved 32-bit accumulation buffer 'out' (accumulates: caller clears).
// Carries mInputIndex / mPhaseFraction / mX0L / mX0R across calls so
// interpolation can straddle provider buffer boundaries. Returns the number
// of frames produced (outputIndex / 2).
size_t AudioResamplerOrder1::resampleStereo16(int32_t *out, size_t outFrameCount,
AudioBufferProvider *provider) {
int32_t vl = mVolume[0];
int32_t vr = mVolume[1];
size_t inputIndex = mInputIndex;
uint32_t phaseFraction = mPhaseFraction;
uint32_t phaseIncrement = mPhaseIncrement;
size_t outputIndex = 0;
size_t outputSampleCount = outFrameCount * 2;
size_t inFrameCount = getInFrameCountRequired(outFrameCount);
// ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d",
// outFrameCount, inputIndex, phaseFraction, phaseIncrement);
while (outputIndex < outputSampleCount) {
// buffer is empty, fetch a new one
while (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
provider->getNextBuffer(&mBuffer,
calculateOutputPTS(outputIndex / 2));
if (mBuffer.raw == NULL) {
// provider is drained: bail out, saving state below the exit label
goto resampleStereo16_exit;
}
// ALOGE("New buffer fetched: %d frames", mBuffer.frameCount);
if (mBuffer.frameCount > inputIndex) break;
// whole buffer lies before the read position: remember its last L/R
// sample for interpolation and release it
inputIndex -= mBuffer.frameCount;
mX0L = mBuffer.i16[mBuffer.frameCount * 2 - 2];
mX0R = mBuffer.i16[mBuffer.frameCount * 2 - 1];
provider->releaseBuffer(&mBuffer);
// mBuffer.frameCount == 0 now so we reload a new buffer
}
int16_t *in = mBuffer.i16;
// handle boundary case: interpolate between the carried-over last frame
// (mX0L/mX0R) and the new buffer's first frame while inputIndex is 0
while (inputIndex == 0) {
// ALOGE("boundary case");
out[outputIndex++] += vl * Interp(mX0L, in[0], phaseFraction);
out[outputIndex++] += vr * Interp(mX0R, in[1], phaseFraction);
Advance(&inputIndex, &phaseFraction, phaseIncrement);
if (outputIndex == outputSampleCount) {
break;
}
}
// process input samples
// ALOGE("general case");
#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
if (inputIndex + 2 < mBuffer.frameCount) {
int32_t *maxOutPt;
int32_t maxInIdx;
maxOutPt = out + (outputSampleCount - 2); // 2 because 2 frames per loop
maxInIdx = mBuffer.frameCount - 2;
AsmStereo16Loop(in, maxOutPt, maxInIdx, outputIndex, out, inputIndex, vl, vr,
phaseFraction, phaseIncrement);
}
#endif // ASM_ARM_RESAMP1
while (outputIndex < outputSampleCount && inputIndex < mBuffer.frameCount) {
out[outputIndex++] += vl * Interp(in[inputIndex * 2 - 2],
in[inputIndex * 2], phaseFraction);
out[outputIndex++] += vr * Interp(in[inputIndex * 2 - 1],
in[inputIndex * 2 + 1], phaseFraction);
Advance(&inputIndex, &phaseFraction, phaseIncrement);
}
// ALOGE("loop done - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
// if done with buffer, save samples
if (inputIndex >= mBuffer.frameCount) {
inputIndex -= mBuffer.frameCount;
// ALOGE("buffer done, new input index %d", inputIndex);
mX0L = mBuffer.i16[mBuffer.frameCount * 2 - 2];
mX0R = mBuffer.i16[mBuffer.frameCount * 2 - 1];
provider->releaseBuffer(&mBuffer);
// verify that the releaseBuffer resets the buffer frameCount
// ALOG_ASSERT(mBuffer.frameCount == 0);
}
}
// ALOGE("output buffer full - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
resampleStereo16_exit:
// save state
mInputIndex = inputIndex;
mPhaseFraction = phaseFraction;
return outputIndex / 2 /* channels for stereo */;
}
// Linear-interpolation resampling of mono int16 input into the interleaved
// 32-bit accumulation buffer 'out' (each mono sample is written to both the
// left and right slots, scaled by vl/vr). Carries mInputIndex /
// mPhaseFraction / mX0L across calls. Returns outputIndex, i.e. the number
// of int32 samples written (2 per output frame).
size_t AudioResamplerOrder1::resampleMono16(int32_t *out, size_t outFrameCount,
AudioBufferProvider *provider) {
int32_t vl = mVolume[0];
int32_t vr = mVolume[1];
size_t inputIndex = mInputIndex;
uint32_t phaseFraction = mPhaseFraction;
uint32_t phaseIncrement = mPhaseIncrement;
size_t outputIndex = 0;
size_t outputSampleCount = outFrameCount * 2;
size_t inFrameCount = getInFrameCountRequired(outFrameCount);
// ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d",
// outFrameCount, inputIndex, phaseFraction, phaseIncrement);
while (outputIndex < outputSampleCount) {
// buffer is empty, fetch a new one
while (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
provider->getNextBuffer(&mBuffer,
calculateOutputPTS(outputIndex / 2));
if (mBuffer.raw == NULL) {
// provider is drained: save state and bail out
mInputIndex = inputIndex;
mPhaseFraction = phaseFraction;
goto resampleMono16_exit;
}
// ALOGE("New buffer fetched: %d frames", mBuffer.frameCount);
if (mBuffer.frameCount > inputIndex) break;
// whole buffer lies before the read position: remember its last sample
// for interpolation and release it
inputIndex -= mBuffer.frameCount;
mX0L = mBuffer.i16[mBuffer.frameCount - 1];
provider->releaseBuffer(&mBuffer);
// mBuffer.frameCount == 0 now so we reload a new buffer
}
int16_t *in = mBuffer.i16;
// handle boundary case: interpolate between the carried-over last sample
// (mX0L) and the new buffer's first sample while inputIndex is 0
while (inputIndex == 0) {
// ALOGE("boundary case");
int32_t sample = Interp(mX0L, in[0], phaseFraction);
out[outputIndex++] += vl * sample;
out[outputIndex++] += vr * sample;
Advance(&inputIndex, &phaseFraction, phaseIncrement);
if (outputIndex == outputSampleCount) {
break;
}
}
// process input samples
// ALOGE("general case");
#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
if (inputIndex + 2 < mBuffer.frameCount) {
int32_t *maxOutPt;
int32_t maxInIdx;
maxOutPt = out + (outputSampleCount - 2);
maxInIdx = (int32_t)mBuffer.frameCount - 2;
AsmMono16Loop(in, maxOutPt, maxInIdx, outputIndex, out, inputIndex, vl, vr,
phaseFraction, phaseIncrement);
}
#endif // ASM_ARM_RESAMP1
while (outputIndex < outputSampleCount && inputIndex < mBuffer.frameCount) {
int32_t sample = Interp(in[inputIndex - 1], in[inputIndex],
phaseFraction);
out[outputIndex++] += vl * sample;
out[outputIndex++] += vr * sample;
Advance(&inputIndex, &phaseFraction, phaseIncrement);
}
// ALOGE("loop done - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
// if done with buffer, save samples
if (inputIndex >= mBuffer.frameCount) {
inputIndex -= mBuffer.frameCount;
// ALOGE("buffer done, new input index %d", inputIndex);
mX0L = mBuffer.i16[mBuffer.frameCount - 1];
provider->releaseBuffer(&mBuffer);
// verify that the releaseBuffer resets the buffer frameCount
// ALOG_ASSERT(mBuffer.frameCount == 0);
}
}
// ALOGE("output buffer full - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
resampleMono16_exit:
// save state
mInputIndex = inputIndex;
mPhaseFraction = phaseFraction;
return outputIndex;
}
#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
/*******************************************************************
*
* AsmMono16Loop
* asm optimized monotonic loop version; one loop is 2 frames
* Input:
* in : pointer on input samples
* maxOutPt : pointer on first not filled
* maxInIdx : index on first not used
* outputIndex : pointer on current output index
* out : pointer on output buffer
* inputIndex : pointer on current input index
* vl, vr : left and right gain
* phaseFraction : pointer on current phase fraction
* phaseIncrement
* Output:
* outputIndex :
* out : updated buffer
* inputIndex : index of next to use
* phaseFraction : phase fraction for next interpolation
*
*******************************************************************/
__attribute__((noinline)) void AudioResamplerOrder1::AsmMono16Loop(int16_t *in, int32_t *maxOutPt, int32_t maxInIdx,
size_t &outputIndex, int32_t *out, size_t &inputIndex, int32_t vl, int32_t vr,
uint32_t &phaseFraction, uint32_t phaseIncrement) {
(void)maxOutPt; // remove unused parameter warnings
(void)maxInIdx;
(void)outputIndex;
(void)out;
(void)inputIndex;
(void)vl;
(void)vr;
(void)phaseFraction;
(void)phaseIncrement;
(void)in;
// NOTE(review): the assembly reads the by-reference parameters through the
// hard-coded stack offset below (MO_PARAM5); it appears to mirror the C
// fallback loop in resampleMono16 — re-verify the offsets if the signature
// or the ABI ever changes.
#define MO_PARAM5 "36" // offset of parameter 5 (outputIndex)
asm(
"stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n"
// get parameters
" ldr r6, [sp, #" MO_PARAM5
" + 20]\n" // &phaseFraction
" ldr r6, [r6]\n" // phaseFraction
" ldr r7, [sp, #" MO_PARAM5
" + 8]\n" // &inputIndex
" ldr r7, [r7]\n" // inputIndex
" ldr r8, [sp, #" MO_PARAM5
" + 4]\n" // out
" ldr r0, [sp, #" MO_PARAM5
" + 0]\n" // &outputIndex
" ldr r0, [r0]\n" // outputIndex
" add r8, r8, r0, asl #2\n" // curOut
" ldr r9, [sp, #" MO_PARAM5
" + 24]\n" // phaseIncrement
" ldr r10, [sp, #" MO_PARAM5
" + 12]\n" // vl
" ldr r11, [sp, #" MO_PARAM5
" + 16]\n" // vr
// r0 pin, x0, Samp
// r1 in
// r2 maxOutPt
// r3 maxInIdx
// r4 x1, i1, i3, Out1
// r5 out0
// r6 frac
// r7 inputIndex
// r8 curOut
// r9 inc
// r10 vl
// r11 vr
// r12
// r13 sp
// r14
// the following loop works on 2 frames
"1:\n"
" cmp r8, r2\n" // curOut - maxCurOut
" bcs 2f\n"
#define MO_ONE_FRAME \
" add r0, r1, r7, asl #1\n" /* in + inputIndex */ \
" ldrsh r4, [r0]\n" /* in[inputIndex] */ \
" ldr r5, [r8]\n" /* out[outputIndex] */ \
" ldrsh r0, [r0, #-2]\n" /* in[inputIndex-1] */ \
" bic r6, r6, #0xC0000000\n" /* phaseFraction & ... */ \
" sub r4, r4, r0\n" /* in[inputIndex] - in[inputIndex-1] */ \
" mov r4, r4, lsl #2\n" /* <<2 */ \
" smulwt r4, r4, r6\n" /* (x1-x0)*.. */ \
" add r6, r6, r9\n" /* phaseFraction + phaseIncrement */ \
" add r0, r0, r4\n" /* x0 - (..) */ \
" mla r5, r0, r10, r5\n" /* vl*interp + out[] */ \
" ldr r4, [r8, #4]\n" /* out[outputIndex+1] */ \
" str r5, [r8], #4\n" /* out[outputIndex++] = ... */ \
" mla r4, r0, r11, r4\n" /* vr*interp + out[] */ \
" add r7, r7, r6, lsr #30\n" /* inputIndex + phaseFraction>>30 */ \
" str r4, [r8], #4\n" /* out[outputIndex++] = ... */
MO_ONE_FRAME // frame 1
MO_ONE_FRAME // frame 2
" cmp r7, r3\n" // inputIndex - maxInIdx
" bcc 1b\n"
"2:\n"
" bic r6, r6, #0xC0000000\n" // phaseFraction & ...
// save modified values
" ldr r0, [sp, #" MO_PARAM5
" + 20]\n" // &phaseFraction
" str r6, [r0]\n" // phaseFraction
" ldr r0, [sp, #" MO_PARAM5
" + 8]\n" // &inputIndex
" str r7, [r0]\n" // inputIndex
" ldr r0, [sp, #" MO_PARAM5
" + 4]\n" // out
" sub r8, r0\n" // curOut - out
" asr r8, #2\n" // new outputIndex
" ldr r0, [sp, #" MO_PARAM5
" + 0]\n" // &outputIndex
" str r8, [r0]\n" // save outputIndex
" ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}\n");
}
/*******************************************************************
*
* AsmStereo16Loop
* asm optimized stereo loop version; one loop is 2 frames
* Input:
* in : pointer on input samples
* maxOutPt : pointer on first not filled
* maxInIdx : index on first not used
* outputIndex : pointer on current output index
* out : pointer on output buffer
* inputIndex : pointer on current input index
* vl, vr : left and right gain
* phaseFraction : pointer on current phase fraction
* phaseIncrement
* Output:
* outputIndex :
* out : updated buffer
* inputIndex : index of next to use
* phaseFraction : phase fraction for next interpolation
*
*******************************************************************/
__attribute__((noinline)) void AudioResamplerOrder1::AsmStereo16Loop(int16_t *in, int32_t *maxOutPt, int32_t maxInIdx,
size_t &outputIndex, int32_t *out, size_t &inputIndex, int32_t vl, int32_t vr,
uint32_t &phaseFraction, uint32_t phaseIncrement) {
(void)maxOutPt; // remove unused parameter warnings
(void)maxInIdx;
(void)outputIndex;
(void)out;
(void)inputIndex;
(void)vl;
(void)vr;
(void)phaseFraction;
(void)phaseIncrement;
(void)in;
// NOTE(review): the assembly reads the by-reference parameters through the
// hard-coded stack offset below (ST_PARAM5); it appears to mirror the C
// fallback loop in resampleStereo16 — re-verify the offsets if the
// signature or the ABI ever changes.
#define ST_PARAM5 "40" // offset of parameter 5 (outputIndex)
asm(
"stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}\n"
// get parameters
" ldr r6, [sp, #" ST_PARAM5
" + 20]\n" // &phaseFraction
" ldr r6, [r6]\n" // phaseFraction
" ldr r7, [sp, #" ST_PARAM5
" + 8]\n" // &inputIndex
" ldr r7, [r7]\n" // inputIndex
" ldr r8, [sp, #" ST_PARAM5
" + 4]\n" // out
" ldr r0, [sp, #" ST_PARAM5
" + 0]\n" // &outputIndex
" ldr r0, [r0]\n" // outputIndex
" add r8, r8, r0, asl #2\n" // curOut
" ldr r9, [sp, #" ST_PARAM5
" + 24]\n" // phaseIncrement
" ldr r10, [sp, #" ST_PARAM5
" + 12]\n" // vl
" ldr r11, [sp, #" ST_PARAM5
" + 16]\n" // vr
// r0 pin, x0, Samp
// r1 in
// r2 maxOutPt
// r3 maxInIdx
// r4 x1, i1, i3, out1
// r5 out0
// r6 frac
// r7 inputIndex
// r8 curOut
// r9 inc
// r10 vl
// r11 vr
// r12 temporary
// r13 sp
// r14
"3:\n"
" cmp r8, r2\n" // curOut - maxCurOut
" bcs 4f\n"
#define ST_ONE_FRAME \
" bic r6, r6, #0xC0000000\n" /* phaseFraction & ... */ \
\
" add r0, r1, r7, asl #2\n" /* in + 2*inputIndex */ \
\
" ldrsh r4, [r0]\n" /* in[2*inputIndex] */ \
" ldr r5, [r8]\n" /* out[outputIndex] */ \
" ldrsh r12, [r0, #-4]\n" /* in[2*inputIndex-2] */ \
" sub r4, r4, r12\n" /* in[2*InputIndex] - in[2*InputIndex-2] */ \
" mov r4, r4, lsl #2\n" /* <<2 */ \
" smulwt r4, r4, r6\n" /* (x1-x0)*.. */ \
" add r12, r12, r4\n" /* x0 - (..) */ \
" mla r5, r12, r10, r5\n" /* vl*interp + out[] */ \
" ldr r4, [r8, #4]\n" /* out[outputIndex+1] */ \
" str r5, [r8], #4\n" /* out[outputIndex++] = ... */ \
\
" ldrsh r12, [r0, #+2]\n" /* in[2*inputIndex+1] */ \
" ldrsh r0, [r0, #-2]\n" /* in[2*inputIndex-1] */ \
" sub r12, r12, r0\n" /* in[2*InputIndex] - in[2*InputIndex-2] */ \
" mov r12, r12, lsl #2\n" /* <<2 */ \
" smulwt r12, r12, r6\n" /* (x1-x0)*.. */ \
" add r12, r0, r12\n" /* x0 - (..) */ \
" mla r4, r12, r11, r4\n" /* vr*interp + out[] */ \
" str r4, [r8], #4\n" /* out[outputIndex++] = ... */ \
\
" add r6, r6, r9\n" /* phaseFraction + phaseIncrement */ \
" add r7, r7, r6, lsr #30\n" /* inputIndex + phaseFraction>>30 */
ST_ONE_FRAME // frame 1
ST_ONE_FRAME // frame 2
" cmp r7, r3\n" // inputIndex - maxInIdx
" bcc 3b\n"
"4:\n"
" bic r6, r6, #0xC0000000\n" // phaseFraction & ...
// save modified values
" ldr r0, [sp, #" ST_PARAM5
" + 20]\n" // &phaseFraction
" str r6, [r0]\n" // phaseFraction
" ldr r0, [sp, #" ST_PARAM5
" + 8]\n" // &inputIndex
" str r7, [r0]\n" // inputIndex
" ldr r0, [sp, #" ST_PARAM5
" + 4]\n" // out
" sub r8, r0\n" // curOut - out
" asr r8, #2\n" // new outputIndex
" ldr r0, [sp, #" ST_PARAM5
" + 0]\n" // &outputIndex
" str r8, [r0]\n" // save outputIndex
" ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, pc}\n");
}
#endif // ASM_ARM_RESAMP1
// ----------------------------------------------------------------------------
} // namespace cc

View File

@@ -0,0 +1,182 @@
/*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#include <android/log.h>
#include <sys/system_properties.h>
#include <sys/types.h>
#endif
#include "audio/android/AudioBufferProvider.h"
//#include <cutils/compiler.h>
//#include <utils/Compat.h>
//#include <media/AudioBufferProvider.h>
//#include <system/audio.h>
#include <cassert>
#include "audio/android/audio.h"
namespace cc {
class AudioResampler {
public:
// Determines quality of SRC.
// LOW_QUALITY: linear interpolator (1st order)
// MED_QUALITY: cubic interpolator (3rd order)
// HIGH_QUALITY: fixed multi-tap FIR (e.g. 48KHz->44.1KHz)
// NOTE: high quality SRC will only be supported for
// certain fixed rate conversions. Sample rate cannot be
// changed dynamically.
enum src_quality { // NOLINT(readability-identifier-naming)
DEFAULT_QUALITY = 0,
LOW_QUALITY = 1,
MED_QUALITY = 2,
HIGH_QUALITY = 3,
VERY_HIGH_QUALITY = 4,
};
static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0F;
static AudioResampler *create(audio_format_t format, int inChannelCount,
int32_t sampleRate, src_quality quality = DEFAULT_QUALITY);
virtual ~AudioResampler();
virtual void init() = 0;
virtual void setSampleRate(int32_t inSampleRate);
virtual void setVolume(float left, float right);
virtual void setLocalTimeFreq(uint64_t freq);
// set the PTS of the next buffer output by the resampler
virtual void setPTS(int64_t pts);
// Resample int16_t samples from provider and accumulate into 'out'.
// A mono provider delivers a sequence of samples.
// A stereo provider delivers a sequence of interleaved pairs of samples.
//
// In either case, 'out' holds interleaved pairs of fixed-point Q4.27.
// That is, for a mono provider, there is an implicit up-channeling.
// Since this method accumulates, the caller is responsible for clearing 'out' initially.
//
// For a float resampler, 'out' holds interleaved pairs of float samples.
//
// Multichannel interleaved frames for n > 2 is supported for quality DYN_LOW_QUALITY,
// DYN_MED_QUALITY, and DYN_HIGH_QUALITY.
//
// Returns the number of frames resampled into the out buffer.
virtual size_t resample(int32_t *out, size_t outFrameCount,
AudioBufferProvider *provider) = 0;
virtual void reset();
virtual size_t getUnreleasedFrames() const { return mInputIndex; }
// called from destructor, so must not be virtual
src_quality getQuality() const { return mQuality; }
protected:
// number of bits for phase fraction - 30 bits allows nearly 2x downsampling
static const int kNumPhaseBits = 30; // NOLINT(readability-identifier-naming)
// phase mask for fraction
static const uint32_t kPhaseMask = (1LU << kNumPhaseBits) - 1; // NOLINT(readability-identifier-naming)
// multiplier to calculate fixed point phase increment
static const double kPhaseMultiplier; // NOLINT(readability-identifier-naming)
AudioResampler(int inChannelCount, int32_t sampleRate, src_quality quality);
// prevent copying
AudioResampler(const AudioResampler &);
AudioResampler &operator=(const AudioResampler &);
// Convert an output frame index into a presentation timestamp.
// NOTE(review): presumably derived from mPTS and mLocalTimeFreq -- confirm in the .cpp.
int64_t calculateOutputPTS(int outputFrameIndex);
const int32_t mChannelCount;// NOLINT(readability-identifier-naming)
const int32_t mSampleRate;// NOLINT(readability-identifier-naming) -- output rate (see getInFrameCountRequired)
int32_t mInSampleRate;// NOLINT(readability-identifier-naming) -- input rate, mutable via setSampleRate()
AudioBufferProvider::Buffer mBuffer;// NOLINT(readability-identifier-naming) -- current input buffer being drained
// Left/right 16-bit volumes; NOTE(review): the union presumably exists so both
// can be read/written as one 32-bit word (mVolumeRL) -- confirm usage sites.
union {
    int16_t mVolume[2];// NOLINT(readability-identifier-naming)
    uint32_t mVolumeRL;// NOLINT(readability-identifier-naming)
};
int16_t mTargetVolume[2];// NOLINT(readability-identifier-naming)
size_t mInputIndex;// NOLINT(readability-identifier-naming) -- frame index into mBuffer
int32_t mPhaseIncrement;// NOLINT(readability-identifier-naming) -- fixed-point step per output frame
uint32_t mPhaseFraction;// NOLINT(readability-identifier-naming) -- kNumPhaseBits fractional position
uint64_t mLocalTimeFreq;// NOLINT(readability-identifier-naming)
int64_t mPTS;// NOLINT(readability-identifier-naming)
// returns the inFrameCount required to generate outFrameCount frames.
//
// Placed here to be consistent for all resamplers.
//
// Right now, we use the upper bound without regards to the current state of the
// input buffer using integer arithmetic, as follows:
//
// (static_cast<uint64_t>(outFrameCount)*mInSampleRate + (mSampleRate - 1))/mSampleRate;
//
// The double precision equivalent (float may not be precise enough):
// ceil(static_cast<double>(outFrameCount) * mInSampleRate / mSampleRate);
//
// this relies on the fact that the mPhaseIncrement is rounded down from
// #phases * mInSampleRate/mSampleRate and the fact that Sum(Floor(x)) <= Floor(Sum(x)).
// http://www.proofwiki.org/wiki/Sum_of_Floors_Not_Greater_Than_Floor_of_Sums
//
// (so long as double precision is computed accurately enough to be considered
// greater than or equal to the Floor(x) value in int32_t arithmetic; thus this
// will not necessarily hold for floats).
//
// REFINE:
// Greater accuracy and a tight bound is obtained by:
// 1) subtract and adjust for the current state of the AudioBufferProvider buffer.
// 2) using the exact integer formula where (ignoring 64b casting)
//    inFrameCount = (mPhaseIncrement * (outFrameCount - 1) + mPhaseFraction) / phaseWrapLimit;
//    phaseWrapLimit is the wraparound (1 << kNumPhaseBits), if not specified explicitly.
//
inline size_t getInFrameCountRequired(size_t outFrameCount) const {
return (static_cast<size_t>(outFrameCount) * mInSampleRate + (mSampleRate - 1)) / mSampleRate;
}
// Clamp a requested volume into [0, UNITY_GAIN_FLOAT].
// The comparisons are ordered so that NaN fails every test and, like any
// negative input, maps to 0 (silence).
inline float clampFloatVol(float volume) {//NOLINT(readability-identifier-naming, readability-convert-member-functions-to-static)
    if (volume >= 0. && volume <= UNITY_GAIN_FLOAT) {
        return volume; // already in range
    }
    if (volume > UNITY_GAIN_FLOAT) {
        return UNITY_GAIN_FLOAT;
    }
    return 0.0F; // NaN or negative volume maps to 0.
}
private:
const src_quality mQuality;// NOLINT(readability-identifier-naming) -- fixed at construction; read by getQuality()
// Return 'true' if the quality level is supported without explicit request
static bool qualityIsSupported(src_quality quality);
// For pthread_once()
static void init_routine(); // NOLINT(readability-identifier-naming)
// Return the estimated CPU load for specific resampler in MHz.
// The absolute number is irrelevant, it's the relative values that matter.
static uint32_t qualityMHz(src_quality quality);
};
// ----------------------------------------------------------------------------
} // namespace cc

View File

@@ -0,0 +1,186 @@
/*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "AudioResamplerCubic"
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include "audio/android/cutils/log.h"
#include "audio/android/AudioResampler.h"
#include "audio/android/AudioResamplerCubic.h"
namespace cc {
// ----------------------------------------------------------------------------
// Reset both per-channel interpolator states to all-zero coefficients/history.
void AudioResamplerCubic::init() {
    left = state();
    right = state();
}
// Dispatch to the mono or stereo resampling kernel based on the configured
// input channel count; any other layout is a fatal configuration error.
size_t AudioResamplerCubic::resample(int32_t *out, size_t outFrameCount,
                                     AudioBufferProvider *provider) {
    // should never happen, but we overflow if it does
    // ALOG_ASSERT(outFrameCount < 32767);
    if (mChannelCount == 1) {
        return resampleMono16(out, outFrameCount, provider);
    }
    if (mChannelCount == 2) {
        return resampleStereo16(out, outFrameCount, provider);
    }
    LOG_ALWAYS_FATAL("invalid channel count: %d", mChannelCount);
    return 0;
}
// Cubic resampling of interleaved stereo int16 input, accumulated into
// interleaved stereo Q4.27 'out' with per-channel volume applied.
// Returns the number of frames actually produced, which may be short if the
// provider runs out of data (state is saved so a later call can continue).
size_t AudioResamplerCubic::resampleStereo16(int32_t *out, size_t outFrameCount,
                                             AudioBufferProvider *provider) {
    int32_t vl = mVolume[0];
    int32_t vr = mVolume[1];

    size_t inputIndex = mInputIndex;
    uint32_t phaseFraction = mPhaseFraction;
    uint32_t phaseIncrement = mPhaseIncrement;
    size_t outputIndex = 0;
    size_t outputSampleCount = outFrameCount * 2;
    size_t inFrameCount = getInFrameCountRequired(outFrameCount);

    // fetch first buffer
    if (mBuffer.frameCount == 0) {
        mBuffer.frameCount = inFrameCount;
        provider->getNextBuffer(&mBuffer, mPTS);
        if (mBuffer.raw == NULL) {
            return 0;
        }
    }
    int16_t *in = mBuffer.i16;

    while (outputIndex < outputSampleCount) {
        // calculate output sample from the top bits of the phase fraction
        // (fix: removed an unused 'sample' local that the mono path uses but
        // this stereo path never did)
        int32_t x = phaseFraction >> kPreInterpShift;
        out[outputIndex++] += vl * interp(&left, x);
        out[outputIndex++] += vr * interp(&right, x);

        // increment phase
        phaseFraction += phaseIncrement;
        uint32_t indexIncrement = (phaseFraction >> kNumPhaseBits);
        phaseFraction &= kPhaseMask;

        // time to fetch another sample
        while (indexIncrement--) {
            inputIndex++;
            if (inputIndex == mBuffer.frameCount) {
                inputIndex = 0;
                provider->releaseBuffer(&mBuffer);
                mBuffer.frameCount = inFrameCount;
                provider->getNextBuffer(&mBuffer,
                                        calculateOutputPTS(outputIndex / 2));
                if (mBuffer.raw == NULL) {
                    goto save_state; // ugly, but efficient
                }
                in = mBuffer.i16;
            }
            // advance sample state
            advance(&left, in[inputIndex * 2]);
            advance(&right, in[inputIndex * 2 + 1]);
        }
    }

save_state:
    // persist position so the next call resumes where this one stopped
    mInputIndex = inputIndex;
    mPhaseFraction = phaseFraction;
    return outputIndex / 2 /* channels for stereo */;
}
// Cubic resampling of mono int16 input, accumulated into interleaved stereo
// Q4.27 'out': each interpolated sample is duplicated to both channels with
// the per-channel volume applied (implicit up-channeling).
// NOTE(review): the stereo variant returns outputIndex / 2 (frames), but this
// returns raw outputIndex (samples, i.e. 2x frames) -- confirm which unit
// callers expect before changing.
size_t AudioResamplerCubic::resampleMono16(int32_t *out, size_t outFrameCount,
AudioBufferProvider *provider) {
int32_t vl = mVolume[0];
int32_t vr = mVolume[1];
size_t inputIndex = mInputIndex;
uint32_t phaseFraction = mPhaseFraction;
uint32_t phaseIncrement = mPhaseIncrement;
size_t outputIndex = 0;
size_t outputSampleCount = outFrameCount * 2;
size_t inFrameCount = getInFrameCountRequired(outFrameCount);
// fetch first buffer
if (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
provider->getNextBuffer(&mBuffer, mPTS);
if (mBuffer.raw == NULL) {
return 0;
}
// ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
}
int16_t *in = mBuffer.i16;
while (outputIndex < outputSampleCount) {
int32_t sample;
int32_t x;
// calculate output sample from the top bits of the phase fraction
x = phaseFraction >> kPreInterpShift;
sample = interp(&left, x);
out[outputIndex++] += vl * sample;
out[outputIndex++] += vr * sample;
// increment phase
phaseFraction += phaseIncrement;
uint32_t indexIncrement = (phaseFraction >> kNumPhaseBits);
phaseFraction &= kPhaseMask;
// time to fetch another sample
while (indexIncrement--) {
inputIndex++;
if (inputIndex == mBuffer.frameCount) {
inputIndex = 0;
provider->releaseBuffer(&mBuffer);
mBuffer.frameCount = inFrameCount;
provider->getNextBuffer(&mBuffer,
calculateOutputPTS(outputIndex / 2));
if (mBuffer.raw == NULL) {
goto save_state; // ugly, but efficient
}
// ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
in = mBuffer.i16;
}
// advance sample state (only the 'left' state is used for mono input)
advance(&left, in[inputIndex]);
}
}
save_state:
// persist position so the next call resumes where this one stopped
mInputIndex = inputIndex;
mPhaseFraction = phaseFraction;
return outputIndex;
}
// ----------------------------------------------------------------------------
} // namespace cc

View File

@@ -0,0 +1,65 @@
/*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdint.h>
#include <sys/types.h>
#include "audio/android/AudioResampler.h"
#include "audio/android/AudioBufferProvider.h"
namespace cc {
// ----------------------------------------------------------------------------
class AudioResamplerCubic : public AudioResampler {
public:
AudioResamplerCubic(int inChannelCount, int32_t sampleRate) : AudioResampler(inChannelCount, sampleRate, MED_QUALITY) {
}
virtual size_t resample(int32_t *out, size_t outFrameCount,
AudioBufferProvider *provider);
private:
// number of bits used in interpolation multiply - 14 bits avoids overflow
static const int kNumInterpBits = 14;
// bits to shift the phase fraction down to avoid overflow
static const int kPreInterpShift = kNumPhaseBits - kNumInterpBits;
typedef struct {
int32_t a, b, c, y0, y1, y2, y3;
} state;
void init();
size_t resampleMono16(int32_t *out, size_t outFrameCount,
AudioBufferProvider *provider);
size_t resampleStereo16(int32_t *out, size_t outFrameCount,
AudioBufferProvider *provider);
static inline int32_t interp(state *p, int32_t x) {
return (((((p->a * x >> 14) + p->b) * x >> 14) + p->c) * x >> 14) + p->y1;
}
static inline void advance(state *p, int16_t in) {
p->y0 = p->y1;
p->y1 = p->y2;
p->y2 = p->y3;
p->y3 = in;
p->a = (3 * (p->y1 - p->y2) - p->y0 + p->y3) >> 1;
p->b = (p->y2 << 1) + p->y0 - (((5 * p->y1 + p->y3)) >> 1);
p->c = (p->y2 - p->y0) >> 1;
}
state left, right;
};
// ----------------------------------------------------------------------------
} // namespace cc

View File

@@ -0,0 +1,171 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <math.h>
#include <stdint.h>
namespace cc {
// AUDIO_RESAMPLER_DOWN_RATIO_MAX is the maximum ratio between the original
// audio sample rate and the target rate when downsampling,
// as permitted in the audio framework, e.g. AudioTrack and AudioFlinger.
// In practice, it is not recommended to downsample more than 6:1
// for best audio quality, even though the audio framework permits a larger
// downsampling ratio.
// REFINE: replace with an API
#define AUDIO_RESAMPLER_DOWN_RATIO_MAX 256
// AUDIO_RESAMPLER_UP_RATIO_MAX is the maximum suggested ratio between the original
// audio sample rate and the target rate when upsampling. It is loosely enforced by
// the system. One issue with large upsampling ratios is the approximation by
// an int32_t of the phase increments, making the resulting sample rate inexact.
#define AUDIO_RESAMPLER_UP_RATIO_MAX 65536
// AUDIO_TIMESTRETCH_SPEED_MIN and AUDIO_TIMESTRETCH_SPEED_MAX define the min and max time stretch
// speeds supported by the system. These are enforced by the system and values outside this range
// will result in a runtime error.
// Depending on the AudioPlaybackRate::mStretchMode, the effective limits might be narrower than
// the ones specified here.
// AUDIO_TIMESTRETCH_SPEED_MIN_DELTA is the minimum absolute speed difference that might trigger a
// parameter update.
#define AUDIO_TIMESTRETCH_SPEED_MIN 0.01f
#define AUDIO_TIMESTRETCH_SPEED_MAX 20.0f
#define AUDIO_TIMESTRETCH_SPEED_NORMAL 1.0f
#define AUDIO_TIMESTRETCH_SPEED_MIN_DELTA 0.0001f
// AUDIO_TIMESTRETCH_PITCH_MIN and AUDIO_TIMESTRETCH_PITCH_MAX define the min and max time stretch
// pitch shifting supported by the system. These are not enforced by the system and values
// outside this range might result in a pitch different than the one requested.
// Depending on the AudioPlaybackRate::mStretchMode, the effective limits might be narrower than
// the ones specified here.
// AUDIO_TIMESTRETCH_PITCH_MIN_DELTA is the minimum absolute pitch difference that might trigger a
// parameter update.
#define AUDIO_TIMESTRETCH_PITCH_MIN 0.25f
#define AUDIO_TIMESTRETCH_PITCH_MAX 4.0f
#define AUDIO_TIMESTRETCH_PITCH_NORMAL 1.0f
#define AUDIO_TIMESTRETCH_PITCH_MIN_DELTA 0.0001f
// Selects the algorithm used for time stretching.
enum AudioTimestretchStretchMode : int32_t {
    AUDIO_TIMESTRETCH_STRETCH_DEFAULT = 0,
    AUDIO_TIMESTRETCH_STRETCH_SPEECH = 1,
    //REFINE: add more stretch modes/algorithms
};
//Limits for AUDIO_TIMESTRETCH_STRETCH_SPEECH mode
#define TIMESTRETCH_SONIC_SPEED_MIN 0.1f
#define TIMESTRETCH_SONIC_SPEED_MAX 6.0f
// Behavior of the time stretcher when the current algorithm cannot run with
// the requested parameters:
//   FALLBACK_CUT_REPEAT: (internal only) speed < 1.0 truncates frames,
//                        speed > 1.0 repeats frames
//   FALLBACK_MUTE:       sets all processed frames to zero
//   FALLBACK_FAIL:       stops program execution and logs a fatal error
enum AudioTimestretchFallbackMode : int32_t {
    AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT = -1,
    AUDIO_TIMESTRETCH_FALLBACK_DEFAULT = 0,
    AUDIO_TIMESTRETCH_FALLBACK_MUTE = 1,
    AUDIO_TIMESTRETCH_FALLBACK_FAIL = 2,
};
// Full time-stretch configuration: speed/pitch factors plus the algorithm
// selection and its fallback policy.
struct AudioPlaybackRate {
    float mSpeed;
    float mPitch;
    enum AudioTimestretchStretchMode mStretchMode;
    enum AudioTimestretchFallbackMode mFallbackMode;
};
// Identity playback rate: normal speed and pitch, default algorithm/fallback.
static const AudioPlaybackRate AUDIO_PLAYBACK_RATE_DEFAULT = {
    AUDIO_TIMESTRETCH_SPEED_NORMAL,
    AUDIO_TIMESTRETCH_PITCH_NORMAL,
    AUDIO_TIMESTRETCH_STRETCH_DEFAULT,
    AUDIO_TIMESTRETCH_FALLBACK_DEFAULT};
// Two rates are considered equal when both modes match and speed/pitch agree
// within the minimum update deltas.
static inline bool isAudioPlaybackRateEqual(const AudioPlaybackRate &pr1,
                                            const AudioPlaybackRate &pr2) {
    if (pr1.mStretchMode != pr2.mStretchMode || pr1.mFallbackMode != pr2.mFallbackMode) {
        return false;
    }
    const bool speedClose = fabs(pr1.mSpeed - pr2.mSpeed) < AUDIO_TIMESTRETCH_SPEED_MIN_DELTA;
    const bool pitchClose = fabs(pr1.mPitch - pr2.mPitch) < AUDIO_TIMESTRETCH_PITCH_MIN_DELTA;
    return speedClose && pitchClose;
}
// Validates a playback rate against the limits of the algorithm that will run.
static inline bool isAudioPlaybackRateValid(const AudioPlaybackRate &playbackRate) {
    // The sonic (speech) stretcher in FALLBACK_FAIL mode has its own speed
    // limits; every other combination uses the generic system limits.
    float speedMin = AUDIO_TIMESTRETCH_SPEED_MIN;
    float speedMax = AUDIO_TIMESTRETCH_SPEED_MAX;
    if (playbackRate.mFallbackMode == AUDIO_TIMESTRETCH_FALLBACK_FAIL &&
        (playbackRate.mStretchMode == AUDIO_TIMESTRETCH_STRETCH_SPEECH ||
         playbackRate.mStretchMode == AUDIO_TIMESTRETCH_STRETCH_DEFAULT)) {
        speedMin = TIMESTRETCH_SONIC_SPEED_MIN;
        speedMax = TIMESTRETCH_SONIC_SPEED_MAX;
    }
    return playbackRate.mSpeed >= speedMin && playbackRate.mSpeed <= speedMax &&
           playbackRate.mPitch >= AUDIO_TIMESTRETCH_PITCH_MIN &&
           playbackRate.mPitch <= AUDIO_TIMESTRETCH_PITCH_MAX;
}
// REFINE: Consider putting these inlines into a class scope
// Returns an upper bound on the source frames a resampler needs in order to
// produce 'dstFramesRequired' frames at 'dstSampleRate' from input running at
// 'srcSampleRate'. This is not precise -- actual consumption depends on the
// resampler's internal state. For matched rates it returns the destination
// frame count, which may not hold for an asynchronous resampler.
static inline size_t sourceFramesNeeded(
    uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
    if (srcSampleRate == dstSampleRate) {
        return dstFramesRequired;
    }
    // +1 for rounding - always done even for matched ratios (resampler may use phases not ratio)
    // +1 for the additional sample needed for interpolation
    const uint64_t scaled =
        static_cast<uint64_t>(dstFramesRequired) * srcSampleRate / dstSampleRate;
    return static_cast<size_t>(scaled) + 1 + 1;
}
// An upper bound for the number of destination frames producible from
// 'srcFrames' after sample rate conversion. Suitable for buffer sizing.
static inline size_t destinationFramesPossible(size_t srcFrames, uint32_t srcSampleRate,
                                               uint32_t dstSampleRate) {
    if (srcSampleRate == dstSampleRate) {
        return srcFrames;
    }
    const uint64_t dstFrames = static_cast<uint64_t>(srcFrames) * dstSampleRate / srcSampleRate;
    // Reserve two frames of margin (rounding + interpolation lookahead).
    if (dstFrames <= 2) {
        return 0;
    }
    return static_cast<size_t>(dstFrames - 2);
}
// Like sourceFramesNeeded(), but additionally accounts for a time stretcher
// running at 'speed' in front of the resampler.
static inline size_t sourceFramesNeededWithTimestretch(
    uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate,
    float speed) {
    // Frames the resampler itself needs to deliver the requested output.
    const size_t resamplerFrames = sourceFramesNeeded(srcSampleRate, dstFramesRequired, dstSampleRate);
    // The stretcher consumes ~'speed' input frames per output frame;
    // +1 +1 accounts for rounding dependencies.
    return resamplerFrames * static_cast<double>(speed) + 1 + 1;
}
// Identifies sample rates that we associate with music
// and thus eligible for better resampling and fast capture.
// This is somewhat less than 44100 to allow for pitch correction
// involving resampling as well as asynchronous resampling.
#define AUDIO_PROCESSING_MUSIC_RATE 40000
// True if 'sampleRate' (Hz) should be treated as a music-quality rate.
static inline bool isMusicRate(uint32_t sampleRate) {
    return sampleRate >= AUDIO_PROCESSING_MUSIC_RATE;
}
} // namespace cc
// ---------------------------------------------------------------------------

View File

@@ -0,0 +1,87 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <functional>
#include "base/std/container/string.h"
namespace cc {
// Abstract playback interface implemented by concrete players
// (e.g. PcmAudioPlayer). Exposes transport control, volume, looping and
// position accessors, plus a state-change callback.
class IAudioPlayer {
public:
    // Player lifecycle states reported by getState()/the event callback.
    enum class State {
        INVALID = 0,
        INITIALIZED,
        PLAYING,
        PAUSED,
        STOPPED,
        OVER
    };

    // Invoked on state transitions; see setPlayEventCallback() for threading.
    using PlayEventCallback = std::function<void(State)>;

    // fix: was 'virtual ~IAudioPlayer(){};' (empty body + stray semicolon)
    virtual ~IAudioPlayer() = default;

    virtual int getId() const = 0;
    virtual void setId(int id) = 0;
    virtual ccstd::string getUrl() const = 0;
    virtual State getState() const = 0;
    virtual void play() = 0;
    virtual void pause() = 0;
    virtual void resume() = 0;
    virtual void stop() = 0;
    virtual void rewind() = 0;
    virtual void setVolume(float volume) = 0;
    virtual float getVolume() const = 0;
    virtual void setAudioFocus(bool isFocus) = 0;
    virtual void setLoop(bool isLoop) = 0;
    virtual bool isLoop() const = 0;
    virtual float getDuration() const = 0;   // seconds
    virtual float getPosition() const = 0;   // seconds
    virtual bool setPosition(float pos) = 0; // returns false if seeking failed

    // @note: STOPPED event is invoked in main thread
    //        OVER event is invoked in sub thread
    virtual void setPlayEventCallback(const PlayEventCallback &playEventCallback) = 0;
};
} // namespace cc

View File

@@ -0,0 +1,40 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <functional>
#include <thread>
namespace cc {
// Marshalling interface: lets audio worker threads run callbacks on the
// thread that owns the audio engine (the "caller" thread).
class ICallerThreadUtils {
public:
    // fix: was 'virtual ~ICallerThreadUtils(){};' (empty body + stray semicolon)
    virtual ~ICallerThreadUtils() = default;

    // Schedule 'func' to run on the caller thread.
    virtual void performFunctionInCallerThread(const std::function<void()> &func) = 0;

    // Identifier of the caller thread; callers compare against
    // std::this_thread::get_id() to decide whether marshalling is needed.
    virtual std::thread::id getCallerThreadId() = 0;
};
} // namespace cc

View File

@@ -0,0 +1,42 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "audio/common/utils/include/minifloat.h"
namespace cc {
// Source of the current left/right gain, packed as two minifloats
// (see audio/common/utils/include/minifloat.h). Implemented by objects that
// own the volume state; consumers poll it per mixing pass.
class IVolumeProvider {
public:
    // The provider implementation is responsible for validating that the return value is in range.
    virtual gain_minifloat_packed_t getVolumeLR() = 0;

protected:
    IVolumeProvider() {}
    virtual ~IVolumeProvider() {}
};
} // namespace cc

View File

@@ -0,0 +1,107 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "audio/android/cutils/log.h"
#include <SLES/OpenSLES.h>
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#include <SLES/OpenSLES_Android.h>
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
#include <SLES/OpenSLES_Platform.h>
#endif
#include <functional>
#include "base/std/container/string.h"
// Helper macros for OpenSL ES calls. All are wrapped in do { } while (0) so
// each behaves as a single statement (fix: the previous bare-if forms were
// unsafe inside an unbraced if/else -- dangling-else hazard).

// Delete a heap object and null out the pointer; no-op on nullptr.
#define SL_SAFE_DELETE(obj)         \
    do {                            \
        if ((obj) != nullptr) {     \
            delete (obj);           \
            (obj) = nullptr;        \
        }                           \
    } while (0)

// Destroy an OpenSL object via its interface table and null out the handle.
#define SL_DESTROY_OBJ(OBJ)         \
    do {                            \
        if ((OBJ) != nullptr) {     \
            (*(OBJ))->Destroy(OBJ); \
            (OBJ) = nullptr;        \
        }                           \
    } while (0)

// Log and return 'rval' from the enclosing function when 'r' indicates failure.
#define SL_RETURN_VAL_IF_FAILED(r, rval, ...) \
    do {                                      \
        if ((r) != SL_RESULT_SUCCESS) {       \
            ALOGE(__VA_ARGS__);               \
            return rval;                      \
        }                                     \
    } while (0)

// Log and return (void) from the enclosing function when 'r' indicates failure.
#define SL_RETURN_IF_FAILED(r, ...)     \
    do {                                \
        if ((r) != SL_RESULT_SUCCESS) { \
            ALOGE(__VA_ARGS__);         \
            return;                     \
        }                               \
    } while (0)

// Log on failure but continue execution.
#define SL_PRINT_ERROR_IF_FAILED(r, ...) \
    do {                                 \
        if ((r) != SL_RESULT_SUCCESS) {  \
            ALOGE(__VA_ARGS__);          \
        }                                \
    } while (0)
// Callback that resolves an asset path to a file descriptor, writing the
// asset's byte offset and length into *start / *length.
typedef std::function<int(const ccstd::string &, off_t *start, off_t *length)> FdGetterCallback;
// Copied from OpenSLES_AndroidMetadata.h in android-21
// It's because android-10 doesn't contain this header file
/**
* Additional metadata keys to be used in SLMetadataExtractionItf:
* the ANDROID_KEY_PCMFORMAT_* keys follow the fields of the SLDataFormat_PCM struct, and as such
* all values corresponding to these keys are of SLuint32 type, and are defined as the fields
* of the same name in SLDataFormat_PCM. The exception is that sample rate is expressed here
* in Hz units, rather than in milliHz units.
*/
#ifndef ANDROID_KEY_PCMFORMAT_NUMCHANNELS
#define ANDROID_KEY_PCMFORMAT_NUMCHANNELS "AndroidPcmFormatNumChannels"
#endif
#ifndef ANDROID_KEY_PCMFORMAT_SAMPLERATE
#define ANDROID_KEY_PCMFORMAT_SAMPLERATE "AndroidPcmFormatSampleRate"
#endif
#ifndef ANDROID_KEY_PCMFORMAT_BITSPERSAMPLE
#define ANDROID_KEY_PCMFORMAT_BITSPERSAMPLE "AndroidPcmFormatBitsPerSample"
#endif
#ifndef ANDROID_KEY_PCMFORMAT_CONTAINERSIZE
#define ANDROID_KEY_PCMFORMAT_CONTAINERSIZE "AndroidPcmFormatContainerSize"
#endif
#ifndef ANDROID_KEY_PCMFORMAT_CHANNELMASK
#define ANDROID_KEY_PCMFORMAT_CHANNELMASK "AndroidPcmFormatChannelMask"
#endif
#ifndef ANDROID_KEY_PCMFORMAT_ENDIANNESS
#define ANDROID_KEY_PCMFORMAT_ENDIANNESS "AndroidPcmFormatEndianness"
#endif
// Current time point from the high resolution clock (requires <chrono>).
#define clockNow() std::chrono::high_resolution_clock::now()
// Elapsed milliseconds (float) between two clockNow() time points.
#define intervalInMS(oldTime, newTime) (static_cast<long>(std::chrono::duration_cast<std::chrono::microseconds>((newTime) - (oldTime)).count()) / 1000.f)
// Element count of a C array (do not use on pointers).
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))

View File

@@ -0,0 +1,193 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "PcmAudioPlayer"
#include "audio/android/PcmAudioPlayer.h"
#include "audio/android/AudioMixerController.h"
#include "audio/android/ICallerThreadUtils.h"
#include "audio/android/cutils/log.h"
#include "base/memory/Memory.h"
namespace cc {
// Constructs an idle player; prepare() must be called before playback.
// 'controller' and 'callerThreadUtils' are held but never deleted here.
PcmAudioPlayer::PcmAudioPlayer(AudioMixerController *controller, ICallerThreadUtils *callerThreadUtils)
: _id(-1), _track(nullptr), _playEventCallback(nullptr), _controller(controller), _callerThreadUtils(callerThreadUtils) {
    ALOGV("PcmAudioPlayer constructor: %p", this);
}
// Releases the mixer track. Note: the player deletes itself from the
// Track::State::DESTROYED callback installed in prepare().
PcmAudioPlayer::~PcmAudioPlayer() {
    ALOGV("In the destructor of PcmAudioPlayer (%p)", this);
    delete _track;
}
// Binds this player to 'url' and its decoded PCM data, creates the mixer
// Track, and wires track state changes back to the caller (Cocos) thread.
// Always returns true.
// NOTE(review): calling prepare() twice would leak the previous _track --
// confirm prepare is only invoked once per player instance.
bool PcmAudioPlayer::prepare(const ccstd::string &url, const PcmData &decResult) {
    _url = url;
    _decResult = decResult;
    _track = ccnew Track(_decResult);
    std::thread::id callerThreadId = _callerThreadUtils->getCallerThreadId();
    // @note The logic may cause this issue https://github.com/cocos2d/cocos2d-x/issues/17707
    // Assume that AudioEngine::stop(id) is invoked and the audio is played over meanwhile.
    // Since State::OVER and State::DESTROYED are triggered in the audio mixing thread, it will
    // call 'performFunctionInCallerThread' to post events to cocos's message queue.
    // Therefore, the sequence in cocos's thread will be |STOP|OVER|DESTROYED|.
    // Although, we remove the audio id in |STOPPED| callback, because it's asynchronous operation,
    // |OVER| and |DESTROYED| callbacks will still be invoked in cocos's thread.
    // HOW TO FIX: If the previous state is |STOPPED| and the current state
    // is |OVER|, just skip to invoke |OVER| callback.
    _track->onStateChanged = [this, callerThreadId](Track::State state) {
        // It maybe in sub thread
        Track::State prevState = _track->getPrevState();
        auto func = [this, state, prevState]() {
            // It's in caller's thread
            if (state == Track::State::OVER && prevState != Track::State::STOPPED) {
                if (_playEventCallback != nullptr) {
                    _playEventCallback(State::OVER);
                }
            } else if (state == Track::State::STOPPED) {
                if (_playEventCallback != nullptr) {
                    _playEventCallback(State::STOPPED);
                }
            } else if (state == Track::State::DESTROYED) {
                // Final teardown: the player deletes itself here (see ~PcmAudioPlayer()).
                delete this;
            }
        };
        if (callerThreadId == std::this_thread::get_id()) { // onStateChanged(Track::State::STOPPED) is in caller's (Cocos's) thread.
            func();
        } else { // onStateChanged(Track::State::OVER) or onStateChanged(Track::State::DESTROYED) are in audio mixing thread.
            _callerThreadUtils->performFunctionInCallerThread(func);
        }
    };
    setVolume(1.0f);
    return true;
}
void PcmAudioPlayer::rewind() {
ALOGW("PcmAudioPlayer::rewind isn't supported!");
}
void PcmAudioPlayer::setVolume(float volume) {
_track->setVolume(volume);
}
float PcmAudioPlayer::getVolume() const {
return _track->getVolume();
}
// Forwards the audio-focus flag to the underlying Track.
void PcmAudioPlayer::setAudioFocus(bool isFocus) {
    _track->setAudioFocus(isFocus);
}
// Forwards the loop flag to the underlying Track.
void PcmAudioPlayer::setLoop(bool isLoop) {
    _track->setLoop(isLoop);
}
// Returns whether the underlying Track loops.
bool PcmAudioPlayer::isLoop() const {
    return _track->isLoop();
}
// Duration in seconds, taken from the decoded PCM data.
float PcmAudioPlayer::getDuration() const {
    return _decResult.duration;
}
// Current playback position (seconds), delegated to the Track.
float PcmAudioPlayer::getPosition() const {
    return _track->getPosition();
}
// Seeks to `pos` seconds; delegated to the Track.
bool PcmAudioPlayer::setPosition(float pos) {
    return _track->setPosition(pos);
}
// Stores the callback that receives STOPPED/OVER notifications (see prepare()).
void PcmAudioPlayer::setPlayEventCallback(const PlayEventCallback &playEventCallback) {
    _playEventCallback = playEventCallback;
}
void PcmAudioPlayer::play() {
    // put track to AudioMixerController
    ALOGV("PcmAudioPlayer (%p) play, url: %s", this, _url.c_str());
    _controller->addTrack(_track);
    _track->setState(Track::State::PLAYING);
}
// Pauses playback by flipping the Track state; the mixer reacts to the state.
void PcmAudioPlayer::pause() {
    ALOGV("PcmAudioPlayer (%p) pause, url: %s", this, _url.c_str());
    _track->setState(Track::State::PAUSED);
}
// Resumes a paused track; RESUMED is reported to callers as PLAYING (see getState()).
void PcmAudioPlayer::resume() {
    ALOGV("PcmAudioPlayer (%p) resume, url: %s", this, _url.c_str());
    _track->setState(Track::State::RESUMED);
}
// Stops playback; the STOPPED state change triggers the play-event callback.
void PcmAudioPlayer::stop() {
    ALOGV("PcmAudioPlayer (%p) stop, url: %s", this, _url.c_str());
    _track->setState(Track::State::STOPPED);
}
// Maps the underlying Track state onto the IAudioPlayer state enum.
// Reports State::INVALID when no track has been prepared yet.
IAudioPlayer::State PcmAudioPlayer::getState() const {
    if (_track == nullptr) {
        return State::INVALID;
    }
    switch (_track->getState()) {
        case Track::State::IDLE:
            return State::INITIALIZED;
        case Track::State::PLAYING:
        case Track::State::RESUMED:
            // RESUMED is indistinguishable from PLAYING for callers.
            return State::PLAYING;
        case Track::State::PAUSED:
            return State::PAUSED;
        case Track::State::STOPPED:
            return State::STOPPED;
        case Track::State::OVER:
            return State::OVER;
        default:
            return State::INVALID;
    }
}
} // namespace cc

View File

@@ -0,0 +1,96 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <mutex>
#include "audio/android/IAudioPlayer.h"
#include "audio/android/PcmData.h"
#include "audio/android/Track.h"
namespace cc {
class ICallerThreadUtils;
class AudioMixerController;
// Plays small, fully-decoded PCM buffers by submitting a Track to the shared
// AudioMixerController. Created by AudioPlayerProvider (private ctor/dtor);
// it deletes itself when its Track reaches the DESTROYED state.
class PcmAudioPlayer : public IAudioPlayer {
public:
    // Stores the decoded PCM data and creates the Track; always returns true.
    bool prepare(const ccstd::string &url, const PcmData &decResult);
    // Override Functions Begin
    virtual int getId() const override { return _id; };
    virtual void setId(int id) override { _id = id; };
    virtual ccstd::string getUrl() const override { return _url; };
    virtual State getState() const override;
    virtual void play() override;
    virtual void pause() override;
    virtual void resume() override;
    virtual void stop() override;
    virtual void rewind() override; // Not supported; logs a warning.
    virtual void setVolume(float volume) override;
    virtual float getVolume() const override;
    virtual void setAudioFocus(bool isFocus) override;
    virtual void setLoop(bool isLoop) override;
    virtual bool isLoop() const override;
    virtual float getDuration() const override;  // seconds
    virtual float getPosition() const override;  // seconds
    virtual bool setPosition(float pos) override;
    virtual void setPlayEventCallback(const PlayEventCallback &playEventCallback) override;
    // Override Functions End
private:
    // Only AudioPlayerProvider may create/destroy instances.
    PcmAudioPlayer(AudioMixerController *controller, ICallerThreadUtils *callerThreadUtils);
    virtual ~PcmAudioPlayer();
private:
    int _id;
    ccstd::string _url;
    PcmData _decResult;              // decoded PCM buffer shared with _track
    Track *_track;                   // owned by the mixer once play() adds it
    PlayEventCallback _playEventCallback;
    AudioMixerController *_controller;     // not owned
    ICallerThreadUtils *_callerThreadUtils; // not owned
    friend class AudioPlayerProvider;
};
} // namespace cc

View File

@@ -0,0 +1,190 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "PcmAudioService"
#include "base/Macros.h"
#include "audio/android/PcmAudioService.h"
#include "audio/android/AudioMixerController.h"
#include "audio/android/utils/Compat.h"
namespace cc {
static ccstd::vector<char> __silenceData;//NOLINT(bugprone-reserved-identifier, readability-identifier-naming)
#define AUDIO_PLAYER_BUFFER_COUNT (2)
// Trampoline that adapts the C-style OpenSL buffer-queue callback to the
// PcmAudioService instance passed in `context`. The callback signature differs
// between Android and OpenHarmony, hence the preprocessor split.
class SLPcmAudioPlayerCallbackProxy {
public:
#if CC_PLATFORM == CC_PLATFORM_ANDROID
    static void samplePlayerCallback(CCSLBufferQueueItf bq, void *context) {
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
    static void samplePlayerCallback(CCSLBufferQueueItf bq, void *context, SLuint32 size) {
#endif
        auto *thiz = reinterpret_cast<PcmAudioService *>(context);
        thiz->bqFetchBufferCallback(bq);
    }
};
// Stores the engine/output-mix handles; all OpenSL objects are created in init().
PcmAudioService::PcmAudioService(SLEngineItf engineItf, SLObjectItf outputMixObject)
: _engineItf(engineItf), _outputMixObj(outputMixObject), _playObj(nullptr), _playItf(nullptr), _volumeItf(nullptr), _bufferQueueItf(nullptr), _numChannels(-1), _sampleRate(-1), _bufferSizeInBytes(0), _controller(nullptr) {
}
// Destroys the OpenSL player object created in init(), which stops the
// buffer-queue callback loop and releases its interfaces.
PcmAudioService::~PcmAudioService() {
    // Fixed log typo: was "PcmAudioServicee()".
    ALOGV("~PcmAudioService() (%p), before destroy play object", this);
    SL_DESTROY_OBJ(_playObj);
    ALOGV("~PcmAudioService() end");
}
// Feeds the OpenSL buffer queue with the next chunk of audio: one mixed frame
// when tracks are actively playing, otherwise a buffer of silence so the queue
// keeps cycling. Returns false if the Enqueue call fails.
bool PcmAudioService::enqueue() {
#if CC_PLATFORM == CC_PLATFORM_OPENHARMONY
    // We need to call this interface in openharmony, otherwise there will be noise
    SLuint8 *buffer = nullptr;
    SLuint32 size = 0;
    (*_bufferQueueItf)->GetBuffer(_bufferQueueItf, &buffer, &size);
#endif
    if (_controller->hasPlayingTacks()) {
        if (_controller->isPaused()) {
            // Paused: keep the queue alive with silence instead of mixed audio.
            SLresult r = (*_bufferQueueItf)->Enqueue(_bufferQueueItf, __silenceData.data(), __silenceData.size());
            SL_RETURN_VAL_IF_FAILED(r, false, "enqueue silent data failed!");
        } else {
            _controller->mixOneFrame();
            auto *current = _controller->current();
            ALOG_ASSERT(current != nullptr, "current buffer is nullptr ...");
            SLresult r = (*_bufferQueueItf)->Enqueue(_bufferQueueItf, current->buf, current->size);
            SL_RETURN_VAL_IF_FAILED(r, false, "enqueue failed!");
        }
    } else {
        // Nothing playing: enqueue silence so the callback keeps firing.
        SLresult r = (*_bufferQueueItf)->Enqueue(_bufferQueueItf, __silenceData.data(), __silenceData.size());
        SL_RETURN_VAL_IF_FAILED(r, false, "enqueue silent data failed!");
    }
    return true;
}
// OpenSL buffer-queue callback entry point (via SLPcmAudioPlayerCallbackProxy);
// runs on the audio thread and refills the queue.
void PcmAudioService::bqFetchBufferCallback(CCSLBufferQueueItf bq) {
    CC_UNUSED_PARAM(bq);
    // IDEA: PcmAudioService instance may be destroyed, we need to find a way to wait...
    // It's in sub thread
    enqueue();
}
// Creates the OpenSL audio player (16-bit little-endian PCM, mono or stereo),
// acquires the play/volume/buffer-queue interfaces, registers the refill
// callback, primes the queue with silence and starts playback.
// Returns false on any OpenSL failure.
bool PcmAudioService::init(AudioMixerController *controller, int numChannels, int sampleRate, int bufferSizeInBytes) {
    _controller = controller;
    _numChannels = numChannels;
    _sampleRate = sampleRate;
    _bufferSizeInBytes = bufferSizeInBytes;
    SLuint32 channelMask = SL_SPEAKER_FRONT_CENTER;
    if (numChannels > 1) {
        channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
    }
    SLDataFormat_PCM formatPcm = {
        SL_DATAFORMAT_PCM,
        static_cast<SLuint32>(numChannels),
        static_cast<SLuint32>(sampleRate * 1000), // OpenSL expects milliHertz
        SL_PCMSAMPLEFORMAT_FIXED_16,
        SL_PCMSAMPLEFORMAT_FIXED_16,
        channelMask,
        SL_BYTEORDER_LITTLEENDIAN};
#if CC_PLATFORM == CC_PLATFORM_ANDROID
    SLDataLocator_AndroidSimpleBufferQueue locBufQueue = {
        SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
        AUDIO_PLAYER_BUFFER_COUNT};
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
    SLDataLocator_BufferQueue locBufQueue = {SL_DATALOCATOR_BUFFERQUEUE, AUDIO_PLAYER_BUFFER_COUNT};
#endif
    SLDataSource source = {&locBufQueue, &formatPcm};
    SLDataLocator_OutputMix locOutmix = {
        SL_DATALOCATOR_OUTPUTMIX,
        _outputMixObj};
    SLDataSink sink = {&locOutmix, nullptr};
    const SLInterfaceID ids[] = {
        SL_IID_PLAY,
        SL_IID_VOLUME,
        CC_SL_IDD_BUFFER_QUEUE,
    };
    const SLboolean req[] = {
        SL_BOOLEAN_TRUE,
        SL_BOOLEAN_TRUE,
        SL_BOOLEAN_TRUE,
    };
    SLresult r;
    r = (*_engineItf)->CreateAudioPlayer(_engineItf, &_playObj, &source, &sink, sizeof(ids) / sizeof(ids[0]), ids, req);//NOLINT(bugprone-sizeof-expression)
    SL_RETURN_VAL_IF_FAILED(r, false, "CreateAudioPlayer failed");
    r = (*_playObj)->Realize(_playObj, SL_BOOLEAN_FALSE); // synchronous realize
    SL_RETURN_VAL_IF_FAILED(r, false, "Realize failed");
    r = (*_playObj)->GetInterface(_playObj, SL_IID_PLAY, &_playItf);
    SL_RETURN_VAL_IF_FAILED(r, false, "GetInterface SL_IID_PLAY failed");
    r = (*_playObj)->GetInterface(_playObj, SL_IID_VOLUME, &_volumeItf);
    SL_RETURN_VAL_IF_FAILED(r, false, "GetInterface SL_IID_VOLUME failed");
    r = (*_playObj)->GetInterface(_playObj, CC_SL_IDD_BUFFER_QUEUE, &_bufferQueueItf);
    SL_RETURN_VAL_IF_FAILED(r, false, "GetInterface CC_SL_IDD_BUFFER_QUEUE failed");
    r = (*_bufferQueueItf)->RegisterCallback(_bufferQueueItf, SLPcmAudioPlayerCallbackProxy::samplePlayerCallback, this);
    SL_RETURN_VAL_IF_FAILED(r, false, "_bufferQueueItf RegisterCallback failed");
    if (__silenceData.empty()) {
        // NOTE(review): sized numChannels * bufferSizeInBytes — verify whether
        // bufferSizeInBytes already accounts for the channel count.
        __silenceData.resize(_numChannels * _bufferSizeInBytes, 0x00);
    }
#if CC_PLATFORM == CC_PLATFORM_OPENHARMONY
    // We need to call this interface in openharmony, otherwise there will be noise
    SLuint8 *buffer = nullptr;
    SLuint32 size = 0;
    (*_bufferQueueItf)->GetBuffer(_bufferQueueItf, &buffer, &size);
#endif
    // Prime the queue with silence so the callback-driven loop starts ticking.
    r = (*_bufferQueueItf)->Enqueue(_bufferQueueItf, __silenceData.data(), __silenceData.size());
    SL_RETURN_VAL_IF_FAILED(r, false, "_bufferQueueItf Enqueue failed");
    r = (*_playItf)->SetPlayState(_playItf, SL_PLAYSTATE_PLAYING);
    SL_RETURN_VAL_IF_FAILED(r, false, "SetPlayState failed");
    return true;
}
// Pauses the OpenSL player; the buffer-queue callback stops firing while paused.
void PcmAudioService::pause() {
    SLresult r = (*_playItf)->SetPlayState(_playItf, SL_PLAYSTATE_PAUSED);
    SL_RETURN_IF_FAILED(r, "PcmAudioService::pause failed");
}
// Resumes the OpenSL player after pause().
void PcmAudioService::resume() {
    SLresult r = (*_playItf)->SetPlayState(_playItf, SL_PLAYSTATE_PLAYING);
    SL_RETURN_IF_FAILED(r, "PcmAudioService::resume failed");
}
} // namespace cc

View File

@@ -0,0 +1,78 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "audio/android/IAudioPlayer.h"
#include "audio/android/OpenSLHelper.h"
#include "audio/android/PcmData.h"
#include <condition_variable>
#include <mutex>
#include "audio/android/utils/Compat.h"
namespace cc {
class AudioMixerController;
// Owns the single OpenSL ES output used for mixed PCM playback: it creates the
// audio player, registers the buffer-queue callback and keeps feeding it with
// either mixed audio (from AudioMixerController) or silence. Created and
// destroyed only by AudioPlayerProvider.
class PcmAudioService {
public:
    inline int getChannelCount() const { return _numChannels; };
    inline int getSampleRate() const { return _sampleRate; };
private:
    PcmAudioService(SLEngineItf engineItf, SLObjectItf outputMixObject);
    virtual ~PcmAudioService();
    // Creates the OpenSL player and starts the callback loop; false on failure.
    bool init(AudioMixerController *controller, int numChannels, int sampleRate, int bufferSizeInBytes);
    // Pushes the next audio (or silence) buffer into the queue.
    bool enqueue();
    // Buffer-queue callback body; runs on the audio thread.
    void bqFetchBufferCallback(CCSLBufferQueueItf bq);
    void pause();
    void resume();
    SLEngineItf _engineItf;        // not owned
    SLObjectItf _outputMixObj;     // not owned
    SLObjectItf _playObj;          // owned; destroyed in the destructor
    SLPlayItf _playItf;
    SLVolumeItf _volumeItf;
    CCSLBufferQueueItf _bufferQueueItf;
    int _numChannels;
    int _sampleRate;
    int _bufferSizeInBytes;
    AudioMixerController *_controller; // not owned
    friend class SLPcmAudioPlayerCallbackProxy;
    friend class AudioPlayerProvider;
};
} // namespace cc

View File

@@ -0,0 +1,102 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "PcmBufferProvider"
#include "audio/android/PcmBufferProvider.h"
#include "audio/android/cutils/log.h"
//#define VERY_VERY_VERBOSE_LOGGING
#ifdef VERY_VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(a...) \
do { \
} while (0)
#endif
namespace cc {
// Starts with an empty, unattached buffer; call init() before use.
PcmBufferProvider::PcmBufferProvider()
: _addr(nullptr), _numFrames(0), _frameSize(0), _nextFrame(0), _unrel(0) {
}
// Attaches this provider to an externally owned PCM buffer of `frames` frames,
// `frameSize` bytes each, and rewinds the cursor. The buffer must outlive the
// provider. Always returns true.
bool PcmBufferProvider::init(const void *addr, size_t frames, size_t frameSize) {
    _addr = addr;
    _numFrames = frames;
    _frameSize = frameSize;
    _nextFrame = 0;
    _unrel = 0;
    return true;
}
// Hands out the next contiguous run of frames starting at the cursor.
// On entry buffer->frameCount is the requested count; on return it is the
// granted count (clamped to what remains) and buffer->raw points into the
// attached PCM data. Returns NOT_ENOUGH_DATA when the buffer is exhausted.
status_t PcmBufferProvider::getNextBuffer(Buffer *buffer,
                                          int64_t pts /* = kInvalidPTS*/) {
    (void)pts; // suppress warning
    size_t requestedFrames = buffer->frameCount;
    if (requestedFrames > _numFrames - _nextFrame) {
        buffer->frameCount = _numFrames - _nextFrame;
    }
    ALOGVV(
        "getNextBuffer() requested %zu frames out of %zu frames available,"
        " and returned %zu frames",
        requestedFrames, (size_t)(_numFrames - _nextFrame), buffer->frameCount);
    // Remember how many frames are outstanding until releaseBuffer().
    _unrel = buffer->frameCount;
    if (buffer->frameCount > 0) {
        // _addr is stored const; Buffer::raw is mutable, so the const_cast is
        // required by the AudioBufferProvider interface. Named casts replace
        // the previous C-style cast.
        buffer->raw = const_cast<char *>(static_cast<const char *>(_addr)) + _frameSize * _nextFrame;
        return NO_ERROR;
    }
    buffer->raw = nullptr; // nullptr instead of NULL
    return NOT_ENOUGH_DATA;
}
// Returns frames obtained via getNextBuffer() and advances the cursor.
// Releasing more frames than were handed out is clamped to the outstanding
// count (and logged in verbose builds). The buffer descriptor is cleared.
void PcmBufferProvider::releaseBuffer(Buffer *buffer) {
    if (buffer->frameCount > _unrel) {
        ALOGVV(
            "ERROR releaseBuffer() released %zu frames but only %zu available "
            "to release",
            buffer->frameCount, _unrel);
        _nextFrame += _unrel;
        _unrel = 0;
    } else {
        ALOGVV(
            "releaseBuffer() released %zu frames out of %zu frames available "
            "to release",
            buffer->frameCount, _unrel);
        _nextFrame += buffer->frameCount;
        _unrel -= buffer->frameCount;
    }
    buffer->frameCount = 0;
    buffer->raw = nullptr; // nullptr instead of NULL
}
// Rewinds the cursor to the start of the attached buffer.
// Note: does not clear _unrel; callers release outstanding frames first.
void PcmBufferProvider::reset() {
    _nextFrame = 0;
}
} // namespace cc

View File

@@ -0,0 +1,51 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "audio/android/AudioBufferProvider.h"
#include <stddef.h>
#include <stdio.h>
namespace cc {
// AudioBufferProvider backed by a single, externally owned, contiguous PCM
// buffer. Hands out sequential frame runs via getNextBuffer()/releaseBuffer()
// and can be rewound with reset().
class PcmBufferProvider : public AudioBufferProvider {
public:
    PcmBufferProvider();
    // Attach to `frames` frames of `frameSize` bytes at `addr` (not owned).
    bool init(const void *addr, size_t frames, size_t frameSize);
    virtual status_t getNextBuffer(Buffer *buffer, int64_t pts = kInvalidPTS) override;
    virtual void releaseBuffer(Buffer *buffer) override;
    void reset();
protected:
    const void *_addr;  // base address
    size_t _numFrames;  // total frames
    size_t _frameSize;  // size of each frame in bytes
    size_t _nextFrame;  // index of next frame to provide
    size_t _unrel;      // number of frames not yet released
};
} // namespace cc

View File

@@ -0,0 +1,128 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "PcmData"
#include "audio/android/PcmData.h"
#include "audio/android/OpenSLHelper.h"
namespace cc {
// Default-constructs into the "invalid" state (all fields -1 / null buffer).
PcmData::PcmData() {
    // ALOGV("In the constructor of PcmData (%p)", this);
    reset();
}
// The shared_ptr buffer cleans itself up; kept only for the trace log.
PcmData::~PcmData() {
    // ALOGV("In the destructor of PcmData (%p)", this);
}
// Copy constructor: copies all scalar fields and shares the PCM buffer.
// The original wrote `std::move(o.pcmBuffer)` here, but `o` is const so that
// expression silently resolved to a copy anyway (clang-tidy
// performance-move-const-arg); a plain copy states the intent.
PcmData::PcmData(const PcmData &o) {
    // ALOGV("In the copy constructor of PcmData (%p)", this);
    numChannels = o.numChannels;
    sampleRate = o.sampleRate;
    bitsPerSample = o.bitsPerSample;
    containerSize = o.containerSize;
    channelMask = o.channelMask;
    endianness = o.endianness;
    numFrames = o.numFrames;
    duration = o.duration;
    pcmBuffer = o.pcmBuffer; // shared ownership, not a move
}
// Move constructor: steals the buffer and copies the scalars, then resets the
// source so it reads as invalid afterwards.
PcmData::PcmData(PcmData &&o) {
    // ALOGV("In the move constructor of PcmData (%p)", this);
    numChannels = o.numChannels;
    sampleRate = o.sampleRate;
    bitsPerSample = o.bitsPerSample;
    containerSize = o.containerSize;
    channelMask = o.channelMask;
    endianness = o.endianness;
    numFrames = o.numFrames;
    duration = o.duration;
    pcmBuffer = std::move(o.pcmBuffer);
    o.reset(); // leave the moved-from object in the invalid state
}
// Copy assignment: member-wise copy; the shared_ptr copy is self-assignment
// safe, so no explicit self-check is required.
PcmData &PcmData::operator=(const PcmData &o) {
    // ALOGV("In the copy assignment of PcmData");
    numChannels = o.numChannels;
    sampleRate = o.sampleRate;
    bitsPerSample = o.bitsPerSample;
    containerSize = o.containerSize;
    channelMask = o.channelMask;
    endianness = o.endianness;
    numFrames = o.numFrames;
    duration = o.duration;
    pcmBuffer = o.pcmBuffer;
    return *this;
}
// Move assignment: steals the buffer, copies the scalars and resets the source.
// A self-move guard is added: without it, `a = std::move(a)` would fall through
// to o.reset() and wipe the freshly assigned members of *this.
PcmData &PcmData::operator=(PcmData &&o) {
    // ALOGV("In the move assignment of PcmData");
    if (this != &o) {
        numChannels = o.numChannels;
        sampleRate = o.sampleRate;
        bitsPerSample = o.bitsPerSample;
        containerSize = o.containerSize;
        channelMask = o.channelMask;
        endianness = o.endianness;
        numFrames = o.numFrames;
        duration = o.duration;
        pcmBuffer = std::move(o.pcmBuffer);
        o.reset(); // moved-from object reads as invalid
    }
    return *this;
}
// Puts the struct in its sentinel "invalid" state; isValid() returns false
// until every field is filled in by a decoder.
void PcmData::reset() {
    numChannels = -1;
    sampleRate = -1;
    bitsPerSample = -1;
    containerSize = -1;
    channelMask = -1;
    endianness = -1;
    numFrames = -1;
    duration = -1.0f;
    pcmBuffer = nullptr;
}
// True only when every decode-produced field is positive and a buffer exists.
// channelMask/endianness are deliberately not checked.
bool PcmData::isValid() const {
    return numChannels > 0 && sampleRate > 0 && bitsPerSample > 0 && containerSize > 0 && numFrames > 0 && duration > 0 && pcmBuffer != nullptr;
}
// Renders all metadata fields (not the buffer contents) into a single
// human-readable line for logging.
ccstd::string PcmData::toString() const {
    char info[256] = {0};
    snprintf(info, sizeof(info),
             "numChannels: %d, sampleRate: %d, bitPerSample: %d, containerSize: %d, "
             "channelMask: %d, endianness: %d, numFrames: %d, duration: %f",
             numChannels, sampleRate, bitsPerSample, containerSize, channelMask, endianness,
             numFrames, duration);
    return ccstd::string{info};
}
} // namespace cc

View File

@@ -0,0 +1,65 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <stdio.h>
#include <memory>
#include "base/std/container/string.h"
#include "base/std/container/vector.h"
namespace cc {
// Decoded PCM audio plus its format metadata. Copyable (buffer is shared via
// shared_ptr) and movable (moved-from objects read as invalid, see reset()).
struct PcmData {
    std::shared_ptr<ccstd::vector<char>> pcmBuffer; // raw interleaved samples, shared between copies
    int numChannels;   // 1 = mono, 2 = stereo
    int sampleRate;    // Hz
    int bitsPerSample;
    int containerSize;
    int channelMask;
    int endianness;
    int numFrames;
    float duration; // in seconds
    PcmData();
    ~PcmData();
    PcmData(const PcmData &o);
    PcmData(PcmData &&o);
    PcmData &operator=(const PcmData &o);
    PcmData &operator=(PcmData &&o);
    // Returns to the sentinel invalid state (-1 fields, null buffer).
    void reset();
    // True once all decode-produced fields are set and a buffer is present.
    bool isValid() const;
    // One-line metadata summary for logging.
    ccstd::string toString() const;
};
} // namespace cc

View File

@@ -0,0 +1,86 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "Track"
#include "audio/android/Track.h"
#include "audio/android/cutils/log.h"
#include <math.h>
namespace cc {
// Wraps decoded PCM data as a mixer track and attaches the provider to the
// shared buffer (frame size = bytes per sample * channel count).
Track::Track(const PcmData &pcmData)
: onStateChanged(nullptr), _pcmData(pcmData), _prevState(State::IDLE), _state(State::IDLE), _name(-1), _volume(1.0f), _isVolumeDirty(true), _isLoop(false), _isInitialized(false), _isAudioFocus(true) {
    init(_pcmData.pcmBuffer->data(), _pcmData.numFrames, _pcmData.bitsPerSample / 8 * _pcmData.numChannels);
}
// Nothing to release; the PCM buffer is shared and freed elsewhere.
Track::~Track() {
    ALOGV("~Track(): %p", this);
}
// Packs the effective volume (muted when audio focus is lost) into the
// left/right minifloat gain pair consumed by the mixer.
gain_minifloat_packed_t Track::getVolumeLR() {
    float volume = _isAudioFocus ? _volume : 0.0f;
    gain_minifloat_t v = gain_from_float(volume);
    return gain_minifloat_pack(v, v); // same gain on both channels
}
// Seeks to `pos` seconds by converting it to a frame index.
// NOTE(review): no clamping — a pos outside [0, duration] yields an
// out-of-range frame index; confirm callers pre-validate.
bool Track::setPosition(float pos) {
    _nextFrame = (size_t)(pos * _numFrames / _pcmData.duration);
    _unrel = 0; // drop any frames handed out but not yet released
    return true;
}
// Current position in seconds, derived from the provider's frame cursor.
float Track::getPosition() const {
    return _nextFrame * _pcmData.duration / _numFrames;
}
// Updates the volume under the dirty-flag mutex; only a meaningfully different
// value (beyond a small epsilon) marks the volume dirty for the mixer.
void Track::setVolume(float volume) {
    std::lock_guard<std::mutex> guard(_volumeDirtyMutex);
    const bool changed = fabs(_volume - volume) > 0.00001;
    if (changed) {
        _volume = volume;
        setVolumeDirty(true);
    }
}
// Returns the requested volume; audio focus is applied later in getVolumeLR().
float Track::getVolume() const {
    return _volume;
}
// Toggles audio focus (losing focus mutes output in getVolumeLR()) and marks
// the volume dirty so the mixer re-reads the gain.
void Track::setAudioFocus(bool isFocus) {
    _isAudioFocus = isFocus;
    setVolumeDirty(true);
}
// Transitions the track to `state` (recording the previous one) and notifies
// the listener. Changes from the original: the stray ';' after the function
// body is removed, and onStateChanged is null-checked — it is initialized to
// nullptr in the constructor and only assigned by PcmAudioPlayer::prepare(),
// so an early setState() would otherwise crash.
// NOTE(review): the callback runs while _stateMutex is held; a listener that
// calls back into setState() would deadlock — confirm listeners never do.
void Track::setState(State state) {
    std::lock_guard<std::mutex> lk(_stateMutex);
    if (_state != state) {
        _prevState = _state;
        _state = state;
        if (onStateChanged != nullptr) {
            onStateChanged(_state);
        }
    }
}
} // namespace cc

101
cocos/audio/android/Track.h Normal file
View File

@@ -0,0 +1,101 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "audio/android/IVolumeProvider.h"
#include "audio/android/PcmBufferProvider.h"
#include "audio/android/PcmData.h"
#include <functional>
#include <mutex>
namespace cc {
// A single playable PCM stream for the mixer: provides buffers (via
// PcmBufferProvider), per-track gain (via IVolumeProvider) and a small state
// machine whose transitions are reported through onStateChanged.
class Track : public PcmBufferProvider, public IVolumeProvider {
public:
    // Lifecycle states; setState() records the previous one for OVER/STOPPED
    // disambiguation (see PcmAudioPlayer::prepare).
    enum class State {
        IDLE,
        PLAYING,
        RESUMED,
        PAUSED,
        STOPPED,
        OVER,
        DESTROYED
    };
    Track(const PcmData &pcmData);
    virtual ~Track();
    inline State getState() const { return _state; };
    // Thread-safe transition; fires onStateChanged on an actual change.
    void setState(State state);
    inline State getPrevState() const { return _prevState; };
    // True when a playing track has consumed all of its frames.
    inline bool isPlayOver() const { return _state == State::PLAYING && _nextFrame >= _numFrames; };
    inline void setName(int name) { _name = name; };
    inline int getName() const { return _name; };
    void setVolume(float volume);
    float getVolume() const;
    // Losing focus mutes getVolumeLR() without touching _volume.
    void setAudioFocus(bool isFocus);
    bool setPosition(float pos);   // seconds
    float getPosition() const;     // seconds
    virtual gain_minifloat_packed_t getVolumeLR() override;
    inline void setLoop(bool isLoop) { _isLoop = isLoop; };
    inline bool isLoop() const { return _isLoop; };
    // Invoked from setState(); may run on the audio mixing thread.
    std::function<void(State)> onStateChanged;
private:
    inline bool isVolumeDirty() const { return _isVolumeDirty; };
    inline void setVolumeDirty(bool isDirty) { _isVolumeDirty = isDirty; };
    inline bool isInitialized() const { return _isInitialized; };
    inline void setInitialized(bool isInitialized) { _isInitialized = isInitialized; };
private:
    PcmData _pcmData;          // keeps the shared PCM buffer alive
    State _prevState;
    State _state;
    std::mutex _stateMutex;    // guards state transitions
    int _name;                 // mixer track name/handle
    float _volume;
    bool _isVolumeDirty;
    std::mutex _volumeDirtyMutex;
    bool _isLoop;
    bool _isInitialized;
    bool _isAudioFocus;
    friend class AudioMixerController;
};
} // namespace cc

View File

@@ -0,0 +1,360 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "UrlAudioPlayer"
#include "audio/android/UrlAudioPlayer.h"
#include "audio/android/ICallerThreadUtils.h"
#include "base/std/container/vector.h"
#include "base/Macros.h"
#include <cmath>
#include <algorithm> // for std::find
namespace {
std::mutex __playerContainerMutex;//NOLINT(bugprone-reserved-identifier,readability-identifier-naming)
ccstd::vector<cc::UrlAudioPlayer *> __playerContainer;//NOLINT(bugprone-reserved-identifier,readability-identifier-naming)
std::once_flag __onceFlag;//NOLINT(bugprone-reserved-identifier,readability-identifier-naming)
} // namespace
namespace cc {
// Proxy that forwards OpenSL ES play-event callbacks into UrlAudioPlayer.
// OpenSL ES may fire the callback on its own thread after the player was
// already destroyed on the game thread, so before dereferencing `context`
// the proxy verifies, under __playerContainerMutex, that the instance is
// still registered in __playerContainer.
class SLUrlAudioPlayerCallbackProxy {
public:
    static void playEventCallback(SLPlayItf caller, void *context, SLuint32 playEvent) {
        auto *thiz = reinterpret_cast<UrlAudioPlayer *>(context);
        // We must use a mutex for the whole block of the following function invocation.
        std::lock_guard<std::mutex> lk(__playerContainerMutex);
        auto iter = std::find(__playerContainer.begin(), __playerContainer.end(), thiz);
        if (iter != __playerContainer.end()) {
            // Still alive: safe to dispatch the event to the instance.
            thiz->playEventCallback(caller, playEvent);
        }
    }
};
// Constructs an idle (INVALID) player and registers it in the global
// __playerContainer so pending OpenSL ES callbacks can check it is alive.
// The engine/output-mix interfaces are borrowed, not owned.
UrlAudioPlayer::UrlAudioPlayer(SLEngineItf engineItf, SLObjectItf outputMixObject, ICallerThreadUtils *callerThreadUtils)
: _engineItf(engineItf), _outputMixObj(outputMixObject), _callerThreadUtils(callerThreadUtils), _id(-1), _assetFd(nullptr), _playObj(nullptr), _playItf(nullptr), _seekItf(nullptr), _volumeItf(nullptr), _volume(0.0F), _duration(0.0F), _isLoop(false), _isAudioFocus(true), _state(State::INVALID), _playEventCallback(nullptr), _isDestroyed(std::make_shared<bool>(false)) {
    std::call_once(__onceFlag, []() {
        __playerContainer.reserve(10);
    });
    {
        // RAII lock instead of manual lock()/unlock(): push_back may throw
        // (bad_alloc), which previously would have left the mutex locked.
        std::lock_guard<std::mutex> lk(__playerContainerMutex);
        __playerContainer.push_back(this);
        ALOGV("Current UrlAudioPlayer instance count: %d", static_cast<int>(__playerContainer.size()));
    }
    _callerThreadId = callerThreadUtils->getCallerThreadId();
}
// Unregisters this instance from the global container so late OpenSL ES
// callbacks (see SLUrlAudioPlayerCallbackProxy) treat it as dead.
UrlAudioPlayer::~UrlAudioPlayer() {
    ALOGV("~UrlAudioPlayer(): %p", this);
    // RAII lock instead of manual lock()/unlock(): guarantees the mutex is
    // released on every path out of the destructor.
    std::lock_guard<std::mutex> lk(__playerContainerMutex);
    auto iter = std::find(__playerContainer.begin(), __playerContainer.end(), this);
    if (iter != __playerContainer.end()) {
        __playerContainer.erase(iter);
    }
}
// OpenSL ES play-event handler, invoked via SLUrlAudioPlayerCallbackProxy on
// an OpenSL ES internal thread. Real handling is marshalled to the caller
// (Cocos) thread; the shared _isDestroyed flag lets the queued lambda detect
// that the player was deleted before the lambda ran.
// WARNING: for a finished non-looping track the lambda ends with `delete this`.
void UrlAudioPlayer::playEventCallback(SLPlayItf caller, SLuint32 playEvent) {
    CC_UNUSED_PARAM(caller);
    // Note that it's on sub thread, please don't invoke OpenSLES API on sub thread
    if (playEvent == SL_PLAYEVENT_HEADATEND) {
        // Copy the shared flag so the lambda can outlive `this` safely.
        std::shared_ptr<bool> isDestroyed = _isDestroyed;
        auto func = [this, isDestroyed]() {
            // If it was destroyed, just return.
            if (*isDestroyed) {
                ALOGV("The UrlAudioPlayer (%p) was destroyed!", this);
                return;
            }
            //Note that It's in the caller's thread (Cocos Thread)
            // If state is already stopped, ignore the play over event.
            if (_state == State::STOPPED) {
                return;
            }
            //fix issue#8965:AudioEngine can't looping audio on Android 2.3.x
            if (isLoop()) {
                play();
            } else {
                // One-shot playback finished: report OVER, then self-destruct.
                setState(State::OVER);
                if (_playEventCallback != nullptr) {
                    _playEventCallback(State::OVER);
                }
                ALOGV("UrlAudioPlayer (%p) played over, destroy self ...", this);
                destroy();
                delete this;
            }
        };
        if (_callerThreadId == std::this_thread::get_id()) {
            // Already on the caller thread; run synchronously.
            func();
        } else {
            _callerThreadUtils->performFunctionInCallerThread(func);
        }
    }
}
// Registers the callback notified on state transitions (STOPPED / OVER).
void UrlAudioPlayer::setPlayEventCallback(const PlayEventCallback &playEventCallback) {
    _playEventCallback = playEventCallback;
}
// Stops playback and destroys the player.
// WARNING: when the state is PLAYING or PAUSED this ends with `delete this`;
// the instance must not be touched afterwards. If the SL call fails,
// SL_RETURN_IF_FAILED returns early and the object stays alive.
void UrlAudioPlayer::stop() {
    ALOGV("UrlAudioPlayer::stop (%p, %d)", this, getId());
    SLresult r = (*_playItf)->SetPlayState(_playItf, SL_PLAYSTATE_STOPPED);
    SL_RETURN_IF_FAILED(r, "UrlAudioPlayer::stop failed");
    if (_state == State::PLAYING || _state == State::PAUSED) {
        setLoop(false);
        setState(State::STOPPED);
        if (_playEventCallback != nullptr) {
            _playEventCallback(State::STOPPED);
        }
        // Self-destruction: release SL resources, then free the object.
        destroy();
        delete this;
    } else {
        ALOGW("UrlAudioPlayer (%p, state:%d) isn't playing or paused, could not invoke stop!", this, static_cast<int>(_state));
    }
}
// Pauses playback; only meaningful while the player is actively playing.
void UrlAudioPlayer::pause() {
    if (_state != State::PLAYING) {
        ALOGW("UrlAudioPlayer (%p, state:%d) isn't playing, could not invoke pause!", this, static_cast<int>(_state));
        return;
    }
    SLresult r = (*_playItf)->SetPlayState(_playItf, SL_PLAYSTATE_PAUSED);
    SL_RETURN_IF_FAILED(r, "UrlAudioPlayer::pause failed");
    setState(State::PAUSED);
}
// Resumes playback; only valid from the PAUSED state.
void UrlAudioPlayer::resume() {
    if (_state != State::PAUSED) {
        ALOGW("UrlAudioPlayer (%p, state:%d) isn't paused, could not invoke resume!", this, static_cast<int>(_state));
        return;
    }
    SLresult r = (*_playItf)->SetPlayState(_playItf, SL_PLAYSTATE_PLAYING);
    SL_RETURN_IF_FAILED(r, "UrlAudioPlayer::resume failed");
    setState(State::PLAYING);
}
// Starts playback from a freshly prepared (INITIALIZED) or PAUSED player.
void UrlAudioPlayer::play() {
    if (_state != State::INITIALIZED && _state != State::PAUSED) {
        ALOGW("UrlAudioPlayer (%p, state:%d) isn't paused or initialized, could not invoke play!", this, static_cast<int>(_state));
        return;
    }
    SLresult r = (*_playItf)->SetPlayState(_playItf, SL_PLAYSTATE_PLAYING);
    SL_RETURN_IF_FAILED(r, "UrlAudioPlayer::play failed");
    setState(State::PLAYING);
}
// Converts a linear volume (1.0F = full) to millibels and applies it to the
// OpenSL ES volume interface.
// Guard volume <= 0: log10(0) yields -inf and converting -inf to int is
// undefined behavior. This path is reachable — setAudioFocus(false) calls
// this function with 0.0F — so silence maps directly to SL_MILLIBEL_MIN.
void UrlAudioPlayer::setVolumeToSLPlayer(float volume) {
    int dbVolume = SL_MILLIBEL_MIN;
    if (volume > 0.0F) {
        dbVolume = static_cast<int>(2000 * log10(volume));
        if (dbVolume < SL_MILLIBEL_MIN) {
            dbVolume = SL_MILLIBEL_MIN;
        }
    }
    SLresult r = (*_volumeItf)->SetVolumeLevel(_volumeItf, dbVolume);
    SL_RETURN_IF_FAILED(r, "UrlAudioPlayer::setVolumeToSLPlayer %d failed", dbVolume);
}
// Remembers the requested linear volume; it is only pushed to the SL player
// while audio focus is held (setAudioFocus restores it when focus returns).
void UrlAudioPlayer::setVolume(float volume) {
    _volume = volume;
    if (!_isAudioFocus) {
        return;
    }
    setVolumeToSLPlayer(_volume);
}
// Returns the last volume set via setVolume (linear scale, 1.0F = full).
float UrlAudioPlayer::getVolume() const {
    return _volume;
}
void UrlAudioPlayer::setAudioFocus(bool isFocus) {
_isAudioFocus = isFocus;
float volume = _isAudioFocus ? _volume : 0.0F;
setVolumeToSLPlayer(volume);
}
// Returns the media duration in seconds, caching it after the first
// successful query. Returns -1.0F when the duration is not yet known
// (SL_TIME_UNKNOWN, or a queried duration <= 0) and 0.0F on SL API failure.
float UrlAudioPlayer::getDuration() const {
    if (_duration > 0) {
        // Cached value from an earlier successful query.
        return _duration;
    }
    SLmillisecond duration;
    SLresult r = (*_playItf)->GetDuration(_playItf, &duration);
    SL_RETURN_VAL_IF_FAILED(r, 0.0F, "UrlAudioPlayer::getDuration failed");
    if (duration == SL_TIME_UNKNOWN) {
        return -1.0F;
    } else { // NOLINT(readability-else-after-return)
        // Cache on the logically-const object (hence the const_cast).
        const_cast<UrlAudioPlayer *>(this)->_duration = duration / 1000.0F;
        if (_duration <= 0) {
            return -1.0F;
        }
    }
    return _duration;
}
// Current playback position in seconds, or 0.0F if the SL query fails.
float UrlAudioPlayer::getPosition() const {
    SLmillisecond millisecond;
    SLresult r = (*_playItf)->GetPosition(_playItf, &millisecond);
    SL_RETURN_VAL_IF_FAILED(r, 0.0F, "UrlAudioPlayer::getPosition failed");
    return millisecond / 1000.0F;
}
bool UrlAudioPlayer::setPosition(float pos) {
SLmillisecond millisecond = 1000.0F * pos;
SLresult r = (*_seekItf)->SetPosition(_seekItf, millisecond, SL_SEEKMODE_ACCURATE);
SL_RETURN_VAL_IF_FAILED(r, false, "UrlAudioPlayer::setPosition %f failed", pos);
return true;
}
// Creates and configures the underlying OpenSL ES AudioPlayer for the given
// data source: a URL (SL_DATALOCATOR_URI) or an Android asset file descriptor
// (SL_DATALOCATOR_ANDROIDFD, using [start, start+length) of the fd).
// On success the player is left in the INITIALIZED state at full volume.
// Returns false on an unsupported locator type or any SL API failure.
// NOTE(review): the ALOGV below dereferences _assetFd even for the URI case —
// assumes callers always pass a non-null assetFd; verify in AudioPlayerProvider.
bool UrlAudioPlayer::prepare(const ccstd::string &url, SLuint32 locatorType, std::shared_ptr<AssetFd> assetFd, int start,
                             int length) {
    _url = url;
    _assetFd = std::move(assetFd);
#if CC_PLATFORM == CC_PLATFORM_ANDROID
    const char *locatorTypeStr = "UNKNOWN";
    if (locatorType == SL_DATALOCATOR_ANDROIDFD) {
        locatorTypeStr = "SL_DATALOCATOR_ANDROIDFD";
    } else if (locatorType == SL_DATALOCATOR_URI) {
        locatorTypeStr = "SL_DATALOCATOR_URI";
    } else {
        ALOGE("Oops, invalid locatorType: %d", (int)locatorType);
        return false;
    }
    ALOGV("UrlAudioPlayer::prepare: %s, %s, %d, %d, %d", _url.c_str(), locatorTypeStr, _assetFd->getFd(), start,
          length);
    SLDataSource audioSrc;
    SLDataFormat_MIME formatMime = {SL_DATAFORMAT_MIME, nullptr, SL_CONTAINERTYPE_UNSPECIFIED};
    audioSrc.pFormat = &formatMime;
    //Note: locFd & locUri should be outside of the following if/else block
    // Although locFd & locUri are only used inside if/else block, its lifecycle
    // will be destroyed right after '}' block. And since we pass a pointer to
    // 'audioSrc.pLocator=&locFd/&locUri', pLocator will point to an invalid address
    // while invoking Engine::createAudioPlayer interface. So be care of change the position
    // of these two variables.
    SLDataLocator_AndroidFD locFd;
    SLDataLocator_URI locUri;
    if (locatorType == SL_DATALOCATOR_ANDROIDFD) {
        locFd = {locatorType, _assetFd->getFd(), start, length};
        audioSrc.pLocator = &locFd;
    } else if (locatorType == SL_DATALOCATOR_URI) {
        locUri = {locatorType, (SLchar *)_url.c_str()}; // NOLINT(google-readability-casting)
        audioSrc.pLocator = &locUri;
        ALOGV("locUri: locatorType: %d", (int)locUri.locatorType);
    }
    // configure audio sink
    SLDataLocator_OutputMix locOutmix = {SL_DATALOCATOR_OUTPUTMIX, _outputMixObj};
    SLDataSink audioSnk = {&locOutmix, nullptr};
    // create audio player, requesting the seek/prefetch/volume interfaces
    const SLInterfaceID ids[3] = {SL_IID_SEEK, SL_IID_PREFETCHSTATUS, SL_IID_VOLUME};
    const SLboolean req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
    SLresult result = (*_engineItf)->CreateAudioPlayer(_engineItf, &_playObj, &audioSrc, &audioSnk, 3, ids, req);
    SL_RETURN_VAL_IF_FAILED(result, false, "CreateAudioPlayer failed");
    // realize the player
    result = (*_playObj)->Realize(_playObj, SL_BOOLEAN_FALSE);
    SL_RETURN_VAL_IF_FAILED(result, false, "Realize failed");
    // get the play interface
    result = (*_playObj)->GetInterface(_playObj, SL_IID_PLAY, &_playItf);
    SL_RETURN_VAL_IF_FAILED(result, false, "GetInterface SL_IID_PLAY failed");
    // get the seek interface
    result = (*_playObj)->GetInterface(_playObj, SL_IID_SEEK, &_seekItf);
    SL_RETURN_VAL_IF_FAILED(result, false, "GetInterface SL_IID_SEEK failed");
    // get the volume interface
    result = (*_playObj)->GetInterface(_playObj, SL_IID_VOLUME, &_volumeItf);
    SL_RETURN_VAL_IF_FAILED(result, false, "GetInterface SL_IID_VOLUME failed");
    // route end-of-stream events through the static proxy (see
    // SLUrlAudioPlayerCallbackProxy for the alive-check it performs)
    result = (*_playItf)->RegisterCallback(_playItf,
                                           SLUrlAudioPlayerCallbackProxy::playEventCallback, this);
    SL_RETURN_VAL_IF_FAILED(result, false, "RegisterCallback failed");
    result = (*_playItf)->SetCallbackEventsMask(_playItf, SL_PLAYEVENT_HEADATEND);
    SL_RETURN_VAL_IF_FAILED(result, false, "SetCallbackEventsMask SL_PLAYEVENT_HEADATEND failed");
    setState(State::INITIALIZED);
    setVolume(1.0F);
#endif
    return true;
}
// Intentionally a no-op: to replay, the cocos audio engine creates a new
// player (new -> prepare -> play) instead of rewinding an existing one.
void UrlAudioPlayer::rewind() {
    // Not supported currently. since cocos audio engine will new -> prepare -> play again.
}
void UrlAudioPlayer::setLoop(bool isLoop) {
_isLoop = isLoop;
SLboolean loopEnable = _isLoop ? SL_BOOLEAN_TRUE : SL_BOOLEAN_FALSE;
SLresult r = (*_seekItf)->SetLoop(_seekItf, loopEnable, 0, SL_TIME_UNKNOWN);
SL_RETURN_IF_FAILED(r, "UrlAudioPlayer::setLoop %d failed", _isLoop ? 1 : 0);
}
// Whether looping was enabled via setLoop.
bool UrlAudioPlayer::isLoop() const {
    return _isLoop;
}
void UrlAudioPlayer::stopAll() {
// To avoid break the for loop, we need to copy a new map
__playerContainerMutex.lock();
auto temp = __playerContainer;
__playerContainerMutex.unlock();
for (auto &&player : temp) {
player->stop();
}
}
// Releases the underlying OpenSL ES player object. Idempotent: guarded by the
// shared _isDestroyed flag, which pending callbacks also observe so they can
// bail out instead of touching a destroyed player.
void UrlAudioPlayer::destroy() {
    if (!*_isDestroyed) {
        *_isDestroyed = true;
        ALOGV("UrlAudioPlayer::destroy() %p", this);
        SL_DESTROY_OBJ(_playObj);
        ALOGV("UrlAudioPlayer::destroy end");
    }
}
} // namespace cc

View File

@@ -0,0 +1,127 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include <memory>
#include <mutex>
#include <thread>
#include "audio/android/AssetFd.h"
#include "audio/android/IAudioPlayer.h"
#include "audio/android/OpenSLHelper.h"
namespace cc {
class ICallerThreadUtils;
class AssetFd;
/**
 * IAudioPlayer implementation that plays audio with OpenSL ES directly from a
 * URL or an Android asset file descriptor.
 *
 * Lifecycle: instances are created and prepared by AudioPlayerProvider (the
 * constructor and prepare() are private; AudioPlayerProvider is a friend).
 * The player deletes itself after stop() or when a non-looping track plays to
 * the end, so callers must not hold the pointer after either event.
 */
class UrlAudioPlayer : public IAudioPlayer {
public:
    // Override Functions Begin
    virtual int getId() const override { return _id; };
    virtual void setId(int id) override { _id = id; };
    virtual ccstd::string getUrl() const override { return _url; };
    virtual State getState() const override { return _state; };
    virtual void play() override;
    virtual void pause() override;
    virtual void resume() override;
    virtual void stop() override;
    virtual void rewind() override;
    virtual void setVolume(float volume) override;
    virtual float getVolume() const override;
    virtual void setAudioFocus(bool isFocus) override;
    virtual void setLoop(bool isLoop) override;
    virtual bool isLoop() const override;
    virtual float getDuration() const override;
    virtual float getPosition() const override;
    virtual bool setPosition(float pos) override;
    virtual void setPlayEventCallback(const PlayEventCallback &playEventCallback) override;
    // Override Functions End
private:
    UrlAudioPlayer(SLEngineItf engineItf, SLObjectItf outputMixObject, ICallerThreadUtils *callerThreadUtils);
    virtual ~UrlAudioPlayer();
    // Builds the OpenSL ES player for a URI or asset-fd data source.
    bool prepare(const ccstd::string &url, SLuint32 locatorType, std::shared_ptr<AssetFd> assetFd, int start, int length);
    // Stops (and thereby destroys) every live UrlAudioPlayer instance.
    static void stopAll();
    // Releases the OpenSL ES player object; idempotent.
    void destroy();
    inline void setState(State state) { _state = state; };
    // Invoked (via SLUrlAudioPlayerCallbackProxy) when an SL play event fires.
    void playEventCallback(SLPlayItf caller, SLuint32 playEvent);
    // Converts the linear volume to millibels and applies it to the SL player.
    void setVolumeToSLPlayer(float volume);
private:
    SLEngineItf _engineItf;                  // engine interface, not owned
    SLObjectItf _outputMixObj;               // output mix, not owned
    ICallerThreadUtils *_callerThreadUtils;  // marshals callbacks to the caller thread
    int _id;
    ccstd::string _url;
    std::shared_ptr<AssetFd> _assetFd;       // keeps the asset fd alive while playing
    SLObjectItf _playObj;                    // owned SL player object, released in destroy()
    SLPlayItf _playItf;
    SLSeekItf _seekItf;
    SLVolumeItf _volumeItf;
    float _volume;                           // linear volume, 1.0F = full
    float _duration;                         // cached duration in seconds, 0 until first query
    bool _isLoop;
    bool _isAudioFocus;
    State _state;
    PlayEventCallback _playEventCallback;
    std::thread::id _callerThreadId;         // thread that created the player
    std::shared_ptr<bool> _isDestroyed;      // shared with queued callbacks to detect deletion
    friend class SLUrlAudioPlayerCallbackProxy;
    friend class AudioPlayerProvider;
};
} // namespace cc

491
cocos/audio/android/audio.h Normal file
View File

@@ -0,0 +1,491 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
// ----------------------------------------------------------------------------
#include <stdint.h>
#include "audio/android/cutils/bitops.h"
#define PROPERTY_VALUE_MAX 256
#define CONSTEXPR constexpr
#ifdef __cplusplus
#define CC_LIKELY(exp) (__builtin_expect(!!(exp), true))
#define CC_UNLIKELY(exp) (__builtin_expect(!!(exp), false))
#else
#define CC_LIKELY(exp) (__builtin_expect(!!(exp), 1))
#define CC_UNLIKELY(exp) (__builtin_expect(!!(exp), 0))
#endif
/* special audio session values
 * (XXX: should this be living in the audio effects land?)
 */
/* Note: AUDIO_SESSION_OUTPUT_MIX and AUDIO_SESSION_ALLOCATE deliberately
 * share the value 0 for now (see the REFINE note below). */
typedef enum {
    /* session for effects attached to a particular output stream
     * (value must be less than 0)
     */
    AUDIO_SESSION_OUTPUT_STAGE = -1,
    /* session for effects applied to output mix. These effects can
     * be moved by audio policy manager to another output stream
     * (value must be 0)
     */
    AUDIO_SESSION_OUTPUT_MIX = 0,
    /* application does not specify an explicit session ID to be used,
     * and requests a new session ID to be allocated
     * REFINE: use unique values for AUDIO_SESSION_OUTPUT_MIX and AUDIO_SESSION_ALLOCATE,
     * after all uses have been updated from 0 to the appropriate symbol, and have been tested.
     */
    AUDIO_SESSION_ALLOCATE = 0,
} audio_session_t;
/* Audio sub formats (see enum audio_format). */
/* PCM sub formats */
/* These occupy the lower 24 bits of an audio_format_t value whose main
 * format byte is AUDIO_FORMAT_PCM (see the aliases in audio_format_t). */
typedef enum {
    /* All of these are in native byte order */
    AUDIO_FORMAT_PCM_SUB_16_BIT = 0x1, /* DO NOT CHANGE - PCM signed 16 bits */
    AUDIO_FORMAT_PCM_SUB_8_BIT = 0x2, /* DO NOT CHANGE - PCM unsigned 8 bits */
    AUDIO_FORMAT_PCM_SUB_32_BIT = 0x3, /* PCM signed .31 fixed point */
    AUDIO_FORMAT_PCM_SUB_8_24_BIT = 0x4, /* PCM signed 8.23 fixed point */
    AUDIO_FORMAT_PCM_SUB_FLOAT = 0x5, /* PCM single-precision floating point */
    AUDIO_FORMAT_PCM_SUB_24_BIT_PACKED = 0x6, /* PCM signed .23 fixed point packed in 3 bytes */
} audio_format_pcm_sub_fmt_t;
/* The audio_format_*_sub_fmt_t declarations are not currently used */
/* MP3 sub format field definition : can use 11 LSBs in the same way as MP3
 * frame header to specify bit rate, stereo mode, version...
 */
typedef enum {
    AUDIO_FORMAT_MP3_SUB_NONE = 0x0,
} audio_format_mp3_sub_fmt_t;
/* AMR NB/WB sub format field definition: specify frame block interleaving,
 * bandwidth efficient or octet aligned, encoding mode for recording...
 */
typedef enum {
    AUDIO_FORMAT_AMR_SUB_NONE = 0x0,
} audio_format_amr_sub_fmt_t;
/* AAC sub format field definition: specify profile or bitrate for recording... */
typedef enum {
    AUDIO_FORMAT_AAC_SUB_MAIN = 0x1,
    AUDIO_FORMAT_AAC_SUB_LC = 0x2,
    AUDIO_FORMAT_AAC_SUB_SSR = 0x4,
    AUDIO_FORMAT_AAC_SUB_LTP = 0x8,
    AUDIO_FORMAT_AAC_SUB_HE_V1 = 0x10,
    AUDIO_FORMAT_AAC_SUB_SCALABLE = 0x20,
    AUDIO_FORMAT_AAC_SUB_ERLC = 0x40,
    AUDIO_FORMAT_AAC_SUB_LD = 0x80,
    AUDIO_FORMAT_AAC_SUB_HE_V2 = 0x100,
    AUDIO_FORMAT_AAC_SUB_ELD = 0x200,
} audio_format_aac_sub_fmt_t;
/* VORBIS sub format field definition: specify quality for recording... */
typedef enum {
    AUDIO_FORMAT_VORBIS_SUB_NONE = 0x0,
} audio_format_vorbis_sub_fmt_t;
/* Audio format consists of a main format field (upper 8 bits) and a sub format
 * field (lower 24 bits).
 *
 * The main format indicates the main codec type. The sub format field
 * indicates options and parameters for each format. The sub format is mainly
 * used for record to indicate for instance the requested bitrate or profile.
 * It can also be used for certain formats to give informations not present in
 * the encoded audio stream (e.g. octet alignment for AMR).
 */
typedef enum {
    AUDIO_FORMAT_INVALID = 0xFFFFFFFFUL,
    AUDIO_FORMAT_DEFAULT = 0,
    AUDIO_FORMAT_PCM = 0x00000000UL, /* DO NOT CHANGE */
    AUDIO_FORMAT_MP3 = 0x01000000UL,
    AUDIO_FORMAT_AMR_NB = 0x02000000UL,
    AUDIO_FORMAT_AMR_WB = 0x03000000UL,
    AUDIO_FORMAT_AAC = 0x04000000UL,
    AUDIO_FORMAT_HE_AAC_V1 = 0x05000000UL, /* Deprecated, Use AUDIO_FORMAT_AAC_HE_V1*/
    AUDIO_FORMAT_HE_AAC_V2 = 0x06000000UL, /* Deprecated, Use AUDIO_FORMAT_AAC_HE_V2*/
    AUDIO_FORMAT_VORBIS = 0x07000000UL,
    AUDIO_FORMAT_OPUS = 0x08000000UL,
    AUDIO_FORMAT_AC3 = 0x09000000UL,
    AUDIO_FORMAT_E_AC3 = 0x0A000000UL,
    AUDIO_FORMAT_DTS = 0x0B000000UL,
    AUDIO_FORMAT_DTS_HD = 0x0C000000UL,
    AUDIO_FORMAT_MAIN_MASK = 0xFF000000UL,
    AUDIO_FORMAT_SUB_MASK = 0x00FFFFFFUL,
    /* Aliases */
    /* Composite values: a main format byte OR'd with a sub-format value. */
    /* note != AudioFormat.ENCODING_PCM_16BIT */
    AUDIO_FORMAT_PCM_16_BIT = (AUDIO_FORMAT_PCM |
                               AUDIO_FORMAT_PCM_SUB_16_BIT),
    /* note != AudioFormat.ENCODING_PCM_8BIT */
    AUDIO_FORMAT_PCM_8_BIT = (AUDIO_FORMAT_PCM |
                              AUDIO_FORMAT_PCM_SUB_8_BIT),
    AUDIO_FORMAT_PCM_32_BIT = (AUDIO_FORMAT_PCM |
                               AUDIO_FORMAT_PCM_SUB_32_BIT),
    AUDIO_FORMAT_PCM_8_24_BIT = (AUDIO_FORMAT_PCM |
                                 AUDIO_FORMAT_PCM_SUB_8_24_BIT),
    AUDIO_FORMAT_PCM_FLOAT = (AUDIO_FORMAT_PCM |
                              AUDIO_FORMAT_PCM_SUB_FLOAT),
    AUDIO_FORMAT_PCM_24_BIT_PACKED = (AUDIO_FORMAT_PCM |
                                      AUDIO_FORMAT_PCM_SUB_24_BIT_PACKED),
    AUDIO_FORMAT_AAC_MAIN = (AUDIO_FORMAT_AAC |
                             AUDIO_FORMAT_AAC_SUB_MAIN),
    AUDIO_FORMAT_AAC_LC = (AUDIO_FORMAT_AAC |
                           AUDIO_FORMAT_AAC_SUB_LC),
    AUDIO_FORMAT_AAC_SSR = (AUDIO_FORMAT_AAC |
                            AUDIO_FORMAT_AAC_SUB_SSR),
    AUDIO_FORMAT_AAC_LTP = (AUDIO_FORMAT_AAC |
                            AUDIO_FORMAT_AAC_SUB_LTP),
    AUDIO_FORMAT_AAC_HE_V1 = (AUDIO_FORMAT_AAC |
                              AUDIO_FORMAT_AAC_SUB_HE_V1),
    AUDIO_FORMAT_AAC_SCALABLE = (AUDIO_FORMAT_AAC |
                                 AUDIO_FORMAT_AAC_SUB_SCALABLE),
    AUDIO_FORMAT_AAC_ERLC = (AUDIO_FORMAT_AAC |
                             AUDIO_FORMAT_AAC_SUB_ERLC),
    AUDIO_FORMAT_AAC_LD = (AUDIO_FORMAT_AAC |
                           AUDIO_FORMAT_AAC_SUB_LD),
    AUDIO_FORMAT_AAC_HE_V2 = (AUDIO_FORMAT_AAC |
                              AUDIO_FORMAT_AAC_SUB_HE_V2),
    AUDIO_FORMAT_AAC_ELD = (AUDIO_FORMAT_AAC |
                            AUDIO_FORMAT_AAC_SUB_ELD),
} audio_format_t;
/* For the channel mask for position assignment representation */
/* Bit values used in the low AUDIO_CHANNEL_COUNT_MAX bits of an
 * audio_channel_mask_t with AUDIO_CHANNEL_REPRESENTATION_POSITION.
 * Output and input bit sets overlap numerically; which set applies
 * depends on whether the mask is used for playback or capture. */
enum {
    /* These can be a complete audio_channel_mask_t. */
    AUDIO_CHANNEL_NONE = 0x0,
    AUDIO_CHANNEL_INVALID = 0xC0000000,
    /* These can be the bits portion of an audio_channel_mask_t
     * with representation AUDIO_CHANNEL_REPRESENTATION_POSITION.
     * Using these bits as a complete audio_channel_mask_t is deprecated.
     */
    /* output channels */
    AUDIO_CHANNEL_OUT_FRONT_LEFT = 0x1,
    AUDIO_CHANNEL_OUT_FRONT_RIGHT = 0x2,
    AUDIO_CHANNEL_OUT_FRONT_CENTER = 0x4,
    AUDIO_CHANNEL_OUT_LOW_FREQUENCY = 0x8,
    AUDIO_CHANNEL_OUT_BACK_LEFT = 0x10,
    AUDIO_CHANNEL_OUT_BACK_RIGHT = 0x20,
    AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40,
    AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80,
    AUDIO_CHANNEL_OUT_BACK_CENTER = 0x100,
    AUDIO_CHANNEL_OUT_SIDE_LEFT = 0x200,
    AUDIO_CHANNEL_OUT_SIDE_RIGHT = 0x400,
    AUDIO_CHANNEL_OUT_TOP_CENTER = 0x800,
    AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT = 0x1000,
    AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER = 0x2000,
    AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT = 0x4000,
    AUDIO_CHANNEL_OUT_TOP_BACK_LEFT = 0x8000,
    AUDIO_CHANNEL_OUT_TOP_BACK_CENTER = 0x10000,
    AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT = 0x20000,
    /* REFINE: should these be considered complete channel masks, or only bits? */
    AUDIO_CHANNEL_OUT_MONO = AUDIO_CHANNEL_OUT_FRONT_LEFT,
    AUDIO_CHANNEL_OUT_STEREO = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
                                AUDIO_CHANNEL_OUT_FRONT_RIGHT),
    AUDIO_CHANNEL_OUT_QUAD = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
                              AUDIO_CHANNEL_OUT_FRONT_RIGHT |
                              AUDIO_CHANNEL_OUT_BACK_LEFT |
                              AUDIO_CHANNEL_OUT_BACK_RIGHT),
    AUDIO_CHANNEL_OUT_QUAD_BACK = AUDIO_CHANNEL_OUT_QUAD,
    /* like AUDIO_CHANNEL_OUT_QUAD_BACK with *_SIDE_* instead of *_BACK_* */
    AUDIO_CHANNEL_OUT_QUAD_SIDE = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
                                   AUDIO_CHANNEL_OUT_FRONT_RIGHT |
                                   AUDIO_CHANNEL_OUT_SIDE_LEFT |
                                   AUDIO_CHANNEL_OUT_SIDE_RIGHT),
    AUDIO_CHANNEL_OUT_5POINT1 = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
                                 AUDIO_CHANNEL_OUT_FRONT_RIGHT |
                                 AUDIO_CHANNEL_OUT_FRONT_CENTER |
                                 AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
                                 AUDIO_CHANNEL_OUT_BACK_LEFT |
                                 AUDIO_CHANNEL_OUT_BACK_RIGHT),
    AUDIO_CHANNEL_OUT_5POINT1_BACK = AUDIO_CHANNEL_OUT_5POINT1,
    /* like AUDIO_CHANNEL_OUT_5POINT1_BACK with *_SIDE_* instead of *_BACK_* */
    AUDIO_CHANNEL_OUT_5POINT1_SIDE = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
                                      AUDIO_CHANNEL_OUT_FRONT_RIGHT |
                                      AUDIO_CHANNEL_OUT_FRONT_CENTER |
                                      AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
                                      AUDIO_CHANNEL_OUT_SIDE_LEFT |
                                      AUDIO_CHANNEL_OUT_SIDE_RIGHT),
    // matches the correct AudioFormat.CHANNEL_OUT_7POINT1_SURROUND definition for 7.1
    AUDIO_CHANNEL_OUT_7POINT1 = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
                                 AUDIO_CHANNEL_OUT_FRONT_RIGHT |
                                 AUDIO_CHANNEL_OUT_FRONT_CENTER |
                                 AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
                                 AUDIO_CHANNEL_OUT_BACK_LEFT |
                                 AUDIO_CHANNEL_OUT_BACK_RIGHT |
                                 AUDIO_CHANNEL_OUT_SIDE_LEFT |
                                 AUDIO_CHANNEL_OUT_SIDE_RIGHT),
    AUDIO_CHANNEL_OUT_ALL = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
                             AUDIO_CHANNEL_OUT_FRONT_RIGHT |
                             AUDIO_CHANNEL_OUT_FRONT_CENTER |
                             AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
                             AUDIO_CHANNEL_OUT_BACK_LEFT |
                             AUDIO_CHANNEL_OUT_BACK_RIGHT |
                             AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER |
                             AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER |
                             AUDIO_CHANNEL_OUT_BACK_CENTER |
                             AUDIO_CHANNEL_OUT_SIDE_LEFT |
                             AUDIO_CHANNEL_OUT_SIDE_RIGHT |
                             AUDIO_CHANNEL_OUT_TOP_CENTER |
                             AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT |
                             AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER |
                             AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT |
                             AUDIO_CHANNEL_OUT_TOP_BACK_LEFT |
                             AUDIO_CHANNEL_OUT_TOP_BACK_CENTER |
                             AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT),
    /* These are bits only, not complete values */
    /* input channels */
    AUDIO_CHANNEL_IN_LEFT = 0x4,
    AUDIO_CHANNEL_IN_RIGHT = 0x8,
    AUDIO_CHANNEL_IN_FRONT = 0x10,
    AUDIO_CHANNEL_IN_BACK = 0x20,
    AUDIO_CHANNEL_IN_LEFT_PROCESSED = 0x40,
    AUDIO_CHANNEL_IN_RIGHT_PROCESSED = 0x80,
    AUDIO_CHANNEL_IN_FRONT_PROCESSED = 0x100,
    AUDIO_CHANNEL_IN_BACK_PROCESSED = 0x200,
    AUDIO_CHANNEL_IN_PRESSURE = 0x400,
    AUDIO_CHANNEL_IN_X_AXIS = 0x800,
    AUDIO_CHANNEL_IN_Y_AXIS = 0x1000,
    AUDIO_CHANNEL_IN_Z_AXIS = 0x2000,
    AUDIO_CHANNEL_IN_VOICE_UPLINK = 0x4000,
    AUDIO_CHANNEL_IN_VOICE_DNLINK = 0x8000,
    /* REFINE: should these be considered complete channel masks, or only bits, or deprecated? */
    AUDIO_CHANNEL_IN_MONO = AUDIO_CHANNEL_IN_FRONT,
    AUDIO_CHANNEL_IN_STEREO = (AUDIO_CHANNEL_IN_LEFT | AUDIO_CHANNEL_IN_RIGHT),
    AUDIO_CHANNEL_IN_FRONT_BACK = (AUDIO_CHANNEL_IN_FRONT | AUDIO_CHANNEL_IN_BACK),
    AUDIO_CHANNEL_IN_ALL = (AUDIO_CHANNEL_IN_LEFT |
                            AUDIO_CHANNEL_IN_RIGHT |
                            AUDIO_CHANNEL_IN_FRONT |
                            AUDIO_CHANNEL_IN_BACK |
                            AUDIO_CHANNEL_IN_LEFT_PROCESSED |
                            AUDIO_CHANNEL_IN_RIGHT_PROCESSED |
                            AUDIO_CHANNEL_IN_FRONT_PROCESSED |
                            AUDIO_CHANNEL_IN_BACK_PROCESSED |
                            AUDIO_CHANNEL_IN_PRESSURE |
                            AUDIO_CHANNEL_IN_X_AXIS |
                            AUDIO_CHANNEL_IN_Y_AXIS |
                            AUDIO_CHANNEL_IN_Z_AXIS |
                            AUDIO_CHANNEL_IN_VOICE_UPLINK |
                            AUDIO_CHANNEL_IN_VOICE_DNLINK),
};
/* A channel mask per se only defines the presence or absence of a channel, not the order.
* But see AUDIO_INTERLEAVE_* below for the platform convention of order.
*
* audio_channel_mask_t is an opaque type and its internal layout should not
* be assumed as it may change in the future.
* Instead, always use the functions declared in this header to examine.
*
* These are the current representations:
*
* AUDIO_CHANNEL_REPRESENTATION_POSITION
* is a channel mask representation for position assignment.
* Each low-order bit corresponds to the spatial position of a transducer (output),
* or interpretation of channel (input).
* The user of a channel mask needs to know the context of whether it is for output or input.
* The constants AUDIO_CHANNEL_OUT_* or AUDIO_CHANNEL_IN_* apply to the bits portion.
* It is not permitted for no bits to be set.
*
* AUDIO_CHANNEL_REPRESENTATION_INDEX
* is a channel mask representation for index assignment.
* Each low-order bit corresponds to a selected channel.
* There is no platform interpretation of the various bits.
* There is no concept of output or input.
* It is not permitted for no bits to be set.
*
* All other representations are reserved for future use.
*
* Warning: current representation distinguishes between input and output, but this will not the be
* case in future revisions of the platform. Wherever there is an ambiguity between input and output
* that is currently resolved by checking the channel mask, the implementer should look for ways to
* fix it with additional information outside of the mask.
*/
typedef uint32_t audio_channel_mask_t;
/* Maximum number of channels for all representations */
#define AUDIO_CHANNEL_COUNT_MAX 30
/* log(2) of maximum number of representations, not part of public API */
#define AUDIO_CHANNEL_REPRESENTATION_LOG2 2
/* Representations */
/* Stored in the bits above AUDIO_CHANNEL_COUNT_MAX of an audio_channel_mask_t
 * (see audio_channel_mask_get_representation). */
typedef enum {
    AUDIO_CHANNEL_REPRESENTATION_POSITION = 0, // must be zero for compatibility
    // 1 is reserved for future use
    AUDIO_CHANNEL_REPRESENTATION_INDEX = 2,
    // 3 is reserved for future use
} audio_channel_representation_t;
/* Extracts the low-order channel bits (positions or indices) of a mask.
 * The return value is undefined if the channel mask is invalid. */
static inline uint32_t audio_channel_mask_get_bits(audio_channel_mask_t channel) {
    const uint32_t bits_field = (1 << AUDIO_CHANNEL_COUNT_MAX) - 1;
    return channel & bits_field;
}
/* Extracts the representation field of a channel mask.
 * The return value is undefined if the channel mask is invalid. */
static inline audio_channel_representation_t audio_channel_mask_get_representation(
    audio_channel_mask_t channel) {
    // Shift the representation field down, then mask for safety in case the
    // mask type is ever wider than 32 bits.
    uint32_t rep = channel >> AUDIO_CHANNEL_COUNT_MAX;
    rep &= (1 << AUDIO_CHANNEL_REPRESENTATION_LOG2) - 1;
    return (audio_channel_representation_t)rep;
}
/* Returns the number of channels from an output channel mask,
 * used in the context of audio output or playback.
 * If a channel bit is set which could _not_ correspond to an output channel,
 * it is excluded from the count.
 * Returns zero if the representation is invalid.
 */
static inline uint32_t audio_channel_count_from_out_mask(audio_channel_mask_t channel) {
    uint32_t bits = audio_channel_mask_get_bits(channel);
    switch (audio_channel_mask_get_representation(channel)) {
        case AUDIO_CHANNEL_REPRESENTATION_POSITION:
            // REFINE: We can now merge with from_in_mask and remove anding
            bits &= AUDIO_CHANNEL_OUT_ALL;
            // fall through: after filtering to valid output positions, the
            // count is the number of set bits, same as for INDEX masks.
        case AUDIO_CHANNEL_REPRESENTATION_INDEX:
            return popcount(bits);
        default:
            return 0;
    }
}
/* Returns true if `format` is recognized: for PCM the exact sub-format must
 * be one of the known layouts; for compressed codecs only the main-format
 * byte is checked (so any AAC sub-format alias is accepted, for instance). */
static inline bool audio_is_valid_format(audio_format_t format) {
    switch (format & AUDIO_FORMAT_MAIN_MASK) {
        case AUDIO_FORMAT_PCM:
            switch (format) {
                case AUDIO_FORMAT_PCM_16_BIT:
                case AUDIO_FORMAT_PCM_8_BIT:
                case AUDIO_FORMAT_PCM_32_BIT:
                case AUDIO_FORMAT_PCM_8_24_BIT:
                case AUDIO_FORMAT_PCM_FLOAT:
                case AUDIO_FORMAT_PCM_24_BIT_PACKED:
                    return true;
                default:
                    return false;
            }
        /* not reached */
        case AUDIO_FORMAT_MP3:
        case AUDIO_FORMAT_AMR_NB:
        case AUDIO_FORMAT_AMR_WB:
        case AUDIO_FORMAT_AAC:
        case AUDIO_FORMAT_HE_AAC_V1:
        case AUDIO_FORMAT_HE_AAC_V2:
        case AUDIO_FORMAT_VORBIS:
        case AUDIO_FORMAT_OPUS:
        case AUDIO_FORMAT_AC3:
        case AUDIO_FORMAT_E_AC3:
        case AUDIO_FORMAT_DTS:
        case AUDIO_FORMAT_DTS_HD:
            return true;
        default:
            return false;
    }
}
/* True for all PCM encodings, i.e. when the main-format byte is AUDIO_FORMAT_PCM. */
static inline bool audio_is_linear_pcm(audio_format_t format) {
    return ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM);
}
/* Bytes occupied by one sample of the given PCM format; 0 for non-PCM or
 * unrecognized formats. */
static inline size_t audio_bytes_per_sample(audio_format_t format) {
    switch (format) {
        case AUDIO_FORMAT_PCM_32_BIT:
        case AUDIO_FORMAT_PCM_8_24_BIT:
            return sizeof(int32_t);
        case AUDIO_FORMAT_PCM_24_BIT_PACKED:
            return sizeof(uint8_t) * 3;
        case AUDIO_FORMAT_PCM_16_BIT:
            return sizeof(int16_t);
        case AUDIO_FORMAT_PCM_8_BIT:
            return sizeof(uint8_t);
        case AUDIO_FORMAT_PCM_FLOAT:
            return sizeof(float);
        default:
            return 0;
    }
}
/* Not part of public API */
/* Packs a representation tag (stored above bit AUDIO_CHANNEL_COUNT_MAX) and
 * the channel bits into a single audio_channel_mask_t. */
static inline audio_channel_mask_t audio_channel_mask_from_representation_and_bits(
    audio_channel_representation_t representation, uint32_t bits) {
    return (audio_channel_mask_t)((representation << AUDIO_CHANNEL_COUNT_MAX) | bits);
}
/* Derive an output channel mask for position assignment from a channel count.
* This is to be used when the content channel mask is unknown. The 1, 2, 4, 5, 6, 7 and 8 channel
* cases are mapped to the standard game/home-theater layouts, but note that 4 is mapped to quad,
* and not stereo + FC + mono surround. A channel count of 3 is arbitrarily mapped to stereo + FC
* for continuity with stereo.
* Returns the matching channel mask,
* or AUDIO_CHANNEL_NONE if the channel count is zero,
* or AUDIO_CHANNEL_INVALID if the channel count exceeds that of the
* configurations for which a default output channel mask is defined.
*/
static inline audio_channel_mask_t audio_channel_out_mask_from_count(uint32_t channel_count) {
    // Map the count to the standard game/home-theater position layout
    // (see the comment block above for the full contract).
    uint32_t bits;
    switch (channel_count) {
        case 0:
            return AUDIO_CHANNEL_NONE;
        case 1:
            bits = AUDIO_CHANNEL_OUT_MONO;
            break;
        case 2:
            bits = AUDIO_CHANNEL_OUT_STEREO;
            break;
        case 3:
            // Arbitrary choice: stereo + front-center, for continuity with stereo.
            bits = AUDIO_CHANNEL_OUT_STEREO | AUDIO_CHANNEL_OUT_FRONT_CENTER;
            break;
        case 4: // 4.0
            bits = AUDIO_CHANNEL_OUT_QUAD;
            break;
        case 5: // 5.0
            bits = AUDIO_CHANNEL_OUT_QUAD | AUDIO_CHANNEL_OUT_FRONT_CENTER;
            break;
        case 6: // 5.1
            bits = AUDIO_CHANNEL_OUT_5POINT1;
            break;
        case 7: // 6.1
            bits = AUDIO_CHANNEL_OUT_5POINT1 | AUDIO_CHANNEL_OUT_BACK_CENTER;
            break;
        case 8:
            bits = AUDIO_CHANNEL_OUT_7POINT1;
            break;
        // IDEA: FCC_8
        default:
            return AUDIO_CHANNEL_INVALID;
    }
    return audio_channel_mask_from_representation_and_bits(
        AUDIO_CHANNEL_REPRESENTATION_POSITION, bits);
}

View File

@@ -0,0 +1,48 @@
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef COCOS_CUTILS_BITOPS_H
#define COCOS_CUTILS_BITOPS_H
#include <stdbool.h>
#include <string.h>
#include <strings.h>
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#include <sys/cdefs.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Number of set bits in x (wraps the GCC/Clang builtin). */
static inline int popcount(unsigned int x) {
    return __builtin_popcount(x);
}
/* Number of set bits in x, for unsigned long operands. */
static inline int popcountl(unsigned long x) {
    return __builtin_popcountl(x);
}
/* Number of set bits in x, for unsigned long long operands. */
static inline int popcountll(unsigned long long x) {
    return __builtin_popcountll(x);
}
#ifdef __cplusplus
}
#endif
#endif /* COCOS_CUTILS_BITOPS_H */

View File

@@ -0,0 +1,597 @@
/*
* Copyright (C) 2005-2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// C/C++ logging functions. See the logging documentation for API details.
//
// We'd like these to be available from C code (in case we import some from
// somewhere), so this has a C interface.
//
// The output will be correct when the log file is shared between multiple
// threads and/or multiple processes so long as the operating system
// supports O_APPEND. These calls have mutex-protected data structures
// and so are NOT reentrant. Do not use LOG in a signal handler.
//
#ifndef COCOS_CUTILS_LOG_H
#define COCOS_CUTILS_LOG_H
#include <stdarg.h>
#include <stdio.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#include <android/log.h>
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
// TODO(qgh):May be implemented in later versions
// #include <Hilog/log.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ---------------------------------------------------------------------
/*
* Normally we strip ALOGV (VERBOSE messages) from release builds.
* You can modify this (for example with "#define LOG_NDEBUG 0"
* at the top of your source file) to change that behavior.
*/
#ifndef LOG_NDEBUG
#if defined(CC_DEBUG) && CC_DEBUG > 0
#define LOG_NDEBUG 0
#else
#define LOG_NDEBUG 1
#endif
#endif
/*
* This is the local tag used for the following simplified
* logging macros. You can change this preprocessor definition
* before using the other macros to change the tag.
*/
#ifndef LOG_TAG
#define LOG_TAG NULL
#endif
// ---------------------------------------------------------------------
#ifndef __predict_false
#define __predict_false(exp) __builtin_expect((exp) != 0, 0)
#endif
/*
* -DLINT_RLOG in sources that you want to enforce that all logging
* goes to the radio log buffer. If any logging goes to any of the other
* log buffers, there will be a compile or link error to highlight the
* problem. This is not a replacement for a full audit of the code since
* this only catches compiled code, not ifdef'd debug code. Options to
* defining this, either temporarily to do a spot check, or permanently
* to enforce, in all the communications trees; We have hopes to ensure
* that by supplying just the radio log buffer that the communications
* teams will have their one-stop shop for triaging issues.
*/
#ifndef LINT_RLOG
/*
* Simplified macro to send a verbose log message using the current LOG_TAG.
*/
#ifndef ALOGV
#define __ALOGV(...) ((void)ALOG(LOG_VERBOSE, LOG_TAG, __VA_ARGS__))
#if LOG_NDEBUG
#define ALOGV(...) \
do { \
if (0) { \
__ALOGV(__VA_ARGS__); \
} \
} while (0)
#else
#define ALOGV(...) __ALOGV(__VA_ARGS__)
#endif
#endif
#ifndef ALOGV_IF
#if LOG_NDEBUG
#define ALOGV_IF(cond, ...) ((void)0)
#else
#define ALOGV_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)ALOG(LOG_VERBOSE, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
#endif
/*
* Simplified macro to send a debug log message using the current LOG_TAG.
*/
#ifndef ALOGD
#define ALOGD(...) ((void)ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#endif
#ifndef ALOGD_IF
#define ALOGD_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
/*
* Simplified macro to send an info log message using the current LOG_TAG.
*/
#ifndef ALOGI
#define ALOGI(...) ((void)ALOG(LOG_INFO, LOG_TAG, __VA_ARGS__))
#endif
#ifndef ALOGI_IF
#define ALOGI_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)ALOG(LOG_INFO, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
/*
* Simplified macro to send a warning log message using the current LOG_TAG.
*/
#ifndef ALOGW
#define ALOGW(...) ((void)ALOG(LOG_WARN, LOG_TAG, __VA_ARGS__))
#endif
#ifndef ALOGW_IF
#define ALOGW_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)ALOG(LOG_WARN, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
/*
* Simplified macro to send an error log message using the current LOG_TAG.
*/
#ifndef ALOGE
#define ALOGE(...) ((void)ALOG(LOG_ERROR, LOG_TAG, __VA_ARGS__))
#endif
#ifndef ALOGE_IF
#define ALOGE_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)ALOG(LOG_ERROR, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
// ---------------------------------------------------------------------
/*
* Conditional based on whether the current LOG_TAG is enabled at
* verbose priority.
*/
#ifndef IF_ALOGV
#if LOG_NDEBUG
#define IF_ALOGV() if (false)
#else
#define IF_ALOGV() IF_ALOG(LOG_VERBOSE, LOG_TAG)
#endif
#endif
/*
* Conditional based on whether the current LOG_TAG is enabled at
* debug priority.
*/
#ifndef IF_ALOGD
#define IF_ALOGD() IF_ALOG(LOG_DEBUG, LOG_TAG)
#endif
/*
* Conditional based on whether the current LOG_TAG is enabled at
* info priority.
*/
#ifndef IF_ALOGI
#define IF_ALOGI() IF_ALOG(LOG_INFO, LOG_TAG)
#endif
/*
* Conditional based on whether the current LOG_TAG is enabled at
* warn priority.
*/
#ifndef IF_ALOGW
#define IF_ALOGW() IF_ALOG(LOG_WARN, LOG_TAG)
#endif
/*
* Conditional based on whether the current LOG_TAG is enabled at
* error priority.
*/
#ifndef IF_ALOGE
#define IF_ALOGE() IF_ALOG(LOG_ERROR, LOG_TAG)
#endif
// ---------------------------------------------------------------------
/*
* Simplified macro to send a verbose system log message using the current LOG_TAG.
*/
#ifndef SLOGV
#define __SLOGV(...) \
((void)__android_log_buf_print(LOG_ID_SYSTEM, ANDROID_LOG_VERBOSE, LOG_TAG, __VA_ARGS__))
#if LOG_NDEBUG
#define SLOGV(...) \
do { \
if (0) { \
__SLOGV(__VA_ARGS__); \
} \
} while (0)
#else
#define SLOGV(...) __SLOGV(__VA_ARGS__)
#endif
#endif
#ifndef SLOGV_IF
#if LOG_NDEBUG
#define SLOGV_IF(cond, ...) ((void)0)
#else
#define SLOGV_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)__android_log_buf_print(LOG_ID_SYSTEM, ANDROID_LOG_VERBOSE, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
#endif
/*
* Simplified macro to send a debug system log message using the current LOG_TAG.
*/
#ifndef SLOGD
#define SLOGD(...) \
((void)__android_log_buf_print(LOG_ID_SYSTEM, ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#endif
#ifndef SLOGD_IF
#define SLOGD_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)__android_log_buf_print(LOG_ID_SYSTEM, ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
/*
* Simplified macro to send an info system log message using the current LOG_TAG.
*/
#ifndef SLOGI
#define SLOGI(...) \
((void)__android_log_buf_print(LOG_ID_SYSTEM, ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__))
#endif
#ifndef SLOGI_IF
#define SLOGI_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)__android_log_buf_print(LOG_ID_SYSTEM, ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
/*
* Simplified macro to send a warning system log message using the current LOG_TAG.
*/
#ifndef SLOGW
#define SLOGW(...) \
((void)__android_log_buf_print(LOG_ID_SYSTEM, ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__))
#endif
#ifndef SLOGW_IF
#define SLOGW_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)__android_log_buf_print(LOG_ID_SYSTEM, ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
/*
* Simplified macro to send an error system log message using the current LOG_TAG.
*/
#ifndef SLOGE
#define SLOGE(...) \
((void)__android_log_buf_print(LOG_ID_SYSTEM, ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__))
#endif
#ifndef SLOGE_IF
#define SLOGE_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)__android_log_buf_print(LOG_ID_SYSTEM, ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
#endif /* !LINT_RLOG */
// ---------------------------------------------------------------------
/*
* Simplified macro to send a verbose radio log message using the current LOG_TAG.
*/
#ifndef RLOGV
#define __RLOGV(...) \
((void)__android_log_buf_print(LOG_ID_RADIO, ANDROID_LOG_VERBOSE, LOG_TAG, __VA_ARGS__))
#if LOG_NDEBUG
#define RLOGV(...) \
do { \
if (0) { \
__RLOGV(__VA_ARGS__); \
} \
} while (0)
#else
#define RLOGV(...) __RLOGV(__VA_ARGS__)
#endif
#endif
#ifndef RLOGV_IF
#if LOG_NDEBUG
#define RLOGV_IF(cond, ...) ((void)0)
#else
#define RLOGV_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)__android_log_buf_print(LOG_ID_RADIO, ANDROID_LOG_VERBOSE, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
#endif
/*
* Simplified macro to send a debug radio log message using the current LOG_TAG.
*/
#ifndef RLOGD
#define RLOGD(...) \
((void)__android_log_buf_print(LOG_ID_RADIO, ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#endif
#ifndef RLOGD_IF
#define RLOGD_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)__android_log_buf_print(LOG_ID_RADIO, ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
/*
* Simplified macro to send an info radio log message using the current LOG_TAG.
*/
#ifndef RLOGI
#define RLOGI(...) \
((void)__android_log_buf_print(LOG_ID_RADIO, ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__))
#endif
#ifndef RLOGI_IF
#define RLOGI_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)__android_log_buf_print(LOG_ID_RADIO, ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
/*
* Simplified macro to send a warning radio log message using the current LOG_TAG.
*/
#ifndef RLOGW
#define RLOGW(...) \
((void)__android_log_buf_print(LOG_ID_RADIO, ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__))
#endif
#ifndef RLOGW_IF
#define RLOGW_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)__android_log_buf_print(LOG_ID_RADIO, ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
/*
* Simplified macro to send an error radio log message using the current LOG_TAG.
*/
#ifndef RLOGE
#define RLOGE(...) \
((void)__android_log_buf_print(LOG_ID_RADIO, ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__))
#endif
#ifndef RLOGE_IF
#define RLOGE_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)__android_log_buf_print(LOG_ID_RADIO, ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)) \
: (void)0)
#endif
// ---------------------------------------------------------------------
/*
* Log a fatal error. If the given condition fails, this stops program
* execution like a normal assertion, but also generating the given message.
* It is NOT stripped from release builds. Note that the condition test
* is -inverted- from the normal assert() semantics.
*/
#ifndef LOG_ALWAYS_FATAL_IF
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#define LOG_ALWAYS_FATAL_IF(cond, ...) \
((__predict_false(cond)) \
? ((void)android_printAssert(#cond, LOG_TAG, ##__VA_ARGS__)) \
: (void)0)
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
#define LOG_ALWAYS_FATAL_IF(cond, ...)
#endif
#endif
#ifndef LOG_ALWAYS_FATAL
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#define LOG_ALWAYS_FATAL(...) \
(((void)android_printAssert(NULL, LOG_TAG, ##__VA_ARGS__)))
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
#define LOG_ALWAYS_FATAL(...)
#endif
#endif
/*
* Versions of LOG_ALWAYS_FATAL_IF and LOG_ALWAYS_FATAL that
* are stripped out of release builds.
*/
#if LOG_NDEBUG
#ifndef LOG_FATAL_IF
#define LOG_FATAL_IF(cond, ...) ((void)0)
#endif
#ifndef LOG_FATAL
#define LOG_FATAL(...) ((void)0)
#endif
#else
#ifndef LOG_FATAL_IF
#define LOG_FATAL_IF(cond, ...) LOG_ALWAYS_FATAL_IF(cond, ##__VA_ARGS__)
#endif
#ifndef LOG_FATAL
#define LOG_FATAL(...) LOG_ALWAYS_FATAL(__VA_ARGS__)
#endif
#endif
/*
* Assertion that generates a log message when the assertion fails.
* Stripped out of release builds. Uses the current LOG_TAG.
*/
#ifndef ALOG_ASSERT
#define ALOG_ASSERT(cond, ...) LOG_FATAL_IF(!(cond), ##__VA_ARGS__)
//#define ALOG_ASSERT(cond) LOG_FATAL_IF(!(cond), "Assertion failed: " #cond)
#endif
// ---------------------------------------------------------------------
/*
* Basic log message macro.
*
* Example:
* ALOG(LOG_WARN, NULL, "Failed with error %d", errno);
*
* The second argument may be NULL or "" to indicate the "global" tag.
*/
#ifndef ALOG
#define ALOG(priority, tag, ...) \
LOG_PRI(ANDROID_##priority, tag, __VA_ARGS__)
#endif
/*
* Log macro that allows you to specify a number for the priority.
*/
#ifndef LOG_PRI
#if CC_PLATFORM == CC_PLATFORM_ANDROID
#define LOG_PRI(priority, tag, ...) \
android_printLog(priority, tag, __VA_ARGS__)
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
#define LOG_PRI(priority, tag, ...) ((void)0)
#endif
#endif
/*
* Log macro that allows you to pass in a varargs ("args" is a va_list).
*/
#ifndef LOG_PRI_VA
#define LOG_PRI_VA(priority, tag, fmt, args) \
android_vprintLog(priority, NULL, tag, fmt, args)
#endif
/*
* Conditional given a desired logging priority and tag.
*/
#ifndef IF_ALOG
#define IF_ALOG(priority, tag) \
if (android_testLog(ANDROID_##priority, tag))
#endif
// ---------------------------------------------------------------------
/*
* ===========================================================================
*
* The stuff in the rest of this file should not be used directly.
*/
#define android_printLog(prio, tag, ...) \
__android_log_print(prio, tag, __VA_ARGS__)
#define android_vprintLog(prio, cond, tag, ...) \
__android_log_vprint(prio, tag, __VA_ARGS__)
/* XXX Macros to work around syntax errors in places where format string
* arg is not passed to ALOG_ASSERT, LOG_ALWAYS_FATAL or LOG_ALWAYS_FATAL_IF
* (happens only in debug builds).
*/
/* Returns 2nd arg. Used to substitute default value if caller's vararg list
* is empty.
*/
#define __android_second(dummy, second, ...) second
/* If passed multiple args, returns ',' followed by all but 1st arg, otherwise
* returns nothing.
*/
#define __android_rest(first, ...) , ##__VA_ARGS__
#define android_printAssert(cond, tag, ...) \
__android_log_assert(cond, tag, \
__android_second(0, ##__VA_ARGS__, NULL) __android_rest(__VA_ARGS__))
#define android_writeLog(prio, tag, text) \
__android_log_write(prio, tag, text)
#define android_bWriteLog(tag, payload, len) \
__android_log_bwrite(tag, payload, len)
#define android_btWriteLog(tag, type, payload, len) \
__android_log_btwrite(tag, type, payload, len)
#define android_errorWriteLog(tag, subTag) \
__android_log_error_write(tag, subTag, -1, NULL, 0)
#define android_errorWriteWithInfoLog(tag, subTag, uid, data, dataLen) \
__android_log_error_write(tag, subTag, uid, data, dataLen)
/*
* IF_ALOG uses android_testLog, but IF_ALOG can be overridden.
* android_testLog will remain constant in its purpose as a wrapper
* for Android logging filter policy, and can be subject to
* change. It can be reused by the developers that override
* IF_ALOG as a convenient means to reimplement their policy
* over Android.
*/
#if LOG_NDEBUG /* Production */
#define android_testLog(prio, tag) \
(__android_log_is_loggable(prio, tag, ANDROID_LOG_DEBUG) != 0)
#else
#define android_testLog(prio, tag) \
(__android_log_is_loggable(prio, tag, ANDROID_LOG_VERBOSE) != 0)
#endif
/*
* Use the per-tag properties "log.tag.<tagname>" to generate a runtime
* result of non-zero to expose a log. prio is ANDROID_LOG_VERBOSE to
* ANDROID_LOG_FATAL. default_prio if no property. Undefined behavior if
* any other value.
*/
int __android_log_is_loggable(int prio, const char *tag, int default_prio);
int __android_log_security(); /* Device Owner is present */
int __android_log_error_write(int tag, const char *subTag, int32_t uid, const char *data,
uint32_t dataLen);
/*
* Send a simple string to the log.
*/
int __android_log_buf_write(int bufID, int prio, const char *tag, const char *text);
int __android_log_buf_print(int bufID, int prio, const char *tag, const char *fmt, ...)
#if defined(__GNUC__)
__attribute__((__format__(printf, 4, 5)))
#endif
;
#ifdef __cplusplus
}
#endif
#endif /* COCOS_CUTILS_LOG_H */

View File

@@ -0,0 +1,525 @@
/*
* Copyright (C) 2014 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#define LOG_TAG "mp3reader"
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h> // Resolves that memset, memcpy aren't found while APP_PLATFORM >= 22 on Android
#include <vector>
#include "audio/android/cutils/log.h"
#include "audio/android/mp3reader.h"
#include "pvmp3decoder_api.h"
// Read a 32-bit big-endian value from 'ptr'.
// Each byte is widened to uint32_t before shifting: the original code let
// ptr[0] promote to (signed) int, so any value >= 0x80 shifted a bit into
// the sign position — undefined behavior before C++20.
static uint32_t U32_AT(const uint8_t *ptr) {
    return (uint32_t)ptr[0] << 24 | (uint32_t)ptr[1] << 16 | (uint32_t)ptr[2] << 8 | ptr[3];
}
/*
 * Parse a 32-bit MPEG audio frame header (big-endian, as packed by U32_AT)
 * and derive the frame's properties.
 *
 * header            the four sync/description bytes.
 * frame_size        out: total frame length in bytes (0 on failure).
 * out_sampling_rate optional out: sampling rate in Hz.
 * out_channels      optional out: 1 for mono, 2 otherwise.
 * out_bitrate       optional out: bitrate in kbps.
 * out_num_samples   optional out: PCM samples per frame.
 *
 * Returns true when the header is a syntactically valid MPEG V1/V2/V2.5
 * Layer I/II/III header, false otherwise.
 */
static bool parseHeader(
    uint32_t header, size_t *frame_size,
    uint32_t *out_sampling_rate = NULL, uint32_t *out_channels = NULL,
    uint32_t *out_bitrate = NULL, uint32_t *out_num_samples = NULL) {
    // Reset all outputs so failure paths leave deterministic values.
    *frame_size = 0;
    if (out_sampling_rate) {
        *out_sampling_rate = 0;
    }
    if (out_channels) {
        *out_channels = 0;
    }
    if (out_bitrate) {
        *out_bitrate = 0;
    }
    if (out_num_samples) {
        *out_num_samples = 1152;
    }
    // The top 11 bits must all be set (frame sync word).
    if ((header & 0xffe00000) != 0xffe00000) {
        return false;
    }
    // Version field: 3 = MPEG V1, 2 = V2, 0 = V2.5; 1 is reserved.
    unsigned version = (header >> 19) & 3;
    if (version == 0x01) {
        return false;
    }
    // Layer field: 3 = Layer I, 2 = Layer II, 1 = Layer III; 0 is reserved.
    unsigned layer = (header >> 17) & 3;
    if (layer == 0x00) {
        return false;
    }
    unsigned bitrate_index = (header >> 12) & 0x0f;
    if (bitrate_index == 0 || bitrate_index == 0x0f) {
        // Disallow "free" bitrate.
        return false;
    }
    unsigned sampling_rate_index = (header >> 10) & 3;
    if (sampling_rate_index == 3) {
        return false;
    }
    // V1 base rates; V2 halves them and V2.5 quarters them.
    static const int kSamplingRateV1[] = {44100, 48000, 32000};
    int sampling_rate = kSamplingRateV1[sampling_rate_index];
    if (version == 2 /* V2 */) {
        sampling_rate /= 2;
    } else if (version == 0 /* V2.5 */) {
        sampling_rate /= 4;
    }
    unsigned padding = (header >> 9) & 1;
    if (layer == 3) {
        // layer I
        static const int kBitrateV1[] = {
            32, 64, 96, 128, 160, 192, 224, 256,
            288, 320, 352, 384, 416, 448};
        static const int kBitrateV2[] = {
            32, 48, 56, 64, 80, 96, 112, 128,
            144, 160, 176, 192, 224, 256};
        int bitrate =
            (version == 3 /* V1 */)
                ? kBitrateV1[bitrate_index - 1]
                : kBitrateV2[bitrate_index - 1];
        if (out_bitrate) {
            *out_bitrate = bitrate;
        }
        // Layer I frames carry 384 samples and are padded in 4-byte slots.
        *frame_size = (12000 * bitrate / sampling_rate + padding) * 4;
        if (out_num_samples) {
            *out_num_samples = 384;
        }
    } else {
        // layer II or III
        static const int kBitrateV1L2[] = {
            32, 48, 56, 64, 80, 96, 112, 128,
            160, 192, 224, 256, 320, 384};
        static const int kBitrateV1L3[] = {
            32, 40, 48, 56, 64, 80, 96, 112,
            128, 160, 192, 224, 256, 320};
        static const int kBitrateV2[] = {
            8, 16, 24, 32, 40, 48, 56, 64,
            80, 96, 112, 128, 144, 160};
        int bitrate;
        if (version == 3 /* V1 */) {
            bitrate = (layer == 2 /* L2 */)
                          ? kBitrateV1L2[bitrate_index - 1]
                          : kBitrateV1L3[bitrate_index - 1];
            if (out_num_samples) {
                *out_num_samples = 1152;
            }
        } else {
            // V2 (or 2.5)
            bitrate = kBitrateV2[bitrate_index - 1];
            if (out_num_samples) {
                // V2/V2.5 Layer III frames are half-length (576 samples).
                *out_num_samples = (layer == 1 /* L3 */) ? 576 : 1152;
            }
        }
        if (out_bitrate) {
            *out_bitrate = bitrate;
        }
        if (version == 3 /* V1 */) {
            *frame_size = 144000 * bitrate / sampling_rate + padding;
        } else {
            // V2 or V2.5
            size_t tmp = (layer == 1 /* L3 */) ? 72000 : 144000;
            *frame_size = tmp * bitrate / sampling_rate + padding;
        }
    }
    if (out_sampling_rate) {
        *out_sampling_rate = sampling_rate;
    }
    if (out_channels) {
        // Channel-mode 3 is single-channel (mono); all other modes are 2-ch.
        int channel_mode = (header >> 6) & 3;
        *out_channels = (channel_mode == 3) ? 1 : 2;
    }
    return true;
}
// Mask to extract the version, layer, sampling rate parts of the MP3 header,
// which should be same for all MP3 frames.
static const uint32_t kMask = 0xfffe0c00;
// Seek the data source to 'offset' and read up to 'size' bytes into 'data'.
// Returns the number of bytes read, or 0 when the seek fails.
static ssize_t sourceReadAt(mp3_callbacks *callback, void *source, off64_t offset, void *data, size_t size) {
    // A failed seek is reported as zero bytes read.
    if (callback->seek(source, offset, SEEK_SET) != EXIT_SUCCESS) {
        return 0;
    }
    return callback->read(data, 1, size, source);
}
// Resync to next valid MP3 frame in the file.
//
// Scans forward from *inout_pos looking for a header that (a) parses and
// (b) is followed by FRAME_MATCH_REQUIRED further headers whose fixed bits
// (kMask) agree, at the offsets implied by each frame's size.  On success
// *inout_pos is set to the frame's byte offset and *out_header (if non-NULL)
// to the header value.  match_header == 0 accepts any fixed bits.
static bool resync(
    mp3_callbacks *callback, void *source, uint32_t match_header,
    off64_t *inout_pos, uint32_t *out_header) {
    if (*inout_pos == 0) {
        // Skip an optional ID3 header if syncing at the very beginning
        // of the datasource.
        for (;;) {
            uint8_t id3header[10];
            int retVal = sourceReadAt(callback, source, *inout_pos, id3header,
                                      sizeof(id3header));
            if (retVal < (ssize_t)sizeof(id3header)) {
                // If we can't even read these 10 bytes, we might as well bail
                // out, even if there _were_ 10 bytes of valid mp3 audio data...
                return false;
            }
            if (memcmp("ID3", id3header, 3)) {
                break;
            }
            // Skip the ID3v2 header. The tag length is stored as four
            // 7-bit ("synchsafe") bytes, excluding the 10-byte header.
            size_t len =
                ((id3header[6] & 0x7f) << 21) | ((id3header[7] & 0x7f) << 14) | ((id3header[8] & 0x7f) << 7) | (id3header[9] & 0x7f);
            len += 10;
            *inout_pos += len;
            ALOGV("skipped ID3 tag, new starting offset is %lld (0x%016llx)",
                  (long long)*inout_pos, (long long)*inout_pos);
        }
    }
    off64_t pos = *inout_pos;
    bool valid = false;
    const int32_t kMaxReadBytes = 1024;
    const int32_t kMaxBytesChecked = 128 * 1024;
    uint8_t buf[kMaxReadBytes];
    ssize_t bytesToRead = kMaxReadBytes;
    ssize_t totalBytesRead = 0;
    ssize_t remainingBytes = 0;
    bool reachEOS = false;
    // 'tmp' walks through the sliding window in 'buf', one byte per
    // candidate sync position.
    uint8_t *tmp = buf;
    do {
        if (pos >= (off64_t)(*inout_pos + kMaxBytesChecked)) {
            // Don't scan forever.
            ALOGV("giving up at offset %lld", (long long)pos);
            break;
        }
        if (remainingBytes < 4) {
            // Fewer than 4 unconsumed bytes left: refill the window by
            // moving the tail to the front and reading the next chunk.
            if (reachEOS) {
                break;
            } else {
                memcpy(buf, tmp, remainingBytes);
                bytesToRead = kMaxReadBytes - remainingBytes;
                /*
                 * The next read position should start from the end of
                 * the last buffer, and thus should include the remaining
                 * bytes in the buffer.
                 */
                totalBytesRead = sourceReadAt(callback, source, pos + remainingBytes,
                                              buf + remainingBytes, bytesToRead);
                if (totalBytesRead <= 0) {
                    break;
                }
                reachEOS = (totalBytesRead != bytesToRead);
                remainingBytes += totalBytesRead;
                tmp = buf;
                continue;
            }
        }
        uint32_t header = U32_AT(tmp);
        if (match_header != 0 && (header & kMask) != (match_header & kMask)) {
            ++pos;
            ++tmp;
            --remainingBytes;
            continue;
        }
        size_t frame_size;
        uint32_t sample_rate, num_channels, bitrate;
        if (!parseHeader(
                header, &frame_size,
                &sample_rate, &num_channels, &bitrate)) {
            ++pos;
            ++tmp;
            --remainingBytes;
            continue;
        }
        // ALOGV("found possible 1st frame at %lld (header = 0x%08x)", (long long)pos, header);
        // We found what looks like a valid frame,
        // now find its successors.
        off64_t test_pos = pos + frame_size;
        valid = true;
        const int FRAME_MATCH_REQUIRED = 3;
        for (int j = 0; j < FRAME_MATCH_REQUIRED; ++j) {
            // NOTE: this 4-byte array intentionally shadows the outer
            // scan pointer 'tmp' for the duration of the verification loop.
            uint8_t tmp[4];
            ssize_t retval = sourceReadAt(callback, source, test_pos, tmp, sizeof(tmp));
            if (retval < (ssize_t)sizeof(tmp)) {
                valid = false;
                break;
            }
            uint32_t test_header = U32_AT(tmp);
            ALOGV("subsequent header is %08x", test_header);
            if ((test_header & kMask) != (header & kMask)) {
                valid = false;
                break;
            }
            size_t test_frame_size;
            if (!parseHeader(test_header, &test_frame_size)) {
                valid = false;
                break;
            }
            ALOGV("found subsequent frame #%d at %lld", j + 2, (long long)test_pos);
            test_pos += test_frame_size;
        }
        if (valid) {
            *inout_pos = pos;
            if (out_header != NULL) {
                *out_header = header;
            }
        } else {
            ALOGV("no dice, no valid sequence of frames found.");
        }
        // Advance one byte; when 'valid' is set the loop condition ends
        // the scan and *inout_pos already holds the matched offset.
        ++pos;
        ++tmp;
        --remainingBytes;
    } while (!valid);
    return valid;
}
// Construct an idle reader. All members are zero-initialized so accessors
// such as getSampleRate()/getNumChannels() return 0 — rather than reading
// indeterminate values — if called before init() succeeds (the original
// only initialized mSource and mCallback).
Mp3Reader::Mp3Reader() : mSource(NULL), mCallback(NULL), mFixedHeader(0), mCurrentPos(0), mSampleRate(0), mNumChannels(0), mBitrate(0) {
}
// Initialize the MP3 reader: bind the data source and its I/O callbacks,
// locate the first valid frame, and cache that frame's header fields.
// Returns false when no valid MP3 frame sequence can be found.
bool Mp3Reader::init(mp3_callbacks *callback, void *source) {
    mSource = source;
    mCallback = callback;

    // Sync to the first valid frame, scanning from the start of the stream.
    off64_t firstFramePos = 0;
    uint32_t firstHeader = 0;
    if (!resync(callback, source, 0 /*match_header*/, &firstFramePos, &firstHeader)) {
        ALOGE("%s, resync failed", __FUNCTION__);
        return false;
    }
    mCurrentPos = firstFramePos;
    mFixedHeader = firstHeader;

    // Extract sample rate / channel count / bitrate from the fixed header.
    size_t unusedFrameSize;
    return parseHeader(firstHeader, &unusedFrameSize, &mSampleRate,
                       &mNumChannels, &mBitrate);
}
// Get the next valid MP3 frame.
//
// Copies the frame at mCurrentPos into 'buffer' (which must be large enough
// for a worst-case frame) and stores the frame length in *size. When the
// bytes at mCurrentPos do not match the fixed header bits, attempts to
// resync before giving up. Returns false on end of stream or when sync
// cannot be re-established.
bool Mp3Reader::getFrame(void *buffer, uint32_t *size) {
    size_t frame_size;
    uint32_t bitrate;
    uint32_t num_samples;
    uint32_t sample_rate;
    for (;;) {
        // Peek at the 4 header bytes of the candidate frame.
        ssize_t n = sourceReadAt(mCallback, mSource, mCurrentPos, buffer, 4);
        if (n < 4) {
            return false;
        }
        uint32_t header = U32_AT((const uint8_t *)buffer);
        // Accept when the fixed bits (version/layer/sampling rate) match the
        // header captured in init() and the header parses cleanly.
        if ((header & kMask) == (mFixedHeader & kMask) && parseHeader(
                                                             header, &frame_size, &sample_rate, NULL /*out_channels*/,
                                                             &bitrate, &num_samples)) {
            break;
        }
        // Lost sync.
        off64_t pos = mCurrentPos;
        if (!resync(mCallback, mSource, mFixedHeader, &pos, NULL /*out_header*/)) {
            // Unable to resync. Signalling end of stream.
            return false;
        }
        mCurrentPos = pos;
        // Try again with the new position.
    }
    // Read the entire frame (header included) into the caller's buffer.
    ssize_t n = sourceReadAt(mCallback, mSource, mCurrentPos, buffer, frame_size);
    if (n < (ssize_t)frame_size) {
        return false;
    }
    *size = frame_size;
    mCurrentPos += frame_size;
    return true;
}
// Close the MP3 reader.
// Forwards to the caller-supplied close callback. Requires that init() has
// been called (mCallback non-NULL); note the assert is compiled out in
// release builds.
void Mp3Reader::close() {
    assert(mCallback != NULL);
    mCallback->close(mSource);
}
// No owned resources to release here: the data source is closed via the
// explicit close() call, not by the destructor.
Mp3Reader::~Mp3Reader() {
}
enum {
    kInputBufferSize = 10 * 1024, // compressed bytes read per getFrame() call
    kOutputBufferSize = 4608 * 2, // PCM output capacity in bytes for one decoded frame
};
/*
 * Decode an entire MP3 stream to interleaved 16-bit PCM.
 *
 * cb          I/O callbacks for the data source.
 * source      opaque datasource handle passed to the callbacks.
 * pcmBuffer   out: decoded PCM bytes are appended here.
 * numChannels out: channel count of the stream.
 * sampleRate  out: sampling rate in Hz.
 * numFrames   in/out: accumulated with the decoded frame count per channel
 *             (callers should zero it before the first call).
 *
 * Returns EXIT_SUCCESS, or EXIT_FAILURE on reader/decoder/allocation error.
 */
int decodeMP3(mp3_callbacks *cb, void *source, std::vector<char> &pcmBuffer, int *numChannels, int *sampleRate, int *numFrames) {
    // Initialize the decoder config.
    tPVMP3DecoderExternal config;
    config.equalizerType = flat;
    config.crcEnabled = false;
    // Allocate the decoder's private working memory. The original code only
    // assert()ed malloc results, which is a no-op in release builds and
    // would lead to a NULL dereference on allocation failure.
    uint32_t memRequirements = pvmp3_decoderMemRequirements();
    void *decoderBuf = malloc(memRequirements);
    if (decoderBuf == NULL) {
        ALOGE("decodeMP3: failed to allocate decoder memory\n");
        return EXIT_FAILURE;
    }
    // Initialize the decoder.
    pvmp3_InitDecoder(&config, decoderBuf);
    // Sync the reader to the first valid frame of the stream.
    Mp3Reader mp3Reader;
    if (!mp3Reader.init(cb, source)) {
        ALOGE("mp3Reader.init: Encountered error reading\n");
        free(decoderBuf);
        return EXIT_FAILURE;
    }
    // Allocate input/output work buffers, releasing everything on failure.
    uint8_t *inputBuf = static_cast<uint8_t *>(malloc(kInputBufferSize));
    int16_t *outputBuf = static_cast<int16_t *>(malloc(kOutputBufferSize));
    if (inputBuf == NULL || outputBuf == NULL) {
        ALOGE("decodeMP3: failed to allocate work buffers\n");
        free(inputBuf);
        free(outputBuf);
        mp3Reader.close();
        free(decoderBuf);
        return EXIT_FAILURE;
    }
    // Decode loop: one MP3 frame per iteration until EOF or error.
    int retVal = EXIT_SUCCESS;
    for (;;) {
        // Read the next compressed frame from the source.
        uint32_t bytesRead;
        if (!mp3Reader.getFrame(inputBuf, &bytesRead)) {
            break; // end of stream or unrecoverable loss of sync
        }
        *numChannels = mp3Reader.getNumChannels();
        *sampleRate = mp3Reader.getSampleRate();
        // Describe the input frame and the output capacity to the decoder.
        config.inputBufferCurrentLength = bytesRead;
        config.inputBufferMaxLength = 0;
        config.inputBufferUsedLength = 0;
        config.pInputBuffer = inputBuf;
        config.pOutputBuffer = outputBuf;
        config.outputFrameSize = kOutputBufferSize / sizeof(int16_t);
        ERROR_CODE decoderErr = pvmp3_framedecoder(&config, decoderBuf);
        if (decoderErr != NO_DECODING_ERROR) {
            ALOGE("Decoder encountered error=%d", decoderErr);
            retVal = EXIT_FAILURE;
            break;
        }
        // Append the decoded 16-bit samples (outputFrameSize samples, i.e.
        // outputFrameSize * 2 bytes) to the PCM accumulator.
        pcmBuffer.insert(pcmBuffer.end(), (char *)outputBuf, ((char *)outputBuf) + config.outputFrameSize * 2);
        *numFrames += config.outputFrameSize / mp3Reader.getNumChannels();
    }
    // Close the input reader and free all allocated memory.
    mp3Reader.close();
    free(inputBuf);
    free(outputBuf);
    free(decoderBuf);
    return retVal;
}

View File

@@ -0,0 +1,61 @@
/*
* Copyright (C) 2014 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef MP3READER_H_
#define MP3READER_H_
// fread/fseek/fclose/ftell-style I/O callbacks supplied by the client to
// abstract the MP3 data source.
// NOTE(review): this header uses size_t/int64_t/uint32_t/off64_t and
// std::vector without including any headers — it relies on the including
// translation unit pulling them in first; confirm before reusing standalone.
typedef struct {
    size_t (*read)(void *ptr, size_t size, size_t nmemb, void *datasource); // like fread
    int (*seek)(void *datasource, int64_t offset, int whence);              // like fseek; 0 on success
    int (*close)(void *datasource);                                         // like fclose
    long (*tell)(void *datasource);                                         // like ftell
} mp3_callbacks;
// Pull-based MP3 frame reader: init() syncs to the first valid frame of the
// data source, then getFrame() returns successive raw frames for decoding.
class Mp3Reader {
public:
    Mp3Reader();
    // Bind the reader to a data source and sync to the first valid frame.
    // Returns false when no valid MP3 frame sequence can be found.
    bool init(mp3_callbacks *callback, void *source);
    // Copy the next frame into 'buffer' and report its byte length in *size.
    bool getFrame(void *buffer, uint32_t *size);
    uint32_t getSampleRate() { return mSampleRate; }
    uint32_t getNumChannels() { return mNumChannels; }
    // Invoke the close callback on the data source.
    void close();
    ~Mp3Reader();
private:
    void *mSource;            // opaque datasource handle passed to callbacks
    mp3_callbacks *mCallback; // client-supplied I/O functions
    uint32_t mFixedHeader;    // header of the first valid frame; sync reference
    off64_t mCurrentPos;      // byte offset of the next frame to read
    uint32_t mSampleRate;     // Hz, parsed from the fixed header
    uint32_t mNumChannels;    // 1 (mono) or 2
    uint32_t mBitrate;        // kbps, parsed from the fixed header
};
int decodeMP3(mp3_callbacks *cb, void *source, std::vector<char> &pcmBuffer, int *numChannels, int *sampleRate, int *numFrames);
#endif /* MP3READER_H_ */

View File

@@ -0,0 +1,107 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef COCOS_LIB_UTILS_COMPAT_H
#define COCOS_LIB_UTILS_COMPAT_H

// Platform compatibility shims for the OpenSL-ES based audio backend:
// pulls in the per-platform OpenSL headers and papers over POSIX/Win32/macOS
// differences (64-bit file I/O names, open() flags, format specifiers, ...).

#if CC_PLATFORM == CC_PLATFORM_ANDROID
#include <unistd.h>
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#elif CC_PLATFORM == CC_PLATFORM_WINDOWS
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
#include <SLES/OpenSLES_OpenHarmony.h>
#include <SLES/OpenSLES_Platform.h>
#endif

#if defined(__APPLE__)
/* Mac OS has always had a 64-bit off_t, so it doesn't have off64_t. */
typedef off_t off64_t;

// Forward the *64 POSIX I/O names to the regular (already 64-bit) calls.
static inline off64_t lseek64(int fd, off64_t offset, int whence) {
    return lseek(fd, offset, whence);
}

static inline ssize_t pread64(int fd, void *buf, size_t nbytes, off64_t offset) {
    return pread(fd, buf, nbytes, offset);
}

static inline ssize_t pwrite64(int fd, const void *buf, size_t nbytes, off64_t offset) {
    return pwrite(fd, buf, nbytes, offset);
}
#endif /* __APPLE__ */

#if defined(_WIN32)
// Closest Win32 equivalents for POSIX open(2) flags / default file mode.
#define O_CLOEXEC O_NOINHERIT
#define O_NOFOLLOW 0
#define DEFFILEMODE 0666
#endif /* _WIN32 */

#if defined(_WIN32)
// printf specifier for ssize_t-like values (the MS C runtime lacks %zd).
#define ZD "%ld"
#define ZD_TYPE long
#else
#define ZD "%zd"
#define ZD_TYPE ssize_t
#endif

/*
 * Needed for cases where something should be constexpr if possible, but not
 * being constexpr is fine if in pre-C++11 code (such as a const static float
 * member variable).
 */
#if __cplusplus >= 201103L
#define CONSTEXPR constexpr
#else
#define CONSTEXPR
#endif

/*
 * TEMP_FAILURE_RETRY is defined by some, but not all, versions of
 * <unistd.h>. (Alas, it is not as standard as we'd hoped!) So, if it's
 * not already defined, then define it here.
 */
#ifndef TEMP_FAILURE_RETRY
/* Used to retry syscalls that can return EINTR. */
#define TEMP_FAILURE_RETRY(exp) ({         \
    typeof (exp) _rc;                      \
    do {                                   \
        _rc = (exp);                       \
    } while (_rc == -1 && errno == EINTR); \
    _rc; })
#endif

#if defined(_WIN32)
#define OS_PATH_SEPARATOR '\\'
#else
#define OS_PATH_SEPARATOR '/'
#endif

// Unified buffer-queue interface: Android and OpenHarmony expose the same
// concept through different OpenSL extension types and interface IDs.
#if CC_PLATFORM == CC_PLATFORM_ANDROID
typedef SLAndroidSimpleBufferQueueItf CCSLBufferQueueItf;
#define CC_SL_IDD_BUFFER_QUEUE SL_IID_ANDROIDSIMPLEBUFFERQUEUE
#elif CC_PLATFORM == CC_PLATFORM_OPENHARMONY
typedef SLOHBufferQueueItf CCSLBufferQueueItf;
#define CC_SL_IDD_BUFFER_QUEUE SL_IID_OH_BUFFERQUEUE
#define __unused
#endif

#endif /* COCOS_LIB_UTILS_COMPAT_H */

View File

@@ -0,0 +1,88 @@
/*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef COCOS_ERRORS_H
#define COCOS_ERRORS_H

#include <sys/types.h>
#include <errno.h>

namespace cc {

// use this type to return error codes
#ifdef _WIN32
typedef int status_t;
#else
typedef int32_t status_t;
#endif

/* the MS C runtime lacks a few error codes */

/*
 * Error codes.
 * All error codes are negative values.
 */

// Win32 #defines NO_ERROR as well. It has the same value, so there's no
// real conflict, though it's a bit awkward.
#ifdef _WIN32
#undef NO_ERROR
#endif

enum {
    OK = 0,       // Everything's swell.
    NO_ERROR = 0, // No errors.

    UNKNOWN_ERROR = (-2147483647 - 1), // INT32_MIN value

    // Where an equivalent errno exists, the code is that errno negated, so the
    // values remain recognizable across the C-library boundary.
    NO_MEMORY = -ENOMEM,
    INVALID_OPERATION = -ENOSYS,
    BAD_VALUE = -EINVAL,
    BAD_TYPE = (UNKNOWN_ERROR + 1),
    NAME_NOT_FOUND = -ENOENT,
    PERMISSION_DENIED = -EPERM,
    NO_INIT = -ENODEV,
    ALREADY_EXISTS = -EEXIST,
    DEAD_OBJECT = -EPIPE,
    FAILED_TRANSACTION = (UNKNOWN_ERROR + 2),
#if !defined(_WIN32)
    BAD_INDEX = -EOVERFLOW,
    NOT_ENOUGH_DATA = -ENODATA,
    WOULD_BLOCK = -EWOULDBLOCK,
    TIMED_OUT = -ETIMEDOUT,
    UNKNOWN_TRANSACTION = -EBADMSG,
#else
    // Several of these errnos are missing from the MS C runtime; fall back to
    // synthetic codes derived from UNKNOWN_ERROR.
    BAD_INDEX = -E2BIG,
    NOT_ENOUGH_DATA = (UNKNOWN_ERROR + 3),
    WOULD_BLOCK = (UNKNOWN_ERROR + 4),
    TIMED_OUT = (UNKNOWN_ERROR + 5),
    UNKNOWN_TRANSACTION = (UNKNOWN_ERROR + 6),
#endif
    FDS_NOT_ALLOWED = (UNKNOWN_ERROR + 7),
    UNEXPECTED_NULL = (UNKNOWN_ERROR + 8),
};

// Restore define; enumeration is in "android" namespace, so the value defined
// there won't work for Win32 code in a different namespace.
#ifdef _WIN32
#define NO_ERROR 0L
#endif

} // namespace cc

// ---------------------------------------------------------------------------
#endif // COCOS_ERRORS_H

View File

@@ -0,0 +1,34 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "audio/android/utils/Utils.h"
#include "platform/BasePlatform.h"
namespace cc {
// Returns the OS/SDK version number reported by the active platform layer.
int getSDKVersion() {
    auto *platform = BasePlatform::getPlatform();
    return platform->getSdkVersion();
}
} // end of namespace cc

View File

@@ -0,0 +1,31 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#include "base/std/container/string.h"
namespace cc {
// Returns the platform's SDK/OS version number; implemented in Utils.cpp by
// delegating to BasePlatform::getSdkVersion().
extern int getSDKVersion();
}

View File

@@ -0,0 +1,110 @@
/****************************************************************************
Copyright (c) 2014-2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#import <OpenAL/al.h>
#include <mutex>
#include "base/std/container/string.h"
#include "audio/apple/AudioMacros.h"
#include "base/Macros.h"
#include "base/std/container/vector.h"
#define INVALID_AL_BUFFER_ID 0xFFFFFFFF
namespace cc {
class AudioEngineImpl;
class AudioPlayer;
// Holds the decoded PCM for one audio file plus the OpenAL resources needed
// to play it. readDataTask() fills the cache asynchronously: PCM payloads
// that fit in PCMDATA_CACHEMAXSIZE are uploaded into a single OpenAL buffer,
// larger files switch the cache into streaming mode with QUEUEBUFFER_NUM
// pre-filled queue buffers.
class AudioCache {
public:
    // Lifecycle of the asynchronous load.
    enum class State {
        INITIAL, // created; readDataTask not started yet
        LOADING, // decoding on a worker thread
        READY,   // buffers are usable
        FAILED   // open/decode failed
    };

    AudioCache();
    ~AudioCache();

    // Queues |callback| to run once loading finishes; fires immediately if the
    // cache is already READY or FAILED. Guarded by _playCallbackMutex.
    void addPlayCallback(const std::function<void()> &callback);

    // Queues |callback| to receive the load result (true == READY) on the
    // Cocos thread; fires immediately if loading already finished.
    void addLoadCallback(const std::function<void(bool)> &callback);

    inline bool isStreaming() const { return _isStreaming; }

protected:
    // When set, the destructor won't block waiting for readDataTask to finish.
    void setSkipReadDataTask(bool isSkip) { _isSkipReadDataTask = isSkip; };

    // Decodes the file on a worker thread and fills the members below.
    void readDataTask(unsigned int selfId);

    void invokingPlayCallbacks();

    void invokingLoadCallbacks();

    //pcm data related stuff
    ALenum _format{-1};     // AL_FORMAT_MONO16 or AL_FORMAT_STEREO16 (set in readDataTask)
    ALsizei _sampleRate{0};
    float _duration{0};     // seconds (totalFrames / sampleRate)
    uint32_t _totalFrames{0};
    uint32_t _framesRead{0};
    uint32_t _bytesPerFrame{0};
    bool _isStreaming{false};
    uint32_t _channelCount{1};

    /*Cache related stuff;
     * Cache pcm data when sizeInBytes less than PCMDATA_CACHEMAXSIZE
     */
    ALuint _alBufferId{INVALID_AL_BUFFER_ID};
    char *_pcmData{nullptr};

    /*Queue buffer related stuff
     * Streaming in openal when sizeInBytes greater then PCMDATA_CACHEMAXSIZE
     */
    char *_queBuffers[QUEUEBUFFER_NUM];
    ALsizei _queBufferSize[QUEUEBUFFER_NUM];
    uint32_t _queBufferFrames{0};

    std::mutex _playCallbackMutex;
    ccstd::vector<std::function<void()>> _playCallbacks;

    // loadCallbacks doesn't need mutex since it's invoked only in Cocos thread.
    ccstd::vector<std::function<void(bool)>> _loadCallbacks;

    // Held for the whole readDataTask and for destructor teardown.
    std::mutex _readDataTaskMutex;

    State _state{State::INITIAL};

    // Shared with async tasks so they can detect destruction of this cache.
    std::shared_ptr<bool> _isDestroyed;
    ccstd::string _fileFullPath;
    unsigned int _id; // unique per instance; used only for logging
    bool _isLoadingFinished{false}; // set at the very end of readDataTask
    bool _isSkipReadDataTask{false};

    friend class AudioEngineImpl;
    friend class AudioPlayer;
};
} // namespace cc

View File

@@ -0,0 +1,354 @@
/****************************************************************************
Copyright (c) 2014-2016 Chukong Technologies Inc.
Copyright (c) 2017-2022 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated engine source code (the "Software"), a limited,
worldwide, royalty-free, non-assignable, revocable and non-exclusive license
to use Cocos Creator solely to develop games on your target platforms. You shall
not use Cocos Creator software for developing other software or tools that's
used for developing games. You are not granted to publish, distribute,
sublicense, and/or sell copies of Cocos Creator.
The software or tools in this License Agreement are licensed, not sold.
Xiamen Yaji Software Co., Ltd. reserves all rights not expressly granted to you.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#define LOG_TAG "AudioCache"
#include "audio/apple/AudioCache.h"
#import <Foundation/Foundation.h>
#import <OpenAL/alc.h>
#include <thread>
#include "application/ApplicationManager.h"
#include "base/Scheduler.h"
#include "base/memory/Memory.h"
#include "audio/apple/AudioDecoder.h"
#ifdef VERY_VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(...) \
do { \
} while (false)
#endif
namespace {
// File-local counter handing every AudioCache a unique id (used in logs only).
unsigned int __idIndex = 0;
}

// PCM payloads up to 1 MiB are decoded entirely into one OpenAL buffer;
// anything larger is streamed through the queue buffers instead.
#define PCMDATA_CACHEMAXSIZE 1048576
// Small Objective-C helper that fires a C++ std::function once after a delay.
// Built for manual reference counting (note the explicit [super dealloc]).
@interface NSTimerWrapper : NSObject {
    std::function<void()> _timeoutCallback;
}
@end

@implementation NSTimerWrapper
- (id)initWithTimeInterval:(double)seconds callback:(const std::function<void()> &)cb {
    if (self = [super init]) {
        _timeoutCallback = cb;
        // Non-repeating timer on the current run loop. Per Cocoa semantics the
        // timer retains its target (self) until it fires, keeping the wrapper
        // alive long enough to run the callback.
        NSTimer *timer = [NSTimer timerWithTimeInterval:seconds target:self selector:@selector(onTimeoutCallback:) userInfo:nil repeats:NO];
        [[NSRunLoop currentRunLoop] addTimer:timer forMode:NSDefaultRunLoopMode];
    }
    return self;
}

- (void)onTimeoutCallback:(NSTimer *)timer {
    // Run the callback at most once, then drop it so captured state is freed.
    if (_timeoutCallback != nullptr) {
        _timeoutCallback();
        _timeoutCallback = nullptr;
    }
}

- (void)dealloc {
    [super dealloc];
}
@end
using namespace cc;
// Assigns a fresh log id and starts with an empty streaming queue; the queue
// buffers are allocated lazily by readDataTask() when streaming is needed.
AudioCache::AudioCache()
: _isDestroyed(std::make_shared<bool>(false)),
  _id(++__idIndex) {
    ALOGVV("AudioCache() %p, id=%u", this, _id);
    for (int index = 0; index < QUEUEBUFFER_NUM; ++index) {
        _queBuffers[index] = nullptr;
        _queBufferSize[index] = 0;
    }
}
AudioCache::~AudioCache() {
    ALOGVV("~AudioCache() %p, id=%u, begin", this, _id);
    // Tell the async readDataTask (which holds a copy of this shared_ptr)
    // that the cache is going away.
    *_isDestroyed = true;
    // Busy-wait until the loading thread finishes — unless the read task was
    // never scheduled (_isSkipReadDataTask), in which case waiting would spin
    // forever.
    while (!_isLoadingFinished) {
        if (_isSkipReadDataTask) {
            ALOGV("id=%u, Skip read data task, don't continue to wait!", _id);
            break;
        }
        ALOGVV("id=%u, waiting readData thread to finish ...", _id);
        std::this_thread::sleep_for(std::chrono::milliseconds(5));
    }
    //wait for the 'readDataTask' task to exit
    _readDataTaskMutex.lock();
    if (_state == State::READY) {
        // Preload path: release the single OpenAL buffer.
        if (_alBufferId != INVALID_AL_BUFFER_ID && alIsBuffer(_alBufferId)) {
            ALOGV("~AudioCache(id=%u), delete buffer: %u", _id, _alBufferId);
            alDeleteBuffers(1, &_alBufferId);
            _alBufferId = INVALID_AL_BUFFER_ID;
        }
    } else {
        // NOTE(review): _state is a scoped enum passed through a %d varargs
        // format; a static_cast<int> would be cleaner and silences -Wformat.
        ALOGW("AudioCache (%p), id=%u, buffer isn't ready, state=%d", this, _id, _state);
    }
    // Streaming path: free the queue buffers allocated in readDataTask().
    if (_queBufferFrames > 0) {
        for (int index = 0; index < QUEUEBUFFER_NUM; ++index) {
            free(_queBuffers[index]);
        }
    }
    ALOGVV("~AudioCache() %p, id=%u, end", this, _id);
    _readDataTaskMutex.unlock();
}
// Decode worker, executed on a background thread.
// Two paths:
//  - Preload: total PCM size <= PCMDATA_CACHEMAXSIZE — decode everything into
//    _pcmData and upload it to a single OpenAL buffer (_alBufferId).
//  - Streaming: larger files — mark the cache streaming and pre-fill the
//    QUEUEBUFFER_NUM queue buffers; playback streams the remainder.
// |selfId| duplicates _id and is used only for logging.
void AudioCache::readDataTask(unsigned int selfId) {
    //Note: It's in sub thread
    ALOGVV("readDataTask, cache id=%u", selfId);
    _readDataTaskMutex.lock();
    _state = State::LOADING;
    AudioDecoder decoder;
    do {
        if (!decoder.open(_fileFullPath.c_str()))
            break;
        const uint32_t originalTotalFrames = decoder.getTotalFrames();
        const uint32_t bytesPerFrame = decoder.getBytesPerFrame();
        const uint32_t sampleRate = decoder.getSampleRate();
        _channelCount = decoder.getChannelCount();
        uint32_t totalFrames = originalTotalFrames;
        uint32_t dataSize = totalFrames * bytesPerFrame;
        uint32_t remainingFrames = totalFrames;
        uint32_t adjustFrames = 0;
        // Decoder output is 16-bit PCM, so only the mono/stereo 16 formats apply.
        _format = _channelCount > 1 ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16;
        _sampleRate = (ALsizei)sampleRate;
        _duration = 1.0f * totalFrames / sampleRate;
        _totalFrames = totalFrames;
        if (dataSize <= PCMDATA_CACHEMAXSIZE) {
            // ---- Preload path ----
            uint32_t framesRead = 0;
            const uint32_t framesToReadOnce = std::min(totalFrames, static_cast<uint32_t>(sampleRate * QUEUEBUFFER_TIME_STEP * QUEUEBUFFER_NUM));
            BREAK_IF_ERR_LOG(!decoder.seek(totalFrames), "AudioDecoder::seek(%u) error", totalFrames);
            char *tmpBuf = (char *)malloc(framesToReadOnce * bytesPerFrame);
            ccstd::vector<char> adjustFrameBuf;
            adjustFrameBuf.reserve(framesToReadOnce * bytesPerFrame);
            // Adjust total frames by setting position to the end of frames and try to read more data.
            // This is a workaround for https://github.com/cocos2d/cocos2d-x/issues/16938
            do {
                framesRead = decoder.read(framesToReadOnce, tmpBuf);
                if (framesRead > 0) {
                    adjustFrames += framesRead;
                    adjustFrameBuf.insert(adjustFrameBuf.end(), tmpBuf, tmpBuf + framesRead * bytesPerFrame);
                }
            } while (framesRead > 0);
            if (adjustFrames > 0) {
                ALOGV("Orignal total frames: %u, adjust frames: %u, current total frames: %u", totalFrames, adjustFrames, totalFrames + adjustFrames);
                totalFrames += adjustFrames;
                _totalFrames = remainingFrames = totalFrames;
            }
            // Reset dataSize
            dataSize = totalFrames * bytesPerFrame;
            free(tmpBuf);
            // Reset to frame 0
            BREAK_IF_ERR_LOG(!decoder.seek(0), "AudioDecoder::seek(0) failed!");
            _pcmData = (char *)malloc(dataSize);
            memset(_pcmData, 0x00, dataSize);
            ALOGV(" id=%u _pcmData alloc: %p", selfId, _pcmData);
            // The extra frames gathered above belong at the tail of the buffer.
            if (adjustFrames > 0) {
                memcpy(_pcmData + (dataSize - adjustFrameBuf.size()), adjustFrameBuf.data(), adjustFrameBuf.size());
            }
            // Bail out between chunks if the cache is being destroyed.
            if (*_isDestroyed)
                break;
            framesRead = decoder.readFixedFrames(std::min(framesToReadOnce, remainingFrames), _pcmData + _framesRead * bytesPerFrame);
            _framesRead += framesRead;
            remainingFrames -= framesRead;
            if (*_isDestroyed)
                break;
            // Read the rest of the original frames chunk by chunk.
            uint32_t frames = 0;
            while (!*_isDestroyed && _framesRead < originalTotalFrames) {
                frames = std::min(framesToReadOnce, remainingFrames);
                if (_framesRead + frames > originalTotalFrames) {
                    frames = originalTotalFrames - _framesRead;
                }
                framesRead = decoder.read(frames, _pcmData + _framesRead * bytesPerFrame);
                if (framesRead == 0)
                    break;
                _framesRead += framesRead;
                remainingFrames -= framesRead;
            }
            // Pad any unread tail with silence.
            if (_framesRead < originalTotalFrames) {
                memset(_pcmData + _framesRead * bytesPerFrame, 0x00, (totalFrames - _framesRead) * bytesPerFrame);
            }
            ALOGV("pcm buffer was loaded successfully, total frames: %u, total read frames: %u, adjust frames: %u, remainingFrames: %u", totalFrames, _framesRead, adjustFrames, remainingFrames);
            _framesRead += adjustFrames;
            alGenBuffers(1, &_alBufferId);
            auto alError = alGetError();
            if (alError != AL_NO_ERROR) {
                ALOGE("%s: attaching audio to buffer fail: %x", __PRETTY_FUNCTION__, alError);
                break;
            }
            ALOGV(" id=%u generated alGenBuffers: %u for _pcmData: %p", selfId, _alBufferId, _pcmData);
            ALOGV(" id=%u _pcmData alBufferData: %p", selfId, _pcmData);
            alBufferData(_alBufferId, _format, _pcmData, (ALsizei)dataSize, (ALsizei)sampleRate);
            _state = State::READY;
            invokingPlayCallbacks();
        } else {
            // ---- Streaming path ----
            _isStreaming = true;
            _queBufferFrames = sampleRate * QUEUEBUFFER_TIME_STEP;
            BREAK_IF_ERR_LOG(_queBufferFrames == 0, "_queBufferFrames == 0");
            const uint32_t queBufferBytes = _queBufferFrames * bytesPerFrame;
            for (int index = 0; index < QUEUEBUFFER_NUM; ++index) {
                _queBuffers[index] = (char *)malloc(queBufferBytes);
                _queBufferSize[index] = queBufferBytes;
                decoder.readFixedFrames(_queBufferFrames, _queBuffers[index]);
            }
            _state = State::READY;
        }
    } while (false);
    // alBufferData() copies the PCM into OpenAL, so the staging buffer can go.
    if (_pcmData != nullptr) {
        CC_SAFE_FREE(_pcmData);
    }
    decoder.close();
    //IDEA: Why to invoke play callback first? Should it be after 'load' callback?
    invokingPlayCallbacks();
    invokingLoadCallbacks();
    _isLoadingFinished = true;
    if (_state != State::READY) {
        _state = State::FAILED;
        // Clean up a buffer generated before a later failure.
        if (_alBufferId != INVALID_AL_BUFFER_ID && alIsBuffer(_alBufferId)) {
            ALOGV(" id=%u readDataTask failed, delete buffer: %u", selfId, _alBufferId);
            alDeleteBuffers(1, &_alBufferId);
            _alBufferId = INVALID_AL_BUFFER_ID;
        }
    }
    _readDataTaskMutex.unlock();
}
// Registers a callback to run when the cache becomes playable. If loading is
// still in progress the callback is queued (drained by invokingPlayCallbacks);
// if loading already finished — successfully or not — it fires immediately.
void AudioCache::addPlayCallback(const std::function<void()> &callback) {
    std::lock_guard<std::mutex> lk(_playCallbackMutex);
    switch (_state) {
        case State::INITIAL:
        case State::LOADING:
            _playCallbacks.push_back(callback);
            break;
        case State::READY:
        // If state is failure, we still need to invoke the callback
        // since the callback will set the 'AudioPlayer::_removeByAudioEngine' flag to true.
        case State::FAILED:
            callback();
            break;
        default:
            // Fix: State is a scoped enum — passing it straight through a %d
            // varargs format is a format/argument mismatch; cast explicitly.
            ALOGE("Invalid state: %d", static_cast<int>(_state));
            break;
    }
}
// Drains every queued play callback exactly once, under the same mutex that
// guards addPlayCallback().
void AudioCache::invokingPlayCallbacks() {
    std::lock_guard<std::mutex> guard(_playCallbackMutex);
    for (const auto &playCallback : _playCallbacks) {
        playCallback();
    }
    _playCallbacks.clear();
}
// Registers a callback to receive the load result (true == READY). Queued
// while loading, fired synchronously once the state is final. No mutex here:
// per the header comment, _loadCallbacks is only touched on the Cocos thread.
void AudioCache::addLoadCallback(const std::function<void(bool)> &callback) {
    switch (_state) {
        case State::INITIAL:
        case State::LOADING:
            _loadCallbacks.push_back(callback);
            break;
        case State::READY:
            callback(true);
            break;
        case State::FAILED:
            callback(false);
            break;
        default:
            // Fix: State is a scoped enum — passing it straight through a %d
            // varargs format is a format/argument mismatch; cast explicitly.
            ALOGE("Invalid state: %d", static_cast<int>(_state));
            break;
    }
}
void AudioCache::invokingLoadCallbacks() {
    if (*_isDestroyed) {
        ALOGV("AudioCache (%p) was destroyed, don't invoke preload callback ...", this);
        return;
    }
    // Copy the shared destruction flag into the lambda so the check below
    // stays valid even if this AudioCache is deleted before the scheduled
    // function runs on the Cocos thread.
    auto isDestroyed = _isDestroyed;
    auto scheduler = CC_CURRENT_ENGINE()->getScheduler();
    scheduler->performFunctionInCocosThread([&, isDestroyed]() {
        // NOTE(review): `this` is captured via [&]; members may only be
        // touched after *isDestroyed has been confirmed false.
        if (*isDestroyed) {
            ALOGV("invokingLoadCallbacks perform in cocos thread, AudioCache (%p) was destroyed!", this);
            return;
        }
        for (auto &&cb : _loadCallbacks) {
            cb(_state == State::READY);
        }
        _loadCallbacks.clear();
    });
}

View File

@@ -0,0 +1,118 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2023 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#pragma once
#import <AudioToolbox/ExtendedAudioFile.h>
#include <stdint.h>
namespace cc {
/**
 * @brief The class for decoding compressed audio file to PCM buffer.
 */
class AudioDecoder {
public:
    static const uint32_t INVALID_FRAME_INDEX = UINT32_MAX;

    AudioDecoder();
    ~AudioDecoder();

    /**
     * @brief Opens an audio file specified by a file path.
     * @return true if succeed, otherwise false.
     */
    bool open(const char *path);

    /**
     * @brief Checks whether decoder has opened file successfully.
     * @return true if succeed, otherwise false.
     */
    bool isOpened() const;

    /**
     * @brief Closes opened audio file.
     * @note The method will also be automatically invoked in the destructor.
     */
    void close();

    /**
     * @brief Reads audio frames of PCM format.
     * @param framesToRead The number of frames expected to be read.
     * @param pcmBuf The buffer to hold the frames to be read, its size should be >= |framesToRead| * _bytesPerFrame.
     * @return The number of frames actually read, it's probably less than 'framesToRead'. Returns 0 means reach the end of file.
     */
    uint32_t read(uint32_t framesToRead, char *pcmBuf);

    /**
     * @brief Reads fixed audio frames of PCM format.
     * @param framesToRead The number of frames expected to be read.
     * @param pcmBuf The buffer to hold the frames to be read, its size should be >= |framesToRead| * _bytesPerFrame.
     * @return The number of frames actually read, it's probably less than |framesToRead|. Returns 0 means reach the end of file.
     * @note The difference between |read| and |readFixedFrames| is |readFixedFrames| will do multiple reading operations if |framesToRead| frames
     * isn't filled entirely, while |read| just does reading operation once whatever |framesToRead| is or isn't filled entirely.
     * If current position reaches the end of frames, the return value may smaller than |framesToRead| and the remaining
     * buffer in |pcmBuf| will be set with silence data (0x00).
     */
    uint32_t readFixedFrames(uint32_t framesToRead, char *pcmBuf);

    /**
     * @brief Sets frame offset to be read.
     * @param frameOffset The frame offset to be set.
     * @return true if succeed, otherwise false
     */
    bool seek(uint32_t frameOffset);

    /**
     * @brief Tells the current frame offset.
     * @return The current frame offset.
     */
    uint32_t tell() const;

    /** Gets total frames of current audio.*/
    uint32_t getTotalFrames() const;

    /** Gets bytes per frame of current audio.*/
    uint32_t getBytesPerFrame() const;

    /** Gets sample rate of current audio.*/
    uint32_t getSampleRate() const;

    /** Gets the channel count of current audio.
     * @note Currently we only support 1 or 2 channels.
     */
    uint32_t getChannelCount() const;

private:
    bool _isOpened;          // true between a successful open() and close()
    ExtAudioFileRef _extRef; // Core Audio handle; nullptr when closed
    uint32_t _totalFrames;
    uint32_t _bytesPerFrame; // 2 bytes per sample * channel count (16-bit client format)
    uint32_t _sampleRate;
    uint32_t _channelCount;
    // Client data format requested from ExtAudioFile: 16-bit signed,
    // native-endian, packed linear PCM (configured in open()).
    AudioStreamBasicDescription _outputFormat;
};
} // namespace cc

View File

@@ -0,0 +1,203 @@
/****************************************************************************
Copyright (c) 2016 Chukong Technologies Inc.
Copyright (c) 2017-2022 Xiamen Yaji Software Co., Ltd.
http://www.cocos.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated engine source code (the "Software"), a limited,
worldwide, royalty-free, non-assignable, revocable and non-exclusive license
to use Cocos Creator solely to develop games on your target platforms. You shall
not use Cocos Creator software for developing other software or tools that's
used for developing games. You are not granted to publish, distribute,
sublicense, and/or sell copies of Cocos Creator.
The software or tools in this License Agreement are licensed, not sold.
Xiamen Yaji Software Co., Ltd. reserves all rights not expressly granted to you.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "audio/apple/AudioDecoder.h"
#include "audio/apple/AudioMacros.h"
#import <Foundation/Foundation.h>
#define LOG_TAG "AudioDecoder"
namespace cc {
// Starts in the closed state; _outputFormat is value-initialized (all zero),
// equivalent to the previous memset.
AudioDecoder::AudioDecoder()
: _isOpened(false),
  _extRef(nullptr),
  _totalFrames(0),
  _bytesPerFrame(0),
  _sampleRate(0),
  _channelCount(0),
  _outputFormat() {
}
// Ensures the ExtAudioFile handle is released even if the caller never
// called close() explicitly (close() is a no-op when already closed).
AudioDecoder::~AudioDecoder() {
    close();
}
// Opens |path| with ExtAudioFile and configures a 16-bit signed native-endian
// PCM client format (original sample rate / channel count preserved).
// Returns true on success; on any failure all partially acquired state is
// torn down via close().
//
// Fix: the original assigned the OSStatus of ExtAudioFileGetProperty /
// ExtAudioFileSetProperty to the bool `ret` while the error checks kept
// testing the stale `status` from ExtAudioFileOpenURL — property failures
// were silently ignored and the error logs printed a bool. Every Core Audio
// call now updates `status`, which is what gets checked and logged.
bool AudioDecoder::open(const char *path) {
    bool ret = false;
    CFURLRef fileURL = nil;
    do {
        BREAK_IF_ERR_LOG(path == nullptr || strlen(path) == 0, "Invalid path!");
        NSString *fileFullPath = [[NSString alloc] initWithCString:path encoding:NSUTF8StringEncoding];
        fileURL = (CFURLRef)[[NSURL alloc] initFileURLWithPath:fileFullPath];
        [fileFullPath release];
        BREAK_IF_ERR_LOG(fileURL == nil, "Converting path to CFURLRef failed!");

        OSStatus status = ExtAudioFileOpenURL(fileURL, &_extRef);
        BREAK_IF_ERR_LOG(status != noErr, "ExtAudioFileOpenURL FAILED, Error = %ld", (long)status);

        AudioStreamBasicDescription fileFormat;
        UInt32 propertySize = sizeof(fileFormat);
        // Get the audio data format
        status = ExtAudioFileGetProperty(_extRef, kExtAudioFileProperty_FileDataFormat, &propertySize, &fileFormat);
        BREAK_IF_ERR_LOG(status != noErr, "ExtAudioFileGetProperty(kExtAudioFileProperty_FileDataFormat) FAILED, Error = %ld", (long)status);
        BREAK_IF_ERR_LOG(fileFormat.mChannelsPerFrame > 2, "Unsupported Format, channel count is greater than stereo!");

        // Set the client format to 16 bit signed integer (native-endian) data
        // Maintain the channel count and sample rate of the original source format
        _outputFormat.mSampleRate = fileFormat.mSampleRate;
        _outputFormat.mChannelsPerFrame = fileFormat.mChannelsPerFrame;
        _outputFormat.mFormatID = kAudioFormatLinearPCM;
        _outputFormat.mFramesPerPacket = 1;
        _outputFormat.mBitsPerChannel = 16;
        _outputFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;

        _sampleRate = _outputFormat.mSampleRate;
        _channelCount = _outputFormat.mChannelsPerFrame;
        _bytesPerFrame = 2 * _outputFormat.mChannelsPerFrame; // 16 bits == 2 bytes per sample
        _outputFormat.mBytesPerPacket = _bytesPerFrame;
        _outputFormat.mBytesPerFrame = _bytesPerFrame;

        status = ExtAudioFileSetProperty(_extRef, kExtAudioFileProperty_ClientDataFormat, sizeof(_outputFormat), &_outputFormat);
        BREAK_IF_ERR_LOG(status != noErr, "ExtAudioFileSetProperty FAILED, Error = %ld", (long)status);

        // Get the total frame count
        SInt64 totalFrames = 0;
        propertySize = sizeof(totalFrames);
        status = ExtAudioFileGetProperty(_extRef, kExtAudioFileProperty_FileLengthFrames, &propertySize, &totalFrames);
        BREAK_IF_ERR_LOG(status != noErr, "ExtAudioFileGetProperty(kExtAudioFileProperty_FileLengthFrames) FAILED, Error = %ld", (long)status);
        BREAK_IF_ERR_LOG(totalFrames <= 0, "Total frames is 0, it's an invalid audio file: %s", path);
        _totalFrames = static_cast<uint32_t>(totalFrames);

        _isOpened = true;
        ret = true;
    } while (false);

    if (fileURL != nil)
        CFRelease(fileURL);

    if (!ret) {
        close();
    }
    return ret;
}
// Releases the ExtAudioFile handle and resets the cached stream metadata.
// Safe to call repeatedly; does nothing when no file is open.
void AudioDecoder::close() {
    if (_extRef == nullptr) {
        return;
    }
    ExtAudioFileDispose(_extRef);
    _extRef = nullptr;
    _totalFrames = 0;
    _bytesPerFrame = 0;
    _sampleRate = 0;
    _channelCount = 0;
}
// Performs a single ExtAudioFileRead into the caller-supplied buffer.
// Returns the number of frames actually delivered (0 at end of file or error).
uint32_t AudioDecoder::read(uint32_t framesToRead, char *pcmBuf) {
    uint32_t framesDelivered = 0;
    do {
        BREAK_IF_ERR_LOG(!isOpened(), "decoder isn't openned");
        BREAK_IF_ERR_LOG(framesToRead == INVALID_FRAME_INDEX, "frameToRead is INVALID_FRAME_INDEX");
        BREAK_IF_ERR_LOG(framesToRead == 0, "frameToRead is 0");
        BREAK_IF_ERR_LOG(pcmBuf == nullptr, "pcmBuf is nullptr");

        // Wrap the caller's buffer in a single-entry AudioBufferList.
        AudioBufferList bufferList;
        bufferList.mNumberBuffers = 1;
        bufferList.mBuffers[0].mNumberChannels = _outputFormat.mChannelsPerFrame;
        bufferList.mBuffers[0].mDataByteSize = framesToRead * _bytesPerFrame;
        bufferList.mBuffers[0].mData = pcmBuf;

        // ExtAudioFileRead updates ioFrames with the count actually read.
        UInt32 ioFrames = framesToRead;
        const OSStatus status = ExtAudioFileRead(_extRef, &ioFrames, &bufferList);
        BREAK_IF(status != noErr);
        framesDelivered = ioFrames;
    } while (false);
    return framesDelivered;
}
// Keeps calling read() until |framesToRead| frames are gathered or the end of
// the file is hit; any unread tail of |pcmBuf| is padded with silence (0x00).
// Returns the number of frames actually decoded (excluding the padding).
uint32_t AudioDecoder::readFixedFrames(uint32_t framesToRead, char *pcmBuf) {
    uint32_t totalRead = 0;
    uint32_t chunkRead = 0;
    do {
        chunkRead = read(framesToRead - totalRead, pcmBuf + totalRead * _bytesPerFrame);
        totalRead += chunkRead;
    } while (chunkRead != 0 && totalRead < framesToRead);

    if (totalRead < framesToRead) {
        memset(pcmBuf + totalRead * _bytesPerFrame, 0x00, (framesToRead - totalRead) * _bytesPerFrame);
    }
    return totalRead;
}
// Repositions the decoder to |frameOffset| (in frames). Returns true when the
// underlying ExtAudioFileSeek succeeded.
bool AudioDecoder::seek(uint32_t frameOffset) {
    bool succeeded = false;
    do {
        BREAK_IF_ERR_LOG(!isOpened(), "decoder isn't openned");
        BREAK_IF_ERR_LOG(frameOffset == INVALID_FRAME_INDEX, "frameIndex is INVALID_FRAME_INDEX");
        const OSStatus status = ExtAudioFileSeek(_extRef, frameOffset);
        BREAK_IF(status != noErr);
        succeeded = true;
    } while (false);
    return succeeded;
}
// Reports the current frame position, or INVALID_FRAME_INDEX when the decoder
// is closed or ExtAudioFileTell fails.
uint32_t AudioDecoder::tell() const {
    uint32_t currentFrame = INVALID_FRAME_INDEX;
    do {
        BREAK_IF_ERR_LOG(!isOpened(), "decoder isn't openned");
        SInt64 frameIndex = INVALID_FRAME_INDEX;
        const OSStatus status = ExtAudioFileTell(_extRef, &frameIndex);
        BREAK_IF(status != noErr);
        currentFrame = static_cast<uint32_t>(frameIndex);
    } while (false);
    return currentFrame;
}
/** Gets total frames of current audio (0 when no file is open). */
uint32_t AudioDecoder::getTotalFrames() const {
    return _totalFrames;
}

/** Gets bytes per frame (2 bytes per sample * channel count). */
uint32_t AudioDecoder::getBytesPerFrame() const {
    return _bytesPerFrame;
}

/** Gets sample rate of the opened audio (0 when closed). */
uint32_t AudioDecoder::getSampleRate() const {
    return _sampleRate;
}

/** Gets the channel count (1 or 2; 0 when closed). */
uint32_t AudioDecoder::getChannelCount() const {
    return _channelCount;
}

/** True between a successful open() and the matching close(). */
bool AudioDecoder::isOpened() const {
    return _isOpened;
}
} // namespace cc

Some files were not shown because too many files have changed in this diff Show More