/*
* Copyright (C) 2017 Hisilicon Technologies Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vendor.huanglong.hardware.hwhlai@1.0;
/**
* ModelMngr service model
*/
interface IAiModelMngr {
/**
* buildModel:
* build an offline model: take the original model (described by
* originalPath) and write the built result to buildPath.
*
* @param originalPath model info describing the original model
* @param buildPath path where the built model is stored
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* ModelMngr instance is not available.
*/
buildModel(ModelInfo originalPath, string buildPath) generates(int32_t result);
/**
* start the ModelMngr engine service with models loaded from files.
*
* @param modelParaVec list of model descriptions (one per model file)
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* ModelMngr instance is not available.
*/
startModelFromFile(vec<ModelDescription> modelParaVec) generates(int32_t result);
/**
* start the ModelMngr engine service with models loaded from memory.
*
* @param modelBufferVec list of in-memory model buffers
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* ModelMngr instance is not available.
*/
startModelFromMem(vec<ModelBuffer> modelBufferVec) generates(int32_t result);
/**
* set the ModelMngr service input and output buffers for a model.
*
* @param modelname the model name the input and output data belong to
* @param nn_inputs the neural network input buffers
* @param nn_outputs the neural network output buffers
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* instance is not available.
*/
setInputsAndOutputs(string modelname, vec<AINeuralNetworkBuffer> nn_inputs, vec<AINeuralNetworkBuffer> nn_outputs) generates(int32_t result);
/**
* trigger the ModelMngr service computing on the buffers previously
* set via setInputsAndOutputs().
*
* @param modelname the model name of input and output data
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* instance is not available.
*/
startCompute(string modelname) generates(int32_t result);
/**
* run a previously started model on the given input data and write
* the results to the given output buffers.
*
* @param srcDataVec input data buffer handles
* @param srcTensorVec input tensor descriptions
* @param destDataVec output data buffer handles
* @param destTensorVec output tensor descriptions
* @param ulTimeout timeout for the computation
* @param selectModelName name of the model to run
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* instance is not available.
*/
runModel(vec<handle> srcDataVec, vec<TensorDescription> srcTensorVec, vec<handle> destDataVec, vec<TensorDescription> destTensorVec, uint32_t ulTimeout, string selectModelName) generates(int32_t result);
/**
* stop the ModelMngr engine service.
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* if the engine service does not exist.
*/
stopModel() generates(int32_t result);
/**
* register a new instance to the ModelMngr service.
*
* @param instanceID new instance ID to be registered to the ModelMngr service
* @param isSync indicates whether the following start/process/stop requests
* on this instance are synchronous or not
*
* @return status code for the operation, 0 on success, others when an error
* occurred; the value is the error code
**/
registerInstance(int32_t instanceID, bool isSync) generates(int32_t result);
/**
* unregister a previously registered instance from the ModelMngr service.
*
* @param instanceID an instanceID previously registered to the ModelMngr service
*
* @return status code for the operation, 0 on success, others when an error
* occurred; the value is the error code
**/
unregisterInstance(int32_t instanceID) generates(int32_t result);
/**
* request to start a series of new AI models with a list of ModelDescriptions.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelParaVec list of model descriptions
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* ModelMngr instance is not available.
**/
startModelFromFile2(int32_t instanceID, int32_t taskStamp, vec<ModelDescription> modelParaVec) generates(int32_t result);
/**
* request to start a series of new AI models with a list of ModelBuffers.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelBufferVec list of in-memory model buffers
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* ModelMngr instance is not available.
**/
startModelFromMem2(int32_t instanceID, int32_t taskStamp, vec<ModelBuffer> modelBufferVec) generates(int32_t result);
/**
* request to process input data with a previously started AI model.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param inDataVec input data buffers
* @param inTensorVec input tensor descriptions
* @param outDataVec output data buffers
* @param outTensorVec output tensor descriptions
* @param ulTimeout timeout for the computation
* @param selectModelName name of the model to run
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* instance is not available.
**/
runModel2(int32_t instanceID, int32_t taskStamp,
vec<DataInfo> inDataVec, vec<TensorDescription> inTensorVec,
vec<DataInfo> outDataVec, vec<TensorDescription> outTensorVec,
uint32_t ulTimeout, string selectModelName) generates(int32_t result);
/**
* request to stop the previously started AI models.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* if the engine service does not exist.
**/
stopModel2(int32_t instanceID, int32_t taskStamp) generates(int32_t result);
/**
* set the ModelMngr service input and output buffers for a model
* (per-instance variant of setInputsAndOutputs()).
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelname the model name the input and output data belong to
* @param nn_inputs the neural network input buffers
* @param nn_outputs the neural network output buffers
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* instance is not available.
**/
setInputsAndOutputs2(int32_t instanceID, int32_t taskStamp, string modelname,
vec<AINeuralNetworkBuffer> nn_inputs,
vec<AINeuralNetworkBuffer> nn_outputs) generates(int32_t result);
/**
* trigger the ModelMngr service computing
* (per-instance variant of startCompute()).
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelname the model name of input and output data
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* instance is not available.
**/
startCompute2(int32_t instanceID, int32_t taskStamp, string modelname) generates(int32_t result);
/**
* query the tensor description infos of a previously started AI model.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelname the model name of the model you want to query
*
* @return input list of input tensor description infos
* output list of output tensor description infos
**/
getModelTensor(int32_t instanceID, int32_t taskStamp, string modelname)
generates(vec<TensorDescription> input, vec<TensorDescription> output);
/**
* query the maximum memory cost on the current computing device.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
*
* @return status code for the operation, 0 on success, others when an error
* occurred; the value is the error code
**/
getMaxUsedMemory(int32_t instanceID, int32_t taskStamp) generates(int32_t result);
/**
* check if the model is valid.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
*
* @return status code for the operation, 0 on success, others when an error
* occurred; the value is the error code
**/
checkModelValid(int32_t instanceID, int32_t taskStamp) generates(int32_t result);
/**
* check if the model is compatible.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelBuf a model buffer
*
* @return status code for the operation, 0 on success, others when an error
* occurred; the value is the error code
**/
checkModelCompatibility(int32_t instanceID, int32_t taskStamp, ModelBuffer modelBuf) generates(int32_t result);
/**
* query the tensor description infos (V2 format) of a previously started
* AI model.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelname the model name of the model you want to query
*
* @return input list of input tensor description infos
* output list of output tensor description infos
**/
getModelTensorV2(int32_t instanceID, int32_t taskStamp, string modelname)
generates(vec<TensorDescriptionV2> input, vec<TensorDescriptionV2> output);
/**
* request to allocate memory with a list of parameters.
*
* @param id ID registered to MemoryManager service
* @param config config parameters of the memory request
* @param size size of the memory request
* @return status code for the request, 0 on success, others when an error
* occurred, and the handle of the allocated memory
**/
allocMemory(int32_t id, string config, int32_t size) generates(int32_t status, memory data);
/**
* request to free memory with a list of parameters.
*
* @param id ID registered to MemoryManager service
* @param config config parameters of the memory request
* @param data handle of the memory to free
* @return status code for the request, 0 on success, others when an error occurred
**/
freeMemory(int32_t id, string config, memory data) generates(int32_t status);
/**
* query AIPP info of a specific tensor, including AIPP nodes count and
* batch number.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelName the model name of the model you want to query
* @param index the index of the tensor
*
* @return batchNum batch number of the tensor at index
* aippNodesCount AIPP nodes count of the tensor at index
**/
getTensorAippInfo(int32_t instanceID, int32_t taskStamp, string modelName, uint32_t index)
generates(uint32_t batchNum, uint32_t aippNodesCount);
/**
* query the AIPP parameters configured in the model for a specific tensor.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelName the model name of the model you want to query
* @param index the index of the tensor
* @param paraBuff buffers to store the AIPP parameters
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* instance is not available.
**/
getTensorAippParas(int32_t instanceID, int32_t taskStamp, string modelName, uint32_t index, vec<handle> paraBuff)
generates(int32_t result);
/**
* request to process input data with a previously started AI model
* (dynamic AIPP version).
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param inDataVec input data buffers
* @param inTensorVec input tensor descriptions
* @param aippParaVec AIPP parameter buffers
* @param aippPositionVec AIPP positions
* @param outDataVec output data buffers
* @param outTensorVec output tensor descriptions
* @param ulTimeout timeout for the computation
* @param selectModelName name of the model to run
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* instance is not available.
**/
runAippModel(int32_t instanceID, int32_t taskStamp,
vec<DataInfo> inDataVec, vec<TensorDescription> inTensorVec,
vec<handle> aippParaVec, vec<AippPosition> aippPositionVec,
vec<DataInfo> outDataVec, vec<TensorDescription> outTensorVec,
uint32_t ulTimeout, string selectModelName) generates(int32_t result);
/**
* request to get the model tensor names.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelname the model name of the model you want to query
*
* @return input list of input tensor name infos
* output list of output tensor name infos
**/
getModelTensorName(int32_t instanceID, int32_t taskStamp, string modelname)
generates(vec<string> input, vec<string> output);
/**
* request to get the dynamic batch configuration of a model.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelname the model name of the model you want to query
*
* @return batch dynamic batch config
**/
getDynamicBatch(int32_t instanceID, int32_t taskStamp, string modelname)
generates(vec<int32_t> batch);
/**
* request to get the dynamic height/width configuration of a model.
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param modelname the model name of the model you want to query
*
* @return height dynamic height config
* width dynamic width config
**/
getDynamicHW(int32_t instanceID, int32_t taskStamp, string modelname)
generates(vec<int32_t> height, vec<int32_t> width);
/**
* request to process input data with a previously started AI model
* (dynamic batch or resolution version).
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param inDataVec input data buffers
* @param inTensorVec input tensor descriptions
* @param outDataVec output data buffers
* @param outTensorVec output tensor descriptions
* @param ulTimeout timeout for the computation
* @param selectModelName name of the model to run
* @param dynamicPara dynamic batch/resolution configuration
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* instance is not available.
**/
runDynamicModel(int32_t instanceID, int32_t taskStamp,
vec<DataInfo> inDataVec, vec<TensorDescription> inTensorVec,
vec<DataInfo> outDataVec, vec<TensorDescription> outTensorVec,
uint32_t ulTimeout, string selectModelName, DynamicConfig dynamicPara) generates(int32_t result);
/**
* request to process input data with a previously started AI model
* (dynamic AIPP and dynamic batch or resolution version).
*
* @param instanceID an instanceID previously registered to ModelMngr service
* @param taskStamp a cookie data for callback
* @param inDataVec input data buffers
* @param inTensorVec input tensor descriptions
* @param aippParaVec AIPP parameter buffers
* @param aippPositionVec AIPP positions
* @param outDataVec output data buffers
* @param outTensorVec output tensor descriptions
* @param ulTimeout timeout for the computation
* @param modelName name of the model to run
* @param dynamicPara dynamic batch/resolution configuration
*
* @return status code for the operation, one of:
* AI_OK:
* on success
* AI_ENOENT:
* An unexpected internal ModelMngr HAL error occurred, and the
* instance is not available.
**/
runAippDynamicModel(int32_t instanceID, int32_t taskStamp,
vec<DataInfo> inDataVec,
vec<TensorDescription> inTensorVec,
vec<handle> aippParaVec,
vec<AippPosition> aippPositionVec,
vec<DataInfo> outDataVec,
vec<TensorDescription> outTensorVec,
uint32_t ulTimeout, string modelName, DynamicConfig dynamicPara) generates(int32_t result);
/**
* request to map memory with a file descriptor and sync the pages.
*
* @param id ID registered to MemoryManager service
* @param offset offset of the memory request
* @param size size of the memory request
* @param data handle wrapping the file descriptor
* @return status code for the request, 0 on success, others when an error occurred
**/
createByFd(int32_t id, uint32_t offset, uint32_t size, handle data) generates(int32_t status);
};