Linux C++¶
开发环境搭建¶
Ubuntu18.04-x64系统
OpenCV = v3.4.1,仅sample和tool需要
运行前确保上位机网络处于正常连接状态
运行前确保上位机系统时间正常同步网络时间,否则可能导致License激活失败
采集数据运行前确保上位机正常连接摄像头,并确认已按照要求进行相关配置文件的配置,具体配置过程可参考对应Sample或工具源码同级目录下的readme
安装以下运行时依赖
sudo apt install libopenblas-dev
sudo apt install liblapack-dev
sudo apt install libx11-dev
sudo apt install libgoogle-perftools-dev
接入说明¶
您需要按照如下流程完成应用的开发工作。
License申请
使用算法SDK前需要申请对应的License,否则无法正常使用
集成基础SDK
用于Gemini2设备的控制和取流操作。
Orbbec SDK使用示例¶
获取 Orbbec SDK,完成摄像头 SDK 的初始化配置
formatConverFilter_.setFormatConvertType(FORMAT_MJPG_TO_BGR888);
//获取彩色相机的所有流配置,包括流的分辨率,帧率,以及帧的格式
auto colorProfiles = pipeline_.getStreamProfileList(OB_SENSOR_COLOR);
//通过接口设置感兴趣项,返回对应Profile列表的首个Profile
auto colorProfile = colorProfiles->getVideoStreamProfile(640, 480, OB_FORMAT_MJPG, 30);
if (!colorProfile) {
colorProfile = colorProfiles->getProfile(0)->as();
}
printf("colorProfile | type: %d, format:%d, width:%d, height:%d\n",
colorProfile->type(),
colorProfile->format(),
colorProfile->width(),
colorProfile->height());
//获取深度相机的所有流配置,包括流的分辨率,帧率,以及帧的格式
auto depthProfiles = pipeline_.getStreamProfileList(OB_SENSOR_DEPTH);
//通过接口设置感兴趣项,返回对应Profile列表的首个Profile
auto depthProfile = depthProfiles->getVideoStreamProfile(640, 400, OB_FORMAT_Y14, 30);
if (!depthProfile) {
depthProfile = depthProfiles->getProfile(0)->as();
}
printf("depthProfile | type: %d, format:%d, width:%d, height:%d\n",
depthProfile->type(),
depthProfile->format(),
depthProfile->width(),
depthProfile->height());
//通过创建Config来配置Pipeline要启用或者禁用哪些流,这里将启用彩色流和深度流
std::shared_ptr config = std::make_shared();
config->enableStream(colorProfile);
config->enableStream(depthProfile);
// 配置对齐模式为软件D2C对齐
config->setAlignMode(ALIGN_D2C_HW_MODE);
try {
pipeline_.enableFrameSync();
} catch (ob::Error &e) {
printf("pipeline_.enableFrameSync\n");
}
//启动在Config中配置的流,如果不传参数,将启动默认配置启动流
pipeline_.start(config);
// 设置0.1mm精度
pipeline_.getDevice()->setIntProperty(OB_PROP_DEPTH_PRECISION_LEVEL_INT, OB_PRECISION_0MM1);
if (pipeline_.getDevice()->isPropertySupported(OB_PROP_COLOR_MIRROR_BOOL, OB_PERMISSION_WRITE)) {
//设置镜像
pipeline_.getDevice()->setBoolProperty(OB_PROP_COLOR_MIRROR_BOOL, true);
}
if (pipeline_.getDevice()->isPropertySupported(OB_PROP_DEPTH_MIRROR_BOOL, OB_PERMISSION_WRITE)) {
//设置镜像
pipeline_.getDevice()->setBoolProperty(OB_PROP_DEPTH_MIRROR_BOOL, true);
}
获取摄像头内参
OBCameraParam cameraParam = pipeline_.getCameraParam();
printf("depthIntrinsic | fx:%f, fy:%f, cx:%f, cy:%f, width:%d, height:%d\n",
cameraParam.depthIntrinsic.fx,
cameraParam.depthIntrinsic.fy,
cameraParam.depthIntrinsic.cx,
cameraParam.depthIntrinsic.cy,
cameraParam.depthIntrinsic.width,
cameraParam.depthIntrinsic.height);
printf("rgbIntrinsic | fx:%f, fy:%f, cx:%f, cy:%f, width:%d, height:%d\n",
cameraParam.rgbIntrinsic.fx,
cameraParam.rgbIntrinsic.fy,
cameraParam.rgbIntrinsic.cx,
cameraParam.rgbIntrinsic.cy,
cameraParam.rgbIntrinsic.width,
cameraParam.rgbIntrinsic.height);
// D2C之后,RGB和DEPTH的内参是一样的,取哪个都可以
calibration_.fx = cameraParam.rgbIntrinsic.fx;
calibration_.fy = cameraParam.rgbIntrinsic.fy;
calibration_.cx = cameraParam.rgbIntrinsic.cx;
calibration_.cy = cameraParam.rgbIntrinsic.cy;
calibration_.color_width = cameraParam.rgbIntrinsic.width;
calibration_.color_height = cameraParam.rgbIntrinsic.height;
calibration_.depth_width = cameraParam.depthIntrinsic.width;
calibration_.depth_height = cameraParam.depthIntrinsic.height;
calibration_.depth_unit = FRC_DEPTH_100_UM;
摄像头sdk取流
// Block for up to SAMPLE_READ_WAIT_TIMEOUT ms waiting for a synced frame set.
auto frameSet = pipeline_.waitForFrames(SAMPLE_READ_WAIT_TIMEOUT);
if (frameSet == nullptr) {
    return Status::FAIL;
}
auto colorFrame = frameSet->colorFrame();
if (colorFrame) {
    // Convert MJPG -> BGR888 through the filter configured at start-up.
    // (Template argument restored here: the published snippet showed a bare
    // `->as();` because the angle brackets were stripped during formatting.)
    colorFrame = formatConverFilter_.process(colorFrame)->as<ob::ColorFrame>();
}
auto depthFrame = frameSet->depthFrame();
if (!colorFrame || !depthFrame) {
    return Status::FAIL;
}

// Wrap the raw frame buffers in algorithm-SDK Image objects.
// NOTE(review): the converter outputs BGR888 while the image is tagged
// FRC_IMAGE_FORMAT_COLOR_RGB24 -- confirm the expected channel order
// against the algorithm-SDK docs. The 0 argument is presumably the stride
// (derived from width/format when zero) -- TODO confirm.
Image color;
color.create(FRC_IMAGE_FORMAT_COLOR_RGB24,
             colorFrame->width(),
             colorFrame->height(),
             0,
             (const uint8_t *)colorFrame->data());
Image depth;
depth.create(FRC_IMAGE_FORMAT_DEPTH16,
             depthFrame->width(),
             depthFrame->height(),
             0,
             (const uint8_t *)depthFrame->data());

// Bundle the color/depth pair into a Capture for the scanner.
Capture capture;
capture.create();
capture.setColorImage(color);
capture.setDepthImage(depth);
调用本地重建
// Create a face Builder from the camera calibration parameters.
Builder faceBuilder;
if (FRC_STATUS_OK != faceBuilder.Create(caliParam)) {
printf("builder create failed !\n");
return;
}
// Feed all 11 captured keyframes to the builder, skipping slots that never
// received a valid rgb/depth pair.
bool hasFrontFace = false;
for (size_t i = 0; i < 11; i++) {
if (!rgbArr[i].isValid() || !depthArr[i].isValid()) {
continue;
}
Capture capture;
capture.create();
capture.setColorImage(rgbArr[i]);
capture.setDepthImage(depthArr[i]);
if (i != 5) {
// Non-front keyframe: clear its landmark buffer. Index 5 is the front-face
// keyframe and keeps its landmarks -- presumably only the front frame's
// landmarks are consumed downstream; confirm against the SDK docs.
memset((void *)keyFrameArr[i].landmark, 0, sizeof(keyFrameArr[i].landmark));
} else {
hasFrontFace = true;
}
Frame frame;
frame.create();
frame.setCapture(capture);
frame.setKeyFrame((frc_keyframe_t *)(&keyFrameArr[i]));
faceBuilder.AddKeyFrame(frame);
}
// Run reconstruction; a front-face keyframe (slot 5) is mandatory, so
// Process() is only attempted when one was collected.
frc_build_result_t buildResult;
auto processStatus = hasFrontFace && FRC_STATUS_OK == faceBuilder.Process(&buildResult);
if (!processStatus) {
printf("reconstruction failed!\n");
} else {
// Persist the textured model, then release the SDK-owned result buffers.
saveModelWithTexture(outDir_, buildResult);
frc_build_result_release(&buildResult);
printf("reconstruction success!\n");
}
调用云端重建
// Run the cloud reconstruction script on the captured data directory and
// return the resulting model location, read from <outDir>/url_path.txt
// which the script writes on success.
//
// SECURITY NOTE(review): `outDir` is interpolated into a shell command, so a
// path containing shell metacharacters can inject arbitrary commands.
// Validate/whitelist the path, or invoke the script without a shell.
void BuildByCloud(std::string outDir, cv::String &modelFilePath){
    // Build the command with std::string instead of sprintf into a fixed
    // 256-byte buffer: the original overflowed for long output paths.
    std::string command = "python3 cloud.py --data_path " + outDir + "\n";
    system(command.c_str());
    fstream urlFile(outDir + "/url_path.txt", ios::in);
    if(urlFile){
        string filePath;
        urlFile >> filePath;
        modelFilePath = cv::String(filePath);
        urlFile.close();
        // Remove the handshake file so a stale result is never re-read.
        remove((outDir + "/url_path.txt").c_str());
    }
}
集成算法SDK
算法SDK用于人脸重建
算法SDK使用示例¶
对SDK进行初始化
// Optional: instead of calling set_license() here, the license can be
// written into frc.properties.
if (FRC_STATUS_OK != frc::set_license(app_key,app_secret,auth_code)) {
return false;
}
// Initialize the algorithm SDK with its working directory; bail out if
// initialization does not succeed.
if (FRC_STATUS_OK != frc::initialize(args_.work_dir.c_str())) {
return false;
}
Scanner对象初始化
frc_calibration_t calibration_; // camera parameters
string workingdir_; // working directory
frc_scanner_configuration_t config_; // scanner configuration (controls front-face detection strictness)
config_.frontface_config = 0.3; // front-face detection parameter, valid range 0 ~ 0.5
Scanner facescanner_;
// Working_directory: the directory containing dependence.bin; defaults to
// the executable's directory when not specified.
if (FRC_STATUS_OK != facescanner_.create(config_,calibration_,Working_directory)) {
return false;
}
注:Working_directory:dependence.bin文件所在目录,用户若不指定,默认为可执行文件所在目录
Scanner处理每一帧数据流capture,并返回一个该帧处理结果frame
while (1) {
// grab() wraps the data stream read from the device into a Capture.
Capture capture;
if (!camera_.grab(capture)) {
continue;
}
// NOTE(review): `faceScanner_` here vs `facescanner_` in the init snippet --
// one of the two spellings is a typo; confirm against the real source.
Frame frame = faceScanner_.process(capture, scan_flag);
if (!frame.isValid()) {
cout << "invalid" << endl;
continue;
}
}
注: scan_flag==FRC_SCAN_FLAG_INIT; //未开始采集
scan_flag==FRC_SCAN_FLAG_START; //开始采集
解析每一帧处理结果Frame中的数据,用于结果保存和交互
int keyframe_num=0; //统计获取到的关键帧帧数
while (shouldContinue) {
//获取该帧结果frame里的非图片数据,是一个结构体
frc_keyframe_t key_frame = frame.getKeyFrame();
//根据该结构体里的status(枚举类型),用于做交互
switch (key_frame.status) {
case FRC_FRAME_STATUS_FRONT_FACE_KEYFRAME:
case FRC_FRAME_STATUS_OTHER_FACE_KEYFRAME: {
// write keyframe -img
Capture original_capture = frame.getCapture();
Image depth_image = original_capture.getDepthImage();
// 关键帧 rgb、depth数据及frame非图片数据
depthArray_[key_frame.id] = depth_image;
rgbArray_[key_frame.id] = color_image;
keyFrameArray_[key_frame.id] = key_frame;
if (scan_flag == FRC_SCAN_FLAG_START)
keyframe_num++;
if (key == ' ' && key_frame.status == FRC_FRAME_STATUS_FRONT_FACE_KEYFRAME) {
scan_flag = FRC_SCAN_FLAG_START;
keyframe_num++;
}
} break;
default:
break;
}
if (key == 27 ||keyframe_num==11) {
shouldContinue = false;
}
//重建
if(keyframe_num == 11){
Build3DModel(rgbArray_,depthArray_,keyFrameArray_,calibration_);
}
}
获取关键帧并获得结果
Builder faceBuilder;
if (FRC_STATUS_OK != faceBuilder.Create(caliParam)) {
LOG_ERROR("builder create failed !");
return;
}
bool hasFrontFace = false;
for (size_t i = 0; i < 11; i++) {
cout << "push frame:" << i << " " << __LINE__ << endl;
if (!rgbArr[i].isValid() || !depthArr[i].isValid() ) {
continue;
}
Capture capture;
capture.create();
capture.setColorImage(rgbArr[i]);
capture.setDepthImage(depthArr[i]);
if (i != 5) { // front face
memset((void*)keyFrameArr[i].landmark, 0, sizeof(keyFrameArr[i].landmark));
} else {
hasFrontFace = true;
}
Frame frame;
frame.create();
frame.setCapture(capture);
frame.setKeyFrame((frc_keyframe_t*)(&keyFrameArr[i]));
faceBuilder.AddKeyFrame(frame);
}
// process Frames
frc_build_result_t buildResult;
if (!hasFrontFace||FRC_STATUS_OK != faceBuilder.Process(&buildResult))
cout << "reconstruction failed!" << endl;
else {
cout << "start reconstruction!" << endl;
// save Model
saveModelWithTexture(outDir_, buildResult);
frc_build_result_release(&buildResult);
cout << "reconstruction success!" << endl;
}