This collection of C++ CV_ELEM_SIZE code examples is meant to be practical; we hope you find it useful.
This article collects typical usage examples of the C++ CV_ELEM_SIZE function. If you have been wondering exactly how CV_ELEM_SIZE is used in C++, or looking for real code that calls it, the hand-picked examples below may help. A total of 29 CV_ELEM_SIZE code examples are shown; by default they are sorted by popularity. You can upvote the examples you like or find useful, and your feedback helps the system recommend better C++ code examples. (A short standalone sketch of what the macro computes follows Example 1.)

Example 1: cvmat_remove_column

CvMat cvmat_remove_column(const CvMat*mat, int column)
{
    assert(column<mat->cols && column>=0);
    assert(mat->cols > 1);
    CvMat new_mat = cvMat(mat->rows, mat->cols-1, mat->type, malloc(mat->rows*mat->cols*8));
    int t;
    int size = CV_ELEM_SIZE(mat->type);
    for(t=0;t<mat->rows;t++) {
        int pos = 0;
        int s;
        for(s=0;s<mat->cols;s++) {
            if(s!=column) {
                memcpy(CV_MAT_ELEM_PTR(new_mat, t, pos),
                       CV_MAT_ELEM_PTR((*mat), t, s),
                       size);
                pos++;
            }
        }
    }
    return new_mat;
}
Developer ID: JackieXie168, Project: mrscake, Lines of code: 20
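Before the longer project examples, here is a minimal standalone sketch (not taken from any of the projects listed on this page) of what the macro computes: CV_ELEM_SIZE(type) returns the size in bytes of one matrix element, i.e. the number of channels times the byte size of the depth. It assumes only the legacy C API header opencv2/core/core_c.h.

#include <opencv2/core/core_c.h>
#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* element size = channels * sizeof(depth type) */
    assert(CV_ELEM_SIZE(CV_8UC1)  == 1);   /* 1 channel  * 1 byte */
    assert(CV_ELEM_SIZE(CV_8UC3)  == 3);   /* 3 channels * 1 byte, e.g. a BGR image */
    assert(CV_ELEM_SIZE(CV_32FC1) == 4);   /* 1 channel  * 4 bytes */
    assert(CV_ELEM_SIZE(CV_64FC2) == 16);  /* 2 channels * 8 bytes */

    /* typical use: packed row width and total packed size in bytes
       (the actual allocation may pad each row up to m->step bytes) */
    CvMat* m = cvCreateMat(480, 640, CV_8UC3);
    size_t row_bytes   = (size_t)m->cols * CV_ELEM_SIZE(m->type);
    size_t total_bytes = (size_t)m->rows * row_bytes;
    printf("row: %zu bytes, total: %zu bytes\n", row_bytes, total_bytes);
    cvReleaseMat(&m);
    return 0;
}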
Example 2: close

/* m_buf is only used in readHeader */
bool JpegDecoder::readHeader()
{
    bool result = false;
    close();
    JpegState* state = new JpegState;
    m_state = state;
    state->cinfo.err = jpeg_std_error(&state->jerr.pub);
    state->jerr.pub.error_exit = error_exit;

    if( setjmp( state->jerr.setjmp_buffer ) == 0 )
    {
        jpeg_create_decompress( &state->cinfo );

        if( !(m_buf.data.ptr == 0) )
        {
            jpeg_buffer_src(&state->cinfo, &state->source);
            state->source.pub.next_input_byte = m_buf.data.ptr;
            //state->source.pub.bytes_in_buffer = m_buf.cols*m_buf.rows*m_buf.elemSize();//+++CV_ELEM_SIZE
            state->source.pub.bytes_in_buffer = m_buf.cols*m_buf.rows*CV_ELEM_SIZE(m_buf.type);//+++CV_ELEM_SIZE
        }
        else
        {
            m_f = fopen( m_filename.c_str(), "rb" );
            if( m_f )
                jpeg_stdio_src( &state->cinfo, m_f );
        }

        jpeg_read_header( &state->cinfo, TRUE );

        m_width = state->cinfo.image_width;
        m_height = state->cinfo.image_height;
        m_type = state->cinfo.num_components > 1 ? CV_8UC3 : CV_8UC1;
        result = true;
    }

    if( !result )
        close();

    return result;
}
Developer ID: unix8net, Project: jpegReadOfOpencv, Lines of code: 44
Example 3: cvGetSubRect

// Selects sub-array (no data is copied)
CV_IMPL CvMat*
cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect )
{
    CvMat* res = 0;

    CV_FUNCNAME( "cvGetRect" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)arr;

    if( !CV_IS_MAT( mat ))
        CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !submat )
        CV_ERROR( CV_StsNullPtr, "" );

    if( (rect.x|rect.y|rect.width|rect.height) < 0 )
        CV_ERROR( CV_StsBadSize, "" );

    if( rect.x + rect.width > mat->cols ||
        rect.y + rect.height > mat->rows )
        CV_ERROR( CV_StsBadSize, "" );

    {
        submat->data.ptr = mat->data.ptr + (size_t)rect.y*mat->step +
                           rect.x*CV_ELEM_SIZE(mat->type);
        submat->step = mat->step & (rect.height > 1 ? -1 : 0);
        submat->type = (mat->type & (rect.width < mat->cols ? ~CV_MAT_CONT_FLAG : -1)) |
                       (submat->step == 0 ? CV_MAT_CONT_FLAG : 0);
        submat->rows = rect.height;
        submat->cols = rect.width;
        submat->refcount = 0;
        res = submat;
    }

    __END__;

    return res;
}
Developer ID: liangfu, Project: dnn, Lines of code: 42
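The pointer arithmetic in Example 3 (data.ptr + y*step + x*CV_ELEM_SIZE(type)) is the most common reason to reach for the macro. Here is a hedged sketch of the same idea, with a hypothetical helper name (not from the projects above), that reads the raw bytes of a single element; it assumes the legacy C API header and a valid (row, col) inside the matrix:

#include <opencv2/core/core_c.h>
#include <string.h>

/* Hypothetical helper: copy the element at (row, col) of 'mat' into 'dst';
   'dst' must provide at least CV_ELEM_SIZE(mat->type) bytes. */
static void get_elem_bytes(const CvMat* mat, int row, int col, void* dst)
{
    const int elem_size = CV_ELEM_SIZE(mat->type);
    const uchar* p = mat->data.ptr
                   + (size_t)row * mat->step
                   + (size_t)col * elem_size;
    memcpy(dst, p, elem_size);
}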
Example 4: CameraRGB

CameraRGB(std::string& propertyPrefix, const Ice::PropertiesPtr propIn)
    : prefix(propertyPrefix),
      imageFmt(),
      imageDescription(new jderobot::ImageDescription()),
      cameraDescription(new jderobot::CameraDescription()),
      replyTask()
{
    Ice::PropertiesPtr prop = propIn;

    //fill cameraDescription
    cameraDescription->name = prop->getProperty(prefix+"Name");
    if (cameraDescription->name.size() == 0)
        jderobot::Logger::getInstance()->warning( "Camera name not configured" );

    cameraDescription->shortDescription = prop->getProperty(prefix + "ShortDescription");

    //fill imageDescription
    imageDescription->width = prop->getPropertyAsIntWithDefault(prefix+"width",5);
    imageDescription->height = prop->getPropertyAsIntWithDefault(prefix+"height",5);
    int fps = prop->getPropertyAsIntWithDefault(prefix+"fps",5);

    //we use formats according to colorspaces
    std::string fmtStr = prop->getPropertyWithDefault(prefix+"Format","ImageRGB8");//default format YUY2
    imageFmt = colorspaces::Image::Format::searchFormat(fmtStr);
    if (!imageFmt)
        jderobot::Logger::getInstance()->warning( "Format " + fmtStr + " unknown" );
    imageDescription->size = imageDescription->width * imageDescription->height * CV_ELEM_SIZE(imageFmt->cvType);
    imageDescription->format = imageFmt->name;

    // Set the formats allowed
    mFormats.push_back(colorspaces::ImageRGB8::FORMAT_RGB8.get()->name);

    jderobot::Logger::getInstance()->info( "Starting thread for camera: " + cameraDescription->name );
    replyTask = new ReplyTask(this,fps, mFormats[0]);

    this->control=replyTask->start();//my own thread
}
Developer ID: AeroCano, Project: JdeRobot, Lines of code: 41
Example 5: cvGetSubRect_d

CvMat* cvGetSubRect_d( const CvArr* arr, CvMat* submat, CvRect rect )
{
    CvMat* res = 0;
    CvMat stub, *mat = (CvMat*)arr;

    if( !CV_IS_MAT( mat ))
        mat = cvGetMat( mat, &stub );

    if( !submat )
        CV_Error( CV_StsNullPtr, "" );

    if( (rect.x|rect.y|rect.width|rect.height) < 0 )
        CV_Error( CV_StsBadSize, "" );

    if( rect.x + rect.width > mat->cols || rect.y + rect.height > mat->rows )
        CV_Error( CV_StsBadSize, "" );

    {
        /*
        int* refcount = mat->refcount;

        if( refcount )
            ++*refcount;

        cvDecRefData( submat );
        */
        submat->data.ptr = mat->data.ptr + (size_t)rect.y*mat->step +
                           rect.x*CV_ELEM_SIZE(mat->type);
        submat->step = mat->step;
        submat->type = (mat->type & (rect.width < mat->cols ? ~CV_MAT_CONT_FLAG : -1)) |
                       (rect.height <= 1 ? CV_MAT_CONT_FLAG : 0);
        submat->rows = rect.height;
        submat->cols = rect.width;
        submat->refcount = 0;
        res = submat;
    }

    return res;
}
Developer ID: skt041959, Project: hengshi_track_camera, Lines of code: 40
Example 6: cvPointSeqFromMat

CV_IMPL CvSeq*
cvPointSeqFromMat( int seq_kind, const CvArr* arr,
                   CvContour* contour_header, CvSeqBlock* block )
{
    CvSeq* contour = 0;

    CV_FUNCNAME( "cvPointSeqFromMat" );

    assert( arr != 0 && contour_header != 0 && block != 0 );

    __BEGIN__;

    int eltype;
    CvMat* mat = (CvMat*)arr;

    if( !CV_IS_MAT( mat ))
        CV_ERROR( CV_StsBadArg, "Input array is not a valid matrix" );

    eltype = CV_MAT_TYPE( mat->type );
    if( eltype != CV_32SC2 && eltype != CV_32FC2 )
        CV_ERROR( CV_StsUnsupportedFormat,
        "The matrix can not be converted to point sequence because of "
        "inappropriate element type" );

    if( (mat->width != 1 && mat->height != 1) || !CV_IS_MAT_CONT(mat->type))
        CV_ERROR( CV_StsBadArg,
        "The matrix converted to point sequence must be "
        "1-dimensional and continuous" );

    CV_CALL( cvMakeSeqHeaderForArray(
            (seq_kind & (CV_SEQ_KIND_MASK|CV_SEQ_FLAG_CLOSED)) | eltype,
            sizeof(CvContour), CV_ELEM_SIZE(eltype), mat->data.ptr,
            mat->width*mat->height, (CvSeq*)contour_header, block ));

    contour = (CvSeq*)contour_header;

    __END__;

    return contour;
}
Developer ID: allanca, Project: otterdive, Lines of code: 39
Example 7: cvarrToMat

Mat cvarrToMat(const CvArr* arr, bool copyData,
               bool /*allowND*/, int coiMode, AutoBuffer<double>* abuf )
{
    if( !arr )
        return Mat();
    if( CV_IS_MAT_HDR_Z(arr) )
        return cvMatToMat((const CvMat*)arr, copyData);
    if( CV_IS_MATND(arr) )
        return cvMatNDToMat((const CvMatND*)arr, copyData );
    if( CV_IS_IMAGE(arr) )
    {
        const IplImage* iplimg = (const IplImage*)arr;
        if( coiMode == 0 && iplimg->roi && iplimg->roi->coi > 0 )
            CV_Error(CV_BadCOI, "COI is not supported by the function");
        return iplImageToMat(iplimg, copyData);
    }
    if( CV_IS_SEQ(arr) )
    {
        CvSeq* seq = (CvSeq*)arr;
        int total = seq->total, type = CV_MAT_TYPE(seq->flags), esz = seq->elem_size;
        if( total == 0 )
            return Mat();
        CV_Assert(total > 0 && CV_ELEM_SIZE(seq->flags) == esz);
        if(!copyData && seq->first->next == seq->first)
            return Mat(total, 1, type, seq->first->data);
        if( abuf )
        {
            abuf->allocate(((size_t)total*esz + sizeof(double)-1)/sizeof(double));
            double* bufdata = abuf->data();
            cvCvtSeqToArray(seq, bufdata, CV_WHOLE_SEQ);
            return Mat(total, 1, type, bufdata);
        }
        Mat buf(total, 1, type);
        cvCvtSeqToArray(seq, buf.ptr(), CV_WHOLE_SEQ);
        return buf;
    }
    CV_Error(CV_StsBadArg, "Unknown array type");
}
Developer ID: AliMiraftab, Project: opencv, Lines of code: 39
Example 8: cvArrPrint

void cvArrPrint(CvArr * arr)
{
    CvMat * mat;
    CvMat stub;

    mat = cvGetMat(arr, &stub);

    int cn = CV_MAT_CN(mat->type);
    int depth = CV_MAT_DEPTH(mat->type);
    int step = MAX(mat->step, cn*mat->cols*CV_ELEM_SIZE(depth));

    switch(depth){
        case CV_8U:
            cv_arr_write(stdout, "%u", (uchar *)mat->data.ptr, mat->rows, cn, step);
            break;
        case CV_8S:
            cv_arr_write(stdout, "%d", (char *)mat->data.ptr, mat->rows, cn, step);
            break;
        case CV_16U:
            cv_arr_write(stdout, "%u", (ushort *)mat->data.ptr, mat->rows, cn, step);
            break;
        case CV_16S:
            cv_arr_write(stdout, "%d", (short *)mat->data.ptr, mat->rows, cn, step);
            break;
        case CV_32S:
            cv_arr_write(stdout, "%d", (int *)mat->data.ptr, mat->rows, cn, step);
            break;
        case CV_32F:
            cv_arr_write(stdout, "%f", (float *)mat->data.ptr, mat->rows, cn, step);
            break;
        case CV_64F:
            cv_arr_write(stdout, "%g", (double *)mat->data.ptr, mat->rows, cn, step);
            break;
        default:
            CV_Error( CV_StsError, "Unknown element type");
            break;
    }
}
Developer ID: Avatarchik, Project: EmguCV-Unity, Lines of code: 38
Example 9: cvSampleLine

CV_IMPL int
cvSampleLine( const void* img, CvPoint pt1, CvPoint pt2,
              void* _buffer, int connectivity )
{
    int count = -1;

    CV_FUNCNAME( "cvSampleLine" );

    __BEGIN__;

    int i, coi = 0, pix_size;
    CvMat stub, *mat = (CvMat*)img;
    CvLineIterator iterator;
    uchar* buffer = (uchar*)_buffer;

    CV_CALL( mat = cvGetMat( mat, &stub, &coi ));

    if( coi != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !buffer )
        CV_ERROR( CV_StsNullPtr, "" );

    CV_CALL( count = cvInitLineIterator( mat, pt1, pt2, &iterator, connectivity ));

    pix_size = CV_ELEM_SIZE(mat->type);
    for( i = 0; i < count; i++ )
    {
        for( int j = 0; j < pix_size; j++ )
            buffer[j] = iterator.ptr[j];
        buffer += pix_size;
        CV_NEXT_LINE_POINT( iterator );
    }

    __END__;

    return count;
}
Developer ID: allanca, Project: otterdive, Lines of code: 38
Example 10: cvDeInterlace

CV_IMPL void
cvDeInterlace( const CvArr* framearr, CvArr* fieldEven, CvArr* fieldOdd )
{
    CV_FUNCNAME("cvDeInterlace");

    __BEGIN__;

    CvMat frame_stub, *frame = (CvMat*)framearr;
    CvMat even_stub, *even = (CvMat*)fieldEven;
    CvMat odd_stub, *odd = (CvMat*)fieldOdd;
    CvSize size;
    int y;

    CV_CALL( frame = cvGetMat( frame, &frame_stub ));
    CV_CALL( even = cvGetMat( even, &even_stub ));
    CV_CALL( odd = cvGetMat( odd, &odd_stub ));

    if( !CV_ARE_TYPES_EQ( frame, even ) || !CV_ARE_TYPES_EQ( frame, odd ))
        CV_ERROR( CV_StsUnmatchedFormats, "All the input images must have the same type" );

    if( frame->cols != even->cols || frame->cols != odd->cols ||
        frame->rows != even->rows*2 || odd->rows != even->rows )
        CV_ERROR( CV_StsUnmatchedSizes, "Uncorrelated sizes of the input image and output fields" );

    size = cvGetMatSize( even );
    size.width *= CV_ELEM_SIZE( even->type );

    for( y = 0; y < size.height; y++ )
    {
        memcpy( even->data.ptr + even->step*y,
                frame->data.ptr + frame->step*y*2, size.width );
        memcpy( odd->data.ptr + even->step*y,
                frame->data.ptr + frame->step*(y*2+1), size.width );
    }

    __END__;
}
Developer ID: 273k, Project: OpenCV-Android, Lines of code: 37
示例11: cvCreateCrossValidationEstimateModel// This function create cross-validation EstimateModel.ML_IMPL CvStatModel*cvCreateCrossValidationEstimateModel( int samples_all, const CvStatModelParams* estimateParams, const CvMat* sampleIdx){ CvStatModel* model = NULL; CvCrossValidationModel* crVal = NULL; CV_FUNCNAME ("cvCreateCrossValidationEstimateModel"); __BEGIN__ int k_fold = 10; int i, j, k, s_len; int samples_selected; CvRNG rng; CvRNG* prng; int* res_s_data; int* te_s_data; int* folds; rng = cvRNG(cvGetTickCount()); cvRandInt (&rng); cvRandInt (&rng); cvRandInt (&rng); cvRandInt (&rng);// Check input parameters. if (estimateParams) k_fold = ((CvCrossValidationParams*)estimateParams)->k_fold; if (!k_fold) { CV_ERROR (CV_StsBadArg, "Error in parameters of cross-validation (k_fold == 0)!"); } if (samples_all <= 0) { CV_ERROR (CV_StsBadArg, "<samples_all> should be positive!"); }// Alloc memory and fill standart StatModel's fields. CV_CALL (crVal = (CvCrossValidationModel*)cvCreateStatModel ( CV_STAT_MODEL_MAGIC_VAL | CV_CROSSVAL_MAGIC_VAL, sizeof(CvCrossValidationModel), cvReleaseCrossValidationModel, NULL, NULL)); crVal->current_fold = -1; crVal->folds_all = k_fold; if (estimateParams && ((CvCrossValidationParams*)estimateParams)->is_regression) crVal->is_regression = 1; else crVal->is_regression = 0; if (estimateParams && ((CvCrossValidationParams*)estimateParams)->rng) prng = ((CvCrossValidationParams*)estimateParams)->rng; else prng = &rng; // Check and preprocess sample indices. if (sampleIdx) { int s_step; int s_type = 0; if (!CV_IS_MAT (sampleIdx)) CV_ERROR (CV_StsBadArg, "Invalid sampleIdx array"); if (sampleIdx->rows != 1 && sampleIdx->cols != 1) CV_ERROR (CV_StsBadSize, "sampleIdx array must be 1-dimensional"); s_len = sampleIdx->rows + sampleIdx->cols - 1; s_step = sampleIdx->rows == 1 ? 1 : sampleIdx->step / CV_ELEM_SIZE(sampleIdx->type); s_type = CV_MAT_TYPE (sampleIdx->type); switch (s_type) { case CV_8UC1: case CV_8SC1: { uchar* s_data = sampleIdx->data.ptr; // sampleIdx is array of 1's and 0's - // i.e. it is a mask of the selected samples if( s_len != samples_all ) CV_ERROR (CV_StsUnmatchedSizes, "Sample mask should contain as many elements as the total number of samples"); samples_selected = 0; for (i = 0; i < s_len; i++) samples_selected += s_data[i * s_step] != 0; if (samples_selected == 0) CV_ERROR (CV_StsOutOfRange, "No samples is selected!"); } s_len = samples_selected; break; case CV_32SC1: if (s_len > samples_all) CV_ERROR (CV_StsOutOfRange,//.........这里部分代码省略.........
Developer ID: cybertk, Project: opencv, Lines of code: 101
示例12: cvFloodFillCV_IMPL voidcvFloodFill( CvArr* arr, CvPoint seed_point, CvScalar newVal, CvScalar lo_diff, CvScalar up_diff, CvConnectedComp* comp, int flags, CvArr* maskarr ){ cv::Ptr<CvMat> tempMask; std::vector<CvFFillSegment> buffer; if( comp ) memset( comp, 0, sizeof(*comp) ); int i, type, depth, cn, is_simple; int buffer_size, connectivity = flags & 255; union { uchar b[4]; int i[4]; float f[4]; double _[4]; } nv_buf; nv_buf._[0] = nv_buf._[1] = nv_buf._[2] = nv_buf._[3] = 0; struct { cv::Vec3b b; cv::Vec3i i; cv::Vec3f f; } ld_buf, ud_buf; CvMat stub, *img = cvGetMat(arr, &stub); CvMat maskstub, *mask = (CvMat*)maskarr; CvSize size; type = CV_MAT_TYPE( img->type ); depth = CV_MAT_DEPTH(type); cn = CV_MAT_CN(type); if( connectivity == 0 ) connectivity = 4; else if( connectivity != 4 && connectivity != 8 ) CV_Error( CV_StsBadFlag, "Connectivity must be 4, 0(=4) or 8" ); is_simple = mask == 0 && (flags & CV_FLOODFILL_MASK_ONLY) == 0; for( i = 0; i < cn; i++ ) { if( lo_diff.val[i] < 0 || up_diff.val[i] < 0 ) CV_Error( CV_StsBadArg, "lo_diff and up_diff must be non-negative" ); is_simple &= fabs(lo_diff.val[i]) < DBL_EPSILON && fabs(up_diff.val[i]) < DBL_EPSILON; } size = cvGetMatSize( img ); if( (unsigned)seed_point.x >= (unsigned)size.width || (unsigned)seed_point.y >= (unsigned)size.height ) CV_Error( CV_StsOutOfRange, "Seed point is outside of image" ); cvScalarToRawData( &newVal, &nv_buf, type, 0 ); buffer_size = MAX( size.width, size.height ) * 2; buffer.resize( buffer_size ); if( is_simple ) { int elem_size = CV_ELEM_SIZE(type); const uchar* seed_ptr = img->data.ptr + img->step*seed_point.y + elem_size*seed_point.x; for(i = 0; i < elem_size; i++) if (seed_ptr[i] != nv_buf.b[i]) break; if (i != elem_size) { if( type == CV_8UC1 ) icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, nv_buf.b[0], comp, flags, &buffer); else if( type == CV_8UC3 ) icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, cv::Vec3b(nv_buf.b), comp, flags, &buffer); else if( type == CV_32SC1 ) icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, nv_buf.i[0], comp, flags, &buffer); else if( type == CV_32FC1 ) icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, nv_buf.f[0], comp, flags, &buffer); else if( type == CV_32SC3 ) icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, cv::Vec3i(nv_buf.i), comp, flags, &buffer); else if( type == CV_32FC3 ) icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, cv::Vec3f(nv_buf.f), comp, flags, &buffer); else CV_Error( CV_StsUnsupportedFormat, "" ); return; } } if( !mask ) { /* created mask will be 8-byte aligned */ tempMask = cvCreateMat( size.height + 2, (size.width + 9) & -8, CV_8UC1 ); mask = tempMask; } else { mask = cvGetMat( mask, &maskstub ); if( !CV_IS_MASK_ARR( mask )) CV_Error( CV_StsBadMask, "" );//.........这里部分代码省略.........
Developer ID: DevShah, Project: 18551, Lines of code: 101
示例13: cvPreCornerDetectCV_IMPL voidcvPreCornerDetect( const void* srcarr, void* dstarr, int aperture_size ){ CvSepFilter dx_filter, dy_filter, d2x_filter, d2y_filter, dxy_filter; CvMat *Dx = 0, *Dy = 0, *D2x = 0, *D2y = 0, *Dxy = 0; CvMat *tempsrc = 0; int buf_size = 1 << 12; CV_FUNCNAME( "cvPreCornerDetect" ); __BEGIN__; int i, j, y, dst_y = 0, max_dy, delta = 0; int temp_step = 0, d_step; uchar* shifted_ptr = 0; int depth, d_depth; int stage = CV_START; CvSobelFixedIPPFunc ipp_sobel_vert = 0, ipp_sobel_horiz = 0, ipp_sobel_vert_second = 0, ipp_sobel_horiz_second = 0, ipp_sobel_cross = 0; CvSize el_size, size, stripe_size; int aligned_width; CvPoint el_anchor; double factor; CvMat stub, *src = (CvMat*)srcarr; CvMat dststub, *dst = (CvMat*)dstarr; bool use_ipp = false; CV_CALL( src = cvGetMat( srcarr, &stub )); CV_CALL( dst = cvGetMat( dst, &dststub )); if( CV_MAT_TYPE(src->type) != CV_8UC1 && CV_MAT_TYPE(src->type) != CV_32FC1 || CV_MAT_TYPE(dst->type) != CV_32FC1 ) CV_ERROR( CV_StsUnsupportedFormat, "Input must be 8uC1 or 32fC1, output must be 32fC1" ); if( !CV_ARE_SIZES_EQ( src, dst )) CV_ERROR( CV_StsUnmatchedSizes, "" ); if( aperture_size == CV_SCHARR ) CV_ERROR( CV_StsOutOfRange, "CV_SCHARR is not supported by this function" ); if( aperture_size < 3 || aperture_size > 7 || !(aperture_size & 1) ) CV_ERROR( CV_StsOutOfRange, "Derivative filter aperture size must be 3, 5 or 7" ); depth = CV_MAT_DEPTH(src->type); d_depth = depth == CV_8U ? CV_16S : CV_32F; size = cvGetMatSize(src); aligned_width = cvAlign(size.width, 4); el_size = cvSize( aperture_size, aperture_size ); el_anchor = cvPoint( aperture_size/2, aperture_size/2 ); if( aperture_size <= 5 && icvFilterSobelVert_8u16s_C1R_p ) { if( depth == CV_8U ) { ipp_sobel_vert = icvFilterSobelVert_8u16s_C1R_p; ipp_sobel_horiz = icvFilterSobelHoriz_8u16s_C1R_p; ipp_sobel_vert_second = icvFilterSobelVertSecond_8u16s_C1R_p; ipp_sobel_horiz_second = icvFilterSobelHorizSecond_8u16s_C1R_p; ipp_sobel_cross = icvFilterSobelCross_8u16s_C1R_p; } else if( depth == CV_32F ) { ipp_sobel_vert = icvFilterSobelVert_32f_C1R_p; ipp_sobel_horiz = icvFilterSobelHoriz_32f_C1R_p; ipp_sobel_vert_second = icvFilterSobelVertSecond_32f_C1R_p; ipp_sobel_horiz_second = icvFilterSobelHorizSecond_32f_C1R_p; ipp_sobel_cross = icvFilterSobelCross_32f_C1R_p; } } if( ipp_sobel_vert && ipp_sobel_horiz && ipp_sobel_vert_second && ipp_sobel_horiz_second && ipp_sobel_cross ) { CV_CALL( tempsrc = icvIPPFilterInit( src, buf_size, el_size )); shifted_ptr = tempsrc->data.ptr + el_anchor.y*tempsrc->step + el_anchor.x*CV_ELEM_SIZE(depth); temp_step = tempsrc->step ? tempsrc->step : CV_STUB_STEP; max_dy = tempsrc->rows - aperture_size + 1; use_ipp = true; } else { ipp_sobel_vert = ipp_sobel_horiz = 0; ipp_sobel_vert_second = ipp_sobel_horiz_second = ipp_sobel_cross = 0; dx_filter.init_deriv( size.width, depth, d_depth, 1, 0, aperture_size ); dy_filter.init_deriv( size.width, depth, d_depth, 0, 1, aperture_size ); d2x_filter.init_deriv( size.width, depth, d_depth, 2, 0, aperture_size ); d2y_filter.init_deriv( size.width, depth, d_depth, 0, 2, aperture_size ); dxy_filter.init_deriv( size.width, depth, d_depth, 1, 1, aperture_size ); max_dy = buf_size / src->cols; max_dy = MAX( max_dy, aperture_size ); } CV_CALL( Dx = cvCreateMat( max_dy, aligned_width, d_depth )); CV_CALL( Dy = cvCreateMat( max_dy, aligned_width, d_depth ));//.........这里部分代码省略.........
Developer ID: cybertk, Project: opencv, Lines of code: 101
示例14: icvCornerEigenValsVecsstatic voidicvCornerEigenValsVecs( const CvMat* src, CvMat* eigenv, int block_size, int aperture_size, int op_type, double k=0. ){ CvSepFilter dx_filter, dy_filter; CvBoxFilter blur_filter; CvMat *tempsrc = 0; CvMat *Dx = 0, *Dy = 0, *cov = 0; CvMat *sqrt_buf = 0; int buf_size = 1 << 12; CV_FUNCNAME( "icvCornerEigenValsVecs" ); __BEGIN__; int i, j, y, dst_y = 0, max_dy, delta = 0; int aperture_size0 = aperture_size; int temp_step = 0, d_step; uchar* shifted_ptr = 0; int depth, d_depth; int stage = CV_START; CvSobelFixedIPPFunc ipp_sobel_vert = 0, ipp_sobel_horiz = 0; CvFilterFixedIPPFunc ipp_scharr_vert = 0, ipp_scharr_horiz = 0; CvSize el_size, size, stripe_size; int aligned_width; CvPoint el_anchor; double factorx, factory; bool use_ipp = false; if( block_size < 3 || !(block_size & 1) ) CV_ERROR( CV_StsOutOfRange, "averaging window size must be an odd number >= 3" ); if( aperture_size < 3 && aperture_size != CV_SCHARR || !(aperture_size & 1) ) CV_ERROR( CV_StsOutOfRange, "Derivative filter aperture size must be a positive odd number >=3 or CV_SCHARR" ); depth = CV_MAT_DEPTH(src->type); d_depth = depth == CV_8U ? CV_16S : CV_32F; size = cvGetMatSize(src); aligned_width = cvAlign(size.width, 4); aperture_size = aperture_size == CV_SCHARR ? 3 : aperture_size; el_size = cvSize( aperture_size, aperture_size ); el_anchor = cvPoint( aperture_size/2, aperture_size/2 ); if( aperture_size <= 5 && icvFilterSobelVert_8u16s_C1R_p ) { if( depth == CV_8U && aperture_size0 == CV_SCHARR ) { ipp_scharr_vert = icvFilterScharrVert_8u16s_C1R_p; ipp_scharr_horiz = icvFilterScharrHoriz_8u16s_C1R_p; } else if( depth == CV_32F && aperture_size0 == CV_SCHARR ) { ipp_scharr_vert = icvFilterScharrVert_32f_C1R_p; ipp_scharr_horiz = icvFilterScharrHoriz_32f_C1R_p; } else if( depth == CV_8U ) { ipp_sobel_vert = icvFilterSobelVert_8u16s_C1R_p; ipp_sobel_horiz = icvFilterSobelHoriz_8u16s_C1R_p; } else if( depth == CV_32F ) { ipp_sobel_vert = icvFilterSobelVert_32f_C1R_p; ipp_sobel_horiz = icvFilterSobelHoriz_32f_C1R_p; } } if( ipp_sobel_vert && ipp_sobel_horiz || ipp_scharr_vert && ipp_scharr_horiz ) { CV_CALL( tempsrc = icvIPPFilterInit( src, buf_size, cvSize(el_size.width,el_size.height + block_size))); shifted_ptr = tempsrc->data.ptr + el_anchor.y*tempsrc->step + el_anchor.x*CV_ELEM_SIZE(depth); temp_step = tempsrc->step ? tempsrc->step : CV_STUB_STEP; max_dy = tempsrc->rows - aperture_size + 1; use_ipp = true; } else { ipp_sobel_vert = ipp_sobel_horiz = 0; ipp_scharr_vert = ipp_scharr_horiz = 0; CV_CALL( dx_filter.init_deriv( size.width, depth, d_depth, 1, 0, aperture_size0 )); CV_CALL( dy_filter.init_deriv( size.width, depth, d_depth, 0, 1, aperture_size0 )); max_dy = buf_size / src->cols; max_dy = MAX( max_dy, aperture_size + block_size ); } CV_CALL( Dx = cvCreateMat( max_dy, aligned_width, d_depth )); CV_CALL( Dy = cvCreateMat( max_dy, aligned_width, d_depth )); CV_CALL( cov = cvCreateMat( max_dy + block_size + 1, size.width, CV_32FC3 )); CV_CALL( sqrt_buf = cvCreateMat( 2, size.width, CV_32F )); Dx->cols = Dy->cols = size.width; if( !use_ipp )//.........这里部分代码省略.........
Developer ID: cybertk, Project: opencv, Lines of code: 101
示例15: cvMinAreaRect2CV_IMPL CvBox2DcvMinAreaRect2( const CvArr* array, CvMemStorage* storage ){ CvMemStorage* temp_storage = 0; CvBox2D box; CvPoint2D32f* points = 0; CV_FUNCNAME( "cvMinAreaRect2" ); memset(&box, 0, sizeof(box)); __BEGIN__; int i, n; CvSeqReader reader; CvContour contour_header; CvSeqBlock block; CvSeq* ptseq = (CvSeq*)array; CvPoint2D32f out[3]; if( CV_IS_SEQ(ptseq) ) { if( !CV_IS_SEQ_POINT_SET(ptseq) && (CV_SEQ_KIND(ptseq) != CV_SEQ_KIND_CURVE || !CV_IS_SEQ_CONVEX(ptseq) || CV_SEQ_ELTYPE(ptseq) != CV_SEQ_ELTYPE_PPOINT )) CV_ERROR( CV_StsUnsupportedFormat, "Input sequence must consist of 2d points or pointers to 2d points" ); if( !storage ) storage = ptseq->storage; } else { CV_CALL( ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block )); } if( storage ) { CV_CALL( temp_storage = cvCreateChildMemStorage( storage )); } else { CV_CALL( temp_storage = cvCreateMemStorage(1 << 10)); } if( !CV_IS_SEQ_CONVEX( ptseq )) { CV_CALL( ptseq = cvConvexHull2( ptseq, temp_storage, CV_CLOCKWISE, 1 )); } else if( !CV_IS_SEQ_POINT_SET( ptseq )) { CvSeqWriter writer; if( !CV_IS_SEQ(ptseq->v_prev) || !CV_IS_SEQ_POINT_SET(ptseq->v_prev)) CV_ERROR( CV_StsBadArg, "Convex hull must have valid pointer to point sequence stored in v_prev" ); cvStartReadSeq( ptseq, &reader ); cvStartWriteSeq( CV_SEQ_KIND_CURVE|CV_SEQ_FLAG_CONVEX|CV_SEQ_ELTYPE(ptseq->v_prev), sizeof(CvContour), CV_ELEM_SIZE(ptseq->v_prev->flags), temp_storage, &writer ); for( i = 0; i < ptseq->total; i++ ) { CvPoint pt = **(CvPoint**)(reader.ptr); CV_WRITE_SEQ_ELEM( pt, writer ); } ptseq = cvEndWriteSeq( &writer ); } n = ptseq->total; CV_CALL( points = (CvPoint2D32f*)cvAlloc( n*sizeof(points[0]) )); cvStartReadSeq( ptseq, &reader ); if( CV_SEQ_ELTYPE( ptseq ) == CV_32SC2 ) { for( i = 0; i < n; i++ ) { CvPoint pt; CV_READ_SEQ_ELEM( pt, reader ); points[i].x = (float)pt.x; points[i].y = (float)pt.y; } } else { for( i = 0; i < n; i++ ) { CV_READ_SEQ_ELEM( points[i], reader ); } } if( n > 2 ) { icvRotatingCalipers( points, n, CV_CALIPERS_MINAREARECT, (float*)out ); box.center.x = out[0].x + (out[1].x + out[2].x)*0.5f; box.center.y = out[0].y + (out[1].y + out[2].y)*0.5f; box.size.height = (float)sqrt((double)out[1].x*out[1].x + (double)out[1].y*out[1].y); box.size.width = (float)sqrt((double)out[2].x*out[2].x + (double)out[2].y*out[2].y);//.........这里部分代码省略.........
Developer ID: 273k, Project: OpenCV-Android, Lines of code: 101
示例16: crossCorrvoid crossCorr( const Mat& img, const Mat& _templ, Mat& corr, Size corrsize, int ctype, Point anchor, double delta, int borderType ){ const double blockScale = 4.5; const int minBlockSize = 256; std::vector<uchar> buf; Mat templ = _templ; int depth = img.depth(), cn = img.channels(); int tdepth = templ.depth(), tcn = templ.channels(); int cdepth = CV_MAT_DEPTH(ctype), ccn = CV_MAT_CN(ctype); CV_Assert( img.dims <= 2 && templ.dims <= 2 && corr.dims <= 2 ); if( depth != tdepth && tdepth != std::max(CV_32F, depth) ) { _templ.convertTo(templ, std::max(CV_32F, depth)); tdepth = templ.depth(); } CV_Assert( depth == tdepth || tdepth == CV_32F); CV_Assert( corrsize.height <= img.rows + templ.rows - 1 && corrsize.width <= img.cols + templ.cols - 1 ); CV_Assert( ccn == 1 || delta == 0 ); corr.create(corrsize, ctype); int maxDepth = depth > CV_8S ? CV_64F : std::max(std::max(CV_32F, tdepth), cdepth); Size blocksize, dftsize; blocksize.width = cvRound(templ.cols*blockScale); blocksize.width = std::max( blocksize.width, minBlockSize - templ.cols + 1 ); blocksize.width = std::min( blocksize.width, corr.cols ); blocksize.height = cvRound(templ.rows*blockScale); blocksize.height = std::max( blocksize.height, minBlockSize - templ.rows + 1 ); blocksize.height = std::min( blocksize.height, corr.rows ); dftsize.width = std::max(getOptimalDFTSize(blocksize.width + templ.cols - 1), 2); dftsize.height = getOptimalDFTSize(blocksize.height + templ.rows - 1); if( dftsize.width <= 0 || dftsize.height <= 0 ) CV_Error( CV_StsOutOfRange, "the input arrays are too big" ); // recompute block size blocksize.width = dftsize.width - templ.cols + 1; blocksize.width = MIN( blocksize.width, corr.cols ); blocksize.height = dftsize.height - templ.rows + 1; blocksize.height = MIN( blocksize.height, corr.rows ); Mat dftTempl( dftsize.height*tcn, dftsize.width, maxDepth ); Mat dftImg( dftsize, maxDepth ); int i, k, bufSize = 0; if( tcn > 1 && tdepth != maxDepth ) bufSize = templ.cols*templ.rows*CV_ELEM_SIZE(tdepth); if( cn > 1 && depth != maxDepth ) bufSize = std::max( bufSize, (blocksize.width + templ.cols - 1)* (blocksize.height + templ.rows - 1)*CV_ELEM_SIZE(depth)); if( (ccn > 1 || cn > 1) && cdepth != maxDepth ) bufSize = std::max( bufSize, blocksize.width*blocksize.height*CV_ELEM_SIZE(cdepth)); buf.resize(bufSize); // compute DFT of each template plane for( k = 0; k < tcn; k++ ) { int yofs = k*dftsize.height; Mat src = templ; Mat dst(dftTempl, Rect(0, yofs, dftsize.width, dftsize.height)); Mat dst1(dftTempl, Rect(0, yofs, templ.cols, templ.rows)); if( tcn > 1 ) { src = tdepth == maxDepth ? dst1 : Mat(templ.size(), tdepth, &buf[0]); int pairs[] = {k, 0}; mixChannels(&templ, 1, &src, 1, pairs, 1); } if( dst1.data != src.data ) src.convertTo(dst1, dst1.depth()); if( dst.cols > templ.cols ) { Mat part(dst, Range(0, templ.rows), Range(templ.cols, dst.cols)); part = Scalar::all(0); } dft(dst, dst, 0, templ.rows); } int tileCountX = (corr.cols + blocksize.width - 1)/blocksize.width; int tileCountY = (corr.rows + blocksize.height - 1)/blocksize.height; int tileCount = tileCountX * tileCountY; Size wholeSize = img.size(); Point roiofs(0,0); Mat img0 = img;//.........这里部分代码省略.........
Developer ID: 2december, Project: opencv, Lines of code: 101
示例17: icvCrossCorr//.........这里部分代码省略......... blocksize.width = cvRound(templ->cols*block_scale); blocksize.width = MAX( blocksize.width, min_block_size - templ->cols + 1 ); blocksize.width = MIN( blocksize.width, corr->cols ); blocksize.height = cvRound(templ->rows*block_scale); blocksize.height = MAX( blocksize.height, min_block_size - templ->rows + 1 ); blocksize.height = MIN( blocksize.height, corr->rows ); dftsize.width = cvGetOptimalDFTSize(blocksize.width + templ->cols - 1); if( dftsize.width == 1 ) dftsize.width = 2; dftsize.height = cvGetOptimalDFTSize(blocksize.height + templ->rows - 1); if( dftsize.width <= 0 || dftsize.height <= 0 ) CV_Error( CV_StsOutOfRange, "the input arrays are too big" ); // recompute block size blocksize.width = dftsize.width - templ->cols + 1; blocksize.width = MIN( blocksize.width, corr->cols ); blocksize.height = dftsize.height - templ->rows + 1; blocksize.height = MIN( blocksize.height, corr->rows ); dft_templ = cvCreateMat( dftsize.height*templ_cn, dftsize.width, max_depth );#ifdef USE_OPENMP num_threads = cvGetNumThreads();#else num_threads = 1;#endif for( k = 0; k < num_threads; k++ ) dft_img[k] = cvCreateMat( dftsize.height, dftsize.width, max_depth ); if( templ_cn > 1 && templ_depth != max_depth ) buf_size = templ->cols*templ->rows*CV_ELEM_SIZE(templ_depth); if( cn > 1 && depth != max_depth ) buf_size = MAX( buf_size, (blocksize.width + templ->cols - 1)* (blocksize.height + templ->rows - 1)*CV_ELEM_SIZE(depth)); if( (corr_cn > 1 || cn > 1) && corr_depth != max_depth ) buf_size = MAX( buf_size, blocksize.width*blocksize.height*CV_ELEM_SIZE(corr_depth)); if( buf_size > 0 ) { for( k = 0; k < num_threads; k++ ) buf[k].resize(buf_size); } // compute DFT of each template plane for( k = 0; k < templ_cn; k++ ) { CvMat dstub, *src, *dst, temp; CvMat* planes[] = { 0, 0, 0, 0 }; int yofs = k*dftsize.height; src = templ; dst = cvGetSubRect( dft_templ, &dstub, cvRect(0,yofs,templ->cols,templ->rows)); if( templ_cn > 1 ) { planes[k] = templ_depth == max_depth ? dst : cvInitMatHeader( &temp, templ->rows, templ->cols, templ_depth, &buf[0][0] ); cvSplit( templ, planes[0], planes[1], planes[2], planes[3] ); src = planes[k]; planes[k] = 0; }
Developer ID: AlexandreFreitas, Project: danfreve-blinkdetection, Lines of code: 67
示例18: CV_MAT_DEPTHvoid cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize, double scale, double delta, int borderType ){ int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype); if (ddepth < 0) ddepth = sdepth; _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );#ifdef HAVE_TEGRA_OPTIMIZATION if (scale == 1.0 && delta == 0) { Mat src = _src.getMat(), dst = _dst.getMat(); if (ksize == 1 && tegra::laplace1(src, dst, borderType)) return; if (ksize == 3 && tegra::laplace3(src, dst, borderType)) return; if (ksize == 5 && tegra::laplace5(src, dst, borderType)) return; }#endif if( ksize == 1 || ksize == 3 ) { float K[2][9] = { { 0, 1, 0, 1, -4, 1, 0, 1, 0 }, { 2, 0, 2, 0, -8, 0, 2, 0, 2 } }; Mat kernel(3, 3, CV_32F, K[ksize == 3]); if( scale != 1 ) kernel *= scale; filter2D( _src, _dst, ddepth, kernel, Point(-1, -1), delta, borderType ); } else { int ktype = std::max(CV_32F, std::max(ddepth, sdepth)); int wdepth = sdepth == CV_8U && ksize <= 5 ? CV_16S : sdepth <= CV_32F ? CV_32F : CV_64F; int wtype = CV_MAKETYPE(wdepth, cn); Mat kd, ks; getSobelKernels( kd, ks, 2, 0, ksize, false, ktype ); CV_OCL_RUN(_dst.isUMat(), ocl_Laplacian5(_src, _dst, kd, ks, scale, delta, borderType, wdepth, ddepth)) const size_t STRIPE_SIZE = 1 << 14; Ptr<FilterEngine> fx = createSeparableLinearFilter(stype, wtype, kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() ); Ptr<FilterEngine> fy = createSeparableLinearFilter(stype, wtype, ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() ); Mat src = _src.getMat(), dst = _dst.getMat(); int y = fx->start(src), dsty = 0, dy = 0; fy->start(src); const uchar* sptr = src.data + y*src.step; int dy0 = std::min(std::max((int)(STRIPE_SIZE/(CV_ELEM_SIZE(stype)*src.cols)), 1), src.rows); Mat d2x( dy0 + kd.rows - 1, src.cols, wtype ); Mat d2y( dy0 + kd.rows - 1, src.cols, wtype ); for( ; dsty < src.rows; sptr += dy0*src.step, dsty += dy ) { fx->proceed( sptr, (int)src.step, dy0, d2x.data, (int)d2x.step ); dy = fy->proceed( sptr, (int)src.step, dy0, d2y.data, (int)d2y.step ); if( dy > 0 ) { Mat dstripe = dst.rowRange(dsty, dsty + dy); d2x.rows = d2y.rows = dy; // modify the headers, which should work d2x += d2y; d2x.convertTo( dstripe, ddepth, scale, delta ); } } }}
Developer ID: HanaLeeHn, Project: opencv, Lines of code: 74
Example 19: getElemSize

static inline size_t getElemSize(int type) { return CV_ELEM_SIZE(type); }
Developer ID: muranava, Project: Synopsis, Lines of code: 1
Example 20: CV_ELEM_SIZE

inline
size_t GpuMat::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}
Developer ID: Achraf33, Project: opencv, Lines of code: 5
示例21: cvSVD//.........这里部分代码省略......... if( !CV_IS_MAT( v )) CV_CALL( v = cvGetMat( v, &vstub )); if( !(flags & CV_SVD_V_T) ) { v_rows = v->rows; v_cols = v->cols; } else { v_rows = v->cols; v_cols = v->rows; } if( !CV_ARE_TYPES_EQ( a, v )) CV_ERROR( CV_StsUnmatchedFormats, "" ); if( v_rows != n || v_cols != n ) CV_ERROR( CV_StsUnmatchedSizes, t_svd ? "U matrix has unappropriate size" : "V matrix has unappropriate size" ); if( w_is_mat && w_cols != v_cols ) CV_ERROR( CV_StsUnmatchedSizes, t_svd ? "U and W have incompatible sizes" : "V and W have incompatible sizes" ); } else { v = &vstub; v->data.ptr = 0; v->step = 0; } type = CV_MAT_TYPE( a->type ); pix_size = CV_ELEM_SIZE(type); buf_size = n*2 + m; if( !(flags & CV_SVD_MODIFY_A) ) { a_buf_offset = buf_size; buf_size += a->rows*a->cols; } if( temp_u ) { u_buf_offset = buf_size; buf_size += u->rows*u->cols; } buf_size *= pix_size; if( buf_size <= CV_MAX_LOCAL_SIZE ) { buffer = (uchar*)cvStackAlloc( buf_size ); local_alloc = 1; } else { CV_CALL( buffer = (uchar*)cvAlloc( buf_size )); } if( !(flags & CV_SVD_MODIFY_A) ) { cvInitMatHeader( &tmat, m, n, type, buffer + a_buf_offset*pix_size ); if( !t_svd ) cvCopy( a, &tmat );
Developer ID: burbineSE, Project: Software_Engineer_91.411_1, Lines of code: 67
Example 22: elemSize

int elemSize() const { return CV_ELEM_SIZE(type_); }
Developer ID: 4auka, Project: opencv, Lines of code: 1
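Examples 19, 20 and 22 all wrap CV_ELEM_SIZE in a tiny elemSize() accessor. For comparison, here is a short sketch (assuming the C++ core header opencv2/core/core.hpp, not code from the projects on this page) showing that cv::Mat::elemSize() reports the same per-element byte count the macro computes from the type flags:

#include <opencv2/core/core.hpp>
#include <cassert>

int main()
{
    cv::Mat img(480, 640, CV_8UC3);
    assert(img.elemSize()  == (size_t)CV_ELEM_SIZE(CV_8UC3));  // 3 bytes per pixel
    assert(img.elemSize1() == 1);                              // 1 byte per channel
    return 0;
}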
示例23: cvFilter2D//.........这里部分代码省略......... else { CV_CALL( kernel_data = (float*)cvAlloc( sz )); local_alloc = 0; } kernel_hdr = cvMat( kernel->rows, kernel->cols, CV_32F, kernel_data ); if( CV_MAT_TYPE(kernel->type) == CV_32FC1 ) cvCopy( kernel, &kernel_hdr ); else cvConvertScale( kernel, &kernel_hdr, 1, 0 ); kernel = &kernel_hdr; } size = cvGetMatSize( src ); depth = CV_MAT_DEPTH(type); src_step = src->step; dst_step = dst->step ? dst->step : CV_STUB_STEP; if( icvFilter_8u_C1R_p && (src->rows >= ipp_lower_limit || src->cols >= ipp_lower_limit) ) { CvFilterIPPFunc ipp_func = type == CV_8UC1 ? (CvFilterIPPFunc)icvFilter_8u_C1R_p : type == CV_8UC3 ? (CvFilterIPPFunc)icvFilter_8u_C3R_p : type == CV_8UC4 ? (CvFilterIPPFunc)icvFilter_8u_C4R_p : type == CV_16SC1 ? (CvFilterIPPFunc)icvFilter_16s_C1R_p : type == CV_16SC3 ? (CvFilterIPPFunc)icvFilter_16s_C3R_p : type == CV_16SC4 ? (CvFilterIPPFunc)icvFilter_16s_C4R_p : type == CV_32FC1 ? (CvFilterIPPFunc)icvFilter_32f_C1R_p : type == CV_32FC3 ? (CvFilterIPPFunc)icvFilter_32f_C3R_p : type == CV_32FC4 ? (CvFilterIPPFunc)icvFilter_32f_C4R_p : 0; if( ipp_func ) { CvSize el_size = { kernel->cols, kernel->rows }; CvPoint el_anchor = { el_size.width - anchor.x - 1, el_size.height - anchor.y - 1 }; int stripe_size = 1 << 16; // the optimal value may depend on CPU cache, // overhead of current IPP code etc. const uchar* shifted_ptr; int i, j, y, dy = 0; int temp_step; // mirror the kernel around the center for( i = 0; i < (el_size.height+1)/2; i++ ) { float* top_row = kernel->data.fl + el_size.width*i; float* bottom_row = kernel->data.fl + el_size.width*(el_size.height - i - 1); for( j = 0; j < (el_size.width+1)/2; j++ ) { float a = top_row[j], b = top_row[el_size.width - j - 1]; float c = bottom_row[j], d = bottom_row[el_size.width - j - 1]; top_row[j] = d; top_row[el_size.width - j - 1] = c; bottom_row[j] = b; bottom_row[el_size.width - j - 1] = a; } } CV_CALL( temp = icvIPPFilterInit( src, stripe_size, el_size )); shifted_ptr = temp->data.ptr + anchor.y*temp->step + anchor.x*CV_ELEM_SIZE(type); temp_step = temp->step ? temp->step : CV_STUB_STEP; for( y = 0; y < src->rows; y += dy ) { dy = icvIPPFilterNextStripe( src, temp, y, el_size, anchor ); IPPI_CALL( ipp_func( shifted_ptr, temp_step, dst->data.ptr + y*dst_step, dst_step, cvSize(src->cols, dy), kernel->data.fl, el_size, el_anchor )); } EXIT; } } CV_CALL( state = icvFilterInitAlloc( src->cols, cv32f, CV_MAT_CN(type), cvSize(kernel->cols, kernel->rows), anchor, kernel->data.ptr, ICV_GENERIC_KERNEL )); if( CV_MAT_CN(type) == 2 ) CV_ERROR( CV_BadNumChannels, "Unsupported number of channels" ); func = (CvFilterFunc)(filter_tab.fn_2d[depth]); if( !func ) CV_ERROR( CV_StsUnsupportedFormat, "" ); if( size.height == 1 ) src_step = dst_step = CV_STUB_STEP; IPPI_CALL( func( src->data.ptr, src_step, dst->data.ptr, dst_step, &size, state, 0 )); __END__; cvReleaseMat( &temp ); icvFilterFree( &state ); if( !local_alloc ) cvFree( (void**)&kernel_data );}
Developer ID: ThadeuFerreira, Project: sift-coprojeto, Lines of code: 101
示例24: icvIPPSepFilterint icvIPPSepFilter( const CvMat* src, CvMat* dst, const CvMat* kernelX, const CvMat* kernelY, CvPoint anchor ){ int result = 0; CvMat* top_bottom = 0; CvMat* vout_hin = 0; CvMat* dst_buf = 0; CV_FUNCNAME( "icvIPPSepFilter" ); __BEGIN__; CvSize ksize; CvPoint el_anchor; CvSize size; int type, depth, pix_size; int i, x, y, dy = 0, prev_dy = 0, max_dy; CvMat vout; CvCopyNonConstBorderFunc copy_border_func; CvIPPSepFilterFunc x_func = 0, y_func = 0; int src_step, top_bottom_step; float *kx, *ky; int align, stripe_size; if( !icvFilterRow_8u_C1R_p ) EXIT; if( !CV_ARE_TYPES_EQ( src, dst ) || !CV_ARE_SIZES_EQ( src, dst ) || !CV_IS_MAT_CONT(kernelX->type & kernelY->type) || CV_MAT_TYPE(kernelX->type) != CV_32FC1 || CV_MAT_TYPE(kernelY->type) != CV_32FC1 || kernelX->cols != 1 && kernelX->rows != 1 || kernelY->cols != 1 && kernelY->rows != 1 || (unsigned)anchor.x >= (unsigned)(kernelX->cols + kernelX->rows - 1) || (unsigned)anchor.y >= (unsigned)(kernelY->cols + kernelY->rows - 1) ) CV_ERROR( CV_StsError, "Internal Error: incorrect parameters" ); ksize.width = kernelX->cols + kernelX->rows - 1; ksize.height = kernelY->cols + kernelY->rows - 1; /*if( ksize.width <= 5 && ksize.height <= 5 ) { float* ker = (float*)cvStackAlloc( ksize.width*ksize.height*sizeof(ker[0])); CvMat kernel = cvMat( ksize.height, ksize.width, CV_32F, ker ); for( y = 0, i = 0; y < ksize.height; y++ ) for( x = 0; x < ksize.width; x++, i++ ) ker[i] = kernelY->data.fl[y]*kernelX->data.fl[x]; CV_CALL( cvFilter2D( src, dst, &kernel, anchor )); EXIT; }*/ type = CV_MAT_TYPE(src->type); depth = CV_MAT_DEPTH(type); pix_size = CV_ELEM_SIZE(type); if( type == CV_8UC1 ) x_func = icvFilterRow_8u_C1R_p, y_func = icvFilterColumn_8u_C1R_p; else if( type == CV_8UC3 ) x_func = icvFilterRow_8u_C3R_p, y_func = icvFilterColumn_8u_C3R_p; else if( type == CV_8UC4 ) x_func = icvFilterRow_8u_C4R_p, y_func = icvFilterColumn_8u_C4R_p; else if( type == CV_16SC1 ) x_func = icvFilterRow_16s_C1R_p, y_func = icvFilterColumn_16s_C1R_p; else if( type == CV_16SC3 ) x_func = icvFilterRow_16s_C3R_p, y_func = icvFilterColumn_16s_C3R_p; else if( type == CV_16SC4 ) x_func = icvFilterRow_16s_C4R_p, y_func = icvFilterColumn_16s_C4R_p; else if( type == CV_32FC1 ) x_func = icvFilterRow_32f_C1R_p, y_func = icvFilterColumn_32f_C1R_p; else if( type == CV_32FC3 ) x_func = icvFilterRow_32f_C3R_p, y_func = icvFilterColumn_32f_C3R_p; else if( type == CV_32FC4 ) x_func = icvFilterRow_32f_C4R_p, y_func = icvFilterColumn_32f_C4R_p; else EXIT; size = cvGetMatSize(src); stripe_size = src->data.ptr == dst->data.ptr ? 1 << 15 : 1 << 16; max_dy = MAX( ksize.height - 1, stripe_size/(size.width + ksize.width - 1)); max_dy = MIN( max_dy, size.height + ksize.height - 1 ); align = 8/CV_ELEM_SIZE(depth); CV_CALL( top_bottom = cvCreateMat( ksize.height*2, cvAlign(size.width,align), type )); CV_CALL( vout_hin = cvCreateMat( max_dy + ksize.height, cvAlign(size.width + ksize.width - 1, align), type )); if( src->data.ptr == dst->data.ptr && size.height ) CV_CALL( dst_buf = cvCreateMat( max_dy + ksize.height, cvAlign(size.width, align), type )); kx = (float*)cvStackAlloc( ksize.width*sizeof(kx[0]) ); ky = (float*)cvStackAlloc( ksize.height*sizeof(ky[0]) ); // mirror the kernels for( i = 0; i < ksize.width; i++ ) kx[i] = kernelX->data.fl[ksize.width - i - 1];//.........这里部分代码省略.........
Developer ID: ThadeuFerreira, Project: sift-coprojeto, Lines of code: 101
示例25: cvConvexityDefects/* it must have more than 3 points */CV_IMPL CvSeq*cvConvexityDefects( const CvArr* array, const CvArr* hullarray, CvMemStorage* storage ){ CvSeq* defects = 0; CV_FUNCNAME( "cvConvexityDefects" ); __BEGIN__; int i, index; CvPoint* hull_cur; /* is orientation of hull different from contour one */ int rev_orientation; CvContour contour_header; union { CvContour c; CvSeq s; } hull_header; CvSeqBlock block, hullblock; CvSeq *ptseq = (CvSeq*)array, *hull = (CvSeq*)hullarray; CvSeqReader hull_reader; CvSeqReader ptseq_reader; CvSeqWriter writer; int is_index; if( CV_IS_SEQ( ptseq )) { if( !CV_IS_SEQ_POINT_SET( ptseq )) CV_ERROR( CV_StsUnsupportedFormat, "Input sequence is not a sequence of points" ); if( !storage ) storage = ptseq->storage; } else { CV_CALL( ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block )); } if( CV_SEQ_ELTYPE( ptseq ) != CV_32SC2 ) CV_ERROR( CV_StsUnsupportedFormat, "Floating-point coordinates are not supported here" ); if( CV_IS_SEQ( hull )) { int hulltype = CV_SEQ_ELTYPE( hull ); if( hulltype != CV_SEQ_ELTYPE_PPOINT && hulltype != CV_SEQ_ELTYPE_INDEX ) CV_ERROR( CV_StsUnsupportedFormat, "Convex hull must represented as a sequence " "of indices or sequence of pointers" ); if( !storage ) storage = hull->storage; } else { CvMat* mat = (CvMat*)hull; if( !CV_IS_MAT( hull )) CV_ERROR(CV_StsBadArg, "Convex hull is neither sequence nor matrix"); if( mat->cols != 1 && mat->rows != 1 || !CV_IS_MAT_CONT(mat->type) || CV_MAT_TYPE(mat->type) != CV_32SC1 ) CV_ERROR( CV_StsBadArg, "The matrix should be 1-dimensional and continuous array of int's" ); if( mat->cols + mat->rows - 1 > ptseq->total ) CV_ERROR( CV_StsBadSize, "Convex hull is larger than the point sequence" ); CV_CALL( hull = cvMakeSeqHeaderForArray( CV_SEQ_KIND_CURVE|CV_MAT_TYPE(mat->type)|CV_SEQ_FLAG_CLOSED, sizeof(CvContour), CV_ELEM_SIZE(mat->type), mat->data.ptr, mat->cols + mat->rows - 1, &hull_header.s, &hullblock )); } is_index = CV_SEQ_ELTYPE(hull) == CV_SEQ_ELTYPE_INDEX; if( !storage ) CV_ERROR( CV_StsNullPtr, "NULL storage pointer" ); CV_CALL( defects = cvCreateSeq( CV_SEQ_KIND_GENERIC, sizeof(CvSeq), sizeof(CvConvexityDefect), storage )); if( ptseq->total < 4 || hull->total < 3) { //CV_ERROR( CV_StsBadSize, // "point seq size must be >= 4, convex hull size must be >= 3" ); EXIT; } /* recognize co-orientation of ptseq and its hull */ { int sign = 0; int index1, index2, index3; if( !is_index ) { CvPoint* pos = *CV_SEQ_ELEM( hull, CvPoint*, 0 );//.........这里部分代码省略.........
Developer ID: JackJone, Project: opencv, Lines of code: 101
示例26: icvInitPyramidalAlgorithmstatic voidicvInitPyramidalAlgorithm( const CvMat* imgA, const CvMat* imgB, CvMat* pyrA, CvMat* pyrB, int level, CvTermCriteria * criteria, int max_iters, int flags, uchar *** imgI, uchar *** imgJ, int **step, CvSize** size, double **scale, cv::AutoBuffer<uchar>* buffer ){ const int ALIGN = 8; int pyrBytes, bufferBytes = 0, elem_size; int level1 = level + 1; int i; CvSize imgSize, levelSize; *imgI = *imgJ = 0; *step = 0; *scale = 0; *size = 0; /* check input arguments */ if( ((flags & CV_LKFLOW_PYR_A_READY) != 0 && !pyrA) || ((flags & CV_LKFLOW_PYR_B_READY) != 0 && !pyrB) ) CV_Error( CV_StsNullPtr, "Some of the precomputed pyramids are missing" ); if( level < 0 ) CV_Error( CV_StsOutOfRange, "The number of pyramid levels is negative" ); switch( criteria->type ) { case CV_TERMCRIT_ITER: criteria->epsilon = 0.f; break; case CV_TERMCRIT_EPS: criteria->max_iter = max_iters; break; case CV_TERMCRIT_ITER | CV_TERMCRIT_EPS: break; default: assert( 0 ); CV_Error( CV_StsBadArg, "Invalid termination criteria" ); } /* compare squared values */ criteria->epsilon *= criteria->epsilon; /* set pointers and step for every level */ pyrBytes = 0; imgSize = cvGetSize(imgA); elem_size = CV_ELEM_SIZE(imgA->type); levelSize = imgSize; for( i = 1; i < level1; i++ ) { levelSize.width = (levelSize.width + 1) >> 1; levelSize.height = (levelSize.height + 1) >> 1; int tstep = cvAlign(levelSize.width,ALIGN) * elem_size; pyrBytes += tstep * levelSize.height; } assert( pyrBytes <= imgSize.width * imgSize.height * elem_size * 4 / 3 ); /* buffer_size = <size for patches> + <size for pyramids> */ bufferBytes = (int)((level1 >= 0) * ((pyrA->data.ptr == 0) + (pyrB->data.ptr == 0)) * pyrBytes + (sizeof(imgI[0][0]) * 2 + sizeof(step[0][0]) + sizeof(size[0][0]) + sizeof(scale[0][0])) * level1); buffer->allocate( bufferBytes ); *imgI = (uchar **) (uchar*)(*buffer); *imgJ = *imgI + level1; *step = (int *) (*imgJ + level1); *scale = (double *) (*step + level1); *size = (CvSize *)(*scale + level1); imgI[0][0] = imgA->data.ptr; imgJ[0][0] = imgB->data.ptr; step[0][0] = imgA->step; scale[0][0] = 1; size[0][0] = imgSize; if( level > 0 ) { uchar *bufPtr = (uchar *) (*size + level1); uchar *ptrA = pyrA->data.ptr; uchar *ptrB = pyrB->data.ptr; if( !ptrA ) { ptrA = bufPtr; bufPtr += pyrBytes; } if( !ptrB ) ptrB = bufPtr;//.........这里部分代码省略.........
Developer ID: bkuhlman80, Project: opencv, Lines of code: 101
示例27: cvSVBkSbCV_IMPL voidcvSVBkSb( const CvArr* warr, const CvArr* uarr, const CvArr* varr, const CvArr* barr, CvArr* xarr, int flags ){ uchar* buffer = 0; int local_alloc = 0; CV_FUNCNAME( "cvSVBkSb" ); __BEGIN__; CvMat wstub, *w = (CvMat*)warr; CvMat bstub, *b = (CvMat*)barr; CvMat xstub, *x = (CvMat*)xarr; CvMat ustub, ustub2, *u = (CvMat*)uarr; CvMat vstub, vstub2, *v = (CvMat*)varr; uchar* tw = 0; int type; int temp_u = 0, temp_v = 0; int u_buf_offset = 0, v_buf_offset = 0, w_buf_offset = 0, t_buf_offset = 0; int buf_size = 0, pix_size; int m, n, nm; int u_rows, u_cols; int v_rows, v_cols; if( !CV_IS_MAT( w )) CV_CALL( w = cvGetMat( w, &wstub )); if( !CV_IS_MAT( u )) CV_CALL( u = cvGetMat( u, &ustub )); if( !CV_IS_MAT( v )) CV_CALL( v = cvGetMat( v, &vstub )); if( !CV_IS_MAT( x )) CV_CALL( x = cvGetMat( x, &xstub )); if( !CV_ARE_TYPES_EQ( w, u ) || !CV_ARE_TYPES_EQ( w, v ) || !CV_ARE_TYPES_EQ( w, x )) CV_ERROR( CV_StsUnmatchedFormats, "All matrices must have the same type" ); type = CV_MAT_TYPE( w->type ); pix_size = CV_ELEM_SIZE(type); if( !(flags & CV_SVD_U_T) ) { temp_u = 1; u_buf_offset = buf_size; buf_size += u->cols*u->rows*pix_size; u_rows = u->rows; u_cols = u->cols; } else { u_rows = u->cols; u_cols = u->rows; } if( !(flags & CV_SVD_V_T) ) { temp_v = 1; v_buf_offset = buf_size; buf_size += v->cols*v->rows*pix_size; v_rows = v->rows; v_cols = v->cols; } else { v_rows = v->cols; v_cols = v->rows; } m = u_rows; n = v_rows; nm = MIN(n,m); if( (u_rows != u_cols && v_rows != v_cols) || x->rows != v_rows ) CV_ERROR( CV_StsBadSize, "V or U matrix must be square" ); if( (w->rows == 1 || w->cols == 1) && w->rows + w->cols - 1 == nm ) { if( CV_IS_MAT_CONT(w->type) ) tw = w->data.ptr; else { w_buf_offset = buf_size; buf_size += nm*pix_size; } } else { if( w->cols != v_cols || w->rows != u_cols ) CV_ERROR( CV_StsBadSize, "W must be 1d array of MIN(m,n) elements or " "matrix which size matches to U and V" ); w_buf_offset = buf_size; buf_size += nm*pix_size; } if( b ) {//.........这里部分代码省略.........
Developer ID: burbineSE, Project: Software_Engineer_91.411_1, Lines of code: 101
示例28: icvConvertPointsHomogenious//.........这里部分代码省略......... { if( src->rows == dst->rows && src->cols == dst->cols ) { if( CV_ARE_TYPES_EQ( src, dst ) ) cvCopy( src, dst ); else cvConvert( src, dst ); } else { if( !CV_ARE_TYPES_EQ( src, dst )) { CV_CALL( temp = cvCreateMat( src->rows, src->cols, dst->type )); cvConvert( src, temp ); src = temp; } cvTranspose( src, dst ); } if( ones ) cvSet( ones, cvRealScalar(1.) ); } else { int s_plane_stride, s_stride, d_plane_stride, d_stride, elem_size; if( !CV_ARE_TYPES_EQ( src, dst )) { CV_CALL( temp = cvCreateMat( src->rows, src->cols, dst->type )); cvConvert( src, temp ); src = temp; } elem_size = CV_ELEM_SIZE(src->type); if( s_count == src->cols ) s_plane_stride = src->step / elem_size, s_stride = 1; else s_stride = src->step / elem_size, s_plane_stride = 1; if( d_count == dst->cols ) d_plane_stride = dst->step / elem_size, d_stride = 1; else d_stride = dst->step / elem_size, d_plane_stride = 1; CV_CALL( denom = cvCreateMat( 1, d_count, dst->type )); if( CV_MAT_DEPTH(dst->type) == CV_32F ) { const float* xs = src->data.fl; const float* ys = xs + s_plane_stride; const float* zs = 0; const float* ws = xs + (s_dims - 1)*s_plane_stride; float* iw = denom->data.fl; float* xd = dst->data.fl; float* yd = xd + d_plane_stride; float* zd = 0; if( d_dims == 3 ) { zs = ys + s_plane_stride; zd = yd + d_plane_stride; }
Developer ID: liangfu, Project: dnn, Lines of code: 66
示例29: cvFitLineCV_IMPL voidcvFitLine( const CvArr* array, int dist, double param, double reps, double aeps, float *line ){ cv::AutoBuffer<schar> buffer; schar* points = 0; union { CvContour contour; CvSeq seq; } header; CvSeqBlock block; CvSeq* ptseq = (CvSeq*)array; int type; if( !line ) CV_Error( CV_StsNullPtr, "NULL pointer to line parameters" ); if( CV_IS_SEQ(ptseq) ) { type = CV_SEQ_ELTYPE(ptseq); if( ptseq->total == 0 ) CV_Error( CV_StsBadSize, "The sequence has no points" ); if( (type!=CV_32FC2 && type!=CV_32FC3 && type!=CV_32SC2 && type!=CV_32SC3) || CV_ELEM_SIZE(type) != ptseq->elem_size ) CV_Error( CV_StsUnsupportedFormat, "Input sequence must consist of 2d points or 3d points" ); } else { CvMat* mat = (CvMat*)array; type = CV_MAT_TYPE(mat->type); if( !CV_IS_MAT(mat)) CV_Error( CV_StsBadArg, "Input array is not a sequence nor matrix" ); if( !CV_IS_MAT_CONT(mat->type) || (type!=CV_32FC2 && type!=CV_32FC3 && type!=CV_32SC2 && type!=CV_32SC3) || (mat->width != 1 && mat->height != 1)) CV_Error( CV_StsBadArg, "Input array must be 1d continuous array of 2d or 3d points" ); ptseq = cvMakeSeqHeaderForArray( CV_SEQ_KIND_GENERIC|type, sizeof(CvContour), CV_ELEM_SIZE(type), mat->data.ptr, mat->width + mat->height - 1, &header.seq, &block ); } if( reps < 0 || aeps < 0 ) CV_Error( CV_StsOutOfRange, "Both reps and aeps must be non-negative" ); if( CV_MAT_DEPTH(type) == CV_32F && ptseq->first->next == ptseq->first ) { /* no need to copy data in this case */ points = ptseq->first->data; } else { buffer.allocate(ptseq->total*CV_ELEM_SIZE(type)); points = buffer; cvCvtSeqToArray( ptseq, points, CV_WHOLE_SEQ ); if( CV_MAT_DEPTH(type) != CV_32F ) { int i, total = ptseq->total*CV_MAT_CN(type); assert( CV_MAT_DEPTH(type) == CV_32S ); for( i = 0; i < total; i++ ) ((float*)points)[i] = (float)((int*)points)[i]; } } if( dist == CV_DIST_USER ) CV_Error( CV_StsBadArg, "User-defined distance is not allowed" ); if( CV_MAT_CN(type) == 2 ) { IPPI_CALL( icvFitLine2D( (CvPoint2D32f*)points, ptseq->total, dist, (float)param, (float)reps, (float)aeps, line )); } else { IPPI_CALL( icvFitLine3D( (CvPoint3D32f*)points, ptseq->total, dist, (float)param, (float)reps, (float)aeps, line )); }}
Developer ID: NaterGator, Project: AndroidOpenCV_mods, Lines of code: 81
Note: the CV_ELEM_SIZE examples in this article were collected from GitHub, MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution or use should follow the license of the corresponding project. Please do not repost without permission.