
Self-study tutorial: C++ CV_MAT_CN function code examples

51自学网 2021-06-01 20:07:56
  C++
This tutorial on C++ CV_MAT_CN code examples is quite practical; we hope it helps you.

This article collects typical usage examples of CV_MAT_CN in C++ (an OpenCV core macro that extracts the channel count from a packed matrix type). If you have been struggling with questions such as: what exactly does CV_MAT_CN do? how is it used? what does real code that calls it look like? then the hand-picked examples below should help.

A total of 28 CV_MAT_CN code examples are shown below, sorted by popularity by default.
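Before working through the examples, here is a minimal standalone sketch (my own illustration, assuming the OpenCV 3.x/4.x header layout) of what CV_MAT_CN actually does: it unpacks the channel count from a packed type value, while CV_MAT_DEPTH unpacks the element depth and CV_MAKETYPE packs the two back together.

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    cv::Mat img(4, 4, CV_8UC3);       // 3-channel, 8-bit matrix
    int type  = img.type();           // packed type flags, here CV_8UC3
    int cn    = CV_MAT_CN(type);      // -> 3, the number of channels
    int depth = CV_MAT_DEPTH(type);   // -> CV_8U, the element depth
    std::printf("channels=%d depth=%d\n", cn, depth);

    // CV_MAKETYPE packs a depth and a channel count back into a type value.
    int rebuilt = CV_MAKETYPE(depth, cn);
    return rebuilt == type ? 0 : 1;   // the round trip reproduces img.type()
}

The examples below show the same macro at work inside real OpenCV code paths (image decoding, filtering, clustering, template matching and so on).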

Example 1: cvInitUndistortMap

CV_IMPL voidcvInitUndistortMap( const CvMat* A, const CvMat* dist_coeffs,                    CvArr* mapxarr, CvArr* mapyarr ){    uchar* buffer = 0;    CV_FUNCNAME( "cvInitUndistortMap" );    __BEGIN__;        float a[9], k[4];    int coi1 = 0, coi2 = 0;    CvMat mapxstub, *_mapx = (CvMat*)mapxarr;    CvMat mapystub, *_mapy = (CvMat*)mapyarr;    float *mapx, *mapy;    CvMat _a = cvMat( 3, 3, CV_32F, a ), _k;    int mapxstep, mapystep;    int u, v;    float u0, v0, fx, fy, _fx, _fy, k1, k2, p1, p2;    CvSize size;    CV_CALL( _mapx = cvGetMat( _mapx, &mapxstub, &coi1 ));    CV_CALL( _mapy = cvGetMat( _mapy, &mapystub, &coi2 ));    if( coi1 != 0 || coi2 != 0 )        CV_ERROR( CV_BadCOI, "The function does not support COI" );    if( CV_MAT_TYPE(_mapx->type) != CV_32FC1 )        CV_ERROR( CV_StsUnsupportedFormat, "Both maps must have 32fC1 type" );    if( !CV_ARE_TYPES_EQ( _mapx, _mapy ))        CV_ERROR( CV_StsUnmatchedFormats, "" );    if( !CV_ARE_SIZES_EQ( _mapx, _mapy ))        CV_ERROR( CV_StsUnmatchedSizes, "" );    if( !CV_IS_MAT(A) || A->rows != 3 || A->cols != 3  ||        CV_MAT_TYPE(A->type) != CV_32FC1 && CV_MAT_TYPE(A->type) != CV_64FC1 )        CV_ERROR( CV_StsBadArg, "Intrinsic matrix must be a valid 3x3 floating-point matrix" );    if( !CV_IS_MAT(dist_coeffs) || dist_coeffs->rows != 1 && dist_coeffs->cols != 1 ||        dist_coeffs->rows*dist_coeffs->cols*CV_MAT_CN(dist_coeffs->type) != 4 ||        CV_MAT_DEPTH(dist_coeffs->type) != CV_64F &&        CV_MAT_DEPTH(dist_coeffs->type) != CV_32F )        CV_ERROR( CV_StsBadArg,            "Distortion coefficients must be 1x4 or 4x1 floating-point vector" );    cvConvert( A, &_a );    _k = cvMat( dist_coeffs->rows, dist_coeffs->cols,                CV_MAKETYPE(CV_32F, CV_MAT_CN(dist_coeffs->type)), k );    cvConvert( dist_coeffs, &_k );    u0 = a[2]; v0 = a[5];    fx = a[0]; fy = a[4];    _fx = 1.f/fx; _fy = 1.f/fy;    k1 = k[0]; k2 = k[1];    p1 = k[2]; p2 = k[3];    mapxstep = _mapx->step ? _mapx->step : CV_STUB_STEP;    mapystep = _mapy->step ? _mapy->step : CV_STUB_STEP;    mapx = _mapx->data.fl;    mapy = _mapy->data.fl;    size = cvGetMatSize(_mapx);        /*if( icvUndistortGetSize_p && icvCreateMapCameraUndistort_32f_C1R_p )    {        int buf_size = 0;        if( icvUndistortGetSize_p( size, &buf_size ) && buf_size > 0 )        {            CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));            if( icvCreateMapCameraUndistort_32f_C1R_p(                mapx, mapxstep, mapy, mapystep, size,                a[0], a[4], a[2], a[5], k[0], k[1], k[2], k[3], buffer ) >= 0 )                EXIT;        }    }*/        mapxstep /= sizeof(mapx[0]);    mapystep /= sizeof(mapy[0]);    for( v = 0; v < size.height; v++, mapx += mapxstep, mapy += mapystep )    {        float y = (v - v0)*_fy;        float y2 = y*y;        float _2p1y = 2*p1*y;        float _3p1y2 = 3*p1*y2;        float p2y2 = p2*y2;        for( u = 0; u < size.width; u++ )        {            float x = (u - u0)*_fx;            float x2 = x*x;            float r2 = x2 + y2;            float d = 1 + (k1 + k2*r2)*r2;            float _u = fx*(x*(d + _2p1y) + p2y2 + (3*p2)*x2) + u0;            float _v = fy*(y*(d + (2*p2)*x) + _3p1y2 + p1*x2) + v0;            mapx[u] = _u;            mapy[u] = _v;        }//.........这里部分代码省略.........
Developer: DORARA29, project: AtomManipulator, lines: 101


Example 2: _buffer

bool PngDecoder::readData( Mat& img )
{
    bool result = false;
    AutoBuffer<uchar*> _buffer(m_height);
    uchar** buffer = _buffer;
    int color = img.channels() > 1;
    uchar* data = img.data;
    int step = (int)img.step;

    if( m_png_ptr && m_info_ptr && m_end_info && m_width && m_height )
    {
        png_structp png_ptr = (png_structp)m_png_ptr;
        png_infop info_ptr = (png_infop)m_info_ptr;
        png_infop end_info = (png_infop)m_end_info;

        if( setjmp( png_jmpbuf( png_ptr ) ) == 0 )
        {
            int y;

            if( img.depth() == CV_8U && m_bit_depth == 16 )
                png_set_strip_16( png_ptr );
            else if( !isBigEndian() )
                png_set_swap( png_ptr );

            if( img.channels() < 4 )
            {
                /* observation: png_read_image() writes 400 bytes beyond
                 * end of data when reading a 400x118 color png
                 * "mpplus_sand.png".  OpenCV crashes even with demo
                 * programs.  Looking at the loaded image I'd say we get 4
                 * bytes per pixel instead of 3 bytes per pixel.  Test
                 * indicate that it is a good idea to always ask for
                 * stripping alpha..  18.11.2004 Axel Walthelm
                 */
                png_set_strip_alpha( png_ptr );
            }

            if( m_color_type == PNG_COLOR_TYPE_PALETTE )
                png_set_palette_to_rgb( png_ptr );

            if( m_color_type == PNG_COLOR_TYPE_GRAY && m_bit_depth < 8 )
#if PNG_LIBPNG_VER_MAJOR*100 + PNG_LIBPNG_VER_MINOR >= 104
                png_set_expand_gray_1_2_4_to_8( png_ptr );
#else
                png_set_gray_1_2_4_to_8( png_ptr );
#endif

            if( CV_MAT_CN(m_type) > 1 && color )
                png_set_bgr( png_ptr ); // convert RGB to BGR
            else if( color )
                png_set_gray_to_rgb( png_ptr ); // Gray->RGB
            else
                png_set_rgb_to_gray( png_ptr, 1, 0.299, 0.587 ); // RGB->Gray

            png_read_update_info( png_ptr, info_ptr );

            for( y = 0; y < m_height; y++ )
                buffer[y] = data + y*step;

            png_read_image( png_ptr, buffer );
            png_read_end( png_ptr, end_info );

            result = true;
        }
    }

    close();
    return result;
}
Developer: bertptrs, project: uni-mir, lines: 69


Example 3: CV_MAT_DEPTH

void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,                    double scale, double delta, int borderType ){    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);    if (ddepth < 0)        ddepth = sdepth;    _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );#ifdef HAVE_TEGRA_OPTIMIZATION    if (scale == 1.0 && delta == 0)    {        Mat src = _src.getMat(), dst = _dst.getMat();        if (ksize == 1 && tegra::laplace1(src, dst, borderType))            return;        if (ksize == 3 && tegra::laplace3(src, dst, borderType))            return;        if (ksize == 5 && tegra::laplace5(src, dst, borderType))            return;    }#endif    if( ksize == 1 || ksize == 3 )    {        float K[2][9] =        {            { 0, 1, 0, 1, -4, 1, 0, 1, 0 },            { 2, 0, 2, 0, -8, 0, 2, 0, 2 }        };        Mat kernel(3, 3, CV_32F, K[ksize == 3]);        if( scale != 1 )            kernel *= scale;        filter2D( _src, _dst, ddepth, kernel, Point(-1, -1), delta, borderType );    }    else    {        int ktype = std::max(CV_32F, std::max(ddepth, sdepth));        int wdepth = sdepth == CV_8U && ksize <= 5 ? CV_16S : sdepth <= CV_32F ? CV_32F : CV_64F;        int wtype = CV_MAKETYPE(wdepth, cn);        Mat kd, ks;        getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );        CV_OCL_RUN(_dst.isUMat(),                   ocl_Laplacian5(_src, _dst, kd, ks, scale,                                  delta, borderType, wdepth, ddepth))        const size_t STRIPE_SIZE = 1 << 14;        Ptr<FilterEngine> fx = createSeparableLinearFilter(stype,            wtype, kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() );        Ptr<FilterEngine> fy = createSeparableLinearFilter(stype,            wtype, ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() );        Mat src = _src.getMat(), dst = _dst.getMat();        int y = fx->start(src), dsty = 0, dy = 0;        fy->start(src);        const uchar* sptr = src.data + y*src.step;        int dy0 = std::min(std::max((int)(STRIPE_SIZE/(CV_ELEM_SIZE(stype)*src.cols)), 1), src.rows);        Mat d2x( dy0 + kd.rows - 1, src.cols, wtype );        Mat d2y( dy0 + kd.rows - 1, src.cols, wtype );        for( ; dsty < src.rows; sptr += dy0*src.step, dsty += dy )        {            fx->proceed( sptr, (int)src.step, dy0, d2x.data, (int)d2x.step );            dy = fy->proceed( sptr, (int)src.step, dy0, d2y.data, (int)d2y.step );            if( dy > 0 )            {                Mat dstripe = dst.rowRange(dsty, dsty + dy);                d2x.rows = d2y.rows = dy; // modify the headers, which should work                d2x += d2y;                d2x.convertTo( dstripe, ddepth, scale, delta );            }        }    }}
Developer: HanaLeeHn, project: opencv, lines: 74


Example 4: CV_MAT_CN

inline int CudaMem::channels() const
{
    return CV_MAT_CN(flags);
}
Developer: 0kazuya, project: opencv, lines: 5


Example 5: imread_

static void*
imread_( const string& filename, int flags, int hdrtype, Mat* mat=0 )
{
    IplImage* image = 0;
    CvMat *matrix = 0;
    Mat temp, *data = &temp;

    ImageDecoder decoder = findDecoder(filename);
    if( decoder.empty() )
        return 0;
    decoder->setSource(filename);
    if( !decoder->readHeader() )
        return 0;

    CvSize size;
    size.width = decoder->width();
    size.height = decoder->height();

    int type = decoder->type();
    if( flags != -1 )
    {
        if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 )
            type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type));

        if( (flags & CV_LOAD_IMAGE_COLOR) != 0 ||
           ((flags & CV_LOAD_IMAGE_ANYCOLOR) != 0 && CV_MAT_CN(type) > 1) )
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 3);
        else
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 1);
    }

    if( hdrtype == LOAD_CVMAT || hdrtype == LOAD_MAT )
    {
        if( hdrtype == LOAD_CVMAT )
        {
            matrix = cvCreateMat( size.height, size.width, type );
            temp = cvarrToMat(matrix);
        }
        else
        {
            mat->create( size.height, size.width, type );
            data = mat;
        }
    }
    else
    {
        image = cvCreateImage( size, cvIplDepth(type), CV_MAT_CN(type) );
        temp = cvarrToMat(image);
    }

    if( !decoder->readData( *data ))
    {
        cvReleaseImage( &image );
        cvReleaseMat( &matrix );
        if( mat )
            mat->release();
        return 0;
    }

    return hdrtype == LOAD_CVMAT ? (void*)matrix :
        hdrtype == LOAD_IMAGE ? (void*)image : (void*)mat;
}
Developer: Ashwini7, project: smart-python-programs, lines: 62


Example 6: cvGetRectSubPix

CV_IMPL voidcvGetRectSubPix( const void* srcarr, void* dstarr, CvPoint2D32f center ){    static CvFuncTable gr_tab[2];    static int inittab = 0;    CV_FUNCNAME( "cvGetRectSubPix" );    __BEGIN__;    CvMat srcstub, *src = (CvMat*)srcarr;    CvMat dststub, *dst = (CvMat*)dstarr;    CvSize src_size, dst_size;    CvGetRectSubPixFunc func;    int cn, src_step, dst_step;    if( !inittab )    {        icvInitGetRectSubPixC1RTable( gr_tab + 0 );        icvInitGetRectSubPixC3RTable( gr_tab + 1 );        inittab = 1;    }    if( !CV_IS_MAT(src))        CV_CALL( src = cvGetMat( src, &srcstub ));    if( !CV_IS_MAT(dst))        CV_CALL( dst = cvGetMat( dst, &dststub ));    cn = CV_MAT_CN( src->type );    if( (cn != 1 && cn != 3) || !CV_ARE_CNS_EQ( src, dst ))        CV_ERROR( CV_StsUnsupportedFormat, "" );    src_size = cvGetMatSize( src );    dst_size = cvGetMatSize( dst );    src_step = src->step ? src->step : CV_STUB_STEP;    dst_step = dst->step ? dst->step : CV_STUB_STEP;    //if( dst_size.width > src_size.width || dst_size.height > src_size.height )    //    CV_ERROR( CV_StsBadSize, "destination ROI must be smaller than source ROI" );    if( CV_ARE_DEPTHS_EQ( src, dst ))    {        func = (CvGetRectSubPixFunc)(gr_tab[cn != 1].fn_2d[CV_MAT_DEPTH(src->type)]);    }    else    {        if( CV_MAT_DEPTH( src->type ) != CV_8U || CV_MAT_DEPTH( dst->type ) != CV_32F )            CV_ERROR( CV_StsUnsupportedFormat, "" );        func = (CvGetRectSubPixFunc)(gr_tab[cn != 1].fn_2d[1]);    }    if( !func )        CV_ERROR( CV_StsUnsupportedFormat, "" );    IPPI_CALL( func( src->data.ptr, src_step, src_size,                     dst->data.ptr, dst_step, dst_size, center ));    __END__;}
Developer: allanca, project: otterdive, lines: 61


Example 7: cvIntegral

CV_IMPL voidcvIntegral( const CvArr* image, CvArr* sumImage,            CvArr* sumSqImage, CvArr* tiltedSumImage ){        CV_FUNCNAME( "cvIntegralImage" );    __BEGIN__;    CvMat src_stub, *src = (CvMat*)image;    CvMat sum_stub, *sum = (CvMat*)sumImage;    CvMat sqsum_stub, *sqsum = (CvMat*)sumSqImage;    CvMat tilted_stub, *tilted = (CvMat*)tiltedSumImage;    int coi0 = 0, coi1 = 0, coi2 = 0, coi3 = 0;    //int depth;    int cn;    int src_step, sum_step, sqsum_step, tilted_step;    CvSize size;    CV_CALL( src = cvGetMat( src, &src_stub, &coi0 ));    CV_CALL( sum = cvGetMat( sum, &sum_stub, &coi1 ));        if( sum->width != src->width + 1 ||        sum->height != src->height + 1 )        CV_ERROR( CV_StsUnmatchedSizes, "" );	if(CV_MAT_DEPTH(src->type)!=CV_8U || CV_MAT_CN(src->type)!=1)		CV_ERROR( CV_StsUnsupportedFormat, "the source array must be 8UC1");    if( CV_MAT_DEPTH( sum->type ) != CV_32S ||        !CV_ARE_CNS_EQ( src, sum ))        CV_ERROR( CV_StsUnsupportedFormat,        "Sum array must have 32s type in case of 8u source array"        "and the same number of channels as the source array" );    if( sqsum )    {        CV_CALL( sqsum = cvGetMat( sqsum, &sqsum_stub, &coi2 ));        if( !CV_ARE_SIZES_EQ( sum, sqsum ) )            CV_ERROR( CV_StsUnmatchedSizes, "" );        if( CV_MAT_DEPTH( sqsum->type ) != CV_64S || !CV_ARE_CNS_EQ( src, sqsum ))            CV_ERROR( CV_StsUnsupportedFormat,                      "Squares sum array must be 64s "                      "and the same number of channels as the source array" );    }    if( tilted )    {        if( !sqsum )            CV_ERROR( CV_StsNullPtr,            "Squared sum array must be passed if tilted sum array is passed" );        CV_CALL( tilted = cvGetMat( tilted, &tilted_stub, &coi3 ));        if( !CV_ARE_SIZES_EQ( sum, tilted ) )            CV_ERROR( CV_StsUnmatchedSizes, "" );        if( !CV_ARE_TYPES_EQ( sum, tilted ) )            CV_ERROR( CV_StsUnmatchedFormats,                      "Sum and tilted sum must have the same types" );        if( CV_MAT_CN(tilted->type) != 1 )            CV_ERROR( CV_StsNotImplemented,                      "Tilted sum can not be computed for multi-channel arrays" );    }    if( coi0 || coi1 || coi2 || coi3 )        CV_ERROR( CV_BadCOI, "COI is not supported by the function" );    //depth = CV_MAT_DEPTH(src->type);    cn = CV_MAT_CN(src->type);    size = cvGetMatSize(src);    src_step = src->step ? src->step : CV_STUB_STEP;    sum_step = sum->step ? sum->step : CV_STUB_STEP;    sqsum_step = !sqsum ? 0 : sqsum->step ? sqsum->step : CV_STUB_STEP;    tilted_step = !tilted ? 0 : tilted->step ? tilted->step : CV_STUB_STEP;    if( cn == 1 )    {                cvIntegralImage_8u32s64s_C1R( src->data.ptr, src_step, (int*)(sum->data.ptr), sum_step,                        sqsum ? (int64*)(sqsum->data.ptr) : 0, sqsum_step,                        tilted ? (int*)(tilted->data.ptr) : 0, tilted_step, size );    }    __END__;}
Developer: Jeaniowang, project: EasyMulticoreDSP, lines: 87


Example 8: CV_FUNCNAME

void CvBoxFilter::init( int _max_width, int _src_type, int _dst_type,                        bool _normalized, CvSize _ksize,                        CvPoint _anchor, int _border_mode,                        CvScalar _border_value ){    CV_FUNCNAME( "CvBoxFilter::init" );    __BEGIN__;        sum = 0;    normalized = _normalized;    if( normalized && CV_MAT_TYPE(_src_type) != CV_MAT_TYPE(_dst_type) ||        !normalized && CV_MAT_CN(_src_type) != CV_MAT_CN(_dst_type))        CV_ERROR( CV_StsUnmatchedFormats,        "In case of normalized box filter input and output must have the same type./n"        "In case of unnormalized box filter the number of input and output channels must be the same" );    min_depth = CV_MAT_DEPTH(_src_type) == CV_8U ? CV_32S : CV_64F;    CvBaseImageFilter::init( _max_width, _src_type, _dst_type, 1, _ksize,                             _anchor, _border_mode, _border_value );        scale = normalized ? 1./(ksize.width*ksize.height) : 1;    if( CV_MAT_DEPTH(src_type) == CV_8U )        x_func = (CvRowFilterFunc)icvSumRow_8u32s;    else if( CV_MAT_DEPTH(src_type) == CV_32F )        x_func = (CvRowFilterFunc)icvSumRow_32f64f;    else        CV_ERROR( CV_StsUnsupportedFormat, "Unknown/unsupported input image format" );    if( CV_MAT_DEPTH(dst_type) == CV_8U )    {        if( !normalized )            CV_ERROR( CV_StsBadArg, "Only normalized box filter can be used for 8u->8u transformation" );        y_func = (CvColumnFilterFunc)icvSumCol_32s8u;    }    else if( CV_MAT_DEPTH(dst_type) == CV_16S )    {        if( normalized || CV_MAT_DEPTH(src_type) != CV_8U )            CV_ERROR( CV_StsBadArg, "Only 8u->16s unnormalized box filter is supported in case of 16s output" );        y_func = (CvColumnFilterFunc)icvSumCol_32s16s;    }	else if( CV_MAT_DEPTH(dst_type) == CV_32S )	{		if( normalized || CV_MAT_DEPTH(src_type) != CV_8U )			CV_ERROR( CV_StsBadArg, "Only 8u->32s unnormalized box filter is supported in case of 32s output");		y_func = (CvColumnFilterFunc)icvSumCol_32s32s;	}    else if( CV_MAT_DEPTH(dst_type) == CV_32F )    {        if( CV_MAT_DEPTH(src_type) != CV_32F )            CV_ERROR( CV_StsBadArg, "Only 32f->32f box filter (normalized or not) is supported in case of 32f output" );        y_func = (CvColumnFilterFunc)icvSumCol_64f32f;    }	else{		CV_ERROR( CV_StsBadArg, "Unknown/unsupported destination image format" );	}    __END__;}
Developer: cybertk, project: opencv, lines: 63


Example 9: cvConvexHull2

CV_IMPL CvSeq*cvConvexHull2( const CvArr* array, void* hull_storage,               int orientation, int return_points ){    CvMat* mat = 0;    CvContour contour_header;    CvSeq hull_header;    CvSeqBlock block, hullblock;    CvSeq* ptseq = 0;    CvSeq* hullseq = 0;    if( CV_IS_SEQ( array ))    {        ptseq = (CvSeq*)array;        if( !CV_IS_SEQ_POINT_SET( ptseq ))            CV_Error( CV_StsBadArg, "Unsupported sequence type" );        if( hull_storage == 0 )            hull_storage = ptseq->storage;    }    else    {        ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block );    }    bool isStorage = isStorageOrMat(hull_storage);    if(isStorage)    {        if( return_points )        {            hullseq = cvCreateSeq(CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE(ptseq)|                                  CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX,                                  sizeof(CvContour), sizeof(CvPoint),(CvMemStorage*)hull_storage );        }        else        {            hullseq = cvCreateSeq(                                  CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE_PPOINT|                                  CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX,                                  sizeof(CvContour), sizeof(CvPoint*), (CvMemStorage*)hull_storage );        }    }    else    {        mat = (CvMat*)hull_storage;        if( (mat->cols != 1 && mat->rows != 1) || !CV_IS_MAT_CONT(mat->type))            CV_Error( CV_StsBadArg,                     "The hull matrix should be continuous and have a single row or a single column" );        if( mat->cols + mat->rows - 1 < ptseq->total )            CV_Error( CV_StsBadSize, "The hull matrix size might be not enough to fit the hull" );        if( CV_MAT_TYPE(mat->type) != CV_SEQ_ELTYPE(ptseq) &&           CV_MAT_TYPE(mat->type) != CV_32SC1 )            CV_Error( CV_StsUnsupportedFormat,                     "The hull matrix must have the same type as input or 32sC1 (integers)" );        hullseq = cvMakeSeqHeaderForArray(                                          CV_SEQ_KIND_CURVE|CV_MAT_TYPE(mat->type)|CV_SEQ_FLAG_CLOSED,                                          sizeof(hull_header), CV_ELEM_SIZE(mat->type), mat->data.ptr,                                          mat->cols + mat->rows - 1, &hull_header, &hullblock );        cvClearSeq( hullseq );    }    int hulltype = CV_SEQ_ELTYPE(hullseq);    int total = ptseq->total;    if( total == 0 )    {        if( !isStorage )            CV_Error( CV_StsBadSize,                     "Point sequence can not be empty if the output is matrix" );        return 0;    }    cv::AutoBuffer<double> _ptbuf;    cv::Mat h0;    cv::convexHull(cv::cvarrToMat(ptseq, false, false, 0, &_ptbuf), h0,                   orientation == CV_CLOCKWISE, CV_MAT_CN(hulltype) == 2);    if( hulltype == CV_SEQ_ELTYPE_PPOINT )    {        const int* idx = h0.ptr<int>();        int ctotal = (int)h0.total();        for( int i = 0; i < ctotal; i++ )        {            void* ptr = cvGetSeqElem(ptseq, idx[i]);            cvSeqPush( hullseq, &ptr );        }    }    else        cvSeqPushMulti(hullseq, h0.ptr(), (int)h0.total());    if (isStorage)    {        return hullseq;    }    else    {//.........这里部分代码省略.........
Developer: Aspie96, project: opencv, lines: 101


Example 10: icvSumCol_32s16s

static voidicvSumCol_32s16s( const int** src, short* dst,                  int dst_step, int count, void* params ){    CvBoxFilter* state = (CvBoxFilter*)params;    int ksize = state->get_kernel_size().height;    int ktotal = ksize*state->get_kernel_size().width;    int i, width = state->get_width();    int cn = CV_MAT_CN(state->get_src_type());    int* sum = (int*)state->get_sum_buf();    int* _sum_count = state->get_sum_count_ptr();    int sum_count = *_sum_count;    dst_step /= sizeof(dst[0]);    width *= cn;    src += sum_count;    count += ksize - 1 - sum_count;    for( ; count--; src++ )    {        const int* sp = src[0];        if( sum_count+1 < ksize )        {            for( i = 0; i <= width - 2; i += 2 )            {                int s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];                sum[i] = s0; sum[i+1] = s1;            }            for( ; i < width; i++ )                sum[i] += sp[i];            sum_count++;        }        else if( ktotal < 128 )        {            const int* sm = src[-ksize+1];            for( i = 0; i <= width - 2; i += 2 )            {                int s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];                dst[i] = (short)s0; dst[i+1] = (short)s1;                s0 -= sm[i]; s1 -= sm[i+1];                sum[i] = s0; sum[i+1] = s1;            }            for( ; i < width; i++ )            {                int s0 = sum[i] + sp[i];                dst[i] = (short)s0;                sum[i] = s0 - sm[i];            }            dst += dst_step;        }        else        {            const int* sm = src[-ksize+1];            for( i = 0; i <= width - 2; i += 2 )            {                int s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];                dst[i] = CV_CAST_16S(s0); dst[i+1] = CV_CAST_16S(s1);                s0 -= sm[i]; s1 -= sm[i+1];                sum[i] = s0; sum[i+1] = s1;            }            for( ; i < width; i++ )            {                int s0 = sum[i] + sp[i];                dst[i] = CV_CAST_16S(s0);                sum[i] = s0 - sm[i];            }            dst += dst_step;        }    }    *_sum_count = sum_count;}
Developer: cybertk, project: opencv, lines: 76


Example 11: icvSumCol_64f32f

static voidicvSumCol_64f32f( const double** src, float* dst,                  int dst_step, int count, void* params ){    CvBoxFilter* state = (CvBoxFilter*)params;    int ksize = state->get_kernel_size().height;    int i, width = state->get_width();    int cn = CV_MAT_CN(state->get_src_type());    double scale = state->get_scale();    bool normalized = state->is_normalized();    double* sum = (double*)state->get_sum_buf();    int* _sum_count = state->get_sum_count_ptr();    int sum_count = *_sum_count;    dst_step /= sizeof(dst[0]);    width *= cn;    src += sum_count;    count += ksize - 1 - sum_count;    for( ; count--; src++ )    {        const double* sp = src[0];        if( sum_count+1 < ksize )        {            for( i = 0; i <= width - 2; i += 2 )            {                double s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];                sum[i] = s0; sum[i+1] = s1;            }            for( ; i < width; i++ )                sum[i] += sp[i];            sum_count++;        }        else        {            const double* sm = src[-ksize+1];            if( normalized )                for( i = 0; i <= width - 2; i += 2 )                {                    double s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];                    double t0 = s0*scale, t1 = s1*scale;                    s0 -= sm[i]; s1 -= sm[i+1];                    dst[i] = (float)t0; dst[i+1] = (float)t1;                    sum[i] = s0; sum[i+1] = s1;                }            else                for( i = 0; i <= width - 2; i += 2 )                {                    double s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];                    dst[i] = (float)s0; dst[i+1] = (float)s1;                    s0 -= sm[i]; s1 -= sm[i+1];                    sum[i] = s0; sum[i+1] = s1;                }            for( ; i < width; i++ )            {                double s0 = sum[i] + sp[i], t0 = s0*scale;                sum[i] = s0 - sm[i]; dst[i] = (float)t0;            }            dst += dst_step;        }    }    *_sum_count = sum_count;}
Developer: cybertk, project: opencv, lines: 67


示例12: return

inline int oclMat::oclchannels() const
{
    return CV_MAT_CN(flags) == 3 ? 4 : CV_MAT_CN(flags);
}
Developer: MPS-UPB, project: 10Team, lines: 4
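A note on example 12: the snippet below is a standalone sketch of the same mapping (my own code, not taken from the OpenCV sources), written under the assumption that the old ocl module stores 3-channel data padded to 4 channels, which is why oclchannels() reports 4 for CV_*C3 types and passes every other channel count through unchanged.

#include <opencv2/core.hpp>
#include <cassert>

// Hypothetical helper reproducing the example-12 logic on plain type flags.
static int oclchannels_like(int flags)
{
    int cn = CV_MAT_CN(flags);
    return cn == 3 ? 4 : cn;          // 3-channel types are reported as 4
}

int main()
{
    assert(oclchannels_like(CV_8UC1)  == 1);
    assert(oclchannels_like(CV_8UC3)  == 4);  // the padded case
    assert(oclchannels_like(CV_32FC4) == 4);
    return 0;
}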


Example 13: cvKMeans2

CV_IMPL voidcvKMeans2( const CvArr* samples_arr, int cluster_count,           CvArr* labels_arr, CvTermCriteria termcrit ){    CvMat* centers = 0;    CvMat* old_centers = 0;    CvMat* counters = 0;    CV_FUNCNAME( "cvKMeans2" );    __BEGIN__;    CvMat samples_stub, labels_stub;    CvMat* samples = (CvMat*)samples_arr;    CvMat* labels = (CvMat*)labels_arr;    CvMat* temp = 0;    CvRNG rng = CvRNG(-1);    int i, j, k, sample_count, dims;    int ids_delta, iter;    double max_dist;    if( !CV_IS_MAT( samples ))        CV_CALL( samples = cvGetMat( samples, &samples_stub ));    if( !CV_IS_MAT( labels ))        CV_CALL( labels = cvGetMat( labels, &labels_stub ));    if( cluster_count < 1 )        CV_ERROR( CV_StsOutOfRange, "Number of clusters should be positive" );    if( CV_MAT_DEPTH(samples->type) != CV_32F || CV_MAT_TYPE(labels->type) != CV_32SC1 )        CV_ERROR( CV_StsUnsupportedFormat,        "samples should be floating-point matrix, cluster_idx - integer vector" );    if( labels->rows != 1 && (labels->cols != 1 || !CV_IS_MAT_CONT(labels->type)) ||        labels->rows + labels->cols - 1 != samples->rows )        CV_ERROR( CV_StsUnmatchedSizes,        "cluster_idx should be 1D vector of the same number of elements as samples' number of rows" );    CV_CALL( termcrit = cvCheckTermCriteria( termcrit, 1e-6, 100 ));    termcrit.epsilon *= termcrit.epsilon;    sample_count = samples->rows;    if( cluster_count > sample_count )        cluster_count = sample_count;    dims = samples->cols*CV_MAT_CN(samples->type);    ids_delta = labels->step ? labels->step/(int)sizeof(int) : 1;    CV_CALL( centers = cvCreateMat( cluster_count, dims, CV_64FC1 ));    CV_CALL( old_centers = cvCreateMat( cluster_count, dims, CV_64FC1 ));    CV_CALL( counters = cvCreateMat( 1, cluster_count, CV_32SC1 ));    // init centers    for( i = 0; i < sample_count; i++ )        labels->data.i[i] = cvRandInt(&rng) % cluster_count;    counters->cols = cluster_count; // cut down counters    max_dist = termcrit.epsilon*2;    for( iter = 0; iter < termcrit.max_iter; iter++ )    {        // computer centers        cvZero( centers );        cvZero( counters );        for( i = 0; i < sample_count; i++ )        {            float* s = (float*)(samples->data.ptr + i*samples->step);            k = labels->data.i[i*ids_delta];            double* c = (double*)(centers->data.ptr + k*centers->step);            for( j = 0; j <= dims - 4; j += 4 )            {                double t0 = c[j] + s[j];                double t1 = c[j+1] + s[j+1];                c[j] = t0;                c[j+1] = t1;                t0 = c[j+2] + s[j+2];                t1 = c[j+3] + s[j+3];                c[j+2] = t0;                c[j+3] = t1;            }            for( ; j < dims; j++ )                c[j] += s[j];            counters->data.i[k]++;        }        if( iter > 0 )            max_dist = 0;        for( k = 0; k < cluster_count; k++ )        {            double* c = (double*)(centers->data.ptr + k*centers->step);            if( counters->data.i[k] != 0 )            {                double scale = 1./counters->data.i[k];//.........这里部分代码省略.........
Developer: cybertk, project: opencv, lines: 101


Example 14: cvFindStereoCorrespondenceGC

CV_IMPL void cvFindStereoCorrespondenceGC( const CvArr* _left, const CvArr* _right,    CvArr* _dispLeft, CvArr* _dispRight, CvStereoGCState* state, int useDisparityGuess ){    CvStereoGCState2 state2;    state2.orphans = 0;    state2.maxOrphans = 0;    CV_FUNCNAME( "cvFindStereoCorrespondenceGC" );    __BEGIN__;    CvMat lstub, *left = cvGetMat( _left, &lstub );    CvMat rstub, *right = cvGetMat( _right, &rstub );    CvMat dlstub, *dispLeft = cvGetMat( _dispLeft, &dlstub );    CvMat drstub, *dispRight = cvGetMat( _dispRight, &drstub );    CvSize size;    int iter, i, nZeroExpansions = 0;    CvRNG rng = cvRNG(-1);    int* disp;    CvMat _disp;    int64 E;    CV_ASSERT( state != 0 );    CV_ASSERT( CV_ARE_SIZES_EQ(left, right) && CV_ARE_TYPES_EQ(left, right) &&               CV_MAT_TYPE(left->type) == CV_8UC1 );    CV_ASSERT( !dispLeft ||        (CV_ARE_SIZES_EQ(dispLeft, left) && CV_MAT_CN(dispLeft->type) == 1) );    CV_ASSERT( !dispRight ||        (CV_ARE_SIZES_EQ(dispRight, left) && CV_MAT_CN(dispRight->type) == 1) );    size = cvGetSize(left);    if( !state->left || state->left->width != size.width || state->left->height != size.height )    {        int pcn = (int)(sizeof(GCVtx*)/sizeof(int));        int vcn = (int)(sizeof(GCVtx)/sizeof(int));        int ecn = (int)(sizeof(GCEdge)/sizeof(int));        cvReleaseMat( &state->left );        cvReleaseMat( &state->right );        cvReleaseMat( &state->ptrLeft );        cvReleaseMat( &state->ptrRight );        cvReleaseMat( &state->dispLeft );        cvReleaseMat( &state->dispRight );        state->left = cvCreateMat( size.height, size.width, CV_8UC3 );        state->right = cvCreateMat( size.height, size.width, CV_8UC3 );        state->dispLeft = cvCreateMat( size.height, size.width, CV_16SC1 );        state->dispRight = cvCreateMat( size.height, size.width, CV_16SC1 );        state->ptrLeft = cvCreateMat( size.height, size.width, CV_32SC(pcn) );        state->ptrRight = cvCreateMat( size.height, size.width, CV_32SC(pcn) );        state->vtxBuf = cvCreateMat( 1, size.height*size.width*2, CV_32SC(vcn) );        state->edgeBuf = cvCreateMat( 1, size.height*size.width*12 + 16, CV_32SC(ecn) );    }    if( !useDisparityGuess )    {        cvSet( state->dispLeft, cvScalarAll(OCCLUDED));        cvSet( state->dispRight, cvScalarAll(OCCLUDED));    }    else    {        CV_ASSERT( dispLeft && dispRight );        cvConvert( dispLeft, state->dispLeft );        cvConvert( dispRight, state->dispRight );    }    state2.Ithreshold = state->Ithreshold;    state2.interactionRadius = state->interactionRadius;    state2.lambda = cvRound(state->lambda*DENOMINATOR);    state2.lambda1 = cvRound(state->lambda1*DENOMINATOR);    state2.lambda2 = cvRound(state->lambda2*DENOMINATOR);    state2.K = cvRound(state->K*DENOMINATOR);    icvInitStereoConstTabs();    icvInitGraySubpix( left, right, state->left, state->right );    disp = (int*)cvStackAlloc( state->numberOfDisparities*sizeof(disp[0]) );    _disp = cvMat( 1, state->numberOfDisparities, CV_32S, disp );    cvRange( &_disp, state->minDisparity, state->minDisparity + state->numberOfDisparities );    cvRandShuffle( &_disp, &rng );    if( state2.lambda < 0 && (state2.K < 0 || state2.lambda1 < 0 || state2.lambda2 < 0) )    {        float L = icvComputeK(state)*0.2f;        state2.lambda = cvRound(L*DENOMINATOR);    }    if( state2.K < 0 )        state2.K = state2.lambda*5;    if( state2.lambda1 < 0 )        state2.lambda1 = state2.lambda*3;    if( state2.lambda2 < 0 )        state2.lambda2 = state2.lambda;    
icvInitStereoTabs( &state2 );    E = icvComputeEnergy( state, &state2, !useDisparityGuess );    for( iter = 0; iter < state->maxIters; iter++ )    {        for( i = 0; i < state->numberOfDisparities; i++ )        {            int alpha = disp[i];//.........这里部分代码省略.........
Developer: glo, project: ee384b, lines: 101


Example 15: imreadmulti_

/*** Read an image into memory and return the information** @param[in] filename File to load* @param[in] flags Flags* @param[in] mats Reference to C++ vector<Mat> object to hold the images**/static boolimreadmulti_(const String& filename, int flags, std::vector<Mat>& mats){    /// Search for the relevant decoder to handle the imagery    ImageDecoder decoder;#ifdef HAVE_GDAL    if (flags != IMREAD_UNCHANGED && (flags & IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL){        decoder = GdalDecoder().newDecoder();    }    else{#endif        decoder = findDecoder(filename);#ifdef HAVE_GDAL    }#endif    /// if no decoder was found, return nothing.    if (!decoder){        return 0;    }    /// set the filename in the driver    decoder->setSource(filename);    // read the header to make sure it succeeds    if (!decoder->readHeader())        return 0;    for (;;)    {        // grab the decoded type        int type = decoder->type();        if( (flags & IMREAD_LOAD_GDAL) != IMREAD_LOAD_GDAL && flags != IMREAD_UNCHANGED )        {            if ((flags & CV_LOAD_IMAGE_ANYDEPTH) == 0)                type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type));            if ((flags & CV_LOAD_IMAGE_COLOR) != 0 ||                ((flags & CV_LOAD_IMAGE_ANYCOLOR) != 0 && CV_MAT_CN(type) > 1))                type = CV_MAKETYPE(CV_MAT_DEPTH(type), 3);            else                type = CV_MAKETYPE(CV_MAT_DEPTH(type), 1);        }        // read the image data        Mat mat(decoder->height(), decoder->width(), type);        if (!decoder->readData(mat))        {            // optionally rotate the data if EXIF' orientation flag says so            if( (flags & IMREAD_IGNORE_ORIENTATION) == 0 && flags != IMREAD_UNCHANGED )            {                ApplyExifOrientation(filename, mat);            }            break;        }        mats.push_back(mat);        if (!decoder->nextPage())        {            break;        }    }    return !mats.empty();}
Developer: GilseoneMoraes, project: opencv, lines: 75


Example 16: cvMeanShift

/*F/////////////////////////////////////////////////////////////////////////////////////////    Name:    cvMeanShift//    Purpose: MeanShift algorithm//    Context://    Parameters://      imgProb     - 2D object probability distribution//      windowIn    - CvRect of CAMSHIFT Window intial size//      numIters    - If CAMSHIFT iterates this many times, stop//      windowOut   - Location, height and width of converged CAMSHIFT window//      len         - If != NULL, return equivalent len//      width       - If != NULL, return equivalent width//      itersUsed   - Returns number of iterations CAMSHIFT took to converge//    Returns://      The function itself returns the area found//    Notes://F*/CV_IMPL intcvMeanShift( const void* imgProb, CvRect windowIn,             CvTermCriteria criteria, CvConnectedComp* comp ){    CvMoments moments;    int    i = 0, eps;    CvMat  stub, *mat = (CvMat*)imgProb;    CvMat  cur_win;    CvRect cur_rect = windowIn;    CV_FUNCNAME( "cvMeanShift" );    if( comp )        comp->rect = windowIn;    moments.m00 = moments.m10 = moments.m01 = 0;    __BEGIN__;    CV_CALL( mat = cvGetMat( mat, &stub ));    if( CV_MAT_CN( mat->type ) > 1 )        CV_ERROR( CV_BadNumChannels, cvUnsupportedFormat );    if( windowIn.height <= 0 || windowIn.width <= 0 )        CV_ERROR( CV_StsBadArg, "Input window has non-positive sizes" );    if( windowIn.x < 0 || windowIn.x + windowIn.width > mat->cols ||        windowIn.y < 0 || windowIn.y + windowIn.height > mat->rows )        CV_ERROR( CV_StsBadArg, "Initial window is not inside the image ROI" );    CV_CALL( criteria = cvCheckTermCriteria( criteria, 1., 100 ));    eps = cvRound( criteria.epsilon * criteria.epsilon );    for( i = 0; i < criteria.max_iter; i++ )    {        int dx, dy, nx, ny;        double inv_m00;        CV_CALL( cvGetSubRect( mat, &cur_win, cur_rect ));         CV_CALL( cvMoments( &cur_win, &moments ));        /* Calculating center of mass */        if( fabs(moments.m00) < DBL_EPSILON )            break;        inv_m00 = moments.inv_sqrt_m00*moments.inv_sqrt_m00;        dx = cvRound( moments.m10 * inv_m00 - windowIn.width*0.5 );        dy = cvRound( moments.m01 * inv_m00 - windowIn.height*0.5 );        nx = cur_rect.x + dx;        ny = cur_rect.y + dy;        if( nx < 0 )            nx = 0;        else if( nx + cur_rect.width > mat->cols )            nx = mat->cols - cur_rect.width;        if( ny < 0 )            ny = 0;        else if( ny + cur_rect.height > mat->rows )            ny = mat->rows - cur_rect.height;        dx = nx - cur_rect.x;        dy = ny - cur_rect.y;        cur_rect.x = nx;        cur_rect.y = ny;        /* Check for coverage centers mass & window */        if( dx*dx + dy*dy < eps )            break;    }    __END__;    if( comp )    {        comp->rect = cur_rect;        comp->area = (float)moments.m00;    }    return i;}
Developer: 273k, project: OpenCV-Android, lines: 100


Example 17: image2Mat

/* Convert QImage to cv::Mat */cv::Mat image2Mat(const QImage &img, int requiredMatType, MatColorOrder requriedOrder){    int targetDepth = CV_MAT_DEPTH(requiredMatType);    int targetChannels = CV_MAT_CN(requiredMatType);    Q_ASSERT(targetChannels==CV_CN_MAX || targetChannels==1 || targetChannels==3 || targetChannels==4);    Q_ASSERT(targetDepth==CV_8U || targetDepth==CV_16U || targetDepth==CV_32F);    if (img.isNull())        return cv::Mat();    //Find the closest image format that can be used in image2Mat_shared()    QImage::Format format = findClosestFormat(img.format());    QImage image = (format==img.format()) ? img : img.convertToFormat(format);    MatColorOrder srcOrder;    cv::Mat mat0 = image2Mat_shared(image, &srcOrder);    //Adjust mat channells if needed.    cv::Mat mat_adjustCn;    const float maxAlpha = targetDepth==CV_8U ? 255 : (targetDepth==CV_16U ? 65535 : 1.0);    if (targetChannels == CV_CN_MAX)        targetChannels = mat0.channels();    switch(targetChannels) {    case 1:        if (mat0.channels() == 3) {            cv::cvtColor(mat0, mat_adjustCn, CV_RGB2GRAY);        } else if (mat0.channels() == 4) {            if (srcOrder == MCO_BGRA)                cv::cvtColor(mat0, mat_adjustCn, CV_BGRA2GRAY);            else if (srcOrder == MCO_RGBA)                cv::cvtColor(mat0, mat_adjustCn, CV_RGBA2GRAY);            else//MCO_ARGB                cv::cvtColor(argb2bgra(mat0), mat_adjustCn, CV_BGRA2GRAY);        }        break;    case 3:        if (mat0.channels() == 1) {            cv::cvtColor(mat0, mat_adjustCn, requriedOrder == MCO_BGR ? CV_GRAY2BGR : CV_GRAY2RGB);        } else if (mat0.channels() == 3) {            if (requriedOrder != srcOrder)                cv::cvtColor(mat0, mat_adjustCn, CV_RGB2BGR);        } else if (mat0.channels() == 4) {            if (srcOrder == MCO_ARGB) {                mat_adjustCn = cv::Mat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 3));                int ARGB2RGB[] = {1,0, 2,1, 3,2};                int ARGB2BGR[] = {1,2, 2,1, 3,0};                cv::mixChannels(&mat0, 1, &mat_adjustCn, 1, requriedOrder == MCO_BGR ? ARGB2BGR : ARGB2RGB, 3);            } else if (srcOrder == MCO_BGRA) {                cv::cvtColor(mat0, mat_adjustCn, requriedOrder == MCO_BGR ? CV_BGRA2BGR : CV_BGRA2RGB);            } else {//RGBA                cv::cvtColor(mat0, mat_adjustCn, requriedOrder == MCO_BGR ? 
CV_RGBA2BGR : CV_RGBA2RGB);            }        }        break;    case 4:        if (mat0.channels() == 1) {            if (requriedOrder == MCO_ARGB) {                cv::Mat alphaMat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 1), cv::Scalar(maxAlpha));                mat_adjustCn = cv::Mat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 4));                cv::Mat in[] = {alphaMat, mat0};                int from_to[] = {0,0, 1,1, 1,2, 1,3};                cv::mixChannels(in, 2, &mat_adjustCn, 1, from_to, 4);            } else if (requriedOrder == MCO_RGBA) {                cv::cvtColor(mat0, mat_adjustCn, CV_GRAY2RGBA);            } else {//MCO_BGRA                cv::cvtColor(mat0, mat_adjustCn, CV_GRAY2BGRA);            }        } else if (mat0.channels() == 3) {            if (requriedOrder == MCO_ARGB) {                cv::Mat alphaMat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 1), cv::Scalar(maxAlpha));                mat_adjustCn = cv::Mat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 4));                cv::Mat in[] = {alphaMat, mat0};                int from_to[] = {0,0, 1,1, 2,2, 3,3};                cv::mixChannels(in, 2, &mat_adjustCn, 1, from_to, 4);            } else if (requriedOrder == MCO_RGBA) {                cv::cvtColor(mat0, mat_adjustCn, CV_RGB2RGBA);            } else {//MCO_BGRA                cv::cvtColor(mat0, mat_adjustCn, CV_RGB2BGRA);            }        } else if (mat0.channels() == 4) {            if (srcOrder != requriedOrder)                mat_adjustCn = adjustChannelsOrder(mat0, srcOrder, requriedOrder);        }        break;    default:        break;    }    //Adjust depth if needed.    if (targetDepth == CV_8U)        return mat_adjustCn.empty() ? mat0.clone() : mat_adjustCn;    if (mat_adjustCn.empty())        mat_adjustCn = mat0;    cv::Mat mat_adjustDepth;    mat_adjustCn.convertTo(mat_adjustDepth, CV_MAKE_TYPE(targetDepth, mat_adjustCn.channels()), targetDepth == CV_16U ? 255.0 : 1/255.0);    return mat_adjustDepth;}
Developer: AndresCidoncha, project: Proyecto-Videovigilancia-SOA, lines: 100


Example 18: cvGetQuadrangleSubPix

CV_IMPL voidcvGetQuadrangleSubPix( const void* srcarr, void* dstarr, const CvMat* mat ){    static  CvFuncTable  gq_tab[2];    static  int inittab = 0;    CV_FUNCNAME( "cvGetQuadrangleSubPix" );    __BEGIN__;    CvMat srcstub, *src = (CvMat*)srcarr;    CvMat dststub, *dst = (CvMat*)dstarr;    CvSize src_size, dst_size;    CvGetQuadrangleSubPixFunc func;    float m[6];    int k, cn;    if( !inittab )    {        icvInitGetQuadrangleSubPixC1RTable( gq_tab + 0 );        icvInitGetQuadrangleSubPixC3RTable( gq_tab + 1 );        inittab = 1;    }    if( !CV_IS_MAT(src))        CV_CALL( src = cvGetMat( src, &srcstub ));    if( !CV_IS_MAT(dst))        CV_CALL( dst = cvGetMat( dst, &dststub ));    if( !CV_IS_MAT(mat))        CV_ERROR( CV_StsBadArg, "map matrix is not valid" );    cn = CV_MAT_CN( src->type );    if( (cn != 1 && cn != 3) || !CV_ARE_CNS_EQ( src, dst ))        CV_ERROR( CV_StsUnsupportedFormat, "" );    src_size = cvGetMatSize( src );    dst_size = cvGetMatSize( dst );    /*if( dst_size.width > src_size.width || dst_size.height > src_size.height )        CV_ERROR( CV_StsBadSize, "destination ROI must not be larger than source ROI" );*/    if( mat->rows != 2 || mat->cols != 3 )        CV_ERROR( CV_StsBadArg,        "Transformation matrix must be 2x3" );    if( CV_MAT_TYPE( mat->type ) == CV_32FC1 )    {        for( k = 0; k < 3; k++ )        {            m[k] = mat->data.fl[k];            m[3 + k] = ((float*)(mat->data.ptr + mat->step))[k];        }    }    else if( CV_MAT_TYPE( mat->type ) == CV_64FC1 )    {        for( k = 0; k < 3; k++ )        {            m[k] = (float)mat->data.db[k];            m[3 + k] = (float)((double*)(mat->data.ptr + mat->step))[k];        }    }    else        CV_ERROR( CV_StsUnsupportedFormat,            "The transformation matrix should have 32fC1 or 64fC1 type" );    if( CV_ARE_DEPTHS_EQ( src, dst ))    {        func = (CvGetQuadrangleSubPixFunc)(gq_tab[cn != 1].fn_2d[CV_MAT_DEPTH(src->type)]);    }    else    {        if( CV_MAT_DEPTH( src->type ) != CV_8U || CV_MAT_DEPTH( dst->type ) != CV_32F )            CV_ERROR( CV_StsUnsupportedFormat, "" );        func = (CvGetQuadrangleSubPixFunc)(gq_tab[cn != 1].fn_2d[1]);    }    if( !func )        CV_ERROR( CV_StsUnsupportedFormat, "" );    IPPI_CALL( func( src->data.ptr, src->step, src_size,                     dst->data.ptr, dst->step, dst_size, m ));    __END__;}
Developer: allanca, project: otterdive, lines: 87


Example 19: cvGoodFeaturesToTrack

CV_IMPL voidcvGoodFeaturesToTrack( const void* image, void* eigImage, void* tempImage,                       CvPoint2D32f* corners, int *corner_count,                       double quality_level, double min_distance,                       const void* maskImage, int block_size,                       int use_harris, double harris_k ){    CvMat* _eigImg = 0;    CvMat* _tmpImg = 0;    CV_FUNCNAME( "cvGoodFeaturesToTrack" );    __BEGIN__;    double max_val = 0;    int max_count = 0;    int count = 0;    int x, y, i, k = 0;    int min_dist;    /* when selecting points, use integer coordinates */    CvPoint *ptr = (CvPoint *) corners;    /* process floating-point images using integer arithmetics */    int *eig_data = 0;    int *tmp_data = 0;    int **ptr_data = 0;    uchar *mask_data = 0;    int  mask_step = 0;    CvSize size;    int    coi1 = 0, coi2 = 0, coi3 = 0;    CvMat  stub, *img = (CvMat*)image;    CvMat  eig_stub, *eig = (CvMat*)eigImage;    CvMat  tmp_stub, *tmp = (CvMat*)tempImage;    CvMat  mask_stub, *mask = (CvMat*)maskImage;    if( corner_count )    {        max_count = *corner_count;        *corner_count = 0;    }    CV_CALL( img = cvGetMat( img, &stub, &coi1 ));    if( eig )    {        CV_CALL( eig = cvGetMat( eig, &eig_stub, &coi2 ));    }    else    {        CV_CALL( _eigImg = cvCreateMat( img->rows, img->cols, CV_32FC1 ));        eig = _eigImg;    }    if( tmp )    {        CV_CALL( tmp = cvGetMat( tmp, &tmp_stub, &coi3 ));    }    else    {        CV_CALL( _tmpImg = cvCreateMat( img->rows, img->cols, CV_32FC1 ));        tmp = _tmpImg;    }    if( mask )    {        CV_CALL( mask = cvGetMat( mask, &mask_stub ));        if( !CV_IS_MASK_ARR( mask ))        {            CV_ERROR( CV_StsBadMask, "" );        }    }    if( coi1 != 0 || coi2 != 0 || coi3 != 0 )        CV_ERROR( CV_BadCOI, "" );    if( CV_MAT_CN(img->type) != 1 ||            CV_MAT_CN(eig->type) != 1 ||            CV_MAT_CN(tmp->type) != 1 )        CV_ERROR( CV_BadNumChannels, cvUnsupportedFormat );    if( CV_MAT_DEPTH(tmp->type) != CV_32F ||            CV_MAT_DEPTH(eig->type) != CV_32F )        CV_ERROR( CV_BadDepth, cvUnsupportedFormat );    if( !corners || !corner_count )        CV_ERROR( CV_StsNullPtr, "" );    if( max_count <= 0 )        CV_ERROR( CV_StsBadArg, "maximal corners number is non positive" );    if( quality_level <= 0 || min_distance < 0 )        CV_ERROR( CV_StsBadArg, "quality level or min distance are non positive" );    if( use_harris )    {        CV_CALL( cvCornerHarris( img, eig, block_size, 3, harris_k ));    }    else    {//.........这里部分代码省略.........
Developer: ThadeuFerreira, project: sift-coprojeto, lines: 101


Example 20: matchTemplate_CCOEFF_NORMED

static bool matchTemplate_CCOEFF_NORMED(InputArray _image, InputArray _templ, OutputArray _result){    matchTemplate(_image, _templ, _result, CV_TM_CCORR);    UMat temp, image_sums, image_sqsums;    integral(_image, image_sums, image_sqsums, CV_32F, CV_32F);    int type = image_sums.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);    ocl::Kernel k("matchTemplate_CCOEFF_NORMED", ocl::imgproc::match_template_oclsrc,        format("-D CCOEFF_NORMED -D type=%s -D elem_type=%s -D cn=%d", ocl::typeToStr(type), ocl::typeToStr(depth), cn));    if (k.empty())        return false;    UMat templ = _templ.getUMat();    Size size = _image.size(), tsize = templ.size();    _result.create(size.height - templ.rows + 1, size.width - templ.cols + 1, CV_32F);    UMat result = _result.getUMat();    float scale = 1.f / tsize.area();    if (cn == 1)    {        float templ_sum = (float)sum(templ)[0];        multiply(templ, templ, temp, 1, CV_32F);        float templ_sqsum = (float)sum(temp)[0];        templ_sqsum -= scale * templ_sum * templ_sum;        templ_sum   *= scale;        if (templ_sqsum < DBL_EPSILON)        {            result = Scalar::all(1);            return true;        }        k.args(ocl::KernelArg::ReadOnlyNoSize(image_sums), ocl::KernelArg::ReadOnlyNoSize(image_sqsums),                      ocl::KernelArg::ReadWrite(result), templ.rows, templ.cols, scale, templ_sum, templ_sqsum);    }    else    {        Vec4f templ_sum = Vec4f::all(0), templ_sqsum = Vec4f::all(0);        templ_sum = sum(templ);        multiply(templ, templ, temp, 1, CV_32F);        templ_sqsum = sum(temp);        float templ_sqsum_sum = 0;        for (int i = 0; i < cn; i ++)            templ_sqsum_sum += templ_sqsum[i] - scale * templ_sum[i] * templ_sum[i];        templ_sum *= scale;        if (templ_sqsum_sum < DBL_EPSILON)        {            result = Scalar::all(1);            return true;        }        if (cn == 2)            k.args(ocl::KernelArg::ReadOnlyNoSize(image_sums), ocl::KernelArg::ReadOnlyNoSize(image_sqsums),                   ocl::KernelArg::ReadWrite(result), templ.rows, templ.cols, scale,                   templ_sum[0], templ_sum[1], templ_sqsum_sum);        else            k.args(ocl::KernelArg::ReadOnlyNoSize(image_sums), ocl::KernelArg::ReadOnlyNoSize(image_sqsums),                   ocl::KernelArg::ReadWrite(result), templ.rows, templ.cols, scale,                   templ_sum[0], templ_sum[1], templ_sum[2], templ_sum[3], templ_sqsum_sum);    }    size_t globalsize[2] = { result.cols, result.rows };    return k.run(2, globalsize, NULL, false);}
Developer: Annemettevraa, project: opencv, lines: 73


Example 21: memset

bool  SunRasterDecoder::readHeader(){    bool result = false;    if( !m_strm.open( m_filename )) return false;    try    {        m_strm.skip( 4 );        m_width  = m_strm.getDWord();        m_height = m_strm.getDWord();        m_bpp    = m_strm.getDWord();        int palSize = 3*(1 << m_bpp);        m_strm.skip( 4 );        m_encoding = (SunRasType)m_strm.getDWord();        m_maptype = (SunRasMapType)m_strm.getDWord();        m_maplength = m_strm.getDWord();        if( m_width > 0 && m_height > 0 &&            (m_bpp == 1 || m_bpp == 8 || m_bpp == 24 || m_bpp == 32) &&            (m_type == RAS_OLD || m_type == RAS_STANDARD ||             (m_type == RAS_BYTE_ENCODED && m_bpp == 8) || m_type == RAS_FORMAT_RGB) &&            ((m_maptype == RMT_NONE && m_maplength == 0) ||             (m_maptype == RMT_EQUAL_RGB && m_maplength <= palSize && m_bpp <= 8)))        {            memset( m_palette, 0, sizeof(m_palette));            if( m_maplength != 0 )            {                uchar buffer[256*3];                if( m_strm.getBytes( buffer, m_maplength ) == m_maplength )                {                    int i;                    palSize = m_maplength/3;                    for( i = 0; i < palSize; i++ )                    {                        m_palette[i].b = buffer[i + 2*palSize];                        m_palette[i].g = buffer[i + palSize];                        m_palette[i].r = buffer[i];                        m_palette[i].a = 0;                    }                    m_type = IsColorPalette( m_palette, m_bpp ) ? CV_8UC3 : CV_8UC1;                    m_offset = m_strm.getPos();                    assert( m_offset == 32 + m_maplength );                    result = true;                }            }            else            {                m_type = m_bpp > 8 ? CV_8UC3 : CV_8UC1;                if( CV_MAT_CN(m_type) == 1 )                    FillGrayPalette( m_palette, m_bpp );                m_offset = m_strm.getPos();                assert( m_offset == 32 + m_maplength );                result = true;            }        }    }    catch(...)    {    }    if( !result )    {        m_offset = -1;        m_width = m_height = -1;        m_strm.close();    }    return result;}
Developer: glo, project: ee384b, lines: 78


Example 22: crossCorr

void crossCorr( const Mat& img, const Mat& _templ, Mat& corr,                Size corrsize, int ctype,                Point anchor, double delta, int borderType ){    const double blockScale = 4.5;    const int minBlockSize = 256;    std::vector<uchar> buf;    Mat templ = _templ;    int depth = img.depth(), cn = img.channels();    int tdepth = templ.depth(), tcn = templ.channels();    int cdepth = CV_MAT_DEPTH(ctype), ccn = CV_MAT_CN(ctype);    CV_Assert( img.dims <= 2 && templ.dims <= 2 && corr.dims <= 2 );    if( depth != tdepth && tdepth != std::max(CV_32F, depth) )    {        _templ.convertTo(templ, std::max(CV_32F, depth));        tdepth = templ.depth();    }    CV_Assert( depth == tdepth || tdepth == CV_32F);    CV_Assert( corrsize.height <= img.rows + templ.rows - 1 &&               corrsize.width <= img.cols + templ.cols - 1 );    CV_Assert( ccn == 1 || delta == 0 );    corr.create(corrsize, ctype);    int maxDepth = depth > CV_8S ? CV_64F : std::max(std::max(CV_32F, tdepth), cdepth);    Size blocksize, dftsize;    blocksize.width = cvRound(templ.cols*blockScale);    blocksize.width = std::max( blocksize.width, minBlockSize - templ.cols + 1 );    blocksize.width = std::min( blocksize.width, corr.cols );    blocksize.height = cvRound(templ.rows*blockScale);    blocksize.height = std::max( blocksize.height, minBlockSize - templ.rows + 1 );    blocksize.height = std::min( blocksize.height, corr.rows );    dftsize.width = std::max(getOptimalDFTSize(blocksize.width + templ.cols - 1), 2);    dftsize.height = getOptimalDFTSize(blocksize.height + templ.rows - 1);    if( dftsize.width <= 0 || dftsize.height <= 0 )        CV_Error( CV_StsOutOfRange, "the input arrays are too big" );    // recompute block size    blocksize.width = dftsize.width - templ.cols + 1;    blocksize.width = MIN( blocksize.width, corr.cols );    blocksize.height = dftsize.height - templ.rows + 1;    blocksize.height = MIN( blocksize.height, corr.rows );    Mat dftTempl( dftsize.height*tcn, dftsize.width, maxDepth );    Mat dftImg( dftsize, maxDepth );    int i, k, bufSize = 0;    if( tcn > 1 && tdepth != maxDepth )        bufSize = templ.cols*templ.rows*CV_ELEM_SIZE(tdepth);    if( cn > 1 && depth != maxDepth )        bufSize = std::max( bufSize, (blocksize.width + templ.cols - 1)*            (blocksize.height + templ.rows - 1)*CV_ELEM_SIZE(depth));    if( (ccn > 1 || cn > 1) && cdepth != maxDepth )        bufSize = std::max( bufSize, blocksize.width*blocksize.height*CV_ELEM_SIZE(cdepth));    buf.resize(bufSize);    // compute DFT of each template plane    for( k = 0; k < tcn; k++ )    {        int yofs = k*dftsize.height;        Mat src = templ;        Mat dst(dftTempl, Rect(0, yofs, dftsize.width, dftsize.height));        Mat dst1(dftTempl, Rect(0, yofs, templ.cols, templ.rows));        if( tcn > 1 )        {            src = tdepth == maxDepth ? 
dst1 : Mat(templ.size(), tdepth, &buf[0]);            int pairs[] = {k, 0};            mixChannels(&templ, 1, &src, 1, pairs, 1);        }        if( dst1.data != src.data )            src.convertTo(dst1, dst1.depth());        if( dst.cols > templ.cols )        {            Mat part(dst, Range(0, templ.rows), Range(templ.cols, dst.cols));            part = Scalar::all(0);        }        dft(dst, dst, 0, templ.rows);    }    int tileCountX = (corr.cols + blocksize.width - 1)/blocksize.width;    int tileCountY = (corr.rows + blocksize.height - 1)/blocksize.height;    int tileCount = tileCountX * tileCountY;    Size wholeSize = img.size();    Point roiofs(0,0);    Mat img0 = img;//.........这里部分代码省略.........
Developer: Annemettevraa, project: opencv, lines: 101


Example 23: imdecode_

static void*imdecode_( const Mat& buf, int flags, int hdrtype, Mat* mat=0 ){    CV_Assert(buf.data && buf.isContinuous());    IplImage* image = 0;    CvMat *matrix = 0;    Mat temp, *data = &temp;    string filename = tempfile();    bool removeTempFile = false;    ImageDecoder decoder = findDecoder(buf);    if( decoder.empty() )        return 0;    if( !decoder->setSource(buf) )    {        FILE* f = fopen( filename.c_str(), "wb" );        if( !f )            return 0;        removeTempFile = true;        size_t bufSize = buf.cols*buf.rows*buf.elemSize();        fwrite( &buf.data[0], 1, bufSize, f );        fclose(f);        decoder->setSource(filename);    }    if( !decoder->readHeader() )    {        if( removeTempFile )            remove(filename.c_str());        return 0;    }    CvSize size;    size.width = decoder->width();    size.height = decoder->height();    int type = decoder->type();    if( flags != -1 )    {        if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 )            type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type));        if( (flags & CV_LOAD_IMAGE_COLOR) != 0 ||           ((flags & CV_LOAD_IMAGE_ANYCOLOR) != 0 && CV_MAT_CN(type) > 1) )            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 3);        else            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 1);    }    if( hdrtype == LOAD_CVMAT || hdrtype == LOAD_MAT )    {        if( hdrtype == LOAD_CVMAT )        {            matrix = cvCreateMat( size.height, size.width, type );            temp = cvarrToMat(matrix);        }        else        {            mat->create( size.height, size.width, type );            data = mat;        }    }    else    {        image = cvCreateImage( size, cvIplDepth(type), CV_MAT_CN(type) );        temp = cvarrToMat(image);    }    bool code = decoder->readData( *data );    if( removeTempFile )        remove(filename.c_str());    if( !code )    {        cvReleaseImage( &image );        cvReleaseMat( &matrix );        if( mat )            mat->release();        return 0;    }    return hdrtype == LOAD_CVMAT ? (void*)matrix :        hdrtype == LOAD_IMAGE ? (void*)image : (void*)mat;}
Developer ID: Ashwini7, Project: smart-python-programs, Lines of code: 85
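For comparison, the public entry point cv::imdecode follows the same flag logic that imdecode_ implements internally. The sketch below is a hypothetical usage example, assuming OpenCV 2.x headers; "example.png" is a placeholder path.

#include <opencv2/opencv.hpp>
#include <fstream>
#include <vector>
#include <cstdio>

int main()
{
    // Read an encoded image file into a raw byte buffer.
    std::ifstream f("example.png", std::ios::binary);
    std::vector<uchar> bytes((std::istreambuf_iterator<char>(f)),
                              std::istreambuf_iterator<char>());

    // The flags drive the CV_MAKETYPE/CV_MAT_CN adjustment shown above:
    // CV_LOAD_IMAGE_COLOR forces 3 channels, CV_LOAD_IMAGE_GRAYSCALE forces 1.
    cv::Mat color = cv::imdecode(bytes, CV_LOAD_IMAGE_COLOR);
    cv::Mat gray  = cv::imdecode(bytes, CV_LOAD_IMAGE_GRAYSCALE);
    if( !color.empty() && !gray.empty() )
        std::printf("color: %d channels, gray: %d channels\n",
                    color.channels(), gray.channels());
    return 0;
}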


Example 24: cvThreshold

CV_IMPL double
cvThreshold( const void* srcarr, void* dstarr, double thresh, double maxval, int type )
{
    CvHistogram* hist = 0;

    CV_FUNCNAME( "cvThreshold" );

    __BEGIN__;

    CvSize roi;
    int src_step, dst_step;
    CvMat src_stub, *src = (CvMat*)srcarr;
    CvMat dst_stub, *dst = (CvMat*)dstarr;
    CvMat src0, dst0;
    int coi1 = 0, coi2 = 0;
    int ithresh, imaxval, cn;
    bool use_otsu;

    CV_CALL( src = cvGetMat( src, &src_stub, &coi1 ));
    CV_CALL( dst = cvGetMat( dst, &dst_stub, &coi2 ));

    if( coi1 + coi2 )
        CV_ERROR( CV_BadCOI, "COI is not supported by the function" );

    if( !CV_ARE_CNS_EQ( src, dst ) )
        CV_ERROR( CV_StsUnmatchedFormats, "Both arrays must have equal number of channels" );

    cn = CV_MAT_CN(src->type);
    if( cn > 1 )
    {
        src = cvReshape( src, &src0, 1 );
        dst = cvReshape( dst, &dst0, 1 );
    }

    use_otsu = (type & ~CV_THRESH_MASK) == CV_THRESH_OTSU;
    type &= CV_THRESH_MASK;

    if( use_otsu )
    {
        float _ranges[] = { 0, 256 };
        float* ranges = _ranges;
        int hist_size = 256;
        void* srcarr0 = src;

        if( CV_MAT_TYPE(src->type) != CV_8UC1 )
            CV_ERROR( CV_StsNotImplemented, "Otsu method can only be used with 8uC1 images" );

        CV_CALL( hist = cvCreateHist( 1, &hist_size, CV_HIST_ARRAY, &ranges ));
        cvCalcArrHist( &srcarr0, hist );
        thresh = cvFloor(icvGetThreshVal_Otsu( hist ));
    }

    if( !CV_ARE_DEPTHS_EQ( src, dst ) )
    {
        if( CV_MAT_TYPE(dst->type) != CV_8UC1 )
            CV_ERROR( CV_StsUnsupportedFormat, "In case of different types destination should be 8uC1" );

        if( type != CV_THRESH_BINARY && type != CV_THRESH_BINARY_INV )
            CV_ERROR( CV_StsBadArg,
            "In case of different types only CV_THRESH_BINARY "
            "and CV_THRESH_BINARY_INV thresholding types are supported" );

        if( maxval < 0 )
        {
            CV_CALL( cvSetZero( dst ));
        }
        else
        {
            CV_CALL( cvCmpS( src, thresh, dst, type == CV_THRESH_BINARY ? CV_CMP_GT : CV_CMP_LE ));
            if( maxval < 255 )
                CV_CALL( cvAndS( dst, cvScalarAll( maxval ), dst ));
        }
        EXIT;
    }

    if( !CV_ARE_SIZES_EQ( src, dst ) )
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    roi = cvGetMatSize( src );
    if( CV_IS_MAT_CONT( src->type & dst->type ))
    {
        roi.width *= roi.height;
        roi.height = 1;
        src_step = dst_step = CV_STUB_STEP;
    }
    else
    {
        src_step = src->step;
        dst_step = dst->step;
    }

    switch( CV_MAT_DEPTH(src->type) )
    {
    case CV_8U:
        ithresh = cvFloor(thresh);
        imaxval = cvRound(maxval);
        if( type == CV_THRESH_TRUNC )
            imaxval = ithresh;
        imaxval = CV_CAST_8U(imaxval);
        // ......... part of the code omitted here .........
Developer ID: caomw, Project: tactical-visual-servoing, Lines of code: 101
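A minimal usage sketch of the legacy C interface shown above, assuming the OpenCV 2.x C headers; "input.png" and "binary.png" are placeholder paths, not files from the cited project.

#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/highgui/highgui_c.h>

int main()
{
    // Loading as grayscale gives the 8uC1 layout that the Otsu branch of cvThreshold requires.
    IplImage* src = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);
    if( !src )
        return 1;
    IplImage* dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

    // Fixed binary threshold at 128; OR-ing CV_THRESH_OTSU into the type would
    // let the function pick the threshold level from the histogram instead.
    cvThreshold(src, dst, 128, 255, CV_THRESH_BINARY);

    cvSaveImage("binary.png", dst);
    cvReleaseImage(&dst);
    cvReleaseImage(&src);
    return 0;
}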


Example 25: close (PngDecoder::readHeader)

bool  PngDecoder::readHeader()
{
    bool result = false;
    close();

    png_structp png_ptr = png_create_read_struct( PNG_LIBPNG_VER_STRING, 0, 0, 0 );

    if( png_ptr )
    {
        png_infop info_ptr = png_create_info_struct( png_ptr );
        png_infop end_info = png_create_info_struct( png_ptr );

        m_png_ptr = png_ptr;
        m_info_ptr = info_ptr;
        m_end_info = end_info;
        m_buf_pos = 0;

        if( info_ptr && end_info )
        {
            if( setjmp( png_jmpbuf( png_ptr ) ) == 0 )
            {
                if( !m_buf.empty() )
                    png_set_read_fn(png_ptr, this, (png_rw_ptr)readDataFromBuf );
                else
                {
                    m_f = fopen( m_filename.c_str(), "rb" );
                    if( m_f )
                        png_init_io( png_ptr, m_f );
                }

                if( !m_buf.empty() || m_f )
                {
                    png_uint_32 width, height;
                    int bit_depth, color_type;

                    png_read_info( png_ptr, info_ptr );

                    png_get_IHDR( png_ptr, info_ptr, &width, &height,
                                  &bit_depth, &color_type, 0, 0, 0 );

                    m_width = (int)width;
                    m_height = (int)height;
                    m_color_type = color_type;
                    m_bit_depth = bit_depth;

                    if( bit_depth <= 8 || bit_depth == 16 )
                    {
                        switch(color_type)
                        {
                        case PNG_COLOR_TYPE_RGB:
                        case PNG_COLOR_TYPE_PALETTE:
                            m_type = CV_8UC3;
                            break;
                        case PNG_COLOR_TYPE_RGB_ALPHA:
                            m_type = CV_8UC4;
                            break;
                        default:
                            m_type = CV_8UC1;
                        }

                        if( bit_depth == 16 )
                            m_type = CV_MAKETYPE(CV_16U, CV_MAT_CN(m_type));
                        result = true;
                    }
                }
            }
        }
    }

    if( !result )
        close();

    return result;
}
Developer ID: bertptrs, Project: uni-mir, Lines of code: 73
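The last lines above use a common idiom: promote the depth of a type while keeping its channel count. A tiny self-contained illustration (assuming only the standard OpenCV C headers, not code from the cited project):

#include <opencv2/core/core_c.h>
#include <cassert>

int main()
{
    // Same idiom PngDecoder::readHeader applies to 16-bit PNGs:
    // keep the channel count already determined, replace only the depth.
    int t8  = CV_8UC3;
    int t16 = CV_MAKETYPE(CV_16U, CV_MAT_CN(t8));   // 16-bit, still 3 channels
    assert( CV_MAT_DEPTH(t16) == CV_16U && CV_MAT_CN(t16) == 3 );
    return 0;
}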


Example 26: imread_

/**
 * Read an image into memory and return the information
 *
 * @param[in] filename File to load
 * @param[in] flags Flags
 * @param[in] hdrtype { LOAD_CVMAT=0,
 *                      LOAD_IMAGE=1,
 *                      LOAD_MAT=2
 *                    }
 * @param[in] mat Reference to C++ Mat object (If LOAD_MAT)
 * @param[in] scale_denom Scale value
 **/
static void*
imread_( const String& filename, int flags, int hdrtype, Mat* mat=0 )
{
    IplImage* image = 0;
    CvMat *matrix = 0;
    Mat temp, *data = &temp;

    /// Search for the relevant decoder to handle the imagery
    ImageDecoder decoder;

#ifdef HAVE_GDAL
    if(flags != IMREAD_UNCHANGED && (flags & IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL ){
        decoder = GdalDecoder().newDecoder();
    }else{
#endif
        decoder = findDecoder( filename );
#ifdef HAVE_GDAL
    }
#endif

    /// if no decoder was found, return nothing.
    if( !decoder ){
        return 0;
    }

    int scale_denom = 1;
    if( flags > IMREAD_LOAD_GDAL )
    {
        if( flags & IMREAD_REDUCED_GRAYSCALE_2 )
            scale_denom = 2;
        else if( flags & IMREAD_REDUCED_GRAYSCALE_4 )
            scale_denom = 4;
        else if( flags & IMREAD_REDUCED_GRAYSCALE_8 )
            scale_denom = 8;
    }

    /// set the scale_denom in the driver
    decoder->setScale( scale_denom );

    /// set the filename in the driver
    decoder->setSource( filename );

    // read the header to make sure it succeeds
    if( !decoder->readHeader() )
        return 0;

    // established the required input image size
    CvSize size;
    size.width = decoder->width();
    size.height = decoder->height();

    // grab the decoded type
    int type = decoder->type();
    if( (flags & IMREAD_LOAD_GDAL) != IMREAD_LOAD_GDAL && flags != IMREAD_UNCHANGED )
    {
        if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 )
            type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type));

        if( (flags & CV_LOAD_IMAGE_COLOR) != 0 ||
           ((flags & CV_LOAD_IMAGE_ANYCOLOR) != 0 && CV_MAT_CN(type) > 1) )
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 3);
        else
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 1);
    }

    if( hdrtype == LOAD_CVMAT || hdrtype == LOAD_MAT )
    {
        if( hdrtype == LOAD_CVMAT )
        {
            matrix = cvCreateMat( size.height, size.width, type );
            temp = cvarrToMat( matrix );
        }
        else
        {
            mat->create( size.height, size.width, type );
            data = mat;
        }
    }
    else
    {
        image = cvCreateImage( size, cvIplDepth(type), CV_MAT_CN(type) );
        temp = cvarrToMat( image );
    }

    // read the image data
    if( !decoder->readData( *data ))
    {
    // ......... part of the code omitted here .........
Developer ID: GilseoneMoraes, Project: opencv, Lines of code: 101
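The scale_denom branch above is what the IMREAD_REDUCED_* flags of the public cv::imread exercise. A hedged usage sketch, assuming OpenCV 3.x with the imgcodecs module; "photo.jpg" is a placeholder path:

#include <opencv2/imgcodecs.hpp>
#include <cstdio>

int main()
{
    // IMREAD_REDUCED_GRAYSCALE_2 asks the decoder for a single-channel image at half resolution.
    cv::Mat full = cv::imread("photo.jpg", cv::IMREAD_UNCHANGED);
    cv::Mat half = cv::imread("photo.jpg", cv::IMREAD_REDUCED_GRAYSCALE_2);
    if( !full.empty() && !half.empty() )
        std::printf("full: %dx%d (%d ch), reduced: %dx%d (%d ch)\n",
                    full.cols, full.rows, full.channels(),
                    half.cols, half.rows, half.channels());
    return 0;
}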


Example 27: cvAbsDiff

CV_IMPL  void
cvAbsDiff( const void* srcarr1, const void* srcarr2, void* dstarr )
{
    static CvFuncTable adiff_tab;
    static int inittab = 0;

    CV_FUNCNAME( "cvAbsDiff" );

    __BEGIN__;

    int coi1 = 0, coi2 = 0, coi3 = 0;
    CvMat srcstub1, *src1 = (CvMat*)srcarr1;
    CvMat srcstub2, *src2 = (CvMat*)srcarr2;
    CvMat dststub,  *dst = (CvMat*)dstarr;
    int src1_step, src2_step, dst_step;
    CvSize size;
    int type;

    if( !inittab )
    {
        icvInitAbsDiffTable( &adiff_tab );
        inittab = 1;
    }

    CV_CALL( src1 = cvGetMat( src1, &srcstub1, &coi1 ));
    CV_CALL( src2 = cvGetMat( src2, &srcstub2, &coi2 ));
    CV_CALL( dst = cvGetMat( dst, &dststub, &coi3 ));

    if( coi1 != 0 || coi2 != 0 || coi3 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_SIZES_EQ( src1, src2 ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    size = icvGetMatSize( src1 );
    type = CV_MAT_TYPE(src1->type);

    if( !CV_ARE_SIZES_EQ( src1, dst ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    if( !CV_ARE_TYPES_EQ( src1, src2 ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    if( !CV_ARE_TYPES_EQ( src1, dst ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    size.width *= CV_MAT_CN( type );

    src1_step = src1->step;
    src2_step = src2->step;
    dst_step = dst->step;

    if( CV_IS_MAT_CONT( src1->type & src2->type & dst->type ))
    {
        size.width *= size.height;
        size.height = 1;
        src1_step = src2_step = dst_step = CV_STUB_STEP;
    }

    {
        CvFunc2D_3A func = (CvFunc2D_3A)
            (adiff_tab.fn_2d[CV_MAT_DEPTH(type)]);

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        IPPI_CALL( func( src1->data.ptr, src1_step, src2->data.ptr, src2_step,
                         dst->data.ptr, dst_step, size ));
    }

    __END__;
}
Developer ID: Rhoana, Project: rhoana, Lines of code: 72
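A short calling sketch for the function above, assuming only the OpenCV C headers; the matrix values are made up for illustration.

#include <opencv2/core/core_c.h>

int main()
{
    // cvAbsDiff processes width*CV_MAT_CN(type) scalars per row, which is why the
    // implementation above multiplies size.width by the channel count.
    CvMat* a = cvCreateMat(2, 2, CV_8UC3);
    CvMat* b = cvCreateMat(2, 2, CV_8UC3);
    CvMat* d = cvCreateMat(2, 2, CV_8UC3);
    cvSet(a, cvScalar(10, 20, 30));
    cvSet(b, cvScalar(40, 15, 5));
    cvAbsDiff(a, b, d);               // per-element |a - b| -> (30, 5, 25) in every cell
    cvReleaseMat(&a);
    cvReleaseMat(&b);
    cvReleaseMat(&d);
    return 0;
}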


Example 28: cvUndistort2

CV_IMPL void
cvUndistort2( const CvArr* _src, CvArr* _dst, const CvMat* A, const CvMat* dist_coeffs )
{
    static int inittab = 0;
    uchar* buffer = 0;

    CV_FUNCNAME( "cvUndistort2" );

    __BEGIN__;

    float a[9], k[4];
    int coi1 = 0, coi2 = 0;
    CvMat srcstub, *src = (CvMat*)_src;
    CvMat dststub, *dst = (CvMat*)_dst;
    CvMat _a = cvMat( 3, 3, CV_32F, a ), _k;
    int cn, src_step, dst_step;
    CvSize size;

    if( !inittab )
    {
        icvInitLinearCoeffTab();
        icvInitCubicCoeffTab();
        inittab = 1;
    }

    CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));
    CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "The function does not support COI" );

    if( CV_MAT_DEPTH(src->type) != CV_8U )
        CV_ERROR( CV_StsUnsupportedFormat, "Only 8-bit images are supported" );

    if( src->data.ptr == dst->data.ptr )
        CV_ERROR( CV_StsNotImplemented, "In-place undistortion is not implemented" );

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( !CV_IS_MAT(A) || A->rows != 3 || A->cols != 3  ||
        CV_MAT_TYPE(A->type) != CV_32FC1 && CV_MAT_TYPE(A->type) != CV_64FC1 )
        CV_ERROR( CV_StsBadArg, "Intrinsic matrix must be a valid 3x3 floating-point matrix" );

    if( !CV_IS_MAT(dist_coeffs) || dist_coeffs->rows != 1 && dist_coeffs->cols != 1 ||
        dist_coeffs->rows*dist_coeffs->cols*CV_MAT_CN(dist_coeffs->type) != 4 ||
        CV_MAT_DEPTH(dist_coeffs->type) != CV_64F &&
        CV_MAT_DEPTH(dist_coeffs->type) != CV_32F )
        CV_ERROR( CV_StsBadArg,
            "Distortion coefficients must be 1x4 or 4x1 floating-point vector" );

    cvConvert( A, &_a );
    _k = cvMat( dist_coeffs->rows, dist_coeffs->cols,
                CV_MAKETYPE(CV_32F, CV_MAT_CN(dist_coeffs->type)), k );
    cvConvert( dist_coeffs, &_k );

    cn = CV_MAT_CN(src->type);
    size = cvGetMatSize(src);
    src_step = src->step ? src->step : CV_STUB_STEP;
    dst_step = dst->step ? dst->step : CV_STUB_STEP;

    if( fabs((double)k[2]) < 1e-5 && fabs((double)k[3]) < 1e-5 && icvUndistortGetSize_p )
    {
        int buf_size = 0;
        CvUndistortRadialIPPFunc func =
            cn == 1 ? (CvUndistortRadialIPPFunc)icvUndistortRadial_8u_C1R_p :
                      (CvUndistortRadialIPPFunc)icvUndistortRadial_8u_C3R_p;

        if( func && icvUndistortGetSize_p( size, &buf_size ) >= 0 && buf_size > 0 )
        {
            CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
            if( func( src->data.ptr, src_step, dst->data.ptr,
                      dst_step, size, a[0], a[4],
                      a[2], a[5], k[0], k[1], buffer ) >= 0 )
                EXIT;
        }
    }

    icvUnDistort_8u_CnR( src->data.ptr, src_step,
        dst->data.ptr, dst_step, size, a, k, cn );

    __END__;

    cvFree( &buffer );
}
Developer ID: DORARA29, Project: AtomManipulator, Lines of code: 88
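A hedged calling sketch for the legacy API above, assuming OpenCV 2.x C headers. The intrinsics, distortion coefficients, and file paths are placeholders; real values come from camera calibration, not from the cited project.

#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/highgui/highgui_c.h>

int main()
{
    // 3x3 camera matrix and 1x4 distortion vector (k1, k2, p1, p2), as required above.
    float a[9] = { 500.f, 0.f, 320.f,   0.f, 500.f, 240.f,   0.f, 0.f, 1.f };
    float k[4] = { -0.2f, 0.05f, 0.f, 0.f };
    CvMat A    = cvMat(3, 3, CV_32FC1, a);
    CvMat dist = cvMat(1, 4, CV_32FC1, k);

    IplImage* src = cvLoadImage("distorted.png", CV_LOAD_IMAGE_COLOR);  // placeholder path
    if( !src )
        return 1;
    IplImage* dst = cvCloneImage(src);   // separate buffer: in-place undistortion is rejected
    cvUndistort2(src, dst, &A, &dist);
    cvSaveImage("undistorted.png", dst);
    cvReleaseImage(&dst);
    cvReleaseImage(&src);
    return 0;
}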



Note: The CV_MAT_CN function examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please follow the corresponding project's License when distributing or using the code, and do not reproduce this article without permission.

