您当前的位置:首页 > IT编程 > C++
| C语言 | Java | VB | VC | python | Android | TensorFlow | C++ | oracle | 学术与代码 | cnn卷积神经网络 | gnn | 图像修复 | Keras | 数据集 | Neo4j | 自然语言处理 | 深度学习 | 医学CAD | 医学影像 | 超参数 | pointnet | pytorch | 异常检测 | Transformers | 情感分类 | 知识图谱 |

自学教程:C++ CV_MAKETYPE函数代码示例

51自学网 2021-06-01 20:07:54
  C++
这篇教程C++ CV_MAKETYPE函数代码示例写得很实用,希望能帮到您。

本文整理汇总了C++中CV_MAKETYPE函数的典型用法代码示例。如果您正苦于以下问题:C++ CV_MAKETYPE函数的具体用法?C++ CV_MAKETYPE怎么用?C++ CV_MAKETYPE使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。

在下文中一共展示了CV_MAKETYPE函数的30个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。

示例1: type

// Set every element of this UMat (or only the elements selected by _mask) to
// the scalar in _value.  An OpenCL kernel is tried first; on any failure the
// call falls back to the CPU path via Mat::setTo.
UMat& UMat::setTo(InputArray _value, InputArray _mask)
{
    bool haveMask = !_mask.empty();
#ifdef HAVE_OPENCL
    int tp = type(), cn = CV_MAT_CN(tp);
    // OpenCL path: only 1-D/2-D matrices, <= 4 channels, non-double depth.
    if( dims <= 2 && cn <= 4 && CV_MAT_DEPTH(tp) < CV_64F && ocl::useOpenCL() )
    {
        Mat value = _value.getMat();
        CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::UMAT) );
        double buf[4] = { 0, 0, 0, 0 };
        // Convert/replicate the scalar into buf in the destination element type.
        convertAndUnrollScalar(value, tp, (uchar *)buf, 1);
        // 3-channel data is treated as 4-channel for the device-side scalar type.
        int scalarcn = cn == 3 ? 4 : cn, rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
        String opts = format("-D dstT=%s -D rowsPerWI=%d -D dstST=%s -D dstT1=%s -D cn=%d",
                             ocl::memopTypeToStr(tp), rowsPerWI,
                             ocl::memopTypeToStr(CV_MAKETYPE(tp, scalarcn)),
                             ocl::memopTypeToStr(CV_MAT_DEPTH(tp)), cn);
        ocl::Kernel setK(haveMask ? "setMask" : "set", ocl::core::copyset_oclsrc, opts);
        if( !setK.empty() )
        {
            ocl::KernelArg scalararg(0, 0, 0, 0, buf, CV_ELEM_SIZE1(tp) * scalarcn);
            UMat mask;

            if( haveMask )
            {
                mask = _mask.getUMat();
                CV_Assert( mask.size() == size() && mask.type() == CV_8UC1 );
                ocl::KernelArg maskarg = ocl::KernelArg::ReadOnlyNoSize(mask),
                        dstarg = ocl::KernelArg::ReadWrite(*this);
                setK.args(maskarg, dstarg, scalararg);
            }
            else
            {
                ocl::KernelArg dstarg = ocl::KernelArg::WriteOnly(*this);
                setK.args(dstarg, scalararg);
            }

            // Each work item processes rowsPerWI rows.
            size_t globalsize[] = { cols, (rows + rowsPerWI - 1) / rowsPerWI };
            if( setK.run(2, globalsize, NULL, false) )
                return *this;
        }
    }
#endif
    // CPU fallback (also reached when the kernel fails to build or run).
    Mat m = getMat(haveMask ? ACCESS_RW : ACCESS_WRITE);
    m.setTo(_value, _mask);
    return *this;
}
开发者ID:athena0304,项目名称:opencv,代码行数:49,


示例2: matFromImage

// Converts a ROS Image to a cv::Mat by sharing the data or changing its endianness if needed
cv::Mat matFromImage(const sensor_msgs::Image& source)
{
  int source_type = getCvType(source.encoding);
  int byte_depth = enc::bitDepth(source.encoding) / 8;
  int num_channels = enc::numChannels(source.encoding);

  // Sanity check: the row stride must cover a full row of pixels.
  if (source.step < source.width * byte_depth * num_channels)
  {
    std::stringstream ss;
    ss << "Image is wrongly formed: step < width * byte_depth * num_channels  or  " << source.step << " != " <<
        source.width << " * " << byte_depth << " * " << num_channels;
    throw Exception(ss.str());
  }

  // Sanity check: the data buffer must match height * stride exactly.
  if (source.height * source.step != source.data.size())
  {
    std::stringstream ss;
    ss << "Image is wrongly formed: height * step != size  or  " << source.height << " * " <<
              source.step << " != " << source.data.size();
    throw Exception(ss.str());
  }

  // If the endianness is the same as locally, share the data
  cv::Mat mat(source.height, source.width, source_type, const_cast<uchar*>(&source.data[0]), source.step);
  if ((boost::endian::order::native == boost::endian::order::big && source.is_bigendian) ||
      (boost::endian::order::native == boost::endian::order::little && !source.is_bigendian) ||
      byte_depth == 1)
    return mat;

  // Otherwise, reinterpret the data as bytes and switch the channels accordingly
  mat = cv::Mat(source.height, source.width, CV_MAKETYPE(CV_8U, num_channels*byte_depth),
                const_cast<uchar*>(&source.data[0]), source.step);
  cv::Mat mat_swap(source.height, source.width, mat.type());

  // Build fromTo index pairs that reverse the byte order within each channel.
  std::vector<int> fromTo;
  fromTo.reserve(num_channels*byte_depth);
  for(int i = 0; i < num_channels; ++i)
    for(int j = 0; j < byte_depth; ++j)
    {
      fromTo.push_back(byte_depth*i + j);
      fromTo.push_back(byte_depth*i + byte_depth - 1 - j);
    }
  cv::mixChannels(std::vector<cv::Mat>(1, mat), std::vector<cv::Mat>(1, mat_swap), fromTo);

  // Interpret mat_swap back as the proper type
  mat_swap = cv::Mat(source.height, source.width, source_type, mat_swap.data, mat_swap.step);
  return mat_swap;
}
开发者ID:LouLinear,项目名称:APC_vision,代码行数:50,


示例3: ocl_pyrUp

// OpenCL implementation of pyrUp (2x Gaussian upsampling).
// Returns false when the inputs are unsupported or the kernel cannot be built,
// so the caller can fall back to the CPU path.
static bool ocl_pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), channels = CV_MAT_CN(type);
    if (channels > 4 || borderType != BORDER_DEFAULT)
        return false;

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    if (depth == CV_64F && !doubleSupport)
        return false;

    // Only an exact 2x destination size (or an unspecified one) is supported.
    Size ssize = _src.size();
    if ((_dsz.area() != 0) && (_dsz != Size(ssize.width * 2, ssize.height * 2)))
        return false;

    UMat src = _src.getUMat();
    Size dsize = Size(ssize.width * 2, ssize.height * 2);
    _dst.create( dsize, src.type() );
    UMat dst = _dst.getUMat();

    // Intermediate arithmetic runs in float unless the input is already double.
    int float_depth = depth == CV_64F ? CV_64F : CV_32F;
    const int local_size = 16;
    char cvt[2][50];
    String buildOptions = format(
            "-D T=%s -D FT=%s -D convertToT=%s -D convertToFT=%s%s "
            "-D T1=%s -D cn=%d -D LOCAL_SIZE=%d",
            ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, channels)),
            ocl::convertTypeStr(float_depth, depth, channels, cvt[0]),
            ocl::convertTypeStr(depth, float_depth, channels, cvt[1]),
            doubleSupport ? " -D DOUBLE_SUPPORT" : "",
            ocl::typeToStr(depth), channels, local_size
    );
    size_t globalThreads[2] = { dst.cols, dst.rows };
    size_t localThreads[2] = { local_size, local_size };
    ocl::Kernel k;
    // Intel devices get a specialized unrolled kernel for single-channel input.
    if (ocl::Device::getDefault().isIntel() && channels == 1)
    {
        k.create("pyrUp_unrolled", ocl::imgproc::pyr_up_oclsrc, buildOptions);
        globalThreads[0] = dst.cols/2; globalThreads[1] = dst.rows/2;
    }
    else
        k.create("pyrUp", ocl::imgproc::pyr_up_oclsrc, buildOptions);

    if (k.empty())
        return false;
    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst));
    return k.run(2, globalThreads, localThreads, false);
}
开发者ID:112000,项目名称:opencv,代码行数:49,


示例4: rb_query

/* * call-seq: *   query -> IplImage or nil * * Grabs and returns a frame camera or file. Just a combination of grab and retrieve in one call. */VALUErb_query(VALUE self){  IplImage *frame = cvQueryFrame(CVCAPTURE(self));  if(!frame)    return Qnil;  VALUE image = cIplImage::new_object(cvSize(frame->width, frame->height), CV_MAKETYPE(CV_8U, frame->nChannels));  if (frame->origin == IPL_ORIGIN_TL) {    cvCopy(frame, CVARR(image));  }  else {    cvFlip(frame, CVARR(image));  }  return image;}
开发者ID:thenoseman,项目名称:ruby-opencv,代码行数:21,


示例5:

bool KGDAL2CV::CheckDataType(const GDALDataType& gdalDataType, cv::Mat img){	int TypeMap_GDAL2_0[GDT_TypeCount] = { CV_USRTYPE1, CV_8U, CV_16U, CV_16S, CV_32S, CV_32S, CV_32F, CV_64F, CV_USRTYPE1, CV_USRTYPE1, CV_USRTYPE1, CV_USRTYPE1};	int imgType = img.type();		if (imgType == CV_MAKETYPE(TypeMap_GDAL2_0[gdalDataType], img.channels()))	{		if (gdalDataType == GDT_UInt32) std::cout << "cv::Mat doesn't support datatype: CV_32U!" << std::endl;		if (TypeMap_GDAL2_0[gdalDataType] == CV_USRTYPE1) std::cout << "user define datatype may be unmatched!" << std::endl;		return true;	} 	std::cout << "Warning: use the different Data Type between cv::Mat and GDAL!/r/n" 				 "/tproper range cast may be used!" << std::endl;	return false;}
开发者ID:PlainSailing,项目名称:GDAL2CV,代码行数:15,


示例6: ocl_pyrDown

// OpenCL implementation of pyrDown (Gaussian blur + 2x decimation), with an
// Intel-specific multi-pixel-per-work-item fast path.  Returns false when
// unsupported so the caller can fall back to the CPU path.
static bool ocl_pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    if (cn > 4 || (depth == CV_64F && !doubleSupport))
        return false;

    // Destination defaults to half size (rounded up) when _dsz is unspecified.
    Size ssize = _src.size();
    Size dsize = _dsz.area() == 0 ? Size((ssize.width + 1) / 2, (ssize.height + 1) / 2) : _dsz;
    if (dsize.height < 2 || dsize.width < 2)
        return false;

    CV_Assert( ssize.width > 0 && ssize.height > 0 &&
            std::abs(dsize.width*2 - ssize.width) <= 2 &&
            std::abs(dsize.height*2 - ssize.height) <= 2 );

    UMat src = _src.getUMat();
    _dst.create( dsize, src.type() );
    UMat dst = _dst.getUMat();

    // Intermediate arithmetic runs in float unless the input is double.
    int float_depth = depth == CV_64F ? CV_64F : CV_32F;
    const int local_size = 256;
    // On Intel devices, process 4 pixels per work item for 8U single-channel.
    int kercn = 1;
    if (depth == CV_8U && float_depth == CV_32F && cn == 1 && ocl::Device::getDefault().isIntel())
        kercn = 4;
    const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP",
                                       "BORDER_REFLECT_101" };
    char cvt[2][50];
    String buildOptions = format(
            "-D T=%s -D FT=%s -D convertToT=%s -D convertToFT=%s%s "
            "-D T1=%s -D cn=%d -D kercn=%d -D fdepth=%d -D %s -D LOCAL_SIZE=%d",
            ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, cn)),
            ocl::convertTypeStr(float_depth, depth, cn, cvt[0]),
            ocl::convertTypeStr(depth, float_depth, cn, cvt[1]),
            doubleSupport ? " -D DOUBLE_SUPPORT" : "", ocl::typeToStr(depth),
            cn, kercn, float_depth, borderMap[borderType], local_size
    );
    ocl::Kernel k("pyrDown", ocl::imgproc::pyr_down_oclsrc, buildOptions);
    if (k.empty())
        return false;

    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst));

    size_t localThreads[2]  = { local_size/kercn, 1 };
    size_t globalThreads[2] = { (src.cols + (kercn-1))/kercn, (dst.rows + 1) / 2 };
    return k.run(2, globalThreads, localThreads, false);
}
开发者ID:Asafadari,项目名称:opencv,代码行数:48,


示例7: types

// Enumerate every OpenCV type code in the inclusive depth x channel-count grid
// [depth_start, depth_end] x [cn_start, cn_end], in depth-major order.
std::vector<int> types(int depth_start, int depth_end, int cn_start, int cn_end)
{
    const int depthCount = depth_end - depth_start + 1;
    const int cnCount = cn_end - cn_start + 1;

    std::vector<int> result;
    result.reserve(depthCount * cnCount);

    for (int d = depth_start; d <= depth_end; ++d)
        for (int c = cn_start; c <= cn_end; ++c)
            result.push_back(CV_MAKETYPE(d, c));

    return result;
}
开发者ID:JaehyunAhn,项目名称:Basic_OpenCV_utilization,代码行数:16,


示例8: CV_MAT_DEPTH

  // Apply the member `kernel` to _src, writing the filtered image to _dst with
  // depth `ddepth` (negative ddepth keeps the source depth).  Each channel is
  // convolved independently; pixels near the border use the edge variant.
  void FilterBase::apply(cv::InputArray _src, cv::OutputArray _dst, const int &ddepth){
    int stype = _src.type();
    int dcn = _src.channels();
    int depth = CV_MAT_DEPTH(stype);

    if (0 <= ddepth)
      depth = ddepth;

    Mat src, dst;
    src = _src.getMat();
    Size sz = src.size();
    _dst.create(sz, CV_MAKETYPE(depth, dcn));
    dst = _dst.getMat();

    // BUG FIX: width/height were swapped (cv::Mat rows is the image height,
    // cols the width), which broke filtering of non-square images.
    int imageWidth = src.cols;
    int imageHeight = src.rows;

    Mat srcChannels[3];
    split(src, srcChannels);

    int margineWidth = kernel.cols / 2;
    int margineHeight = kernel.rows / 2;
    double kernelElemCount = (double)(kernel.cols * kernel.rows);

    for(int ch = 0; ch < dcn; ++ch){
      for(int y = 0; y < imageHeight; ++y){
	Vec3d  *ptr = dst.ptr<Vec3d>(y);
	for(int x = 0; x < imageWidth; ++x){
	  // BUG FIX: the vertical margin was previously passed as
	  // margineWidth twice; the last argument is the height margin.
	  if (isEdge(x, y, imageWidth, imageHeight, margineWidth, margineHeight)){
	    ptr[x][ch]
	      = calcKernelOutputAtEdge(srcChannels[ch],
				       kernel, x, y,
				       imageWidth, imageHeight,
				       margineWidth, margineHeight);
	  }else{
	    ptr[x][ch]
	      = calcKernelOutput(srcChannels[ch],
				 kernel, x, y,
				 margineWidth, margineHeight,
				 kernelElemCount);
	  }
	}
      }
    }
  }
开发者ID:YoshiProton,项目名称:lets_enjoy_cv_programing,代码行数:47,


示例9: mxCreateNumericArray

// Convert a cv::Mat to a MATLAB mxArray of the requested class id.
// The Mat is split into single-channel planes and each plane is copied (with
// type conversion) into the trailing dimension of the mxArray.  `transpose`
// controls whether a 2-D matrix is transposed to match MATLAB's column-major
// layout.  mxUNKNOWN_CLASS selects the class id matching the Mat's depth.
MxArray::MxArray(const cv::Mat& mat, mxClassID classid, bool transpose)
{
    // handle special case of empty input Mat by creating an empty array
    classid = (classid == mxUNKNOWN_CLASS) ? ClassIDOf[mat.depth()] : classid;
    if (mat.empty()) {
        p_ = mxCreateNumericArray(0, 0, classid, mxREAL);
        if (!p_)
            mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
        return;
    }
    // transpose cv::Mat if needed
    cv::Mat input(mat);
    if (input.dims == 2 && transpose)
        input = input.t();
    // Create a new mxArray (of the specified classID) equivalent to cv::Mat
    const mwSize nchannels = input.channels();
    const int* dims_ = input.size;
    std::vector<mwSize> d(dims_, dims_ + input.dims);
    d.push_back(nchannels);
    // Swap the first two dims: OpenCV is row-major, MATLAB column-major.
    std::swap(d[0], d[1]);
    if (classid == mxLOGICAL_CLASS) {
        // OpenCV's logical true is any nonzero, while MATLAB's true is 1.
        cv::compare(input, 0, input, cv::CMP_NE);
        input.setTo(1, input);
        p_ = mxCreateLogicalArray(d.size(), &d[0]);
    }
    else
        p_ = mxCreateNumericArray(d.size(), &d[0], classid, mxREAL);
    if (!p_)
        mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
    // split input cv::Mat into several single-channel arrays
    std::vector<cv::Mat> channels;
    channels.reserve(nchannels);
    cv::split(input, channels);
    // Copy each channel from Mat to mxArray (converting to specified classid),
    // as in: p_(:,:,i) <- cast_to_classid_type(channels[i])
    std::vector<mwSize> si(d.size(), 0);               // subscript index
    const int type = CV_MAKETYPE(DepthOf[classid], 1); // destination type
    for (mwIndex i = 0; i < nchannels; ++i) {
        si[si.size() - 1] = i;                   // last dim is a channel index
        void *ptr = reinterpret_cast<void*>(
            reinterpret_cast<size_t>(mxGetData(p_)) +
            mxGetElementSize(p_) * subs(si));    // ptr to i-th channel data
        cv::Mat m(input.dims, dims_, type, ptr); // only creates Mat header
        channels[i].convertTo(m, type);          // Write to mxArray through m
    }
}
开发者ID:ivalab,项目名称:mexopencv,代码行数:47,


示例10: type

// Convert this UMat to another depth with optional scaling:
// dst = this * alpha + beta.  The channel count is preserved (only the depth
// component of _type is honoured).  An OpenCL kernel is tried first;
// otherwise the call falls back to Mat::convertTo on the host.
void UMat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
{
    bool noScale = std::fabs(alpha - 1) < DBL_EPSILON && std::fabs(beta) < DBL_EPSILON;
    int stype = type(), cn = CV_MAT_CN(stype);

    // _type < 0 means "keep the source type" (or the fixed destination type).
    if( _type < 0 )
        _type = _dst.fixedType() ? _dst.type() : stype;
    else
        _type = CV_MAKETYPE(CV_MAT_DEPTH(_type), cn);

    int sdepth = CV_MAT_DEPTH(stype), ddepth = CV_MAT_DEPTH(_type);
    // Same depth with identity scaling degenerates to a plain copy.
    if( sdepth == ddepth && noScale )
    {
        copyTo(_dst);
        return;
    }

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    bool needDouble = sdepth == CV_64F || ddepth == CV_64F;
    if( dims <= 2 && cn && _dst.isUMat() && ocl::useOpenCL() &&
            ((needDouble && doubleSupport) || !needDouble) )
    {
        char cvt[40];
        ocl::Kernel k("convertTo", ocl::core::convert_oclsrc,
                      format("-D srcT=%s -D dstT=%s -D convertToDT=%s%s", ocl::typeToStr(sdepth),
                             ocl::typeToStr(ddepth), ocl::convertTypeStr(CV_32F, ddepth, 1, cvt),
                             doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
        if (!k.empty())
        {
            UMat src = *this;
            _dst.create( size(), _type );
            UMat dst = _dst.getUMat();

            float alphaf = (float)alpha, betaf = (float)beta;
            k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst, cn), alphaf, betaf);

            // One work item per scalar element (cols * channels).
            size_t globalsize[2] = { dst.cols * cn, dst.rows };
            if (k.run(2, globalsize, NULL, false))
                return;
        }
    }

    // CPU fallback.
    Mat m = getMat(ACCESS_READ);
    m.convertTo(_dst, _type, alpha, beta);
}
开发者ID:RandallTalea,项目名称:opencv,代码行数:45,


示例11: mxCreateNumericArray

// Convert a cv::Mat to a MATLAB mxArray (older mexopencv variant).
// Channels are split and copied one by one into the last mxArray dimension,
// converting to the requested (or depth-derived) MATLAB class.
MxArray::MxArray(const cv::Mat& mat, mxClassID classid, bool transpose)
{
    // An empty Mat maps to an empty double array.
    if (mat.empty())
    {
        p_ = mxCreateNumericArray(0, 0, mxDOUBLE_CLASS, mxREAL);
        if (!p_)
            mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
        return;
    }
    // Transpose 2-D input to match MATLAB's column-major layout when asked.
    cv::Mat input = (mat.dims == 2 && transpose) ? mat.t() : mat;
    // Create a new mxArray.
    const int nchannels = input.channels();
    const int* dims_ = input.size;
    std::vector<mwSize> d(dims_, dims_ + input.dims);
    d.push_back(nchannels);
    classid = (classid == mxUNKNOWN_CLASS)
        ? ClassIDOf[input.depth()] : classid;
    std::swap(d[0], d[1]);
    if (classid == mxLOGICAL_CLASS)
    {
        // OpenCV's logical true is any nonzero while matlab's true is 1.
        cv::compare(input, 0, input, cv::CMP_NE);
        input.setTo(1, input);
        p_ = mxCreateLogicalArray(d.size(), &d[0]);
    }
    else {
        p_ = mxCreateNumericArray(d.size(), &d[0], classid, mxREAL);
    }
    if (!p_)
        mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
    // Copy each channel.
    std::vector<cv::Mat> channels;
    split(input, channels);
    std::vector<mwSize> si(d.size(), 0); // subscript index.
    int type = CV_MAKETYPE(DepthOf[classid], 1); // destination type.
    for (int i = 0; i < nchannels; ++i)
    {
        si[si.size() - 1] = i; // last dim is a channel index.
        void *ptr = reinterpret_cast<void*>(
                reinterpret_cast<size_t>(mxGetData(p_)) +
                mxGetElementSize(p_) * subs(si)); // ptr to i-th channel data.
        cv::Mat m(input.dims, dims_, type, ptr);  // header over mxArray memory.
        channels[i].convertTo(m, type); // Write to mxArray through m.
    }
}
开发者ID:demarlio25,项目名称:mexopencv,代码行数:45,


示例12: parse_path

/**
 * Load an image in the file of the proprietary format.
 * The file name describes the image configuration as:
 *   *-(width)x(height)x(num_channels)-(opencv_depth_code).bin
 * There is no header in the file. The file is a binary dump of an OpenCV cv::Mat data.
 * For the better portability, an existing format can be used to carry image data.
 * Returns an empty cv::Mat on any name-parsing, size-mismatch, or I/O error.
 */
cv::Mat cv_subtractor::read_binary_image_file(const std::string filename) {
  std::vector<int> tokens;
  { // Extract the information on the image from the file name
    const std::vector<char> delims = {'-', 'x','x','-','.'};
    std::string dir;
    std::string basename;
    parse_path(filename, dir, basename);
    tokens = get_tokens(basename, delims);
    if (tokens.size() != delims.size()) {
      return cv::Mat();
    }
  }

  std::ifstream file(filename, std::ios::binary);
  if (!file.good()) {
    return cv::Mat();
  }
  file.unsetf(std::ios::skipws);

  { // Check file size
    const size_t image_byte_size
      = tokens[1] * tokens[2] * tokens[3] * CV_ELEM_SIZE(tokens[4]);
    file.seekg(0, std::ios::end);
    const size_t file_size = static_cast<size_t>(file.tellg());
    if (image_byte_size != file_size) {
      return cv::Mat();
    }
  }

  // Construct an image data structure
  // NOTE(review): cv::Mat's constructor takes (rows, cols); given the
  // documented "(width)x(height)" name layout, tokens[1]/tokens[2] would be
  // width/height here -- confirm the intended ordering against the file writer.
  cv::Mat image(tokens[1], tokens[2], CV_MAKETYPE(tokens[4], tokens[3]));

  // Reset the file pointer
  file.seekg(0, std::ios::beg);

  // Load the image from the file
  std::copy(std::istream_iterator<unsigned char>(file),
            std::istream_iterator<unsigned char>(),
            reinterpret_cast<unsigned char*>(image.data));
  return image;
}
开发者ID:LLNL,项目名称:lbann,代码行数:51,


示例13: CV_MAT_DEPTH

// Compute the first x- or y-image derivative using the 3x3 Scharr operator.
// Tegra and IPP fast paths are tried first when available; otherwise the work
// is delegated to sepFilter2D with separable Scharr kernels.
void cv::Scharr( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
                 double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;  // negative ddepth means "same depth as the source"
    int dtype = CV_MAKETYPE(ddepth, cn);
    _dst.create( _src.size(), dtype );

#ifdef HAVE_TEGRA_OPTIMIZATION
    if (tegra::useTegra() && scale == 1.0 && delta == 0)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (tegra::scharr(src, dst, dx, dy, borderType))
            return;
    }
#endif

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
    CV_IPP_CHECK()
    {
        if (IPPDerivScharr(_src, _dst, ddepth, dx, dy, scale, delta, borderType))
        {
            CV_IMPL_ADD(CV_IMPL_IPP);
            return;
        }
    }
#endif
    // Kernels are computed in at least 32-bit float precision.
    int ktype = std::max(CV_32F, std::max(ddepth, sdepth));

    Mat kx, ky;
    getScharrKernels( kx, ky, dx, dy, false, ktype );
    if( scale != 1 )
    {
        // usually the smoothing part is the slowest to compute,
        // so try to scale it instead of the faster differenciating part
        if( dx == 0 )
            kx *= scale;
        else
            ky *= scale;
    }
    sepFilter2D( _src, _dst, ddepth, kx, ky, Point(-1, -1), delta, borderType );
}
开发者ID:165-goethals,项目名称:opencv,代码行数:43,


示例14: CV_Assert

// Collect the translation component of every pose in a trajectory into a
// 1xN, 3-channel Mat with the same depth as the input (CV_32F or CV_64F).
// The trajectory must be a vector/Mat of 4x4 affine poses (16-channel).
cv::Mat cv::viz::vtkTrajectorySource::ExtractPoints(InputArray _traj)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);
    CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

    const int total = (int)_traj.total();
    Mat points(1, total, CV_MAKETYPE(_traj.depth(), 3));

    const Affine3d* dpath = _traj.getMat().ptr<Affine3d>();
    const Affine3f* fpath = _traj.getMat().ptr<Affine3f>();

    // Exactly one of the two branches runs, matching the asserted depth.
    if (_traj.depth() == CV_32F)
    {
        for (int i = 0; i < total; ++i)
            points.at<Vec3f>(i) = fpath[i].translation();
    }
    if (_traj.depth() == CV_64F)
    {
        for (int i = 0; i < total; ++i)
            points.at<Vec3d>(i) = dpath[i].translation();
    }

    return points;
}
开发者ID:cyberCBM,项目名称:DetectO,代码行数:19,


示例15: GPUSender

 // Pre-allocate a ROS Image message shaped like `example` (with the requested
 // encoding) and wrap its pixel buffer in a cv::Mat so GPU results can be
 // written straight into the outgoing message.  The buffer is page-locked for
 // fast device-to-host copies.
 GPUSender(sensor_msgs::ImageConstPtr example, std::string encoding,
        ros::Publisher* pub) :
    image_msg_(new sensor_msgs::Image()),
    publisher_(pub)
 {
     int bitdepth, channels;
     bitdepth = sensor_msgs::image_encodings::bitDepth(encoding);
     channels = sensor_msgs::image_encodings::numChannels(encoding);
     image_msg_->header = example->header;
     image_msg_->height = example->height;
     image_msg_->width = example->width;
     image_msg_->encoding = encoding;
     // step is bytes per row: width * bits-per-channel * channels / 8.
     image_msg_->step = example->width*bitdepth*channels/8;
     image_msg_->data.resize( image_msg_->height * image_msg_->step );
     // cv::Mat view over the message's own data buffer (no copy).
     image_data_ = cv::Mat(image_msg_->height, image_msg_->width,
             CV_MAKETYPE(CV_8U, channels),
             &image_msg_->data[0], image_msg_->step);
     cv::cuda::registerPageLocked(image_data_);
 }
开发者ID:contradict,项目名称:SampleReturn,代码行数:19,


示例16: CV_MAT_DEPTH

// Compute image derivatives with an extended Sobel operator of aperture
// `ksize` (ksize == -1 selects the 3x3 Scharr kernel).  Tegra and IPP fast
// paths are tried first; otherwise the work is delegated to sepFilter2D.
void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
               int ksize, double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;  // negative ddepth means "same depth as the source"
    _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );

#ifdef HAVE_TEGRA_OPTIMIZATION
    if (scale == 1.0 && delta == 0)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (ksize == 3 && tegra::sobel3x3(src, dst, dx, dy, borderType))
            return;
        if (ksize == -1 && tegra::scharr(src, dst, dx, dy, borderType))
            return;
    }
#endif

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
    if(dx < 3 && dy < 3 && cn == 1 && borderType == BORDER_REPLICATE)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (IPPDeriv(src, dst, ddepth, dx, dy, ksize,scale))
            return;
    }
#endif
    // Kernels are computed in at least 32-bit float precision.
    int ktype = std::max(CV_32F, std::max(ddepth, sdepth));

    Mat kx, ky;
    getDerivKernels( kx, ky, dx, dy, ksize, false, ktype );
    if( scale != 1 )
    {
        // usually the smoothing part is the slowest to compute,
        // so try to scale it instead of the faster differenciating part
        if( dx == 0 )
            kx *= scale;
        else
            ky *= scale;
    }
    sepFilter2D( _src, _dst, ddepth, kx, ky, Point(-1, -1), delta, borderType );
}
开发者ID:HanaLeeHn,项目名称:opencv,代码行数:42,


示例17: ocl_pyrDown

// OpenCL implementation of pyrDown (older variant without per-work-item
// unrolling).  Returns false when the inputs are unsupported or the kernel
// cannot be built, so the caller can fall back to the CPU path.
static bool ocl_pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), channels = CV_MAT_CN(type);
    if (channels > 4 || borderType != BORDER_DEFAULT)
        return false;

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    if ((depth == CV_64F) && !(doubleSupport))
        return false;

    // Destination defaults to half size (rounded up) when _dsz is unspecified.
    Size ssize = _src.size();
    Size dsize = _dsz.area() == 0 ? Size((ssize.width + 1) / 2, (ssize.height + 1) / 2) : _dsz;
    CV_Assert( ssize.width > 0 && ssize.height > 0 &&
            std::abs(dsize.width*2 - ssize.width) <= 2 &&
            std::abs(dsize.height*2 - ssize.height) <= 2 );

    UMat src = _src.getUMat();
    _dst.create( dsize, src.type() );
    UMat dst = _dst.getUMat();

    // Intermediate arithmetic runs in float unless the input is double.
    int float_depth = depth == CV_64F ? CV_64F : CV_32F;
    char cvt[2][50];
    String buildOptions = format(
            "-D T=%s -D FT=%s -D convertToT=%s -D convertToFT=%s%s "
            "-D T1=%s -D cn=%d",
            ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, channels)),
            ocl::convertTypeStr(float_depth, depth, channels, cvt[0]),
            ocl::convertTypeStr(depth, float_depth, channels, cvt[1]),
            doubleSupport ? " -D DOUBLE_SUPPORT" : "",
            ocl::typeToStr(depth), channels
    );
    ocl::Kernel k("pyrDown", ocl::imgproc::pyr_down_oclsrc, buildOptions);
    if (k.empty())
        return false;

    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst));

    size_t localThreads[2]  = { 256, 1 };
    size_t globalThreads[2] = { src.cols, dst.rows };
    return k.run(2, globalThreads, localThreads, false);
}
开发者ID:JackieSTU,项目名称:opencv,代码行数:42,


示例18: merge

    // Merge n single-channel GpuMats into one multi-channel GpuMat (CUDA path).
    // All inputs must share size and depth; at most 4 total channels.
    void merge(const GpuMat* src, size_t n, GpuMat& dst, const cudaStream_t& stream)
    {
        CV_Assert(src);
        CV_Assert(n > 0);

        // double data requires native-double support on the device.
        bool double_ok = TargetArchs::builtWith(NATIVE_DOUBLE) &&
                         DeviceInfo().supports(NATIVE_DOUBLE);
        CV_Assert(src[0].depth() != CV_64F || double_ok);

        int depth = src[0].depth();
        Size size = src[0].size();

        bool single_channel_only = true;
        int total_channels = 0;

        // Validate that every input matches the first in size and depth
        // and is single-channel.
        for (size_t i = 0; i < n; ++i)
        {
            CV_Assert(src[i].size() == size);
            CV_Assert(src[i].depth() == depth);
            single_channel_only = single_channel_only && src[i].channels() == 1;
            total_channels += src[i].channels();
        }

        CV_Assert(single_channel_only);
        CV_Assert(total_channels <= 4);

        if (total_channels == 1)
            src[0].copyTo(dst);  // trivial case: one input, plain copy
        else
        {
            dst.create(size, CV_MAKETYPE(depth, total_channels));

            DevMem2D src_as_devmem[4];
            for(size_t i = 0; i < n; ++i)
                src_as_devmem[i] = src[i];

            DevMem2D dst_as_devmem(dst);
            split_merge::merge_caller(src_as_devmem, dst_as_devmem,
                                      total_channels, CV_ELEM_SIZE(depth),
                                      stream);
        }
    }
开发者ID:JaehyunAhn,项目名称:Basic_OpenCV_utilization,代码行数:42,


示例19: split

            static void split(const oclMat &mat_src, oclMat *mat_dst)            {                CV_Assert(mat_dst);                int depth = mat_src.depth();                int num_channels = mat_src.oclchannels();                Size size = mat_src.size();                if(num_channels == 1)                {                    mat_src.copyTo(mat_dst[0]);                    return;                }                int i;                for(i = 0; i < num_channels; i++)                    mat_dst[i].create(size, CV_MAKETYPE(depth, 1));                split_vector_run(mat_src, mat_dst);            }
开发者ID:Reetalibrarian,项目名称:opencv,代码行数:20,


示例20: rb_query

/*
 * call-seq:
 *   query -> IplImage or nil
 *
 * Grabs and returns a frame from camera or file. Just a combination of grab
 * and retrieve in one call.  OpenCV errors are converted to Ruby exceptions
 * via raise_cverror.
 */
VALUE
rb_query(VALUE self)
{
  VALUE image = Qnil;
  try {
    IplImage *frame = cvQueryFrame(CVCAPTURE(self));
    if (!frame)
      return Qnil;
    image = cIplImage::new_object(cvSize(frame->width, frame->height),
				  CV_MAKETYPE(CV_8U, frame->nChannels));
    // Top-left origin frames copy directly; bottom-left ones must be flipped.
    if (frame->origin == IPL_ORIGIN_TL)
      cvCopy(frame, CVARR(image));
    else
      cvFlip(frame, CVARR(image));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return image;
}
开发者ID:iceydee,项目名称:ruby-opencv,代码行数:26,


示例21: UNUSED

// Resize `mat` in place to the configured (m_imgWidth x m_imgHeight), picking
// one of the configured interpolation methods at random from a pooled,
// seeded RNG.  The matrix is first converted to the configured element type.
void ScaleTransformer::Apply(size_t id, cv::Mat &mat)
{
    UNUSED(id);

    // If matrix has not been converted to the right type, do it now as rescaling
    // requires floating point type.
    if (mat.type() != CV_MAKETYPE(m_imageElementType, m_imgChannels))
    {
        mat.convertTo(mat, m_imageElementType);
    }

    auto seed = GetSeed();
    // RNGs are pooled so concurrent transforms each use their own generator.
    auto rng = m_rngs.pop_or_create([seed]() { return std::make_unique<std::mt19937>(seed); });

    // Pick a random interpolation method from the configured candidates.
    auto index = UniIntT(0, static_cast<int>(m_interp.size()) - 1)(*rng);
    assert(m_interp.size() > 0);
    cv::resize(mat, mat, cv::Size((int)m_imgWidth, (int)m_imgHeight), 0, 0, m_interp[index]);

    m_rngs.push(std::move(rng));
}
开发者ID:DanielMerget,项目名称:CNTK,代码行数:20,


示例22: img

// Build the color coherence vectors (CCV) of a texture: the texture data is
// wrapped in a cv::Mat without copying, split into three planes, and one CCV
// is computed per plane.
CCV::CCV(Texture* t, int numColors, int coherenceThreshold)
{
	this->m_numColors	 	= numColors;
	this->m_coherenceThreshold 	= coherenceThreshold;
	this->m_numPix 			= t->m_width * t->m_height;

	//convert texture to cv::Mat (header over t->m_data, no copy)
	// NOTE(review): CV_MAKETYPE expects an OpenCV depth *code* (CV_8U, ...),
	// not a bit count -- confirm that m_numBytesPerChan * 8 yields the
	// intended depth code here.
	cv::Mat img(cv::Size(t->m_width, t->m_height), CV_MAKETYPE(t->m_numBytesPerChan * 8, t->m_numChannels), t->m_data);

	//split the image into its' r, g and b channel
	cv::Mat img_planes[3];
	cv::split(img, img_planes);

	//calculate the CCVs
	m_CCV_r = calculateCCV(img_planes[0]);
	m_CCV_g = calculateCCV(img_planes[1]);
	m_CCV_b = calculateCCV(img_planes[2]);
}
开发者ID:sobols,项目名称:Las-Vegas-Reconstruction,代码行数:20,


示例23: _prepareImgAndDrawKeypoints

// Lay img1 and img2 side by side in outImg (img1 left, img2 right), returning
// ROI views outImg1/outImg2 onto the two halves, and optionally draw the
// unmatched (single) keypoints of each image.
static void _prepareImgAndDrawKeypoints( const Mat& img1, const vector<KeyPoint>& keypoints1,
                                         const Mat& img2, const vector<KeyPoint>& keypoints2,
                                         Mat& outImg, Mat& outImg1, Mat& outImg2,
                                         const Scalar& singlePointColor, int flags )
{
    Size size( img1.cols + img2.cols, MAX(img1.rows, img2.rows) );
    if( flags & DrawMatchesFlags::DRAW_OVER_OUTIMG )
    {
        // Caller supplied the canvas; it must already be large enough.
        if( size.width > outImg.cols || size.height > outImg.rows )
            CV_Error( CV_StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
        outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
    }
    else
    {
        // Allocate a fresh 3-channel canvas and copy the images in,
        // promoting 8-bit grayscale inputs to BGR.
        outImg.create( size, CV_MAKETYPE(img1.depth(), 3) );
        outImg = Scalar::all(0);
        outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );

        if( img1.type() == CV_8U )
            cvtColor( img1, outImg1, CV_GRAY2BGR );
        else
            img1.copyTo( outImg1 );

        if( img2.type() == CV_8U )
            cvtColor( img2, outImg2, CV_GRAY2BGR );
        else
            img2.copyTo( outImg2 );
    }

    // draw keypoints
    if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
    {
        Mat _outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );

        Mat _outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
        drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
    }
}
开发者ID:DenisBMSTU,项目名称:install_opencv,代码行数:41,


示例24: DispCvArrMultiChannel

// Display a multi-channel CvArr one channel at a time: the array is split
// into up to four single-channel CvMats and each is shown via DispCvArr with
// a "<varName> : channel i" caption.
DLLEXP void DispCvArrMultiChannel( CvArr *a, char *varName /*= VAR_NAME(varName)*/ )
{
	CvMat tmpHeader, *m;
	m = cvGetMat(a, &tmpHeader);
	int cnNum = CV_MAT_CN(m->type), depth = CV_MAT_DEPTH(m->type);
	// cvSplit takes four destination slots; slots beyond cnNum stay NULL.
	CvMat **p = new CvMat *[4];
	for (int i = 0; i < 4; i++)
		p[i] = ( i >= cnNum ? NULL :
		cvCreateMat(m->rows, m->cols, CV_MAKETYPE(depth, 1)) );
	cvSplit(m, p[0], p[1], p[2], p[3]);
	for (int i = 0; i < cnNum; i++)
	{
		CString str;
		str.Format("%s : channel %d", varName, i);
		DispCvArr(p[i], str.GetBuffer());
	}
	// Release the per-channel mats and the pointer array.
	for (int i = 0; i < cnNum; i++)
		cvReleaseMat(&p[i]);
	delete []p;
}
开发者ID:FlynnShek,项目名称:facerecog,代码行数:21,


示例25: CV_MAKETYPE

/*! Grab the next frame from the capture device into cvImage (as RGB).
	\return true if the video capture object is opened */
bool ofxCv2FrameGrabber::getNewFrame()
{
	if (!cap.isOpened()) { return false; }

	Mat frame;
	cap >> frame;

	// Reallocate the cached image only when the incoming frame's geometry,
	// channel count, or depth changed.
	if (frame.cols != cvImage.cols ||
		frame.rows != cvImage.rows ||
		frame.channels() != cvImage.channels() ||
		frame.depth() != cvImage.depth())
	{
		cvImage.create(frame.rows, frame.cols, CV_MAKETYPE(CV_MAT_DEPTH(frame.depth()),frame.channels()));
	}
	// OpenCV captures BGR; convert to RGB for the openFrameworks texture.
	cvtColor(frame, cvImage, CV_BGR2RGB);

	textureIsDirty = true;

	return true;
}
开发者ID:marynel,项目名称:ofxOpenCv2,代码行数:24,


示例26: _prepareImgAndDrawKeypoints

// Lay img1 and img2 side by side in _outImg and optionally draw each image's
// keypoints onto its own half.  outImg1/outImg2 receive ROI headers aliasing
// the left/right halves so the caller can draw match lines into them later.
static void _prepareImgAndDrawKeypoints( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                                         InputArray img2, const std::vector<KeyPoint>& keypoints2,
                                         InputOutputArray _outImg, Mat& outImg1, Mat& outImg2,
                                         const Scalar& singlePointColor, DrawMatchesFlags flags )
{
    Mat outImg;
    Size img1size = img1.size(), img2size = img2.size();
    // Canvas geometry: widths add up, height is the taller of the two inputs.
    Size size( img1size.width + img2size.width, MAX(img1size.height, img2size.height) );
    // !! forces the strongly-typed DrawMatchesFlags bitmask result to bool.
    if( !!(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
    {
        // Caller supplied the canvas; it must already be large enough.
        outImg = _outImg.getMat();
        if( size.width > outImg.cols || size.height > outImg.rows )
            CV_Error( Error::StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
        outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
    }
    else
    {
        // Output channel count: at least 3 (so colored markers are visible),
        // or more if either input has more channels.
        const int cn1 = img1.channels(), cn2 = img2.channels();
        const int out_cn = std::max(3, std::max(cn1, cn2));
        _outImg.create(size, CV_MAKETYPE(img1.depth(), out_cn));
        outImg = _outImg.getMat();
        outImg = Scalar::all(0);  // zero-fill the whole canvas first
        outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );

        // _prepareImage copies each input into its half, converting channel
        // count as needed.
        _prepareImage(img1, outImg1);
        _prepareImage(img2, outImg2);
    }

    // draw keypoints
    if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
    {
        Mat _outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );

        Mat _outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
        drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
    }
}
开发者ID:JoeHowse,项目名称:opencv,代码行数:40,


示例27: run_still_image

void run_still_image(const std::string& filename) {    cv::Mat img = cv::imread(filename, cv::IMREAD_COLOR);    if (img.empty()) {        std::cout << "Failed to load image /"" << filename << "/"." << std::endl;        return;    }    int width = img.cols;    int height = img.rows;    int dim = img.channels();    img.convertTo(img, CV_32F, 1.0 / 255.0);    cv::Mat gray, out;    std::vector<cv::Point2f> points;    cv::Mat noise;    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);    lime::npr::poissonDisk(gray, &points, lime::npr::PDS_RAND_QUEUE, 1.0, 4.0);    pencilDrawing(img, out, points);    cv::namedWindow("Input");    cv::namedWindow("Output");    cv::imshow("Input", img);    cv::imshow("Output", out);    cv::waitKey(0);    cv::destroyAllWindows();    if (save_result) {        out.convertTo(out, CV_MAKETYPE(CV_8U, dim), 255.0);        noise.convertTo(noise, CV_8UC3, 255.0);        std::string base = basename(filename);        std::string fileout = base + "_out.png";        std::string filenoise = base + "_noise.png";        cv::imwrite(fileout, out);        cv::imwrite(filenoise, noise);        std::cout << "Results are saved !!" << std::endl;    }}
开发者ID:caomw,项目名称:lime,代码行数:39,


示例28: rb_query

/*
 * Grabs, decodes and returns the next video frame.
 * @overload query
 * @return [IplImage] Next video frame
 * @return [nil] Failed to read next video frame
 * @opencv_func cvQueryFrame
 */
VALUE
rb_query(VALUE self)
{
  VALUE image = Qnil;
  IplImage *frame = NULL;
  try {
    /* cvQueryFrame returns NULL when no frame can be grabbed (end of
     * stream / device error); surface that to Ruby as nil. */
    if (!(frame = cvQueryFrame(CVCAPTURE(self)))) {
      return Qnil;
    }
    /* The frame buffer is owned and reused by the capture object, so copy
     * it into a fresh Ruby-owned IplImage with a matching Mat type. */
    image = cIplImage::new_object(frame->width, frame->height,
				  CV_MAKETYPE(IPL2CV_DEPTH(frame->depth), frame->nChannels));
    if (frame->origin == IPL_ORIGIN_TL) {
      /* Top-left origin: plain copy preserves orientation. */
      cvCopy(frame, CVARR(image));
    }
    else {
      /* Bottom-left origin: flip vertically so the result is top-down. */
      cvFlip(frame, CVARR(image));
    }
  }
  catch (cv::Exception& e) {
    /* Re-raise OpenCV errors as Ruby exceptions instead of aborting. */
    raise_cverror(e);
  }
  return image;
}
开发者ID:PoloPro,项目名称:ruby-opencv,代码行数:30,


示例29: CV_MAKETYPE

// Compute the first x- or y- image derivative using the 3x3 Scharr operator.
// ddepth < 0 means "same depth as the source".  Tries hardware-accelerated
// paths (Tegra, IPP) first, then falls back to a separable filter built from
// the Scharr kernels.
void cv::Scharr( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
                 double scale, double delta, int borderType )
{
    Mat src = _src.getMat();
    if (ddepth < 0)
        ddepth = src.depth();
    _dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) );
    Mat dst = _dst.getMat();

#ifdef HAVE_TEGRA_OPTIMIZATION
    // Tegra fast path handles only the un-scaled, un-shifted case.
    if (scale == 1.0 && delta == 0)
        if (tegra::scharr(src, dst, dx, dy, borderType))
            return;
#endif

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
    // IPP fast path: single-channel, low derivative orders, this specific
    // border mode only; falls through on failure.
    if(dx < 2 && dy < 2 && src.channels() == 1 && borderType == 1)
    {
        if(IPPDerivScharr(src, dst, ddepth, dx, dy, scale))
            return;
    }
#endif
    // Kernel type must be at least float and wide enough for src/dst depths.
    int ktype = std::max(CV_32F, std::max(ddepth, src.depth()));

    Mat kx, ky;
    getScharrKernels( kx, ky, dx, dy, false, ktype );
    if( scale != 1 )
    {
        // usually the smoothing part is the slowest to compute,
        // so try to scale it instead of the faster differenciating part
        if( dx == 0 )
            kx *= scale;
        else
            ky *= scale;
    }
    sepFilter2D( src, dst, ddepth, kx, ky, Point(-1,-1), delta, borderType );
}
开发者ID:2693,项目名称:opencv,代码行数:37,


示例30: size

// Compose the left and right camera images side by side and push the result
// into the streaming video buffer.  Alternates the default video mode between
// the combined buffer and the fallback on a 0..10 toggle counter.
// NOTE(review): assumes leftImage/rightImage are single-channel grayscale of
// equal height (cvtColor CV_GRAY2BGR, height taken from leftImage) — confirm
// against callers.
void testproject::GSstreamImages(cv::Mat& leftImage,cv::Mat& rightImage)
{ //HL changed Img to Image
	Mat doubleImg;
	Mat combined_img;
	Mat upsidedown_img;
	if (videostreaming.videoStreamingOn)
	{
		// Pushes an (empty) frame into the double buffer first — presumably
		// to advance/clear it; verify against videostreaming's contract.
		videostreaming.update_MatVideoBuffer(doubleVideoBuffer, doubleImg);

		Size size( leftImage.cols + rightImage.cols, leftImage.rows );
		//place two images side by side
		combined_img.create( size, CV_MAKETYPE(leftImage.depth(), 3) );
		// ROI headers aliasing the left/right halves of the canvas.
		Mat imgLeft = combined_img( Rect(0, 0, leftImage.cols, leftImage.rows) );
		Mat imgRight = combined_img( Rect(leftImage.cols, 0, rightImage.cols, rightImage.rows) );
		cvtColor( leftImage, imgLeft, CV_GRAY2BGR );
		cvtColor( rightImage, imgRight, CV_GRAY2BGR );

		// Halve the resolution before streaming.
		Mat smaller;
		pyrDown(combined_img, smaller, Size(combined_img.cols/2, combined_img.rows/2));
		// NOTE(review): upsidedown_img is computed here but never used below;
		// the un-flipped `smaller` is what gets streamed — dead code?
		flip(smaller, upsidedown_img,0);

		// Alternate the default video mode: combined buffer for toggle 0..4,
		// fallback for 5..10.
		if (toggle < 5) {
			videostreaming.setAsDefaultVideoMode(combinedVideoBuffer);
		} 
		else {
			videostreaming.setAsDefaultVideoMode();
		}
		// Counter cycles 0..10 inclusive.
		toggle = toggle == 10 ? 0 : toggle+1;

		videostreaming.update_MatVideoBuffer(combinedVideoBuffer, smaller);
	}
}
开发者ID:Gabs48,项目名称:HaloLinux_Software,代码行数:37,



注:本文中的CV_MAKETYPE函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


C++ CV_MAT_CN函数代码示例
C++ CV_INSTRUMENT_REGION函数代码示例
万事OK自学网:51自学网_软件自学网_CAD自学网自学excel、自学PS、自学CAD、自学C语言、自学css3实例,是一个通过网络自主学习工作技能的自学平台,网友喜欢的软件自学网站。