
Tutorial: C++ CHECK_GT Function Code Examples

51自学网 2021-06-01 20:00:49
This tutorial on C++ CHECK_GT code examples is meant to be practical; we hope it helps you.

This article collects typical usage examples of the C++ CHECK_GT function. If you have been asking yourself what exactly CHECK_GT does, how to use it, or where to find real examples of it, the hand-picked code samples below should help.

Below are 27 code examples of the CHECK_GT function, sorted by popularity by default.
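Before the project-specific examples, here is a minimal, self-contained sketch of the basic semantics. It assumes the google/glog library, where CHECK_GT(val1, val2) evaluates both arguments and aborts with a FATAL log entry when val1 <= val2; the Caffe, Ceres, and Android code below uses glog macros or project-local equivalents (for instance, BitFunnel's variants throw Logging::CheckException instead of aborting).

#include <glog/logging.h>

// Minimal sketch (assumes google/glog). CHECK_GT(val1, val2) aborts with a
// FATAL log entry when val1 <= val2; values streamed after the macro are
// appended to the failure message.
int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);

  int batch_size = 16;
  // Passes silently: 16 > 0.
  CHECK_GT(batch_size, 0) << "batch_size must be positive, got " << batch_size;

  // Fails and terminates the process with a message along the lines of:
  //   Check failed: argc > 100 (1 vs. 100) pass more program arguments
  CHECK_GT(argc, 100) << "pass more program arguments";
  return 0;
}

Because a failing check prints both operands alongside any streamed message, CHECK_GT is a cheap way to validate sizes, counts, and other preconditions before they are used, which is exactly how most of the examples below employ it.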

Example 1: CHECK_GT

template <typename Dtype>
void MemoryDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
  batch_size_ = this->layer_param_.memory_data_param().batch_size();
  this->datum_channels_ = this->layer_param_.memory_data_param().channels();
  this->datum_height_ = this->layer_param_.memory_data_param().height();
  this->datum_width_ = this->layer_param_.memory_data_param().width();
  this->datum_size_ = this->datum_channels_ * this->datum_height_ *
      this->datum_width_;
  CHECK_GT(batch_size_ * this->datum_size_, 0) <<
      "batch_size, channels, height, and width must be specified and"
      " positive in memory_data_param";
  (*top)[0]->Reshape(batch_size_, this->datum_channels_, this->datum_height_,
                     this->datum_width_);
  (*top)[1]->Reshape(batch_size_, 1, 1, 1);
  added_data_.Reshape(batch_size_, this->datum_channels_, this->datum_height_,
                      this->datum_width_);
  added_label_.Reshape(batch_size_, 1, 1, 1);
  data_ = NULL;
  labels_ = NULL;
  added_data_.cpu_data();
  added_label_.cpu_data();
}
Developer: caomw | Project: DISC | Lines: 22


Example 2: CHECK

template <typename Dtype>
void MemoryDataLayer<Dtype>::AddMatVector(const vector<cv::Mat>& mat_vector,
    const vector<int>& labels) {
  size_t num = mat_vector.size();
  CHECK(!has_new_data_) <<
      "Can't add mat until current data has been consumed.";
  CHECK_GT(num, 0) << "There is no mat to add";
  CHECK_EQ(num % batch_size_, 0) <<
      "The added data must be a multiple of the batch size.";
  added_data_.Reshape(num, channels_, height_, width_);
  added_label_.Reshape(num, 1, 1, 1);
  // Apply data transformations (mirror, scale, crop...)
  this->data_transformer_->Transform(mat_vector, &added_data_);
  // Copy Labels
  Dtype* top_label = added_label_.mutable_cpu_data();
  for (int item_id = 0; item_id < num; ++item_id) {
    top_label[item_id] = labels[item_id];
  }
  // num_images == batch_size_
  Dtype* top_data = added_data_.mutable_cpu_data();
  Reset(top_data, top_label, num);
  has_new_data_ = true;
}
Developer: codeaudit | Project: Xeon-CafPhi | Lines: 22


Example 3: CHECK_GT

template <typename Dtype>
void LogLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  const Dtype base = this->layer_param_.log_param().base();
  if (base != Dtype(-1)) {
    CHECK_GT(base, 0) << "base must be strictly positive.";
  }
  // If base == -1, interpret the base as e and set log_base = 1 exactly.
  // Otherwise, calculate its log explicitly.
  const Dtype log_base = (base == Dtype(-1)) ? Dtype(1) : log(base);
  CHECK(!isnan(log_base))
      << "NaN result: log(base) = log(" << base << ") = " << log_base;
  CHECK(!isinf(log_base))
      << "Inf result: log(base) = log(" << base << ") = " << log_base;
  base_scale_ = Dtype(1) / log_base;
  CHECK(!isnan(base_scale_))
      << "NaN result: 1/log(base) = 1/log(" << base << ") = " << base_scale_;
  CHECK(!isinf(base_scale_))
      << "Inf result: 1/log(base) = 1/log(" << base << ") = " << base_scale_;
  input_scale_ = this->layer_param_.log_param().scale();
  input_shift_ = this->layer_param_.log_param().shift();
  backward_num_scale_ = input_scale_ / log_base;
}
Developer: 71squared | Project: caffe | Lines: 23


Example 4: CHECK_GT

template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientEltwise(
    Layer<Dtype>* layer,
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  layer->SetUp(bottom, top);
  CHECK_GT(top.size(), 0) << "Eltwise mode requires at least one top blob.";
  const int check_bottom = -1;
  const bool element_wise = true;
  for (int i = 0; i < top.size(); ++i) {
    for (int j = 0; j < top[i]->count(); ++j) {
      CheckGradientSingle(layer, bottom, top, check_bottom, i, j,
                          element_wise);
    }
  }
}
Developer: rickyHong | Project: CaffeForOpenCL | Lines: 23


Example 5: setAVCFormat

// H.264 bitstream without start codes.
sp<MetaData> setAVCFormat(AVCodecContext *avctx) {
    ALOGV("AVC");

    CHECK_EQ(avctx->codec_id, AV_CODEC_ID_H264);
    CHECK_GT(avctx->extradata_size, 0);
    CHECK_EQ(avctx->extradata[0], 1);  // configurationVersion

    if (avctx->width == 0 || avctx->height == 0) {
        int32_t width, height;
        sp<ABuffer> seqParamSet = new ABuffer(avctx->extradata_size - 8);
        memcpy(seqParamSet->data(), avctx->extradata + 8, avctx->extradata_size - 8);
        FindAVCDimensions(seqParamSet, &width, &height);
        avctx->width  = width;
        avctx->height = height;
    }

    sp<MetaData> meta = new MetaData;
    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
    meta->setData(kKeyAVCC, kTypeAVCC, avctx->extradata, avctx->extradata_size);

    return meta;
}
Developer: daddy366 | Project: anarchy-stagefright-plugins | Lines: 24


Example 6: ExpectArraysCloseUptoScale

void ExpectArraysCloseUptoScale(int n,
                                const double* p,
                                const double* q,
                                double tol) {
  CHECK_GT(n, 0);
  CHECK(p);
  CHECK(q);

  double p_max = 0;
  double q_max = 0;
  int p_i = 0;
  int q_i = 0;

  for (int i = 0; i < n; ++i) {
    if (std::abs(p[i]) > p_max) {
      p_max = std::abs(p[i]);
      p_i = i;
    }
    if (std::abs(q[i]) > q_max) {
      q_max = std::abs(q[i]);
      q_i = i;
    }
  }

  // If both arrays are all zeros, they are equal up to scale, but
  // for testing purposes, that's more likely to be an error than
  // a desired result.
  CHECK_NE(p_max, 0.0);
  CHECK_NE(q_max, 0.0);

  for (int i = 0; i < n; ++i) {
    double p_norm = p[i] / p[p_i];
    double q_norm = q[i] / q[q_i];
    EXPECT_NEAR(p_norm, q_norm, tol) << "i=" << i;
  }
}
Developer: hanjianwei | Project: ACG-Tracker-Demo | Lines: 37


Example 7: CHECK

template <typename Dtype>
void MemoryDataLayer<Dtype>::AddDatumVector(const vector<Datum>& datum_vector) {
  CHECK(!has_new_data_) <<
      "Can't add Datum when earlier ones haven't been consumed"
      << " by the upper layers";
  size_t num = datum_vector.size();
  CHECK_GT(num, 0) << "There is no datum to add";
  CHECK_LE(num, batch_size_) <<
      "The number of added datum must be no greater than the batch size";
  Dtype* top_data = added_data_.mutable_cpu_data();
  Dtype* top_label = added_label_.mutable_cpu_data();
  for (int batch_item_id = 0; batch_item_id < num; ++batch_item_id) {
    // Apply data transformations (mirror, scale, crop...)
    this->data_transformer_.Transform(
        batch_item_id, datum_vector[batch_item_id], this->mean_, top_data);
    // top_label[batch_item_id] = datum_vector[batch_item_id].label();
    for (int i = 0; i < datum_vector[batch_item_id].label().size(); i++) {
      top_label[batch_item_id * datum_vector[batch_item_id].label().size() + i] =
          datum_vector[batch_item_id].label(i);
    }
  }
  // num_images == batch_size_
  Reset(top_data, top_label, batch_size_);
  has_new_data_ = true;
}
Developer: caomw | Project: DISC | Lines: 24


Example 8: CHECK

template <typename Dtype>
vector<int> DataTransformer<Dtype>::InferBlobShape(const Datum& datum) {
#ifndef CAFFE_HEADLESS
  if (datum.encoded()) {
    CHECK(!(param_.force_color() && param_.force_gray()))
        << "cannot set both force_color and force_gray";
    cv::Mat cv_img;
    if (param_.force_color() || param_.force_gray()) {
      // If force_color then decode in color otherwise decode in gray.
      cv_img = DecodeDatumToCVMat(datum, param_.force_color());
    } else {
      cv_img = DecodeDatumToCVMatNative(datum);
    }
    // InferBlobShape using the cv::image.
    return InferBlobShape(cv_img);
  }
#endif
  const int crop_size = param_.crop_size();
  const int datum_channels = datum.channels();
  const int datum_height = datum.height();
  const int datum_width = datum.width();
  // Check dimensions.
  CHECK_GT(datum_channels, 0);
  CHECK_GE(datum_height, crop_size);
  CHECK_GE(datum_width, crop_size);
  // Build BlobShape.
  vector<int> shape(4);
  shape[0] = 1;
  shape[1] = datum_channels;
  shape[2] = (crop_size) ? crop_size : datum_height;
  shape[3] = (crop_size) ? crop_size : datum_width;
  return shape;
}
Developer: appcoreopc | Project: Strada.jl | Lines: 36


Example 9: CHECK_GT

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.cpu_data();
  const Dtype* label = bottom[1]->cpu_data();
  int num = prob_.num();
  int dim = prob_.count() / num;
  int spatial_dim = prob_.height() * prob_.width();
  Dtype loss = 0;
  for (int i = 0; i < num; ++i) {
    for (int j = 0; j < spatial_dim; j++) {
      const int label_value = static_cast<int>(label[i * spatial_dim + j]);
      CHECK_GT(dim, label_value * spatial_dim);
      loss -= log(std::max(prob_data[i * dim + label_value * spatial_dim + j],
                           Dtype(FLT_MIN)));
    }
  }
  top[0]->mutable_cpu_data()[0] = loss / num / spatial_dim;
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
Developer: alasin | Project: ViewpointsAndKeypoints | Lines: 24


Example 10: MY_LOGI

void SfDelegate::OnReadCompleted(net::URLRequest *request, int bytes_read) {
    if (bytes_read == -1) {
        MY_LOGI(StringPrintf(
                    "OnReadCompleted, read failed, status %d",
                    request->status().status()).c_str());
        mOwner->onReadCompleted(ERROR_IO);
        return;
    }

    MY_LOGV(StringPrintf("OnReadCompleted, read %d bytes", bytes_read).c_str());

    if (bytes_read < 0) {
        MY_LOGI(StringPrintf(
                    "Read failed w/ status %d\n",
                    request->status().status()).c_str());
        mOwner->onReadCompleted(ERROR_IO);
        return;
    } else if (bytes_read == 0) {
        mAtEOS = true;
        mOwner->onReadCompleted(mNumBytesRead);
        return;
    }

    CHECK_GT(bytes_read, 0);
    CHECK_LE(mNumBytesRead + bytes_read, mNumBytesTotal);

    memcpy((uint8_t *)mDataDestination + mNumBytesRead,
           mReadBuffer->data(),
           bytes_read);

    mNumBytesRead += bytes_read;

    readMore(request);
}
Developer: Dm47021 | Project: chaos_frameworks_av | Lines: 36


Example 11: ALOGV

status_t CameraSource::start(MetaData *meta) {
    ALOGV("start");
    CHECK(!mStarted);

    if (mInitCheck != OK) {
        ALOGE("CameraSource is not initialized yet");
        return mInitCheck;
    }

    char value[PROPERTY_VALUE_MAX];
    if (property_get("media.stagefright.record-stats", value, NULL)
        && (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
        mCollectStats = true;
    }

    mStartTimeUs = 0;
    mNumInputBuffers = 0;

    if (meta) {
        int64_t startTimeUs;
        if (meta->findInt64(kKeyTime, &startTimeUs)) {
            mStartTimeUs = startTimeUs;
        }

        int32_t nBuffers;
        if (meta->findInt32(kKeyNumBuffers, &nBuffers)) {
            CHECK_GT(nBuffers, 0);
            mNumInputBuffers = nBuffers;
        }
    }

    status_t err;
    if ((err = startCameraRecording()) == OK) {
        mStarted = true;
    }

    return err;
}
Developer: SundownerROM | Project: frameworks_av | Lines: 36


Example 12: TEST

TEST(Logging, CheckOpFail)
{
    int i1 = 1;
    int i2 = 2;
    unsigned u1 = 3;
    unsigned u2 = 4;
    float f1 = 5.5f;
    float f2 = 6.6f;
    int* p1 = &i1;
    int* p2 = &i2;
    char const * message = "message";

    EXPECT_THROW(CHECK_NE(i1, i1) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_NE(u1, u1) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_NE(f1, f1) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_NE(p1, p1) << message, Logging::CheckException);

    EXPECT_THROW(CHECK_EQ(i1, i2) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_EQ(u1, u2) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_EQ(f1, f2) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_EQ(p1, p2) << message, Logging::CheckException);

    EXPECT_THROW(CHECK_GT(i1, i2) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_GT(u1, u2) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_GT(f1, f2) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_GT(i1, i1) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_GT(u1, u2) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_GT(f1, f1) << message, Logging::CheckException);

    EXPECT_THROW(CHECK_LT(i2, i1) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_LT(u2, u1) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_LT(f2, f1) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_LT(i1, i1) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_LT(u2, u1) << message, Logging::CheckException);
    EXPECT_THROW(CHECK_LT(f1, f1) << message, Logging::CheckException);
}
Developer: BitFunnel | Project: BitFunnel | Lines: 38


Example 13: CHECK_GE

template <typename Dtype>
void SliceLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  Layer<Dtype>::SetUp(bottom, top);
  const SliceParameter& slice_param = this->layer_param_.slice_param();
  slice_dim_ = slice_param.slice_dim();
  CHECK_GE(slice_dim_, 0);
  CHECK_LE(slice_dim_, 1) << "Can only slice num and channels";
  slice_point_.clear();
  std::copy(slice_param.slice_point().begin(),
      slice_param.slice_point().end(),
      std::back_inserter(slice_point_));
  count_ = 0;
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  if (slice_point_.size() != 0) {
    CHECK_EQ(slice_point_.size(), top->size() - 1);
    if (slice_dim_ == 0) {
      CHECK_LE(top->size(), num_);
    } else {
      CHECK_LE(top->size(), channels_);
    }
    int prev = 0;
    vector<int> slices;
    for (int i = 0; i < slice_point_.size(); ++i) {
      CHECK_GT(slice_point_[i], prev);
      slices.push_back(slice_point_[i] - prev);
      prev = slice_point_[i];
    }
    if (slice_dim_ == 0) {
      slices.push_back(num_ - prev);
      for (int i = 0; i < top->size(); ++i) {
        (*top)[i]->Reshape(slices[i], channels_, height_, width_);
        count_ += (*top)[i]->count();
      }
    } else {
      slices.push_back(channels_ - prev);
      for (int i = 0; i < top->size(); ++i) {
        (*top)[i]->Reshape(num_, slices[i], height_, width_);
        count_ += (*top)[i]->count();
      }
    }
  } else {
    if (slice_dim_ == 0) {
      CHECK_EQ(num_ % top->size(), 0)
          << "Number of top blobs (" << top->size() << ") "
          << "should evenly divide input num (" << num_ << ")";
      num_ = num_ / top->size();
    } else {
      CHECK_EQ(channels_ % top->size(), 0)
          << "Number of top blobs (" << top->size() << ") "
          << "should evenly divide input channels (" << channels_ << ")";
      channels_ = channels_ / top->size();
    }
    for (int i = 0; i < top->size(); ++i) {
      (*top)[i]->Reshape(num_, channels_, height_, width_);
      count_ += (*top)[i]->count();
    }
  }
  CHECK_EQ(count_, bottom[0]->count());
}
Developer: FangZhenpeng | Project: caffe | Lines: 63


Example 14: CHECK

template <typename Dtype>
void ImageDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int new_height = this->layer_param_.image_data_param().new_height();
  const int new_width  = this->layer_param_.image_data_param().new_width();
  const bool is_color  = this->layer_param_.image_data_param().is_color();
  string root_folder = this->layer_param_.image_data_param().root_folder();

  CHECK((new_height == 0 && new_width == 0) ||
        (new_height > 0 && new_width > 0)) << "Current implementation requires "
      "new_height and new_width to be set at the same time.";
  // Read the file with filenames and labels
  const string& source = this->layer_param_.image_data_param().source();
  LOG(INFO) << "Opening file " << source;
  std::ifstream infile(source.c_str());
  string filename;
  int label;
  while (infile >> filename >> label) {
    lines_.push_back(std::make_pair(filename, label));
  }

  if (this->layer_param_.image_data_param().shuffle()) {
    // randomly shuffle data
    LOG(INFO) << "Shuffling data";
    const unsigned int prefetch_rng_seed = caffe_rng_rand();
    prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed));
    ShuffleImages();
  }
  LOG(INFO) << "A total of " << lines_.size() << " images.";

  lines_id_ = 0;
  // Check if we would need to randomly skip a few data points
  if (this->layer_param_.image_data_param().rand_skip()) {
    unsigned int skip = caffe_rng_rand() %
        this->layer_param_.image_data_param().rand_skip();
    LOG(INFO) << "Skipping first " << skip << " data points.";
    CHECK_GT(lines_.size(), skip) << "Not enough points to skip";
    lines_id_ = skip;
  }
  // Read an image, and use it to initialize the top blob.
  cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
                                    new_height, new_width, is_color);
  const int channels = cv_img.channels();
  const int height = cv_img.rows;
  const int width = cv_img.cols;
  // image
  const int crop_size = this->layer_param_.transform_param().crop_size();
  const int batch_size = this->layer_param_.image_data_param().batch_size();
  if (crop_size > 0) {
    top[0]->Reshape(batch_size, channels, crop_size, crop_size);
    this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size);
    this->transformed_data_.Reshape(1, channels, crop_size, crop_size);
  } else {
    top[0]->Reshape(batch_size, channels, height, width);
    this->prefetch_data_.Reshape(batch_size, channels, height, width);
    this->transformed_data_.Reshape(1, channels, height, width);
  }
  LOG(INFO) << "output data size: " << top[0]->num() << ","
            << top[0]->channels() << "," << top[0]->height() << ","
            << top[0]->width();
  // label
  top[1]->Reshape(batch_size, 1, 1, 1);
  this->prefetch_label_.Reshape(batch_size, 1, 1, 1);
}
Developer: corba777 | Project: caffe | Lines: 63


Example 15: CHECK

template <typename Dtype>
void Solver<Dtype>::InitTestNets() {
  CHECK(Caffe::root_solver());
  const bool has_net_param = param_.has_net_param();
  const bool has_net_file = param_.has_net();
  const int num_generic_nets = has_net_param + has_net_file;
  CHECK_LE(num_generic_nets, 1)
      << "Both net_param and net_file may not be specified.";
  const int num_test_net_params = param_.test_net_param_size();
  const int num_test_net_files = param_.test_net_size();
  const int num_test_nets = num_test_net_params + num_test_net_files;
  if (num_generic_nets) {
    CHECK_GE(param_.test_iter_size(), num_test_nets)
        << "test_iter must be specified for each test network.";
  } else {
    CHECK_EQ(param_.test_iter_size(), num_test_nets)
        << "test_iter must be specified for each test network.";
  }
  // If we have a generic net (specified by net or net_param, rather than
  // test_net or test_net_param), we may have an unlimited number of actual
  // test networks -- the actual number is given by the number of remaining
  // test_iters after any test nets specified by test_net_param and/or test_net
  // are evaluated.
  const int num_generic_net_instances = param_.test_iter_size() - num_test_nets;
  const int num_test_net_instances = num_test_nets + num_generic_net_instances;
  if (param_.test_state_size()) {
    CHECK_EQ(param_.test_state_size(), num_test_net_instances)
        << "test_state must be unspecified or specified once per test net.";
  }
  if (num_test_net_instances) {
    CHECK_GT(param_.test_interval(), 0);
  }
  int test_net_id = 0;
  vector<string> sources(num_test_net_instances);
  vector<NetParameter> net_params(num_test_net_instances);
  for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) {
    sources[test_net_id] = "test_net_param";
    net_params[test_net_id].CopyFrom(param_.test_net_param(i));
  }
  for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) {
    sources[test_net_id] = "test_net file: " + param_.test_net(i);
    ReadNetParamsFromTextFileOrDie(param_.test_net(i),
        &net_params[test_net_id]);
  }
  const int remaining_test_nets = param_.test_iter_size() - test_net_id;
  if (has_net_param) {
    for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) {
      sources[test_net_id] = "net_param";
      net_params[test_net_id].CopyFrom(param_.net_param());
    }
  }
  if (has_net_file) {
    for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) {
      sources[test_net_id] = "net file: " + param_.net();
      ReadNetParamsFromTextFileOrDie(param_.net(), &net_params[test_net_id]);
    }
  }
  test_nets_.resize(num_test_net_instances);
  for (int i = 0; i < num_test_net_instances; ++i) {
    // Set the correct NetState.  We start with the solver defaults (lowest
    // precedence); then, merge in any NetState specified by the net_param
    // itself; finally, merge in any NetState specified by the test_state
    // (highest precedence).
    NetState net_state;
    net_state.set_phase(TEST);
    net_state.MergeFrom(net_params[i].state());
    if (param_.test_state_size()) {
      net_state.MergeFrom(param_.test_state(i));
    }
    net_params[i].mutable_state()->CopyFrom(net_state);
    LOG(INFO)
        << "Creating test net (#" << i << ") specified by " << sources[i];
    if (Caffe::root_solver()) {
      test_nets_[i].reset(new Net<Dtype>(net_params[i]));
    } else {
      test_nets_[i].reset(new Net<Dtype>(net_params[i],
          root_solver_->test_nets_[i].get()));
    }
    test_nets_[i]->set_debug_info(param_.debug_info());
  }
}
Developer: sixsamuraisoldier | Project: caffe | Lines: 80


Example 16: CHECK

template <typename Dtype>
void MKLPoolingLayer<Dtype>::Init(
      const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  PoolingParameter pool_param = this->layer_param_.pooling_param();

  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  num_ = bottom[0]->num();

  if (pool_param.global_pooling()) {
    CHECK(!(pool_param.has_kernel_size() ||
      pool_param.has_kernel_h() || pool_param.has_kernel_w()))
      << "With Global_pooling: true Filter size cannot specified";
  } else {
    CHECK(!pool_param.has_kernel_size() !=
      !(pool_param.has_kernel_h() && pool_param.has_kernel_w()))
      << "Filter size is kernel_size OR kernel_h and kernel_w; not both";
    CHECK(pool_param.has_kernel_size() ||
      (pool_param.has_kernel_h() && pool_param.has_kernel_w()))
      << "For non-square filters both kernel_h and kernel_w are required.";
  }
  CHECK((!pool_param.has_pad() && pool_param.has_pad_h()
      && pool_param.has_pad_w())
      || (!pool_param.has_pad_h() && !pool_param.has_pad_w()))
      << "pad is pad OR pad_h and pad_w are required.";
  CHECK((!pool_param.has_stride() && pool_param.has_stride_h()
      && pool_param.has_stride_w())
      || (!pool_param.has_stride_h() && !pool_param.has_stride_w()))
      << "Stride is stride OR stride_h and stride_w are required.";
  global_pooling_ = pool_param.global_pooling();
  if (global_pooling_) {
    kernel_h_ = bottom[0]->height();
    kernel_w_ = bottom[0]->width();
  } else {
    if (pool_param.has_kernel_size()) {
      kernel_h_ = kernel_w_ = pool_param.kernel_size();
    } else {
      kernel_h_ = pool_param.kernel_h();
      kernel_w_ = pool_param.kernel_w();
    }
  }
  CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
  CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
  if (!pool_param.has_pad_h()) {
    pad_h_ = pad_w_ = pool_param.pad();
  } else {
    pad_h_ = pool_param.pad_h();
    pad_w_ = pool_param.pad_w();
  }
  if (!pool_param.has_stride_h()) {
    stride_h_ = stride_w_ = pool_param.stride();
  } else {
    stride_h_ = pool_param.stride_h();
    stride_w_ = pool_param.stride_w();
  }
  if (global_pooling_) {
    CHECK(pad_h_ == 0 && pad_w_ == 0 && stride_h_ == 1 && stride_w_ == 1)
      << "With Global_pooling: true; only pad = 0 and stride = 1";
  }
  if (pad_h_ != 0 || pad_w_ != 0) {
    CHECK(this->layer_param_.pooling_param().pool()
        == PoolingParameter_PoolMethod_AVE
        || this->layer_param_.pooling_param().pool()
        == PoolingParameter_PoolMethod_MAX)
        << "Padding implemented only for average and max pooling.";
    CHECK_LT(pad_h_, kernel_h_);
    CHECK_LT(pad_w_, kernel_w_);
  }
  pooled_height_ = static_cast<int>(ceil(static_cast<float>(
      bottom[0]->height() + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
  pooled_width_ = static_cast<int>(ceil(static_cast<float>(
      bottom[0]->width() + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
  if (pad_h_ || pad_w_) {
    // If we have padding, ensure that the last pooling starts strictly
    // inside the image (instead of at the padding); otherwise clip the last.
    if ((pooled_height_ - 1) * stride_h_ >= bottom[0]->height() + pad_h_) {
      --pooled_height_;
    }
    if ((pooled_width_ - 1) * stride_w_ >= bottom[0]->width() + pad_w_) {
      --pooled_width_;
    }
    CHECK_LT((pooled_height_ - 1) * stride_h_, bottom[0]->height() + pad_h_);
    CHECK_LT((pooled_width_ - 1) * stride_w_, bottom[0]->width() + pad_w_);
  }
  top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_,
      pooled_width_);
  if (top.size() > 1) {
    (reinterpret_cast<Blob<size_t>*>(top[1]))->Reshape(bottom[0]->num(),
        channels_, pooled_height_, pooled_width_);
  }
  // If max/min/avg pooling, we will initialize the vector index part.
  if (top.size() == 1) {
    max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_,
        pooled_width_);
  }
  // If stochastic pooling, we will initialize the random index part.
  if (this->layer_param_.pooling_param().pool() ==
      // ... (remainder of the code omitted in the source) ...
Developer: huanleo | Project: caffe | Lines: 101


Example 17: CHECK_EQ

template <typename Dtype>
void NonLocalLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
      << "corresponding to (num, channels, height, width)";
  // Configure the kernel size, padding, stride, and inputs.
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  /*CHECK(!conv_param.has_kernel_size() !=
      !(conv_param.has_kernel_h() && conv_param.has_kernel_w()))
      << "Filter size is kernel_size OR kernel_h and kernel_w; not both";
  CHECK(conv_param.has_kernel_size() ||
      (conv_param.has_kernel_h() && conv_param.has_kernel_w()))
      << "For non-square filters both kernel_h and kernel_w are required.";
  CHECK((!conv_param.has_pad() && conv_param.has_pad_h()
      && conv_param.has_pad_w())
      || (!conv_param.has_pad_h() && !conv_param.has_pad_w()))
      << "pad is pad OR pad_h and pad_w are required.";
  CHECK((!conv_param.has_stride() && conv_param.has_stride_h()
      && conv_param.has_stride_w())
      || (!conv_param.has_stride_h() && !conv_param.has_stride_w()))
      << "Stride is stride OR stride_h and stride_w are required.";
  if (conv_param.has_kernel_size()) {
    kernel_h_ = kernel_w_ = conv_param.kernel_size();
  } else {
    kernel_h_ = conv_param.kernel_h();
    kernel_w_ = conv_param.kernel_w();
  }
  CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
  CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
  if (!conv_param.has_pad_h()) {
    pad_h_ = pad_w_ = conv_param.pad();
  } else {
    pad_h_ = conv_param.pad_h();
    pad_w_ = conv_param.pad_w();
  }
  if (!conv_param.has_stride_h()) {
    stride_h_ = stride_w_ = conv_param.stride();
  } else {
    stride_h_ = conv_param.stride_h();
    stride_w_ = conv_param.stride_w();
  }*/
  // kernel
  if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_h_ = conv_param.kernel_h();
    kernel_w_ = conv_param.kernel_w();
  } else {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1)
        << "kernel_size must be specified once, or once per spatial dimension "
        << "(kernel_size specified " << num_kernel_dims << " times; ";
    kernel_h_ = kernel_w_ = conv_param.kernel_size(0);
  }
  CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
  CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
  // stride
  if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
    CHECK_EQ(0, conv_param.stride_size())
        << "Either stride or stride_h/w should be specified; not both.";
    stride_h_ = conv_param.stride_h();
    stride_w_ = conv_param.stride_w();
  } else {
    const int num_stride_dims = conv_param.stride_size();
    CHECK(num_stride_dims == 0 || num_stride_dims == 1)
        << "stride must be specified once, or once per spatial dimension "
        << "(stride specified " << num_stride_dims << " times; ";
    const int kDefaultStride = 1;
    stride_h_ = stride_w_ = (num_stride_dims == 0) ?
        kDefaultStride : conv_param.stride(0);
  }
  // pad
  if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
    CHECK_EQ(0, conv_param.pad_size())
        << "Either pad or pad_h/w should be specified; not both.";
    pad_h_ = conv_param.pad_h();
    pad_w_ = conv_param.pad_w();
  } else {
    const int num_pad_dims = conv_param.pad_size();
    CHECK(num_pad_dims == 0 || num_pad_dims == 1)
        << "pad must be specified once, or once per spatial dimension "
        << "(pad specified " << num_pad_dims << " times; ";
    const int kDefaultPad = 0;
    pad_h_ = pad_w_ = (num_pad_dims == 0) ? kDefaultPad : conv_param.pad(0);
  }
  // Special case: im2col is the identity for 1x1 convolution with stride 1
  // and no padding, so flag for skipping the buffer and transformation.
  is_1x1_ = kernel_w_ == 1 && kernel_h_ == 1
      && stride_h_ == 1 && stride_w_ == 1 && pad_h_ == 0 && pad_w_ == 0;
  // Configure output channels and groups.
  channels_ = bottom[0]->channels();
  num_output_ = channels_ * kernel_h_ * kernel_w_;
  // ... (remainder of the code omitted in the source) ...
Developer: xieguotian | Project: caffe | Lines: 101


Example 18: CHECK_GE

template <typename Dtype>
void Im2colLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  force_nd_im2col_ = conv_param.force_nd_im2col();
  const int input_num_dims = bottom[0]->shape().size();
  channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis());
  const int first_spatial_dim = channel_axis_ + 1;
  num_spatial_axes_ = input_num_dims - first_spatial_dim;
  CHECK_GE(num_spatial_axes_, 1);
  vector<int> dim_blob_shape(1, num_spatial_axes_);
  // Setup filter kernel dimensions (kernel_shape_).
  kernel_shape_.Reshape(dim_blob_shape);
  int* kernel_shape_data = kernel_shape_.mutable_cpu_data();
  if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "kernel_h & kernel_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_shape_data[0] = conv_param.kernel_h();
    kernel_shape_data[1] = conv_param.kernel_w();
  } else {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)
        << "kernel_size must be specified once, or once per spatial dimension "
        << "(kernel_size specified " << num_kernel_dims << " times; "
        << num_spatial_axes_ << " spatial dims);";
    for (int i = 0; i < num_spatial_axes_; ++i) {
      kernel_shape_data[i] =
          conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i);
    }
  }
  for (int i = 0; i < num_spatial_axes_; ++i) {
    CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero.";
  }
  // Setup stride dimensions (stride_).
  stride_.Reshape(dim_blob_shape);
  int* stride_data = stride_.mutable_cpu_data();
  if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "stride_h & stride_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.stride_size())
        << "Either stride or stride_h/w should be specified; not both.";
    stride_data[0] = conv_param.stride_h();
    stride_data[1] = conv_param.stride_w();
  } else {
    const int num_stride_dims = conv_param.stride_size();
    CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||
          num_stride_dims == num_spatial_axes_)
        << "stride must be specified once, or once per spatial dimension "
        << "(stride specified " << num_stride_dims << " times; "
        << num_spatial_axes_ << " spatial dims);";
    const int kDefaultStride = 1;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      stride_data[i] = (num_stride_dims == 0) ? kDefaultStride :
          conv_param.stride((num_stride_dims == 1) ? 0 : i);
      CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero.";
    }
  }
  // Setup pad dimensions (pad_).
  pad_.Reshape(dim_blob_shape);
  int* pad_data = pad_.mutable_cpu_data();
  if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "pad_h & pad_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.pad_size())
        << "Either pad or pad_h/w should be specified; not both.";
    pad_data[0] = conv_param.pad_h();
    pad_data[1] = conv_param.pad_w();
  } else {
    const int num_pad_dims = conv_param.pad_size();
    CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||
          num_pad_dims == num_spatial_axes_)
        << "pad must be specified once, or once per spatial dimension "
        << "(pad specified " << num_pad_dims << " times; "
        << num_spatial_axes_ << " spatial dims);";
    const int kDefaultPad = 0;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      pad_data[i] = (num_pad_dims == 0) ? kDefaultPad :
          conv_param.pad((num_pad_dims == 1) ? 0 : i);
    }
  }
}
Developer: ALISCIFP | Project: caffe-stn | Lines: 82


Example 19: CHECK_EQ

template <typename Dtype>
void DataTransformer<Dtype>::Transform(const cv::Mat& cv_img,
                                       Blob<Dtype>* transformed_blob,
                                       bool fixed_trans) {
  const int crop_size = param_.crop_size();
  const int img_channels = cv_img.channels();
  const int img_height = cv_img.rows;
  const int img_width = cv_img.cols;

  // Check dimensions.
  const int channels = transformed_blob->channels();
  const int height = transformed_blob->height();
  const int width = transformed_blob->width();
  const int num = transformed_blob->num();

  CHECK_EQ(channels, img_channels);
  CHECK_LE(height, img_height);
  CHECK_LE(width, img_width);
  CHECK_GE(num, 1);

  CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte";

  const Dtype scale = param_.scale();
  const bool do_mirror = (param_.mirror() && Rand(2)) || (fixed_trans && apply_mirror_);
  if (!fixed_trans) apply_mirror_ = do_mirror;
  const bool has_mean_file = param_.has_mean_file();
  const bool has_mean_values = mean_values_.size() > 0;

  CHECK_GT(img_channels, 0);
  CHECK_GE(img_height, crop_size);
  CHECK_GE(img_width, crop_size);

  Dtype* mean = NULL;
  if (has_mean_file) {
    CHECK_EQ(img_channels, data_mean_.channels());
    CHECK_EQ(img_height, data_mean_.height());
    CHECK_EQ(img_width, data_mean_.width());
    mean = data_mean_.mutable_cpu_data();
  }
  if (has_mean_values) {
    CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) <<
        "Specify either 1 mean_value or as many as channels: " << img_channels;
    if (img_channels > 1 && mean_values_.size() == 1) {
      // Replicate the mean_value for simplicity
      for (int c = 1; c < img_channels; ++c) {
        mean_values_.push_back(mean_values_[0]);
      }
    }
  }

  int h_off = 0;
  int w_off = 0;
  cv::Mat cv_cropped_img = cv_img;
  if (crop_size) {
    CHECK_EQ(crop_size, height);
    CHECK_EQ(crop_size, width);
    // We only do random crop when we do training.
    if (phase_ == TRAIN) {
      h_off = Rand(img_height - crop_size + 1);
      w_off = Rand(img_width - crop_size + 1);
    } else {
      h_off = (img_height - crop_size) / 2;
      w_off = (img_width - crop_size) / 2;
    }
    if (fixed_trans) {
      h_off = offset_h_;
      w_off = offset_w_;
    } else {
      offset_h_ = h_off;
      offset_w_ = w_off;
    }
    cv::Rect roi(w_off, h_off, crop_size, crop_size);
    cv_cropped_img = cv_img(roi);
  } else {
    CHECK_EQ(img_height, height);
    CHECK_EQ(img_width, width);
  }

  CHECK(cv_cropped_img.data);

  Dtype* transformed_data = transformed_blob->mutable_cpu_data();
  int top_index;
  for (int h = 0; h < height; ++h) {
    const uchar* ptr = cv_cropped_img.ptr<uchar>(h);
    int img_index = 0;
    for (int w = 0; w < width; ++w) {
      for (int c = 0; c < img_channels; ++c) {
        if (do_mirror) {
          top_index = (c * height + h) * width + (width - 1 - w);
        } else {
          top_index = (c * height + h) * width + w;
        }
        // int top_index = (c * height + h) * width + w;
        Dtype pixel = static_cast<Dtype>(ptr[img_index++]);
        if (has_mean_file) {
          int mean_index = (c * img_height + h_off + h) * img_width + w_off + w;
          transformed_data[top_index] =
              (pixel - mean[mean_index]) * scale;
        } else {
          if (has_mean_values) {
            transformed_data[top_index] =
                (pixel - mean_values_[c]) * scale;
            // ... (remainder of the code omitted in the source) ...
Developer: Rt0220 | Project: caffe | Lines: 101


Example 20: evaluate

std::string evaluate(const Model& model,
                     const Data& validation_data,
                     const Data& train_data = Data()) const {
  CHECK_GT(validation_data.size(), 0);
  auto validation_user_itemset = validation_data.get_feature_to_set_hashtable(0, 1);
  std::unordered_map<size_t, std::unordered_set<size_t>> train_user_itemset;
  if (train_data.size() != 0) {
    train_user_itemset = train_data.get_feature_to_set_hashtable(0, 1);
  }

  size_t num_users = train_data.feature_group_total_dimension(0);
  CHECK_EQ(num_users, train_user_itemset.size());
  std::vector<std::vector<double>> user_rets(num_users);
  parallel_for(0, num_users, [&](size_t uid) {
    user_rets[uid] = std::vector<double>(8, 0.);
  });
  dynamic_parallel_for(0, num_users, [&](size_t uid) {
  //for (size_t uid = 0; uid < num_users; ++uid) {
    auto iter = validation_user_itemset.find(uid);
    if (iter == validation_user_itemset.end()) return;
    auto train_it = train_user_itemset.find(iter->first);
    CHECK(train_it != train_user_itemset.end());
    std::unordered_set<size_t>& validation_set = iter->second;
    // Models are required to have this function
    auto rec_list = model.recommend(iter->first, 10, train_it->second);

    for (auto& rec_iid : rec_list) {
      CHECK_LT(rec_iid, train_data.feature_group_total_dimension(1));
    }
    for (auto& iid : validation_set) {
      CHECK_LT(iid, train_data.feature_group_total_dimension(1));
    }
    auto eval_rets = evaluate_rec_list(rec_list, validation_set);
    //std::transform(rets.begin(), rets.end(), eval_rets.begin(), rets.begin(),
    //               std::plus<double>());
    user_rets[uid].assign(eval_rets.begin(), eval_rets.end());
  });
  //}
  double num_users_for_test = static_cast<double>(validation_user_itemset.size());
  std::vector<double> rets(8, 0.);
  parallel_for(0, 8, [&](size_t colid) {
    for (size_t uid = 0; uid < num_users; ++uid) {
      rets[colid] += user_rets[uid][colid] / num_users_for_test;
    }
  });
  std::stringstream ss;
  ss << std::setw(8) << std::setprecision(5) << rets[0] << "|"
     << std::setw(8) << std::setprecision(5) << rets[1] << "|"
     << std::setw(8) << std::setprecision(5) << rets[2] << "|"
     << std::setw(8) << std::setprecision(5) << rets[3] << "|"
     << std::setw(8) << std::setprecision(5) << rets[4] << "|"
     << std::setw(8) << std::setprecision(5) << rets[5] << "|"
     << std::setw(8) << std::setprecision(5) << rets[6] << "|"
     << std::setw(8) << std::setprecision(5) << rets[7];  // << "|"
     //<< std::setw(8) << std::setprecision(5) << rets[8] << "|"
     //<< std::setw(8) << std::setprecision(5) << rets[9];
  return ss.str();
}
Developer: buptqitian | Project: CDAE | Lines: 62


Example 21: CHECK

template <typename Dtype>
void MultiImageDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
                                                const vector<Blob<Dtype>*>& top) {
  const int new_height = this->layer_param_.multi_image_data_param().new_height();
  const int new_width  = this->layer_param_.multi_image_data_param().new_width();
  const bool is_color  = this->layer_param_.multi_image_data_param().is_color();
  string root_folder = this->layer_param_.multi_image_data_param().root_folder();
  const int num_images = this->layer_param_.multi_image_data_param().num_images();

  CHECK((new_height == 0 && new_width == 0) ||
        (new_height > 0 && new_width > 0)) << "Current implementation requires "
      "new_height and new_width to be set at the same time.";
  CHECK_GT(num_images, 0) << "The number of images should be positive.";

  // Read the file with filenames and labels
  const string& source = this->layer_param_.multi_image_data_param().source();
  LOG(INFO) << "Opening file " << source;
  std::ifstream infile(source.c_str());
  std::string line;
  while (std::getline(infile, line)) {
    std::istringstream iss(line);
    std::vector<string> filenames(num_images);
    for (int image_index = 0; image_index < num_images; image_index++)
      iss >> filenames[image_index];
    int label;
    iss >> label;
    lines_.push_back(std::make_pair(filenames, label));
  }

  if (this->layer_param_.multi_image_data_param().shuffle()) {
    // randomly shuffle data
    LOG(INFO) << "Shuffling data";
    const unsigned int prefetch_rng_seed = caffe_rng_rand();
    prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed));
    ShuffleImages();
  }
  LOG(INFO) << "A total of " << lines_.size() << " x " << num_images << " images.";

  lines_id_ = 0;
  // Check if we would need to randomly skip a few data points
  if (this->layer_param_.multi_image_data_param().rand_skip()) {
    unsigned int skip = caffe_rng_rand() %
        this->layer_param_.multi_image_data_param().rand_skip();
    LOG(INFO) << "Skipping first " << skip << " data points.";
    CHECK_GT(lines_.size(), skip) << "Not enough points to skip";
    lines_id_ = skip;
  }
  // Read an image, and use it to initialize the top blob.
  CHECK(lines_[lines_id_].first.size()) << "There is no image in the first line.";
  cv::Mat cv_img = ReadImageToCVMat(root_folder + *lines_[lines_id_].first.begin(),
                                    new_height, new_width, is_color);
  CHECK(cv_img.data) << "Could not load " << *lines_[lines_id_].first.begin();
  // Use data_transformer to infer the expected blob shape from a cv_image.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
  this->transformed_data_.Reshape(top_shape);
  top_shape[1] *= num_images;
  // Reshape prefetch_data and top[0] according to the batch_size.
  const int batch_size = this->layer_param_.multi_image_data_param().batch_size();
  CHECK_GT(batch_size, 0) << "Positive batch size required";
  top_shape[0] = batch_size;
  for (int i = 0; i < this->PREFETCH_COUNT; ++i) {
    this->prefetch_[i].data_.Reshape(top_shape);
  }
  top[0]->Reshape(top_shape);
  LOG(INFO) << "output data size: " << top[0]->num() << ","
            << top[0]->channels() << "," << top[0]->height() << ","
            << top[0]->width();
  // label
  vector<int> label_shape(1, batch_size);
  top[1]->Reshape(label_shape);
  for (int i = 0; i < this->PREFETCH_COUNT; ++i) {
    this->prefetch_[i].label_.Reshape(label_shape);
  }
}
Developer: art-programmer | Project: DeepLearningLearningSession | Lines: 73


Example 22: while

template <typename Dtype>
void CropLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Construct a map from top blobs to layer inds, skipping over in-place
  // connections.
  map<Blob<Dtype>*, int> down_map;
  for (int layer_ind = 0; layer_ind < this->net_->top_vecs().size();
       ++layer_ind) {
    vector<Blob<Dtype>*> tops = this->net_->top_vecs()[layer_ind];
    for (int top_ind = 0; top_ind < tops.size(); ++top_ind) {
      if (down_map.find(tops[top_ind]) == down_map.end()) {
        down_map[tops[top_ind]] = layer_ind;
      }
    }
  }
  // Walk back from the first bottom, keeping track of all the blobs we pass.
  set<Blob<Dtype>*> path_blobs;
  Blob<Dtype>* blob = bottom[0];
  int layer_ind;
  // TODO this logic can be simplified if all blobs are tops
  path_blobs.insert(blob);
  while (down_map.find(blob) != down_map.end()) {
    layer_ind = down_map[blob];
    if (this->net_->bottom_vecs()[layer_ind].size() == 0) {
      break;
    }
    blob = this->net_->bottom_vecs()[layer_ind][0];
    path_blobs.insert(blob);
  }
  // Now walk back from the second bottom, until we find a blob of intersection.
  Blob<Dtype>* inter_blob = bottom[1];
  while (path_blobs.find(inter_blob) == path_blobs.end()) {
    CHECK(down_map.find(inter_blob) != down_map.end())
        << "Cannot align apparently disconnected blobs.";
    layer_ind = down_map[inter_blob];
    CHECK_GT(this->net_->bottom_vecs()[layer_ind].size(), 0)
        << "Cannot align apparently disconnected blobs.";
    inter_blob = this->net_->bottom_vecs()[layer_ind][0];
  }
  // Compute the coord map from the blob of intersection to each bottom.
  vector<DiagonalAffineMap<Dtype> > coord_maps(2,
      DiagonalAffineMap<Dtype>::identity(2));
  for (int i = 0; i < 2; ++i) {
    for (Blob<Dtype>* blob = bottom[i]; blob != inter_blob;
         blob = this->net_->bottom_vecs()[down_map[blob]][0]) {
      shared_ptr<Layer<Dtype> > layer = this->net_->layers()[down_map[blob]];
      //std::cout<<i<<"} "<<coord_maps[i].coefs()[0].first<<","<<coord_maps[i].coefs()[0].second<<"; "<<coord_maps[i].coefs()[1].first<<","<<coord_maps[i].coefs()[1].second<<" compose with "<<layer->coord_map().coefs()[0].first<<","<<layer->coord_map().coefs()[0].first<<"; "<<layer->coord_map().coefs()[1].first<<","<<layer->coord_map().coefs()[1].first<<std::endl;
      coord_maps[i] = coord_maps[i].compose(layer->coord_map());
      //std::cout<<"    is "<<coord_maps[i].coefs()[0].first<<","<<coord_maps[i].coefs()[0].second<<"; "<<coord_maps[i].coefs()[1].first<<","<<coord_maps[i].coefs()[1].second<<std::endl;
    }
  }
  // Compute the mapping from first bottom coordinates to second.
  DiagonalAffineMap<Dtype> crop_map =
      coord_maps[1].compose(coord_maps[0].inv());
  /*std::cout<<"cood_maps[0]="<<coord_maps[0].coefs()[0].first<<","<<coord_maps[0].coefs()[0].second<<std::endl;
  std::cout<<"cood_maps[0]="<<coord_maps[0].coefs()[1].first<<","<<coord_maps[0].coefs()[1].second<<std::endl;
  std::cout<<"cood_maps[1]="<<coord_maps[1].coefs()[0].first<<","<<coord_maps[1].coefs()[0].second<<std::endl;
  std::cout<<"cood_maps[1]="<<coord_maps[1].coefs()[1].first<<","<<coord_maps[1].coefs()[1].second<<std::endl;
  std::cout<<"cood_map="<<crop_map.coefs()[0].first<<","<<crop_map.coefs()[0].second<<std::endl;
  std::cout<<"crop_map="<<crop_map.coefs()[1].first<<","<<crop_map.coefs()[1].second<<std::endl;*/
  for (int i = 0; i < 2; ++i) {
    // Check for scale mismatch (unfortunately, CHECK_DOUBLE_EQ does not
    // support a message like the other CHECKs).
    CHECK_DOUBLE_EQ(crop_map.coefs()[i].first, 1);
    CHECK_LE(crop_map.coefs()[i].second, 0) << "Negative crop width.";
    // Check that the crop width is an integer.
    CHECK_DOUBLE_EQ(crop_map.coefs()[i].second,
        round(crop_map.coefs()[i].second));
  }
  crop_h_ = -round(crop_map.coefs()[0].second);
  crop_w_ = -round(crop_map.coefs()[1].second);
}
Developer: herobd | Project: dsb_caffe | Lines: 71


Example 23: CHECK_GE

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Configure the kernel size, padding, stride, and inputs.
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  force_nd_im2col_ = conv_param.force_nd_im2col();
  channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis());
  const int num_axes = bottom[0]->num_axes();
  if (num_axes == 5 && channel_axis_ == 1 && bottom[0]->shape(2) == 1) {
    forced_3d_ = true;
  } else {
    forced_3d_ = false;
  }
  const int first_spatial_axis = channel_axis_ + 1 + forced_3d_;
  num_spatial_axes_ = num_axes - first_spatial_axis;
  CHECK_GE(num_spatial_axes_, 0);
  vector<int> spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1));
  // Setup filter kernel dimensions (kernel_shape_).
  kernel_shape_.Reshape(spatial_dim_blob_shape);
  int* kernel_shape_data = kernel_shape_.mutable_cpu_data();
  if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "kernel_h & kernel_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_shape_data[0] = conv_param.kernel_h();
    kernel_shape_data[1] = conv_param.kernel_w();
  } else {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)
        << "kernel_size must be specified once, or once per spatial dimension "
        << "(kernel_size specified " << num_kernel_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    for (int i = 0; i < num_spatial_axes_; ++i) {
      kernel_shape_data[i] =
          conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i);
    }
  }
  for (int i = 0; i < num_spatial_axes_; ++i) {
    CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero.";
  }
  // Setup stride dimensions (stride_).
  stride_.Reshape(spatial_dim_blob_shape);
  int* stride_data = stride_.mutable_cpu_data();
  if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "stride_h & stride_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.stride_size())
        << "Either stride or stride_h/w should be specified; not both.";
    stride_data[0] = conv_param.stride_h();
    stride_data[1] = conv_param.stride_w();
  } else {
    const int num_stride_dims = conv_param.stride_size();
    CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||
          num_stride_dims == num_spatial_axes_)
        << "stride must be specified once, or once per spatial dimension "
        << "(stride specified " << num_stride_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultStride = 1;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      stride_data[i] = (num_stride_dims == 0) ? kDefaultStride :
          conv_param.stride((num_stride_dims == 1) ? 0 : i);
      CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero.";
    }
  }
  // Setup pad dimensions (pad_).
  pad_.Reshape(spatial_dim_blob_shape);
  int* pad_data = pad_.mutable_cpu_data();
  if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "pad_h & pad_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.pad_size())
        << "Either pad or pad_h/w should be specified; not both.";
    pad_data[0] = conv_param.pad_h();
    pad_data[1] = conv_param.pad_w();
  } else {
    const int num_pad_dims = conv_param.pad_size();
    CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||
          num_pad_dims == num_spatial_axes_)
        << "pad must be specified once, or once per spatial dimension "
        << "(pad specified " << num_pad_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultPad = 0;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      pad_data[i] = (num_pad_dims == 0) ? kDefaultPad :
          conv_param.pad((num_pad_dims == 1) ? 0 : i);
    }
  }
  // Setup dilation dimensions (dilation_).
  dilation_.Reshape(spatial_dim_blob_shape);
  int* dilation_data = dilation_.mutable_cpu_data();
  const int num_dilation_dims = conv_param.dilation_size();
  CHECK(num_dilation_dims == 0 || num_dilation_dims == 1 ||
        num_dilation_dims == num_spatial_axes_)
      << "dilation must be specified once, or once per spatial dimension "
      << "(dilation specified " << num_dilation_dims << " times; "
      << num_spatial_axes_ << " spatial dims).";
  const int kDefaultDilation = 1;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    dilation_data[i] = (num_dilation_dims == 0) ? kDefaultDilation :
        conv_param.dilation((num_dilation_dims == 1) ? 0 : i);
    // ... (remainder of the code omitted in the source) ...
Developer: chuckcho | Project: video-caffe | Lines: 101


示例24: CHECK

void ImageLabelmapDataLayer<Dtype>::load_batch(LabelmapBatch<Dtype>* batch) {  CPUTimer batch_timer;  batch_timer.Start();  double read_time = 0;  double trans_time = 0;  CPUTimer timer;  CHECK(batch->data_.count());  CHECK(batch->labelmap_.count());  CHECK(this->transformed_data_.count());  CHECK(this->transformed_labelmap_.count());  ImageDataParameter image_data_param = this->layer_param_.image_data_param();  const int batch_size = image_data_param.batch_size();  const int new_height = image_data_param.new_height();  const int new_width = image_data_param.new_width();  const bool is_color = image_data_param.is_color();  string root_folder = image_data_param.root_folder();  // Reshape according to the first image of each batch  // on single input batches allows for inputs of varying dimension.  cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,      new_height, new_width, is_color);  cv::Mat cv_gt = ReadImageToCVMat(root_folder + lines_[lines_id_].second,      new_height, new_width, 0);  CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;  // Use data_transformer to infer the expected blob shape from a cv_img.  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);  vector<int> top_shape_labelmap = this->data_transformer_->InferBlobShape(cv_gt);    this->transformed_data_.Reshape(top_shape);  this->transformed_labelmap_.Reshape(top_shape_labelmap);  // Reshape prefetch_data and top[0] according to the batch_size.  top_shape[0] = batch_size;  top_shape_labelmap[0] = batch_size;    batch->data_.Reshape(top_shape);  batch->labelmap_.Reshape(top_shape_labelmap);  Dtype* prefetch_data = batch->data_.mutable_cpu_data();  Dtype* prefetch_labelmap = batch->labelmap_.mutable_cpu_data();  // datum scales  const int lines_size = lines_.size();  for (int item_id = 0; item_id < batch_size; ++item_id) {    // get a blob    timer.Start();    CHECK_GT(lines_size, lines_id_);    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,                                    0, 0, is_color);    cv::Mat cv_gt = ReadImageToCVMat(root_folder + lines_[lines_id_].second,                                    0, 0, 0);    CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;    const int height = cv_img.rows;    const int width = cv_img.cols;    const int gt_channels = cv_gt.channels();    const int gt_height = cv_gt.rows;    const int gt_width = cv_gt.cols;    CHECK((height == gt_height) && (width == gt_width)) << "GT image size should be equal to true image size";    CHECK(gt_channels == 1) << "GT image channel number should be 1";     if (new_height > 0 && new_width > 0) {        cv::resize(cv_img, cv_img, cv::Size(new_width, new_height));        cv::resize(cv_gt, cv_gt, cv::Size(new_width, new_height), 0, 0, cv::INTER_LINEAR);    }    if (!cv_img.data || !cv_gt.data) {      continue;    }    read_time += timer.MicroSeconds();    timer.Start();    // Apply transformations (mirror, crop...) 
to the image    int offset = batch->data_.offset(item_id);    int offset_gt = batch->labelmap_.offset(item_id);    //CHECK(offset == offset_gt) << "fetching should be synchronized";    this->transformed_data_.set_cpu_data(prefetch_data + offset);    this->transformed_labelmap_.set_cpu_data(prefetch_labelmap + offset_gt);    std::pair<int, int> hw_off = this->data_transformer_->LocTransform(cv_img, &(this->transformed_data_));        cv::Mat encoded_gt;    //regression    encoded_gt = cv_gt/255;    //[***Cautions***]    //One small trick leveraging opencv roundoff feature for **consensus sampling** in Holistically-Nested Edge Detection paper.    //For general binary edge maps this is okay    //For 5-subject aggregated edge maps (BSDS), this will abandon weak edge points labeled by only two or less labelers.    this->data_transformer_->LabelmapTransform(encoded_gt, &(this->transformed_labelmap_), hw_off);        trans_time += timer.MicroSeconds();    // go to the next iter    lines_id_++;    if (lines_id_ >= lines_size) {      // We have reached the end. Restart from the first.      DLOG(INFO) << "Restarting data prefetching from start.";      lines_id_ = 0;//.........这里部分代码省略.........
Developer ID: ilovecv, Project: hed, Lines of code: 101
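The CHECK_GT(lines_size, lines_id_) call above is the recurring pattern in these data layers: glog's CHECK_GT(a, b) logs a fatal message and aborts unless a > b, so the read cursor can never run past the file list. A minimal standalone sketch of the same guard follows; the fetch_next, file_list, and cursor names are hypothetical, and it uses glog directly rather than Caffe:

#include <string>
#include <vector>
#include <glog/logging.h>

// Sketch: guard a list cursor with CHECK_GT before every access.
// CHECK_GT(a, b) aborts with a fatal log message unless a > b.
void fetch_next(const std::vector<std::string>& file_list, int* cursor) {
  const int list_size = file_list.size();
  CHECK_GT(list_size, *cursor) << "cursor " << *cursor
                               << " ran past a list of size " << list_size;
  LOG(INFO) << "Loading " << file_list[*cursor];
  if (++*cursor >= list_size) {
    *cursor = 0;  // wrap around, as the load_batch loops above do
  }
}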


Example 25: CHECK

void FloDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  string root_folder = image_data_param.root_folder();

  // Reshape according to the first flow file of each batch;
  // on single-input batches this allows inputs of varying dimension.
  int xSize, ySize;
  CHECK(readFloFile(root_folder + lines_[lines_id_].first, NULL, xSize, ySize))
      << "Could not load " << lines_[lines_id_].first;

  // Build the blob shape by hand: two channels (u, v) at the flow field's resolution.
  vector<int> top_shape = vector<int>(4);
  top_shape[0] = 1;
  top_shape[1] = 2;
  top_shape[2] = ySize;
  top_shape[3] = xSize;

  //this->transformed_data_.Reshape(top_shape);

  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);

  Dtype* prefetch_data = batch->data_.mutable_cpu_data();

  // datum scales
  const int lines_size = lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);

    read_time += timer.MicroSeconds();
    timer.Start();
    // No mirror/crop transformations here: the flow field is read
    // straight into the prefetch buffer at its native resolution.
    int offset = batch->data_.offset(item_id);
    //this->transformed_data_.set_cpu_data(prefetch_data + offset);

    CHECK(readFloFile(root_folder + lines_[lines_id_].first, prefetch_data + offset, xSize, ySize))
        << "Could not load " << lines_[lines_id_].first;

    //this->data_transformer_->Transform(cv_img, &(this->transformed_data_));

    trans_time += timer.MicroSeconds();
    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Developer ID: ergysr, Project: caffe, Lines of code: 69


Example 26: Rand

void DataTransformer<Dtype>::Transform(const Datum& datum,
                                       Dtype* transformed_data) {
  const string& data = datum.data();
  const int datum_channels = datum.channels();
  const int datum_height = datum.height();
  const int datum_width = datum.width();

  const int crop_size = param_.crop_size();
  const Dtype scale = param_.scale();
  const bool do_mirror = param_.mirror() && Rand(2);
  const bool has_mean_file = param_.has_mean_file();
  const bool has_uint8 = data.size() > 0;
  const bool has_mean_values = mean_values_.size() > 0;

  CHECK_GT(datum_channels, 0);
  CHECK_GE(datum_height, crop_size);
  CHECK_GE(datum_width, crop_size);

  Dtype* mean = NULL;
  if (has_mean_file) {
    CHECK_EQ(datum_channels, data_mean_.channels());
    CHECK_EQ(datum_height, data_mean_.height());
    CHECK_EQ(datum_width, data_mean_.width());
    mean = data_mean_.mutable_cpu_data();
  }
  if (has_mean_values) {
    CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) <<
        "Specify either 1 mean_value or as many as channels: " << datum_channels;
    if (datum_channels > 1 && mean_values_.size() == 1) {
      // Replicate the mean_value for simplicity
      for (int c = 1; c < datum_channels; ++c) {
        mean_values_.push_back(mean_values_[0]);
      }
    }
  }

  int height = datum_height;
  int width = datum_width;

  int h_off = 0;
  int w_off = 0;
  if (crop_size) {
    height = crop_size;
    width = crop_size;
    // We only do random crop when we do training.
    if (phase_ == TRAIN) {
      h_off = Rand(datum_height - crop_size + 1);
      w_off = Rand(datum_width - crop_size + 1);
    } else {
      h_off = (datum_height - crop_size) / 2;
      w_off = (datum_width - crop_size) / 2;
    }
  }

  Dtype datum_element;
  int top_index, data_index;
  for (int c = 0; c < datum_channels; ++c) {
    for (int h = 0; h < height; ++h) {
      for (int w = 0; w < width; ++w) {
        data_index = (c * datum_height + h_off + h) * datum_width + w_off + w;
        if (do_mirror) {
          top_index = (c * height + h) * width + (width - 1 - w);
        } else {
          top_index = (c * height + h) * width + w;
        }
        if (has_uint8) {
          datum_element =
              static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
        } else {
          datum_element = datum.float_data(data_index);
        }
        if (has_mean_file) {
          transformed_data[top_index] =
              (datum_element - mean[data_index]) * scale;
        } else {
          if (has_mean_values) {
            transformed_data[top_index] =
                (datum_element - mean_values_[c]) * scale;
          } else {
            transformed_data[top_index] = datum_element * scale;
          }
        }
      }
    }
  }
}
Developer ID: Rt0220, Project: caffe, Lines of code: 86
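Note how Example 26 pairs CHECK_GT(datum_channels, 0) with CHECK_GE on the spatial dimensions: the channel count must be strictly positive, while height and width only need to be at least the crop size, since a crop equal to the full image is legal. A minimal sketch of the same validation split; validate_shape and its parameter names are hypothetical:

#include <glog/logging.h>

// Sketch: strict CHECK_GT for counts that must be non-zero,
// CHECK_GE where equality is acceptable (crop == full image).
void validate_shape(int channels, int height, int width, int crop_size) {
  CHECK_GT(channels, 0) << "need at least one channel";
  CHECK_GE(height, crop_size) << "image height " << height
                              << " smaller than crop " << crop_size;
  CHECK_GE(width, crop_size) << "image width " << width
                             << " smaller than crop " << crop_size;
}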


Example 27: CHECK_GT

void MultiStageMeanfieldLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  init_cpu = false;
  init_gpu = false;
  const caffe::MultiStageMeanfieldParameter meanfield_param = this->layer_param_.multi_stage_meanfield_param();

  num_iterations_ = meanfield_param.num_iterations();
  CHECK_GT(num_iterations_, 1) << "Number of iterations must be greater than 1.";

  theta_alpha_ = meanfield_param.theta_alpha();
  theta_beta_ = meanfield_param.theta_beta();
  theta_gamma_ = meanfield_param.theta_gamma();

  count_ = bottom[0]->count();
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  num_pixels_ = height_ * width_;

  LOG(INFO) << "This implementation has not been tested with batch size > 1.";

  top[0]->Reshape(num_, channels_, height_, width_);

  // Initialize the parameters that will be updated by backpropagation.
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Multimeanfield layer skipping parameter initialization.";
  } else {
    // blobs_[0] - spatial kernel weights, blobs_[1] - bilateral kernel weights,
    // blobs_[2] - compatibility matrix
    this->blobs_.resize(3);

    // Allocate space for kernel weights.
    this->blobs_[0].reset(new Blob<Dtype>(1, 1, channels_, channels_));
    this->blobs_[1].reset(new Blob<Dtype>(1, 1, channels_, channels_));

    caffe_set(channels_ * channels_, Dtype(0.), this->blobs_[0]->mutable_cpu_data());
    caffe_set(channels_ * channels_, Dtype(0.), this->blobs_[1]->mutable_cpu_data());

    // Initialize the kernel weights. The two files spatial.par and bilateral.par
    // must be available.
    FILE* pFile;
    pFile = fopen("spatial.par", "r");
    CHECK(pFile) << "The file 'spatial.par' is not found. Please create it with initial spatial kernel weights.";
    for (int i = 0; i < channels_; i++) {
      fscanf(pFile, "%lf", &this->blobs_[0]->mutable_cpu_data()[i * channels_ + i]);
    }
    fclose(pFile);

    pFile = fopen("bilateral.par", "r");
    CHECK(pFile) << "The file 'bilateral.par' is not found. Please create it with initial bilateral kernel weights.";
    for (int i = 0; i < channels_; i++) {
      fscanf(pFile, "%lf", &this->blobs_[1]->mutable_cpu_data()[i * channels_ + i]);
    }
    fclose(pFile);

    // Initialize the compatibility matrix.
    this->blobs_[2].reset(new Blob<Dtype>(1, 1, channels_, channels_));
    caffe_set(channels_ * channels_, Dtype(0.), this->blobs_[2]->mutable_cpu_data());

    // Initialize it to the Potts model.
    for (int c = 0; c < channels_; ++c) {
      (this->blobs_[2]->mutable_cpu_data())[c * channels_ + c] = Dtype(-1.);
    }
  }

  // Note: variable-length array, relies on a compiler extension.
  float spatial_kernel[2 * num_pixels_];
  float* spatial_kernel_gpu_;
  compute_spatial_kernel(spatial_kernel);
  spatial_lattice_.reset(new ModifiedPermutohedral());
  spatial_norm_.Reshape(1, 1, height_, width_);
  Dtype* norm_data_gpu;
  Dtype* norm_data;
  // Initialize the spatial lattice. This does not need to be computed for every
  // image because we use a fixed size.
  switch (Caffe::mode()) {
    case Caffe::CPU:
      norm_data = spatial_norm_.mutable_cpu_data();
      spatial_lattice_->init(spatial_kernel, 2, width_, height_);
      // Calculate spatial filter normalization factors.
      norm_feed_ = new Dtype[num_pixels_];
      caffe_set(num_pixels_, Dtype(1.0), norm_feed_);
      spatial_lattice_->compute(norm_data, norm_feed_, 1);
      bilateral_kernel_buffer_ = new float[5 * num_pixels_];
      init_cpu = true;
      break;
#ifndef CPU_ONLY
    case Caffe::GPU:
      CUDA_CHECK(cudaMalloc((void**)&spatial_kernel_gpu_, 2 * num_pixels_ * sizeof(float)));
      CUDA_CHECK(cudaMemcpy(spatial_kernel_gpu_, spatial_kernel, 2 * num_pixels_ * sizeof(float), cudaMemcpyHostToDevice));
      spatial_lattice_->init(spatial_kernel_gpu_, 2, width_, height_);
      CUDA_CHECK(cudaMalloc((void**)&norm_feed_, num_pixels_ * sizeof(Dtype)));
      caffe_gpu_set(num_pixels_, Dtype(1.0), norm_feed_);
      norm_data_gpu = spatial_norm_.mutable_gpu_data();
      spatial_lattice_->compute(norm_data_gpu, norm_feed_, 1);
      norm_data = spatial_norm_.mutable_cpu_data();
      CUDA_CHECK(cudaMalloc((void**)&bilateral_kernel_buffer_, 5 * num_pixels_ * sizeof(float)));
      CUDA_CHECK(cudaFree(spatial_kernel_gpu_));
      init_gpu = true;
      break;
//......... remainder of code omitted .........
Developer ID: hyenal, Project: crfasrnn, Lines of code: 101
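Example 27 shows the streaming form: CHECK_GT(num_iterations_, 1) << "..." appends a human-readable diagnostic that glog prints only when the check fails, so the streamed operands are not evaluated on the success path. A minimal sketch of the same idiom; configure_iterations and its parameter name are hypothetical:

#include <glog/logging.h>

// Sketch: attach a diagnostic message to CHECK_GT.
// The streamed operands are evaluated only when the check fails.
void configure_iterations(int num_iterations) {
  CHECK_GT(num_iterations, 1)
      << "num_iterations must be greater than 1, got " << num_iterations;
  // ... continue with layer setup ...
}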



Note: The CHECK_GT examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms; the snippets were selected from open-source projects contributed by many developers. Copyright in the source code remains with the original authors; for distribution and use, refer to each project's License. Do not reproduce without permission.

