This tutorial of practical C++ CHECK_EQ code examples should be helpful to you.
This article collects and summarizes typical usage examples of the CHECK_EQ function in C++. If you are struggling with questions such as: What exactly does the C++ CHECK_EQ function do? How is CHECK_EQ used? What do real CHECK_EQ call sites look like? Then congratulations, the curated examples below may be just the help you need. A total of 29 CHECK_EQ code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better C++ code examples.
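Before diving into the examples, here is a minimal sketch of what CHECK_EQ itself does, assuming the glog-style macro that the Caffe and Android snippets below use: CHECK_EQ(a, b) evaluates both arguments and, if they are not equal, logs a FATAL message containing both values and aborts the process; anything streamed after the macro with << is appended to that failure message. The parse_width helper and the frame number are made up for this illustration.

#include <glog/logging.h>

// Hypothetical helper, used only for this illustration.
static int parse_width(int raw) { return raw / 2; }

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);

  const int expected = 320;
  const int actual = parse_width(640);

  // Passes silently: both sides compare equal.
  CHECK_EQ(expected, actual);

  // The streamed context only appears if the check fails. Had actual been
  // 321, the process would abort with a message like:
  //   Check failed: expected == actual (320 vs. 321) bad width for frame 7
  CHECK_EQ(expected, actual) << "bad width for frame " << 7;

  return 0;
}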
Example 1: CHECK_GE

void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Configure the kernel size, padding, stride, and inputs.
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  force_nd_im2col_ = conv_param.force_nd_im2col();
  channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis());
  const int first_spatial_axis = channel_axis_ + 1;
  const int num_axes = bottom[0]->num_axes();
  num_spatial_axes_ = num_axes - first_spatial_axis;
  CHECK_GE(num_spatial_axes_, 0);
  vector<int> bottom_dim_blob_shape(1, num_spatial_axes_ + 1);
  vector<int> spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1));
  // Setup filter kernel dimensions (kernel_shape_).
  kernel_shape_.Reshape(spatial_dim_blob_shape);
  int* kernel_shape_data = kernel_shape_.mutable_cpu_data();
  if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "kernel_h & kernel_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_shape_data[0] = conv_param.kernel_h();
    kernel_shape_data[1] = conv_param.kernel_w();
  } else {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)
        << "kernel_size must be specified once, or once per spatial dimension "
        << "(kernel_size specified " << num_kernel_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    for (int i = 0; i < num_spatial_axes_; ++i) {
      kernel_shape_data[i] =
          conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i);
    }
  }
  for (int i = 0; i < num_spatial_axes_; ++i) {
    CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero.";
  }
  // Setup stride dimensions (stride_).
  stride_.Reshape(spatial_dim_blob_shape);
  int* stride_data = stride_.mutable_cpu_data();
  if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "stride_h & stride_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.stride_size())
        << "Either stride or stride_h/w should be specified; not both.";
    stride_data[0] = conv_param.stride_h();
    stride_data[1] = conv_param.stride_w();
  } else {
    const int num_stride_dims = conv_param.stride_size();
    CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||
          num_stride_dims == num_spatial_axes_)
        << "stride must be specified once, or once per spatial dimension "
        << "(stride specified " << num_stride_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultStride = 1;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      stride_data[i] = (num_stride_dims == 0) ? kDefaultStride :
          conv_param.stride((num_stride_dims == 1) ? 0 : i);
      CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero.";
    }
  }
  // Setup pad dimensions (pad_).
  pad_.Reshape(spatial_dim_blob_shape);
  int* pad_data = pad_.mutable_cpu_data();
  if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "pad_h & pad_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.pad_size())
        << "Either pad or pad_h/w should be specified; not both.";
    pad_data[0] = conv_param.pad_h();
    pad_data[1] = conv_param.pad_w();
  } else {
    const int num_pad_dims = conv_param.pad_size();
    CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||
          num_pad_dims == num_spatial_axes_)
        << "pad must be specified once, or once per spatial dimension "
        << "(pad specified " << num_pad_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultPad = 0;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      pad_data[i] = (num_pad_dims == 0) ? kDefaultPad :
          conv_param.pad((num_pad_dims == 1) ? 0 : i);
    }
  }
  // Setup dilation dimensions (dilation_).
  dilation_.Reshape(spatial_dim_blob_shape);
  int* dilation_data = dilation_.mutable_cpu_data();
  const int num_dilation_dims = conv_param.dilation_size();
  CHECK(num_dilation_dims == 0 || num_dilation_dims == 1 ||
        num_dilation_dims == num_spatial_axes_)
      << "dilation must be specified once, or once per spatial dimension "
      << "(dilation specified " << num_dilation_dims << " times; "
      << num_spatial_axes_ << " spatial dims).";
  const int kDefaultDilation = 1;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    dilation_data[i] = (num_dilation_dims == 0) ? kDefaultDilation :
        conv_param.dilation((num_dilation_dims == 1) ? 0 : i);
  }
  // Special case: im2col is the identity for 1x1 convolution with stride 1
  // and no padding, so flag for skipping the buffer and transformation.
  is_1x1_ = true;
// ... the rest of this code is omitted ...
Author: voidrank | Project: caffe | Lines: 101
Example 2: CHECK

// ... part of the code is omitted here ...
  if (!pool_param.has_pad_h()) {
    pad_h_ = pad_w_ = pool_param.pad();
  } else {
    pad_h_ = pool_param.pad_h();
    pad_w_ = pool_param.pad_w();
  }
  if (!pool_param.has_stride_h()) {
    stride_h_ = stride_w_ = pool_param.stride();
  } else {
    stride_h_ = pool_param.stride_h();
    stride_w_ = pool_param.stride_w();
  }
  if (global_pooling_) {
    CHECK(pad_h_ == 0 && pad_w_ == 0 && stride_h_ == 1 && stride_w_ == 1)
        << "With Global_pooling: true; only pad = 0 and stride = 1";
  }
  if (pad_h_ != 0 || pad_w_ != 0) {
    CHECK(this->layer_param_.pooling_param().pool()
          == PoolingParameter_PoolMethod_AVE
          || this->layer_param_.pooling_param().pool()
          == PoolingParameter_PoolMethod_MAX)
        << "Padding implemented only for average and max pooling.";
    CHECK_LT(pad_h_, kernel_h_);
    CHECK_LT(pad_w_, kernel_w_);
  }
  pooled_height_ = static_cast<int>(ceil(static_cast<float>(
      bottom[0]->height() + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
  pooled_width_ = static_cast<int>(ceil(static_cast<float>(
      bottom[0]->width() + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
  if (pad_h_ || pad_w_) {
    // If we have padding, ensure that the last pooling starts strictly
    // inside the image (instead of at the padding); otherwise clip the last.
    if ((pooled_height_ - 1) * stride_h_ >= bottom[0]->height() + pad_h_) {
      --pooled_height_;
    }
    if ((pooled_width_ - 1) * stride_w_ >= bottom[0]->width() + pad_w_) {
      --pooled_width_;
    }
    CHECK_LT((pooled_height_ - 1) * stride_h_, bottom[0]->height() + pad_h_);
    CHECK_LT((pooled_width_ - 1) * stride_w_, bottom[0]->width() + pad_w_);
  }

  size_t dim = 4;
  size_t src_sizes[4], src_strides[4];
  size_t dst_sizes[4], dst_strides[4];

  src_sizes[0] = bottom[0]->width();
  src_sizes[1] = bottom[0]->height();
  src_sizes[2] = bottom[0]->channels();
  src_sizes[3] = bottom[0]->num();

  src_strides[0] = 1;
  src_strides[1] = src_sizes[0];
  src_strides[2] = src_sizes[0]*src_sizes[1];
  src_strides[3] = src_sizes[0]*src_sizes[1]*src_sizes[2];

  dst_sizes[0] = pooled_width_;
  dst_sizes[1] = pooled_height_;
  dst_sizes[2] = src_sizes[2];
  dst_sizes[3] = src_sizes[3];

  dst_strides[0] = 1;
  dst_strides[1] = dst_sizes[0];
  dst_strides[2] = dst_sizes[0]*dst_sizes[1];
  dst_strides[3] = dst_sizes[0]*dst_sizes[1]*dst_sizes[2];

  src_offset[0] = -pad_w_;
  src_offset[1] = -pad_h_;

  kernel_stride[0] = stride_w_;
  kernel_stride[1] = stride_h_;

  kernel_size[0] = kernel_w_;
  kernel_size[1] = kernel_h_;

  dnnError_t e;
  e = dnnLayoutCreate<Dtype>(&fwd_bottom_data->layout_usr,
                             dim, src_sizes, src_strides);
  CHECK_EQ(e, E_SUCCESS);
  e = dnnLayoutCreate<Dtype>(&fwd_top_data->layout_usr,
                             dim, dst_sizes, dst_strides);
  CHECK_EQ(e, E_SUCCESS);
  e = dnnLayoutCreate<Dtype>(&bwd_bottom_diff->layout_usr,
                             dim, src_sizes, src_strides);
  CHECK_EQ(e, E_SUCCESS);
  e = dnnLayoutCreate<Dtype>(&bwd_top_diff->layout_usr,
                             dim, dst_sizes, dst_strides);
  CHECK_EQ(e, E_SUCCESS);

  // Names are for debugging only
  fwd_bottom_data->name = "fwd_bottom_data @ " + this->layer_param_.name();
  fwd_top_data->name = "fwd_top_data @ " + this->layer_param_.name();
  bwd_top_diff->name = "bwd_top_diff @ " + this->layer_param_.name();
  bwd_bottom_diff->name = "bwd_bottom_diff @ " + this->layer_param_.name();

  // Primitives will be allocated during the first fwd pass
  poolingFwd = NULL;
  poolingBwd = NULL;
}
Author: crobertob | Project: caffe | Lines: 101
Example 3: sender

static void* sender(void* arg) {
    SendArg* sa = (SendArg*)arg;
    int64_t value = 0;
    while (!brpc::IsAskedToQuit()) {
        braft::PeerId leader;
        // Select leader of the target group from RouteTable
        if (braft::rtb::select_leader(FLAGS_group, &leader) != 0) {
            // Leader is unknown in RouteTable. Ask RouteTable to refresh
            // leader by sending RPCs.
            butil::Status st = braft::rtb::refresh_leader(
                    FLAGS_group, FLAGS_timeout_ms);
            if (!st.ok()) {
                // Not sure about the leader; sleep for a while and then
                // ask again.
                LOG(WARNING) << "Fail to refresh_leader : " << st;
                bthread_usleep(FLAGS_timeout_ms * 1000L);
            }
            continue;
        }

        // Now we know who the leader is; construct a stub and then send
        // the RPC.
        brpc::Channel channel;
        if (channel.Init(leader.addr, NULL) != 0) {
            LOG(ERROR) << "Fail to init channel to " << leader;
            bthread_usleep(FLAGS_timeout_ms * 1000L);
            continue;
        }
        example::AtomicService_Stub stub(&channel);

        brpc::Controller cntl;
        cntl.set_timeout_ms(FLAGS_timeout_ms);
        example::CompareExchangeRequest request;
        example::AtomicResponse response;
        request.set_id(sa->id);
        request.set_expected_value(value);
        request.set_new_value(value + 1);
        stub.compare_exchange(&cntl, &request, &response, NULL);

        if (cntl.Failed()) {
            LOG(WARNING) << "Fail to send request to " << leader
                         << " : " << cntl.ErrorText();
            // Clear leadership since this RPC failed.
            braft::rtb::update_leader(FLAGS_group, braft::PeerId());
            bthread_usleep(FLAGS_timeout_ms * 1000L);
            continue;
        }

        if (!response.success()) {
            // A redirect response
            if (!response.has_old_value()) {
                LOG(WARNING) << "Fail to send request to " << leader
                             << ", redirecting to "
                             << (response.has_redirect()
                                    ? response.redirect() : "nowhere");
                // Update route table since we have redirect information
                braft::rtb::update_leader(FLAGS_group, response.redirect());
                continue;
            }
            // old_value does not match the expected value; check whether
            // this is the initial request or a false negative.
            if (value == 0 || response.old_value() == value + 1) {
                //  ^^^                    ^^^
                //  it's the initialized   there was a
                //  value                  false negative
                value = response.old_value();
            } else {
                CHECK_EQ(value, response.old_value());
                exit(-1);
            }
        } else {
            value = response.new_value();
        }
        g_latency_recorder << cntl.latency_us();
        if (FLAGS_log_each_request) {
            LOG(INFO) << "Received response from " << leader
                      << " old_value=" << response.old_value()
                      << " new_value=" << response.new_value()
                      << " latency=" << cntl.latency_us();
            bthread_usleep(1000L * 1000L);
        }
    }
    return NULL;
}
Author: ctero-graham | Project: braft | Lines: 84
Example 4: CHECK_EQ

void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 0) << "Data Layer takes no input blobs.";
  CHECK_GE(top->size(), 1) << "Data Layer takes at least one blob as output.";
  CHECK_LE(top->size(), 2) << "Data Layer takes at most two blobs as output.";
  if (top->size() == 1) {
    output_labels_ = false;
  } else {
    output_labels_ = true;
  }
  // Initialize the leveldb
  leveldb::DB* db_temp;
  leveldb::Options options;
  options.create_if_missing = false;
  options.max_open_files = 10;
  LOG(INFO) << "Opening leveldb " << this->layer_param_.data_param().source();
  leveldb::Status status = leveldb::DB::Open(
      options, this->layer_param_.data_param().source(), &db_temp);
  CHECK(status.ok()) << "Failed to open leveldb "
      << this->layer_param_.data_param().source() << std::endl
      << status.ToString();
  db_.reset(db_temp);
  iter_.reset(db_->NewIterator(leveldb::ReadOptions()));
  iter_->SeekToFirst();
  // Check if we would need to randomly skip a few data points
  if (this->layer_param_.data_param().rand_skip()) {
    unsigned int skip = caffe_rng_rand() %
        this->layer_param_.data_param().rand_skip();
    LOG(INFO) << "Skipping first " << skip << " data points.";
    while (skip-- > 0) {
      iter_->Next();
      if (!iter_->Valid()) {
        iter_->SeekToFirst();
      }
    }
  }
  // Read a data point, and use it to initialize the top blob.
  Datum datum;
  datum.ParseFromString(iter_->value().ToString());
  // image
  int crop_size = this->layer_param_.data_param().crop_size();
  if (crop_size > 0) {
    (*top)[0]->Reshape(this->layer_param_.data_param().batch_size(),
                       datum.channels(), crop_size, crop_size);
    prefetch_data_.reset(new Blob<Dtype>(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        crop_size, crop_size));
  } else {
    (*top)[0]->Reshape(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.height(), datum.width());
    prefetch_data_.reset(new Blob<Dtype>(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.height(), datum.width()));
  }
  LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
      << (*top)[0]->channels() << "," << (*top)[0]->height() << ","
      << (*top)[0]->width();
  // label
  if (output_labels_) {
    (*top)[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1);
    prefetch_label_.reset(
        new Blob<Dtype>(this->layer_param_.data_param().batch_size(),
                        1, 1, 1));
  }
  // datum size
  datum_channels_ = datum.channels();
  datum_height_ = datum.height();
  datum_width_ = datum.width();
  datum_size_ = datum.channels() * datum.height() * datum.width();
  CHECK_GT(datum_height_, crop_size);
  CHECK_GT(datum_width_, crop_size);
  // check if we want to have mean
  if (this->layer_param_.data_param().has_mean_file()) {
    const string& mean_file = this->layer_param_.data_param().mean_file();
    LOG(INFO) << "Loading mean file from " << mean_file;
    BlobProto blob_proto;
    ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);
    data_mean_.FromProto(blob_proto);
    CHECK_EQ(data_mean_.num(), 1);
    CHECK_EQ(data_mean_.channels(), datum_channels_);
    CHECK_EQ(data_mean_.height(), datum_height_);
    CHECK_EQ(data_mean_.width(), datum_width_);
  } else {
    // Simply initialize an all-empty mean.
    data_mean_.Reshape(1, datum_channels_, datum_height_, datum_width_);
  }
  // Now, start the prefetch thread. Before calling prefetch, we make two
  // cpu_data calls so that the prefetch thread does not accidentally make
  // simultaneous cudaMalloc calls when the main thread is running. In some
  // GPUs this seems to cause failures if we do not do so.
  prefetch_data_->mutable_cpu_data();
  if (output_labels_) {
    prefetch_label_->mutable_cpu_data();
  }
  data_mean_.cpu_data();
  DLOG(INFO) << "Initializing prefetch";
  CreatePrefetchThread();
  DLOG(INFO) << "Prefetch initialized.";
}
Author: zhangxc11 | Project: caffe | Lines: 99
Example 5: CHECK_EQ

void FilterLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(top.size(), bottom.size() - 1);
  first_reshape_ = true;
}
Author: victorv | Project: caffe | Lines: 5
Example 6: ir_call_external_1

void ir_call_external_1(struct ir *ir, struct ir_value *addr) {
  CHECK_EQ(addr->type, VALUE_I64);

  struct ir_instr *instr = ir_append_instr(ir, OP_CALL_EXTERNAL, VALUE_V);
  ir_set_arg0(ir, instr, addr);
}
Author: ST3ALth | Project: redream | Lines: 6
Example 7: parseStreamMuxConfig

static status_t parseStreamMuxConfig(
        ABitReader *bits,
        unsigned *numSubFrames,
        unsigned *frameLengthType,
        ssize_t *fixedFrameLength,
        bool *otherDataPresent,
        unsigned *otherDataLenBits) {
    unsigned audioMuxVersion = bits->getBits(1);

    unsigned audioMuxVersionA = 0;
    if (audioMuxVersion == 1) {
        audioMuxVersionA = bits->getBits(1);
    }

    CHECK_EQ(audioMuxVersionA, 0u);  // otherwise future spec

    if (audioMuxVersion != 0) {
        return ERROR_UNSUPPORTED;  // XXX to be implemented;
    }
    CHECK_EQ(audioMuxVersion, 0u);  // XXX to be implemented

    unsigned allStreamsSameTimeFraming = bits->getBits(1);
    CHECK_EQ(allStreamsSameTimeFraming, 1u);

    // There's only one stream.
    *numSubFrames = bits->getBits(6);

    unsigned numProgram = bits->getBits(4);
    CHECK_EQ(numProgram, 0u);  // disabled in RTP LATM

    unsigned numLayer = bits->getBits(3);
    CHECK_EQ(numLayer, 0u);  // disabled in RTP LATM

    if (audioMuxVersion == 0) {
        // AudioSpecificConfig
        CHECK_EQ(parseAudioSpecificConfig(bits, NULL /* asc */), (status_t)OK);
    } else {
        TRESPASS();  // XXX to be implemented
    }

    *frameLengthType = bits->getBits(3);
    *fixedFrameLength = -1;

    switch (*frameLengthType) {
        case 0:
        {
            /* unsigned bufferFullness = */bits->getBits(8);

            // The "coreFrameOffset" does not apply since there's only
            // a single layer.
            break;
        }
        case 1:
        {
            *fixedFrameLength = bits->getBits(9);
            break;
        }
        case 2:
        {
            // reserved
            TRESPASS();
            break;
        }
        case 3:
        case 4:
        case 5:
        {
            /* unsigned CELPframeLengthTableIndex = */bits->getBits(6);
            break;
        }
        case 6:
        case 7:
        {
            /* unsigned HVXCframeLengthTableIndex = */bits->getBits(1);
            break;
        }
        default:
            break;
    }

#ifdef STE_HARDWARE
    status_t parseResult = OK;
#endif
    *otherDataPresent = bits->getBits(1);
    *otherDataLenBits = 0;
    if (*otherDataPresent) {
        if (audioMuxVersion == 1) {
            TRESPASS();  // XXX to be implemented
#ifdef STE_HARDWARE
        } else if (bits->numBitsLeft() < 9) {
            parseResult = ERROR_MALFORMED;
#endif
        } else {
            *otherDataLenBits = 0;

            unsigned otherDataLenEsc;
            do {
// ... the rest of this code is omitted ...
Author: minicm4pico | Project: android_frameworks_base | Lines: 101
Example 8: LOG

void Net<Dtype>::Init(const NetParameter& in_param) {
  LOG(INFO) << "Initializing net from parameters: " << std::endl
            << in_param.DebugString();
  // Create a copy of in_param with splits added where necessary.
  NetParameter param;
  InsertSplits(in_param, &param);
  // Basically, build all the layers and set up their connections.
  name_ = param.name();
  map<string, int> blob_name_to_idx;
  set<string> available_blobs;
  int num_layers = param.layers_size();
  CHECK_EQ(param.input_size() * 4, param.input_dim_size())
      << "Incorrect bottom blob dimension specifications.";
  size_t memory_used = 0;
  // set the input blobs
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    shared_ptr<Blob<Dtype> > blob_pointer(
        new Blob<Dtype>(param.input_dim(i * 4),
                        param.input_dim(i * 4 + 1),
                        param.input_dim(i * 4 + 2),
                        param.input_dim(i * 4 + 3)));
    blobs_.push_back(blob_pointer);
    blob_names_.push_back(blob_name);
    blob_need_backward_.push_back(param.force_backward());
    net_input_blob_indices_.push_back(i);
    net_input_blobs_.push_back(blob_pointer.get());
    blob_name_to_idx[blob_name] = i;
    available_blobs.insert(blob_name);
    memory_used += blob_pointer->count();
  }
  DLOG(INFO) << "Memory required for Data " << memory_used * sizeof(Dtype);
  // For each layer, set up their input and output
  bottom_vecs_.resize(param.layers_size());
  top_vecs_.resize(param.layers_size());
  bottom_id_vecs_.resize(param.layers_size());
  top_id_vecs_.resize(param.layers_size());
  for (int i = 0; i < param.layers_size(); ++i) {
    bool in_place = false;
    const LayerParameter& layer_param = param.layers(i);
    layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
    layer_names_.push_back(layer_param.name());
    LOG(INFO) << "Creating Layer " << layer_param.name();
    bool need_backward = param.force_backward();
    // Figure out this layer's input and output
    for (int j = 0; j < layer_param.bottom_size(); ++j) {
      const string& blob_name = layer_param.bottom(j);
      const int blob_id = blob_name_to_idx[blob_name];
      if (available_blobs.find(blob_name) == available_blobs.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name << " to layer" << j;
      }
      LOG(INFO) << layer_param.name() << " <- " << blob_name;
      bottom_vecs_[i].push_back(blobs_[blob_id].get());
      bottom_id_vecs_[i].push_back(blob_id);
      // If a blob needs backward, this layer should provide it.
      need_backward |= blob_need_backward_[blob_id];
      available_blobs.erase(blob_name);
    }
    for (int j = 0; j < layer_param.top_size(); ++j) {
      const string& blob_name = layer_param.top(j);
      // Check if we are doing in-place computation
      if (layer_param.bottom_size() > j &&
          blob_name == layer_param.bottom(j)) {
        // In-place computation
        LOG(INFO) << layer_param.name() << " -> " << blob_name
                  << " (in-place)";
        in_place = true;
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(blobs_[blob_name_to_idx[blob_name]].get());
        top_id_vecs_[i].push_back(blob_name_to_idx[blob_name]);
      } else if (blob_name_to_idx.find(blob_name) != blob_name_to_idx.end()) {
        // If we are not doing in-place computation but have duplicated
        // blobs, raise an error.
        LOG(FATAL) << "Duplicate blobs produced by multiple sources.";
      } else {
        // Normal output.
        LOG(INFO) << layer_param.name() << " -> " << blob_name;
        shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
        blobs_.push_back(blob_pointer);
        blob_names_.push_back(blob_name);
        blob_need_backward_.push_back(param.force_backward());
        blob_name_to_idx[blob_name] = blob_names_.size() - 1;
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(blobs_[blob_names_.size() - 1].get());
        top_id_vecs_[i].push_back(blob_names_.size() - 1);
      }
    }
    // After this layer is connected, set it up.
    // LOG(INFO) << "Setting up " << layer_names_[i];
    layers_[i]->SetUp(bottom_vecs_[i], &top_vecs_[i]);
    for (int topid = 0; topid < top_vecs_[i].size(); ++topid) {
      LOG(INFO) << "Top shape: " << top_vecs_[i][topid]->num() << " "
          << top_vecs_[i][topid]->channels() << " "
          << top_vecs_[i][topid]->height() << " "
          << top_vecs_[i][topid]->width() << " ("
          << top_vecs_[i][topid]->count() << ")";
      if (!in_place)
        memory_used += top_vecs_[i][topid]->count();
// ... the rest of this code is omitted ...
Author: aybassiouny | Project: wincaffe-cmake | Lines: 101
Example 9: constructor_sanity_check

  void constructor_sanity_check() {
    CHECK_EQ(0, this->param_.momentum())
        << "Momentum cannot be used with AdaGrad.";
  }
Author: flair2005 | Project: caffe-gpus | Lines: 4
Example 10: CHECK_GT

void LSTMLayer<Dtype>::FillUnrolledNet(NetParameter* net_param) const {
  const int num_output = this->layer_param_.recurrent_param().num_output();
  CHECK_GT(num_output, 0) << "num_output must be positive";
  const FillerParameter& weight_filler =
      this->layer_param_.recurrent_param().weight_filler();
  const FillerParameter& bias_filler =
      this->layer_param_.recurrent_param().bias_filler();

  // Add generic LayerParameter's (without bottoms/tops) of layer types we'll
  // use to save redundant code.
  LayerParameter hidden_param;
  hidden_param.set_type("InnerProduct");
  hidden_param.mutable_inner_product_param()->set_num_output(num_output * 4);
  hidden_param.mutable_inner_product_param()->set_bias_term(false);
  hidden_param.mutable_inner_product_param()->set_axis(2);
  hidden_param.mutable_inner_product_param()->
      mutable_weight_filler()->CopyFrom(weight_filler);

  LayerParameter biased_hidden_param(hidden_param);
  biased_hidden_param.mutable_inner_product_param()->set_bias_term(true);
  biased_hidden_param.mutable_inner_product_param()->
      mutable_bias_filler()->CopyFrom(bias_filler);

  LayerParameter sum_param;
  sum_param.set_type("Eltwise");
  sum_param.mutable_eltwise_param()->set_operation(
      EltwiseParameter_EltwiseOp_SUM);

  LayerParameter scale_param;
  scale_param.set_type("Scale");
  scale_param.mutable_scale_param()->set_axis(0);

  LayerParameter slice_param;
  slice_param.set_type("Slice");
  slice_param.mutable_slice_param()->set_axis(0);

  LayerParameter split_param;
  split_param.set_type("Split");

  vector<BlobShape> input_shapes;
  RecurrentInputShapes(&input_shapes);
  CHECK_EQ(2, input_shapes.size());

  LayerParameter* input_layer_param = net_param->add_layer();
  input_layer_param->set_type("Input");
  InputParameter* input_param = input_layer_param->mutable_input_param();

  input_layer_param->add_top("c_0");
  input_param->add_shape()->CopyFrom(input_shapes[0]);

  input_layer_param->add_top("h_0");
  input_param->add_shape()->CopyFrom(input_shapes[1]);

  LayerParameter* cont_slice_param = net_param->add_layer();
  cont_slice_param->CopyFrom(slice_param);
  cont_slice_param->set_name("cont_slice");
  cont_slice_param->add_bottom("cont");
  cont_slice_param->mutable_slice_param()->set_axis(0);

  // Add layer to transform all timesteps of x to the hidden state dimension.
  //     W_xc_x = W_xc * x + b_c
  {
    LayerParameter* x_transform_param = net_param->add_layer();
    x_transform_param->CopyFrom(biased_hidden_param);
    x_transform_param->set_name("x_transform");
    x_transform_param->add_param()->set_name("W_xc");
    x_transform_param->add_param()->set_name("b_c");
    x_transform_param->add_bottom("x");
    x_transform_param->add_top("W_xc_x");
    x_transform_param->add_propagate_down(true);
  }

  if (this->static_input_) {
    // Add layer to transform x_static to the gate dimension.
    //     W_xc_x_static = W_xc_static * x_static
    LayerParameter* x_static_transform_param = net_param->add_layer();
    x_static_transform_param->CopyFrom(hidden_param);
    x_static_transform_param->mutable_inner_product_param()->set_axis(1);
    x_static_transform_param->set_name("W_xc_x_static");
    x_static_transform_param->add_param()->set_name("W_xc_static");
    x_static_transform_param->add_bottom("x_static");
    x_static_transform_param->add_top("W_xc_x_static_preshape");
    x_static_transform_param->add_propagate_down(true);

    LayerParameter* reshape_param = net_param->add_layer();
    reshape_param->set_type("Reshape");
    BlobShape* new_shape =
        reshape_param->mutable_reshape_param()->mutable_shape();
    new_shape->add_dim(1);  // One timestep.
    // Should infer this->N as the dimension so we can reshape on batch size.
    new_shape->add_dim(-1);
    new_shape->add_dim(
        x_static_transform_param->inner_product_param().num_output());
    reshape_param->set_name("W_xc_x_static_reshape");
    reshape_param->add_bottom("W_xc_x_static_preshape");
    reshape_param->add_top("W_xc_x_static");
  }

  LayerParameter* x_slice_param = net_param->add_layer();
  x_slice_param->CopyFrom(slice_param);
// ... the rest of this code is omitted ...
Author: 20337112 | Project: caffe | Lines: 101
Example 11: CHECK_EQ

void Convolution3DLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 1) << "Conv Layer takes a single blob as input.";
  CHECK_EQ(top->size(), 1) << "Conv Layer takes a single blob as output.";

  kernel_size_ = this->layer_param_.convolution_param().kernel_size();
  kernel_depth_ = this->layer_param_.convolution_param().kernel_depth();
  stride_ = this->layer_param_.convolution_param().stride();
  temporal_stride_ = this->layer_param_.convolution_param().temporal_stride();
  pad_ = this->layer_param_.convolution_param().pad();
  temporal_pad_ = this->layer_param_.convolution_param().temporal_pad();
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  length_ = bottom[0]->length();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  num_output_ = this->layer_param_.convolution_param().num_output();
  filter_group_ = this->layer_param_.convolution_param().filter_group();
  CHECK_GT(num_output_, 0);

  // number of output filters must be divisible by filter_group
  CHECK_EQ(num_output_ % filter_group_, 0);

  // The vol2col result buffer would only hold one image at a time to avoid
  // overly large memory usage.
  int height_out = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1;
  int width_out = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1;
  int length_out =
      (length_ + 2 * temporal_pad_ - kernel_depth_) / temporal_stride_ + 1;

  // buffer for one image
  col_buffer_.Reshape(
      1, channels_ * kernel_depth_ * kernel_size_ * kernel_size_,
      length_out, height_out, width_out);

  bias_term_ = this->layer_param_.convolution_param().bias_term();

  // Figure out the dimensions for individual gemms.
  M_ = num_output_ / filter_group_;  // doing convolution filter_group_ times per volume
  K_ = channels_ * kernel_depth_ * kernel_size_ * kernel_size_;
  N_ = length_out * height_out * width_out;

  // output size
  (*top)[0]->Reshape(bottom[0]->num(), num_output_,
                     length_out, height_out, width_out);

  // Check if we need to set up the weights
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weights
    this->blobs_[0].reset(new Blob<Dtype>(
        num_output_, channels_, kernel_depth_, kernel_size_, kernel_size_));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.convolution_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the bias term
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, 1, num_output_));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.convolution_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }

  // Set up the bias filler
  if (bias_term_) {
    bias_multiplier_.reset(new SyncedMemory(N_ * sizeof(Dtype)));
    Dtype* bias_multiplier_data =
        reinterpret_cast<Dtype*>(bias_multiplier_->mutable_cpu_data());
    for (int i = 0; i < N_; ++i) {
      bias_multiplier_data[i] = 1.;
    }
  }
}
Author: ZhaofanQiu | Project: caffe-windows-3dConvNet | Lines: 82
Example 12: CHECK_EQ

void Blob<Dtype>::ShareData(const Blob& other) {
  CHECK_EQ(count_, other.count());
  data_ = other.data();
}
Author: siddharthachandra | Project: gcrf | Lines: 4
Example 13: CHECK_EQ

void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>* layer,
    vector<Blob<Dtype>*>* bottom, vector<Blob<Dtype>*>* top,
    int check_bottom, int top_id, int top_data_id, bool element_wise) {
  if (element_wise) {
    CHECK_EQ(0, layer->blobs().size());
    CHECK_LE(0, top_id);
    CHECK_LE(0, top_data_id);
    const int top_count = (*top)[top_id]->count();
    for (int blob_id = 0; blob_id < bottom->size(); ++blob_id) {
      CHECK_EQ(top_count, (*bottom)[blob_id]->count());
    }
  }
  // First, figure out what blobs we need to check against.
  vector<Blob<Dtype>*> blobs_to_check;
  for (int i = 0; i < layer->blobs().size(); ++i) {
    blobs_to_check.push_back(layer->blobs()[i].get());
  }
  if (check_bottom < 0) {
    for (int i = 0; i < bottom->size(); ++i) {
      blobs_to_check.push_back((*bottom)[i]);
    }
  } else {
    CHECK(check_bottom < bottom->size());
    blobs_to_check.push_back((*bottom)[check_bottom]);
  }
  // Compute the gradient analytically using Backward
  Caffe::set_random_seed(seed_);
  // Get any loss from the layer
  Dtype computed_objective = layer->Forward(*bottom, top);
  // Get additional loss from the objective
  computed_objective += GetObjAndGradient(top, top_id, top_data_id);
  layer->Backward(*top, true, bottom);
  // Store computed gradients for all checked blobs
  vector<shared_ptr<Blob<Dtype> > >
      computed_gradient_blobs(blobs_to_check.size());
  for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) {
    Blob<Dtype>* current_blob = blobs_to_check[blob_id];
    computed_gradient_blobs[blob_id].reset(new Blob<Dtype>());
    computed_gradient_blobs[blob_id]->ReshapeLike(*current_blob);
    const int count = blobs_to_check[blob_id]->count();
    const Dtype* diff = blobs_to_check[blob_id]->cpu_diff();
    Dtype* computed_gradients =
        computed_gradient_blobs[blob_id]->mutable_cpu_data();
    caffe_copy(count, diff, computed_gradients);
  }
  // Compute derivative of top w.r.t. each bottom and parameter input using
  // finite differencing.
  // LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
  for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) {
    Blob<Dtype>* current_blob = blobs_to_check[blob_id];
    const Dtype* computed_gradients =
        computed_gradient_blobs[blob_id]->cpu_data();
    // LOG(ERROR) << "Blob " << blob_id << ": checking "
    //     << current_blob->count() << " parameters.";
    for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
      // For an element-wise layer, we only need to do finite differencing to
      // compute the derivative of (*top)[top_id][top_data_id] w.r.t.
      // (*bottom)[blob_id][i] only for i == top_data_id.  For any other
      // i != top_data_id, we know the derivative is 0 by definition, and
      // simply check that that's true.
      Dtype estimated_gradient = 0;
      if (!element_wise || (feat_id == top_data_id)) {
        // Do finite differencing.
        // Compute loss with stepsize_ added to input.
        current_blob->mutable_cpu_data()[feat_id] += stepsize_;
        Caffe::set_random_seed(seed_);
        Dtype positive_objective = layer->Forward(*bottom, top);
        positive_objective += GetObjAndGradient(top, top_id, top_data_id);
        // Compute loss with stepsize_ subtracted from input.
        current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2;
        Caffe::set_random_seed(seed_);
        Dtype negative_objective = layer->Forward(*bottom, top);
        negative_objective += GetObjAndGradient(top, top_id, top_data_id);
        // Recover original input value.
        current_blob->mutable_cpu_data()[feat_id] += stepsize_;
        estimated_gradient = (positive_objective - negative_objective) /
            stepsize_ / 2.;
      }
      Dtype computed_gradient = computed_gradients[feat_id];
      Dtype feature = current_blob->cpu_data()[feat_id];
      // LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
      //     << current_blob->cpu_diff()[feat_id];
      if (kink_ - kink_range_ > fabs(feature)
          || fabs(feature) > kink_ + kink_range_) {
        // We check relative accuracy, but for too small values, we threshold
        // the scale factor by 1.
        Dtype scale = max(
            max(fabs(computed_gradient), fabs(estimated_gradient)), 1.);
        EXPECT_NEAR(computed_gradient, estimated_gradient, threshold_ * scale)
            << "debug: (top_id, top_data_id, blob_id, feat_id)="
            << top_id << "," << top_data_id << "," << blob_id << ","
            << feat_id;
      }
      // LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
      // LOG(ERROR) << "computed gradient: " << computed_gradient
      //     << " estimated_gradient: " << estimated_gradient;
    }
  }
}
Author: ALISCIFP | Project: C3D | Lines: 98
Example 14: CHECK_EQ

void BaseConvolutionLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int first_spatial_axis = channel_axis_ + 1;
  CHECK_EQ(bottom[0]->num_axes(), first_spatial_axis + num_spatial_axes_)
      << "bottom num_axes may not change.";
  num_ = bottom[0]->count(0, channel_axis_);
  CHECK_EQ(bottom[0]->shape(channel_axis_), channels_)
      << "Input size incompatible with convolution kernel.";
  // TODO: generalize to handle inputs of different shapes.
  for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) {
    CHECK(bottom[0]->shape() == bottom[bottom_id]->shape())
        << "All inputs must have the same shape.";
  }
  // Shape the tops.
  bottom_shape_ = &bottom[0]->shape();
  compute_output_shape();
  vector<int> top_shape(bottom[0]->shape().begin(),
      bottom[0]->shape().begin() + channel_axis_);
  top_shape.push_back(num_output_);
  for (int i = 0; i < num_spatial_axes_; ++i) {
    top_shape.push_back(output_shape_[i]);
  }
  for (int top_id = 0; top_id < top.size(); ++top_id) {
    top[top_id]->Reshape(top_shape);
  }
  if (reverse_dimensions()) {
    conv_out_spatial_dim_ = bottom[0]->count(first_spatial_axis);
  } else {
    conv_out_spatial_dim_ = top[0]->count(first_spatial_axis);
  }
  col_offset_ = kernel_dim_ * conv_out_spatial_dim_;
  output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_;
  // Setup input dimensions (conv_input_shape_).
  vector<int> bottom_dim_blob_shape(1, num_spatial_axes_ + 1);
  conv_input_shape_.Reshape(bottom_dim_blob_shape);
  int* conv_input_shape_data = conv_input_shape_.mutable_cpu_data();
  for (int i = 0; i < num_spatial_axes_ + 1; ++i) {
    if (reverse_dimensions()) {
      conv_input_shape_data[i] = top[0]->shape(channel_axis_ + i);
    } else {
      conv_input_shape_data[i] = bottom[0]->shape(channel_axis_ + i);
    }
  }
  // The im2col result buffer will only hold one image at a time to avoid
  // overly large memory usage. In the special case of 1x1 convolution
  // it goes lazily unused to save memory.
  col_buffer_shape_.clear();
  col_buffer_shape_.push_back(kernel_dim_ * group_);
  for (int i = 0; i < num_spatial_axes_; ++i) {
    if (reverse_dimensions()) {
      col_buffer_shape_.push_back(input_shape(i + 1));
    } else {
      col_buffer_shape_.push_back(output_shape_[i]);
    }
  }
  col_buffer_.Reshape(col_buffer_shape_);
  bottom_dim_ = bottom[0]->count(channel_axis_);
  top_dim_ = top[0]->count(channel_axis_);
  num_kernels_im2col_ = conv_in_channels_ * conv_out_spatial_dim_;
  num_kernels_col2im_ = reverse_dimensions() ? top_dim_ : bottom_dim_;
  // Set up the all ones "bias multiplier" for adding biases by BLAS
  out_spatial_dim_ = top[0]->count(first_spatial_axis);
  if (bias_term_) {
    vector<int> bias_multiplier_shape(1, out_spatial_dim_);
    bias_multiplier_.Reshape(bias_multiplier_shape);
    caffe_set(bias_multiplier_.count(), Dtype(1),
        bias_multiplier_.mutable_cpu_data());
  }
}
Author: voidrank | Project: caffe | Lines: 69
Example 15: ALOGD

SoftHEVC::~SoftHEVC() {
    ALOGD("In SoftHEVC::~SoftHEVC");

    CHECK_EQ(deInitDecoder(), (status_t)OK);
}
Author: Khaon | Project: av | Lines: 4
Example 16: main

int main(int argc, char const* argv[])
{
  auto const config_path = osutil::get_config_dir();

  DNS::Resolver res(config_path);
  DNS_ldns::Resolver res_ldns;

  struct lkp {
    DNS::RR_type typ;
    char const* name;
  };

  lkp lookups[] = {
      {DNS::RR_type::A, "amazon.com"},
      {DNS::RR_type::A, "dee.test.digilicious.com"},
      {DNS::RR_type::A, "does-not-exist.test.digilicious.com"},
      {DNS::RR_type::A, "google-public-dns-a.google.com"},
      {DNS::RR_type::A, "google-public-dns-b.google.com"},
      {DNS::RR_type::AAAA, "google-public-dns-a.google.com"},
      {DNS::RR_type::AAAA, "google-public-dns-b.google.com"},
      {DNS::RR_type::CNAME, "cname4.digilicious.com"},
      {DNS::RR_type::CNAME, "com.digilicious.in-addr.arpa"},
      {DNS::RR_type::MX, "anyold.host"},
      {DNS::RR_type::MX, "cname.test.digilicious.com"},
      {DNS::RR_type::PTR, "com.digilicious.in-addr.arpa"},
      {DNS::RR_type::PTR, "com.google.in-addr.arpa"},
      {DNS::RR_type::TLSA, "_25._tcp.digilicious.com"},
      {DNS::RR_type::TLSA, "_443._tcp.digilicious.com"},
      {DNS::RR_type::TXT, "digilicious.com"},
  };

  for (auto const& lookup : lookups) {
    DNS::Query q(res, lookup.typ, lookup.name);
    DNS_ldns::Query q_ldns(res_ldns, lookup.typ, lookup.name);

    CHECK_EQ(q.nx_domain(), q_ldns.nx_domain());
    CHECK_EQ(q.bogus_or_indeterminate(), q_ldns.bogus_or_indeterminate());
    CHECK_EQ(q.authentic_data(), q_ldns.authentic_data());

    auto rrs{q.get_records()};
    auto rrs_ldns{q_ldns.get_records()};

    CHECK_EQ(size(rrs), size(rrs_ldns));

    std::sort(begin(rrs), end(rrs));
    std::sort(begin(rrs_ldns), end(rrs_ldns));

    auto [rr, rr_ldns] =
        std::mismatch(begin(rrs), end(rrs), begin(rrs_ldns), end(rrs_ldns));
    if (rr != end(rrs)) {
      LOG(FATAL) << *rr << " != " << *rr_ldns;
    }
  }

  // These IP addresses might be stable for a while.
  auto const goog_a{"google-public-dns-a.google.com"};
  auto const goog_b{"google-public-dns-b.google.com"};

  auto const addrs_b{res.get_strings(DNS::RR_type::A, goog_b)};
  CHECK_EQ(addrs_b.size(), 1U);
  CHECK_EQ(addrs_b[0], "8.8.4.4");

  auto const aaaaddrs_a{res.get_strings(DNS::RR_type::AAAA, goog_a)};
  CHECK_EQ(aaaaddrs_a.size(), 1U);
  CHECK_EQ(aaaaddrs_a[0], "2001:4860:4860::8888");

  auto const aaaaddrs_b{res.get_strings(DNS::RR_type::AAAA, goog_b)};
  CHECK_EQ(aaaaddrs_b.size(), 1U);
  CHECK_EQ(aaaaddrs_b[0], "2001:4860:4860::8844");

  auto const fcrdnses4{fcrdns4(res, "1.1.1.1")};
  CHECK_EQ(fcrdnses4.size(), 1);
  CHECK(Domain::match(fcrdnses4.front(), "one.one.one.one"))
      << "no match for " << fcrdnses4.front();

  auto const fcrdnses6{fcrdns6(res, "2606:4700:4700::1111")};
  CHECK_EQ(fcrdnses6.size(), 1);
  CHECK(Domain::match(fcrdnses6.front(), "one.one.one.one"))
      << "no match for " << fcrdnses6.front();

  auto const quad9{fcrdns4(res, "9.9.9.9")};
  CHECK(Domain::match(quad9.front(), "dns.quad9.net"))
      << "no match for " << quad9.front();
}
Author: gene-hightower | Project: ghsmtp | Lines: 86
Example 17: switch

void Converter::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatMediaPullerNotify:
        {
            int32_t what;
            CHECK(msg->findInt32("what", &what));

            if (!mIsPCMAudio && mEncoder == NULL) {
                ALOGV("got msg '%s' after encoder shutdown.",
                      msg->debugString().c_str());

                if (what == MediaPuller::kWhatAccessUnit) {
                    sp<ABuffer> accessUnit;
                    CHECK(msg->findBuffer("accessUnit", &accessUnit));

                    void *mbuf;
                    if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
                            && mbuf != NULL) {
                        ALOGV("releasing mbuf %p", mbuf);

                        accessUnit->meta()->setPointer("mediaBuffer", NULL);

                        static_cast<MediaBuffer *>(mbuf)->release();
                        mbuf = NULL;
                    }
                }
                break;
            }

            if (what == MediaPuller::kWhatEOS) {
                mInputBufferQueue.push_back(NULL);

                feedEncoderInputBuffers();

                scheduleDoMoreWork();
            } else {
                CHECK_EQ(what, MediaPuller::kWhatAccessUnit);

                sp<ABuffer> accessUnit;
                CHECK(msg->findBuffer("accessUnit", &accessUnit));

#if 0
                void *mbuf;
                if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
                        && mbuf != NULL) {
                    ALOGI("queueing mbuf %p", mbuf);
                }
#endif

#if ENABLE_SILENCE_DETECTION
                if (!mIsVideo) {
                    if (IsSilence(accessUnit)) {
                        if (mInSilentMode) {
                            break;
                        }

                        int64_t nowUs = ALooper::GetNowUs();

                        if (mFirstSilentFrameUs < 0ll) {
                            mFirstSilentFrameUs = nowUs;
                        } else if (nowUs >= mFirstSilentFrameUs + 10000000ll) {
                            mInSilentMode = true;
                            ALOGI("audio in silent mode now.");
                            break;
                        }
                    } else {
                        if (mInSilentMode) {
                            ALOGI("audio no longer in silent mode.");
                        }
                        mInSilentMode = false;
                        mFirstSilentFrameUs = -1ll;
                    }
                }
#endif

                mInputBufferQueue.push_back(accessUnit);

                feedEncoderInputBuffers();

                scheduleDoMoreWork();
            }
            break;
        }

        case kWhatEncoderActivity:
        {
#if 0
            int64_t whenUs;
            if (msg->findInt64("whenUs", &whenUs)) {
                int64_t nowUs = ALooper::GetNowUs();
                ALOGI("[%s] kWhatEncoderActivity after %lld us",
                      mIsVideo ? "video" : "audio", nowUs - whenUs);
            }
#endif

            mDoMoreWorkPending = false;

            if (mEncoder == NULL) {
                break;
            }
// ... the rest of this code is omitted ...
Author: menghang | Project: a31_422_v33_android_frameworks | Lines: 101
Example 18: parseAudioSpecificConfig

static status_t parseAudioSpecificConfig(ABitReader *bits, sp<ABuffer> *asc) {
    const uint8_t *dataStart = bits->data();
    size_t totalNumBits = bits->numBitsLeft();

    unsigned audioObjectType;
    CHECK_EQ(parseAudioObjectType(bits, &audioObjectType), (status_t)OK);

    unsigned samplingFreqIndex = bits->getBits(4);
    if (samplingFreqIndex == 0x0f) {
        /* unsigned samplingFrequency = */bits->getBits(24);
    }

    unsigned channelConfiguration = bits->getBits(4);

    unsigned extensionAudioObjectType = 0;
    unsigned sbrPresent = 0;

    if (audioObjectType == 5) {
        extensionAudioObjectType = audioObjectType;
        sbrPresent = 1;

        unsigned extensionSamplingFreqIndex = bits->getBits(4);
        if (extensionSamplingFreqIndex == 0x0f) {
            /* unsigned extensionSamplingFrequency = */bits->getBits(24);
        }

        CHECK_EQ(parseAudioObjectType(bits, &audioObjectType), (status_t)OK);
    }

    CHECK((audioObjectType >= 1 && audioObjectType <= 4)
        || (audioObjectType >= 6 && audioObjectType <= 7)
        || audioObjectType == 17
        || (audioObjectType >= 19 && audioObjectType <= 23));

    CHECK_EQ(parseGASpecificConfig(
                bits, audioObjectType, channelConfiguration), (status_t)OK);

    if (audioObjectType == 17
            || (audioObjectType >= 19 && audioObjectType <= 27)) {
        unsigned epConfig = bits->getBits(2);
        if (epConfig == 2 || epConfig == 3) {
            // ErrorProtectionSpecificConfig
            return ERROR_UNSUPPORTED;  // XXX to be implemented

            if (epConfig == 3) {
                unsigned directMapping = bits->getBits(1);
                CHECK_EQ(directMapping, 1u);
            }
        }
    }

    if (extensionAudioObjectType != 5 && bits->numBitsLeft() >= 16) {
        size_t numBitsLeftAtStart = bits->numBitsLeft();

        unsigned syncExtensionType = bits->getBits(11);

        if (syncExtensionType == 0x2b7) {
            LOGI("found syncExtension");

            CHECK_EQ(parseAudioObjectType(bits, &extensionAudioObjectType),
                     (status_t)OK);

#ifdef STE_HARDWARE
            if (extensionAudioObjectType == 5) {
#endif
            sbrPresent = bits->getBits(1);

            if (sbrPresent == 1) {
                unsigned extensionSamplingFreqIndex = bits->getBits(4);

                if (extensionSamplingFreqIndex == 0x0f) {
                    /* unsigned extensionSamplingFrequency = */bits->getBits(24);
                }
#ifdef STE_HARDWARE
                if (bits->numBitsLeft() >= 12) {
                    syncExtensionType = bits->getBits(11);
                    if (syncExtensionType == 0x548) {
                        /* unsigned psPresent */bits->getBits(1);
                    } else {
                        // Rewind bitstream so that the reading of second
                        // syncExtensionType has no effect
                        bits->rewindBits(11);
                    }
                }
            }
            } else if (extensionAudioObjectType == 22) {
                sbrPresent = bits->getBits(1);

                if (sbrPresent == 1) {
                    unsigned extensionSamplingFreqIndex = bits->getBits(4);

                    if (extensionSamplingFreqIndex == 0x0f) {
                        /* unsigned extensionSamplingFrequency = */bits->getBits(24);
                    }
                }

                /* unsigned extensionChannelConfiguration = */bits->getBits(4);
#endif
            }

            size_t numBitsInExtension =
                numBitsLeftAtStart - bits->numBitsLeft();

            if (numBitsInExtension & 7) {
                // Apparently an extension is always considered an even
                // multiple of 8 bits long.
// ... the rest of this code is omitted ...
Author: minicm4pico | Project: android_frameworks_base | Lines: 101
Example 19: main

int main() {
  CHECK_EQ(&x, foo());
}
Author: jevinskie | Project: smoke-cpp-tests | Lines: 3
Example 20: AddCreator

  // Adds a creator.
  static void AddCreator(const string& type, Creator creator) {
    CreatorRegistry& registry = Registry();
    CHECK_EQ(registry.count(type), 0)
        << "Layer type " << type << " already registered.";
    registry[type] = creator;
  }
Author: XinGuo1993 | Project: faster-rcnn_gx | Lines: 7
Example 21: CHECK_GE

void ARTSPConnection::addAuthentication(AString *request) {
    if (mAuthType == NONE) {
        return;
    }

    // Find the boundary between headers and the body.
    ssize_t i = request->find("\r\n\r\n");
    CHECK_GE(i, 0);

    if (mAuthType == BASIC) {
        AString tmp;
        tmp.append(mUser);
        tmp.append(":");
        tmp.append(mPass);

        AString out;
        encodeBase64(tmp.c_str(), tmp.size(), &out);

        AString fragment;
        fragment.append("Authorization: Basic ");
        fragment.append(out);
        fragment.append("\r\n");

        request->insert(fragment, i + 2);

        return;
    }

#if defined(HAVE_ANDROID_OS)
    CHECK_EQ((int)mAuthType, (int)DIGEST);

    AString method, url;
    GetMethodAndURL(*request, &method, &url);

    AString A1;
    A1.append(mUser);
    A1.append(":");
#ifndef ANDROID_DEFAULT_CODE
    A1.append(mRealm);
#else
    A1.append("Streaming Server");
#endif // #ifndef ANDROID_DEFAULT_CODE
    A1.append(":");
    A1.append(mPass);

    AString A2;
    A2.append(method);
    A2.append(":");
    A2.append(url);

    AString HA1, HA2;
    H(A1, &HA1);
    H(A2, &HA2);

    AString tmp;
    tmp.append(HA1);
    tmp.append(":");
    tmp.append(mNonce);
    tmp.append(":");
    tmp.append(HA2);

    AString digest;
    H(tmp, &digest);

    AString fragment;
    fragment.append("Authorization: Digest ");
#ifdef ANDROID_DEFAULT_CODE
    fragment.append("nonce=\"");
    fragment.append(mNonce);
    fragment.append("\", ");
#endif // #ifndef ANDROID_DEFAULT_CODE
    fragment.append("username=\"");
    fragment.append(mUser);
    fragment.append("\", ");
#ifndef ANDROID_DEFAULT_CODE
    fragment.append("realm=\"");
    fragment.append(mRealm);
    fragment.append("\", ");
    fragment.append("nonce=\"");
    fragment.append(mNonce);
    fragment.append("\", ");
#endif // #ifndef ANDROID_DEFAULT_CODE
    fragment.append("uri=\"");
    fragment.append(url);
    fragment.append("\", ");
    fragment.append("response=\"");
    fragment.append(digest);
    fragment.append("\"");
    fragment.append("\r\n");

    request->insert(fragment, i + 2);
#endif
}
Author: LuckJC | Project: pro-fw | Lines: 93
Example 22: sizeof

// ... part of the code is omitted here ...
            memcpy(mData, copy, size);
            mSize = size;

            success = removeUnsynchronizationV2_4(true /* iTunesHack */);
        }

        free(copy);
        copy = NULL;

        if (!success) {
            free(mData);
            mData = NULL;
            return false;
        }
    } else if (header.flags & 0x80) {
        removeUnsynchronization();
    }

    mFirstFrameOffset = 0;

    if (header.version_major == 3 && (header.flags & 0x40)) {
        // Version 2.3 has an optional extended header.

        if (mSize < 4) {
            free(mData);
            mData = NULL;
            return false;
        }

        size_t extendedHeaderSize = U32_AT(&mData[0]) + 4;
        if (extendedHeaderSize > mSize) {
            free(mData);
            mData = NULL;
            return false;
        }

        mFirstFrameOffset = extendedHeaderSize;

        uint16_t extendedFlags = 0;
        if (extendedHeaderSize >= 6) {
            extendedFlags = U16_AT(&mData[4]);

            if (extendedHeaderSize >= 10) {
                size_t paddingSize = U32_AT(&mData[6]);

                if (mFirstFrameOffset + paddingSize > mSize) {
                    free(mData);
                    mData = NULL;
                    return false;
                }

                mSize -= paddingSize;
            }
        }
    } else if (header.version_major == 4 && (header.flags & 0x40)) {
        // Version 2.4 has an optional extended header, that's different
        // from Version 2.3's...

        if (mSize < 4) {
            free(mData);
            mData = NULL;
            return false;
        }

        size_t ext_size;
        if (!ParseSyncsafeInteger(mData, &ext_size)) {
            free(mData);
            mData = NULL;
            return false;
        }

        if (ext_size < 6 || ext_size > mSize) {
            free(mData);
            mData = NULL;
            return false;
        }

        mFirstFrameOffset = ext_size;
    }

    if (header.version_major == 2) {
        mVersion = ID3_V2_2;
    } else if (header.version_major == 3) {
        mVersion = ID3_V2_3;
    } else {
        CHECK_EQ(header.version_major, 4);
        mVersion = ID3_V2_4;
    }

    return true;
}
Author: tuxafgmur | Project: DhollmenK_frameworks | Lines: 101
Example 23: CHECK

void ARTSPConnection::onCompleteConnection(const sp<AMessage> &msg) {
    sp<AMessage> reply;
    CHECK(msg->findMessage("reply", &reply));

    int32_t connectionID;
    CHECK(msg->findInt32("connection-id", &connectionID));

    if ((connectionID != mConnectionID) || mState != CONNECTING) {
        // While we were attempting to connect, the attempt was
        // cancelled.
        reply->setInt32("result", -ECONNABORTED);
        reply->post();
        return;
    }

    struct timeval tv;
    tv.tv_sec = 0;
    tv.tv_usec = kSelectTimeoutUs;

    fd_set ws;
    FD_ZERO(&ws);
    FD_SET(mSocket, &ws);

    int res = select(mSocket + 1, NULL, &ws, NULL, &tv);
    CHECK_GE(res, 0);

    if (res == 0) {
        // Timed out. Not yet connected.
#ifndef ANDROID_DEFAULT_CODE
        int64_t then, now = ALooper::GetNowUs();
        if (msg->findInt64("timestamp", &then)
                && now - then > kRequestTimeout) {
            ALOGE("connection timeout %lld > %lld", now, then);
            reply->setInt32("result", -110 /*ETIMEDOUT*/);
            reply->post();
            mState = DISCONNECTED;
            close(mSocket);
            mSocket = -1;
            return;
        }
        if (mExited)
            return;
#endif // #ifndef ANDROID_DEFAULT_CODE
        msg->post();
        return;
    }

    int err;
    socklen_t optionLen = sizeof(err);
    CHECK_EQ(getsockopt(mSocket, SOL_SOCKET, SO_ERROR, &err, &optionLen), 0);
    CHECK_EQ(optionLen, (socklen_t)sizeof(err));

    if (err != 0) {
        ALOGE("err = %d (%s)", err, strerror(err));

        reply->setInt32("result", -err);

        mState = DISCONNECTED;
        if (mUIDValid) {
            HTTPBase::UnRegisterSocketUserTag(mSocket);
            HTTPBase::UnRegisterSocketUserMark(mSocket);
        }
        close(mSocket);
        mSocket = -1;
    } else {
        reply->setInt32("result", OK);
        mState = CONNECTED;
        mNextCSeq = 1;

        postReceiveReponseEvent();
    }

    reply->post();
}
Author: LuckJC | Project: pro-fw | Lines: 73
Example 24: CHECK_EQ

void MKLPoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
      << "corresponding to (num, channels, height, width)";

  bool shape_changed = true;
  if (channels_ == bottom[0]->channels() &&
      height_ == bottom[0]->height() &&
      width_ == bottom[0]->width() &&
      num_ == bottom[0]->num())
    shape_changed = false;

  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  num_ = bottom[0]->num();
  if (global_pooling_) {
    kernel_h_ = bottom[0]->height();
    kernel_w_ = bottom[0]->width();
  }
  pooled_height_ = static_cast<int>(ceil(static_cast<float>(
      height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
  pooled_width_ = static_cast<int>(ceil(static_cast<float>(
      width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
  if (pad_h_ || pad_w_) {
    // If we have padding, ensure that the last pooling starts strictly
    // inside the image (instead of at the padding); otherwise clip the last.
    if ((pooled_height_ - 1) * stride_h_ >= height_ + pad_h_) {
      --pooled_height_;
    }
    if ((pooled_width_ - 1) * stride_w_ >= width_ + pad_w_) {
      --pooled_width_;
    }
    CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_);
    CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_);
  }
  top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_);
  if (top.size() > 1) {
    (reinterpret_cast<Blob<size_t>* >(top[1]))->Reshape(bottom[0]->num(),
        channels_, pooled_height_, pooled_width_);
  }
  // If max/min/avg pooling, we will initialize the vector index part.
  if (top.size() == 1) {
    max_idx_.Reshape(bottom[0]->num(), channels_,
                     pooled_height_, pooled_width_);
  }
  // If stochastic pooling, we will initialize the random index part.
  if (this->layer_param_.pooling_param().pool() ==
      PoolingParameter_PoolMethod_STOCHASTIC) {
    rand_idx_.Reshape(bottom[0]->num(), channels_,
                      pooled_height_, pooled_width_);
  }

  if (shape_changed) {
    // Recreate MKL layout
    size_t dim = 4;
    size_t src_sizes[4], src_strides[4];

    src_sizes[0] = bottom[0]->width();
    src_sizes[1] = bottom[0]->height();
    src_sizes[2] = bottom[0]->channels();
    src_sizes[3] = bottom[0]->num();

    src_strides[0] = 1;
    src_strides[1] = src_sizes[0];
    src_strides[2] = src_sizes[0]*src_sizes[1];
    src_strides[3] = src_sizes[0]*src_sizes[1]*src_sizes[2];

    dnnError_t e;
    e = dnnLayoutDelete<Dtype>(fwd_bottom_data->layout_usr);
    CHECK_EQ(e, E_SUCCESS);
    e = dnnLayoutCreate<Dtype>(&fwd_bottom_data->layout_usr,
                               dim, src_sizes, src_strides);
    CHECK_EQ(e, E_SUCCESS);
  }
}
Author: crobertob | Project: caffe | Lines: 79
Example 25: ALOGW

// ... part of the code is omitted here ...
        }
    }

    AString line;
    ssize_t lastDictIndex = -1;

    for (;;) {
        if (!receiveLine(&line)) {
            break;
        }

        if (line.empty()) {
            break;
        }

#ifndef ANDROID_DEFAULT_CODE
        ALOGI("line: '%s'", line.c_str());
#else
        ALOGV("line: '%s'", line.c_str());
#endif

        if (line.c_str()[0] == ' ' || line.c_str()[0] == '\t') {
            // Support for folded header values.

            if (lastDictIndex < 0) {
                // First line cannot be a continuation of the previous one.
                return false;
            }

            AString &value = response->mHeaders.editValueAt(lastDictIndex);
            value.append(line);

            continue;
        }

        ssize_t colonPos = line.find(":");
        if (colonPos < 0) {
            // Malformed header line.
            return false;
        }

        AString key(line, 0, colonPos);
        key.trim();
        key.tolower();

        line.erase(0, colonPos + 1);

        lastDictIndex = response->mHeaders.add(key, line);
    }

    for (size_t i = 0; i < response->mHeaders.size(); ++i) {
        response->mHeaders.editValueAt(i).trim();
    }

    unsigned long contentLength = 0;

    ssize_t i = response->mHeaders.indexOfKey("content-length");

    if (i >= 0) {
        AString value = response->mHeaders.valueAt(i);
        if (!ParseSingleUnsignedLong(value.c_str(), &contentLength)) {
            return false;
        }
    }

    if (contentLength > 0) {
        response->mContent = new ABuffer(contentLength);

        if (receive(response->mContent->data(), contentLength) != OK) {
            return false;
        }
    }

    if (response->mStatusCode == 401) {
        if (mAuthType == NONE && mUser.size() > 0
                && parseAuthMethod(response)) {
            ssize_t i;
            CHECK_EQ((status_t)OK, findPendingRequest(response, &i));
            CHECK_GE(i, 0);

            sp<AMessage> reply = mPendingRequests.valueAt(i);
            mPendingRequests.removeItemsAt(i);

            AString request;
            CHECK(reply->findString("original-request", &request));

            sp<AMessage> msg = new AMessage(kWhatSendRequest, id());
            msg->setMessage("reply", reply);
            msg->setString("request", request.c_str(), request.size());

            ALOGI("re-sending request with authentication headers...");
            onSendRequest(msg);

            return true;
        }
    }

    return isRequest
        ? handleServerRequest(response)
        : notifyResponseListener(response);
}
Author: LuckJC | Project: pro-fw | Lines: 101
Example 26: switch

void MKLPoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // We'll output the mask to top[1] if it's of size >1.
  size_t* mask = NULL;  // suppress warnings about uninitialized variables
  const bool use_top_mask = top.size() > 1;

  dnnAlgorithm_t algorithm;

  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    algorithm = dnnAlgorithmPoolingMax;
    break;
  case PoolingParameter_PoolMethod_AVE:
    algorithm = dnnAlgorithmPoolingAvg;
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    NOT_IMPLEMENTED;
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }

  dnnError_t status;
  void* pooling_res[dnnResourceNumber];

  mask = (use_top_mask) ?
      reinterpret_cast<size_t*>(top[1]->mutable_cpu_data()) :
      (max_idx_.mutable_cpu_data());
  pooling_res[dnnResourceWorkspace] = reinterpret_cast<void*>(mask);

  void* bottom_data =
      reinterpret_cast<void *>(const_cast<Dtype*>(bottom[0]->prv_data()));
  if (NULL == bottom_data) {
    bottom_data =
        reinterpret_cast<void *>(const_cast<Dtype*>(bottom[0]->cpu_data()));
    if (NULL == poolingFwd) {
      // Now create poolingFwd
      status = dnnPoolingCreateForward<Dtype>(&poolingFwd, NULL,
              algorithm, fwd_bottom_data->layout_usr,
              kernel_size, kernel_stride, src_offset, dnnBorderZeros);
      CHECK_EQ(status, E_SUCCESS);

      // Now create poolingBwd
      status = dnnPoolingCreateBackward<Dtype>(&poolingBwd, NULL,
              algorithm, fwd_bottom_data->layout_usr,
              kernel_size, kernel_stride, src_offset, dnnBorderZeros);
      CHECK_EQ(status, E_SUCCESS);
    }
  } else if (NULL == poolingFwd) {
    // Is it the first pass? Create a primitive.
    CHECK_EQ((bottom[0]->get_prv_descriptor_data())->get_descr_type(),
             PrvMemDescr::PRV_DESCR_MKL2017);
    shared_ptr<MKLData<Dtype> > mem_descr =
        boost::static_pointer_cast<MKLData<Dtype> >(
            bottom[0]->get_prv_descriptor_data());
    CHECK(mem_descr != NULL);

    DLOG(INFO) << "Using layout of " << mem_descr->name
               << " as input layout for " << this->layer_param_.name();

    // copy shared_ptr
    fwd_bottom_data = mem_descr;

    // Now create poolingFwd
    status = dnnPoolingCreateForward<Dtype>(&poolingFwd, NULL,
            algorithm, fwd_bottom_data->layout_int,
            kernel_size, kernel_stride, src_offset, dnnBorderZeros);
    CHECK_EQ(status, E_SUCCESS);

    status = dnnLayoutCreateFromPrimitive<Dtype>(&fwd_top_data->layout_int,
            poolingFwd, dnnResourceDst);
    CHECK_EQ(status, 0)
        << "Failed dnnLayoutCreateFromPrimitive with status "
        << status << "\n";
    fwd_top_data->create_conversions();

    // Now create poolingBwd
    status = dnnPoolingCreateBackward<Dtype>(&poolingBwd, NULL,
            algorithm, fwd_bottom_data->layout_int,
            kernel_size, kernel_stride, src_offset, dnnBorderZeros);
    CHECK_EQ(status, E_SUCCESS);

    status = dnnLayoutCreateFromPrimitive<Dtype>(&bwd_top_diff->layout_int,
            poolingFwd, dnnResourceDst);
    CHECK_EQ(status, E_SUCCESS);

    status = dnnLayoutCreateFromPrimitive<Dtype>(&bwd_bottom_diff->layout_int,
            poolingFwd, dnnResourceSrc);
    CHECK_EQ(status, E_SUCCESS);

    bwd_top_diff->create_conversions();
    bwd_bottom_diff->create_conversions();
  }

  pooling_res[dnnResourceSrc] = bottom_data;
  if (fwd_top_data->convert_from_int) {
    top[0]->set_prv_data(fwd_top_data->prv_ptr(), fwd_top_data, false);
    pooling_res[dnnResourceDst] = reinterpret_cast<void *>(
        const_cast<Dtype*>(fwd_top_data->prv_ptr()));
// ... the rest of this code is omitted ...
Developer ID: crobertob, Project: caffe, Lines: 101
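Every primitive-creation call above is immediately followed by CHECK_EQ(status, E_SUCCESS), which turns a silently ignored error code into a logged abort, optionally with extra diagnostics streamed onto the macro. Here is a minimal sketch of that status-checking pattern, assuming glog; dummy_init() and the my_error_t enum are made-up stand-ins for the dnn* API, not real MKL calls.

#include <glog/logging.h>

enum my_error_t { MY_SUCCESS = 0, MY_FAILURE = -1 };

// Hypothetical initializer mimicking dnnPoolingCreateForward's contract:
// fill an opaque handle and return a status code.
my_error_t dummy_init(void **handle) {
    *handle = reinterpret_cast<void *>(0x1);  // pretend creation succeeded
    return MY_SUCCESS;
}

int main(int argc, char *argv[]) {
    google::InitGoogleLogging(argv[0]);
    void *handle = NULL;
    my_error_t status = dummy_init(&handle);
    // The streamed message is evaluated and printed only when the check fails.
    CHECK_EQ(status, MY_SUCCESS) << "dummy_init failed with status " << status;
    return 0;
}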
Example 27: UNUSED

void SoftHEVC::onQueueFilled(OMX_U32 portIndex) {
    UNUSED(portIndex);

    if (mOutputPortSettingsChange != NONE) {
        return;
    }

    List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
    List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);

    /* If input EOS is seen and decoder is not in flush mode,
     * set the decoder in flush mode.
     * There can be a case where EOS is sent along with last picture data
     * In that case, only after decoding that input data, decoder has to be
     * put in flush. This case is handled here */
    if (mReceivedEOS && !mIsInFlush) {
        setFlushMode();
    }

    while (!outQueue.empty()) {
        BufferInfo *inInfo;
        OMX_BUFFERHEADERTYPE *inHeader;

        BufferInfo *outInfo;
        OMX_BUFFERHEADERTYPE *outHeader;
        size_t timeStampIx;

        inInfo = NULL;
        inHeader = NULL;

        if (!mIsInFlush) {
            if (!inQueue.empty()) {
                inInfo = *inQueue.begin();
                inHeader = inInfo->mHeader;
            } else {
                break;
            }
        }

        outInfo = *outQueue.begin();
        outHeader = outInfo->mHeader;
        outHeader->nFlags = 0;
        outHeader->nTimeStamp = 0;
        outHeader->nOffset = 0;

        if (inHeader != NULL && (inHeader->nFlags & OMX_BUFFERFLAG_EOS)) {
            ALOGD("EOS seen on input");
            mReceivedEOS = true;
            if (inHeader->nFilledLen == 0) {
                inQueue.erase(inQueue.begin());
                inInfo->mOwnedByUs = false;
                notifyEmptyBufferDone(inHeader);
                inHeader = NULL;
                setFlushMode();
            }
        }

        // When there is an init required and the decoder is not in flush mode,
        // update output port's definition and reinitialize decoder.
        if (mInitNeeded && !mIsInFlush) {
            bool portWillReset = false;
            handlePortSettingsChange(&portWillReset, mNewWidth, mNewHeight);

            CHECK_EQ(reInitDecoder(), (status_t)OK);
            return;
        }

        /* Get a free slot in timestamp array to hold input timestamp */
        {
            size_t i;
            timeStampIx = 0;
            for (i = 0; i < MAX_TIME_STAMPS; i++) {
                if (!mTimeStampsValid[i]) {
                    timeStampIx = i;
                    break;
                }
            }
            if (inHeader != NULL) {
                mTimeStampsValid[timeStampIx] = true;
                mTimeStamps[timeStampIx] = inHeader->nTimeStamp;
            }
        }

        {
            ivd_video_decode_ip_t s_dec_ip;
            ivd_video_decode_op_t s_dec_op;

            WORD32 timeDelay, timeTaken;
            size_t sizeY, sizeUV;

            setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);

            GETTIME(&mTimeStart, NULL);
            /* Compute time elapsed between end of previous decode()
             * to start of current decode() */
            TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);

            IV_API_CALL_STATUS_T status;
            status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
            // FIXME: Compare |status| to IHEVCD_UNSUPPORTED_DIMENSIONS, which is not one of the
//......... part of the code omitted here .........
Developer ID: Khaon, Project: av, Lines: 101
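Note how CHECK_EQ(reInitDecoder(), (status_t)OK) asserts directly on a side-effecting call: unlike assert(), a CHECK_EQ of this style is not compiled out in release builds, so the call runs exactly once in every configuration. A minimal sketch of that idiom follows, assuming glog; reInit() is a hypothetical stand-in for reInitDecoder().

#include <glog/logging.h>

typedef int status_t;
static const status_t OK = 0;

// Hypothetical re-initialization routine; pretend it always succeeds.
status_t reInit() { return OK; }

int main(int argc, char *argv[]) {
    google::InitGoogleLogging(argv[0]);
    // Safe to put the side-effecting call inside the macro: the argument is
    // evaluated once, and the check is active in all build types.
    CHECK_EQ(reInit(), (status_t)OK);
    return 0;
}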
Example 28: LOG

void MultiLabelLossLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    // First, compute the diff
    const int count = bottom[0]->count();
    const int num = bottom[0]->num();
    const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data();
    const Dtype* target = bottom[1]->cpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    int dim = count / num;
/*  Dtype weight[] = {0.497,  0.3288, 0.1023, 0.0617, 0.1966, 0.1994, 0.8608,
                      0.8530, 0.1375, 0.1339, 0.1016, 0.0692, 0.3061, 0.2962,
                      0.0402, 0.2375, 0.5485, 0.2957, 0.0839, 0.7494, 0.2759,
                      0.0266, 0.0765, 0.0204, 0.3633, 0.0347, 0.1418, 0.0455,
                      0.2161, 0.0172, 0.0291, 0.5151, 0.0842, 0.4556, 0.0118}; */
//  Dtype weight[] = {0.4865, 0.0789, 0.6699, 0.1386, 0.1123, 0.2427, 0.7945,
//                    0.1314, 0.1352, 0.1820, 0.0539};
    int weight_size = this->layer_param_.multilabel_loss_param().weight_size();
    Dtype* weight = NULL;
    if (weight_size > 0) {
      CHECK_EQ(weight_size, dim) << "weight must have the same size as channels.";
      weight = new Dtype[dim];
      for (int i = 0; i < dim; i++)
        weight[i] = this->layer_param_.multilabel_loss_param().weight(i);
    } else {
      weight = new Dtype[dim];
      for (int i = 0; i < dim; i++)
        weight[i] = 0.5;
    }
    for (int i = 0; i < count; i++) {
      if (target[i] != 0) {
        if (target[i] > 0) {
          bottom_diff[i] = (sigmoid_output_data[i] - 1) * exp(1 - weight[i % dim]);
//        bottom_diff[i] = sigmoid_output_data[i] - 1;
        } else {
          bottom_diff[i] = sigmoid_output_data[i] * exp(weight[i % dim]);
//        bottom_diff[i] = sigmoid_output_data[i];
        }
      } else {
        bottom_diff[i] = 0;
      }
    }
/*  for (int i = 0; i < count; ++i) {
      if (target[i] != 0) {
        bottom_diff[i] = sigmoid_output_data[i] - (target[i] > 0);
      } else {
        bottom_diff[i] = 0;
      }
    } */
    // Scale down gradient
    caffe_scal(count, Dtype(1) / num, bottom_diff);
    const Dtype loss_weight = top[0]->mutable_cpu_diff()[0];
    caffe_scal(count, loss_weight, bottom_diff);
    delete [] weight;
  }
}
Developer ID: dangweili, Project: caffe_openblas, Lines: 74
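Here CHECK_EQ(weight_size, dim) validates a user-supplied parameter list against the expected per-sample dimensionality before it is indexed, so a misconfigured model fails fast instead of reading out of bounds later in the loop. A minimal sketch of the same validation, assuming glog; the vector is a made-up stand-in for the layer parameter.

#include <glog/logging.h>
#include <vector>

int main(int argc, char *argv[]) {
    google::InitGoogleLogging(argv[0]);
    const int channels = 4;                       // expected dimensionality
    std::vector<double> weight(channels, 0.5);    // user-supplied weights
    // Validate the configuration up front; a mismatch would otherwise
    // surface much later as a hard-to-debug out-of-bounds access.
    CHECK_EQ(static_cast<int>(weight.size()), channels)
        << "weight must have the same size as channels.";
    return 0;
}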
Example 29: CHECK

void TiedConvolutionLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype> *> &bottom, vector<Blob<Dtype> *> *top) {
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  CHECK(!conv_param.has_kernel_size() !=
        !(conv_param.has_kernel_h() && conv_param.has_kernel_w()))
      << "Filter size is kernel_size OR kernel_h and kernel_w; not both";
  CHECK(conv_param.has_kernel_size() ||
        (conv_param.has_kernel_h() && conv_param.has_kernel_w()))
      << "For non-square filters both kernel_h and kernel_w are required.";
  CHECK((!conv_param.has_pad() && conv_param.has_pad_h() &&
         conv_param.has_pad_w()) ||
        (!conv_param.has_pad_h() && !conv_param.has_pad_w()))
      << "pad is pad OR pad_h and pad_w are required.";
  CHECK((!conv_param.has_stride() && conv_param.has_stride_h() &&
         conv_param.has_stride_w()) ||
        (!conv_param.has_stride_h() && !conv_param.has_stride_w()))
      << "Stride is stride OR stride_h and stride_w are required.";
  if (conv_param.has_kernel_size()) {
    kernel_h_ = kernel_w_ = conv_param.kernel_size();
  } else {
    kernel_h_ = conv_param.kernel_h();
    kernel_w_ = conv_param.kernel_w();
  }
  CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
  CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
  if (!conv_param.has_pad_h()) {
    pad_h_ = pad_w_ = conv_param.pad();
  } else {
    pad_h_ = conv_param.pad_h();
    pad_w_ = conv_param.pad_w();
  }
  if (!conv_param.has_stride_h()) {
    stride_h_ = stride_w_ = conv_param.stride();
  } else {
    stride_h_ = conv_param.stride_h();
    stride_w_ = conv_param.stride_w();
  }
  // Configure output channels and groups.
  channels_ = bottom[0]->channels();
  num_output_ = conv_param.num_output();
  CHECK_GT(num_output_, 0);
  group_ = conv_param.group();
  CHECK_EQ(channels_ % group_, 0);
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";
  // Handle the parameters: weights and biases.
  // - blobs_[0] holds the filter weights
  // - blobs_[1] holds the biases (optional)
  bias_term_ = conv_param.bias_term();
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize the weight:
    // output channels x input channels per-group x kernel height x kernel width
    this->blobs_[0].reset(new Blob<Dtype>(
        num_output_, channels_ / group_, kernel_h_, kernel_w_));
    // fill the weights
    shared_ptr<Filler<Dtype> > weight_filler(
        GetFiller<Dtype>(conv_param.weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the biases:
    // 1 x 1 x 1 x output channels.
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, num_output_));
      shared_ptr<Filler<Dtype> > bias_filler(
          GetFiller<Dtype>(conv_param.bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }
  // Propagate gradients to the parameters (as directed by backward pass).
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
Developer ID: K0stIa, Project: si-convnet, Lines: 79
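The two CHECK_EQ calls on channels_ % group_ and num_output_ % group_ express divisibility constraints: grouped convolution can only partition the channels evenly when both remainders are zero. A minimal sketch of these checks in isolation, assuming glog; the constants are purely illustrative.

#include <glog/logging.h>

int main(int argc, char *argv[]) {
    google::InitGoogleLogging(argv[0]);
    const int channels = 6;    // input channels (illustrative)
    const int num_output = 9;  // output channels (illustrative)
    const int group = 3;       // number of convolution groups
    // Both channel counts must be divisible by the group count.
    CHECK_EQ(channels % group, 0);
    CHECK_EQ(num_output % group, 0)
        << "Number of output should be multiples of group.";
    return 0;
}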
Note: The CHECK_EQ examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.