This page collects typical usage examples of the C++ CHECK_GE function. If you have been wondering what exactly CHECK_GE does in C++, how to call it, or where to find real-world uses, the hand-picked code samples below may help. In total, 29 CHECK_GE examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples. (A short refresher on the CHECK macro family itself follows example 1 below.)

示例1 / Example 1: caffe_set

void RegionPoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_rois = bottom[1]->cpu_data();
  // Number of ROIs
  int num_rois = bottom[1]->num();
  int batch_size = bottom[0]->num();
  int top_count = top[0]->count();
  Dtype* top_data = top[0]->mutable_cpu_data();
  caffe_set(top_count, Dtype(-FLT_MAX), top_data);
  int* argmax_data = max_idx_.mutable_cpu_data();
  caffe_set(top_count, -1, argmax_data);

  // For each ROI R = [batch_index, x_outer_1, y_outer_1, x_outer_2, y_outer_2,
  //                   x_inner_1, y_inner_1, x_inner_2, y_inner_2], where
  // R_outer = [x_outer_1, y_outer_1, x_outer_2, y_outer_2] is the outer
  // rectangle of the region and
  // R_inner = [x_inner_1, y_inner_1, x_inner_2, y_inner_2] is the inner
  // rectangle of the region:
  // max pool over R while ignoring (setting to zero) the activations that lie
  // inside the inner rectangle R_inner.
  for (int n = 0; n < num_rois; ++n) {
    int roi_batch_ind = bottom_rois[0];
    // outer rectangle of the region
    int roi_start_w = static_cast<int>(floor(((bottom_rois[1] + 1 + offset_)
        * spatial_scale_) + 0.5));
    int roi_start_h = static_cast<int>(floor(((bottom_rois[2] + 1 + offset_)
        * spatial_scale_) + 0.5));
    int roi_end_w = static_cast<int>(ceil(((bottom_rois[3] + 1 - offset_)
        * spatial_scale_) - 0.5));
    int roi_end_h = static_cast<int>(ceil(((bottom_rois[4] + 1 - offset_)
        * spatial_scale_) - 0.5));
    // inner rectangle of the region
    int roi_start_w_in = static_cast<int>(floor(((bottom_rois[5] + 1 + offset_)
        * spatial_scale_) + 0.5));
    int roi_start_h_in = static_cast<int>(floor(((bottom_rois[6] + 1 + offset_)
        * spatial_scale_) + 0.5));
    int roi_end_w_in = static_cast<int>(ceil(((bottom_rois[7] + 1 - offset_)
        * spatial_scale_) - 0.5));
    int roi_end_h_in = static_cast<int>(ceil(((bottom_rois[8] + 1 - offset_)
        * spatial_scale_) - 0.5));

    if (roi_start_w > roi_end_w) {
      roi_start_w = (roi_start_w + roi_end_w) / 2;
      roi_end_w = roi_start_w;
    }
    if (roi_start_h > roi_end_h) {
      roi_start_h = (roi_start_h + roi_end_h) / 2;
      roi_end_h = roi_start_h;
    }
    if (roi_start_w_in > roi_end_w_in) {
      roi_start_w_in = (roi_start_w_in + roi_end_w_in) / 2;
      roi_end_w_in = roi_start_w_in;
    }
    if (roi_start_h_in > roi_end_h_in) {
      roi_start_h_in = (roi_start_h_in + roi_end_h_in) / 2;
      roi_end_h_in = roi_start_h_in;
    }

    CHECK_GE(roi_batch_ind, 0);
    CHECK_LT(roi_batch_ind, batch_size);

    const int roi_height = max(roi_end_h - roi_start_h + 1, 1);
    const int roi_width = max(roi_end_w - roi_start_w + 1, 1);
    const Dtype bin_size_h = static_cast<Dtype>(roi_height)
        / static_cast<Dtype>(pooled_height_);
    const Dtype bin_size_w = static_cast<Dtype>(roi_width)
        / static_cast<Dtype>(pooled_width_);

    const Dtype* batch_data = bottom_data + bottom[0]->offset(roi_batch_ind);

    for (int c = 0; c < channels_; ++c) {
      for (int ph = 0; ph < pooled_height_; ++ph) {
        for (int pw = 0; pw < pooled_width_; ++pw) {
          // Compute pooling region for this output unit:
          //   start (included) = floor(ph * roi_height / pooled_height_)
          //   end (excluded)   = ceil((ph + 1) * roi_height / pooled_height_)
          const int hstart = min(height_, max(0,
              static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h))
              + roi_start_h));
          const int hend = min(height_, max(0,
              static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h))
              + roi_start_h));
          const int wstart = min(width_, max(0,
              static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w))
              + roi_start_w));
          const int wend = min(width_, max(0,
              static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w))
              + roi_start_w));

          const int pool_index = ph * pooled_width_ + pw;
          top_data[pool_index] = 0;
          argmax_data[pool_index] = -1;

          for (int h = hstart; h < hend; ++h) {
            for (int w = wstart; w < wend; ++w) {
              // Only pool activations that are not inside the inner rectangle
              // of the region.
              if (!(w > roi_start_w_in && w < roi_end_w_in &&
                    h > roi_start_h_in && h < roi_end_h_in)) {
                const int index = h * width_ + w;
                if (batch_data[index] > top_data[pool_index]) {
                  top_data[pool_index] = batch_data[index];
                  argmax_data[pool_index] = index;
                }
              }
            }
          }
        }
      }
      // Increment all data pointers by one channel
      batch_data += bottom[0]->offset(0, 1);
      top_data += top[0]->offset(0, 1);
      argmax_data += max_idx_.offset(0, 1);
    }
    // Increment ROI data pointer
    // ... (the source page omits the rest of this example) ...
Developer ID: Jieeee, Project: caffe_LocNet, Lines of code: 101
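Since the page never explains the macro family itself: CHECK_GE and its relatives are glog-style assertion macros, the flavor used both by Caffe and by the Android media code shown later on this page. The following minimal, self-contained sketch shows how they behave; the function name and the values passed in are invented for illustration.

// Minimal sketch of the glog-style CHECK macros (assumes glog is available).
// CHECK_GE(a, b) logs a FATAL message and aborts when a < b; unlike the
// DCHECK_* variants, it stays active in release builds.
#include <glog/logging.h>

int checked_batch_index(int index, int batch_size) {
  CHECK_GE(index, 0) << "batch index must be non-negative, got " << index;
  CHECK_LT(index, batch_size) << "batch index " << index
                              << " out of range [0, " << batch_size << ")";
  return index;
}

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  checked_batch_index(3, 8);     // passes silently
  // checked_batch_index(-1, 8); // would log FATAL and abort the process
  return 0;
}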
示例2: CHECK_LEvoid DataTransformer<Dtype>::Transform(Blob<Dtype>* input_blob, Blob<Dtype>* transformed_blob) { const int crop_size = param_.crop_size(); const int input_num = input_blob->num(); const int input_channels = input_blob->channels(); const int input_height = input_blob->height(); const int input_width = input_blob->width(); if (transformed_blob->count() == 0) { // Initialize transformed_blob with the right shape. if (crop_size) { transformed_blob->Reshape(input_num, input_channels, crop_size, crop_size); } else { transformed_blob->Reshape(input_num, input_channels, input_height, input_width); } } const int num = transformed_blob->num(); const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); const int size = transformed_blob->count(); CHECK_LE(input_num, num); CHECK_EQ(input_channels, channels); CHECK_GE(input_height, height); CHECK_GE(input_width, width); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); const bool has_mean_values = mean_values_.size() > 0; int h_off = 0; int w_off = 0; if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); // We only do random crop when we do training. if (phase_ == TRAIN) { h_off = Rand(input_height - crop_size + 1); w_off = Rand(input_width - crop_size + 1); } else { h_off = (input_height - crop_size) / 2; w_off = (input_width - crop_size) / 2; } } else { CHECK_EQ(input_height, height); CHECK_EQ(input_width, width); } Dtype* input_data = input_blob->mutable_cpu_data(); if (has_mean_file) { CHECK_EQ(input_channels, data_mean_.channels()); CHECK_EQ(input_height, data_mean_.height()); CHECK_EQ(input_width, data_mean_.width()); for (int n = 0; n < input_num; ++n) { int offset = input_blob->offset(n); caffe_sub(data_mean_.count(), input_data + offset, data_mean_.cpu_data(), input_data + offset); } } if (has_mean_values) { CHECK(mean_values_.size() == 1 || mean_values_.size() == input_channels) << "Specify either 1 mean_value or as many as channels: " << input_channels; if (mean_values_.size() == 1) { caffe_add_scalar(input_blob->count(), -(mean_values_[0]), input_data); } else { for (int n = 0; n < input_num; ++n) { for (int c = 0; c < input_channels; ++c) { int offset = input_blob->offset(n, c); caffe_add_scalar(input_height * input_width, -(mean_values_[c]), input_data + offset); } } } } Dtype* transformed_data = transformed_blob->mutable_cpu_data(); for (int n = 0; n < input_num; ++n) { int top_index_n = n * channels; int data_index_n = n * channels; for (int c = 0; c < channels; ++c) { int top_index_c = (top_index_n + c) * height; int data_index_c = (data_index_n + c) * input_height + h_off; for (int h = 0; h < height; ++h) { int top_index_h = (top_index_c + h) * width; int data_index_h = (data_index_c + h) * input_width + w_off; if (do_mirror) { int top_index_w = top_index_h + width - 1; for (int w = 0; w < width; ++w) { transformed_data[top_index_w-w] = input_data[data_index_h + w]; } } else { for (int w = 0; w < width; ++w) {//.........这里部分代码省略.........
Developer ID: runauto, Project: caffe-augmentation-1, Lines of code: 101
示例3: CHECK_LEvoid DataTransformer<Dtype>::Transform(const Datum& datum, Blob<Dtype>* transformed_blob, int &h_off, int &w_off, int &do_mirror, vector<float> & col_ranges) { const int img_channels = datum.channels(); const int img_height = datum.height(); const int img_width = datum.width(); const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); const int num = transformed_blob->num(); //CHECK_EQ(channels, img_channels); CHECK_LE(height, img_height); CHECK_LE(width, img_width); CHECK_GE(num, 1); CHECK_EQ(img_channels, col_ranges.size()); const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool has_mean_file = param_.has_mean_file(); const bool has_mean_values = mean_values_.size() > 0; if (do_mirror == -1) { do_mirror = param_.mirror() && Rand(2); } CHECK_GT(img_channels, 0); CHECK_GE(img_height, crop_size); CHECK_GE(img_width, crop_size); Dtype* mean = NULL; if (has_mean_file) { CHECK_EQ(img_channels, data_mean_.channels()); if( (img_height == data_mean_.height() && img_width == data_mean_.width() ) || (crop_size == data_mean_.height() && crop_size == data_mean_.width() ) ) { mean = data_mean_.mutable_cpu_data(); } else { CHECK_EQ(img_height, data_mean_.height()); CHECK_EQ(img_width, data_mean_.width()); } } if (has_mean_values) { CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) << "Specify either 1 mean_value or as many as channels: " << img_channels; if (img_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < img_channels; ++c) { mean_values_.push_back(mean_values_[0]); } } } //cv::Mat cv_cropped_img = cv_img; if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); // We only do random crop when we do training. if (phase_ == TRAIN) { if (h_off == -1 && w_off == -1) { h_off = Rand(img_height - crop_size + 1); w_off = Rand(img_width - crop_size + 1); } } else { if (h_off == -1 && w_off == -1) { h_off = (img_height - crop_size) / 2; w_off = (img_width - crop_size) / 2; } } //cv::Rect roi(w_off, h_off, crop_size, crop_size); //cv_cropped_img = cv_img(roi); } else { h_off = 0; w_off = 0; CHECK_EQ(img_height, height); CHECK_EQ(img_width, width); } //CHECK(cv_cropped_img.data); Dtype* transformed_data = transformed_blob->mutable_cpu_data(); int top_index; // debug /*char ss1[1010]; sprintf(ss1,"/home/xiaolonw/opt_flows/temp_results/sth.jpg"); cv::Mat img(Size(crop_size, crop_size), CV_8UC1);*/ for (int h = 0; h < height; ++h) { int img_index = 0; for (int w = 0; w < width; ++w) { for (int c = 0; c < img_channels; ++c) { float now_col = col_ranges[c];//.........这里部分代码省略.........
Developer ID: Geekking, Project: lisa-caffe-lstm, Lines of code: 101
示例4: CHECK_GEvoid BaseConvolutionNDLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { ConvolutionParameter conv_param = this->layer_param_.convolution_param(); channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis()); const int first_spatial_axis = channel_axis_ + 1; const int num_axes = bottom[0]->num_axes(); num_spatial_axes_ = num_axes - first_spatial_axis; CHECK_GE(num_spatial_axes_, 1); num_ = bottom[0]->count(0, channel_axis_); CHECK_EQ(bottom[0]->shape(channel_axis_), channels_) << "Input size incompatible with convolution kernel."; // TODO: generalize to handle inputs of different shapes. for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { CHECK(bottom[0]->shape() == bottom[bottom_id]->shape()) << "All inputs must have the same shape."; } // Shape the tops. compute_output_shape(); vector<int> top_shape = bottom[0]->shape(); top_shape[channel_axis_] = num_output_; top_shape.resize(first_spatial_axis); // Discard input spatial axes. for (int i = 0; i < num_spatial_axes_; ++i) { top_shape.push_back(output_shape_[i]); } for (int top_id = 0; top_id < top.size(); ++top_id) { top[top_id]->Reshape(top_shape); } if (reverse_dimensions()) { conv_out_spatial_dim_ = bottom[0]->count(first_spatial_axis); } else { conv_out_spatial_dim_ = top[0]->count(first_spatial_axis); } const int* kernel_shape_data = kernel_shape_.cpu_data(); kernel_dim_ = conv_in_channels_; for (int i = 0; i < num_spatial_axes_; ++i) { kernel_dim_ *= kernel_shape_data[i]; } weight_offset_ = conv_out_channels_ * kernel_dim_ / group_ / group_; col_offset_ = kernel_dim_ * conv_out_spatial_dim_ / group_; output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_; // Setup input dimensions (conv_input_shape_). vector<int> bottom_dim_blob_shape(1, num_spatial_axes_ + 1); conv_input_shape_.Reshape(bottom_dim_blob_shape); int* conv_input_shape_data = conv_input_shape_.mutable_cpu_data(); for (int i = 0; i < num_spatial_axes_ + 1; ++i) { if (reverse_dimensions()) { conv_input_shape_data[i] = top[0]->shape(channel_axis_ + i); } else { conv_input_shape_data[i] = bottom[0]->shape(channel_axis_ + i); } } // The im2col result buffer will only hold one image at a time to avoid // overly large memory usage. In the special case of 1x1 convolution // it goes lazily unused to save memory. col_buffer_shape_.clear(); col_buffer_shape_.push_back(kernel_dim_); const int* input_shape_data = input_shape_.cpu_data() + 1; for (int i = 0; i < num_spatial_axes_; ++i) { if (reverse_dimensions()) { col_buffer_shape_.push_back(input_shape_data[i]); } else { col_buffer_shape_.push_back(output_shape_[i]); } } col_buffer_.Reshape(col_buffer_shape_); bottom_dim_ = bottom[0]->count(channel_axis_); top_dim_ = top[0]->count(channel_axis_); num_kernels_im2col_ = conv_in_channels_ * conv_out_spatial_dim_; num_kernels_col2im_ = reverse_dimensions() ? top_dim_ : bottom_dim_; // Set up the all ones "bias multiplier" for adding biases by BLAS out_spatial_dim_ = top[0]->count(first_spatial_axis); if (bias_term_) { vector<int> bias_multiplier_shape(1, out_spatial_dim_); bias_multiplier_.Reshape(bias_multiplier_shape); caffe_set(bias_multiplier_.count(), Dtype(1), bias_multiplier_.mutable_cpu_data()); }}
Developer ID: aharrison24, Project: caffe, Lines of code: 78
示例5 / Example 5: RngUniformFillGPU

void RngUniformFillGPU(const Dtype lower, const Dtype upper, void* gpu_data) {
  CHECK_GE(upper, lower);
  Dtype* rng_data = static_cast<Dtype*>(gpu_data);
  caffe_gpu_rng_uniform(sample_size_, lower, upper, rng_data);
}
Developer ID: 0hm, Project: caffe, Lines of code: 5
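Example 5 guards the range with CHECK_GE before filling GPU memory with uniform noise. For comparison, a hypothetical CPU-side analogue of the same guard-then-fill pattern could look like the sketch below; the function name is invented and the standard <random> engine merely stands in for Caffe's own RNG helpers.

// Hypothetical CPU analogue of example 5: validate the range first, then
// fill the buffer with uniformly distributed values (floating-point Dtype).
#include <glog/logging.h>
#include <random>
#include <vector>

template <typename Dtype>
void RngUniformFillCPU(const Dtype lower, const Dtype upper,
                       std::vector<Dtype>* data) {
  CHECK_GE(upper, lower);  // same precondition as the GPU version
  CHECK_NOTNULL(data);
  std::mt19937 gen(std::random_device{}());
  std::uniform_real_distribution<Dtype> dist(lower, upper);
  for (Dtype& v : *data) {
    v = dist(gen);
  }
}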
示例6 / Example 6: CHECK_GE

void ASessionDescription::getFormat(size_t index, AString *value) const {
    CHECK_GE(index, 0u);
    CHECK_LT(index, mTracks.size());

    *value = mFormats.itemAt(index);
}
Developer ID: Gaia-ROM, Project: android_frameworks_base, Lines of code: 6
示例7: caffe_setvoid ROIPoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* bottom_rois = bottom[1]->cpu_data(); // Number of ROIs int num_rois = bottom[1]->num(); int batch_size = bottom[0]->num(); int top_count = top[0]->count(); Dtype* top_data = top[0]->mutable_cpu_data(); caffe_set(top_count, Dtype(-FLT_MAX), top_data); int* argmax_data = max_idx_.mutable_cpu_data(); caffe_set(top_count, -1, argmax_data); // For each ROI R = [batch_index x1 y1 x2 y2]: max pool over R for (int n = 0; n < num_rois; ++n) { int roi_batch_ind = bottom_rois[0]; int roi_start_w = floorf(bottom_rois[1] * spatial_scale_ + 0.5); int roi_start_h = floorf(bottom_rois[2] * spatial_scale_ + 0.5); int roi_end_w = floorf(bottom_rois[3] * spatial_scale_ + 0.5); int roi_end_h = floorf(bottom_rois[4] * spatial_scale_ + 0.5); CHECK_GE(roi_batch_ind, 0); CHECK_LT(roi_batch_ind, batch_size); int roi_height = max(roi_end_h - roi_start_h + 1, 1); int roi_width = max(roi_end_w - roi_start_w + 1, 1); const Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height_); const Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width_); const Dtype* batch_data = bottom_data + bottom[0]->offset(roi_batch_ind); for (int c = 0; c < channels_; ++c) { for (int ph = 0; ph < pooled_height_; ++ph) { for (int pw = 0; pw < pooled_width_; ++pw) { // Compute pooling region for this output unit: // start (included) = floor(ph * roi_height / pooled_height_) // end (excluded) = ceil((ph + 1) * roi_height / pooled_height_) int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); hstart = min(max(hstart + roi_start_h, 0), height_); hend = min(max(hend + roi_start_h, 0), height_); wstart = min(max(wstart + roi_start_w, 0), width_); wend = min(max(wend + roi_start_w, 0), width_); bool is_empty = (hend <= hstart) || (wend <= wstart); const int pool_index = ph * pooled_width_ + pw; if (is_empty) { top_data[pool_index] = 0; argmax_data[pool_index] = -1; } for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const int index = h * width_ + w; if (batch_data[index] > top_data[pool_index]) { top_data[pool_index] = batch_data[index]; argmax_data[pool_index] = index; } } } } } // Increment all data pointers by one channel batch_data += bottom[0]->offset(0, 1); top_data += top[0]->offset(0, 1); argmax_data += max_idx_.offset(0, 1); } // Increment ROI data pointer bottom_rois += bottom[1]->offset(1); }}
Developer ID: clcarwin, Project: caffe-win-tools-faceverification, Lines of code: 80
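Examples 1 and 7 compute every pooling bin as start = floor(ph * bin_size) and end = ceil((ph + 1) * bin_size), clamped to the feature map. The stand-alone sketch below walks that arithmetic for one invented ROI so the rounding behaviour is easy to see; it is purely illustrative and shares no code with Caffe.

// Illustration of the ROI pooling bin arithmetic used in examples 1 and 7:
// an ROI of height 13 pooled to 6 output rows, on a 14-row feature map.
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const int roi_height = 13, pooled_height = 6, height = 14, roi_start_h = 0;
  const double bin_size_h = static_cast<double>(roi_height) / pooled_height;
  for (int ph = 0; ph < pooled_height; ++ph) {
    const int hstart = std::min(height, std::max(0,
        static_cast<int>(std::floor(ph * bin_size_h)) + roi_start_h));
    const int hend = std::min(height, std::max(0,
        static_cast<int>(std::ceil((ph + 1) * bin_size_h)) + roi_start_h));
    std::printf("ph=%d -> rows [%d, %d)\n", ph, hstart, hend);
  }
  return 0;
}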
示例8: CHECK_LEvoid DataTransformer<Dtype>::Transform(Blob<Dtype>* input_blob, Blob<Dtype>* transformed_blob) { const int crop_size = param_.crop_size(); const int input_num = input_blob->num(); const int input_channels = input_blob->channels(); const int input_height = input_blob->height(); const int input_width = input_blob->width(); if (transformed_blob->count() == 0) { // Initialize transformed_blob with the right shape. if (crop_size) { transformed_blob->Reshape(input_num, input_channels, crop_size, crop_size); } else { transformed_blob->Reshape(input_num, input_channels, input_height, input_width); } } const int num = transformed_blob->num(); const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); const int size = transformed_blob->count(); CHECK_LE(input_num, num); CHECK_EQ(input_channels, channels); CHECK_GE(input_height, height); CHECK_GE(input_width, width); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); const bool has_mean_values = mean_values_.size() > 0; // mask_size is defaulted to 0 in caffe/proto/caffe.proto const int mask_size = param_.mask_size(); // mask_freq is defaulted to 1 in 3 in caffe/proto/caffe.proto const int mask_freq = param_.mask_freq(); int h_off = 0; int w_off = 0; if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); // We only do random crop when we do training. if (phase_ == TRAIN) { h_off = Rand(input_height - crop_size + 1); w_off = Rand(input_width - crop_size + 1); } else { h_off = (input_height - crop_size) / 2; w_off = (input_width - crop_size) / 2; } } else { CHECK_EQ(input_height, height); CHECK_EQ(input_width, width); } // initialize masking offsets to be same as cropping offsets // so that there is no conflict bool masking = (phase_ == TRAIN) && (mask_size > 0) && (Rand(mask_freq) == 0); int h_mask_start = h_off; int w_mask_start = w_off; if (masking) { int h_effective = input_height; int w_effective = input_width; if (crop_size) { h_effective = w_effective = crop_size; } CHECK_GE(h_effective, mask_size); CHECK_GE(w_effective, mask_size); h_mask_start += Rand(h_effective-mask_size+1); w_mask_start += Rand(w_effective-mask_size+1); } int h_mask_end = h_mask_start + mask_size; int w_mask_end = w_mask_start + mask_size; Dtype* input_data = input_blob->mutable_cpu_data(); if (has_mean_file) { CHECK_EQ(input_channels, data_mean_.channels()); CHECK_EQ(input_height, data_mean_.height()); CHECK_EQ(input_width, data_mean_.width()); for (int n = 0; n < input_num; ++n) { int offset = input_blob->offset(n); caffe_sub(data_mean_.count(), input_data + offset, data_mean_.cpu_data(), input_data + offset); } } if (has_mean_values) { CHECK(mean_values_.size() == 1 || mean_values_.size() == input_channels) << "Specify either 1 mean_value or as many as channels: " << input_channels; if (mean_values_.size() == 1) { caffe_add_scalar(input_blob->count(), -(mean_values_[0]), input_data); } else { for (int n = 0; n < input_num; ++n) { for (int c = 0; c < input_channels; ++c) { int offset = input_blob->offset(n, c); caffe_add_scalar(input_height * input_width, -(mean_values_[c]), input_data + offset); } }//.........这里部分代码省略.........
Developer ID: chprasad, Project: caffe, Lines of code: 101
示例9: Randvoid DataTransformer<Dtype>::Transform(const Datum& datum, Dtype* transformed_data) { const string& data = datum.data(); const int datum_channels = datum.channels(); const int datum_height = datum.height(); const int datum_width = datum.width(); const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); const bool has_uint8 = data.size() > 0; const bool has_mean_values = mean_values_.size() > 0; // mask_size is defaulted to 0 in caffe/proto/caffe.proto const int mask_size = param_.mask_size(); // mask_freq is defaulted to 1 in 3 in caffe/proto/caffe.proto const int mask_freq = param_.mask_freq(); CHECK_GT(datum_channels, 0); CHECK_GE(datum_height, crop_size); CHECK_GE(datum_width, crop_size); Dtype* mean = NULL; if (has_mean_file) { CHECK_EQ(datum_channels, data_mean_.channels()); CHECK_EQ(datum_height, data_mean_.height()); CHECK_EQ(datum_width, data_mean_.width()); mean = data_mean_.mutable_cpu_data(); } if (has_mean_values) { CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) << "Specify either 1 mean_value or as many as channels: " << datum_channels; if (datum_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < datum_channels; ++c) { mean_values_.push_back(mean_values_[0]); } } } int height = datum_height; int width = datum_width; int h_off = 0; int w_off = 0; if (crop_size) { height = crop_size; width = crop_size; // We only do random crop when we do training. if (phase_ == TRAIN) { h_off = Rand(datum_height - crop_size + 1); w_off = Rand(datum_width - crop_size + 1); } else { h_off = (datum_height - crop_size) / 2; w_off = (datum_width - crop_size) / 2; } } // initialize masking offsets to be same as cropping offsets // so that there is no conflict bool masking = (phase_ == TRAIN) && (mask_size > 0) && (Rand(mask_freq) == 0); int h_mask_start = h_off; int w_mask_start = w_off; if (masking) { int h_effective = datum_height; int w_effective = datum_width; if (crop_size) { h_effective = w_effective = crop_size; } CHECK_GE(h_effective, mask_size); CHECK_GE(w_effective, mask_size); h_mask_start += Rand(h_effective-mask_size+1); w_mask_start += Rand(w_effective-mask_size+1); } int h_mask_end = h_mask_start + mask_size; int w_mask_end = w_mask_start + mask_size; Dtype datum_element; int top_index, data_index; for (int c = 0; c < datum_channels; ++c) { for (int h = 0; h < height; ++h) { for (int w = 0; w < width; ++w) { data_index = (c * datum_height + h_off + h) * datum_width + w_off + w; if (do_mirror) { top_index = (c * height + h) * width + (width - 1 - w); } else { top_index = (c * height + h) * width + w; } if (has_uint8) { datum_element = static_cast<Dtype>(static_cast<uint8_t>(data[data_index])); } else { datum_element = datum.float_data(data_index); } if (has_mean_file) { transformed_data[top_index] = (datum_element - mean[data_index]) * scale; } else { if (has_mean_values) { transformed_data[top_index] = (datum_element - mean_values_[c]) * scale; } else {//.........这里部分代码省略.........
Developer ID: chprasad, Project: caffe, Lines of code: 101
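The copy loop in example 9 linearizes (c, h, w) into a flat CHW index and, when mirroring, flips only the horizontal coordinate of the destination. A tiny illustration of just that index arithmetic, with invented dimensions:

// Destination-index arithmetic from example 9: channel-major (c, h, w)
// layout, with an optional horizontal flip applied to the w coordinate.
#include <cstdio>

int dst_index(int c, int h, int w, int height, int width, bool do_mirror) {
  return do_mirror ? (c * height + h) * width + (width - 1 - w)
                   : (c * height + h) * width + w;
}

int main() {
  const int height = 2, width = 4;
  for (int w = 0; w < width; ++w) {
    // With mirroring, source column w lands in destination column width-1-w.
    std::printf("w=%d -> dst=%d\n", w, dst_index(0, 0, w, height, width, true));
  }
  return 0;
}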
示例10: collect_input_descs//.........这里部分代码省略......... ResetIsNested reset_is_nested(this); is_nested_ = true; std::vector<Analyzer::Expr*> target_exprs; for (auto target_entry : targets) { target_exprs.emplace_back(target_entry->get_expr()); } const auto row_count = rows->rowCount(); if (!row_count) { return std::make_shared<ResultSet>( std::vector<TargetInfo>{}, ExecutorDeviceType::CPU, QueryMemoryDescriptor{}, nullptr, this); } std::vector<ColWidths> agg_col_widths; for (auto wid : get_col_byte_widths(target_exprs, {})) { agg_col_widths.push_back( {wid, int8_t(compact_byte_width(wid, pick_target_compact_width(res_ra_unit, {}, get_min_byte_width())))}); } QueryMemoryDescriptor query_mem_desc{this, allow_multifrag, GroupByColRangeType::Projection, false, false, -1, 0, {sizeof(int64_t)},#ifdef ENABLE_KEY_COMPACTION 0,#endif agg_col_widths, {}, row_count, small_groups_buffer_entry_count_, 0, 0, 0, false, GroupByMemSharing::Shared, CountDistinctDescriptors{}, false, true, false, false, {}, {}, false}; auto compilation_result = compileWorkUnit(false, {}, res_ra_unit, {ExecutorDeviceType::CPU, hoist_literals, opt_level, g_enable_dynamic_watchdog}, {false, allow_multifrag, just_explain, allow_loop_joins, g_enable_watchdog, false, false, g_enable_dynamic_watchdog, g_dynamic_watchdog_time_limit}, nullptr, false, row_set_mem_owner_, row_count, small_groups_buffer_entry_count_, get_min_byte_width(), JoinInfo(JoinImplType::Invalid, std::vector<std::shared_ptr<Analyzer::BinOper>>{}, {}, ""), false); auto column_buffers = result_columns.getColumnBuffers(); CHECK_EQ(column_buffers.size(), static_cast<size_t>(in_col_count)); std::vector<int64_t> init_agg_vals(query_mem_desc.agg_col_widths.size()); auto query_exe_context = query_mem_desc.getQueryExecutionContext(res_ra_unit, init_agg_vals, this, ExecutorDeviceType::CPU, 0, {}, {}, {}, row_set_mem_owner_, false, false, nullptr); const auto hoist_buf = serializeLiterals(compilation_result.literal_values, 0); *error_code = 0; std::vector<std::vector<const int8_t*>> multi_frag_col_buffers{column_buffers}; query_exe_context->launchCpuCode(res_ra_unit, compilation_result.native_functions, hoist_literals, hoist_buf, multi_frag_col_buffers, {{static_cast<int64_t>(result_columns.size())}}, {{0}}, 1u, 0, init_agg_vals, error_code, 1, {}); CHECK_GE(*error_code, 0); return query_exe_context->groupBufferToResults(0, target_exprs, false);}
Developer ID: kanak, Project: mapd-core, Lines of code: 101
示例11: CHECK_EQvoid DataTransformer<Dtype>::Transform(const cv::Mat& cv_img, Blob<Dtype>* transformed_blob) { const int crop_size = param_.crop_size(); const int img_channels = cv_img.channels(); const int img_height = cv_img.rows; const int img_width = cv_img.cols; // Check dimensions. const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); const int num = transformed_blob->num(); CHECK_EQ(channels, img_channels); CHECK_LE(height, img_height); CHECK_LE(width, img_width); CHECK_GE(num, 1); CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); const bool has_mean_values = mean_values_.size() > 0; // mask_size is defaulted to 0 in caffe/proto/caffe.proto const int mask_size = param_.mask_size(); // mask_freq is defaulted to 1 in 3 in caffe/proto/caffe.proto const int mask_freq = param_.mask_freq(); CHECK_GT(img_channels, 0); CHECK_GE(img_height, crop_size); CHECK_GE(img_width, crop_size); Dtype* mean = NULL; if (has_mean_file) { CHECK_EQ(img_channels, data_mean_.channels()); CHECK_EQ(img_height, data_mean_.height()); CHECK_EQ(img_width, data_mean_.width()); mean = data_mean_.mutable_cpu_data(); } if (has_mean_values) { CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) << "Specify either 1 mean_value or as many as channels: " << img_channels; if (img_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < img_channels; ++c) { mean_values_.push_back(mean_values_[0]); } } } int h_off = 0; int w_off = 0; cv::Mat cv_cropped_img = cv_img; if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); // We only do random crop when we do training. if (phase_ == TRAIN) { h_off = Rand(img_height - crop_size + 1); w_off = Rand(img_width - crop_size + 1); } else { h_off = (img_height - crop_size) / 2; w_off = (img_width - crop_size) / 2; } cv::Rect roi(w_off, h_off, crop_size, crop_size); cv_cropped_img = cv_img(roi); } else { CHECK_EQ(img_height, height); CHECK_EQ(img_width, width); } CHECK(cv_cropped_img.data); // initialize masking offsets to be same as cropping offsets // so that there is no conflict bool masking = (phase_ == TRAIN) && (mask_size > 0) && (Rand(mask_freq) == 0); int h_mask_start = h_off; int w_mask_start = w_off; if (masking) { int h_effective = img_height; int w_effective = img_width; if (crop_size) { h_effective = w_effective = crop_size; } CHECK_GE(h_effective, mask_size); CHECK_GE(w_effective, mask_size); h_mask_start += Rand(h_effective-mask_size+1); w_mask_start += Rand(w_effective-mask_size+1); } int h_mask_end = h_mask_start + mask_size; int w_mask_end = w_mask_start + mask_size; Dtype* transformed_data = transformed_blob->mutable_cpu_data(); int top_index; for (int h = 0; h < height; ++h) { const uchar* ptr = cv_cropped_img.ptr<uchar>(h); int img_index = 0; for (int w = 0; w < width; ++w) { for (int c = 0; c < img_channels; ++c) { if (do_mirror) { top_index = (c * height + h) * width + (width - 1 - w);//.........这里部分代码省略.........
Developer ID: chprasad, Project: caffe, Lines of code: 101
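The crop-size guards in examples 9 and 11 (CHECK_GE(img_height, crop_size), CHECK_GE(img_width, crop_size), and their datum_height counterparts) exist so that the crop-offset computation that follows them can never go negative. The sketch below condenses that offset logic; std::mt19937 stands in for Caffe's internal Rand() helper, and the struct and function names are illustrative.

// Sketch of the crop-offset logic guarded by CHECK_GE in example 11:
// random offsets at training time, a centered crop at test time.
#include <glog/logging.h>
#include <random>

struct CropOffset { int h_off; int w_off; };

CropOffset PickCropOffset(int img_height, int img_width, int crop_size,
                          bool is_train, std::mt19937& gen) {
  CHECK_GE(img_height, crop_size);
  CHECK_GE(img_width, crop_size);
  if (is_train) {
    std::uniform_int_distribution<int> dh(0, img_height - crop_size);
    std::uniform_int_distribution<int> dw(0, img_width - crop_size);
    return {dh(gen), dw(gen)};
  }
  return {(img_height - crop_size) / 2, (img_width - crop_size) / 2};
}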
示例12: CHECK_GEvoid RecurrentLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { CHECK_GE(bottom[0]->num_axes(), 2) << "bottom[0] must have at least 2 axes -- (#timesteps, #streams, ...)"; T_ = bottom[0]->shape(0); N_ = bottom[0]->shape(1); LOG(INFO) << "Initializing recurrent layer: assuming input batch contains " << T_ << " timesteps of " << N_ << " independent streams."; CHECK_EQ(bottom[1]->num_axes(), 2) << "bottom[1] must have exactly 2 axes -- (#timesteps, #streams)"; CHECK_EQ(T_, bottom[1]->shape(0)); CHECK_EQ(N_, bottom[1]->shape(1)); // If provided, bottom[2] is a static input to the recurrent net. static_input_ = (bottom.size() > 2); if (static_input_) { CHECK_GE(bottom[2]->num_axes(), 1); CHECK_EQ(N_, bottom[2]->shape(0)); } // Create a NetParameter; setup the inputs that aren't unique to particular // recurrent architectures. NetParameter net_param; net_param.set_force_backward(true); net_param.add_input("x"); BlobShape input_shape; for (int i = 0; i < bottom[0]->num_axes(); ++i) { input_shape.add_dim(bottom[0]->shape(i)); } net_param.add_input_shape()->CopyFrom(input_shape); input_shape.Clear(); input_shape.add_dim(1); for (int i = 0; i < bottom[1]->num_axes(); ++i) { input_shape.add_dim(bottom[1]->shape(i)); } net_param.add_input("cont"); net_param.add_input_shape()->CopyFrom(input_shape); if (static_input_) { input_shape.Clear(); for (int i = 0; i < bottom[2]->num_axes(); ++i) { input_shape.add_dim(bottom[2]->shape(i)); } net_param.add_input("x_static"); net_param.add_input_shape()->CopyFrom(input_shape); } // Call the child's FillUnrolledNet implementation to specify the unrolled // recurrent architecture. this->FillUnrolledNet(&net_param); // Prepend this layer's name to the names of each layer in the unrolled net. const string& layer_name = this->layer_param_.name(); if (layer_name.size() > 0) { for (int i = 0; i < net_param.layer_size(); ++i) { LayerParameter* layer = net_param.mutable_layer(i); layer->set_name(layer_name + "_" + layer->name()); } } // Create the unrolled net. unrolled_net_.reset(new Net<Dtype>(net_param)); unrolled_net_->set_debug_info( this->layer_param_.recurrent_param().debug_info()); // Setup pointers to the inputs. x_input_blob_ = CHECK_NOTNULL(unrolled_net_->blob_by_name("x").get()); cont_input_blob_ = CHECK_NOTNULL(unrolled_net_->blob_by_name("cont").get()); if (static_input_) { x_static_input_blob_ = CHECK_NOTNULL(unrolled_net_->blob_by_name("x_static").get()); } // Setup pointers to paired recurrent inputs/outputs. vector<string> recur_input_names; RecurrentInputBlobNames(&recur_input_names); vector<string> recur_output_names; RecurrentOutputBlobNames(&recur_output_names); const int num_recur_blobs = recur_input_names.size(); CHECK_EQ(num_recur_blobs, recur_output_names.size()); recur_input_blobs_.resize(num_recur_blobs); recur_output_blobs_.resize(num_recur_blobs); for (int i = 0; i < recur_input_names.size(); ++i) { recur_input_blobs_[i] = CHECK_NOTNULL(unrolled_net_->blob_by_name(recur_input_names[i]).get()); recur_output_blobs_[i] = CHECK_NOTNULL(unrolled_net_->blob_by_name(recur_output_names[i]).get()); } // Setup pointers to outputs. vector<string> output_names; OutputBlobNames(&output_names); CHECK_EQ(top.size(), output_names.size()) << "OutputBlobNames must provide an output blob name for each top."; output_blobs_.resize(output_names.size()); for (int i = 0; i < output_names.size(); ++i) { output_blobs_[i] =//.........这里部分代码省略.........
Developer ID: VisionLearningGroup, Project: Ask_Attend_and_Answer, Lines of code: 101
示例13: CreatePartsvoid Pipe::TrainEpoch(int epoch) { Instance *instance; Parts *parts = CreateParts(); Features *features = CreateFeatures(); vector<double> scores; vector<double> gold_outputs; vector<double> predicted_outputs; double total_cost = 0.0; double total_loss = 0.0; double eta; int num_instances = instances_.size(); double lambda = 1.0/(options_->GetRegularizationConstant() * (static_cast<double>(num_instances))); timeval start, end; gettimeofday(&start, NULL); int time_decoding = 0; int time_scores = 0; int num_mistakes = 0; LOG(INFO) << " Iteration #" << epoch + 1; dictionary_->StopGrowth(); for (int i = 0; i < instances_.size(); i++) { int t = num_instances * epoch + i; instance = instances_[i]; MakeParts(instance, parts, &gold_outputs); MakeFeatures(instance, parts, features); // If using only supported features, must remove the unsupported ones. // This is necessary not to mess up the computation of the squared norm // of the feature difference vector in MIRA. if (options_->only_supported_features()) { RemoveUnsupportedFeatures(instance, parts, features); } timeval start_scores, end_scores; gettimeofday(&start_scores, NULL); ComputeScores(instance, parts, features, &scores); gettimeofday(&end_scores, NULL); time_scores += diff_ms(end_scores, start_scores); if (options_->GetTrainingAlgorithm() == "perceptron" || options_->GetTrainingAlgorithm() == "mira" ) { timeval start_decoding, end_decoding; gettimeofday(&start_decoding, NULL); decoder_->Decode(instance, parts, scores, &predicted_outputs); gettimeofday(&end_decoding, NULL); time_decoding += diff_ms(end_decoding, start_decoding); if (options_->GetTrainingAlgorithm() == "perceptron") { for (int r = 0; r < parts->size(); ++r) { if (!NEARLY_EQ_TOL(gold_outputs[r], predicted_outputs[r], 1e-6)) { ++num_mistakes; } } eta = 1.0; } else { CHECK(false) << "Plain mira is not implemented yet."; } MakeGradientStep(parts, features, eta, t, gold_outputs, predicted_outputs); } else if (options_->GetTrainingAlgorithm() == "svm_mira" || options_->GetTrainingAlgorithm() == "crf_mira" || options_->GetTrainingAlgorithm() == "svm_sgd" || options_->GetTrainingAlgorithm() == "crf_sgd") { double loss; timeval start_decoding, end_decoding; gettimeofday(&start_decoding, NULL); if (options_->GetTrainingAlgorithm() == "svm_mira" || options_->GetTrainingAlgorithm() == "svm_sgd") { // Do cost-augmented inference. double cost; decoder_->DecodeCostAugmented(instance, parts, scores, gold_outputs, &predicted_outputs, &cost, &loss); total_cost += cost; } else { // Do marginal inference. double entropy; decoder_->DecodeMarginals(instance, parts, scores, gold_outputs, &predicted_outputs, &entropy, &loss); CHECK_GE(entropy, 0.0); } gettimeofday(&end_decoding, NULL); time_decoding += diff_ms(end_decoding, start_decoding); if (loss < 0.0) { if (!NEARLY_EQ_TOL(loss, 0.0, 1e-9)) { LOG(INFO) << "Warning: negative loss set to zero: " << loss; } loss = 0.0; } total_loss += loss; // Compute difference between predicted and gold feature vectors. FeatureVector difference; MakeFeatureDifference(parts, features, gold_outputs, predicted_outputs, &difference);//.........这里部分代码省略.........
Developer ID: DKlaper, Project: gsw-DepParser, Lines of code: 101
示例14: CHECK_EQvoid VolumeDataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { CHECK_EQ(bottom.size(), 0) << "Data Layer takes no input blobs."; CHECK_GE(top->size(), 1) << "Data Layer takes at least one blob as output."; CHECK_LE(top->size(), 2) << "Data Layer takes at most two blobs as output."; if (top->size() == 1) { output_labels_ = false; } else { output_labels_ = true; } // Initialize the leveldb leveldb::DB* db_temp; leveldb::Options options; options.create_if_missing = false; options.max_open_files = 100; LOG(INFO) << "Opening leveldb " << this->layer_param_.data_param().source(); leveldb::Status status = leveldb::DB::Open( options, this->layer_param_.data_param().source(), &db_temp); CHECK(status.ok()) << "Failed to open leveldb " << this->layer_param_.data_param().source() << std::endl << status.ToString(); db_.reset(db_temp); iter_.reset(db_->NewIterator(leveldb::ReadOptions())); iter_->SeekToFirst(); // Check if we would need to randomly skip a few data points if (this->layer_param_.data_param().rand_skip()) { unsigned int skip = caffe_rng_rand() % this->layer_param_.data_param().rand_skip(); LOG(INFO) << "Skipping first " << skip << " data points."; while (skip-- > 0) { iter_->Next(); if (!iter_->Valid()) { iter_->SeekToFirst(); } } } // Read a data point, and use it to initialize the top blob. VolumeDatum datum; datum.ParseFromString(iter_->value().ToString()); // image int crop_size = this->layer_param_.data_param().crop_size(); if (crop_size > 0) { (*top)[0]->Reshape(this->layer_param_.data_param().batch_size(), datum.channels(), datum.length(), crop_size, crop_size); prefetch_data_.reset(new Blob<Dtype>( this->layer_param_.data_param().batch_size(), datum.channels(), datum.length(), crop_size, crop_size)); } else { (*top)[0]->Reshape( this->layer_param_.data_param().batch_size(), datum.channels(), datum.length(), datum.height(), datum.width()); prefetch_data_.reset(new Blob<Dtype>( this->layer_param_.data_param().batch_size(), datum.channels(), datum.length(), datum.height(), datum.width())); } LOG(INFO) << "output data size: " << (*top)[0]->num() << "," << (*top)[0]->channels() << "," << (*top)[0]->length() << "," << (*top)[0]->height() << "," << (*top)[0]->width(); // label if (output_labels_) { (*top)[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1, 1); prefetch_label_.reset( new Blob<Dtype>(this->layer_param_.data_param().batch_size(), 1, 1, 1, 1)); } // datum size datum_channels_ = datum.channels(); datum_length_ = datum.length(); datum_height_ = datum.height(); datum_width_ = datum.width(); datum_size_ = datum.channels() * datum.length() * datum.height() * datum.width(); CHECK_GT(datum_height_, crop_size); CHECK_GT(datum_width_, crop_size); // check if we want to have mean if (this->layer_param_.data_param().has_mean_file()) { const string& mean_file = this->layer_param_.data_param().mean_file(); LOG(INFO) << "Loading mean file from" << mean_file; BlobProto blob_proto; ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); data_mean_.FromProto(blob_proto); CHECK_EQ(data_mean_.num(), 1); CHECK_EQ(data_mean_.channels(), datum_channels_); CHECK_EQ(data_mean_.length(), datum_length_); CHECK_EQ(data_mean_.height(), datum_height_); CHECK_EQ(data_mean_.width(), datum_width_); } else { // Simply initialize an all-empty mean. data_mean_.Reshape(1, datum_channels_, datum_length_, datum_height_, datum_width_); } // Now, start the prefetch thread. 
Before calling prefetch, we make two // cpu_data calls so that the prefetch thread does not accidentally make // simultaneous cudaMalloc calls when the main thread is running. In some // GPUs this seems to cause failures if we do not so. prefetch_data_->mutable_cpu_data(); if (output_labels_) { prefetch_label_->mutable_cpu_data(); } data_mean_.cpu_data();//.........这里部分代码省略.........
Developer ID: MarcoSaku, Project: Spiking-C3D, Lines of code: 101
示例15 / Example 15: H5Fclose

HDF5OutputLayer<Dtype>::~HDF5OutputLayer<Dtype>() {
  if (file_opened_) {
    herr_t status = H5Fclose(file_id_);
    CHECK_GE(status, 0) << "Failed to close HDF5 file " << file_name_;
  }
}
Developer ID: azrael417, Project: caffe, Lines of code: 6
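Example 15 checks the status returned by H5Fclose in the destructor; the matching check happens when the file is first created. The sketch below shows that opening side, mirroring what Caffe's HDF5 output layer does during setup; the free-standing helper and its name are an illustrative assumption.

// Opening-side counterpart of example 15: H5Fcreate returns a negative id
// on failure, so the result is validated with CHECK_GE before use.
#include <glog/logging.h>
#include "hdf5.h"

hid_t OpenOutputFile(const char* file_name) {
  hid_t file_id = H5Fcreate(file_name, H5F_ACC_TRUNC,
                            H5P_DEFAULT, H5P_DEFAULT);
  CHECK_GE(file_id, 0) << "Failed to open HDF5 file " << file_name;
  return file_id;
}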
示例16 / Example 16: CHECK

void ARTSPConnection::onSendRequest(const sp<AMessage> &msg) {
    sp<AMessage> reply;
    CHECK(msg->findMessage("reply", &reply));

    if (mState != CONNECTED) {
        reply->setInt32("result", -ENOTCONN);
        reply->post();
        return;
    }

    AString request;
    CHECK(msg->findString("request", &request));

    // Just in case we need to re-issue the request with proper authentication
    // later, stash it away.
    reply->setString("original-request", request.c_str(), request.size());

    addAuthentication(&request);

    // Find the boundary between headers and the body.
    ssize_t i = request.find("\r\n\r\n");
    CHECK_GE(i, 0);

    int32_t cseq = mNextCSeq++;

    AString cseqHeader = "CSeq: ";
    cseqHeader.append(cseq);
    cseqHeader.append("\r\n");

    request.insert(cseqHeader, i + 2);

    LOGV("request: '%s'", request.c_str());

    size_t numBytesSent = 0;
    while (numBytesSent < request.size()) {
        ssize_t n = send(mSocket, request.c_str() + numBytesSent,
                         request.size() - numBytesSent, 0);

        if (n == 0) {
            // Server closed the connection.
            LOGE("Server unexpectedly closed the connection.");

            reply->setInt32("result", ERROR_IO);
            reply->post();
            return;
        } else if (n < 0) {
            if (errno == EINTR) {
                continue;
            }

            LOGE("Error sending rtsp request.");
            reply->setInt32("result", -errno);
            reply->post();
            return;
        }

        numBytesSent += (size_t)n;
    }

    mPendingRequests.add(cseq, reply);
}
Developer ID: Redux, Project: android_frameworks_base, Lines of code: 62
示例17 / Example 17: hdf5_get_num_links

int hdf5_get_num_links(hid_t loc_id) {
  H5G_info_t info;
  herr_t status = H5Gget_info(loc_id, &info);
  CHECK_GE(status, 0) << "Error while counting HDF5 links.";
  return info.nlinks;
}
Developer ID: Haiyang21, Project: caffe-BVLC-vs2013, Lines of code: 6
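One possible caller of hdf5_get_num_links from example 17, shown to make the CHECK_GE-on-handle convention explicit. The wrapper function, file handling, and messages are illustrative assumptions rather than code from the original project.

// Possible usage of hdf5_get_num_links: open a file read-only, count the
// links (datasets/groups) at its root, and close it again.
#include <glog/logging.h>
#include "hdf5.h"

int hdf5_get_num_links(hid_t loc_id);  // from example 17

int CountRootLinks(const char* file_name) {
  hid_t file_id = H5Fopen(file_name, H5F_ACC_RDONLY, H5P_DEFAULT);
  CHECK_GE(file_id, 0) << "Failed to open HDF5 file " << file_name;
  const int num_links = hdf5_get_num_links(file_id);
  herr_t status = H5Fclose(file_id);
  CHECK_GE(status, 0) << "Failed to close HDF5 file " << file_name;
  return num_links;
}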
示例18: receiveBinaryData//.........这里部分代码省略......... } AString statusCodeStr( response->mStatusLine, space1 + 1, space2 - space1 - 1); if (!ParseSingleUnsignedLong( statusCodeStr.c_str(), &response->mStatusCode) || response->mStatusCode < 100 || response->mStatusCode > 999) { return false; } AString line; for (;;) { if (!receiveLine(&line)) { break; } if (line.empty()) { break; } LOGV("line: %s", line.c_str()); ssize_t colonPos = line.find(":"); if (colonPos < 0) { // Malformed header line. return false; } AString key(line, 0, colonPos); key.trim(); key.tolower(); line.erase(0, colonPos + 1); line.trim(); response->mHeaders.add(key, line); } unsigned long contentLength = 0; ssize_t i = response->mHeaders.indexOfKey("content-length"); if (i >= 0) { AString value = response->mHeaders.valueAt(i); if (!ParseSingleUnsignedLong(value.c_str(), &contentLength)) { return false; } } if (contentLength > 0) { response->mContent = new ABuffer(contentLength); size_t numBytesRead = 0; while (numBytesRead < contentLength) { ssize_t n = recv( mSocket, response->mContent->data() + numBytesRead, contentLength - numBytesRead, 0); if (n == 0) { // Server closed the connection. TRESPASS(); } else if (n < 0) { if (errno == EINTR) { continue; } TRESPASS(); } numBytesRead += (size_t)n; } } if (response->mStatusCode == 401) { if (mAuthType == NONE && mUser.size() > 0 && parseAuthMethod(response)) { ssize_t i; CHECK_EQ((status_t)OK, findPendingRequest(response, &i)); CHECK_GE(i, 0); sp<AMessage> reply = mPendingRequests.valueAt(i); mPendingRequests.removeItemsAt(i); AString request; CHECK(reply->findString("original-request", &request)); sp<AMessage> msg = new AMessage(kWhatSendRequest, id()); msg->setMessage("reply", reply); msg->setString("request", request.c_str(), request.size()); LOGI("re-sending request with authentication headers..."); onSendRequest(msg); return true; } } return notifyResponseListener(response);}
Developer ID: Redux, Project: android_frameworks_base, Lines of code: 101
示例19: CHECKvoid Solver<Dtype>::InitTestNets() { CHECK(Caffe::root_solver()); const bool has_net_param = param_.has_net_param(); const bool has_net_file = param_.has_net(); const int num_generic_nets = has_net_param + has_net_file; CHECK_LE(num_generic_nets, 1) << "Both net_param and net_file may not be specified."; const int num_test_net_params = param_.test_net_param_size(); const int num_test_net_files = param_.test_net_size(); const int num_test_nets = num_test_net_params + num_test_net_files; if (num_generic_nets) { CHECK_GE(param_.test_iter_size(), num_test_nets) << "test_iter must be specified for each test network."; } else { CHECK_EQ(param_.test_iter_size(), num_test_nets) << "test_iter must be specified for each test network."; } // If we have a generic net (specified by net or net_param, rather than // test_net or test_net_param), we may have an unlimited number of actual // test networks -- the actual number is given by the number of remaining // test_iters after any test nets specified by test_net_param and/or test_net // are evaluated. const int num_generic_net_instances = param_.test_iter_size() - num_test_nets; const int num_test_net_instances = num_test_nets + num_generic_net_instances; if (param_.test_state_size()) { CHECK_EQ(param_.test_state_size(), num_test_net_instances) << "test_state must be unspecified or specified once per test net."; } if (num_test_net_instances) { CHECK_GT(param_.test_interval(), 0); } int test_net_id = 0; vector<string> sources(num_test_net_instances); vector<NetParameter> net_params(num_test_net_instances); for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) { sources[test_net_id] = "test_net_param"; net_params[test_net_id].CopyFrom(param_.test_net_param(i)); } for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) { sources[test_net_id] = "test_net file: " + param_.test_net(i); ReadNetParamsFromTextFileOrDie(param_.test_net(i), &net_params[test_net_id]); } const int remaining_test_nets = param_.test_iter_size() - test_net_id; if (has_net_param) { for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { sources[test_net_id] = "net_param"; net_params[test_net_id].CopyFrom(param_.net_param()); } } if (has_net_file) { for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { sources[test_net_id] = "net file: " + param_.net(); ReadNetParamsFromTextFileOrDie(param_.net(), &net_params[test_net_id]); } } test_nets_.resize(num_test_net_instances); test_mean_scores_.resize(num_test_net_instances); for (int i = 0; i < num_test_net_instances; ++i) { // Set the correct NetState. We start with the solver defaults (lowest // precedence); then, merge in any NetState specified by the net_param // itself; finally, merge in any NetState specified by the test_state // (highest precedence). NetState net_state; net_state.set_phase(TEST); net_state.MergeFrom(net_params[i].state()); if (param_.test_state_size()) { net_state.MergeFrom(param_.test_state(i)); } net_params[i].mutable_state()->CopyFrom(net_state); LOG(INFO) << "Creating test net (#" << i << ") specified by " << sources[i]; if (Caffe::root_solver()) { test_nets_[i].reset(new Net<Dtype>(net_params[i])); } else { test_nets_[i].reset(new Net<Dtype>(net_params[i], root_solver_->test_nets_[i].get())); } test_nets_[i]->set_debug_info(param_.debug_info()); }}
Developer ID: kovibalu, Project: caffe-cnntools, Lines of code: 81
示例20 / Example 20: CHECK_GE

void ARTSPConnection::addAuthentication(AString *request) {
    if (mAuthType == NONE) {
        return;
    }

    // Find the boundary between headers and the body.
    ssize_t i = request->find("\r\n\r\n");
    CHECK_GE(i, 0);

    if (mAuthType == BASIC) {
        AString tmp;
        tmp.append(mUser);
        tmp.append(":");
        tmp.append(mPass);

        AString out;
        encodeBase64(tmp.c_str(), tmp.size(), &out);

        AString fragment;
        fragment.append("Authorization: Basic ");
        fragment.append(out);
        fragment.append("\r\n");

        request->insert(fragment, i + 2);

        return;
    }

#if defined(HAVE_ANDROID_OS)
    CHECK_EQ((int)mAuthType, (int)DIGEST);

    AString method, url;
    GetMethodAndURL(*request, &method, &url);

    AString A1;
    A1.append(mUser);
    A1.append(":");
    A1.append("Streaming Server");
    A1.append(":");
    A1.append(mPass);

    AString A2;
    A2.append(method);
    A2.append(":");
    A2.append(url);

    AString HA1, HA2;
    H(A1, &HA1);
    H(A2, &HA2);

    AString tmp;
    tmp.append(HA1);
    tmp.append(":");
    tmp.append(mNonce);
    tmp.append(":");
    tmp.append(HA2);

    AString digest;
    H(tmp, &digest);

    AString fragment;
    fragment.append("Authorization: Digest ");
    fragment.append("nonce=\"");
    fragment.append(mNonce);
    fragment.append("\", ");
    fragment.append("username=\"");
    fragment.append(mUser);
    fragment.append("\", ");
    fragment.append("uri=\"");
    fragment.append(url);
    fragment.append("\", ");
    fragment.append("response=\"");
    fragment.append(digest);
    fragment.append("\"");
    fragment.append("\r\n");

    request->insert(fragment, i + 2);
#endif
}
Developer ID: Redux, Project: android_frameworks_base, Lines of code: 79
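Examples 16 and 20 share one idiom: locate the blank line that separates the RTSP headers from the body with find("\r\n\r\n"), verify it exists with CHECK_GE, and insert a new header two bytes past the first CRLF. The same idea written against std::string, since Android's AString is not a public API; the helper below is an illustrative sketch, not code from the original file.

// Header-insertion idiom from examples 16 and 20, using std::string.
#include <glog/logging.h>
#include <string>

void AddHeader(std::string* request, const std::string& header_line) {
  // Find the blank line separating the headers from the body.
  const std::string::size_type i = request->find("\r\n\r\n");
  CHECK_NE(i, std::string::npos);
  // Insert just before the blank line, keeping CRLF framing intact.
  request->insert(i + 2, header_line + "\r\n");
}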
示例21: CHECK_GEvoid CropLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // Calculate number of spatial axis CropParameter crop_param = this->layer_param_.crop_param(); channel_axis_ = bottom[0]->CanonicalAxisIndex(crop_param.axis()); channels_ = bottom[0]->shape(channel_axis_); const int first_spatial_axis = channel_axis_ + 1; num_axes_ = bottom[0]->num_axes(); num_spatial_axes_ = num_axes_ - first_spatial_axis; CHECK_GE(num_spatial_axes_, 1); vector<int> dim_blob_shape(1, num_axes_); // Construct a map from top blobs to layer inds, skipping over in-place // connections. CHECK(this->net_!=NULL) << "Crop Layer must be used in a net"; map<Blob<Dtype>*, int> down_map; for (int layer_ind = 0; layer_ind < this->net_->top_vecs().size(); ++layer_ind) { vector<Blob<Dtype>*> tops = this->net_->top_vecs()[layer_ind]; for (int top_ind = 0; top_ind < tops.size(); ++top_ind) { if (down_map.find(tops[top_ind]) == down_map.end()) { down_map[tops[top_ind]] = layer_ind; } } } // Walk back from the first bottom, keeping track of all the blobs we pass. set<Blob<Dtype>*> path_blobs; Blob<Dtype>* blob = bottom[0]; int layer_ind; // TODO this logic can be simplified if all blobs are tops path_blobs.insert(blob); while (down_map.find(blob) != down_map.end()) { layer_ind = down_map[blob]; if (this->net_->bottom_vecs()[layer_ind].size() == 0) { break; } blob = this->net_->bottom_vecs()[layer_ind][0]; path_blobs.insert(blob); } // Now walk back from the second bottom, until we find a blob of intersection. Blob<Dtype>* inter_blob = bottom[1]; while (path_blobs.find(inter_blob) == path_blobs.end()) { CHECK(down_map.find(inter_blob) != down_map.end()) << "Cannot align apparently disconnected blobs."; layer_ind = down_map[inter_blob]; CHECK_GT(this->net_->bottom_vecs()[layer_ind].size(), 0) << "Cannot align apparently disconnected blobs."; inter_blob = this->net_->bottom_vecs()[layer_ind][0]; } // Compute the coord map from the blob of intersection to each bottom. vector<DiagonalAffineMap<Dtype> > coord_maps(2, DiagonalAffineMap<Dtype>::identity(num_spatial_axes_)); for (int i = 0; i < 2; ++i) { for (Blob<Dtype>* blob = bottom[i]; blob != inter_blob; blob = this->net_->bottom_vecs()[down_map[blob]][0]) { shared_ptr<Layer<Dtype> > layer = this->net_->layers()[down_map[blob]]; // printf("[%d] %s/n",i,layer->type()); coord_maps[i] = coord_maps[i].compose(layer->coord_map(num_spatial_axes_)); // printf("done [%d] %s/n",i,layer->type()); } } // Compute the mapping from first bottom coordinates to second. crop_.Reshape(dim_blob_shape); top_shape_.Reshape(dim_blob_shape); bottom_shape_.Reshape(dim_blob_shape); int* crop_data = crop_.mutable_cpu_data(); int* top_shape_data = top_shape_.mutable_cpu_data(); int* bottom_shape_data = bottom_shape_.mutable_cpu_data(); // printf("maps %d %d /n",coord_maps[0].size(), coord_maps[1].size()); DiagonalAffineMap<Dtype> crop_map = coord_maps[1].compose(coord_maps[0].inv());// printf("Done compute map /n");// printf("num_axes_ %d /n",num_axes_); caffe_set(num_axes_, static_cast<int>(0), crop_data); for (int i = 0; i < num_spatial_axes_; ++i) { // Check for scale mismatch (unfortunately, CHECK_DOUBLE_EQ does not // support a message like the other CHECKs). CHECK_DOUBLE_EQ(crop_map.coefs()[i].first, 1); CHECK_LE(crop_map.coefs()[i].second, 0) << "Negative crop width."; // Check that the crop width is an integer. 
CHECK_DOUBLE_EQ(crop_map.coefs()[i].second, round(crop_map.coefs()[i].second)); crop_data[first_spatial_axis+i] = - round(crop_map.coefs()[i].second); } // printf("shapes /n"); for (int i = 0; i < channel_axis_+1; ++i) { bottom_shape_data[i] = bottom[0]->shape(i); top_shape_data[i] = bottom[0]->shape(i); } // printf("shapes2 /n"); for (int i = 0; i < num_spatial_axes_; ++i) { bottom_shape_data[first_spatial_axis+i] = bottom[0]->shape(first_spatial_axis+i); top_shape_data[first_spatial_axis+i] = bottom[1]->shape(first_spatial_axis+i); } // printf("line size /n"); line_size_ = top_shape_data[num_axes_-1]; // printf("done /n");}
Developer ID: jmerkow, Project: I2I, Lines of code: 97
示例22: ALOGVvoid BlockIterator::advance_l() { for (;;) { long res = mCluster->GetEntry(mBlockEntryIndex, mBlockEntry); ALOGV("GetEntry returned %ld", res); long long pos; long len; if (res < 0) { // Need to parse this cluster some more CHECK_EQ(res, mkvparser::E_BUFFER_NOT_FULL); res = mCluster->Parse(pos, len); ALOGV("Parse returned %ld", res); if (res < 0) { // I/O error ALOGE("Cluster::Parse returned result %ld", res); mCluster = NULL; break; } continue; } else if (res == 0) { // We're done with this cluster const mkvparser::Cluster *nextCluster; res = mExtractor->mSegment->ParseNext( mCluster, nextCluster, pos, len); ALOGV("ParseNext returned %ld", res); if (res != 0) { // EOF or error mCluster = NULL; break; } CHECK_EQ(res, 0); CHECK(nextCluster != NULL); CHECK(!nextCluster->EOS()); mCluster = nextCluster; res = mCluster->Parse(pos, len); ALOGV("Parse (2) returned %ld", res); CHECK_GE(res, 0); mBlockEntryIndex = 0; continue; } CHECK(mBlockEntry != NULL); CHECK(mBlockEntry->GetBlock() != NULL); ++mBlockEntryIndex; if (mBlockEntry->GetBlock()->GetTrackNumber() == mTrackNum) { break; } }}
Developer ID: AOSP-JF, Project: platform_frameworks_av, Lines of code: 63
Example 23: caffe_set

void CFMPoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_rois = bottom[1]->cpu_data();
  // Number of ROIs
  int num_rois = bottom[1]->num();
  int top_count = top[0]->count();
  Dtype* top_data = top[0]->mutable_cpu_data();
  caffe_set(top_count, Dtype(-FLT_MAX), top_data);
  int* argmax_data = max_idx_.mutable_cpu_data();
  caffe_set(top_count, -1, argmax_data);
  Dtype* cfm_data = cfm_weights_.mutable_cpu_data();
  const Dtype* mask_data = bottom[2]->cpu_data();
  //First set the cfm maps
  caffe_set(cfm_weights_.count(), Dtype(0.0), cfm_data);
  for (int n=0; n<num_rois; ++n) {
    int box_xmin = bottom_rois[5*n+1];
    int box_ymin = bottom_rois[5*n+2];
    int box_xmax = bottom_rois[5*n+3];
    int box_ymax = bottom_rois[5*n+4];
    int box_w = box_xmax-box_xmin+1.0;
    int box_h = box_ymax-box_ymin+1.0;
    Dtype cell_w = static_cast<Dtype>(mask_width_)/static_cast<Dtype>(box_w);
    Dtype cell_h = static_cast<Dtype>(mask_height_)/static_cast<Dtype>(box_h);
    Dtype* cfm_data_this = cfm_data + cfm_weights_.offset(n);
    const Dtype* mask_data_this = mask_data + bottom[2]->offset(n);
    for (int y=box_ymin; y<=box_ymax; y++) {
      for (int x=box_xmin; x<=box_xmax; x++) {
        //compute where the box falls in the mask
        Dtype Ycenter = static_cast<Dtype>(y+0.5-box_ymin)*cell_h-0.5;
        int Ymin = static_cast<int>(ceil(Ycenter-cell_h));
        int Ymax = static_cast<int>(floor(Ycenter+cell_h));
        Dtype Xcenter = static_cast<Dtype>(x+0.5-box_xmin)*cell_w-0.5;
        int Xmin = static_cast<int>(ceil(Xcenter-cell_w));
        int Xmax = static_cast<int>(floor(Xcenter+cell_w));
        Ymin = max(Ymin,0);
        Xmin = max(Xmin,0);
        Ymax = min(Ymax, mask_height_-1);
        Xmax = min(Xmax, mask_width_-1);
        Dtype val = 0;
        Dtype count = 0;
        for(int Y=Ymin; Y<=Ymax; Y++){
          for(int X=Xmin; X<=Xmax; X++){
            Dtype wty = Dtype(1.0) - fabs(static_cast<Dtype>(Y)-Ycenter)/cell_h;
            Dtype wtx = Dtype(1.0) - fabs(static_cast<Dtype>(X)-Xcenter)/cell_w;
            val += mask_data_this[Y*mask_width_ + X]*wty*wtx;
            count += wty*wtx;
          }
        }
        count = count==0 ? 1 : count;
        cfm_data_this[y*width_+x] = static_cast<Dtype>(val/count >= 0.5);
      }
    }
  }
  // For each ROI R = [level x1 y1 x2 y2]: max pool over R
  for (int n = 0; n < num_rois; ++n) {
    int roi_level = bottom_rois[0];
    int roi_start_w = bottom_rois[1];
    int roi_start_h = bottom_rois[2];
    int roi_end_w = bottom_rois[3];
    int roi_end_h = bottom_rois[4];
    CHECK_GE(roi_level, 0);
    CHECK_LT(roi_level, num_levels_);
    CHECK_GE(roi_start_w, 0);
    CHECK_GE(roi_start_h, 0);
    CHECK_LT(roi_end_w, width_);
    CHECK_LT(roi_end_h, height_);
    int roi_height = max(roi_end_h - roi_start_h + 1, 1);
    int roi_width = max(roi_end_w - roi_start_w + 1, 1);
    const Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height_);
    const Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width_);
    const Dtype* level_data = bottom_data + bottom[0]->offset(roi_level);
    Dtype* cfm_data_box = cfm_data + cfm_weights_.offset(n);
    for (int c = 0; c < channels_; ++c) {
      for (int ph = 0; ph < pooled_height_; ++ph) {
        for (int pw = 0; pw < pooled_width_; ++pw) {
          // Compute pooling region for this output unit:
          // start (included) = floor(ph * roi_height / pooled_height_)
          // end (excluded) = ceil((ph + 1) * roi_height / pooled_height_)
          int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h));
          int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w));
          int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h));
          int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w));
          hstart = min(max(hstart + roi_start_h, 0), height_);
          hend = min(max(hend + roi_start_h, 0), height_);
          wstart = min(max(wstart + roi_start_w, 0), width_);
          wend = min(max(wend + roi_start_w, 0), width_);
          // ......... (part of the code omitted here) .........
Developer ID: bharath272, Project: caffe-sds, Lines of code: 101
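The CHECK_GE / CHECK_LT pairs in Example 23 are what guard each ROI before the pooling loops run. Below is a minimal, self-contained sketch of that validation pattern, assuming only glog (<glog/logging.h>, linked with -lglog); the Roi struct and ValidateRoi function are illustrative names introduced here, not part of Caffe.

#include <glog/logging.h>

struct Roi {                  // [level, x1, y1, x2, y2], as in the layer above
  int level, x1, y1, x2, y2;
};

// Aborts with a FATAL log if the ROI lies outside the feature map.
void ValidateRoi(const Roi& roi, int num_levels, int width, int height) {
  CHECK_GE(roi.level, 0) << "ROI level must be non-negative";
  CHECK_LT(roi.level, num_levels) << "ROI level out of range";
  CHECK_GE(roi.x1, 0);
  CHECK_GE(roi.y1, 0);
  CHECK_LT(roi.x2, width);
  CHECK_LT(roi.y2, height);
}

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  ValidateRoi({0, 4, 4, 31, 31}, /*num_levels=*/5, /*width=*/64, /*height=*/64);
  // ValidateRoi({9, 4, 4, 31, 31}, 5, 64, 64);  // would abort: level >= num_levels
  return 0;
}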
示例24: Rand//.........这里部分代码省略......... case 2: cv::medianBlur(cv_img, cv_img, smooth_param); break; case 3: cv::boxFilter(cv_img, cv_img, -1, cv::Size(smooth_param * 2, smooth_param * 2)); break; default: break; } } if (debug_params && phase_ == TRAIN) { LOG(INFO) << "----------------------------------------"; if (do_rotation) { LOG(INFO) << "* parameter for rotation: "; LOG(INFO) << " current rotation angle: " << current_angle; } if (do_brightness) { LOG(INFO) << "* parameter for contrast adjustment: "; LOG(INFO) << " alpha: " << alpha << ", beta: " << beta; } if (do_smooth) { LOG(INFO) << "* parameter for smooth filtering: "; LOG(INFO) << " smooth type: " << smooth_type << ", smooth param: " << smooth_param; } } const int img_channels = cv_img.channels(); const int img_height = cv_img.rows; const int img_width = cv_img.cols; CHECK_GT(img_channels, 0); CHECK_GE(img_height, crop_size); CHECK_GE(img_width, crop_size); CHECK_EQ(channels, img_channels); CHECK_LE(height, img_height); CHECK_LE(width, img_width); CHECK_GE(num, 1); CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; Dtype* mean = NULL; if (has_mean_file) { CHECK_EQ(img_channels, data_mean_.channels()); CHECK_EQ(img_height, data_mean_.height()); CHECK_EQ(img_width, data_mean_.width()); mean = data_mean_.mutable_cpu_data(); } if (has_mean_values) { CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) << "Specify either 1 mean_value or as many as channels: " << img_channels; if (img_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < img_channels; ++c) { mean_values_.push_back(mean_values_[0]); } } } int h_off = 0; int w_off = 0; cv::Mat cv_cropped_img = cv_img; if (crop_size) {
Developer ID: runauto, Project: caffe-augmentation-1, Lines of code: 67
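Example 24 uses CHECK_GE to assert that the (possibly augmented) image is at least as large as the crop window before a random crop offset is drawn. The sketch below isolates that guard under the same assumption of glog being available; Rand() here is a simple stand-in for Caffe's RNG helper, not the library function itself.

#include <cstdlib>
#include <glog/logging.h>

static int Rand(int n) { return std::rand() % n; }  // illustrative stand-in only

// Computes a random top-left offset for a crop_size x crop_size window.
void RandomCropOffset(int img_height, int img_width, int crop_size,
                      int* h_off, int* w_off) {
  CHECK_GT(crop_size, 0);
  CHECK_GE(img_height, crop_size) << "image shorter than the crop window";
  CHECK_GE(img_width, crop_size) << "image narrower than the crop window";
  *h_off = Rand(img_height - crop_size + 1);
  *w_off = Rand(img_width - crop_size + 1);
}

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  int h = 0, w = 0;
  RandomCropOffset(/*img_height=*/256, /*img_width=*/256, /*crop_size=*/227, &h, &w);
  LOG(INFO) << "crop offset: (" << h << ", " << w << ")";
  return 0;
}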
示例25: CHECK_GEvoid ConvolutionRistrettoLayer<Dtype>::LayerSetUp( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // Configure the kernel size, padding, stride, and inputs. ConvolutionParameter conv_param = this->layer_param_.convolution_param(); this->force_nd_im2col_ = conv_param.force_nd_im2col(); this->channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis()); const int first_spatial_axis = this->channel_axis_ + 1; const int num_axes = bottom[0]->num_axes(); this->num_spatial_axes_ = num_axes - first_spatial_axis; CHECK_GE(this->num_spatial_axes_, 0); vector<int> bottom_dim_blob_shape(1, this->num_spatial_axes_ + 1); vector<int> spatial_dim_blob_shape(1, std::max(this->num_spatial_axes_, 1)); // Setup filter kernel dimensions (kernel_shape_). this->kernel_shape_.Reshape(spatial_dim_blob_shape); int* kernel_shape_data = this->kernel_shape_.mutable_cpu_data(); if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) { CHECK_EQ(this->num_spatial_axes_, 2) << "kernel_h & kernel_w can only be used for 2D convolution."; CHECK_EQ(0, conv_param.kernel_size_size()) << "Either kernel_size or kernel_h/w should be specified; not both."; kernel_shape_data[0] = conv_param.kernel_h(); kernel_shape_data[1] = conv_param.kernel_w(); } else { const int num_kernel_dims = conv_param.kernel_size_size(); CHECK(num_kernel_dims == 1 || num_kernel_dims == this->num_spatial_axes_) << "kernel_size must be specified once, or once per spatial dimension " << "(kernel_size specified " << num_kernel_dims << " times; " << this->num_spatial_axes_ << " spatial dims)."; for (int i = 0; i < this->num_spatial_axes_; ++i) { kernel_shape_data[i] = conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i); } } for (int i = 0; i < this->num_spatial_axes_; ++i) { CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero."; } // Setup stride dimensions (stride_). this->stride_.Reshape(spatial_dim_blob_shape); int* stride_data = this->stride_.mutable_cpu_data(); if (conv_param.has_stride_h() || conv_param.has_stride_w()) { CHECK_EQ(this->num_spatial_axes_, 2) << "stride_h & stride_w can only be used for 2D convolution."; CHECK_EQ(0, conv_param.stride_size()) << "Either stride or stride_h/w should be specified; not both."; stride_data[0] = conv_param.stride_h(); stride_data[1] = conv_param.stride_w(); } else { const int num_stride_dims = conv_param.stride_size(); CHECK(num_stride_dims == 0 || num_stride_dims == 1 || num_stride_dims == this->num_spatial_axes_) << "stride must be specified once, or once per spatial dimension " << "(stride specified " << num_stride_dims << " times; " << this->num_spatial_axes_ << " spatial dims)."; const int kDefaultStride = 1; for (int i = 0; i < this->num_spatial_axes_; ++i) { stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : conv_param.stride((num_stride_dims == 1) ? 0 : i); CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; } } // Setup pad dimensions (pad_). 
this->pad_.Reshape(spatial_dim_blob_shape); int* pad_data = this->pad_.mutable_cpu_data(); if (conv_param.has_pad_h() || conv_param.has_pad_w()) { CHECK_EQ(this->num_spatial_axes_, 2) << "pad_h & pad_w can only be used for 2D convolution."; CHECK_EQ(0, conv_param.pad_size()) << "Either pad or pad_h/w should be specified; not both."; pad_data[0] = conv_param.pad_h(); pad_data[1] = conv_param.pad_w(); } else { const int num_pad_dims = conv_param.pad_size(); CHECK(num_pad_dims == 0 || num_pad_dims == 1 || num_pad_dims == this->num_spatial_axes_) << "pad must be specified once, or once per spatial dimension " << "(pad specified " << num_pad_dims << " times; " << this->num_spatial_axes_ << " spatial dims)."; const int kDefaultPad = 0; for (int i = 0; i < this->num_spatial_axes_; ++i) { pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : conv_param.pad((num_pad_dims == 1) ? 0 : i); } } // Setup dilation dimensions (dilation_). this->dilation_.Reshape(spatial_dim_blob_shape); int* dilation_data = this->dilation_.mutable_cpu_data(); const int num_dilation_dims = conv_param.dilation_size(); CHECK(num_dilation_dims == 0 || num_dilation_dims == 1 || num_dilation_dims == this->num_spatial_axes_) << "dilation must be specified once, or once per spatial dimension " << "(dilation specified " << num_dilation_dims << " times; " << this->num_spatial_axes_ << " spatial dims)."; const int kDefaultDilation = 1; for (int i = 0; i < this->num_spatial_axes_; ++i) { dilation_data[i] = (num_dilation_dims == 0) ? kDefaultDilation : conv_param.dilation((num_dilation_dims == 1) ? 0 : i); } // Special case: im2col is the identity for 1x1 convolution with stride 1 // and no padding, so flag for skipping the buffer and transformation. this->is_1x1_ = true;//.........这里部分代码省略.........
Developer ID: MichalBusta, Project: Ristretto-caffe, Lines of code: 101
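In Example 25, CHECK_GE, CHECK_EQ, and CHECK_GT validate the convolution geometry, and the streamed text after << is attached to the FATAL message when a check fails. A minimal sketch of that per-dimension validation, assuming glog and plain std::vector stand-ins for the protobuf fields used in the real layer:

#include <vector>
#include <glog/logging.h>

// Checks a convolution configuration: one kernel/stride entry per spatial axis,
// every kernel dimension > 0, every stride > 0.
void CheckConvParams(const std::vector<int>& kernel,
                     const std::vector<int>& stride,
                     int num_spatial_axes) {
  CHECK_GE(num_spatial_axes, 0);
  CHECK_EQ(kernel.size(), static_cast<size_t>(num_spatial_axes));
  CHECK_EQ(stride.size(), static_cast<size_t>(num_spatial_axes));
  for (int i = 0; i < num_spatial_axes; ++i) {
    // The streamed message only appears in the log if the check fails.
    CHECK_GT(kernel[i], 0) << "Filter dimensions must be nonzero.";
    CHECK_GT(stride[i], 0) << "Stride dimensions must be nonzero.";
  }
}

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  CheckConvParams({3, 3}, {1, 1}, /*num_spatial_axes=*/2);  // passes silently
  return 0;
}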
示例26: CHECK_EQvoid DataTransformer<Dtype>::Transform(const cv::Mat& cv_img, Blob<Dtype>* transformed_blob) { const int img_channels = cv_img.channels(); const int img_height = cv_img.rows; const int img_width = cv_img.cols; const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); const int num = transformed_blob->num(); CHECK_EQ(channels, img_channels); CHECK_LE(height, img_height); CHECK_LE(width, img_width); CHECK_GE(num, 1); CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); const bool has_mean_values = mean_values_.size() > 0; CHECK_GT(img_channels, 0); CHECK_GE(img_height, crop_size); CHECK_GE(img_width, crop_size); Dtype* mean = NULL; if (has_mean_file) { CHECK_EQ(img_channels, data_mean_.channels()); CHECK_EQ(img_height, data_mean_.height()); CHECK_EQ(img_width, data_mean_.width()); mean = data_mean_.mutable_cpu_data(); } if (has_mean_values) { CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) << "Specify either 1 mean_value or as many as channels: " << img_channels; if (img_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < img_channels; ++c) { mean_values_.push_back(mean_values_[0]); } } } int h_off = 0; int w_off = 0; cv::Mat cv_cropped_img = cv_img; if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); // We only do random crop when we do training. if (phase_ == TRAIN) { h_off = Rand(img_height - crop_size + 1); w_off = Rand(img_width - crop_size + 1); } else { h_off = (img_height - crop_size) / 2; w_off = (img_width - crop_size) / 2; } cv::Rect roi(w_off, h_off, crop_size, crop_size); cv_cropped_img = cv_img(roi); } else { CHECK_EQ(img_height, height); CHECK_EQ(img_width, width); } CHECK(cv_cropped_img.data); Dtype* transformed_data = transformed_blob->mutable_cpu_data(); int top_index; for (int h = 0; h < height; ++h) { const uchar* ptr = cv_cropped_img.ptr<uchar>(h); int img_index = 0; for (int w = 0; w < width; ++w) { for (int c = 0; c < img_channels; ++c) { if (do_mirror) { top_index = (c * height + h) * width + (width - 1 - w); } else { top_index = (c * height + h) * width + w; } // int top_index = (c * height + h) * width + w; Dtype pixel = static_cast<Dtype>(ptr[img_index++]); if (has_mean_file) { int mean_index = (c * img_height + h_off + h) * img_width + w_off + w; transformed_data[top_index] = (pixel - mean[mean_index]) * scale; } else { if (has_mean_values) { transformed_data[top_index] = (pixel - mean_values_[c]) * scale; } else { transformed_data[top_index] = pixel * scale; } } } } }}
Developer ID: Geekking, Project: lisa-caffe-lstm, Lines of code: 99
示例27: whileARTPAssembler::AssemblyStatus AMPEG4ElementaryAssembler::addPacket( const sp<ARTPSource> &source) { List<sp<ABuffer> > *queue = source->queue(); if (queue->empty()) { return NOT_ENOUGH_DATA; } if (mNextExpectedSeqNoValid) { List<sp<ABuffer> >::iterator it = queue->begin(); while (it != queue->end()) { if ((uint32_t)(*it)->int32Data() >= mNextExpectedSeqNo) { break; } it = queue->erase(it); } if (queue->empty()) { return NOT_ENOUGH_DATA; } } sp<ABuffer> buffer = *queue->begin(); if (!mNextExpectedSeqNoValid) { mNextExpectedSeqNoValid = true; mNextExpectedSeqNo = (uint32_t)buffer->int32Data(); } else if ((uint32_t)buffer->int32Data() != mNextExpectedSeqNo) { ALOGV("Not the sequence number I expected"); return WRONG_SEQUENCE_NUMBER; } uint32_t rtpTime; CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime)); if (mPackets.size() > 0 && rtpTime != mAccessUnitRTPTime) { submitAccessUnit(); } mAccessUnitRTPTime = rtpTime; if (!mIsGeneric) { mPackets.push_back(buffer); } else { // hexdump(buffer->data(), buffer->size()); CHECK_GE(buffer->size(), 2u); unsigned AU_headers_length = U16_AT(buffer->data()); // in bits CHECK_GE(buffer->size(), 2 + (AU_headers_length + 7) / 8); List<AUHeader> headers; ABitReader bits(buffer->data() + 2, buffer->size() - 2); unsigned numBitsLeft = AU_headers_length; unsigned AU_serial = 0; for (;;) { if (numBitsLeft < mSizeLength) { break; } unsigned AU_size = bits.getBits(mSizeLength); numBitsLeft -= mSizeLength; size_t n = headers.empty() ? mIndexLength : mIndexDeltaLength; if (numBitsLeft < n) { break; } unsigned AU_index = bits.getBits(n); numBitsLeft -= n; if (headers.empty()) { AU_serial = AU_index; } else { AU_serial += 1 + AU_index; } if (mCTSDeltaLength > 0) { if (numBitsLeft < 1) { break; } --numBitsLeft; if (bits.getBits(1)) { if (numBitsLeft < mCTSDeltaLength) { break; } bits.skipBits(mCTSDeltaLength); numBitsLeft -= mCTSDeltaLength; } } if (mDTSDeltaLength > 0) { if (numBitsLeft < 1) { break; } --numBitsLeft; if (bits.getBits(1)) { if (numBitsLeft < mDTSDeltaLength) { break; } bits.skipBits(mDTSDeltaLength);//.........这里部分代码省略.........
Developer ID: F1-GalaxyNexus, Project: android_frameworks_base, Lines of code: 101
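Example 27 shows CHECK_GE guarding buffer sizes while parsing an RTP payload; note the unsigned literal 2u, which keeps the comparison against the unsigned size() result unsigned as well. The sketch below reproduces only that guard on a simplified payload layout (a 16-bit big-endian header-length prefix in bits, followed by data); it loosely mirrors the AMPEG4ElementaryAssembler logic and is not the actual AOSP code.

#include <cstdint>
#include <vector>
#include <glog/logging.h>

// Reads the 2-byte big-endian AU_headers_length prefix and returns it in bits.
unsigned ReadHeaderLengthBits(const std::vector<uint8_t>& payload) {
  CHECK_GE(payload.size(), 2u) << "payload too short for the 2-byte prefix";
  unsigned au_headers_length = (payload[0] << 8) | payload[1];  // in bits
  // The packet must also contain the headers the prefix promises.
  CHECK_GE(payload.size(), 2 + (au_headers_length + 7) / 8);
  return au_headers_length;
}

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  std::vector<uint8_t> payload = {0x00, 0x10, 0xAB, 0xCD};  // 16 header bits
  LOG(INFO) << "AU_headers_length = " << ReadHeaderLengthBits(payload) << " bits";
  return 0;
}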
示例28: Randvoid DataTransformer<Dtype>::Transform(const Datum& datum, Dtype* transformed_data) { const string& data = datum.data(); const int datum_channels = datum.channels(); const int datum_height = datum.height(); const int datum_width = datum.width(); const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); const bool has_uint8 = data.size() > 0; const bool has_mean_values = mean_values_.size() > 0; const bool flow = param_.flow(); CHECK_GT(datum_channels, 0); CHECK_GE(datum_height, crop_size); CHECK_GE(datum_width, crop_size); Dtype* mean = NULL; if (has_mean_file) { CHECK_EQ(datum_channels, data_mean_.channels()); CHECK_EQ(datum_height, data_mean_.height()); CHECK_EQ(datum_width, data_mean_.width()); mean = data_mean_.mutable_cpu_data(); } if (has_mean_values) { CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) << "Specify either 1 mean_value or as many as channels: " << datum_channels; if (datum_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < datum_channels; ++c) { mean_values_.push_back(mean_values_[0]); } } } int height = datum_height; int width = datum_width; int h_off = 0; int w_off = 0; if (crop_size) { height = crop_size; width = crop_size; // We only do random crop when we do training. if (phase_ == TRAIN) { h_off = Rand(datum_height - crop_size + 1); w_off = Rand(datum_width - crop_size + 1); } else { h_off = (datum_height - crop_size) / 2; w_off = (datum_width - crop_size) / 2; } } Dtype datum_element; int top_index, data_index; for (int c = 0; c < datum_channels; ++c) { for (int h = 0; h < height; ++h) { for (int w = 0; w < width; ++w) { data_index = (c * datum_height + h_off + h) * datum_width + w_off + w; if (do_mirror) { top_index = (c * height + h) * width + (width - 1 - w); } else { top_index = (c * height + h) * width + w; } if (has_uint8) { datum_element = static_cast<Dtype>(static_cast<uint8_t>(data[data_index])); if (flow && c == 2 && do_mirror) { datum_element = 255-datum_element; } } else { datum_element = datum.float_data(data_index); if (flow && c == 2 && do_mirror) { datum_element = 255-datum_element; } } if (has_mean_file) { transformed_data[top_index] = (datum_element - mean[data_index]) * scale; } else { if (has_mean_values) { transformed_data[top_index] = (datum_element - mean_values_[c]) * scale; } else { transformed_data[top_index] = datum_element * scale; } } } } }}
Developer ID: Geekking, Project: lisa-caffe-lstm, Lines of code: 93
Example 29: CHECK_GE

cv::Mat ImageData::GetChannelImage(const int index) const {
  CHECK_GE(index, 0) << "Channel index must be at least 0.";
  CHECK_LT(index, GetNumChannels()) << "Channel index out of bounds.";
  return channels_[index];
}
Developer ID: teammcr192, Project: super-resolution, Lines of code: 5
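Example 29 is the canonical bounds-check idiom: CHECK_GE for the lower bound, CHECK_LT for the upper bound, each with a streamed message. A compact, hypothetical summary of that idiom, assuming only glog and the standard library (ChannelStack is an invented class, not taken from the project above):

#include <vector>
#include <glog/logging.h>

class ChannelStack {
 public:
  explicit ChannelStack(int num_channels) : channels_(num_channels) {}

  // Returns the channel at `index`; aborts with a FATAL log on a bad index.
  int& GetChannel(int index) {
    CHECK_GE(index, 0) << "Channel index must be at least 0.";
    CHECK_LT(index, static_cast<int>(channels_.size()))
        << "Channel index out of bounds.";
    return channels_[index];
  }

 private:
  std::vector<int> channels_;
};

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  ChannelStack stack(3);
  stack.GetChannel(2) = 42;   // fine
  // stack.GetChannel(3);     // would abort: index out of bounds
  return 0;
}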
Note: the CHECK_GE examples in this article were collected from GitHub, MSDocs, and similar source-code and documentation hosting platforms; the snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors. Please consult each project's License before redistributing or reusing the code, and do not republish without permission.