您当前的位置:首页 > IT编程 > C++
| C语言 | Java | VB | VC | python | Android | TensorFlow | C++ | oracle | 学术与代码 | cnn卷积神经网络 | gnn | 图像修复 | Keras | 数据集 | Neo4j | 自然语言处理 | 深度学习 | 医学CAD | 医学影像 | 超参数 | pointnet | pytorch | 异常检测 | Transformers | 情感分类 | 知识图谱 |

自学教程:C++ ALIGN函数代码示例

51自学网 2021-06-01 19:34:29
  C++
这篇教程C++ ALIGN函数代码示例写得很实用,希望能帮到您。

本文整理汇总了C++中ALIGN函数的典型用法代码示例。如果您正苦于以下问题:C++ ALIGN函数的具体用法?C++ ALIGN怎么用?C++ ALIGN使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。

在下文中一共展示了ALIGN函数的30个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。

示例1: cvmx_bootmem_phy_alloc

int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,			       uint64_t address_max, uint64_t alignment,			       uint32_t flags){	uint64_t head_addr;	uint64_t ent_addr;	/* points to previous list entry, NULL current entry is head of list */	uint64_t prev_addr = 0;	uint64_t new_ent_addr = 0;	uint64_t desired_min_addr;#ifdef DEBUG	cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "		     "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx/n",		     (unsigned long long)req_size,		     (unsigned long long)address_min,		     (unsigned long long)address_max,		     (unsigned long long)alignment);#endif	if (cvmx_bootmem_desc->major_version > 3) {		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "			     "version: %d.%d at addr: %p/n",			     (int)cvmx_bootmem_desc->major_version,			     (int)cvmx_bootmem_desc->minor_version,			     cvmx_bootmem_desc);		goto error_out;	}	/*	 * Do a variety of checks to validate the arguments.  The	 * allocator code will later assume that these checks have	 * been made.  We validate that the requested constraints are	 * not self-contradictory before we look through the list of	 * available memory.	 */	/* 0 is not a valid req_size for this allocator */	if (!req_size)		goto error_out;	/* Round req_size up to mult of minimum alignment bytes */	req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &		~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);	/*	 * Convert !0 address_min and 0 address_max to special case of	 * range that specifies an exact memory block to allocate.  Do	 * this before other checks and adjustments so that this	 * tranformation will be validated.	 */	if (address_min && !address_max)		address_max = address_min + req_size;	else if (!address_min && !address_max)		address_max = ~0ull;  /* If no limits given, use max limits */	/*	 * Enforce minimum alignment (this also keeps the minimum free block	 * req_size the same as the alignment req_size.	 
*/	if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)		alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;	/*	 * Adjust address minimum based on requested alignment (round	 * up to meet alignment).  Do this here so we can reject	 * impossible requests up front. (NOP for address_min == 0)	 */	if (alignment)		address_min = ALIGN(address_min, alignment);	/*	 * Reject inconsistent args.  We have adjusted these, so this	 * may fail due to our internal changes even if this check	 * would pass for the values the user supplied.	 */	if (req_size > address_max - address_min)		goto error_out;	/* Walk through the list entries - first fit found is returned */	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))		cvmx_bootmem_lock();	head_addr = cvmx_bootmem_desc->head_addr;	ent_addr = head_addr;	for (; ent_addr;	     prev_addr = ent_addr,	     ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {		uint64_t usable_base, usable_max;		uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);		if (cvmx_bootmem_phy_get_next(ent_addr)		    && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {			cvmx_dprintf("Internal bootmem_alloc() error: ent: "				"0x%llx, next: 0x%llx/n",				(unsigned long long)ent_addr,				(unsigned long long)				cvmx_bootmem_phy_get_next(ent_addr));//.........这里部分代码省略.........
开发者ID:CSCLOG,项目名称:beaglebone,代码行数:101,


示例2: intel_compute_size

unsigned intintel_compute_size(struct intel_screen_private *intel,                   int w, int h, int bpp, unsigned usage,                   uint32_t *tiling, int *stride){	int pitch, size;	if (*tiling != I915_TILING_NONE) {		/* First check whether tiling is necessary. */		pitch = (w * bpp  + 7) / 8;		pitch = ALIGN(pitch, 64);		size = pitch * ALIGN (h, 2);		if (INTEL_INFO(intel)->gen < 040) {			/* Gen 2/3 has a maximum stride for tiling of			 * 8192 bytes.			 */			if (pitch > KB(8))				*tiling = I915_TILING_NONE;			/* Narrower than half a tile? */			if (pitch < 256)				*tiling = I915_TILING_NONE;			/* Older hardware requires fences to be pot size			 * aligned with a minimum of 1 MiB, so causes			 * massive overallocation for small textures.			 */			if (size < 1024*1024/2 && !intel->has_relaxed_fencing)				*tiling = I915_TILING_NONE;		} else if (!(usage & INTEL_CREATE_PIXMAP_DRI2) && size <= 4096) {			/* Disable tiling beneath a page size, we will not see			 * any benefit from reducing TLB misses and instead			 * just incur extra cost when we require a fence.			 */			*tiling = I915_TILING_NONE;		}	}	pitch = (w * bpp + 7) / 8;	if (!(usage & INTEL_CREATE_PIXMAP_DRI2) && pitch <= 256)		*tiling = I915_TILING_NONE;	if (*tiling != I915_TILING_NONE) {		int aligned_h, tile_height;		if (IS_GEN2(intel))			tile_height = 16;		else if (*tiling == I915_TILING_X)			tile_height = 8;		else			tile_height = 32;		aligned_h = ALIGN(h, tile_height);		*stride = intel_get_fence_pitch(intel,						ALIGN(pitch, 512),						*tiling);		/* Round the object up to the size of the fence it will live in		 * if necessary.  We could potentially make the kernel allocate		 * a larger aperture space and just bind the subset of pages in,		 * but this is easier and also keeps us out of trouble (as much)		 * with drm_intel_bufmgr_check_aperture().		 
*/		size = intel_get_fence_size(intel, *stride * aligned_h);		if (size > intel->max_tiling_size)			*tiling = I915_TILING_NONE;	}	if (*tiling == I915_TILING_NONE) {		/* We only require a 64 byte alignment for scanouts, but		 * a 256 byte alignment for sharing with PRIME.		 */		*stride = ALIGN(pitch, 256);		/* Round the height up so that the GPU's access to a 2x2 aligned		 * subspan doesn't address an invalid page offset beyond the		 * end of the GTT.		 */		size = *stride * ALIGN(h, 2);	}	return size;}
开发者ID:01org,项目名称:iotg-lin-gfx-ddx,代码行数:83,


示例3: cache

//.........这里部分代码省略.........					if( tri->verts[j].xyz[k] > entityDef->localReferenceBounds[1][k] + CHECK_BOUNDS_EPSILON							|| tri->verts[j].xyz[k] < entityDef->localReferenceBounds[0][k] - CHECK_BOUNDS_EPSILON )					{						common->Printf( "bad referenceBounds on %s:%s/n", entityDef->parms.hModel->Name(), shader->GetName() );						break;					}				}				if( k != 3 )				{					break;				}			}		}				// view frustum culling for the precise surface bounds, which is tighter		// than the entire entity reference bounds		// If the entire model wasn't visible, there is no need to check the		// individual surfaces.		const bool surfaceDirectlyVisible = modelIsVisible && !idRenderMatrix::CullBoundsToMVP( vEntity->mvp, tri->bounds );				// RB: added check wether GPU skinning is available at all		const bool gpuSkinned = ( tri->staticModelWithJoints != NULL && r_useGPUSkinning.GetBool() && glConfig.gpuSkinningAvailable );		// RB end				//--------------------------		// base drawing surface		//--------------------------		drawSurf_t* baseDrawSurf = NULL;		if( surfaceDirectlyVisible )		{			// make sure we have an ambient cache and all necessary normals / tangents			if( !vertexCache.CacheIsCurrent( tri->indexCache ) )			{				tri->indexCache = vertexCache.AllocIndex( tri->indexes, ALIGN( tri->numIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );			}						if( !vertexCache.CacheIsCurrent( tri->ambientCache ) )			{				// we are going to use it for drawing, so make sure we have the tangents and normals				if( shader->ReceivesLighting() && !tri->tangentsCalculated )				{					assert( tri->staticModelWithJoints == NULL );					R_DeriveTangents( tri );										// RB: this was hit by parametric particle models ..					
//assert( false );	// this should no longer be hit					// RB end				}				tri->ambientCache = vertexCache.AllocVertex( tri->verts, ALIGN( tri->numVerts * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN ) );			}						// add the surface for drawing			// we can re-use some of the values for light interaction surfaces			baseDrawSurf = ( drawSurf_t* )R_FrameAlloc( sizeof( *baseDrawSurf ), FRAME_ALLOC_DRAW_SURFACE );			baseDrawSurf->frontEndGeo = tri;			baseDrawSurf->space = vEntity;			baseDrawSurf->scissorRect = vEntity->scissorRect;			baseDrawSurf->extraGLState = 0;			baseDrawSurf->renderZFail = 0;						R_SetupDrawSurfShader( baseDrawSurf, shader, renderEntity );						// Check for deformations (eyeballs, flares, etc)			const deform_t shaderDeform = shader->Deform();			if( shaderDeform != DFRM_NONE )			{
开发者ID:slacka,项目名称:RBDOOM-3-BFG,代码行数:67,


示例4: zynq_load

int zynq_load(Xilinx_desc *desc, const void *buf, size_t bsize){	unsigned long ts; /* Timestamp */	u32 partialbit = 0;	u32 i, control, isr_status, status, swap, diff;	u32 *buf_start;	/* Detect if we are going working with partial or full bitstream */	if (bsize != desc->size) {		printf("%s: Working with partial bitstream/n", __func__);		partialbit = 1;	}	buf_start = check_data((u8 *)buf, bsize, &swap);	if (!buf_start)		return FPGA_FAIL;	/* Check if data is postpone from start */	diff = (u32)buf_start - (u32)buf;	if (diff) {		printf("%s: Bitstream is not validated yet (diff %x)/n",		       __func__, diff);		return FPGA_FAIL;	}	if ((u32)buf < SZ_1M) {		printf("%s: Bitstream has to be placed up to 1MB (%x)/n",		       __func__, (u32)buf);		return FPGA_FAIL;	}	if ((u32)buf != ALIGN((u32)buf, ARCH_DMA_MINALIGN)) {		u32 *new_buf = (u32 *)ALIGN((u32)buf, ARCH_DMA_MINALIGN);		printf("%s: Align buffer at %x to %x(swap %d)/n", __func__,		       (u32)buf_start, (u32)new_buf, swap);		for (i = 0; i < (bsize/4); i++)			new_buf[i] = load_word(&buf_start[i], swap);		swap = SWAP_DONE;		buf = new_buf;	} else if (swap != SWAP_DONE) {		/* For bitstream which are aligned */		u32 *new_buf = (u32 *)buf;		printf("%s: Bitstream is not swapped(%d) - swap it/n", __func__,		       swap);		for (i = 0; i < (bsize/4); i++)			new_buf[i] = load_word(&buf_start[i], swap);		swap = SWAP_DONE;	}	/* Clear loopback bit */	clrbits_le32(&devcfg_base->mctrl, DEVCFG_MCTRL_PCAP_LPBK);	if (!partialbit) {		zynq_slcr_devcfg_disable();		/* Setting PCFG_PROG_B signal to high */		control = readl(&devcfg_base->ctrl);		writel(control | DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);		/* Setting PCFG_PROG_B signal to low */		writel(control & ~DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);		/* Polling the PCAP_INIT status for Reset */		ts = get_timer(0);		while (readl(&devcfg_base->status) & DEVCFG_STATUS_PCFG_INIT) {			if (get_timer(ts) > CONFIG_SYS_FPGA_WAIT) {				printf("%s: Timeout wait for INIT to clear/n",				       
__func__);				return FPGA_FAIL;			}		}		/* Setting PCFG_PROG_B signal to high */		writel(control | DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);		/* Polling the PCAP_INIT status for Set */		ts = get_timer(0);		while (!(readl(&devcfg_base->status) &			DEVCFG_STATUS_PCFG_INIT)) {			if (get_timer(ts) > CONFIG_SYS_FPGA_WAIT) {				printf("%s: Timeout wait for INIT to set/n",				       __func__);				return FPGA_FAIL;			}		}	}	isr_status = readl(&devcfg_base->int_sts);	/* Clear it all, so if Boot ROM comes back, it can proceed */	writel(0xFFFFFFFF, &devcfg_base->int_sts);	if (isr_status & DEVCFG_ISR_FATAL_ERROR_MASK) {		debug("%s: Fatal errors in PCAP 0x%X/n", __func__, isr_status);//.........这里部分代码省略.........
开发者ID:Bing0,项目名称:u-boot,代码行数:101,


示例5: ubifs_wbuf_sync_nolock

/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function does
 * not necessarily write all @c->max_write_size bytes to the flash. Instead,
 * if the write-buffer is only partially filled with data, only the used part
 * of the write-buffer (aligned on @c->min_io_size boundary) is synchronized.
 * This way we waste less space.
 *
 * Caller is expected to hold the appropriate locks ("_nolock" suffix) and
 * the write-buffer timer is cancelled before anything is flushed.
 */
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
	struct ubifs_info *c = wbuf->c;
	int err, dirt, sync_len;

	cancel_wbuf_timer_nolock(wbuf);
	if (!wbuf->used || wbuf->lnum == -1)
		/* Write-buffer is empty or not seeked */
		return 0;

	dbg_io("LEB %d:%d, %d bytes, jhead %s",
	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
	/* Sanity-check the write-buffer invariants before touching the media */
	ubifs_assert(!(wbuf->avail & 7));
	ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->ro_error)
		return -EROFS;

	/*
	 * Do not write whole write buffer but write only the minimum necessary
	 * amount of min. I/O units.
	 */
	sync_len = ALIGN(wbuf->used, c->min_io_size);
	dirt = sync_len - wbuf->used;
	if (dirt)
		/* Fill the unused tail of the last min. I/O unit with padding */
		ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
	err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
			    sync_len, wbuf->dtype);
	if (err) {
		ubifs_err("cannot write %d bytes to LEB %d:%d",
			  sync_len, wbuf->lnum, wbuf->offs);
		dbg_dump_stack();
		return err;
	}

	spin_lock(&wbuf->lock);
	wbuf->offs += sync_len;
	/*
	 * Now @wbuf->offs is not necessarily aligned to @c->max_write_size.
	 * But our goal is to optimize writes and make sure we write in
	 * @c->max_write_size chunks and to @c->max_write_size-aligned offset.
	 * Thus, if @wbuf->offs is not aligned to @c->max_write_size now, make
	 * sure that @wbuf->offs + @wbuf->size is aligned to
	 * @c->max_write_size. This way we make sure that after next
	 * write-buffer flush we are again at the optimal offset (aligned to
	 * @c->max_write_size).
	 */
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

	/*
	 * Let the owner of this journal head know how much free space remains
	 * in the LEB and how much padding ("dirt") the flush created.
	 */
	if (wbuf->sync_callback)
		err = wbuf->sync_callback(c, wbuf->lnum,
					  c->leb_size - wbuf->offs, dirt);
	return err;
}
开发者ID:119-org,项目名称:hi3518-osdrv,代码行数:82,


示例6: esp6_input

static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb){	struct ipv6hdr *iph;	struct ipv6_esp_hdr *esph;	struct esp_data *esp = x->data;	struct sk_buff *trailer;	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);	int alen = esp->auth.icv_trunc_len;	int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;	int hdr_len = skb->h.raw - skb->nh.raw;	int nfrags;	unsigned char *tmp_hdr = NULL;	int ret = 0;	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {		ret = -EINVAL;		goto out_nofree;	}	if (elen <= 0 || (elen & (blksize-1))) {		ret = -EINVAL;		goto out_nofree;	}	tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);	if (!tmp_hdr) {		ret = -ENOMEM;		goto out_nofree;	}	memcpy(tmp_hdr, skb->nh.raw, hdr_len);	/* If integrity check is required, do this. */        if (esp->auth.icv_full_len) {		u8 sum[esp->auth.icv_full_len];		u8 sum1[alen];		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))			BUG();		if (unlikely(memcmp(sum, sum1, alen))) {			x->stats.integrity_failed++;			ret = -EINVAL;			goto out;		}	}	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {		ret = -EINVAL;		goto out;	}	skb->ip_summed = CHECKSUM_NONE;	esph = (struct ipv6_esp_hdr*)skb->data;	iph = skb->nh.ipv6h;	/* Get ivec. This can be wrong, check against another impls. 
*/	if (esp->conf.ivlen)		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));        {		u8 nexthdr[2];		struct scatterlist *sg = &esp->sgbuf[0];		u8 padlen;		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);			if (!sg) {				ret = -ENOMEM;				goto out;			}		}		skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);		if (unlikely(sg != &esp->sgbuf[0]))			kfree(sg);		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))			BUG();		padlen = nexthdr[0];		if (padlen+2 >= elen) {			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d/n", padlen+2, elen);			ret = -EINVAL;			goto out;		}		/* ... check padding bits here. Silly. :-) */ 		pskb_trim(skb, skb->len - alen - padlen - 2);		skb->h.raw = skb_pull(skb, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen);		skb->nh.raw += sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;		memcpy(skb->nh.raw, tmp_hdr, hdr_len);		skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));		ret = nexthdr[1];	}out://.........这里部分代码省略.........
开发者ID:nighthawk149,项目名称:fvs318g-cfw,代码行数:101,


示例7: iwl_pcie_get_cmd_index

/*
 * iwl_pcie_gen2_build_tx - build a gen2 TFD (transmit frame descriptor)
 * for one skb on the given TX queue.
 *
 * TB0 points at the bi-directional "first TB" scratch buffer, TB1 maps the
 * remainder of the TX command plus the 802.11 header (dword-aligned when
 * @pad is set), and TB2 (if non-empty) maps the rest of the skb's linear
 * head.  Paged fragments are appended by iwl_pcie_gen2_tx_add_frags().
 *
 * Returns the filled TFD, or NULL on DMA-mapping failure (any mappings
 * already made are unmapped before returning).
 *
 * NOTE: the original extraction fused "struct" with the type name
 * ("static structiwl_tfh_tfd *"); the declaration is restored here.
 */
static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
				    struct iwl_txq *txq,
				    struct iwl_device_cmd *dev_cmd,
				    struct sk_buff *skb,
				    struct iwl_cmd_meta *out_meta,
				    int hdr_len,
				    int tx_cmd_len,
				    bool pad)
{
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;

	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);

	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;
	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
					skb->data + hdr_len,
					tb2_len);
	}

	if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	return tfd;

out_err:
	/* Tear down whatever was mapped before the failure */
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}
开发者ID:AlexShiLucky,项目名称:linux,代码行数:69,


示例8: MSG

h264enc *h264enc_new(const struct h264enc_params *p){	h264enc *c;	int i;	/* check parameter validity */	if (!IS_ALIGNED(p->src_width, 16) || !IS_ALIGNED(p->src_height, 16) ||		!IS_ALIGNED(p->width, 2) || !IS_ALIGNED(p->height, 2) ||		p->width > p->src_width || p->height > p->src_height)	{		MSG("invalid picture size");		return NULL;	}	if (p->qp == 0 || p->qp > 47)	{		MSG("invalid QP");		return NULL;	}	if (p->src_format != H264_FMT_NV12 && p->src_format != H264_FMT_NV16)	{		MSG("invalid color format");		return NULL;	}	/* allocate memory for h264enc structure */	c = calloc(1, sizeof(*c));	if (c == NULL)	{		MSG("can't allocate h264enc data");		return NULL;	}	/* copy parameters */	c->mb_width = DIV_ROUND_UP(p->width, 16);	c->mb_height = DIV_ROUND_UP(p->height, 16);	c->mb_stride = p->src_width / 16;	c->crop_right = (c->mb_width * 16 - p->width) / 2;	c->crop_bottom = (c->mb_height * 16 - p->height) / 2;	c->profile_idc = p->profile_idc;	c->level_idc = p->level_idc;	c->entropy_coding_mode_flag = p->entropy_coding_mode ? 
1 : 0;	c->pic_init_qp = p->qp;	c->keyframe_interval = p->keyframe_interval;	c->write_sps_pps = 1;	c->current_frame_num = 0;	/* allocate input buffer */	c->input_color_format = p->src_format;	switch (c->input_color_format)	{	case H264_FMT_NV12:		c->input_buffer_size = p->src_width * (p->src_height + p->src_height / 2);		break;	case H264_FMT_NV16:		c->input_buffer_size = p->src_width * p->src_height * 2;		break;	}	c->luma_buffer = ve_malloc(c->input_buffer_size);	if (c->luma_buffer == NULL)		goto nomem;	c->chroma_buffer = c->luma_buffer + p->src_width * p->src_height;	/* allocate bytestream output buffer */	c->bytestream_buffer_size = 1 * 1024 * 1024;	c->bytestream_buffer = ve_malloc(c->bytestream_buffer_size);	if (c->bytestream_buffer == NULL)		goto nomem;	/* allocate reference picture memory */	unsigned int luma_size = ALIGN(c->mb_width * 16, 32) * ALIGN(c->mb_height * 16, 32);	unsigned int chroma_size = ALIGN(c->mb_width * 16, 32) * ALIGN(c->mb_height * 8, 32);	for (i = 0; i < 2; i++)	{		c->ref_picture[i].luma_buffer = ve_malloc(luma_size + chroma_size);		c->ref_picture[i].chroma_buffer = c->ref_picture[i].luma_buffer + luma_size;		c->ref_picture[i].extra_buffer = ve_malloc(luma_size / 4);		if (c->ref_picture[i].luma_buffer == NULL || c->ref_picture[i].extra_buffer == NULL)			goto nomem;	}	/* allocate unknown purpose buffers */	c->extra_buffer_frame = ve_malloc(ALIGN(c->mb_width, 4) * c->mb_height * 8);	c->extra_buffer_line = ve_malloc(c->mb_width * 32);	if (c->extra_buffer_frame == NULL || c->extra_buffer_line == NULL)		goto nomem;	return c;nomem:	MSG("can't allocate VE memory");	h264enc_free(c);	return NULL;//.........这里部分代码省略.........
开发者ID:aRUNTU,项目名称:FFmpeg-Cedrus,代码行数:101,


示例9: sdma_v3_0_init_microcode

/** * sdma_v3_0_init_microcode - load ucode images from disk * * @adev: amdgpu_device pointer * * Use the firmware interface to load the ucode images into * the driver (not loaded into hw). * Returns 0 on success, error on failure. */static int sdma_v3_0_init_microcode(struct amdgpu_device *adev){	const char *chip_name;	char fw_name[30];	int err = 0, i;	struct amdgpu_firmware_info *info = NULL;	const struct common_firmware_header *header = NULL;	const struct sdma_firmware_header_v1_0 *hdr;	DRM_DEBUG("/n");	switch (adev->asic_type) {	case CHIP_TONGA:		chip_name = "tonga";		break;	case CHIP_FIJI:		chip_name = "fiji";		break;	case CHIP_POLARIS11:		chip_name = "polaris11";		break;	case CHIP_POLARIS10:		chip_name = "polaris10";		break;	case CHIP_CARRIZO:		chip_name = "carrizo";		break;	case CHIP_STONEY:		chip_name = "stoney";		break;	default: BUG();	}	for (i = 0; i < adev->sdma.num_instances; i++) {		if (i == 0)			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);		else			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);		if (err)			goto out;		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);		if (err)			goto out;		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);		if (adev->sdma.instance[i].feature_version >= 20)			adev->sdma.instance[i].burst_nop = true;		if (adev->firmware.smu_load) {			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;			info->fw = adev->sdma.instance[i].fw;			header = (const struct common_firmware_header *)info->fw->data;			adev->firmware.fw_size +=				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);		}	}out:	if (err) {		printk(KERN_ERR		       "sdma_v3_0: Failed to load firmware 
/"%s/"/n",		       fw_name);		for (i = 0; i < adev->sdma.num_instances; i++) {			release_firmware(adev->sdma.instance[i].fw);			adev->sdma.instance[i].fw = NULL;		}	}	return err;}
开发者ID:forgivemyheart,项目名称:linux,代码行数:80,


示例10: wl1271_boot_upload_nvs

static int wl1271_boot_upload_nvs(struct wl1271 *wl){	size_t nvs_len, burst_len;	int i;	u32 dest_addr, val;	u8 *nvs_ptr, *nvs, *nvs_aligned;	nvs = wl->nvs;	if (nvs == NULL)		return -ENODEV;	nvs_ptr = nvs;	nvs_len = wl->nvs_len;	/* Update the device MAC address into the nvs */	nvs[11] = wl->mac_addr[0];	nvs[10] = wl->mac_addr[1];	nvs[6] = wl->mac_addr[2];	nvs[5] = wl->mac_addr[3];	nvs[4] = wl->mac_addr[4];	nvs[3] = wl->mac_addr[5];	/*	 * Layout before the actual NVS tables:	 * 1 byte : burst length.	 * 2 bytes: destination address.	 * n bytes: data to burst copy.	 *	 * This is ended by a 0 length, then the NVS tables.	 */	/* FIXME: Do we need to check here whether the LSB is 1? */	while (nvs_ptr[0]) {		burst_len = nvs_ptr[0];		dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));		/* FIXME: Due to our new wl1271_translate_reg_addr function,		   we need to add the REGISTER_BASE to the destination */		dest_addr += REGISTERS_BASE;		/* We move our pointer to the data */		nvs_ptr += 3;		for (i = 0; i < burst_len; i++) {			val = (nvs_ptr[0] | (nvs_ptr[1] << 8)			       | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));			wl1271_debug(DEBUG_BOOT,				     "nvs burst write 0x%x: 0x%x",				     dest_addr, val);			wl1271_reg_write32(wl, dest_addr, val);			nvs_ptr += 4;			dest_addr += 4;		}	}	/*	 * We've reached the first zero length, the first NVS table	 * is 7 bytes further.	 
*/	nvs_ptr += 7;	nvs_len -= nvs_ptr - nvs;	nvs_len = ALIGN(nvs_len, 4);	/* FIXME: The driver sets the partition here, but this is not needed,	   since it sets to the same one as currently in use */	/* Now we must set the partition correctly */	wl1271_set_partition(wl,			     part_table[PART_WORK].mem.start,			     part_table[PART_WORK].mem.size,			     part_table[PART_WORK].reg.start,			     part_table[PART_WORK].reg.size);	/* Copy the NVS tables to a new block to ensure alignment */	nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);	/* And finally we upload the NVS tables */	/* FIXME: In wl1271, we upload everything at once.	   No endianness handling needed here?! The ref driver doesn't do	   anything about it at this point */	wl1271_spi_mem_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len);	kfree(nvs_aligned);	return 0;}
开发者ID:mikuhatsune001,项目名称:linux2.6.32,代码行数:87,


示例11: intelfb_create

static int intelfb_create(struct intel_fbdev *ifbdev,			  struct drm_fb_helper_surface_size *sizes){	struct drm_device *dev = ifbdev->helper.dev;	struct drm_i915_private *dev_priv = dev->dev_private;	struct fb_info *info;	struct drm_framebuffer *fb;	struct drm_mode_fb_cmd2 mode_cmd = {};	struct drm_i915_gem_object *obj;	struct device *device = &dev->pdev->dev;	int size, ret;	/* we don't do packed 24bpp */	if (sizes->surface_bpp == 24)		sizes->surface_bpp = 32;	mode_cmd.width = sizes->surface_width;	mode_cmd.height = sizes->surface_height;	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /						      8), 64);	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,							  sizes->surface_depth);	size = mode_cmd.pitches[0] * mode_cmd.height;	size = ALIGN(size, PAGE_SIZE);	obj = i915_gem_alloc_object(dev, size);	if (!obj) {		DRM_ERROR("failed to allocate framebuffer/n");		ret = -ENOMEM;		goto out;	}	mutex_lock(&dev->struct_mutex);	/* Flush everything out, we'll be doing GTT only from now on */	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);	if (ret) {		DRM_ERROR("failed to pin fb: %d/n", ret);		goto out_unref;	}	info = framebuffer_alloc(0, device);	if (!info) {		ret = -ENOMEM;		goto out_unpin;	}	info->par = ifbdev;	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);	if (ret)		goto out_unpin;	fb = &ifbdev->ifb.base;	ifbdev->helper.fb = fb;	ifbdev->helper.fbdev = info;	strcpy(info->fix.id, "inteldrmfb");	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;	info->fbops = &intelfb_ops;	ret = fb_alloc_cmap(&info->cmap, 256, 0);	if (ret) {		ret = -ENOMEM;		goto out_unpin;	}	/* setup aperture base/size for vesafb takeover */	info->aperture_base = dev->mode_config.fb_base;	if (!IS_GEN2(dev))		info->aperture_size = pci_resource_len(dev->pdev, 2);	else		info->aperture_size = pci_resource_len(dev->pdev, 0);	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;	info->fix.smem_len = size;	info->screen_base =		
ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset,			   size);	if (!info->screen_base) {		ret = -ENOSPC;		goto out_unpin;	}	info->screen_size = size;//	memset(info->screen_base, 0, size);	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p/n",		      fb->width, fb->height,		      obj->gtt_offset, obj);//.........这里部分代码省略.........
开发者ID:daveti,项目名称:prov-kernel,代码行数:101,


示例12: do_iommu_domain_map

static int do_iommu_domain_map(struct hisi_iommu_domain *hisi_domain,struct scatterlist *sgl,		struct iommu_map_format *format, struct map_result *result){	int ret;	unsigned long phys_len, iova_size;	unsigned long iova_start;	struct gen_pool *pool;	struct iommu_domain *domain;	struct scatterlist *sg;	struct tile_format fmt;	/* calculate whole phys mem length */	for (phys_len = 0, sg = sgl; sg; sg = sg_next(sg)) {		phys_len += (unsigned long)ALIGN(sg->length, PAGE_SIZE);	}	/* get io virtual address size */	if (format->is_tile) {		unsigned long lines;		unsigned long body_size;		body_size = phys_len - format->header_size;		lines = body_size / (format->phys_page_line * PAGE_SIZE);		/*header need more lines virtual space*/		if ( format->header_size ){			unsigned long header_size;			header_size = ALIGN(format->header_size ,format->virt_page_line * PAGE_SIZE);			lines +=  header_size / (format->virt_page_line * PAGE_SIZE);		}		iova_size = lines * format->virt_page_line * PAGE_SIZE ;	} else {		iova_size = phys_len;	}	/* alloc iova */	pool = hisi_domain->iova_pool;	domain = hisi_domain->domain;	iova_start = hisi_alloc_iova(pool,iova_size,hisi_domain->range.align);	if (!iova_start) {		printk("[%s]hisi_alloc_iova alloc 0x%lx failed!/n", __func__, iova_size);		printk("[%s]dump iova pool begain--------------------------/n", __func__);		printk("iova available: 0x%x/n",(unsigned int)hisi_iommu_iova_available());		printk("alloc count: %d, free count: %d/n",				dbg_inf.alloc_iova_count, dbg_inf.free_iova_count);		printk("[%s]dump iova pool end   --------------------------/n", __func__);		return -EINVAL;	}	if (0x100000000 < (iova_start + iova_size)) {		pr_err("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
"				"hisi iommu can not deal with iova 0x%lx size 0x%lx/n",				iova_start, iova_size);	}	/* do map */	if (format->is_tile) {		fmt.is_tile = format->is_tile;		fmt.phys_page_line = format->phys_page_line;		fmt.virt_page_line = format->virt_page_line;		fmt.header_size = format->header_size ;		ret = iommu_map_tile(domain, iova_start, sgl, iova_size, 0,&fmt);	} else {		ret = iommu_map_range(domain, iova_start,sgl,(size_t)iova_size,format->prot);	}	if (ret) {		printk(KERN_ERR "[%s]map failed!/n", __func__);		hisi_free_iova(pool, iova_start, iova_size);		return ret;	}else {		/* out put result */		result->iova_start = iova_start;		result->iova_size = iova_size;	}	return 0;}
开发者ID:XePeleato,项目名称:android_kernel_huawei_venus,代码行数:77,


示例13: mdss_mdp_get_plane_sizes

/*
 * mdss_mdp_get_plane_sizes() - fill in per-plane strides and sizes for a
 * pixel format at the given dimensions.
 * @format:   MDP pixel format identifier
 * @w:        image width in pixels
 * @h:        image height in pixels
 * @ps:       out-param; zeroed, then populated with strides/plane sizes
 * @bwc_mode: non-zero selects the bandwidth-compression (RAU) layout
 *
 * Returns 0 on success, -EINVAL for a NULL @ps or unknown format, -ERANGE
 * when the dimensions exceed the hardware limits, or the error from the
 * RAU stride helper.
 */
int mdss_mdp_get_plane_sizes(u32 format, u32 w, u32 h,
			     struct mdss_mdp_plane_sizes *ps, u32 bwc_mode)
{
	struct mdss_mdp_format_params *fmt;
	u32 bpp;
	int i;

	if (!ps)
		return -EINVAL;
	if (w > MAX_IMG_WIDTH || h > MAX_IMG_HEIGHT)
		return -ERANGE;

	fmt = mdss_mdp_get_format_params(format);
	if (!fmt)
		return -EINVAL;

	bpp = fmt->bpp;
	memset(ps, 0, sizeof(*ps));

	if (bwc_mode) {
		/* BWC: sizes derive from RAU strides and RAU row counts */
		u32 y0_rows, y1_rows;
		int ret;

		ret = mdss_mdp_get_rau_strides(w, h, fmt, ps);
		if (ret)
			return ret;

		y0_rows = DIV_ROUND_UP(h, ps->rau_h[0]);
		y1_rows = DIV_ROUND_UP(h, ps->rau_h[1]);
		ps->plane_size[0] = ps->ystride[0] * y0_rows +
				    ps->ystride[1] * y1_rows;
		ps->ystride[0] += ps->ystride[1];
		ps->ystride[1] = 2;
		ps->plane_size[1] = ps->rau_cnt * ps->ystride[1] *
				    (y0_rows + y1_rows);
		goto sum;
	}

	if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
		/* single packed plane */
		ps->num_planes = 1;
		ps->ystride[0] = w * bpp;
		ps->plane_size[0] = w * h * bpp;
	} else if (format == MDP_Y_CBCR_H2V2_VENUS) {
		/* Venus NV12: strides/scanlines come from the Venus macros */
		const int cf = COLOR_FMT_NV12;

		ps->num_planes = 2;
		ps->ystride[0] = VENUS_Y_STRIDE(cf, w);
		ps->ystride[1] = VENUS_UV_STRIDE(cf, w);
		ps->plane_size[0] = VENUS_Y_SCANLINES(cf, h) * ps->ystride[0];
		ps->plane_size[1] = VENUS_UV_SCANLINES(cf, h) * ps->ystride[1];
	} else {
		/* generic planar/pseudo-planar YUV */
		static const u8 h_sub_map[] = { 1, 2, 1, 2 };
		static const u8 v_sub_map[] = { 1, 1, 2, 2 };
		u8 h_sub, v_sub, stride_align, height_align;

		h_sub = h_sub_map[fmt->chroma_sample];
		v_sub = v_sub_map[fmt->chroma_sample];

		/* MDP_Y_CR_CB_GH2V2 requires 16-byte-aligned strides */
		stride_align = (format == MDP_Y_CR_CB_GH2V2) ? 16 : 1;
		height_align = 1;

		ps->ystride[0] = ALIGN(w, stride_align);
		ps->ystride[1] = ALIGN(w / h_sub, stride_align);
		ps->plane_size[0] = ps->ystride[0] * ALIGN(h, height_align);
		ps->plane_size[1] = ps->ystride[1] * (h / v_sub);

		if (fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR) {
			/* interleaved chroma: one double-size CbCr plane */
			ps->num_planes = 2;
			ps->plane_size[1] *= 2;
			ps->ystride[1] *= 2;
		} else { /* planar: Cb and Cr planes are identical */
			ps->num_planes = 3;
			ps->plane_size[2] = ps->plane_size[1];
			ps->ystride[2] = ps->ystride[1];
		}
	}

sum:
	for (i = 0; i < ps->num_planes; i++)
		ps->total_size += ps->plane_size[i];

	return 0;
}
开发者ID:grzmot22,项目名称:android_kernel_msm,代码行数:86,


示例14: cvmx_bootmem_phy_named_block_alloc

int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,					   uint64_t max_addr,					   uint64_t alignment,					   char *name,					   uint32_t flags){	int64_t addr_allocated;	struct cvmx_bootmem_named_block_desc *named_block_desc_ptr;#ifdef DEBUG	cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: "		     "0x%llx, max: 0x%llx, align: 0x%llx, name: %s/n",		     (unsigned long long)size,		     (unsigned long long)min_addr,		     (unsigned long long)max_addr,		     (unsigned long long)alignment,		     name);#endif	if (cvmx_bootmem_desc->major_version != 3) {		cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "			     "%d.%d at addr: %p/n",			     (int)cvmx_bootmem_desc->major_version,			     (int)cvmx_bootmem_desc->minor_version,			     cvmx_bootmem_desc);		return -1;	}	/*	 * Take lock here, as name lookup/block alloc/name add need to	 * be atomic.	 */	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))		cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));	/* Get pointer to first available named block descriptor */	named_block_desc_ptr =		cvmx_bootmem_phy_named_block_find(NULL,						  flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);	/*	 * Check to see if name already in use, return error if name	 * not available or no more room for blocks.	 */	if (cvmx_bootmem_phy_named_block_find(name,					      flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) {		if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))			cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));		return -1;	}	/*	 * Round size up to mult of minimum alignment bytes We need	 * the actual size allocated to allow for blocks to be	 * coallesced when they are freed.  The alloc routine does the	 * same rounding up on all allocations.	 
*/	size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);	addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,						alignment,						flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);	if (addr_allocated >= 0) {		named_block_desc_ptr->base_addr = addr_allocated;		named_block_desc_ptr->size = size;		strncpy(named_block_desc_ptr->name, name,			cvmx_bootmem_desc->named_block_name_len);		named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;	}	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))		cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));	return addr_allocated;}
开发者ID:CSCLOG,项目名称:beaglebone,代码行数:74,


示例15: big_key_preparse

/*
 * Preparse a big key.
 *
 * Payloads up to BIG_KEY_FILE_THRESHOLD bytes are kept in a kmalloc'd
 * buffer; larger payloads are encrypted with a freshly generated random
 * key and written to a shmem-backed file so the data can be swapped out.
 * On success, ownership of the buffer (or the encryption key plus a
 * pinned file path) is transferred into prep->payload; key destruction
 * is expected to release them.  Returns 0 or a negative errno.
 */
int big_key_preparse(struct key_preparsed_payload *prep)
{
	struct path *path = (struct path *)&prep->payload.data[big_key_path];
	struct file *file;
	u8 *enckey;
	u8 *data = NULL;
	ssize_t written;
	size_t datalen = prep->datalen;
	int ret;

	/* reject empty, oversized (>1 MiB) or absent payloads */
	ret = -EINVAL;
	if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
		goto error;

	/* Set an arbitrary quota */
	prep->quotalen = 16;

	prep->payload.data[big_key_len] = (void *)(unsigned long)datalen;

	if (datalen > BIG_KEY_FILE_THRESHOLD) {
		/* Create a shmem file to store the data in.  This will permit the data
		 * to be swapped out if needed.
		 *
		 * File content is stored encrypted with randomly generated key.
		 */
		/* round up to the cipher block size for encryption */
		size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));

		/* prepare aligned data to encrypt */
		data = kmalloc(enclen, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		memcpy(data, prep->data, datalen);
		/* zero-pad the tail up to the cipher block boundary */
		memset(data + datalen, 0x00, enclen - datalen);

		/* generate random key */
		enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
		if (!enckey) {
			ret = -ENOMEM;
			goto error;
		}

		ret = big_key_gen_enckey(enckey);
		if (ret)
			goto err_enckey;

		/* encrypt aligned data */
		ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey);
		if (ret)
			goto err_enckey;

		/* save aligned data to file */
		file = shmem_kernel_file_setup("", enclen, 0);
		if (IS_ERR(file)) {
			ret = PTR_ERR(file);
			goto err_enckey;
		}

		written = kernel_write(file, data, enclen, 0);
		if (written != enclen) {
			/* short write: map a non-negative count to -ENOMEM */
			ret = written;
			if (written >= 0)
				ret = -ENOMEM;
			goto err_fput;
		}

		/* Pin the mount and dentry to the key so that we can open it again
		 * later
		 */
		prep->payload.data[big_key_data] = enckey;
		*path = file->f_path;
		path_get(path);
		fput(file);
		kfree(data);
	} else {
		/* Just store the data in a buffer */
		void *data = kmalloc(datalen, GFP_KERNEL);

		if (!data)
			return -ENOMEM;

		prep->payload.data[big_key_data] = data;
		memcpy(data, prep->data, prep->datalen);
	}
	return 0;

	/* unwind in reverse order of acquisition */
err_fput:
	fput(file);
err_enckey:
	kfree(enckey);
error:
	kfree(data);
	return ret;
}
开发者ID:AK101111,项目名称:linux,代码行数:97,


示例16: prepare_constant_buffer

//.........这里部分代码省略.........       */      assert(MAX_CLIP_PLANES == 6);      for (j = 0; j < MAX_CLIP_PLANES; j++) {	 if (ctx->Transform.ClipPlanesEnabled & (1<<j)) {	    buf[offset + i * 4 + 0] = ctx->Transform._ClipUserPlane[j][0];	    buf[offset + i * 4 + 1] = ctx->Transform._ClipUserPlane[j][1];	    buf[offset + i * 4 + 2] = ctx->Transform._ClipUserPlane[j][2];	    buf[offset + i * 4 + 3] = ctx->Transform._ClipUserPlane[j][3];	    i++;	 }      }   }   /* vertex shader constants */   if (brw->curbe.vs_size) {      GLuint offset = brw->curbe.vs_start * 16;      GLuint nr = brw->vs.prog_data->nr_params / 4;      /* Load the subset of push constants that will get used when       * we also have a pull constant buffer.       */      for (i = 0; i < vp->program.Base.Parameters->NumParameters; i++) {	 if (brw->vs.constant_map[i] != -1) {	    assert(brw->vs.constant_map[i] <= nr);	    memcpy(buf + offset + brw->vs.constant_map[i] * 4,		   vp->program.Base.Parameters->ParameterValues[i],		   4 * sizeof(float));	 }      }   }   if (0) {      for (i = 0; i < sz*16; i+=4) 	 printf("curbe %d.%d: %f %f %f %f/n", i/8, i&4,		buf[i+0], buf[i+1], buf[i+2], buf[i+3]);      printf("last_buf %p buf %p sz %d/%d cmp %d/n",	     brw->curbe.last_buf, buf,	     bufsz, brw->curbe.last_bufsz,	     brw->curbe.last_buf ? memcmp(buf, brw->curbe.last_buf, bufsz) : -1);   }   if (brw->curbe.curbe_bo != NULL &&       bufsz == brw->curbe.last_bufsz &&       memcmp(buf, brw->curbe.last_buf, bufsz) == 0) {      /* constants have not changed */   } else {      /* Update the record of what our last set of constants was.  We       * don't just flip the pointers because we don't fill in the       * data in the padding between the entries.       
*/      memcpy(brw->curbe.last_buf, buf, bufsz);      brw->curbe.last_bufsz = bufsz;      if (brw->curbe.curbe_bo != NULL &&	  brw->curbe.curbe_next_offset + bufsz > brw->curbe.curbe_bo->size)      {	 drm_intel_gem_bo_unmap_gtt(brw->curbe.curbe_bo);	 drm_intel_bo_unreference(brw->curbe.curbe_bo);	 brw->curbe.curbe_bo = NULL;      }      if (brw->curbe.curbe_bo == NULL) {	 /* Allocate a single page for CURBE entries for this batchbuffer.	  * They're generally around 64b.	  */	 brw->curbe.curbe_bo = drm_intel_bo_alloc(brw->intel.bufmgr, "CURBE",						  4096, 1 << 6);	 brw->curbe.curbe_next_offset = 0;	 drm_intel_gem_bo_map_gtt(brw->curbe.curbe_bo);	 assert(bufsz < 4096);      }      brw->curbe.curbe_offset = brw->curbe.curbe_next_offset;      brw->curbe.curbe_next_offset += bufsz;      brw->curbe.curbe_next_offset = ALIGN(brw->curbe.curbe_next_offset, 64);      /* Copy data to the buffer:       */      memcpy(brw->curbe.curbe_bo->virtual + brw->curbe.curbe_offset,	     buf,	     bufsz);   }   brw_add_validated_bo(brw, brw->curbe.curbe_bo);   /* Because this provokes an action (ie copy the constants into the    * URB), it shouldn't be shortcircuited if identical to the    * previous time - because eg. the urb destination may have    * changed, or the urb contents different to last time.    *    * Note that the data referred to is actually copied internally,    * not just used in place according to passed pointer.    *    * It appears that the CS unit takes care of using each available    * URB entry (Const URB Entry == CURBE) in turn, and issuing    * flushes as necessary when doublebuffering of CURBEs isn't    * possible.    */}
开发者ID:GunioRobot,项目名称:mesa-7.10.2-PS3,代码行数:101,


示例17: smbios_write_tables

unsigned long smbios_write_tables(unsigned long current){	struct smbios_entry *se;	unsigned long tables;	int len = 0;	int max_struct_size = 0;	int handle = 0;	current = ALIGN(current, 16);	printk(BIOS_DEBUG, "%s: %08lx/n", __func__, current);	se = (struct smbios_entry *)current;	current += sizeof(struct smbios_entry);	current = ALIGN(current, 16);	tables = current;	update_max(len, max_struct_size, smbios_write_type0(&current,		handle++));	update_max(len, max_struct_size, smbios_write_type1(&current,		handle++));	update_max(len, max_struct_size, smbios_write_type2(&current,		handle, handle + 1)); /* The chassis handle is the next one */	handle++;	update_max(len, max_struct_size, smbios_write_type3(&current,		handle++));	update_max(len, max_struct_size, smbios_write_type4(&current,		handle++));	update_max(len, max_struct_size, smbios_write_type11(&current,		&handle));	if (IS_ENABLED(CONFIG_ELOG))		update_max(len, max_struct_size,			elog_smbios_write_type15(&current,handle++));	update_max(len, max_struct_size, smbios_write_type16(&current,		handle++));	update_max(len, max_struct_size, smbios_write_type17(&current,		&handle));	update_max(len, max_struct_size, smbios_write_type19(&current,		handle++));	update_max(len, max_struct_size, smbios_write_type20(&current,		&handle));	update_max(len, max_struct_size, smbios_write_type32(&current,		handle++));	update_max(len, max_struct_size, smbios_walk_device_tree(all_devices,		&handle, &current));	update_max(len, max_struct_size, smbios_write_type127(&current,		handle++));	memset(se, 0, sizeof(struct smbios_entry));	memcpy(se->anchor, "_SM_", 4);	se->length = sizeof(struct smbios_entry);	se->major_version = 2;	se->minor_version = 7;	se->max_struct_size = max_struct_size;	se->struct_count = handle;	se->smbios_bcd_revision = 0x27;	memcpy(se->intermediate_anchor_string, "_DMI_", 5);	se->struct_table_address = (u32)tables;	se->struct_table_length = len;	se->intermediate_checksum = smbios_checksum((u8 *)se + 0x10,						    
sizeof(struct smbios_entry)						    - 0x10);	se->checksum = smbios_checksum((u8 *)se, sizeof(struct smbios_entry));	return current;}
开发者ID:MattDevo,项目名称:coreboot,代码行数:68,


示例18: return

static u8 *iv_of_dmreq(struct crypt_config *cc,		       struct dm_crypt_request *dmreq){	return (u8 *)ALIGN((unsigned long)(dmreq + 1),		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);}
开发者ID:Slim80,项目名称:Imperium_LG_G4_MM_Kernel,代码行数:6,


示例19: esp6_output

/*
 * Transform an outgoing IPv6 packet into an ESP packet: pad the payload
 * to the cipher block size, encrypt it in place, prepend the ESP header
 * (SPI + sequence number) and append the truncated ICV when
 * authentication is configured.  Returns 0 on success or a negative
 * errno (-ENOMEM on buffer/scatterlist failures).
 */
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int hdr_len;
	struct ipv6hdr *top_iph;
	struct ipv6_esp_hdr *esph;
	struct crypto_tfm *tfm;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;

	esp = x->data;
	hdr_len = skb->h.raw - skb->data +
		  sizeof(*esph) + esp->conf.ivlen;

	/* Strip IP+ESP header. */
	__skb_pull(skb, hdr_len);

	/* Now skb is pure payload to encrypt */

	err = -ENOMEM;

	/* Round to block size; +2 covers the pad-length and next-header
	 * trailer bytes */
	clen = skb->len;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->conf.padlen)
		clen = ALIGN(clen, esp->conf.padlen);

	/* make room for padding + ICV, possibly across fragments */
	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
		goto error;
	}

	/* Fill padding... (sequential bytes 1, 2, 3, ... as ESP requires) */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	/* pad-length byte precedes the next-header byte */
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	/* re-expose the outer IPv6 header and locate the ESP header */
	top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len);
	esph = (struct ipv6_esp_hdr *)skb->h.raw;
	top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph));
	/* last trailer byte records the original next-header value */
	*(u8*)(trailer->tail - 1) = *skb->nh.raw;
	*skb->nh.raw = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);

	if (esp->conf.ivlen)
		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));

	/* encrypt payload in place via a scatterlist; fall back to a
	 * kmalloc'd list when the packet has more fragments than the
	 * preallocated fast array holds */
	do {
		struct scatterlist *sg = &esp->sgbuf[0];

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		crypto_cipher_encrypt(tfm, sg, sg, clen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
	} while (0);

	/* emit the IV used and save the cipher's next IV for the
	 * following packet */
	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
	}

	/* append the truncated ICV over header + IV + ciphertext */
	if (esp->auth.icv_full_len) {
		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
			sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
		pskb_put(skb, trailer, alen);
	}

	err = 0;

error:
	return err;
}
开发者ID:nighthawk149,项目名称:fvs318g-cfw,代码行数:89,


示例20: xv_malloc

/**
 * xv_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @page: page no. that holds the object
 * @offset: location of object within page
 * @flags: gfp flags for growing the pool; GFP_NOWAIT forbids growing
 *
 * On success, <page, offset> identifies block allocated
 * and 0 is returned. On failure, <page, offset> is set to
 * 0 and -ENOMEM is returned.
 *
 * Allocation requests with size > XV_MAX_ALLOC_SIZE will fail.
 */
int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
		u32 *offset, gfp_t flags)
{
	int error;
	u32 index, tmpsize, origsize, tmpoffset;
	struct block_header *block, *tmpblock;

	*page = NULL;
	*offset = 0;
	origsize = size;

	if (unlikely(!size || size > XV_MAX_ALLOC_SIZE))
		return -ENOMEM;

	/* free-list bookkeeping works on XV_ALIGN-sized units */
	size = ALIGN(size, XV_ALIGN);

	spin_lock(&pool->lock);

	index = find_block(pool, size, page, offset);

	/* no suitable free block: drop the lock, try to grow the pool,
	 * then search again */
	if (!*page) {
		spin_unlock(&pool->lock);
		if (flags & GFP_NOWAIT)
			return -ENOMEM;
		error = grow_pool(pool, flags);
		if (unlikely(error))
			return error;

		spin_lock(&pool->lock);
		index = find_block(pool, size, page, offset);
	}

	if (!*page) {
		spin_unlock(&pool->lock);
		return -ENOMEM;
	}

	block = get_ptr_atomic(*page, *offset, KM_USER0);

	remove_block(pool, *page, *offset, block, index);

	/* Split the block if required */
	tmpoffset = *offset + size + XV_ALIGN;
	tmpsize = block->size - size;
	tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN);
	if (tmpsize) {
		/* remainder becomes a new free block; only remainders of
		 * at least XV_MIN_ALLOC_SIZE go on a free list */
		tmpblock->size = tmpsize - XV_ALIGN;
		set_flag(tmpblock, BLOCK_FREE);
		clear_flag(tmpblock, PREV_FREE);

		set_blockprev(tmpblock, *offset);
		if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
			insert_block(pool, *page, tmpoffset, tmpblock);

		/* link the following block back to the remainder */
		if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) {
			tmpblock = BLOCK_NEXT(tmpblock);
			set_blockprev(tmpblock, tmpoffset);
		}
	} else {
		/* This block is exact fit */
		if (tmpoffset != PAGE_SIZE)
			clear_flag(tmpblock, PREV_FREE);
	}

	/* record the caller's original (unaligned) size */
	block->size = origsize;
	clear_flag(block, BLOCK_FREE);

	put_ptr_atomic(block, KM_USER0);
	spin_unlock(&pool->lock);

	/* returned offset points past the block header */
	*offset += XV_ALIGN;

	return 0;
}
开发者ID:3null,项目名称:fastsocket,代码行数:87,


示例21: write_acpi_tables

unsigned long write_acpi_tables(unsigned long start){	unsigned long current;	acpi_rsdp_t *rsdp;	acpi_rsdt_t *rsdt;	acpi_hpet_t *hpet;	acpi_madt_t *madt;	acpi_srat_t *srat;	acpi_slit_t *slit;	acpi_fadt_t *fadt;	acpi_facs_t *facs;	acpi_header_t *dsdt;	acpi_header_t *ssdt;	acpi_header_t *ssdtx;	void *p;	int i;	get_bus_conf(); //it will get sblk, pci1234, hcdn, and sbdn	/* Align ACPI tables to 16 bytes */	start = ALIGN(start, 16);	current = start;	printk(BIOS_INFO, "ACPI: Writing ACPI tables at %lx.../n", start);	/* We need at least an RSDP and an RSDT Table */	rsdp = (acpi_rsdp_t *) current;	current += sizeof(acpi_rsdp_t);	rsdt = (acpi_rsdt_t *) current;	current += sizeof(acpi_rsdt_t);	/* clear all table memory */	memset((void *)start, 0, current - start);	acpi_write_rsdp(rsdp, rsdt, NULL);	acpi_write_rsdt(rsdt);	/* FACS */	printk(BIOS_DEBUG, "ACPI:    * FACS/n");	facs = (acpi_facs_t *) current;	current += sizeof(acpi_facs_t);	acpi_create_facs(facs);	/* DSDT */	printk(BIOS_DEBUG, "ACPI:    * DSDT at %lx/n", current);	dsdt = (acpi_header_t *)current;	memcpy(dsdt, &AmlCode, sizeof(acpi_header_t));	current += dsdt->length;	memcpy(dsdt, &AmlCode, dsdt->length);	printk(BIOS_DEBUG, "ACPI:    * DSDT @ %p Length %x/n", dsdt, dsdt->length);	/* FADT */	printk(BIOS_DEBUG, "ACPI:    * FADT at %lx/n", current);	fadt = (acpi_fadt_t *) current;	current += sizeof(acpi_fadt_t);	acpi_create_fadt(fadt, facs, dsdt);	acpi_add_table(rsdp, fadt);	/*	 * We explicitly add these tables later on:	 */	printk(BIOS_DEBUG, "ACPI:    * HPET at %lx/n", current);	hpet = (acpi_hpet_t *) current;	current += sizeof(acpi_hpet_t);	acpi_create_hpet(hpet);	acpi_add_table(rsdp, hpet);	/* If we want to use HPET Timers Linux wants an MADT */	printk(BIOS_DEBUG, "ACPI:    * MADT at %lx/n", current);	madt = (acpi_madt_t *) current;	acpi_create_madt(madt);	current+=madt->header.length;	acpi_add_table(rsdp, madt);	/* SRAT */	printk(BIOS_DEBUG, "ACPI:    * SRAT at %lx/n", current);	srat = (acpi_srat_t *) current;	
acpi_create_srat(srat);	current+=srat->header.length;	acpi_add_table(rsdp, srat);	/* SLIT */	printk(BIOS_DEBUG, "ACPI:   * SLIT at %lx/n", current);	slit = (acpi_slit_t *) current;	acpi_create_slit(slit);	current+=slit->header.length;	acpi_add_table(rsdp, slit);	/* SSDT */	printk(BIOS_DEBUG, "ACPI:    * SSDT at %lx/n", current);	ssdt = (acpi_header_t *)current;	acpi_create_ssdt_generator(ssdt, ACPI_TABLE_CREATOR);	current += ssdt->length;	acpi_add_table(rsdp, ssdt);#if CONFIG_ACPI_SSDTX_NUM >= 1//.........这里部分代码省略.........
开发者ID:Godkey,项目名称:coreboot,代码行数:101,


示例22: xv_free

/*
 * Free block identified with <page, offset>.
 *
 * Coalesces with adjacent free blocks on both sides, returns the whole
 * page to the system when it becomes empty, and otherwise re-inserts
 * the (possibly merged) block into the appropriate free list.
 */
void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
{
	void *page_start;
	struct block_header *block, *tmpblock;

	/* step back over the block header xv_malloc skipped */
	offset -= XV_ALIGN;

	spin_lock(&pool->lock);

	page_start = get_ptr_atomic(page, 0, KM_USER0);
	block = (struct block_header *)((char *)page_start + offset);

	/* Catch double free bugs */
	BUG_ON(test_flag(block, BLOCK_FREE));

	/* block->size holds the caller's original size; restore the
	 * aligned size used for bookkeeping */
	block->size = ALIGN(block->size, XV_ALIGN);

	tmpblock = BLOCK_NEXT(block);
	if (offset + block->size + XV_ALIGN == PAGE_SIZE)
		tmpblock = NULL;

	/* Merge next block if its free */
	if (tmpblock && test_flag(tmpblock, BLOCK_FREE)) {
		/*
		 * Blocks smaller than XV_MIN_ALLOC_SIZE
		 * are not inserted in any free list.
		 */
		if (tmpblock->size >= XV_MIN_ALLOC_SIZE) {
			remove_block(pool, page,
				    offset + block->size + XV_ALIGN, tmpblock,
				    get_index_for_insert(tmpblock->size));
		}
		block->size += tmpblock->size + XV_ALIGN;
	}

	/* Merge previous block if its free */
	if (test_flag(block, PREV_FREE)) {
		tmpblock = (struct block_header *)((char *)(page_start) +
						get_blockprev(block));
		offset = offset - tmpblock->size - XV_ALIGN;

		if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
			remove_block(pool, page, offset, tmpblock,
				    get_index_for_insert(tmpblock->size));

		tmpblock->size += block->size + XV_ALIGN;
		block = tmpblock;
	}

	/* No used objects in this page. Free it. */
	if (block->size == PAGE_SIZE - XV_ALIGN) {
		put_ptr_atomic(page_start, KM_USER0);
		spin_unlock(&pool->lock);

		__free_page(page);
		stat_dec(&pool->total_pages);
		return;
	}

	set_flag(block, BLOCK_FREE);
	if (block->size >= XV_MIN_ALLOC_SIZE)
		insert_block(pool, page, offset, block);

	/* tell the following block its predecessor is now free */
	if (offset + block->size + XV_ALIGN != PAGE_SIZE) {
		tmpblock = BLOCK_NEXT(block);
		set_flag(tmpblock, PREV_FREE);
		set_blockprev(tmpblock, offset);
	}

	put_ptr_atomic(page_start, KM_USER0);
	spin_unlock(&pool->lock);
}


示例23: _mesa_make_extension_string

/** * Construct the GL_EXTENSIONS string.  Called the first time that * glGetString(GL_EXTENSIONS) is called. */GLubyte*_mesa_make_extension_string(struct gl_context *ctx){   /* The extension string. */   char *exts = 0;   /* Length of extension string. */   size_t length = 0;   /* Number of extensions */   unsigned count;   /* Indices of the extensions sorted by year */   extension_index *extension_indices;   /* String of extra extensions. */   char *extra_extensions = get_extension_override(ctx);   GLboolean *base = (GLboolean *) &ctx->Extensions;   const struct extension *i;   unsigned j;   unsigned maxYear = ~0;   unsigned api_set = (1 << ctx->API);   if (_mesa_is_gles3(ctx))      api_set |= ES3;   /* Check if the MESA_EXTENSION_MAX_YEAR env var is set */   {      const char *env = getenv("MESA_EXTENSION_MAX_YEAR");      if (env) {         maxYear = atoi(env);         _mesa_debug(ctx, "Note: limiting GL extensions to %u or earlier/n",                     maxYear);      }   }   /* Compute length of the extension string. */   count = 0;   for (i = extension_table; i->name != 0; ++i) {      if (base[i->offset] &&          i->year <= maxYear &&          (i->api_set & api_set)) {	 length += strlen(i->name) + 1; /* +1 for space */	 ++count;      }   }   if (extra_extensions != NULL)      length += 1 + strlen(extra_extensions); /* +1 for space */   exts = calloc(ALIGN(length + 1, 4), sizeof(char));   if (exts == NULL) {      free(extra_extensions);      return NULL;   }   extension_indices = malloc(count * sizeof(extension_index));   if (extension_indices == NULL) {      free(exts);      free(extra_extensions);      return NULL;   }   /* Sort extensions in chronological order because certain old applications (e.g.,    * Quake3 demo) store the extension list in a static size buffer so chronologically    * order ensure that the extensions that such applications expect will fit into    * that buffer.    
*/   j = 0;   for (i = extension_table; i->name != 0; ++i) {      if (base[i->offset] &&          i->year <= maxYear &&          (i->api_set & api_set)) {         extension_indices[j++] = i - extension_table;      }   }   assert(j == count);   qsort(extension_indices, count, sizeof *extension_indices, extension_compare);   /* Build the extension string.*/   for (j = 0; j < count; ++j) {      i = &extension_table[extension_indices[j]];      assert(base[i->offset] && (i->api_set & api_set));      strcat(exts, i->name);      strcat(exts, " ");   }   free(extension_indices);   if (extra_extensions != 0) {      strcat(exts, extra_extensions);      free(extra_extensions);   }   return (GLubyte *) exts;}
开发者ID:xSIMx,项目名称:Mesa-3D,代码行数:92,


示例24: board_init_f

//.........这里部分代码省略.........#ifndef CONFIG_SYS_SKIP_ARM_RELOCATION	/*	 * reserve memory for U-Boot code, data & bss	 * round down to next 4 kB limit	 */	addr -= gd->mon_len;	addr &= ~(4096 - 1);	debug("Reserving %ldk for U-Boot at: %08lx/n", gd->mon_len >> 10, addr);#endif#ifndef CONFIG_SPL_BUILD	/*	 * reserve memory for malloc() arena	 */	addr_sp = addr - TOTAL_MALLOC_LEN;	debug("Reserving %dk for malloc() at: %08lx/n",			TOTAL_MALLOC_LEN >> 10, addr_sp);	/*	 * (permanently) allocate a Board Info struct	 * and a permanent copy of the "global" data	 */	addr_sp -= sizeof (bd_t);	bd = (bd_t *) addr_sp;	gd->bd = bd;	debug("Reserving %zu Bytes for Board Info at: %08lx/n",			sizeof (bd_t), addr_sp);#ifdef CONFIG_MACH_TYPE	gd->bd->bi_arch_number = CONFIG_MACH_TYPE; /* board id for Linux */#endif	addr_sp -= sizeof (gd_t);	id = (gd_t *) addr_sp;	debug("Reserving %zu Bytes for Global Data at: %08lx/n",			sizeof (gd_t), addr_sp);#if defined(CONFIG_OF_SEPARATE) && defined(CONFIG_OF_CONTROL)	/*	 * If the device tree is sitting immediate above our image then we	 * must relocate it. If it is embedded in the data section, then it	 * will be relocated with other data.	 
*/	if (gd->fdt_blob) {		fdt_size = ALIGN(fdt_totalsize(gd->fdt_blob) + 0x1000, 32);		addr_sp -= fdt_size;		new_fdt = (void *)addr_sp;		debug("Reserving %zu Bytes for FDT at: %08lx/n",		      fdt_size, addr_sp);	}#endif	/* setup stackpointer for exeptions */	gd->irq_sp = addr_sp;#ifdef CONFIG_USE_IRQ	addr_sp -= (CONFIG_STACKSIZE_IRQ+CONFIG_STACKSIZE_FIQ);	debug("Reserving %zu Bytes for IRQ stack at: %08lx/n",		CONFIG_STACKSIZE_IRQ+CONFIG_STACKSIZE_FIQ, addr_sp);#endif	/* leave 3 words for abort-stack    */	addr_sp -= 12;	/* 8-byte alignment for ABI compliance */	addr_sp &= ~0x07;#else	addr_sp += 128;	/* leave 32 words for abort-stack   */	gd->irq_sp = addr_sp;#endif	debug("New Stack Pointer is: %08lx/n", addr_sp);#ifdef CONFIG_POST	post_bootmode_init();	post_run(NULL, POST_ROM | post_bootmode_get(0));#endif	gd->bd->bi_baudrate = gd->baudrate;	/* Ram ist board specific, so move it to board code ... */	dram_init_banksize();	display_dram_config();	/* and display it */#ifdef CONFIG_SYS_SKIP_ARM_RELOCATION	gd->malloc_end = addr;	addr = _TEXT_BASE;#endif	gd->relocaddr = addr;	gd->start_addr_sp = addr_sp;	gd->reloc_off = addr - _TEXT_BASE;	debug("relocation Offset is: %08lx/n", gd->reloc_off);	if (new_fdt) {		memcpy(new_fdt, gd->fdt_blob, fdt_size);		gd->fdt_blob = new_fdt;	}	memcpy(id, (void *)gd, sizeof(gd_t));	relocate_code(addr_sp, id, addr);	/* NOTREACHED - relocate_code() does not return */}
开发者ID:ByeongJong-Kang,项目名称:uboot-imx6dongle,代码行数:101,


示例25: ubifs_wbuf_write_nolock

/** * ubifs_wbuf_write_nolock - write data to flash via write-buffer. * @wbuf: write-buffer * @buf: node to write * @len: node length * * This function writes data to flash via write-buffer @wbuf. This means that * the last piece of the node won't reach the flash media immediately if it * does not take whole max. write unit (@c->max_write_size). Instead, the node * will sit in RAM until the write-buffer is synchronized (e.g., by timer, or * because more data are appended to the write-buffer). * * This function returns zero in case of success and a negative error code in * case of failure. If the node cannot be written because there is no more * space in this logical eraseblock, %-ENOSPC is returned. */int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len){	struct ubifs_info *c = wbuf->c;	int err, written, n, aligned_len = ALIGN(len, 8);	dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,	       dbg_ntype(((struct ubifs_ch *)buf)->node_type),	       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);	ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);	ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);	ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);	ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size);	ubifs_assert(wbuf->size >= c->min_io_size);	ubifs_assert(wbuf->size <= c->max_write_size);	ubifs_assert(wbuf->size % c->min_io_size == 0);	ubifs_assert(mutex_is_locked(&wbuf->io_mutex));	ubifs_assert(!c->ro_media && !c->ro_mount);	ubifs_assert(!c->space_fixup);	if (c->leb_size - wbuf->offs >= c->max_write_size)		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {		err = -ENOSPC;		goto out;	}	cancel_wbuf_timer_nolock(wbuf);	if (c->ro_error)		return -EROFS;	if (aligned_len <= wbuf->avail) {		/*		 * The node is not very large and fits entirely within		 * write-buffer.		 
*/		memcpy(wbuf->buf + wbuf->used, buf, len);		if (aligned_len == wbuf->avail) {			dbg_io("flush jhead %s wbuf to LEB %d:%d",			       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);			err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,					    wbuf->offs, wbuf->size,					    wbuf->dtype);			if (err)				goto out;			spin_lock(&wbuf->lock);			wbuf->offs += wbuf->size;			if (c->leb_size - wbuf->offs >= c->max_write_size)				wbuf->size = c->max_write_size;			else				wbuf->size = c->leb_size - wbuf->offs;			wbuf->avail = wbuf->size;			wbuf->used = 0;			wbuf->next_ino = 0;			spin_unlock(&wbuf->lock);		} else {			spin_lock(&wbuf->lock);			wbuf->avail -= aligned_len;			wbuf->used += aligned_len;			spin_unlock(&wbuf->lock);		}		goto exit;	}	written = 0;	if (wbuf->used) {		/*		 * The node is large enough and does not fit entirely within		 * current available space. We have to fill and flush		 * write-buffer and switch to the next max. write unit.		 */		dbg_io("flush jhead %s wbuf to LEB %d:%d",		       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);		memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);		err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,				    wbuf->size, wbuf->dtype);		if (err)			goto out;		wbuf->offs += wbuf->size;//.........这里部分代码省略.........
开发者ID:119-org,项目名称:hi3518-osdrv,代码行数:101,


示例26: s5p_cma_region_reserve

void __init s5p_cma_region_reserve(struct cma_region *regions_normal,				      struct cma_region *regions_secure,				      size_t align_secure, const char *map){	struct cma_region *reg;	phys_addr_t paddr_last = 0xFFFFFFFF;	for (reg = regions_normal; reg->size != 0; reg++) {		phys_addr_t paddr;		if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {			pr_debug("S5P/CMA: size of '%s' is NOT page-aligned/n",								reg->name);			reg->size = PAGE_ALIGN(reg->size);		}		if (reg->reserved) {			pr_err("S5P/CMA: '%s' already reserved/n", reg->name);			continue;		}		if (reg->alignment) {			if ((reg->alignment & ~PAGE_MASK) ||				(reg->alignment & ~reg->alignment)) {				pr_err("S5P/CMA: Failed to reserve '%s': "						"incorrect alignment 0x%08x./n",						reg->name, reg->alignment);				continue;			}		} else {			reg->alignment = PAGE_SIZE;		}		if (reg->start) {			if (!memblock_is_region_reserved(reg->start, reg->size)			    && (memblock_reserve(reg->start, reg->size) == 0))				reg->reserved = 1;			else {				pr_err("S5P/CMA: Failed to reserve '%s'/n",				       reg->name);				continue;			}			pr_debug("S5P/CMA: "				 "Reserved 0x%08x/0x%08x for '%s'/n",				 reg->start, reg->size, reg->name);			paddr = reg->start;		} else {			paddr = memblock_find_in_range(0,					MEMBLOCK_ALLOC_ACCESSIBLE,					reg->size, reg->alignment);		}		if (paddr) {			if (memblock_reserve(paddr, reg->size)) {				pr_err("S5P/CMA: Failed to reserve '%s'/n",								reg->name);				continue;			}			reg->start = paddr;			reg->reserved = 1;			pr_info("S5P/CMA: Reserved 0x%08x/0x%08x for '%s'/n",						reg->start, reg->size, reg->name);		} else {			pr_err("S5P/CMA: No free space in memory for '%s'/n",								reg->name);		}		if (cma_early_region_register(reg)) {			pr_err("S5P/CMA: Failed to register '%s'/n",								reg->name);			memblock_free(reg->start, reg->size);		} else {			paddr_last = min(paddr, paddr_last);		}	}	if (align_secure & ~align_secure) {		pr_err("S5P/CMA: "			"Wrong alignment requirement for secure region./n");	} 
else if (regions_secure && regions_secure->size) {		size_t size_secure = 0;		for (reg = regions_secure; reg->size != 0; reg++)			size_secure += reg->size;		reg--;		/* Entire secure regions will be merged into 2		 * consecutive regions. */		if (align_secure == 0) {			size_t size_region2;			size_t order_region2;			size_t aug_size;			align_secure = 1 <<				(get_order((size_secure + 1) / 2) + PAGE_SHIFT);//.........这里部分代码省略.........
开发者ID:anewkirk,项目名称:AJK,代码行数:101,


示例27: return

/**
 * Hand out a pointer to this Buffer's sample storage, adjusted to the
 * required boundary via the project's ALIGN macro.
 *
 * Presumably the backing allocation (m_buffer) is padded so that the
 * aligned address still leaves room for the requested sample count —
 * TODO confirm against the allocation site.  No ownership is
 * transferred; the pointer is valid for the lifetime of this Buffer.
 *
 * @return Aligned pointer into the internal sample storage.
 */
sample_t* Buffer::getBuffer() const
{
	// C-style cast kept deliberately: ALIGN may expand to integer
	// arithmetic, and the C cast accepts either an integral or a
	// pointer result.
	return (sample_t*) ALIGN(m_buffer);
}
开发者ID:carldong,项目名称:audaspace,代码行数:4,


示例28: ubifs_log_start_commit

/** * ubifs_log_start_commit - start commit. * @c: UBIFS file-system description object * @ltail_lnum: return new log tail LEB number * * The commit operation starts with writing "commit start" node to the log and * reference nodes for all journal heads which will define new journal after * the commit has been finished. The commit start and reference nodes are * written in one go to the nearest empty log LEB (hence, when commit is * finished UBIFS may safely unmap all the previous log LEBs). This function * returns zero in case of success and a negative error code in case of * failure. */int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum){	void *buf;	struct ubifs_cs_node *cs;	struct ubifs_ref_node *ref;	int err, i, max_len, len;	err = dbg_check_bud_bytes(c);	if (err)		return err;	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;	max_len = ALIGN(max_len, c->min_io_size);	buf = cs = kmalloc(max_len, GFP_NOFS);	if (!buf)		return -ENOMEM;	cs->ch.node_type = UBIFS_CS_NODE;	cs->cmt_no = cpu_to_le64(c->cmt_no);	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);	/*	 * Note, we do not lock 'c->log_mutex' because this is the commit start	 * phase and we are exclusively using the log. And we do not lock	 * write-buffer because nobody can write to the file-system at this	 * phase.	 
*/	len = UBIFS_CS_NODE_SZ;	for (i = 0; i < c->jhead_cnt; i++) {		int lnum = c->jheads[i].wbuf.lnum;		int offs = c->jheads[i].wbuf.offs;		if (lnum == -1 || offs == c->leb_size)			continue;		dbg_log("add ref to LEB %d:%d for jhead %s",			lnum, offs, dbg_jhead(i));		ref = buf + len;		ref->ch.node_type = UBIFS_REF_NODE;		ref->lnum = cpu_to_le32(lnum);		ref->offs = cpu_to_le32(offs);		ref->jhead = cpu_to_le32(i);		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);		len += UBIFS_REF_NODE_SZ;	}	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);	#if defined(CONFIG_UBIFS_FS_FULL_USE_LOG) && (MP_NAND_UBIFS == 1)	/* Not Switch to next log LEB, programming next available page in the same log LEB continuously*/	/* if available page is in the end of the LEB, switch to next LEB*/	if(c->lhead_offs >= (c->leb_size - (c->min_io_size * 4)) )	{		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);		c->lhead_offs = 0;	}		if (c->lhead_offs == 0) {		/* Must ensure next LEB has been unmapped */		err = ubifs_leb_unmap(c, c->lhead_lnum);		if (err)			goto out;	}	len = ALIGN(len, c->min_io_size);	dbg_log("writing commit start at LEB %d:%d, len %d", c->lhead_lnum, c->lhead_offs, len);	err = ubifs_leb_write(c, c->lhead_lnum, cs, c->lhead_offs, len, UBI_SHORTTERM);	if (err)		goto out;	#else	/* Switch to the next log LEB */	if (c->lhead_offs) {		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);		c->lhead_offs = 0;	}	if (c->lhead_offs == 0) {		/* Must ensure next LEB has been unmapped */		err = ubifs_leb_unmap(c, c->lhead_lnum);		if (err)			goto out;	}	len = ALIGN(len, c->min_io_size);//.........这里部分代码省略.........
开发者ID:Scorpio92,项目名称:mstar6a918,代码行数:101,


示例29: imx_bbu_nand_update

//.........这里部分代码省略.........	 *      continue without robust update	 *   - if only one firmware slot is readable, the ROM uses it	 *   - if both slots are readable, the ROM will use slot 0	 * - Step 1: erase/update the slot currently unused by the ROM	 * - Step 2: Update FCBs/DBBTs, thereby letting Firmware1_startingPage	 *           point to the slot we just updated. From this moment	 *           on the new firmware will be used and running a	 *           refresh/repair after a power failure after this	 *           step will complete the update.	 * - Step 3: erase/update the other firmwre slot	 * - Step 4: Eventually write FCBs/DBBTs again. This may become	 *           necessary when step 3 revealed new bad blocks.	 *	 * This robust update only works when the original FCBs on the device	 * uses the same layout as this code does. In other cases update will	 * also work, but it won't be robust against power failures.	 *	 * Refreshing the firmware which is needed when blocks become unreadable	 * due to read disturbance works the same way, only that the new firmware	 * is the same as the old firmware and that it will only be written when	 * reading from the device returns -EUCLEAN indicating that a block needs	 * to be rewritten.	 */	if (fcb)		read_firmware_all(mtd, fcb, &fw_orig, &fw_orig_len,				  &used_refresh, &unused_refresh, &used);	if (data->image) {		/*		 * We have to write one additional page to make the ROM happy.		 * Maybe the PagesInFirmwarex fields are really the number of pages - 1.		 * kobs-ng has the same.		 
*/		fw_size = ALIGN(data->len + mtd->writesize, mtd->writesize);		fw = xzalloc(fw_size);		memcpy(fw, data->image, data->len);		free(fw_orig);		used_refresh = 1;		unused_refresh = 1;		free(fcb);		fcb = xzalloc(sizeof(*fcb));		fcb->Firmware1_startingPage = imx_bbu_firmware_start_block(mtd, !used) * pages_per_block;		fcb->Firmware2_startingPage = imx_bbu_firmware_start_block(mtd, used) * pages_per_block;		fcb->PagesInFirmware1 = fw_size / mtd->writesize;		fcb->PagesInFirmware2 = fcb->PagesInFirmware1;		fcb_create(imx_handler, fcb, mtd);	} else {		if (!fcb) {			pr_err("No FCB found on device, cannot refresh/n");			ret = -EINVAL;			goto out;		}		if (!fw_orig) {			pr_err("No firmware found on device, cannot refresh/n");			ret = -EINVAL;			goto out;		}		fw = fw_orig;		fw_size = fw_orig_len;		pr_info("Refreshing existing firmware/n");	}
开发者ID:gazoo74,项目名称:barebox,代码行数:66,


示例30: read_wim_security_data

/* * Reads the security data from the metadata resource of a WIM image. * * @buf *	Buffer containing an uncompressed WIM metadata resource. * @buf_len *	Length of the uncompressed metadata resource, in bytes. * @sd_ret *	On success, a pointer to the resulting security data structure will be *	returned here. * * Note: There is no `offset' argument because the security data is located at * the beginning of the metadata resource. * * Return values: *	WIMLIB_ERR_SUCCESS (0) *	WIMLIB_ERR_INVALID_METADATA_RESOURCE *	WIMLIB_ERR_NOMEM */intread_wim_security_data(const u8 *buf, size_t buf_len,		       struct wim_security_data **sd_ret){	struct wim_security_data *sd;	int ret;	u64 total_len;	u64 sizes_size;	u64 size_no_descriptors;	const struct wim_security_data_disk *sd_disk;	const u8 *p;	if (buf_len < 8)		return WIMLIB_ERR_INVALID_METADATA_RESOURCE;	sd = new_wim_security_data();	if (!sd)		goto out_of_memory;	sd_disk = (const struct wim_security_data_disk *)buf;	sd->total_length = ALIGN(le32_to_cpu(sd_disk->total_length), 8);	sd->num_entries = le32_to_cpu(sd_disk->num_entries);	/* Length field of 0 is a special case that really means length	 * of 8. */	if (sd->total_length == 0)		sd->total_length = 8;	/* The security_id field of each dentry is a signed 32-bit integer, so	 * the possible indices into the security descriptors table are 0	 * through 0x7fffffff.  Which means 0x80000000 security descriptors	 * maximum.  Not like you should ever have anywhere close to that many	 * security descriptors! */	if (sd->num_entries > 0x80000000)		goto out_invalid_sd;	/* Verify the listed total length of the security data is big enough to	 * include the sizes array, verify that the file data is big enough to	 * include it as well, then allocate the array of sizes.	 *	 * Note: The total length of the security data must fit in a 32-bit	 * integer, even though each security descriptor size is a 64-bit	 * integer.  
This is stupid, and we need to be careful not to actually	 * let the security descriptor sizes be over 0xffffffff.  */	if (sd->total_length > buf_len)		goto out_invalid_sd;	sizes_size = (u64)sd->num_entries * sizeof(u64);	size_no_descriptors = 8 + sizes_size;	if (size_no_descriptors > sd->total_length)		goto out_invalid_sd;	total_len = size_no_descriptors;	/* Return immediately if no security descriptors. */	if (sd->num_entries == 0)		goto out_descriptors_ready;	/* Allocate a new buffer for the sizes array */	sd->sizes = MALLOC(sizes_size);	if (!sd->sizes)		goto out_of_memory;	/* Copy the sizes array into the new buffer */	for (u32 i = 0; i < sd->num_entries; i++) {		sd->sizes[i] = le64_to_cpu(sd_disk->sizes[i]);		if (sd->sizes[i] > 0xffffffff)			goto out_invalid_sd;	}	p = (const u8*)sd_disk + size_no_descriptors;	/* Allocate the array of pointers to the security descriptors, then read	 * them into separate buffers. */	sd->descriptors = CALLOC(sd->num_entries, sizeof(sd->descriptors[0]));	if (!sd->descriptors)		goto out_of_memory;	for (u32 i = 0; i < sd->num_entries; i++) {		if (sd->sizes[i] == 0)			continue;//.........这里部分代码省略.........
开发者ID:AudienceScience,项目名称:wimlib,代码行数:101,



注:本文中的ALIGN函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


C++ ALIGNED函数代码示例
C++ ALERT函数代码示例
万事OK自学网:51自学网_软件自学网_CAD自学网自学excel、自学PS、自学CAD、自学C语言、自学css3实例,是一个通过网络自主学习工作技能的自学平台,网友喜欢的软件自学网站。