
Tutorial: C++ AllocateFile function code examples

51自学网 2021-06-01 19:46:37
This tutorial, "C++ AllocateFile function code examples", is quite practical; we hope it helps you.

This article collects typical usage examples of the C++ AllocateFile function. If you have been struggling with questions such as: what exactly does AllocateFile do, how is it called, and what does real usage look like, then the hand-picked code examples below may help.

The following presents 29 code examples of the AllocateFile function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
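Before the individual examples, here is a minimal sketch of the pattern they all share, assuming a PostgreSQL backend build. The function name read_one_line, the path argument, and the fixed-size line buffer are illustrative only and do not come from any example below. As the examples suggest, AllocateFile() is the backend's tracked wrapper around fopen(): the returned FILE * is registered so it can be cleaned up automatically on transaction abort, and a successful AllocateFile() is paired with FreeFile() rather than plain fclose().

#include "postgres.h"

#include "storage/fd.h"

/* Read and log the first line of a text file; a sketch, not production code. */
static void
read_one_line(const char *path)
{
    FILE   *fp;
    char    line[1024];

    /* AllocateFile() returns NULL on failure and leaves errno set, so %m works here. */
    fp = AllocateFile(path, "r");
    if (fp == NULL)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not open file \"%s\": %m", path)));

    if (fgets(line, sizeof(line), fp) != NULL)
        elog(LOG, "first line of \"%s\": %s", path, line);

    /* Release through FreeFile(), never plain fclose(), to keep the fd accounting correct. */
    FreeFile(fp);
}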

Example 1: utl_file_fcopy

/*
 * CREATE FUNCTION utl_file.fcopy(
 *     src_location     text,
 *     src_filename     text,
 *     dest_location    text,
 *     dest_filename    text,
 *     start_line       integer DEFAULT NULL
 *     end_line         integer DEFAULT NULL)
 */
Datum
utl_file_fcopy(PG_FUNCTION_ARGS)
{
    char       *srcpath;
    char       *dstpath;
    int         start_line;
    int         end_line;
    FILE       *srcfile;
    FILE       *dstfile;

    NOT_NULL_ARG(0);
    NOT_NULL_ARG(1);
    NOT_NULL_ARG(2);
    NOT_NULL_ARG(3);

    srcpath = get_safe_path(PG_GETARG_TEXT_P(0), PG_GETARG_TEXT_P(1));
    dstpath = get_safe_path(PG_GETARG_TEXT_P(2), PG_GETARG_TEXT_P(3));

    start_line = PG_GETARG_IF_EXISTS(4, INT32, 1);
    if (start_line <= 0)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("start_line must be positive (%d passed)", start_line)));

    end_line = PG_GETARG_IF_EXISTS(5, INT32, INT_MAX);
    if (end_line <= 0)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("end_line must be positive (%d passed)", end_line)));

    srcfile = AllocateFile(srcpath, "rt");
    if (srcfile == NULL)
    {
        /* failed to open src file. */
        IO_EXCEPTION();
    }

    dstfile = AllocateFile(dstpath, "wt");
    if (dstfile == NULL)
    {
        /* failed to open dst file. */
        fclose(srcfile);
        IO_EXCEPTION();
    }

    if (copy_text_file(srcfile, dstfile, start_line, end_line))
        IO_EXCEPTION();

    FreeFile(srcfile);
    FreeFile(dstfile);

    PG_RETURN_VOID();
}

Contributor: 50wu, Project: gpdb, Lines of code: 62


Example 2: FindMyDatabase

/*
 * FindMyDatabase -- get the critical info needed to locate my database
 *
 * Find the named database in pg_database, return its database OID and the
 * OID of its default tablespace.  Return TRUE if found, FALSE if not.
 *
 * Since we are not yet up and running as a backend, we cannot look directly
 * at pg_database (we can't obtain locks nor participate in transactions).
 * So to get the info we need before starting up, we must look at the "flat
 * file" copy of pg_database that is helpfully maintained by flatfiles.c.
 * This is subject to various race conditions, so after we have the
 * transaction infrastructure started, we have to recheck the information;
 * see InitPostgres.
 */
static bool
FindMyDatabase(const char *name, Oid *db_id, Oid *db_tablespace)
{
    bool        result = false;
    char       *filename;
    FILE       *db_file;
    char        thisname[NAMEDATALEN];
    TransactionId db_frozenxid;

    filename = database_getflatfilename();
    db_file = AllocateFile(filename, "r");
    if (db_file == NULL)
        ereport(FATAL,
                (errcode_for_file_access(),
                 errmsg("could not open file \"%s\": %m", filename)));

    while (read_pg_database_line(db_file, thisname, db_id,
                                 db_tablespace, &db_frozenxid))
    {
        if (strcmp(thisname, name) == 0)
        {
            result = true;
            break;
        }
    }

    FreeFile(db_file);
    pfree(filename);

    return result;
}

Contributor: RuchikaGupta, Project: fa12, Lines of code: 45


Example 3: CreateAsyncSource

static Source *
CreateAsyncSource(const char *path, TupleDesc desc)
{
    AsyncSource *self = palloc0(sizeof(AsyncSource));

    self->base.read = (SourceReadProc) AsyncSourceRead;
    self->base.close = (SourceCloseProc) AsyncSourceClose;

    self->size = INITIAL_BUF_LEN;
    self->begin = 0;
    self->end = 0;
    self->buffer = palloc0(self->size);

    self->errmsg[0] = '\0';
    self->eof = false;

    self->fd = AllocateFile(path, "r");
    if (self->fd == NULL)
        ereport(ERROR, (errcode_for_file_access(),
            errmsg("could not open \"%s\" %m", path)));

#if defined(USE_POSIX_FADVISE)
    posix_fadvise(fileno(self->fd), 0, 0,
                  POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE | POSIX_FADV_WILLNEED);
#endif

    pthread_mutex_init(&self->lock, NULL);

    if (pthread_create(&self->th, NULL, AsyncSourceMain, self) != 0)
        elog(ERROR, "pthread_create");

    return (Source *) self;
}

Contributor: bwtakacy, Project: prev_pg_bulkload_repo, Lines of code: 30


Example 4: pg_record_shmem_shutdown

/* Dumps the histogram data into a file (with a md5 hash of the contents at the beginning). */
static void
pg_record_shmem_shutdown(int code, Datum arg)
{
    FILE   *file;

    /* do we need to write the queries? */
    if (query_buffer->next == 0) {
        return;
    }

    prepare_file(log_file, query_buffer, query_buffer->next);

    file = AllocateFile(log_file->curr_filename, PG_BINARY_A);
    if (file == NULL)
        goto error;

    /* now write the actual shared segment */
    if (fwrite(query_buffer->buffer, query_buffer->next, 1, file) != 1)
        goto error;

    FreeFile(file);

    return;

error:
    ereport(LOG,
            (errcode_for_file_access(),
             errmsg("could not write query buffer to the file \"%s\": %m",
                    log_file->curr_filename)));

    if (file)
        FreeFile(file);
}

Contributor: tvondra, Project: query_recorder, Lines of code: 34


Example 5: XLogArchiveNotify

/*
 * XLogArchiveNotify
 *
 * Create an archive notification file
 *
 * The name of the notification file is the message that will be picked up
 * by the archiver, e.g. we write 0000000100000001000000C6.ready
 * and the archiver then knows to archive XLOGDIR/0000000100000001000000C6,
 * then when complete, rename it to 0000000100000001000000C6.done
 */
void
XLogArchiveNotify(const char *xlog)
{
    char        archiveStatusPath[MAXPGPATH];
    FILE       *fd;

    /* insert an otherwise empty file called <XLOG>.ready */
    StatusFilePath(archiveStatusPath, xlog, ".ready");

    fd = AllocateFile(archiveStatusPath, "w");
    if (fd == NULL)
    {
        ereport(LOG,
                (errcode_for_file_access(),
                 errmsg("could not create archive status file \"%s\": %m",
                        archiveStatusPath)));
        return;
    }
    if (FreeFile(fd))
    {
        ereport(LOG,
                (errcode_for_file_access(),
                 errmsg("could not write archive status file \"%s\": %m",
                        archiveStatusPath)));
        return;
    }

    /* Notify archiver that it's got something to do */
    if (IsUnderPostmaster)
        SendPostmasterSignal(PMSIGNAL_WAKEN_ARCHIVER);
}

Contributor: JiannengSun, Project: postgres, Lines of code: 40


Example 6: buffer_write

/* Dumps the histogram data into a file (with a md5 hash of the contents at the beginning). */
static void
buffer_write()
{
    FILE   *file;

    prepare_file(log_file, query_buffer, query_buffer->next);

    file = AllocateFile(log_file->curr_filename, PG_BINARY_A);
    if (file == NULL)
        goto error;

    /* now write the actual shared segment */
    if (fwrite(query_buffer->buffer, query_buffer->next, 1, file) != 1)
        goto error;

    FreeFile(file);

    return;

error:
    ereport(LOG,
            (errcode_for_file_access(),
             errmsg("could not write query histogram file \"%s\": %m",
                    log_file->curr_filename)));

    if (file)
        FreeFile(file);
}

Contributor: tvondra, Project: query_recorder, Lines of code: 29


Example 7: pg_backup_start_time

/*
 * Returns start time of an online exclusive backup.
 *
 * When there's no exclusive backup in progress, the function
 * returns NULL.
 */
Datum
pg_backup_start_time(PG_FUNCTION_ARGS)
{
    Datum       xtime;
    FILE       *lfp;
    char        fline[MAXPGPATH];
    char        backup_start_time[30];

    /*
     * See if label file is present
     */
    lfp = AllocateFile(BACKUP_LABEL_FILE, "r");
    if (lfp == NULL)
    {
        if (errno != ENOENT)
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not read file \"%s\": %m",
                            BACKUP_LABEL_FILE)));
        PG_RETURN_NULL();
    }

    /*
     * Parse the file to find the START TIME line.
     */
    backup_start_time[0] = '\0';
    while (fgets(fline, sizeof(fline), lfp) != NULL)
    {
        if (sscanf(fline, "START TIME: %25[^\n]\n", backup_start_time) == 1)
            break;
    }

    /* Check for a read error. */
    if (ferror(lfp))
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not read file \"%s\": %m", BACKUP_LABEL_FILE)));

    /* Close the backup label file. */
    if (FreeFile(lfp))
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not close file \"%s\": %m", BACKUP_LABEL_FILE)));

    if (strlen(backup_start_time) == 0)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));

    /*
     * Convert the time string read from file to TimestampTz form.
     */
    xtime = DirectFunctionCall3(timestamptz_in,
                                CStringGetDatum(backup_start_time),
                                ObjectIdGetDatum(InvalidOid),
                                Int32GetDatum(-1));

    PG_RETURN_DATUM(xtime);
}

Contributor: Gordiychuk, Project: postgres, Lines of code: 65


Example 8: ErrorLogWrite

/*
 * Write into the error log file.  This opens the file every time,
 * so that we can keep it simple to deal with concurrent write.
 */
static void
ErrorLogWrite(CdbSreh *cdbsreh)
{
    HeapTuple   tuple;
    char        filename[MAXPGPATH];
    FILE       *fp;
    pg_crc32    crc;

    Assert(OidIsValid(cdbsreh->relid));
    ErrorLogFileName(filename, MyDatabaseId, cdbsreh->relid);
    tuple = FormErrorTuple(cdbsreh);

    INIT_CRC32C(crc);
    COMP_CRC32C(crc, tuple->t_data, tuple->t_len);
    FIN_CRC32C(crc);

    LWLockAcquire(ErrorLogLock, LW_EXCLUSIVE);
    fp = AllocateFile(filename, "a");

    if (!fp)
    {
        mkdir(ErrorLogDir, S_IRWXU);
        fp = AllocateFile(filename, "a");
    }

    if (!fp)
        ereport(ERROR,
                (errmsg("could not open \"%s\": %m", filename)));

    /*
     * format:
     *     0-4: length
     *     5-8: crc
     *     9-n: tuple data
     */
    if (fwrite(&tuple->t_len, 1, sizeof(tuple->t_len), fp) != sizeof(tuple->t_len))
        elog(ERROR, "could not write tuple length: %m");
    if (fwrite(&crc, 1, sizeof(pg_crc32), fp) != sizeof(pg_crc32))
        elog(ERROR, "could not write checksum: %m");
    if (fwrite(tuple->t_data, 1, tuple->t_len, fp) != tuple->t_len)
        elog(ERROR, "could not write tuple data: %m");

    FreeFile(fp);
    LWLockRelease(ErrorLogLock);

    heap_freetuple(tuple);
}

Contributor: LJoNe, Project: gpdb, Lines of code: 50


Example 9: remove_duplicate

static void
remove_duplicate(Spooler *self, Relation heap, IndexTuple itup, const char *relname)
{
    HeapTupleData   tuple;
    BlockNumber     blknum;
    BlockNumber     offnum;
    Buffer          buffer;
    Page            page;
    ItemId          itemid;

    blknum = ItemPointerGetBlockNumber(&itup->t_tid);
    offnum = ItemPointerGetOffsetNumber(&itup->t_tid);

    buffer = ReadBuffer(heap, blknum);
    LockBuffer(buffer, BUFFER_LOCK_SHARE);

    page = BufferGetPage(buffer);
    itemid = PageGetItemId(page, offnum);
    tuple.t_data = ItemIdIsNormal(itemid)
        ? (HeapTupleHeader) PageGetItem(page, itemid)
        : NULL;

    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

    if (tuple.t_data != NULL)
    {
        char           *str;
        TupleDesc       tupdesc;

        simple_heap_delete(heap, &itup->t_tid);

        /* output duplicate bad file. */
        if (self->dup_fp == NULL)
            if ((self->dup_fp = AllocateFile(self->dup_badfile, "w")) == NULL)
                ereport(ERROR,
                        (errcode_for_file_access(),
                         errmsg("could not open duplicate bad file \"%s\": %m",
                                self->dup_badfile)));

        tupdesc = RelationGetDescr(heap);
        tuple.t_len = ItemIdGetLength(itemid);
        tuple.t_self = itup->t_tid;

        str = tuple_to_cstring(RelationGetDescr(heap), &tuple);
        if (fprintf(self->dup_fp, "%s\n", str) < 0 || fflush(self->dup_fp))
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not write parse badfile \"%s\": %m",
                            self->dup_badfile)));

        pfree(str);
    }

    ReleaseBuffer(buffer);

    LoggerLog(WARNING, "Duplicate error Record " int64_FMT
        ": Rejected - duplicate key value violates unique constraint \"%s\"\n",
        self->dup_old + self->dup_new, relname);
}

Contributor: chuongnn, Project: pg_bulkload, Lines of code: 57


Example 10: read_binary_file

/*
 * Read a section of a file, returning it as bytea
 *
 * Caller is responsible for all permissions checking.
 *
 * We read the whole of the file when bytes_to_read is negative.
 */
bytea *
read_binary_file(const char *filename, int64 seek_offset, int64 bytes_to_read)
{
    bytea      *buf;
    size_t      nbytes;
    FILE       *file;

    if (bytes_to_read < 0)
    {
        if (seek_offset < 0)
            bytes_to_read = -seek_offset;
        else
        {
            struct stat fst;

            if (stat(filename, &fst) < 0)
                ereport(ERROR,
                        (errcode_for_file_access(),
                         errmsg("could not stat file \"%s\": %m", filename)));

            bytes_to_read = fst.st_size - seek_offset;
        }
    }

    /* not sure why anyone thought that int64 length was a good idea */
    if (bytes_to_read > (MaxAllocSize - VARHDRSZ))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("requested length too large")));

    if ((file = AllocateFile(filename, PG_BINARY_R)) == NULL)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not open file \"%s\" for reading: %m",
                        filename)));

    if (fseeko(file, (off_t) seek_offset,
               (seek_offset >= 0) ? SEEK_SET : SEEK_END) != 0)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not seek in file \"%s\": %m", filename)));

    buf = (bytea *) palloc((Size) bytes_to_read + VARHDRSZ);

    nbytes = fread(VARDATA(buf), 1, (size_t) bytes_to_read, file);

    if (ferror(file))
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not read file \"%s\": %m", filename)));

    SET_VARSIZE(buf, nbytes + VARHDRSZ);

    FreeFile(file);

    return buf;
}

Contributor: BioBD, Project: Hypothetical_Indexes, Lines of code: 64


Example 11: pgss_shmem_shutdown

/*
 * shmem_shutdown hook: Dump statistics into file.
 *
 * Note: we don't bother with acquiring lock, because there should be no
 * other processes running when this is called.
 */
static void
pgss_shmem_shutdown(int code, Datum arg)
{
    FILE       *file;
    HASH_SEQ_STATUS hash_seq;
    int32       num_entries;
    pgssEntry  *entry;

    /* Don't try to dump during a crash. */
    if (code)
        return;

    /* Safety check ... shouldn't get here unless shmem is set up. */
    if (!pgss || !pgss_hash)
        return;

    /* Don't dump if told not to. */
    if (!pgss_save)
        return;

    file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_W);
    if (file == NULL)
        goto error;

    if (fwrite(&PGSS_FILE_HEADER, sizeof(uint32), 1, file) != 1)
        goto error;
    num_entries = hash_get_num_entries(pgss_hash);
    if (fwrite(&num_entries, sizeof(int32), 1, file) != 1)
        goto error;

    hash_seq_init(&hash_seq, pgss_hash);
    while ((entry = hash_seq_search(&hash_seq)) != NULL)
    {
        int         len = entry->key.query_len;

        if (fwrite(entry, offsetof(pgssEntry, mutex), 1, file) != 1 ||
            fwrite(entry->query, 1, len, file) != len)
            goto error;
    }

    if (FreeFile(file))
    {
        file = NULL;
        goto error;
    }

    return;

error:
    ereport(LOG,
            (errcode_for_file_access(),
             errmsg("could not write pg_stat_statement file \"%s\": %m",
                    PGSS_DUMP_FILE)));
    if (file)
        FreeFile(file);
    unlink(PGSS_DUMP_FILE);
}

Contributor: HBPSP8Repo, Project: NoDB, Lines of code: 63


Example 12: pg_read_file

/*
 * Read a section of a file, returning it as text
 */
Datum
pg_read_file(PG_FUNCTION_ARGS)
{
    text       *filename_t = PG_GETARG_TEXT_P(0);
    int64       seek_offset = PG_GETARG_INT64(1);
    int64       bytes_to_read = PG_GETARG_INT64(2);
    char       *buf;
    size_t      nbytes;
    FILE       *file;
    char       *filename;

    if (!superuser())
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 (errmsg("must be superuser to read files"))));

    filename = convert_and_check_filename(filename_t);

    if ((file = AllocateFile(filename, PG_BINARY_R)) == NULL)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not open file \"%s\" for reading: %m",
                        filename)));

    if (fseeko(file, (off_t) seek_offset,
               (seek_offset >= 0) ? SEEK_SET : SEEK_END) != 0)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not seek in file \"%s\": %m", filename)));

    if (bytes_to_read < 0)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("requested length cannot be negative")));

    /* not sure why anyone thought that int64 length was a good idea */
    if (bytes_to_read > (MaxAllocSize - VARHDRSZ))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("requested length too large")));

    buf = palloc((Size) bytes_to_read + VARHDRSZ);

    nbytes = fread(VARDATA(buf), 1, (size_t) bytes_to_read, file);

    if (ferror(file))
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not read file \"%s\": %m", filename)));

    SET_VARSIZE(buf, nbytes + VARHDRSZ);

    FreeFile(file);
    pfree(filename);

    PG_RETURN_TEXT_P(buf);
}

Contributor: karthijrk, Project: gpdb, Lines of code: 60


Example 13: load_dh_file

/*
 * Load precomputed DH parameters.
 *
 * To prevent "downgrade" attacks, we perform a number of checks
 * to verify that the DBA-generated DH parameters file contains
 * what we expect it to contain.
 */
static DH *
load_dh_file(char *filename, bool isServerStart)
{
    FILE       *fp;
    DH         *dh = NULL;
    int         codes;

    /* attempt to open file.  It's not an error if it doesn't exist. */
    if ((fp = AllocateFile(filename, "r")) == NULL)
    {
        ereport(isServerStart ? FATAL : LOG,
                (errcode_for_file_access(),
                 errmsg("could not open DH parameters file \"%s\": %m",
                        filename)));
        return NULL;
    }

    dh = PEM_read_DHparams(fp, NULL, NULL, NULL);
    FreeFile(fp);

    if (dh == NULL)
    {
        ereport(isServerStart ? FATAL : LOG,
                (errcode(ERRCODE_CONFIG_FILE_ERROR),
                 errmsg("could not load DH parameters file: %s",
                        SSLerrmessage(ERR_get_error()))));
        return NULL;
    }

    /* make sure the DH parameters are usable */
    if (DH_check(dh, &codes) == 0)
    {
        ereport(isServerStart ? FATAL : LOG,
                (errcode(ERRCODE_CONFIG_FILE_ERROR),
                 errmsg("invalid DH parameters: %s",
                        SSLerrmessage(ERR_get_error()))));
        return NULL;
    }
    if (codes & DH_CHECK_P_NOT_PRIME)
    {
        ereport(isServerStart ? FATAL : LOG,
                (errcode(ERRCODE_CONFIG_FILE_ERROR),
                 errmsg("invalid DH parameters: p is not prime")));
        return NULL;
    }
    if ((codes & DH_NOT_SUITABLE_GENERATOR) &&
        (codes & DH_CHECK_P_NOT_SAFE_PRIME))
    {
        ereport(isServerStart ? FATAL : LOG,
                (errcode(ERRCODE_CONFIG_FILE_ERROR),
                 errmsg("invalid DH parameters: neither suitable generator or safe prime")));
        return NULL;
    }

    return dh;
}

Contributor: adityavs, Project: postgres, Lines of code: 63


Example 14: pg_file_write

Datum
pg_file_write(PG_FUNCTION_ARGS)
{
    FILE       *f;
    char       *filename;
    text       *data;
    int64       count = 0;

    requireSuperuser();

    filename = convert_and_check_filename(PG_GETARG_TEXT_PP(0), false);
    data = PG_GETARG_TEXT_PP(1);

    if (!PG_GETARG_BOOL(2))
    {
        struct stat fst;

        if (stat(filename, &fst) >= 0)
            ereport(ERROR,
                    (ERRCODE_DUPLICATE_FILE,
                     errmsg("file \"%s\" exists", filename)));

        f = AllocateFile(filename, "wb");
    }
    else
        f = AllocateFile(filename, "ab");

    if (!f)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not open file \"%s\" for writing: %m",
                        filename)));

    count = fwrite(VARDATA_ANY(data), 1, VARSIZE_ANY_EXHDR(data), f);
    if (count != VARSIZE_ANY_EXHDR(data) || FreeFile(f))
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not write file \"%s\": %m", filename)));

    PG_RETURN_INT64(count);
}

Contributor: AmiGanguli, Project: postgres, Lines of code: 41


Example 15: set_short_version

/*
 * write out the PG_VERSION file in the specified directory
 */
static void
set_short_version(const char *path)
{
    char       *short_version;
    bool        gotdot = false;
    int         end;
    char       *fullname;
    FILE       *version_file;

    /* Construct short version string (should match initdb.c) */
    short_version = pstrdup(PG_VERSION);

    for (end = 0; short_version[end] != '\0'; end++)
    {
        if (short_version[end] == '.')
        {
            Assert(end != 0);
            if (gotdot)
                break;
            else
                gotdot = true;
        }
        else if (short_version[end] < '0' || short_version[end] > '9')
        {
            /* gone past digits and dots */
            break;
        }
    }
    Assert(end > 0 && short_version[end - 1] != '.' && gotdot);
    short_version[end] = '\0';

    /* Now write the file */
    fullname = palloc(strlen(path) + 11 + 1);
    sprintf(fullname, "%s/PG_VERSION", path);
    version_file = AllocateFile(fullname, PG_BINARY_W);
    if (version_file == NULL)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not write to file \"%s\": %m",
                        fullname)));
    fprintf(version_file, "%s\n", short_version);
    if (FreeFile(version_file))
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not write to file \"%s\": %m",
                        fullname)));

    pfree(fullname);
    pfree(short_version);
}

Contributor: shubham2094, Project: postgresql_8.1, Lines of code: 53


Example 16: tsearch_readline_begin

/*
 * Set up to read a file using tsearch_readline().  This facility is
 * better than just reading the file directly because it provides error
 * context pointing to the specific line where a problem is detected.
 *
 * Expected usage is:
 *
 *      tsearch_readline_state trst;
 *
 *      if (!tsearch_readline_begin(&trst, filename))
 *          ereport(ERROR,
 *                  (errcode(ERRCODE_CONFIG_FILE_ERROR),
 *                   errmsg("could not open stop-word file \"%s\": %m",
 *                          filename)));
 *      while ((line = tsearch_readline(&trst)) != NULL)
 *          process line;
 *      tsearch_readline_end(&trst);
 *
 * Note that the caller supplies the ereport() for file open failure;
 * this is so that a custom message can be provided.  The filename string
 * passed to tsearch_readline_begin() must remain valid through
 * tsearch_readline_end().
 */
bool
tsearch_readline_begin(tsearch_readline_state *stp,
                       const char *filename)
{
    if ((stp->fp = AllocateFile(filename, "r")) == NULL)
        return false;
    stp->filename = filename;
    stp->lineno = 0;
    stp->curline = NULL;
    /* Setup error traceback support for ereport() */
    stp->cb.callback = tsearch_readline_callback;
    stp->cb.arg = (void *) stp;
    stp->cb.previous = error_context_stack;
    error_context_stack = &stp->cb;
    return true;
}

Contributor: 0x0FFF, Project: postgres, Lines of code: 39


Example 17: XLogArchiveForceDone

/*
 * XLogArchiveForceDone
 *
 * Emit notification forcibly that an XLOG segment file has been successfully
 * archived, by creating <XLOG>.done regardless of whether <XLOG>.ready
 * exists or not.
 */
void
XLogArchiveForceDone(const char *xlog)
{
    char        archiveReady[MAXPGPATH];
    char        archiveDone[MAXPGPATH];
    struct stat stat_buf;
    FILE       *fd;

    /* Exit if already known done */
    StatusFilePath(archiveDone, xlog, ".done");
    if (stat(archiveDone, &stat_buf) == 0)
        return;

    /* If .ready exists, rename it to .done */
    StatusFilePath(archiveReady, xlog, ".ready");
    if (stat(archiveReady, &stat_buf) == 0)
    {
        if (rename(archiveReady, archiveDone) < 0)
            ereport(WARNING,
                    (errcode_for_file_access(),
                     errmsg("could not rename file \"%s\" to \"%s\": %m",
                            archiveReady, archiveDone)));

        return;
    }

    /* insert an otherwise empty file called <XLOG>.done */
    fd = AllocateFile(archiveDone, "w");
    if (fd == NULL)
    {
        ereport(LOG,
                (errcode_for_file_access(),
                 errmsg("could not create archive status file \"%s\": %m",
                        archiveDone)));
        return;
    }
    if (FreeFile(fd))
    {
        ereport(LOG,
                (errcode_for_file_access(),
                 errmsg("could not write archive status file \"%s\": %m",
                        archiveDone)));
        return;
    }
}

Contributor: JiannengSun, Project: postgres, Lines of code: 52


Example 18: CreateFileSource

static Source *
CreateFileSource(const char *path, TupleDesc desc)
{
    FileSource *self = palloc0(sizeof(FileSource));

    self->base.read = (SourceReadProc) FileSourceRead;
    self->base.close = (SourceCloseProc) FileSourceClose;

    self->fd = AllocateFile(path, "r");
    if (self->fd == NULL)
        ereport(ERROR, (errcode_for_file_access(),
            errmsg("could not open \"%s\" %m", path)));

#if defined(USE_POSIX_FADVISE)
    posix_fadvise(fileno(self->fd), 0, 0,
                  POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE | POSIX_FADV_WILLNEED);
#endif

    return (Source *) self;
}

Contributor: bwtakacy, Project: prev_pg_bulkload_repo, Lines of code: 18


Example 19: write_nondefault_variables

void
write_nondefault_variables(GucContext context)
{
    int         elevel;
    FILE       *fp;
    int         i;

    Assert(context == PGC_POSTMASTER || context == PGC_SIGHUP);

    elevel = (context == PGC_SIGHUP) ? LOG : ERROR;

    /*
     * Open file
     */
    fp = AllocateFile(CONFIG_EXEC_PARAMS_NEW, "w");
    if (!fp)
    {
        ereport(elevel,
                (errcode_for_file_access(),
                 errmsg("could not write to file \"%s\": %m",
                        CONFIG_EXEC_PARAMS_NEW)));
        return;
    }

    for (i = 0; i < num_guc_variables; i++)
    {
        write_one_nondefault_variable(fp, guc_variables[i]);
    }

    if (FreeFile(fp))
    {
        ereport(elevel,
                (errcode_for_file_access(),
                 errmsg("could not write to file \"%s\": %m",
                        CONFIG_EXEC_PARAMS_NEW)));
        return;
    }

    /*
     * Put new file in place.  This could delay on Win32, but we don't hold
     * any exclusive locks.
     */
    rename(CONFIG_EXEC_PARAMS_NEW, CONFIG_EXEC_PARAMS);
}

Contributor: pganalyze, Project: collector, Lines of code: 44


Example 20: CreateLogger

void
CreateLogger(const char *path, bool verbose, bool writer)
{
    memset(&logger, 0, sizeof(logger));

    logger.verbose = verbose;
    logger.writer = writer;

    if (!is_absolute_path(path))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("relative path not allowed for LOGFILE: %s", path)));

    logger.logfile = pstrdup(path);

    logger.fp = AllocateFile(logger.logfile, "at");
    if (logger.fp == NULL)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not open loader log file \"%s\": %m",
                        logger.logfile)));
}

Contributor: Komzpa, Project: pg_bulkload, Lines of code: 21


Example 21: CStoreWriteFooter

/*
 * CStoreWriteFooter writes the given footer to given file. First, the function
 * serializes and writes the footer to the file. Then, the function serializes
 * and writes the postscript. Then, the function writes the postscript size as
 * the last byte of the file. Last, the function syncs and closes the footer file.
 */
static void
CStoreWriteFooter(StringInfo tableFooterFilename, TableFooter *tableFooter)
{
    FILE       *tableFooterFile = NULL;
    StringInfo  tableFooterBuffer = NULL;
    StringInfo  postscriptBuffer = NULL;
    uint8       postscriptSize = 0;

    tableFooterFile = AllocateFile(tableFooterFilename->data, PG_BINARY_W);
    if (tableFooterFile == NULL)
    {
        ereport(ERROR, (errcode_for_file_access(),
                        errmsg("could not open file \"%s\" for writing: %m",
                               tableFooterFilename->data)));
    }

    /* write the footer */
    tableFooterBuffer = SerializeTableFooter(tableFooter);
    WriteToFile(tableFooterFile, tableFooterBuffer->data, tableFooterBuffer->len);

    /* write the postscript */
    postscriptBuffer = SerializePostScript(tableFooterBuffer->len);
    WriteToFile(tableFooterFile, postscriptBuffer->data, postscriptBuffer->len);

    /* write the 1-byte postscript size */
    Assert(postscriptBuffer->len < CSTORE_POSTSCRIPT_SIZE_MAX);
    postscriptSize = postscriptBuffer->len;
    WriteToFile(tableFooterFile, &postscriptSize, CSTORE_POSTSCRIPT_SIZE_LENGTH);

    SyncAndCloseFile(tableFooterFile);

    pfree(tableFooterBuffer->data);
    pfree(tableFooterBuffer);
    pfree(postscriptBuffer->data);
    pfree(postscriptBuffer);
}

Contributor: ibrarahmad, Project: cstore_fdw, Lines of code: 42


Example 22: query_write

/* Dumps the histogram data into a file (with a md5 hash of the contents at the beginning). */
static void
query_write(double duration, const char *query, int len, const char *header, int hlen)
{
    FILE   *file;

    /* write the buffer first */
    buffer_write();

    /* now write the query */
    prepare_file(log_file, query_buffer, hlen + len);

    file = AllocateFile(log_file->curr_filename, PG_BINARY_A);
    if (file == NULL)
        goto error;

    /* now write the actual shared segment */
    if (fwrite(header, hlen, 1, file) != 1)
        goto error;

    /* now write the actual shared segment */
    if (fwrite(query, len, 1, file) != 1)
        goto error;

    FreeFile(file);

    return;

error:
    ereport(LOG,
            (errcode_for_file_access(),
             errmsg("could not write query to the file \"%s\": %m",
                    log_file->curr_filename)));

    if (file)
        FreeFile(file);
}

Contributor: tvondra, Project: query_recorder, Lines of code: 37


Example 23: load_rules

/*
 * Load rules from the file.
 *
 * Parses the pg_limits.conf file and loads all the connection rules that
 * are defined in it. A syntax error should not result in a failure,
 * only a WARNING message (and skipping the row). If there are too many
 * rules in the file (exceeding MAX_RULES), that fails with an ERROR.
 *
 * FIXME The current implementation modifies the segment in-place, so
 *       if a config reload fails, the backends will see the result of
 *       the failed reload. That's not really nice. This should use a
 *       local buffer an only copy it in place if everything went OK.
 *
 * FIXME The other issue is that we're holding ProcArrayLock while
 *       parsing the file - at the moment this is necessary because of
 *       the in-place reload. Once this is fixed, we can hold the lock
 *       only for the final copy (in case of success).
 */
static void
load_rules()
{
    FILE   *file;
    char    line[LINE_MAXLEN];
    char    dbname[NAMEDATALEN], user[NAMEDATALEN], ip[NAMEDATALEN], mask[NAMEDATALEN];
    int     limit;
    int     line_number = 0;

    file = AllocateFile(LIMITS_FILE, "r");
    if (file == NULL)
    {
        ereport(WARNING,
                (errcode_for_file_access(),
                 errmsg("could not open configuration file \"%s\": %m",
                        LIMITS_FILE)));
        return;
    }

    /*
     * Use the same lock as when checking the rules (when opening the
     * connection) etc. This is probably the right thing to do.
     */
    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);

    /* make sure there are no rules (keep the backend info though) */
    memset(rules, 0, RULES_SEGMENT_SIZE);

    while (fgets(line, LINE_MAXLEN, file) != NULL)
    {
        /* remove the comment from the line */
        char   *comment = strchr(line, '#');

        if (comment != NULL)
            (*comment) = '\0';

        /* remove all white-space chars from the end of the line */
        comment--;
        while (isspace(comment) && (comment >= line))
        {
            *comment = '\0';
            comment--;
        }

        ++line_number;

        /* database user ip mask limit */
        if (sscanf(line, "%s %s %s %s %d", dbname, user, ip, mask, &limit) == 5)
            load_rule(line_number, dbname, user, ip, mask, limit);

        /* database user ip/mask limit */
        else if (sscanf(line, "%s %s %s %d", dbname, user, ip, &limit) == 4)
            load_rule(line_number, dbname, user, ip, NULL, limit);

        /* non-empty line with invalid format */
        else if (strlen(line) > 0)
            elog(WARNING, "invalid rule at line %d", line_number);
    }

    FreeFile(file);

    LWLockRelease(ProcArrayLock);

    elog(DEBUG1, "loaded %d connection limit rule(s)", rules->n_rules);
}

Contributor: tvondra, Project: connection_limits, Lines of code: 86


Example 24: pgss_shmem_startup

/*
 * shmem_startup hook: allocate or attach to shared memory,
 * then load any pre-existing statistics from file.
 */
static void
pgss_shmem_startup(void)
{
    bool        found;
    HASHCTL     info;
    FILE       *file;
    uint32      header;
    int32       num;
    int32       i;
    int         query_size;
    int         buffer_size;
    char       *buffer = NULL;

    if (prev_shmem_startup_hook)
        prev_shmem_startup_hook();

    /* reset in case this is a restart within the postmaster */
    pgss = NULL;
    pgss_hash = NULL;

    /*
     * Create or attach to the shared memory state, including hash table
     */
    LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

    pgss = ShmemInitStruct("pg_stat_statements",
                           sizeof(pgssSharedState),
                           &found);

    if (!found)
    {
        /* First time through ... */
        pgss->lock = LWLockAssign();
        pgss->query_size = pgstat_track_activity_query_size;
    }

    /* Be sure everyone agrees on the hash table entry size */
    query_size = pgss->query_size;

    memset(&info, 0, sizeof(info));
    info.keysize = sizeof(pgssHashKey);
    info.entrysize = offsetof(pgssEntry, query) + query_size;
    info.hash = pgss_hash_fn;
    info.match = pgss_match_fn;
    pgss_hash = ShmemInitHash("pg_stat_statements hash",
                              pgss_max, pgss_max,
                              &info,
                              HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);

    LWLockRelease(AddinShmemInitLock);

    /*
     * If we're in the postmaster (or a standalone backend...), set up a shmem
     * exit hook to dump the statistics to disk.
     */
    if (!IsUnderPostmaster)
        on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);

    /*
     * Attempt to load old statistics from the dump file, if this is the first
     * time through and we weren't told not to.
     */
    if (found || !pgss_save)
        return;

    /*
     * Note: we don't bother with locks here, because there should be no other
     * processes running when this code is reached.
     */
    file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R);
    if (file == NULL)
    {
        if (errno == ENOENT)
            return;             /* ignore not-found error */
        goto error;
    }

    buffer_size = query_size;
    buffer = (char *) palloc(buffer_size);

    if (fread(&header, sizeof(uint32), 1, file) != 1 ||
        header != PGSS_FILE_HEADER ||
        fread(&num, sizeof(int32), 1, file) != 1)
        goto error;

    for (i = 0; i < num; i++)
    {
        pgssEntry   temp;
        pgssEntry  *entry;

        if (fread(&temp, offsetof(pgssEntry, mutex), 1, file) != 1)
            goto error;

        /* Encoding is the only field we can easily sanity-check */
        if (!PG_VALID_BE_ENCODING(temp.key.encoding))
            goto error;
//......... part of the code is omitted here .........

Contributor: HBPSP8Repo, Project: NoDB, Lines of code: 101


Example 25: set_short_version

/*
 * write out the PG_VERSION file in the specified directory. If mirror is true,
 * mirror the file creation to our segment mirror.
 *
 * XXX: API is terrible, make it cleaner
 */
void
set_short_version(const char *path, DbDirNode *dbDirNode, bool mirror)
{
    char       *short_version;
    bool        gotdot = false;
    int         end;
    char       *fullname;
    FILE       *version_file;

    /* Construct short version string (should match initdb.c) */
    short_version = pstrdup(PG_VERSION);

    for (end = 0; short_version[end] != '\0'; end++)
    {
        if (short_version[end] == '.')
        {
            Assert(end != 0);
            if (gotdot)
                break;
            else
                gotdot = true;
        }
        else if (short_version[end] < '0' || short_version[end] > '9')
        {
            /* gone past digits and dots */
            break;
        }
    }
    Assert(end > 0 && short_version[end - 1] != '.' && gotdot);
    short_version[end++] = '\n';
    short_version[end] = '\0';

    if (mirror)
    {
        MirroredFlatFileOpen mirroredOpen;

        Insist(!PointerIsValid(path));
        Insist(PointerIsValid(dbDirNode));

        MirroredFlatFile_OpenInDbDir(&mirroredOpen, dbDirNode, "PG_VERSION",
                            O_CREAT | O_WRONLY | PG_BINARY, S_IRUSR | S_IWUSR,
                            /* suppressError */ false);

        MirroredFlatFile_Append(&mirroredOpen, short_version,
                                end,
                                /* suppressError */ false);

        MirroredFlatFile_Flush(&mirroredOpen, /* suppressError */ false);
        MirroredFlatFile_Close(&mirroredOpen);
    }
    else
    {
        Insist(!PointerIsValid(dbDirNode));
        Insist(PointerIsValid(path));

        /* Now write the file */
        fullname = palloc(strlen(path) + 11 + 1);
        sprintf(fullname, "%s/PG_VERSION", path);
        version_file = AllocateFile(fullname, PG_BINARY_W);
        if (version_file == NULL)
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not write to file \"%s\": %m",
                            fullname)));
        fprintf(version_file, "%s", short_version);
        if (FreeFile(version_file))
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not write to file \"%s\": %m",
                            fullname)));
        pfree(fullname);
    }

    pfree(short_version);
}

Contributor: Mrfuture1, Project: gpdb, Lines of code: 81


Example 26: gp_read_error_log

/*
 * gp_read_error_log
 *
 * Returns set of error log tuples.
 */
Datum
gp_read_error_log(PG_FUNCTION_ARGS)
{
    FuncCallContext    *funcctx;
    ReadErrorLogContext *context;
    HeapTuple           tuple;
    Datum               result;

    /*
     * First call setup
     */
    if (SRF_IS_FIRSTCALL())
    {
        MemoryContext   oldcontext;
        FILE       *fp;
        text       *relname;

        funcctx = SRF_FIRSTCALL_INIT();

        relname = PG_GETARG_TEXT_P(0);
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        context = palloc0(sizeof(ReadErrorLogContext));
        funcctx->user_fctx = (void *) context;

        funcctx->tuple_desc = BlessTupleDesc(GetErrorTupleDesc());

        /*
         * Though this function is usually executed on segment, we dispatch
         * the execution if it happens to be on QD, and combine the results
         * into one set.
         */
        if (Gp_role == GP_ROLE_DISPATCH)
        {
            struct CdbPgResults cdb_pgresults = {NULL, 0};
            StringInfoData sql;

            int     i;

            initStringInfo(&sql);

            /*
             * construct SQL
             */
            appendStringInfo(&sql,
                    "SELECT * FROM pg_catalog.gp_read_error_log(%s) ",
                             quote_literal_internal(text_to_cstring(relname)));

            CdbDispatchCommand(sql.data, DF_WITH_SNAPSHOT, &cdb_pgresults);

            for (i = 0; i < cdb_pgresults.numResults; i++)
            {
                if (PQresultStatus(cdb_pgresults.pg_results[i]) != PGRES_TUPLES_OK)
                {
                    cdbdisp_clearCdbPgResults(&cdb_pgresults);
                    elog(ERROR, "unexpected result from segment: %d",
                                PQresultStatus(cdb_pgresults.pg_results[i]));
                }
                context->numTuples += PQntuples(cdb_pgresults.pg_results[i]);
            }

            pfree(sql.data);

            context->segResults = cdb_pgresults.pg_results;
            context->numSegResults = cdb_pgresults.numResults;
        }
        else
        {
            /*
             * In QE, read the error log.
             */
            RangeVar       *relrv;
            Oid             relid;

            relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
            relid = RangeVarGetRelid(relrv, true);

            /*
             * If the relation has gone, silently return no tuples.
             */
            if (OidIsValid(relid))
            {
                AclResult aclresult;

                /*
                 * Requires SELECT priv to read error log.
                 */
                aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
                if (aclresult != ACLCHECK_OK)
                    aclcheck_error(aclresult, ACL_KIND_CLASS, relrv->relname);

                ErrorLogFileName(context->filename, MyDatabaseId, relid);
                fp = AllocateFile(context->filename, "r");
                context->fp = fp;
            }
        }
//......... part of the code is omitted here .........

Contributor: LJoNe, Project: gpdb, Lines of code: 101


Example 27: ImportSnapshot

/*
 * ImportSnapshot
 *      Import a previously exported snapshot.  The argument should be a
 *      filename in SNAPSHOT_EXPORT_DIR.  Load the snapshot from that file.
 *      This is called by "SET TRANSACTION SNAPSHOT 'foo'".
 */
void
ImportSnapshot(const char *idstr)
{
    char        path[MAXPGPATH];
    FILE       *f;
    struct stat stat_buf;
    char       *filebuf;
    int         xcnt;
    int         i;
    TransactionId src_xid;
    Oid         src_dbid;
    int         src_isolevel;
    bool        src_readonly;
    SnapshotData snapshot;

    /*
     * Must be at top level of a fresh transaction.  Note in particular that
     * we check we haven't acquired an XID --- if we have, it's conceivable
     * that the snapshot would show it as not running, making for very
     * screwy behavior.
     */
    if (FirstSnapshotSet ||
        GetTopTransactionIdIfAny() != InvalidTransactionId ||
        IsSubTransaction())
        ereport(ERROR,
                (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
                 errmsg("SET TRANSACTION SNAPSHOT must be called before any query")));

    /*
     * If we are in read committed mode then the next query would execute
     * with a new snapshot thus making this function call quite useless.
     */
    if (!IsolationUsesXactSnapshot())
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("a snapshot-importing transaction must have isolation level SERIALIZABLE or REPEATABLE READ")));

    /*
     * Verify the identifier: only 0-9, A-F and hyphens are allowed.  We do
     * this mainly to prevent reading arbitrary files.
     */
    if (strspn(idstr, "0123456789ABCDEF-") != strlen(idstr))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("invalid snapshot identifier \"%s\"", idstr)));

    /* OK, read the file */
    snprintf(path, MAXPGPATH, SNAPSHOT_EXPORT_DIR "/%s", idstr);

    f = AllocateFile(path, PG_BINARY_R);
    if (!f)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("invalid snapshot identifier \"%s\"", idstr)));

    /* get the size of the file so that we know how much memory we need */
    if (fstat(fileno(f), &stat_buf))
        elog(ERROR, "could not stat file \"%s\": %m", path);

    /* and read the file into a palloc'd string */
    filebuf = (char *) palloc(stat_buf.st_size + 1);

    if (fread(filebuf, stat_buf.st_size, 1, f) != 1)
        elog(ERROR, "could not read file \"%s\": %m", path);

    filebuf[stat_buf.st_size] = '\0';

    FreeFile(f);

    /*
     * Construct a snapshot struct by parsing the file content.
     */
    memset(&snapshot, 0, sizeof(snapshot));

    src_xid = parseXidFromText("xid:", &filebuf, path);
    /* we abuse parseXidFromText a bit here ... */
    src_dbid = parseXidFromText("dbid:", &filebuf, path);
    src_isolevel = parseIntFromText("iso:", &filebuf, path);
    src_readonly = parseIntFromText("ro:", &filebuf, path);

    snapshot.xmin = parseXidFromText("xmin:", &filebuf, path);
    snapshot.xmax = parseXidFromText("xmax:", &filebuf, path);

    snapshot.xcnt = xcnt = parseIntFromText("xcnt:", &filebuf, path);

    /* sanity-check the xid count before palloc */
    if (xcnt < 0 || xcnt > GetMaxSnapshotXidCount())
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
                 errmsg("invalid snapshot data in file \"%s\"", path)));

    snapshot.xip = (TransactionId *) palloc(xcnt * sizeof(TransactionId));
    for (i = 0; i < xcnt; i++)
        snapshot.xip[i] = parseXidFromText("xip:", &filebuf, path);
//......... part of the code is omitted here .........

Contributor: avontd2868, Project: postgres, Lines of code: 101


Example 28: ExportSnapshot

//......... part of the code is omitted here .........

    MemoryContextSwitchTo(oldcxt);

    snapshot->regd_count++;
    RegisteredSnapshots++;

    /*
     * Fill buf with a text serialization of the snapshot, plus identification
     * data about this transaction.  The format expected by ImportSnapshot
     * is pretty rigid: each line must be fieldname:value.
     */
    initStringInfo(&buf);

    appendStringInfo(&buf, "xid:%u\n", topXid);
    appendStringInfo(&buf, "dbid:%u\n", MyDatabaseId);
    appendStringInfo(&buf, "iso:%d\n", XactIsoLevel);
    appendStringInfo(&buf, "ro:%d\n", XactReadOnly);

    appendStringInfo(&buf, "xmin:%u\n", snapshot->xmin);
    appendStringInfo(&buf, "xmax:%u\n", snapshot->xmax);

    /*
     * We must include our own top transaction ID in the top-xid data, since
     * by definition we will still be running when the importing transaction
     * adopts the snapshot, but GetSnapshotData never includes our own XID in
     * the snapshot.  (There must, therefore, be enough room to add it.)
     *
     * However, it could be that our topXid is after the xmax, in which case
     * we shouldn't include it because xip[] members are expected to be before
     * xmax.  (We need not make the same check for subxip[] members, see
     * snapshot.h.)
     */
    addTopXid = TransactionIdPrecedes(topXid, snapshot->xmax) ? 1 : 0;
    appendStringInfo(&buf, "xcnt:%d\n", snapshot->xcnt + addTopXid);
    for (i = 0; i < snapshot->xcnt; i++)
        appendStringInfo(&buf, "xip:%u\n", snapshot->xip[i]);
    if (addTopXid)
        appendStringInfo(&buf, "xip:%u\n", topXid);

    /*
     * Similarly, we add our subcommitted child XIDs to the subxid data.
     * Here, we have to cope with possible overflow.
     */
    if (snapshot->suboverflowed ||
        snapshot->subxcnt + nchildren > GetMaxSnapshotSubxidCount())
        appendStringInfoString(&buf, "sof:1\n");
    else
    {
        appendStringInfoString(&buf, "sof:0\n");
        appendStringInfo(&buf, "sxcnt:%d\n", snapshot->subxcnt + nchildren);
        for (i = 0; i < snapshot->subxcnt; i++)
            appendStringInfo(&buf, "sxp:%u\n", snapshot->subxip[i]);
        for (i = 0; i < nchildren; i++)
            appendStringInfo(&buf, "sxp:%u\n", children[i]);
    }
    appendStringInfo(&buf, "rec:%u\n", snapshot->takenDuringRecovery);

    /*
     * Now write the text representation into a file.  We first write to a
     * ".tmp" filename, and rename to final filename if no error.  This
     * ensures that no other backend can read an incomplete file
     * (ImportSnapshot won't allow it because of its valid-characters check).
     */
    XactExportFilePath(pathtmp, topXid, list_length(exportedSnapshots), ".tmp");

    if (!(f = AllocateFile(pathtmp, PG_BINARY_W)))
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not create file \"%s\": %m", pathtmp)));

    if (fwrite(buf.data, buf.len, 1, f) != 1)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not write to file \"%s\": %m", pathtmp)));

    /* no fsync() since file need not survive a system crash */

    if (FreeFile(f))
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not write to file \"%s\": %m", pathtmp)));

    /*
     * Now that we have written everything into a .tmp file, rename the file
     * to remove the .tmp suffix.
     */
    XactExportFilePath(path, topXid, list_length(exportedSnapshots), "");

    if (rename(pathtmp, path) < 0)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not rename file \"%s\" to \"%s\": %m",
                        pathtmp, path)));

    /*
     * The basename of the file is what we return from pg_export_snapshot().
     * It's already in path in a textual format and we know that the path
     * starts with SNAPSHOT_EXPORT_DIR.  Skip over the prefix and the slash
     * and pstrdup it so as not to return the address of a local variable.
     */
    return pstrdup(path + strlen(SNAPSHOT_EXPORT_DIR) + 1);
}

Contributor: avontd2868, Project: postgres, Lines of code: 101


Example 29: ReaderNext

/**
 * @brief Read the next tuple from parser.
 * @param rd  [in/out] reader
 * @return type
 */
HeapTuple
ReaderNext(Reader *rd)
{
    HeapTuple       tuple;
    MemoryContext   ccxt;
    bool            eof;
    Parser         *parser = rd->parser;

    ccxt = CurrentMemoryContext;

    eof = false;
    do
    {
        tuple = NULL;
        parser->parsing_field = -1;

        PG_TRY();
        {
            tuple = ParserRead(parser, &rd->checker);
            if (tuple == NULL)
                eof = true;
            else
            {
                tuple = CheckerTuple(&rd->checker, tuple,
                                     &parser->parsing_field);
                CheckerConstraints(&rd->checker, tuple, &parser->parsing_field);
            }
        }
        PG_CATCH();
        {
            ErrorData      *errdata;
            MemoryContext   ecxt;
            char           *message;
            StringInfoData  buf;

            if (parser->parsing_field < 0)
                PG_RE_THROW();  /* should not ignore */

            ecxt = MemoryContextSwitchTo(ccxt);
            errdata = CopyErrorData();

            /* We cannot ignore query aborts. */
            switch (errdata->sqlerrcode)
            {
                case ERRCODE_ADMIN_SHUTDOWN:
                case ERRCODE_QUERY_CANCELED:
                    MemoryContextSwitchTo(ecxt);
                    PG_RE_THROW();
                    break;
            }

            /* Absorb parse errors. */
            rd->parse_errors++;
            if (errdata->message)
                message = pstrdup(errdata->message);
            else
                message = "<no error message>";
            FlushErrorState();
            FreeErrorData(errdata);

            initStringInfo(&buf);
            appendStringInfo(&buf, "Parse error Record " int64_FMT
                ": Input Record " int64_FMT ": Rejected",
                rd->parse_errors, parser->count);

            if (parser->parsing_field > 0)
                appendStringInfo(&buf, " - column %d", parser->parsing_field);

            appendStringInfo(&buf, ". %s\n", message);

            LoggerLog(WARNING, buf.data);

            /* Terminate if PARSE_ERRORS has been reached. */
            if (rd->parse_errors > rd->max_parse_errors)
            {
                eof = true;
                LoggerLog(WARNING,
                    "Maximum parse error count exceeded - " int64_FMT
                    " error(s) found in input file\n",
                    rd->parse_errors);
            }

            /* output parse bad file. */
            if (rd->parse_fp == NULL)
                if ((rd->parse_fp = AllocateFile(rd->parse_badfile, "w")) == NULL)
                    ereport(ERROR,
                            (errcode_for_file_access(),
                             errmsg("could not open parse bad file \"%s\": %m",
                                    rd->parse_badfile)));

            ParserDumpRecord(parser, rd->parse_fp, rd->parse_badfile);

            MemoryContextReset(ccxt);

            // Without the below line, the regression tests shows the different result on debug-build mode.
            tuple = NULL;
//......... part of the code is omitted here .........

Contributor: gatehouse, Project: pg_bulkload, Lines of code: 101



Note: The AllocateFile examples in this article were compiled from source-code and documentation platforms such as GitHub and MSDocs, and the snippets were selected from open-source projects contributed by many developers. Copyright of the source code remains with the original authors; for redistribution and use, please refer to each project's license. Do not reproduce without permission.

