Merge branch 'master' into concurrent_indexes.
Conflicts: bin/pg_repack.c
bin/pg_repack.c

@@ -325,7 +325,7 @@ repack_all_databases(const char *orderby)
 		}
 	}
 
-	PQclear(result);
+	CLEARPGRES(result);
 }
 
 /* result is not copied */
@@ -435,7 +435,7 @@ repack_one_database(const char *orderby, char *errbuf, size_t errsize)
 		}
 		goto cleanup;
 	}
-	PQclear(res);
+	CLEARPGRES(res);
 
 	/* Disable statement timeout. */
 	command("SET statement_timeout = 0", 0, NULL);
@@ -560,8 +560,7 @@ repack_one_database(const char *orderby, char *errbuf, size_t errsize)
 	ret = true;
 
 cleanup:
-	if (res)
-		PQclear(res);
+	CLEARPGRES(res);
 	disconnect();
 	termStringInfo(&sql);
 	return ret;
@@ -586,7 +585,7 @@ apply_log(PGconn *conn, const repack_table *table, int count)
 					   "SELECT repack.repack_apply($1, $2, $3, $4, $5, $6)",
 					   6, params);
 	result = atoi(PQgetvalue(res, 0, 0));
-	PQclear(res);
+	CLEARPGRES(res);
 
 	return result;
 }
@@ -683,7 +682,6 @@ rebuild_indexes(const repack_table *table)
 				elog(WARNING, "Error sending async query: %s\n%s",
 					 index_jobs[i].create_index,
 					 PQerrorMessage(workers.conns[i]));
-				PQclear(res);
 				have_error = true;
 				goto cleanup;
 			}
@@ -692,7 +690,7 @@ rebuild_indexes(const repack_table *table)
 		 * available. That's OK, we'll get to them later.
 		 */
 	}
-	PQclear(res);
+	CLEARPGRES(res);
 
 	if (num_workers > 1)
 	{
@@ -700,7 +698,6 @@ rebuild_indexes(const repack_table *table)
 		int ret;
 
 /* Prefer poll() over select(), following PostgreSQL custom. */
-#undef HAVE_POLL
 #ifdef HAVE_POLL
 		struct pollfd *input_fds;
 
@@ -749,6 +746,8 @@ rebuild_indexes(const repack_table *table)
 			if (ret < 0 && errno != EINTR)
 				elog(ERROR, "poll() failed: %d, %d", ret, errno);
 
+			elog(DEBUG2, "Poll returned: %d", ret);
+
 			for (i = 0; i < num_indexes; i++)
 			{
 				if (index_jobs[i].status == INPROGRESS)
@@ -774,11 +773,10 @@ rebuild_indexes(const repack_table *table)
 							{
 								elog(WARNING, "Error with create index: %s",
 									 PQerrorMessage(workers.conns[index_jobs[i].worker_idx]));
-								PQclear(res);
 								have_error = true;
 								goto cleanup;
 							}
-							PQclear(res);
+							CLEARPGRES(res);
 						}
 
 						/* We are only going to re-queue one worker, even
@@ -825,6 +823,7 @@ rebuild_indexes(const repack_table *table)
 	}
 
 cleanup:
+	CLEARPGRES(res);
 	return (!have_error);
 }
 
@@ -835,7 +834,7 @@ cleanup:
 static void
 repack_one_table(const repack_table *table, const char *orderby)
 {
-	PGresult	   *res;
+	PGresult	   *res = NULL;
 	const char	   *params[2];
 	int				num;
 	int				num_waiting = 0;
@@ -900,11 +899,10 @@ repack_one_table(const repack_table *table, const char *orderby)
 			(errcode(E_PG_COMMAND),
 			 errmsg("trigger %s conflicted for %s",
 					PQgetvalue(res, 0, 0), table->target_name)));
-		PQclear(res);
 		have_error = true;
 		goto cleanup;
 	}
-	PQclear(res);
+	CLEARPGRES(res);
 
 	command(table->create_pktype, 0, NULL);
 	command(table->create_log, 0, NULL);
@@ -928,13 +926,12 @@ repack_one_table(const repack_table *table, const char *orderby)
 	if (PQresultStatus(res) != PGRES_TUPLES_OK)
 	{
 		printf("%s", PQerrorMessage(conn2));
-		PQclear(res);
 		have_error = true;
 		goto cleanup;
 	}
 	buffer[0] = '\0';
 	strncat(buffer, PQgetvalue(res, 0, 0), sizeof(buffer) - 1);
-	PQclear(res);
+	CLEARPGRES(res);
 
 	/*
 	 * Not using lock_access_share() here since we know that
@@ -995,11 +992,10 @@ repack_one_table(const repack_table *table, const char *orderby)
 		if (PQresultStatus(res) != PGRES_COMMAND_OK)
 		{
 			elog(WARNING, "Error with LOCK TABLE: %s", PQerrorMessage(conn2));
-			PQclear(res);
 			have_error = true;
 			goto cleanup;
 		}
-		PQclear(res);
+		CLEARPGRES(res);
 	}
 
 	/* Turn conn2 back into blocking mode for further non-async use. */
@@ -1034,11 +1030,10 @@ repack_one_table(const repack_table *table, const char *orderby)
 	{
 		elog(WARNING, "Unable to allocate vxid, length: %d\n",
 			 PQgetlength(res, 0, 0));
-		PQclear(res);
 		have_error = true;
 		goto cleanup;
 	}
-	PQclear(res);
+	CLEARPGRES(res);
 
 	/* Delete any existing entries in the log table now, since we have not
 	 * yet run the CREATE TABLE ... AS SELECT, which will take in all existing
@@ -1079,6 +1074,7 @@ repack_one_table(const repack_table *table, const char *orderby)
 		have_error = true;
 		goto cleanup;
 	}
+	CLEARPGRES(res);
 
 	/*
 	 * 4. Apply log to temp table until no tuples are left in the log
@@ -1109,7 +1105,7 @@ repack_one_table(const repack_table *table, const char *orderby)
 				num_waiting = num;
 			}
 
-			PQclear(res);
+			CLEARPGRES(res);
 			sleep(1);
 			continue;
 		}
@@ -1117,7 +1113,7 @@ repack_one_table(const repack_table *table, const char *orderby)
 		{
 			/* All old transactions are finished;
 			 * go to next step. */
-			PQclear(res);
+			CLEARPGRES(res);
 			break;
 		}
 	}
@@ -1169,6 +1165,7 @@ repack_one_table(const repack_table *table, const char *orderby)
 	}
 
 cleanup:
+	CLEARPGRES(res);
 	termStringInfo(&sql);
 	if (vxid)
 		free(vxid);
@@ -1210,7 +1207,7 @@ kill_ddl(PGconn *conn, Oid relid, bool terminate)
 			 "Canceled %d unsafe queries. Terminating any remaining PIDs.",
 			 PQntuples(res));
 
-		PQclear(res);
+		CLEARPGRES(res);
 		printfStringInfo(&sql, KILL_COMPETING_LOCKS, relid);
 		res = pgut_execute(conn, sql.data, 0, NULL);
 		if (PQresultStatus(res) != PGRES_TUPLES_OK)
@@ -1225,7 +1222,7 @@ kill_ddl(PGconn *conn, Oid relid, bool terminate)
 	else
 		elog(DEBUG2, "No competing DDL to cancel.");
 
-	PQclear(res);
+	CLEARPGRES(res);
 	termStringInfo(&sql);
 
 	return ret;
@@ -1285,13 +1282,13 @@ lock_access_share(PGconn *conn, Oid relid, const char *target_name)
 		res = pgut_execute_elevel(conn, sql.data, 0, NULL, DEBUG2);
 		if (PQresultStatus(res) == PGRES_COMMAND_OK)
 		{
-			PQclear(res);
+			CLEARPGRES(res);
 			break;
 		}
 		else if (sqlstate_equals(res, SQLSTATE_QUERY_CANCELED))
 		{
 			/* retry if lock conflicted */
-			PQclear(res);
+			CLEARPGRES(res);
 			pgut_rollback(conn);
 			continue;
 		}
@@ -1299,7 +1296,7 @@ lock_access_share(PGconn *conn, Oid relid, const char *target_name)
 		{
 			/* exit otherwise */
 			elog(WARNING, "%s", PQerrorMessage(connection));
-			PQclear(res);
+			CLEARPGRES(res);
 			ret = false;
 			break;
 		}
@@ -1371,13 +1368,13 @@ lock_exclusive(PGconn *conn, const char *relid, const char *lock_query, bool sta
 		res = pgut_execute_elevel(conn, lock_query, 0, NULL, DEBUG2);
 		if (PQresultStatus(res) == PGRES_COMMAND_OK)
 		{
-			PQclear(res);
+			CLEARPGRES(res);
 			break;
 		}
		else if (sqlstate_equals(res, SQLSTATE_QUERY_CANCELED))
 		{
 			/* retry if lock conflicted */
-			PQclear(res);
+			CLEARPGRES(res);
 			pgut_rollback(conn);
 			continue;
 		}
@@ -1385,7 +1382,7 @@ lock_exclusive(PGconn *conn, const char *relid, const char *lock_query, bool sta
 		{
 			/* exit otherwise */
 			printf("%s", PQerrorMessage(connection));
-			PQclear(res);
+			CLEARPGRES(res);
 			ret = false;
 			break;
 		}
|  | ||||
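Two of the pg_repack.c hunks above touch the worker-polling loop in rebuild_indexes(): the stray #undef HAVE_POLL is dropped so the poll() branch can actually be compiled, and a DEBUG2 message now reports what poll() returned. As a rough illustration of that pattern only, the sketch below shows how a poll() loop over libpq sockets is commonly combined with PQconsumeInput(), PQisBusy() and PQgetResult() to collect the results of several queries started earlier with PQsendQuery(). The helper name wait_for_workers, its arguments, and the fixed 16-connection limit are assumptions made for the example, not pg_repack code.

/* Minimal sketch, not pg_repack's actual implementation: wait for all
 * asynchronous queries previously started with PQsendQuery() to finish.
 */
#include <poll.h>
#include <stdbool.h>
#include <stdio.h>
#include <libpq-fe.h>

static bool
wait_for_workers(PGconn **conns, int nconns)
{
	struct pollfd	fds[16];			/* assumes nconns <= 16 for brevity */
	bool			done[16] = {false};
	int				remaining = nconns;

	while (remaining > 0)
	{
		int		i, nfds = 0;

		/* Watch every connection that still has a query in flight. */
		for (i = 0; i < nconns; i++)
		{
			if (done[i])
				continue;
			fds[nfds].fd = PQsocket(conns[i]);
			fds[nfds].events = POLLIN;
			fds[nfds].revents = 0;
			nfds++;
		}

		if (poll(fds, nfds, -1) < 0)
			return false;				/* real code would retry on EINTR */

		for (i = 0; i < nconns; i++)
		{
			if (done[i])
				continue;
			if (!PQconsumeInput(conns[i]))
				return false;
			if (PQisBusy(conns[i]))
				continue;				/* result not complete yet; poll again */

			/* Drain all results; NULL means this command is finished. */
			for (;;)
			{
				PGresult   *res = PQgetResult(conns[i]);

				if (res == NULL)
					break;
				if (PQresultStatus(res) != PGRES_COMMAND_OK)
					fprintf(stderr, "worker %d: %s", i, PQerrorMessage(conns[i]));
				PQclear(res);
			}
			done[i] = true;
			remaining--;
		}
	}
	return true;
}

pg_repack's real loop does more than this sketch: as the surrounding hunks show, it re-queues pending CREATE INDEX jobs as workers become free and falls back to select() when HAVE_POLL is not defined.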
pgut-fe.h

@@ -84,4 +84,9 @@ extern void pgut_readopt(const char *path, pgut_option options[], int elevel);
 extern void pgut_setopt(pgut_option *opt, const char *optarg, pgut_optsrc src);
 extern bool pgut_keyeq(const char *lhs, const char *rhs);
 
+/* So we don't need to fret over multiple calls to PQclear(), e.g.
+ * in cleanup labels.
+ */
+#define CLEARPGRES(pgres)  do { PQclear(pgres); pgres = NULL; } while (0)
+
 #endif   /* PGUT_FE_H */
|  | ||||
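The new CLEARPGRES() macro matters because PQclear() must not be applied twice to the same PGresult, while PQclear(NULL) is documented as a no-op; resetting the pointer therefore lets a cleanup label clear it unconditionally, which is exactly what the pg_repack.c hunks above rely on. A minimal sketch of that usage, with an invented function and query rather than anything taken from pg_repack:

/* Illustrative only: shows why resetting the pointer makes an unconditional
 * clear in a cleanup label safe. The function name and query are invented.
 */
#include <stdbool.h>
#include <libpq-fe.h>

#define CLEARPGRES(pgres)  do { PQclear(pgres); pgres = NULL; } while (0)

static bool
count_rows(PGconn *conn)
{
	PGresult   *res = NULL;		/* must start out NULL for the cleanup label */
	bool		ok = false;

	res = PQexec(conn, "SELECT count(*) FROM pg_class");
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
		goto cleanup;			/* res still set; cleanup frees it exactly once */

	/* ... use PQgetvalue(res, 0, 0) here ... */
	ok = true;
	CLEARPGRES(res);			/* freed and reset to NULL */

cleanup:
	/* Safe on every path: PQclear(NULL) does nothing, and a pointer already
	 * cleared above cannot be freed a second time. */
	CLEARPGRES(res);
	return ok;
}

The do { ... } while (0) wrapper is the usual trick for making a multi-statement macro behave like a single statement, for example inside an unbraced if/else.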