path: root/src/pl/plperl/plperl.c
author     Tom Lane <tgl@sss.pgh.pa.us>  2016-03-12 16:05:10 -0500
committer  Tom Lane <tgl@sss.pgh.pa.us>  2016-03-12 16:05:29 -0500
commit     23a27b039d94ba359286694831eafe03cd970eef (patch)
tree       4b1d6acb2fe4ae5c1275292adb3f027f2e052ea8 /src/pl/plperl/plperl.c
parent     e01157500f26342bf4f067a4eb1e45ab9a3cd410 (diff)
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's portalPos field, FuncCallContext's call_cntr and max_calls fields, ExecutorRun's count argument, PortalRunFetch's result, and the max number of rows in a SPITupleTable to uint64, and deals with (I hope) all the ensuing fallout. Some of these values were declared uint32 before, and others "long".

I also removed PortalData's posOverflow field, since that logic seems pretty useless given that portalPos is now always 64 bits.

The user-visible results are that command tags for SELECT etc will correctly report tuple counts larger than 4G, as will plpgsql's GET DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples than that are still not exactly the norm, but they're becoming more common.

Most values associated with FETCH/MOVE distances, such as PortalRun's count argument and the count argument of most SPI functions that have one, remain declared as "long". It's not clear whether it would be worth promoting those to int64; but it would definitely be a large dollop of additional API churn on top of this, and it would only help 32-bit platforms which seem relatively less likely to see any benefit.

Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
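For context, here is a minimal sketch (not part of this commit; the function and table name are invented) of how C extension code observes the widened counter after this change, assuming a server version where SPI_processed is uint64 and using the UINT64_FORMAT printf macro from c.h:

    #include "postgres.h"
    #include "executor/spi.h"

    /* Hypothetical helper: run a query and report its row count.
     * After this commit, SPI_processed is uint64, so counts beyond
     * 4G rows are reported exactly instead of wrapping. */
    static void
    report_row_count(void)
    {
        if (SPI_connect() != SPI_OK_CONNECT)
            elog(ERROR, "SPI_connect failed");

        if (SPI_execute("SELECT * FROM big_table", true, 0) != SPI_OK_SELECT)
            elog(ERROR, "SPI_execute failed");

        /* UINT64_FORMAT (from c.h) portably prints a uint64 */
        elog(NOTICE, "processed " UINT64_FORMAT " rows", SPI_processed);

        SPI_finish();
    }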
Diffstat (limited to 'src/pl/plperl/plperl.c')
-rw-r--r--  src/pl/plperl/plperl.c  25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index cd917ab8e46..269f7f33220 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -12,8 +12,9 @@
/* system stuff */
#include <ctype.h>
#include <fcntl.h>
-#include <unistd.h>
+#include <limits.h>
#include <locale.h>
+#include <unistd.h>
/* postgreSQL stuff */
#include "access/htup_details.h"
@@ -281,7 +282,7 @@ static Datum plperl_hash_to_datum(SV *src, TupleDesc td);
static void plperl_init_shared_libs(pTHX);
static void plperl_trusted_init(void);
static void plperl_untrusted_init(void);
-static HV *plperl_spi_execute_fetch_result(SPITupleTable *, int, int);
+static HV *plperl_spi_execute_fetch_result(SPITupleTable *, uint64, int);
static char *hek2cstr(HE *he);
static SV **hv_store_string(HV *hv, const char *key, SV *val);
static SV **hv_fetch_string(HV *hv, const char *key);
@@ -1472,7 +1473,7 @@ plperl_ref_from_pg_array(Datum arg, Oid typid)
hv = newHV();
(void) hv_store(hv, "array", 5, av, 0);
- (void) hv_store(hv, "typeoid", 7, newSViv(typid), 0);
+ (void) hv_store(hv, "typeoid", 7, newSVuv(typid), 0);
return sv_bless(newRV_noinc((SV *) hv),
gv_stashpv("PostgreSQL::InServer::ARRAY", 0));
@@ -3091,7 +3092,7 @@ plperl_spi_exec(char *query, int limit)
static HV *
-plperl_spi_execute_fetch_result(SPITupleTable *tuptable, int processed,
+plperl_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 processed,
int status)
{
HV *result;
@@ -3103,13 +3104,25 @@ plperl_spi_execute_fetch_result(SPITupleTable *tuptable, int processed,
hv_store_string(result, "status",
cstr2sv(SPI_result_code_string(status)));
hv_store_string(result, "processed",
- newSViv(processed));
+ (processed > (uint64) INT_MAX) ?
+ newSVnv((double) processed) :
+ newSViv((int) processed));
if (status > 0 && tuptable)
{
AV *rows;
SV *row;
- int i;
+ uint64 i;
+
+ /*
+ * av_extend's 2nd argument is declared I32. It's possible we could
+ * nonetheless push more than INT_MAX elements into a Perl array, but
+ * let's just fail instead of trying.
+ */
+ if (processed > (uint64) INT_MAX)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("query result has too many rows to fit in a Perl array")));
rows = newAV();
av_extend(rows, processed);
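Two points are worth noting in this last hunk. First, the "processed" count is stored as an exact Perl integer (newSViv) when it fits in INT_MAX, and otherwise as a double (newSVnv), which remains exact up to 2^53. Second, as the new comment says, av_extend's second argument is declared I32, so the code raises an error rather than trying to build a result array of more than INT_MAX rows. The conversion strategy, restated as a standalone sketch mirroring the patch (helper name invented; assumes the plperl.c context, i.e. perl.h and limits.h are available):

    /* Sketch of the patch's IV-vs-NV choice for a uint64 row count. */
    static SV *
    row_count_to_sv(uint64 processed)
    {
        if (processed > (uint64) INT_MAX)
            return newSVnv((double) processed); /* may round above 2^53 */
        return newSViv((int) processed);        /* exact for small counts */
    }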