author     Tom Lane <tgl@sss.pgh.pa.us>    2025-01-31 13:52:40 -0500
committer  Tom Lane <tgl@sss.pgh.pa.us>    2025-01-31 13:52:40 -0500
commit     041e8b95b8cd251bfec6a3c9c3dd6614de6a4c9b (patch)
tree       58c23012141617cec375499045af409ae0c24afd /src/backend/optimizer
parent     f8d8581ed882b79b512daaa7f71ca19c8eafcaef (diff)
Get rid of our dependency on type "long" for memory size calculations.
Consistently use "Size" (or size_t, or in some places int64 or double) as the type for variables holding memory allocation sizes.

In most places variables' data types were fine already, but we had an ancient habit of computing bytes from kilobytes-units GUCs with code like "work_mem * 1024L". That risks overflow on Win64, where they did not make "long" as wide as "size_t". We worked around that by restricting such GUCs' ranges, so you couldn't set work_mem et al higher than 2GB on Win64. This patch removes that restriction, after replacing such calculations with "work_mem * (Size) 1024" or variants of that.

It should be noted that this patch was constructed by searching outwards from the GUCs that have MAX_KILOBYTES as upper limit. So I can't positively guarantee there are no other places doing memory-size arithmetic in int or long variables. I do however feel pretty confident that increasing MAX_KILOBYTES on Win64 is safe now. Also, nothing in our code should be dealing in multiple-gigabyte allocations without authorization from a relevant GUC, so it seems pretty likely that this search caught everything that could be at risk of overflow.

Author: Vladlen Popolitov <v.popolitov@postgrespro.ru>
Co-authored-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://postgr.es/m/1a01f0-66ec2d80-3b-68487680@27595217
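To illustrate the hazard the message describes, here is a minimal standalone C sketch (not part of the commit; the 3GB setting and the variable names are hypothetical). On an LLP64 platform such as Win64, "long" is only 32 bits wide, so the old "kilobytes * 1024L" pattern can overflow, while casting one operand to size_t (PostgreSQL's "Size") first keeps the multiplication in a 64-bit type:

    /*
     * Standalone sketch, not PostgreSQL source.  Shows why
     * "work_mem * 1024L" is unsafe where "long" is 32 bits (Win64),
     * and why "work_mem * (Size) 1024" is not.
     */
    #include <stdio.h>
    #include <stddef.h>

    int
    main(void)
    {
        int     work_mem_kb = 3 * 1024 * 1024;     /* hypothetical 3GB setting */

        /*
         * Old pattern: the product is computed in type "long".  On LLP64
         * systems that is a 32-bit type, so a 3GB result overflows it
         * (undefined behavior, typically wrapping to a negative value).
         * On LP64 systems "long" is 64 bits and the bug is masked.
         */
        long    bytes_old = work_mem_kb * 1024L;

        /*
         * New pattern: one operand is cast to size_t (spelled "Size" in
         * PostgreSQL), so the multiplication is carried out in a 64-bit
         * type on both Win64 and LP64 platforms.
         */
        size_t  bytes_new = work_mem_kb * (size_t) 1024;

        printf("long:   %ld\n", bytes_old);
        printf("size_t: %zu\n", bytes_new);
        return 0;
    }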
Diffstat (limited to 'src/backend/optimizer')
-rw-r--r--  src/backend/optimizer/path/costsize.c   14
-rw-r--r--  src/backend/optimizer/plan/planner.c      2
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index ec004ed9493..73d78617009 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1903,7 +1903,7 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
double input_bytes = relation_byte_size(tuples, width);
double output_bytes;
double output_tuples;
- long sort_mem_bytes = sort_mem * 1024L;
+ int64 sort_mem_bytes = sort_mem * (int64) 1024;
/*
* We want to be sure the cost of a sort is never estimated as zero, even
@@ -2488,7 +2488,7 @@ cost_material(Path *path,
Cost startup_cost = input_startup_cost;
Cost run_cost = input_total_cost - input_startup_cost;
double nbytes = relation_byte_size(tuples, width);
- long work_mem_bytes = work_mem * 1024L;
+ double work_mem_bytes = work_mem * (Size) 1024;
path->rows = tuples;
@@ -4028,7 +4028,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
else if (enable_material && innersortkeys != NIL &&
relation_byte_size(inner_path_rows,
inner_path->pathtarget->width) >
- (work_mem * 1024L))
+ work_mem * (Size) 1024)
path->materialize_inner = true;
else
path->materialize_inner = false;
@@ -4663,7 +4663,7 @@ cost_rescan(PlannerInfo *root, Path *path,
Cost run_cost = cpu_tuple_cost * path->rows;
double nbytes = relation_byte_size(path->rows,
path->pathtarget->width);
- long work_mem_bytes = work_mem * 1024L;
+ double work_mem_bytes = work_mem * (Size) 1024;
if (nbytes > work_mem_bytes)
{
@@ -4690,7 +4690,7 @@ cost_rescan(PlannerInfo *root, Path *path,
Cost run_cost = cpu_operator_cost * path->rows;
double nbytes = relation_byte_size(path->rows,
path->pathtarget->width);
- long work_mem_bytes = work_mem * 1024L;
+ double work_mem_bytes = work_mem * (Size) 1024;
if (nbytes > work_mem_bytes)
{
@@ -6496,7 +6496,7 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
double pages_fetched;
double tuples_fetched;
double heap_pages;
- long maxentries;
+ double maxentries;
/*
* Fetch total cost of obtaining the bitmap, as well as its total
@@ -6527,7 +6527,7 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
* the bitmap at one time.)
*/
heap_pages = Min(pages_fetched, baserel->pages);
- maxentries = tbm_calculate_entries(work_mem * 1024L);
+ maxentries = tbm_calculate_entries(work_mem * (Size) 1024);
if (loop_count > 1)
{
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 8a474a50be7..ffd7517ea97 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -6887,7 +6887,7 @@ plan_create_index_workers(Oid tableOid, Oid indexOid)
* parallel worker to sort.
*/
while (parallel_workers > 0 &&
- maintenance_work_mem / (parallel_workers + 1) < 32768L)
+ maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
parallel_workers--;
done:
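A side note on the planner.c hunk (this explanation is mine, not part of the commit): 32768 kB is 32 MB, and the divisor counts the leader along with the workers, so the loop trims parallel_workers until every participant gets at least 32 MB of maintenance_work_mem; rewriting 32768L as 32 * 1024 simply drops the now-unneeded "long" suffix without changing the threshold. A hypothetical standalone rendering of that clamp:

    /*
     * Hypothetical standalone version of the clamp above, not PostgreSQL
     * source.  maintenance_work_mem is in kilobytes; each participant
     * (the leader plus each worker) should get at least 32 MB.
     *
     * Example: maintenance_work_mem = 65536 (64 MB), 4 requested workers:
     * 65536/5, 65536/4 and 65536/3 are all below 32768, but 65536/2 is
     * not, so the result is 1 worker.
     */
    static int
    clamp_index_build_workers(int maintenance_work_mem, int parallel_workers)
    {
        while (parallel_workers > 0 &&
               maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
            parallel_workers--;
        return parallel_workers;
    }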