author | Tom Lane <tgl@sss.pgh.pa.us> | 2016-03-23 20:22:08 -0400 |
---|---|---|
committer | Tom Lane <tgl@sss.pgh.pa.us> | 2016-03-23 20:22:08 -0400 |
commit | 2c6af4f44228d76d3351fe26f68b00b55cdd239a | (patch) |
tree | 6a5ce32658ba9ad51ea05d6f46ef0aaf44b1f244 | /src/backend/parser |
parent | 3df9c374e279db37b00cd9c86219471d0cdaa97c | (diff) |
download | postgresql-2c6af4f44228d76d3351fe26f68b00b55cdd239a.tar.gz | postgresql-2c6af4f44228d76d3351fe26f68b00b55cdd239a.zip |
Move keywords.c/kwlookup.c into src/common/.

Now that we have src/common/ for code shared between frontend and backend,
we can get rid of (most of) the klugy ways that the keyword table and
keyword lookup code were formerly shared between different uses.
This is a first step towards a more general plan of getting rid of
special-purpose kluges for sharing code in src/bin/.

I chose to merge kwlookup.c back into keywords.c, as it once was, and
always has been so far as keywords.h is concerned.  We could have
kept them separate, but there is no place that uses ScanKeywordLookup
without also wanting access to the backend's keyword list, so there
seems little point.
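
To make that coupling concrete, here is a minimal sketch of the usual caller
pattern: the lookup function is always consulted against the shared backend
keyword list.  The snippet is illustrative only and not part of this commit;
the helper name and the exact frontend header are assumptions.

#include "postgres_fe.h"		/* frontend build; backend callers use postgres.h */

#include "parser/keywords.h"	/* ScanKeyword, ScanKeywordLookup, ScanKeywords */

/* Hypothetical helper: is "word" a SQL keyword of any category? */
static bool
is_sql_keyword(const char *word)
{
	const ScanKeyword *kw;

	kw = ScanKeywordLookup(word, ScanKeywords, NumScanKeywords);
	return kw != NULL;
}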

ecpg is still a bit weird, but at least now the trickiness is documented.

I think that the MSVC build script should require no adjustments beyond
what's done here ... but we'll soon find out.

Diffstat (limited to 'src/backend/parser')
-rw-r--r-- | src/backend/parser/Makefile | 4
-rw-r--r-- | src/backend/parser/README | 6
-rw-r--r-- | src/backend/parser/keywords.c | 27
-rw-r--r-- | src/backend/parser/kwlookup.c | 89
4 files changed, 6 insertions, 120 deletions

diff --git a/src/backend/parser/Makefile b/src/backend/parser/Makefile
index 9cc8946fa1b..fdd8485cec5 100644
--- a/src/backend/parser/Makefile
+++ b/src/backend/parser/Makefile
@@ -12,7 +12,7 @@ include $(top_builddir)/src/Makefile.global

 override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS)

-OBJS= analyze.o gram.o scan.o keywords.o kwlookup.o parser.o \
+OBJS= analyze.o gram.o scan.o parser.o \
 	parse_agg.o parse_clause.o parse_coerce.o parse_collate.o parse_cte.o \
 	parse_expr.o parse_func.o parse_node.o parse_oper.o parse_param.o \
 	parse_relation.o parse_target.o parse_type.o parse_utilcmd.o scansup.o
@@ -44,7 +44,7 @@ scan.c: FLEX_NO_BACKUP=yes


 # Force these dependencies to be known even without dependency info built:
-gram.o scan.o keywords.o parser.o: gram.h
+gram.o scan.o parser.o: gram.h


 # gram.c, gram.h, and scan.c are in the distribution tarball, so they
diff --git a/src/backend/parser/README b/src/backend/parser/README
index 08625e427d2..6d8f19b5ca0 100644
--- a/src/backend/parser/README
+++ b/src/backend/parser/README
@@ -10,8 +10,6 @@ to the optimizer and then executor.
 parser.c	things start here
 scan.l		break query into tokens
 scansup.c	handle escapes in input strings
-kwlookup.c	turn keywords into specific tokens
-keywords.c	table of standard keywords (passed to kwlookup.c)
 gram.y		parse the tokens and produce a "raw" parse tree
 analyze.c	top level of parse analysis for optimizable queries
 parse_agg.c	handle aggregates, like SUM(col1), AVG(col2), ...
@@ -28,3 +26,7 @@ parse_relation.c	support routines for tables and column handling
 parse_target.c	handle the result list of the query
 parse_type.c	support routines for data type handling
 parse_utilcmd.c	parse analysis for utility commands (done at execution time)
+
+See also src/common/keywords.c, which contains the table of standard
+keywords and the keyword lookup function.  We separated that out because
+various frontend code wants to use it too.
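
As the new README text notes, the keyword table itself is built in
keywords.c by #including parser/kwlist.h under a suitable PG_KEYWORD
definition.  The block below is an illustrative expansion only, not part
of this diff; the array name is invented and just two entries are shown.

#include "postgres.h"

#include "parser/gramparse.h"	/* grammar token codes plus ScanKeyword */

/*
 * Each kwlist.h entry has the form PG_KEYWORD("name", TOKEN, category)
 * and, with PG_KEYWORD(a,b,c) defined as {a,b,c}, expands to one row of
 * the table.  The list must stay in alphabetical order because
 * ScanKeywordLookup does a binary search on the names.
 */
static const ScanKeyword example_expansion[] = {
	{"select", SELECT, RESERVED_KEYWORD},
	{"zone", ZONE, UNRESERVED_KEYWORD},
	/* ... kwlist.h supplies several hundred such entries ... */
};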

diff --git a/src/backend/parser/keywords.c b/src/backend/parser/keywords.c
deleted file mode 100644
index 7a4f1286905..00000000000
--- a/src/backend/parser/keywords.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * keywords.c
- *	  lexical token lookup for key words in PostgreSQL
- *
- *
- * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- *	  src/backend/parser/keywords.c
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "parser/gramparse.h"
-
-#define PG_KEYWORD(a,b,c) {a,b,c},
-
-
-const ScanKeyword ScanKeywords[] = {
-#include "parser/kwlist.h"
-};
-
-const int	NumScanKeywords = lengthof(ScanKeywords);
diff --git a/src/backend/parser/kwlookup.c b/src/backend/parser/kwlookup.c
deleted file mode 100644
index 4406fef37f4..00000000000
--- a/src/backend/parser/kwlookup.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * kwlookup.c
- *	  lexical token lookup for key words in PostgreSQL
- *
- * NB - this file is also used by ECPG and several frontend programs in
- * src/bin/ including pg_dump and psql
- *
- * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- *	  src/backend/parser/kwlookup.c
- *
- *-------------------------------------------------------------------------
- */
-
-/* use c.h so this can be built as either frontend or backend */
-#include "c.h"
-
-#include <ctype.h>
-
-#include "parser/keywords.h"
-
-/*
- * ScanKeywordLookup - see if a given word is a keyword
- *
- * Returns a pointer to the ScanKeyword table entry, or NULL if no match.
- *
- * The match is done case-insensitively.  Note that we deliberately use a
- * dumbed-down case conversion that will only translate 'A'-'Z' into 'a'-'z',
- * even if we are in a locale where tolower() would produce more or different
- * translations.  This is to conform to the SQL99 spec, which says that
- * keywords are to be matched in this way even though non-keyword identifiers
- * receive a different case-normalization mapping.
- */
-const ScanKeyword *
-ScanKeywordLookup(const char *text,
-				  const ScanKeyword *keywords,
-				  int num_keywords)
-{
-	int			len,
-				i;
-	char		word[NAMEDATALEN];
-	const ScanKeyword *low;
-	const ScanKeyword *high;
-
-	len = strlen(text);
-	/* We assume all keywords are shorter than NAMEDATALEN. */
-	if (len >= NAMEDATALEN)
-		return NULL;
-
-	/*
-	 * Apply an ASCII-only downcasing.  We must not use tolower() since it may
-	 * produce the wrong translation in some locales (eg, Turkish).
-	 */
-	for (i = 0; i < len; i++)
-	{
-		char		ch = text[i];
-
-		if (ch >= 'A' && ch <= 'Z')
-			ch += 'a' - 'A';
-		word[i] = ch;
-	}
-	word[len] = '\0';
-
-	/*
-	 * Now do a binary search using plain strcmp() comparison.
-	 */
-	low = keywords;
-	high = keywords + (num_keywords - 1);
-	while (low <= high)
-	{
-		const ScanKeyword *middle;
-		int			difference;
-
-		middle = low + (high - low) / 2;
-		difference = strcmp(middle->name, word);
-		if (difference == 0)
-			return middle;
-		else if (difference < 0)
-			low = middle + 1;
-		else
-			high = middle - 1;
-	}
-
-	return NULL;
-}
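
For context, this is roughly how the lexer consumes the lookup above: the
matched entry's value field is the grammar's token code.  The wrapper below
is a simplified sketch with invented names (keyword_or_ident_token,
ident_token); it is not the literal scan.l code.

#include "postgres.h"

#include "parser/keywords.h"	/* ScanKeyword, ScanKeywordLookup, ScanKeywords */

/* Hypothetical wrapper mirroring the scanner's use of the keyword table. */
static int
keyword_or_ident_token(const char *ident, int ident_token)
{
	const ScanKeyword *keyword = ScanKeywordLookup(ident,
												   ScanKeywords,
												   NumScanKeywords);

	if (keyword != NULL)
		return keyword->value;	/* keyword's grammar token code */

	return ident_token;			/* not a keyword: treat as a plain identifier */
}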