1
0
Fork 0

Remove pandas dependency by storing database metadata as lists of tuples

This commit is contained in:
Darik Gamble 2015-01-25 11:39:03 -05:00
parent 7e7051fef7
commit 830d1beaa8
10 changed files with 202 additions and 301 deletions

View File

@ -267,10 +267,10 @@ class PGCli(object):
pgexecute = self.pgexecute
schemata, tables, columns = pgexecute.get_metadata()
completer.extend_schemata(schemata)
completer.extend_tables(tables)
completer.extend_columns(columns)
completer.set_search_path(pgexecute.search_path())
completer.extend_schemata(pgexecute.schemata())
completer.extend_tables(pgexecute.tables())
completer.extend_columns(pgexecute.columns())
completer.extend_database_names(pgexecute.databases())
def get_completions(self, text, cursor_positition):

View File

@ -1,7 +1,6 @@
from __future__ import print_function
import re
import sqlparse
from pandas import DataFrame
from sqlparse.sql import IdentifierList, Identifier, Function
from sqlparse.tokens import Keyword, DML, Punctuation
@ -132,12 +131,12 @@ def extract_table_identifiers(token_stream):
def extract_tables(sql):
    """Extract the table names from an SQL statement.

    :param sql: string containing a single SQL statement
    :return: list of (schema, table, alias) tuples; schema and alias
        entries may be None when not present in the statement
    """
    parsed = sqlparse.parse(sql)
    if not parsed:
        # Nothing parseable (e.g. the empty string)
        return []

    # INSERT statements must stop looking for tables at the sign of first
    # Punctuation. eg: INSERT INTO abc (col1, col2) VALUES (1, 2)
    # Otherwise, we'll identify abc, col1 and col2 as table names.
    insert_stmt = parsed[0].token_first().value.lower() == 'insert'
    stream = extract_from_part(parsed[0], stop_at_punctuation=insert_stmt)
    return list(extract_table_identifiers(stream))
def find_prev_keyword(sql):
if not sql.strip():

View File

@ -78,12 +78,12 @@ def suggest_based_on_last_token(token, text_before_cursor, full_text):
elif token_v.lower() in ('from', 'update', 'into', 'describe', 'join', 'table'):
return [{'type': 'schema'}, {'type': 'table', 'schema': []}]
elif token_v.lower() == 'on':
tables = extract_tables(full_text)
tables = extract_tables(full_text) # [(schema, table, alias), ...]
# Use table alias if there is one, otherwise the table name
alias = tables['alias'].where(tables['alias'].notnull(), tables['table'])
alias = [t[2] or t[1] for t in tables]
return [{'type': 'alias', 'aliases': list(alias)}]
return [{'type': 'alias', 'aliases': alias}]
elif token_v in ('d',): # \d
return [{'type': 'schema'}, {'type': 'table', 'schema': []}]
@ -102,9 +102,8 @@ def suggest_based_on_last_token(token, text_before_cursor, full_text):
# TABLE.<suggestion> or SCHEMA.TABLE.<suggestion>
tables = extract_tables(full_text)
tables = get_matching_tables(tables, identifier)
suggestions.append({'type': 'column',
'tables': tables[['schema', 'table', 'alias']]})
tables = [t for t in tables if identifies(identifier, *t)]
suggestions.append({'type': 'column', 'tables': tables})
# SCHEMA.<suggestion>
suggestions.append({'type': 'table', 'schema': identifier})
@ -113,20 +112,7 @@ def suggest_based_on_last_token(token, text_before_cursor, full_text):
return [{'type': 'keyword'}]
def identifies(id, schema, table, alias):
    """Return whether *id* refers to the given table.

    An identifier matches if it equals the table's alias, the bare table
    name, or (when a schema is present) the qualified "schema.table" name.
    """
    return id == alias or id == table or (
        schema and (id == schema + '.' + table))

View File

@ -1,11 +1,10 @@
from __future__ import print_function
import logging
from collections import defaultdict
from prompt_toolkit.completion import Completer, Completion
from .packages.sqlcompletion import suggest_type
from .packages.parseutils import last_word
from re import compile
from pandas import DataFrame
_logger = logging.getLogger(__name__)
@ -35,11 +34,9 @@ class PGCompleter(Completer):
'UCASE']
special_commands = []
databases = []
schemata = DataFrame({}, columns=['schema'])
tables = DataFrame({}, columns=['schema', 'table', 'alias'])
columns = DataFrame({}, columns=['schema', 'table', 'column'])
dbmetadata = {}
search_path = []
all_completions = set(keywords + functions)
@ -52,7 +49,7 @@ class PGCompleter(Completer):
self.name_pattern = compile("^[_a-z][_a-z0-9\$]*$")
def escape_name(self, name):
if name and not name=='*' and ((not self.name_pattern.match(name))
if name and ((not self.name_pattern.match(name))
or (name.upper() in self.reserved_words)
or (name.upper() in self.functions)):
name = '"%s"' % name
@ -82,41 +79,44 @@ class PGCompleter(Completer):
self.keywords.extend(additional_keywords)
self.all_completions.update(additional_keywords)
def extend_schemata(self, schemata):
    """Register schema names for completion.

    :param schemata: iterable of schema names
    """
    schemata = self.escaped_names(schemata)
    for schema in schemata:
        # Each schema maps table names to lists of column names
        self.dbmetadata[schema] = {}
    self.all_completions.update(schemata)
def extend_tables(self, table_data):
    """Register tables under their owning schemas.

    :param table_data: list of (schema_name, table_name) tuples
    """
    table_data = [self.escaped_names(d) for d in table_data]

    # dbmetadata['schema_name']['table_name'] should be a list of column
    # names. Default to an asterisk
    for schema, table in table_data:
        self.dbmetadata[schema][table] = ['*']

    self.all_completions.update(t[1] for t in table_data)
def extend_columns(self, column_data):
    """Register column names for already-registered tables.

    :param column_data: list of (schema_name, table_name, column_name)
        tuples
    """
    column_data = [self.escaped_names(d) for d in column_data]
    for schema, table, column in column_data:
        # NOTE(review): raises KeyError if the table was not registered
        # via extend_tables first -- assumed calling convention; confirm
        self.dbmetadata[schema][table].append(column)
    self.all_completions.update(t[2] for t in column_data)
def set_search_path(self, search_path):
    """Store the current schema search path (names escaped) for completion."""
    self.search_path = self.escaped_names(search_path)
def reset_completions(self):
    """Discard all database-derived completion state, keeping keywords."""
    self.databases = []
    self.search_path = []
    self.dbmetadata = {}
    self.all_completions = set(self.keywords)
@staticmethod
@ -155,17 +155,23 @@ class PGCompleter(Completer):
completions.extend(funcs)
elif suggestion['type'] == 'schema':
schema_names = self.schemata['schema']
schema_names = self.dbmetadata.keys()
schema_names = self.find_matches(word_before_cursor, schema_names)
completions.extend(schema_names)
elif suggestion['type'] == 'table':
meta = self.tables
if suggestion['schema']:
tables = meta.table[meta.schema == suggestion['schema']]
try:
tables = self.dbmetadata[suggestion['schema']].keys()
except KeyError:
#schema doesn't exist
tables = []
else:
tables = meta.table[meta.is_visible]
schemas = self.search_path
meta = self.dbmetadata
tables = [tbl for schema in schemas
for tbl in meta[schema].keys()]
tables = self.find_matches(word_before_cursor, tables)
completions.extend(tables)
@ -186,25 +192,34 @@ class PGCompleter(Completer):
def populate_scoped_cols(self, scoped_tbls):
    """Find all columns in a set of scoped_tables.

    :param scoped_tbls: list of (schema, table, alias) tuples
    :return: list of column names
    """
    columns = []
    meta = self.dbmetadata

    for tbl in scoped_tbls:
        # Escape the table name once; it is needed on every lookup below
        table = self.escape_name(tbl[1])
        if tbl[0]:
            # A fully qualified schema.table reference
            schema = self.escape_name(tbl[0])
            try:
                # Get columns from the corresponding schema.table
                columns.extend(meta[schema][table])
            except KeyError:
                # Either the schema or table doesn't exist
                pass
        else:
            # Unqualified name: resolve against the search path, taking
            # the first schema that contains the table
            for schema in self.search_path:
                try:
                    columns.extend(meta[schema][table])
                    break
                except KeyError:
                    pass

    return columns

View File

@ -3,7 +3,6 @@ import psycopg2
import psycopg2.extras
import psycopg2.extensions
import sqlparse
from pandas import DataFrame
from .packages import pgspecial
_logger = logging.getLogger(__name__)
@ -24,16 +23,19 @@ psycopg2.extensions.set_wait_callback(psycopg2.extras.wait_select)
class PGExecute(object):
search_path_query = '''
SELECT * FROM unnest(current_schemas(false))'''
schemata_query = '''
SELECT nspname
FROM pg_catalog.pg_namespace
WHERE nspname !~ '^pg_'
AND nspname <> 'information_schema' '''
AND nspname <> 'information_schema'
ORDER BY 1 '''
tables_query = '''
SELECT n.nspname schema_name,
c.relname table_name,
pg_catalog.pg_table_is_visible(c.oid) is_visible
c.relname table_name
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n
ON n.oid = c.relnamespace
@ -139,34 +141,37 @@ class PGExecute(object):
_logger.debug('No rows in result.')
return (None, None, cur.statusmessage)
def search_path(self):
    """Returns the current search path as a list of schema names"""
    with self.conn.cursor() as cur:
        _logger.debug('Search path query. sql: %r', self.search_path_query)
        cur.execute(self.search_path_query)
        # Each row is a 1-tuple containing a schema name
        return [x[0] for x in cur.fetchall()]
def schemata(self):
    """Returns a list of schema names in the database"""
    with self.conn.cursor() as cur:
        _logger.debug('Schemata Query. sql: %r', self.schemata_query)
        cur.execute(self.schemata_query)
        # Each row is a 1-tuple containing a schema name
        return [x[0] for x in cur.fetchall()]
def tables(self):
    """Returns a list of (schema_name, table_name) tuples """
    with self.conn.cursor() as cur:
        _logger.debug('Tables Query. sql: %r', self.tables_query)
        cur.execute(self.tables_query)
        return cur.fetchall()
def columns(self):
    """Returns a list of (schema_name, table_name, column_name) tuples"""
    with self.conn.cursor() as cur:
        _logger.debug('Columns Query. sql: %r', self.columns_query)
        cur.execute(self.columns_query)
        return cur.fetchall()
def databases(self):
with self.conn.cursor() as cur:

View File

@ -28,8 +28,7 @@ setup(
'jedi == 0.8.1', # Temporary fix for installation woes.
'prompt_toolkit==0.26',
'psycopg2 >= 2.5.4',
'sqlparse >= 0.1.14',
'pandas >= 0.15.0'
'sqlparse >= 0.1.14'
],
entry_points='''
[console_scripts]

View File

@ -4,114 +4,81 @@ from pgcli.packages.parseutils import extract_tables
def test_empty_string():
    tables = extract_tables('')
    assert tables == []


def test_simple_select_single_table():
    tables = extract_tables('select * from abc')
    assert tables == [(None, 'abc', None)]


def test_simple_select_single_table_schema_qualified():
    tables = extract_tables('select * from abc.def')
    assert tables == [('abc', 'def', None)]


def test_simple_select_multiple_tables():
    tables = extract_tables('select * from abc, def')
    assert sorted(tables) == [(None, 'abc', None), (None, 'def', None)]


def test_simple_select_multiple_tables_schema_qualified():
    tables = extract_tables('select * from abc.def, ghi.jkl')
    assert sorted(tables) == [('abc', 'def', None), ('ghi', 'jkl', None)]


def test_simple_select_with_cols_single_table():
    tables = extract_tables('select a,b from abc')
    assert tables == [(None, 'abc', None)]


def test_simple_select_with_cols_single_table_schema_qualified():
    tables = extract_tables('select a,b from abc.def')
    assert tables == [('abc', 'def', None)]


def test_simple_select_with_cols_multiple_tables():
    tables = extract_tables('select a,b from abc, def')
    assert sorted(tables) == [(None, 'abc', None), (None, 'def', None)]


# Renamed: this duplicated test_simple_select_with_cols_multiple_tables,
# which would have shadowed the previous test under pytest collection
def test_simple_select_with_cols_multiple_qualified_tables():
    tables = extract_tables('select a,b from abc.def, def.ghi')
    assert sorted(tables) == [('abc', 'def', None), ('def', 'ghi', None)]


def test_select_with_hanging_comma_single_table():
    tables = extract_tables('select a, from abc')
    assert tables == [(None, 'abc', None)]


def test_select_with_hanging_comma_multiple_tables():
    tables = extract_tables('select a, from abc, def')
    assert sorted(tables) == [(None, 'abc', None), (None, 'def', None)]


def test_select_with_hanging_period_multiple_tables():
    tables = extract_tables('SELECT t1. FROM tabl1 t1, tabl2 t2')
    assert sorted(tables) == [(None, 'tabl1', 't1'), (None, 'tabl2', 't2')]


def test_simple_insert_single_table():
    tables = extract_tables('insert into abc (id, name) values (1, "def")')

    # sqlparse mistakenly assigns an alias to the table
    # assert tables == [(None, 'abc', None)]
    assert tables == [(None, 'abc', 'abc')]


@pytest.mark.xfail
def test_simple_insert_single_table_schema_qualified():
    tables = extract_tables('insert into abc.def (id, name) values (1, "def")')
    assert tables == [('abc', 'def', None)]


def test_simple_update_table():
    tables = extract_tables('update abc set id = 1')
    assert tables == [(None, 'abc', None)]


# Renamed: this duplicated test_simple_update_table, which would have
# shadowed the previous test under pytest collection
def test_simple_update_table_schema_qualified():
    tables = extract_tables('update abc.def set id = 1')
    assert tables == [('abc', 'def', None)]


def test_join_table():
    tables = extract_tables('SELECT * FROM abc a JOIN def d ON a.id = d.num')
    assert sorted(tables) == [(None, 'abc', 'a'), (None, 'def', 'd')]


def test_join_table_schema_qualified():
    tables = extract_tables('SELECT * FROM abc.def x JOIN ghi.jkl y ON x.id = y.num')
    assert tables == [('abc', 'def', 'x'), ('ghi', 'jkl', 'y')]


def test_join_as_table():
    tables = extract_tables('SELECT * FROM my_table AS m WHERE m.a > 5')
    assert tables == [(None, 'my_table', 'm')]

View File

@ -23,18 +23,15 @@ def test_schemata_table_and_columns_query(executor):
run(executor, "create table schema1.c (w text)")
run(executor, "create schema schema2")
schemata, tables, columns = executor.get_metadata()
assert schemata.to_dict('list') == {
'schema': ['public', 'schema1', 'schema2']}
assert tables.to_dict('list') == {
'schema': ['public', 'public', 'schema1'],
'table': ['a', 'b', 'c'],
'is_visible': [True, True, False]}
assert executor.schemata() == ['public', 'schema1', 'schema2']
assert executor.tables() == [
('public', 'a'), ('public', 'b'), ('schema1', 'c')]
assert columns.to_dict('list') == {
'schema': ['public', 'public', 'public', 'schema1'],
'table': ['a', 'a', 'b', 'c'],
'column': ['x', 'y', 'z', 'w']}
assert executor.columns() == [
('public', 'a', 'x'), ('public', 'a', 'y'),
('public', 'b', 'z'), ('schema1', 'c', 'w')]
assert executor.search_path() == ['public']
@dbtest
def test_database_list(executor):

View File

@ -1,9 +1,8 @@
import pytest
from pandas import DataFrame
from prompt_toolkit.completion import Completion
from prompt_toolkit.document import Document
schemata = {
metadata = {
'public': {
'users': ['id', 'email', 'first_name', 'last_name'],
'orders': ['id', 'ordered_date', 'status'],
@ -21,24 +20,19 @@ def completer():
import pgcli.pgcompleter as pgcompleter
comp = pgcompleter.PGCompleter(smart_completion=True)
# Table metadata is a dataframe with columns [schema, table, is_visible]
tables = DataFrame.from_records(
((schema, table, schema=='public')
for schema, tables in schemata.items()
for table, columns in tables.items()),
columns=['schema', 'table', 'is_visible'])
schemata, tables, columns = [], [], []
# Column metadata is a dataframe with columns [schema, table, column]
columns = DataFrame.from_records(
((schema, table, column)
for schema, tables in schemata.items()
for table, columns in tables.items()
for column in columns),
columns=['schema', 'table', 'column'])
for schema, tbls in metadata.items():
schemata.append(schema)
comp.extend_schemata(tables[['schema']].drop_duplicates())
for table, cols in tbls.items():
tables.append((schema, table))
columns.extend([(schema, table, col) for col in cols])
comp.extend_schemata(schemata)
comp.extend_tables(tables)
comp.extend_columns(columns)
comp.set_search_path(['public'])
return comp

View File

@ -1,82 +1,57 @@
from pgcli.packages.sqlcompletion import suggest_type
import pytest
# The assert_equals helper (which converted DataFrames to dicts) is no
# longer needed now that suggestions carry plain lists of tuples.


def test_select_suggests_cols_with_visible_table_scope():
    suggestions = suggest_type('SELECT FROM tabl', 'SELECT ')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': [(None, 'tabl', None)]},
        {'type': 'function'}])


def test_select_suggests_cols_with_qualified_table_scope():
    suggestions = suggest_type('SELECT FROM sch.tabl', 'SELECT ')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': [('sch', 'tabl', None)]},
        {'type': 'function'}])


def test_where_suggests_columns_functions():
    suggestions = suggest_type('SELECT * FROM tabl WHERE ',
                               'SELECT * FROM tabl WHERE ')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': [(None, 'tabl', None)]},
        {'type': 'function'}])


def test_lparen_suggests_cols():
    suggestion = suggest_type('SELECT MAX( FROM tbl', 'SELECT MAX(')
    assert suggestion == [
        {'type': 'column', 'tables': [(None, 'tbl', None)]}]


def test_select_suggests_cols_and_funcs():
    suggestions = suggest_type('SELECT ', 'SELECT ')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': []},
        {'type': 'function'}])


def test_from_suggests_tables_and_schemas():
    suggestions = suggest_type('SELECT * FROM ', 'SELECT * FROM ')
    assert sorted(suggestions) == sorted([
        {'type': 'table', 'schema': []},
        {'type': 'schema'}])


def test_distinct_suggests_cols():
    suggestions = suggest_type('SELECT DISTINCT ', 'SELECT DISTINCT ')
    assert suggestions == [{'type': 'column', 'tables': []}]


def test_col_comma_suggests_cols():
    suggestions = suggest_type('SELECT a, b, FROM tbl', 'SELECT a, b,')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': [(None, 'tbl', None)]},
        {'type': 'function'}])


def test_table_comma_suggests_tables_and_schemas():
    suggestions = suggest_type('SELECT a, b FROM tbl1, ',
                               'SELECT a, b FROM tbl1, ')
    assert sorted(suggestions) == sorted([
        {'type': 'table', 'schema': []},
        {'type': 'schema'}])
def test_into_suggests_tables_and_schemas():
@ -87,68 +62,42 @@ def test_into_suggests_tables_and_schemas():
def test_insert_into_lparen_suggests_cols():
    suggestions = suggest_type('INSERT INTO abc (', 'INSERT INTO abc (')
    assert suggestions == [{'type': 'column', 'tables': [(None, 'abc', None)]}]


def test_insert_into_lparen_partial_text_suggests_cols():
    suggestions = suggest_type('INSERT INTO abc (i', 'INSERT INTO abc (i')
    assert suggestions == [{'type': 'column', 'tables': [(None, 'abc', None)]}]


def test_insert_into_lparen_comma_suggests_cols():
    suggestions = suggest_type('INSERT INTO abc (id,', 'INSERT INTO abc (id,')
    assert suggestions == [{'type': 'column', 'tables': [(None, 'abc', None)]}]


def test_partially_typed_col_name_suggests_col_names():
    suggestions = suggest_type('SELECT * FROM tabl WHERE col_n',
                               'SELECT * FROM tabl WHERE col_n')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': [(None, 'tabl', None)]},
        {'type': 'function'}])


def test_dot_suggests_cols_of_a_table_or_schema_qualified_table():
    suggestions = suggest_type('SELECT tabl. FROM tabl', 'SELECT tabl.')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': [(None, 'tabl', None)]},
        {'type': 'table', 'schema': 'tabl'}])


def test_dot_suggests_cols_of_an_alias():
    suggestions = suggest_type('SELECT t1. FROM tabl1 t1, tabl2 t2',
                               'SELECT t1.')
    assert sorted(suggestions) == sorted([
        {'type': 'table', 'schema': 't1'},
        {'type': 'column', 'tables': [(None, 'tabl1', 't1')]}])


def test_dot_col_comma_suggests_cols_or_schema_qualified_table():
    suggestions = suggest_type('SELECT t1.a, t2. FROM tabl1 t1, tabl2 t2',
                               'SELECT t1.a, t2.')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': [(None, 'tabl2', 't2')]},
        {'type': 'table', 'schema': 't2'}])
def test_sub_select_suggests_keyword():
suggestion = suggest_type('SELECT * FROM (', 'SELECT * FROM (')
@ -167,27 +116,24 @@ def test_sub_select_table_name_completion():
def test_sub_select_col_name_completion():
    suggestions = suggest_type('SELECT * FROM (SELECT FROM abc',
                               'SELECT * FROM (SELECT ')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': [(None, 'abc', None)]},
        {'type': 'function'}])


@pytest.mark.xfail
def test_sub_select_multiple_col_name_completion():
    suggestions = suggest_type('SELECT * FROM (SELECT a, FROM abc',
                               'SELECT * FROM (SELECT a, ')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': [(None, 'abc', None)]},
        {'type': 'function'}])


def test_sub_select_dot_col_name_completion():
    suggestions = suggest_type('SELECT * FROM (SELECT t. FROM tabl t',
                               'SELECT * FROM (SELECT t.')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': [(None, 'tabl', 't')]},
        {'type': 'table', 'schema': 't'}])
def test_join_suggests_tables_and_schemas():
suggestion = suggest_type('SELECT * FROM abc a JOIN ',
@ -199,43 +145,37 @@ def test_join_suggests_tables_and_schemas():
def test_join_alias_dot_suggests_cols1():
    suggestions = suggest_type('SELECT * FROM abc a JOIN def d ON a.',
                               'SELECT * FROM abc a JOIN def d ON a.')
    assert sorted(suggestions) == sorted([
        {'type': 'column', 'tables': [(None, 'abc', 'a')]},
        {'type': 'table', 'schema': 'a'}])


def test_join_alias_dot_suggests_cols2():
    suggestion = suggest_type('SELECT * FROM abc a JOIN def d ON a.',
                              'SELECT * FROM abc a JOIN def d ON a.id = d.')
    assert sorted(suggestion) == sorted([
        {'type': 'column', 'tables': [(None, 'def', 'd')]},
        {'type': 'table', 'schema': 'd'}])


def test_on_suggests_aliases():
    suggestions = suggest_type(
        'select a.x, b.y from abc a join bcd b on ',
        'select a.x, b.y from abc a join bcd b on ')
    assert suggestions == [{'type': 'alias', 'aliases': ['a', 'b']}]


def test_on_suggests_tables():
    suggestions = suggest_type(
        'select abc.x, bcd.y from abc join bcd on ',
        'select abc.x, bcd.y from abc join bcd on ')
    assert suggestions == [{'type': 'alias', 'aliases': ['abc', 'bcd']}]


def test_on_suggests_aliases_right_side():
    suggestions = suggest_type(
        'select a.x, b.y from abc a join bcd b on a.id = ',
        'select a.x, b.y from abc a join bcd b on a.id = ')
    assert suggestions == [{'type': 'alias', 'aliases': ['a', 'b']}]


def test_on_suggests_tables_right_side():
    suggestions = suggest_type(
        'select abc.x, bcd.y from abc join bcd on ',
        'select abc.x, bcd.y from abc join bcd on ')
    assert suggestions == [{'type': 'alias', 'aliases': ['abc', 'bcd']}]