Skip to content

Commit

Permalink
update vendor qlbridge
Browse files Browse the repository at this point in the history
  • Loading branch information
araddon committed Dec 24, 2017
1 parent ec2c239 commit 3312170
Show file tree
Hide file tree
Showing 17 changed files with 54 additions and 48 deletions.
30 changes: 15 additions & 15 deletions Gopkg.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 4 additions & 4 deletions backends/bigquery/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,15 +27,15 @@ docker run -e "GOOGLE_APPLICATION_CREDENTIALS=/.config/gcloud/application_defaul
mysql -h 127.0.0.1 -P4000


-- Create a new schema = "bq" with one source being
-- a bigquery public dataset is used
-- you must provide your billing account
-- Create a new schema = "bq" with one source
-- a bigquery public dataset is the only source of tables
-- replace BIGQUERY_PROJECT with your billing-account project id

CREATE source `BIGQUERY_PROJECT` WITH {
"type":"bigquery",
"schema":"bq",
"table_aliases" : {
"bikeshare_stations" : "bigquery-public-data:san_francisco.bikeshare_stations"
"bikeshare_stations" : "bigquery-public-data.san_francisco.bikeshare_stations"
},
"settings" {
"billing_project" : "BIGQUERY_PROJECT",
Expand Down
2 changes: 1 addition & 1 deletion backends/bigquery/bq_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ func RunTestServer(t *testing.T) func() {
"schema":"datauxtest",
"type": "bigquery",
"table_aliases" : {
"bikeshare_stations" : "bigquery-public-data:san_francisco.bikeshare_stations"
"bikeshare_stations" : "bigquery-public-data.san_francisco.bikeshare_stations"
},
"settings" : {
"billing_project" : "",
Expand Down
7 changes: 4 additions & 3 deletions backends/bigquery/resultreader.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ func (m *ResultReader) buildProjection() {
if sql.Star {
// Select Each field, grab fields from Table Schema
for _, fld := range m.Req.tbl.Fields {
cols = append(cols, rel.NewResultColumn(fld.Name, len(cols), nil, fld.Type))
cols = append(cols, rel.NewResultColumn(fld.Name, len(cols), nil, fld.ValueType()))
}
} else if sql.CountStar() {
// Count *
Expand All @@ -71,7 +71,7 @@ func (m *ResultReader) buildProjection() {
for _, col := range m.Req.sel.Columns {
if fld, ok := m.Req.tbl.FieldMap[col.SourceField]; ok {
//u.Debugf("column: %#v", col)
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.Type))
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.ValueType()))
} else {
u.Debugf("Could not find: '%v' in %#v", col.SourceField, m.Req.tbl.FieldMap)
//u.Warnf("%#v", col)
Expand Down Expand Up @@ -131,8 +131,9 @@ func (m *ResultReader) Run() error {
return err
}

bqWriter := expr.NewDialectWriter('"', '[')
bqWriter := expr.NewDialectWriter('"', '`')
sel.WriteDialect(bqWriter)
u.Infof("%s", bqWriter.String())
q := client.Query(bqWriter.String())

ctx := context.Background()
Expand Down
2 changes: 1 addition & 1 deletion backends/bigquery/source.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ func (m *Source) loadSchema() error {
for alias, tableToLoad := range m.schema.Conf.TableAliases {
u.Warnf("table alias: %q : %q", alias, tableToLoad)
if !strings.HasPrefix(tableToLoad, "[") {
m.schema.Conf.TableAliases[alias] = fmt.Sprintf("[%s]", tableToLoad)
m.schema.Conf.TableAliases[alias] = fmt.Sprintf("%s", tableToLoad)
}
}
}
Expand Down
4 changes: 3 additions & 1 deletion backends/bigquery/sql_to_bq.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ var (
_ exec.ExecutorSource = (*SqlToBQ)(nil)
_ schema.ConnMutation = (*SqlToBQ)(nil)

// Timeout default for BigQuery queries
Timeout = 10 * time.Second
globalCtx = context.Background()
)
Expand Down Expand Up @@ -85,6 +86,7 @@ func (m *SqlToBQ) queryRewrite(original *rel.SqlSelect) error {
fqn := m.schema.Conf.TableAliases[strings.ToLower(from.Name)]
if fqn != "" {
from.Name = fqn
u.Warnf("got a from: %q", fqn)
}
}
}
Expand Down Expand Up @@ -225,7 +227,7 @@ func (m *SqlToBQ) Put(ctx context.Context, key schema.Key, val interface{}) (sch
case string, []byte, int, int64, bool, time.Time:
row.vals[f.Name] = val
case []value.Value:
switch f.Type {
switch f.ValueType() {
case value.StringsType:
vals := make([]string, len(val))
for si, sv := range val {
Expand Down
4 changes: 2 additions & 2 deletions backends/bigtable/resultreader.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ func (m *ResultReader) buildProjection() {
if sql.Star {
// Select Each field, grab fields from Table Schema
for _, fld := range m.Req.tbl.Fields {
cols = append(cols, rel.NewResultColumn(fld.Name, len(cols), nil, fld.Type))
cols = append(cols, rel.NewResultColumn(fld.Name, len(cols), nil, fld.ValueType()))
}
} else if sql.CountStar() {
// Count *
Expand All @@ -72,7 +72,7 @@ func (m *ResultReader) buildProjection() {
for _, col := range m.Req.sel.Columns {
if fld, ok := m.Req.tbl.FieldMap[col.SourceField]; ok {
//u.Debugf("column: %#v", col)
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.Type))
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.ValueType()))
} else {
u.Debugf("Could not find: '%v' in %#v", col.SourceField, m.Req.tbl.FieldMap)
u.Warnf("%#v", col)
Expand Down
4 changes: 2 additions & 2 deletions backends/bigtable/sql_to_bt.go
Original file line number Diff line number Diff line change
Expand Up @@ -298,7 +298,7 @@ func (m *SqlToBT) Put(ctx context.Context, key schema.Key, val interface{}) (sch
case string, []byte, int, int64, bool, time.Time:
curRow[i] = val
case []value.Value:
switch f.Type {
switch f.ValueType() {
case value.StringsType:
vals := make([]string, len(val))
for si, sv := range val {
Expand Down Expand Up @@ -561,7 +561,7 @@ func (m *SqlToBT) walkFilterBinary(node *expr.BinaryNode) (expr.Node, error) {
case lex.TokenEqual, lex.TokenEqualEqual, lex.TokenNE:
return node, nil
case lex.TokenLE, lex.TokenLT, lex.TokenGE, lex.TokenGT:
if !col.Type.IsNumeric() {
if !col.ValueType().IsNumeric() {
return nil, fmt.Errorf("%s Operator can only act on Numeric Column: [%s]", node.Operator.T, node)
}
return node, nil
Expand Down
4 changes: 2 additions & 2 deletions backends/cassandra/resultreader.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ func (m *ResultReader) buildProjection() {
if sql.Star {
// Select Each field, grab fields from Table Schema
for _, fld := range m.Req.tbl.Fields {
cols = append(cols, rel.NewResultColumn(fld.Name, len(cols), nil, fld.Type))
cols = append(cols, rel.NewResultColumn(fld.Name, len(cols), nil, fld.ValueType()))
}
} else if sql.CountStar() {
// Count *
Expand All @@ -71,7 +71,7 @@ func (m *ResultReader) buildProjection() {
for _, col := range m.Req.sel.Columns {
if fld, ok := m.Req.tbl.FieldMap[col.SourceField]; ok {
//u.Debugf("column: %#v", col)
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.Type))
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.ValueType()))
} else {
u.Debugf("Could not find: '%v' in %#v", col.SourceField, m.Req.tbl.FieldMap)
u.Warnf("%#v", col)
Expand Down
8 changes: 4 additions & 4 deletions backends/cassandra/source.go
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,7 @@ func (m *Source) loadSchema() error {
f = schema.NewFieldBase(colName, value.StringsType, 256, "[]string")
case gocql.TypeInt, gocql.TypeBigInt, gocql.TypeTinyInt:
f = schema.NewFieldBase(colName, value.SliceValueType, 256, "[]int")
f.NativeType = value.IntType
f.NativeType = uint32(value.IntType)
default:
u.Warnf("SET TYPE CASSANDRA Not handled very well?! %v \n%v", nt.Type(), nt.NativeType.Type())
}
Expand All @@ -230,13 +230,13 @@ func (m *Source) loadSchema() error {
switch col.Type.Type() {
case gocql.TypeVarchar, gocql.TypeText:
f = schema.NewFieldBase(colName, value.MapStringType, 256, "map[string]string")
f.NativeType = value.MapStringType
f.NativeType = uint32(value.MapStringType)
case gocql.TypeInt, gocql.TypeBigInt, gocql.TypeTinyInt:
f = schema.NewFieldBase(colName, value.MapIntType, 256, "map[string]string")
f.NativeType = value.MapStringType
f.NativeType = uint32(value.MapStringType)
case gocql.TypeTimestamp, gocql.TypeTime, gocql.TypeDate:
f = schema.NewFieldBase(colName, value.MapTimeType, 256, "map[string]time")
f.NativeType = value.MapTimeType
f.NativeType = uint32(value.MapTimeType)
}
u.Warnf("MAP TYPE CASSANDRA Not handled very well?!")
default:
Expand Down
4 changes: 2 additions & 2 deletions backends/cassandra/sql_to_cql.go
Original file line number Diff line number Diff line change
Expand Up @@ -292,7 +292,7 @@ func (m *SqlToCql) Put(ctx context.Context, key schema.Key, val interface{}) (sc
case string, []byte, int, int64, bool, time.Time:
curRow[i] = val
case []value.Value:
switch f.Type {
switch f.ValueType() {
case value.StringsType:
vals := make([]string, len(val))
for si, sv := range val {
Expand Down Expand Up @@ -551,7 +551,7 @@ func (m *SqlToCql) walkFilterBinary(node *expr.BinaryNode) (expr.Node, error) {
case lex.TokenEqual, lex.TokenEqualEqual, lex.TokenNE:
return node, nil
case lex.TokenLE, lex.TokenLT, lex.TokenGE, lex.TokenGT:
if !col.Type.IsNumeric() {
if !col.ValueType().IsNumeric() {
return nil, fmt.Errorf("%s Operator can only act on Numeric Column: [%s]", node.Operator.T, node)
}
return node, nil
Expand Down
4 changes: 2 additions & 2 deletions backends/datastore/resultreader.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ func (m *ResultReader) buildProjection() {
if sql.Star {
// Select Each field, grab fields from Table Schema
for _, fld := range m.Req.tbl.Fields {
cols = append(cols, rel.NewResultColumn(fld.Name, len(cols), nil, fld.Type))
cols = append(cols, rel.NewResultColumn(fld.Name, len(cols), nil, fld.ValueType()))
}
} else if sql.CountStar() {
// Count *
Expand All @@ -71,7 +71,7 @@ func (m *ResultReader) buildProjection() {
for _, col := range m.Req.sel.Columns {
if fld, ok := m.Req.tbl.FieldMap[col.SourceField]; ok {
//u.Debugf("column: %#v", col)
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.Type))
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.ValueType()))
} else {
u.Debugf("Could not find: '%v' in %#v", col.SourceField, m.Req.tbl.FieldMap)
u.Warnf("%#v", col)
Expand Down
6 changes: 3 additions & 3 deletions backends/elasticsearch/esresults.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ func (m *ResultReader) buildProjection() {
// Select Each field, grab fields from Table Schema
for _, fld := range m.Req.tbl.Fields {
//u.Infof("found %#v", fld)
cols = append(cols, rel.NewResultColumn(fld.Name, len(cols), nil, fld.Type))
cols = append(cols, rel.NewResultColumn(fld.Name, len(cols), nil, fld.ValueType()))
}
} else if sql.CountStar() {
// Count *
Expand Down Expand Up @@ -94,11 +94,11 @@ func (m *ResultReader) buildProjection() {
for _, col := range m.Req.sel.Columns {
if fld, ok := m.Req.tbl.FieldMap[col.SourceField]; ok {
//u.Debugf("column: %#v", col)
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.Type))
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.ValueType()))
} else {
if fld, ok := m.Req.tbl.FieldMap[col.As]; ok {
//u.Debugf("column: %#v", col)
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.Type))
cols = append(cols, rel.NewResultColumn(col.SourceField, len(cols), col, fld.ValueType()))
} else {
//u.Debugf("col %#v", col)
//u.Debugf("Could not find: %v sourcefield?%v fields:%v", col.As, col.SourceField, m.Req.tbl.Columns())
Expand Down
2 changes: 1 addition & 1 deletion backends/files/filesource_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -229,7 +229,7 @@ func TestSelectFilesList(t *testing.T) {
}{}
validateQuerySpec(t, tu.QuerySpec{
Sql: "select file, `table`, size, partition from localfiles_files",
ExpectRowCt: 2,
ExpectRowCt: 3,
ValidateRowData: func() {
u.Infof("%v", data)
// assert.True(t, data.Deleted == false, "Not deleted? %v", data)
Expand Down
3 changes: 3 additions & 0 deletions backends/mongo/sql_to_mgo.go
Original file line number Diff line number Diff line change
Expand Up @@ -261,6 +261,9 @@ func (m *SqlToMgo) eval(arg expr.Node) (value.Value, bool, bool) {
return value.NewBoolValue(arg.Bool()), true, false
}
return value.NewStringValue(arg.Text), true, true
case *expr.ArrayNode:
val, ok := vm.Eval(nil, arg)
return val, ok, false
}
return nil, false, false
}
Expand Down
8 changes: 4 additions & 4 deletions frontends/mysqlfe/sql_writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ func typeToMysql(f *schema.Field) string {
// char(60)
// varchar(255)
// text
switch f.Type {
switch f.ValueType() {
case value.IntType:
if f.Length == 64 {
return "bigint"
Expand Down Expand Up @@ -134,7 +134,7 @@ func fieldDescribe(proj *rel.Projection, f *schema.Field) []driver.Value {
typeToMysql(f),
null,
f.Key,
f.DefaultValue,
string(f.DefVal),
f.Description,
}
}
Expand All @@ -149,7 +149,7 @@ func fieldDescribe(proj *rel.Projection, f *schema.Field) []driver.Value {
"", // collation
null,
f.Key,
f.DefaultValue,
string(f.DefVal),
f.Extra,
privileges,
f.Description,
Expand Down Expand Up @@ -178,7 +178,7 @@ func TableCreate(tbl *schema.Table) (string, error) {
func writeField(w *bytes.Buffer, fld *schema.Field) {
fmt.Fprintf(w, "`%s` ", fld.Name)
deflen := fld.Length
switch fld.Type {
switch fld.ValueType() {
case value.BoolType:
fmt.Fprint(w, "tinyint(1) DEFAULT NULL")
case value.IntType:
Expand Down
2 changes: 1 addition & 1 deletion planner/sql_master.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ func (m *sqlMasterTask) init() error {
}
} else if f.Tbl.PartitionCt > 0 {
m.partitions = make([]string, f.Tbl.PartitionCt)
m.actorCt = f.Tbl.PartitionCt
m.actorCt = int(f.Tbl.PartitionCt)
for i := 0; i < m.actorCt; i++ {
//u.Warnf("Found Partitions for %q = %#v", f.Tbl.Name, i)
m.partitions[i] = fmt.Sprintf("%d", i)
Expand Down

0 comments on commit 3312170

Please sign in to comment.